diff --git a/.github/workflows/create_release.yml b/.github/workflows/create_release.yml index eca6eabb218..4e20472994b 100644 --- a/.github/workflows/create_release.yml +++ b/.github/workflows/create_release.yml @@ -20,6 +20,7 @@ jobs: # the tag MAJOR.MINOR.PATCH event, but we still need to deploy the maven-release-plugin master commit. token: ${{ secrets.GH_TOKEN }} fetch-depth: 1 # only need the HEAD commit as license check isn't run + submodules: true - name: Cache local Maven repository uses: actions/cache@v2 with: diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml index 0454fe71e1e..8b5fe603222 100644 --- a/.github/workflows/deploy.yml +++ b/.github/workflows/deploy.yml @@ -23,6 +23,7 @@ jobs: # We push Javadocs to the gh-pages branch on commit. token: ${{ secrets.GH_TOKEN }} fetch-depth: 0 # allow build-bin/idl_to_gh_pages to get the full history + submodules: true - name: Cache local Maven repository uses: actions/cache@v2 with: diff --git a/.github/workflows/docker_push.yml b/.github/workflows/docker_push.yml index 64a83bc54d0..71d21c6b3b1 100644 --- a/.github/workflows/docker_push.yml +++ b/.github/workflows/docker_push.yml @@ -17,6 +17,7 @@ jobs: uses: actions/checkout@v2 with: fetch-depth: 1 # only needed to get the sha label + submodules: true # We can't cache Docker without using buildx because GH actions restricts /var/lib/docker # That's ok because DOCKER_PARENT_IMAGE is always ghcr.io and local anyway. 
- name: Docker Push diff --git a/.github/workflows/helm_release.yml b/.github/workflows/helm_release.yml index b315b0c27ba..3a80465b014 100644 --- a/.github/workflows/helm_release.yml +++ b/.github/workflows/helm_release.yml @@ -14,6 +14,8 @@ jobs: steps: - name: Checkout uses: actions/checkout@v1 + with: + submodules: true - name: Configure Git run: | diff --git a/.github/workflows/helm_test.yml b/.github/workflows/helm_test.yml index 2b766e224bd..dfa26fcb4e4 100644 --- a/.github/workflows/helm_test.yml +++ b/.github/workflows/helm_test.yml @@ -16,6 +16,8 @@ jobs: steps: - name: Checkout uses: actions/checkout@v1 + with: + submodules: true - name: Configure Git run: | diff --git a/.github/workflows/readme_test.yml b/.github/workflows/readme_test.yml index 10019bd6b54..fd81d155392 100644 --- a/.github/workflows/readme_test.yml +++ b/.github/workflows/readme_test.yml @@ -30,6 +30,7 @@ jobs: uses: actions/checkout@v2 with: fetch-depth: 1 + submodules: true # Setup latest JDK. We do this to ensure users don't need to use the same version as our # release process. Release uses JDK 11, the last version that can target 1.6 bytecode. 
- name: Setup java @@ -82,7 +83,7 @@ jobs: key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }} restore-keys: ${{ runner.os }}-maven- - name: zipkin-server/README.md # Tests the build which is re-used for a few images - run: ./mvnw -T1C -q --batch-mode -DskipTests --also-make -pl zipkin-server clean package + run: ./mvnw -T1C -q --batch-mode -DskipTests -Dcheckstyle.skip=true --also-make -pl zipkin-server clean package - name: docker/README.md - openzipkin/zipkin run: | build-bin/docker/docker_build openzipkin/zipkin:test && @@ -102,34 +103,4 @@ jobs: build-bin/docker/docker_test_image openzipkin/zipkin-ui:test env: DOCKER_FILE: docker/test-images/zipkin-ui/Dockerfile - RELEASE_FROM_MAVEN_BUILD: true - - name: docker/test-images/zipkin-cassandra/README.md - run: | - build-bin/docker/docker_build openzipkin/zipkin-cassandra:test && - build-bin/docker/docker_test_image openzipkin/zipkin-cassandra:test - env: - DOCKER_FILE: docker/test-images/zipkin-cassandra/Dockerfile - - name: docker/test-images/zipkin-elasticsearch6/README.md - run: | - build-bin/docker/docker_build openzipkin/zipkin-elasticsearch6:test && - build-bin/docker/docker_test_image openzipkin/zipkin-elasticsearch6:test - env: - DOCKER_FILE: docker/test-images/zipkin-elasticsearch6/Dockerfile - - name: docker/test-images/zipkin-elasticsearch7/README.md - run: | - build-bin/docker/docker_build openzipkin/zipkin-elasticsearch7:test && - build-bin/docker/docker_test_image openzipkin/zipkin-elasticsearch7:test - env: - DOCKER_FILE: docker/test-images/zipkin-elasticsearch7/Dockerfile - - name: docker/test-images/zipkin-kafka/README.md - run: | - build-bin/docker/docker_build openzipkin/zipkin-kafka:test && - build-bin/docker/docker_test_image openzipkin/zipkin-kafka:test - env: - DOCKER_FILE: docker/test-images/zipkin-kafka/Dockerfile - - name: docker/test-images/zipkin-mysql/README.md - run: | - build-bin/docker/docker_build openzipkin/zipkin-mysql:test && - build-bin/docker/docker_test_image 
openzipkin/zipkin-mysql:test - env: - DOCKER_FILE: docker/test-images/zipkin-mysql/Dockerfile + RELEASE_FROM_MAVEN_BUILD: true \ No newline at end of file diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml deleted file mode 100644 index ab4f765aa0e..00000000000 --- a/.github/workflows/test.yml +++ /dev/null @@ -1,72 +0,0 @@ -# yamllint --format github .github/workflows/test.yml ---- -name: test - -# We don't test documentation-only commits. -on: - # We run tests on non-tagged pushes to master that aren't a commit made by the release plugin - push: - tags: "" - branches: master - paths-ignore: - - "**/*.md" - - "charts/**" - # We also run tests on pull requests targeted at the master branch. - pull_request: - branches: master - paths-ignore: - - "**/*.md" - - "charts/**" - -jobs: - test: - runs-on: ubuntu-20.04 # newest available distribution, aka focal - if: "!contains(github.event.head_commit.message, 'maven-release-plugin')" - steps: - - name: Checkout Repository - uses: actions/checkout@v2 - with: - fetch-depth: 0 # full git history for license check - - name: Cache local Maven repository - uses: actions/cache@v2 - with: - path: ~/.m2/repository - key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }} - restore-keys: ${{ runner.os }}-maven- - - name: Cache NPM Packages - uses: actions/cache@v2 - with: - path: ~/.npm - key: ${{ runner.os }}-npm-packages-${{ hashFiles('zipkin-lens/package-lock.json') }} - - name: Test without Docker - run: build-bin/maven_go_offline && build-bin/test -Ddocker.skip=true - test_docker: - runs-on: ubuntu-20.04 # newest available distribution, aka focal - if: "!contains(github.event.head_commit.message, 'maven-release-plugin')" - strategy: - matrix: - include: - - name: zipkin-collector-kafka - - name: zipkin-collector-rabbitmq - - name: zipkin-storage-cassandra - - name: zipkin-storage-elasticsearch - - name: zipkin-storage-mysql-v1 - steps: - - name: Checkout Repository - uses: actions/checkout@v2 - with: - 
fetch-depth: 1 # -Dlicense.skip=true so we don't need a full clone - - name: Cache local Maven repository - uses: actions/cache@v2 - with: - path: ~/.m2/repository - key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }} - restore-keys: ${{ runner.os }}-maven- - # We can't cache Docker without using buildx because GH actions restricts /var/lib/docker - # That's ok because DOCKER_PARENT_IMAGE is always ghcr.io and local anyway. - - name: Test with Docker - run: - | # configure_test seeds NPM cache, which isn't needed for these tests - build-bin/maven/maven_go_offline && - build-bin/docker/configure_docker && - build-bin/test -pl :${{ matrix.name }} --am -Dlicense.skip=true diff --git a/.gitignore b/.gitignore index bc8776b02e7..0f257f47102 100644 --- a/.gitignore +++ b/.gitignore @@ -26,3 +26,5 @@ _site/ # This project does not use Yarn but some developers may use it to e.g., start zipkin-lens dev server. # It doesn't hurt to just exclude it here. yarn.lock + +zipkin-server/server-starter/src/main/resources/version.properties \ No newline at end of file diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 00000000000..10610c8508d --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "skywalking"] + path = skywalking + url = https://github.com/apache/skywalking.git diff --git a/benchmarks/pom.xml b/benchmarks/pom.xml index d43647bdb0a..fdc6412e56c 100644 --- a/benchmarks/pom.xml +++ b/benchmarks/pom.xml @@ -105,11 +105,6 @@ zipkin-server ${project.version} - - ${project.groupId}.zipkin2 - zipkin-storage-elasticsearch - ${project.version} - com.squareup.wire wire-runtime @@ -127,18 +122,6 @@ test - - - ${project.groupId}.zipkin2 - zipkin-storage-cassandra - ${project.version} - - - io.netty - * - - - com.datastax.oss java-driver-core @@ -151,12 +134,6 @@ - - ${project.groupId}.zipkin2 - zipkin-storage-mysql-v1 - ${project.version} - test - org.mariadb.jdbc mariadb-java-client diff --git 
a/benchmarks/src/main/java/zipkin2/collector/MetricsBenchmarks.java b/benchmarks/src/main/java/zipkin2/collector/MetricsBenchmarks.java deleted file mode 100644 index 420b5c65113..00000000000 --- a/benchmarks/src/main/java/zipkin2/collector/MetricsBenchmarks.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.collector; - -import io.micrometer.core.instrument.MeterRegistry; -import io.micrometer.prometheus.PrometheusConfig; -import io.micrometer.prometheus.PrometheusMeterRegistry; -import java.util.concurrent.TimeUnit; -import org.openjdk.jmh.annotations.Benchmark; -import org.openjdk.jmh.annotations.BenchmarkMode; -import org.openjdk.jmh.annotations.Fork; -import org.openjdk.jmh.annotations.Measurement; -import org.openjdk.jmh.annotations.Mode; -import org.openjdk.jmh.annotations.OutputTimeUnit; -import org.openjdk.jmh.annotations.Scope; -import org.openjdk.jmh.annotations.State; -import org.openjdk.jmh.annotations.Threads; -import org.openjdk.jmh.annotations.Warmup; -import org.openjdk.jmh.runner.Runner; -import org.openjdk.jmh.runner.RunnerException; -import org.openjdk.jmh.runner.options.Options; -import org.openjdk.jmh.runner.options.OptionsBuilder; -import zipkin2.server.internal.MicrometerCollectorMetrics; - -@Measurement(iterations = 80, time = 1) -@Warmup(iterations = 20, time = 1) -@Fork(3) -@BenchmarkMode(Mode.AverageTime) 
-@OutputTimeUnit(TimeUnit.MICROSECONDS) -@State(Scope.Thread) -@Threads(1) -public class MetricsBenchmarks { - static final int LONG_SPAN = 5000; - static final int MEDIUM_SPAN = 1000; - static final int SHORT_SPAN = 500; - private MeterRegistry registry = new PrometheusMeterRegistry(PrometheusConfig.DEFAULT); - private InMemoryCollectorMetrics inMemoryCollectorMetrics = new InMemoryCollectorMetrics(); - private MicrometerCollectorMetrics micrometerCollectorMetrics = new MicrometerCollectorMetrics(registry); - - @Benchmark - public int incrementBytes_longSpans_inMemory() { - return incrementBytes(inMemoryCollectorMetrics, LONG_SPAN); - } - - @Benchmark - public int incrementBytes_longSpans_Actuate() { - return incrementBytes(micrometerCollectorMetrics, LONG_SPAN); - } - - @Benchmark - public int incrementBytes_mediumSpans_inMemory() { - return incrementBytes(inMemoryCollectorMetrics, MEDIUM_SPAN); - } - - @Benchmark - public int incrementBytes_mediumSpans_Actuate() { - return incrementBytes(micrometerCollectorMetrics, MEDIUM_SPAN); - } - - @Benchmark - public int incrementBytes_shortSpans_inMemory() { - return incrementBytes(inMemoryCollectorMetrics, SHORT_SPAN); - } - - @Benchmark - public int incrementBytes_shortSpans_Actuate() { - return incrementBytes(micrometerCollectorMetrics, SHORT_SPAN); - } - - private int incrementBytes(CollectorMetrics collectorMetrics, int bytes) { - collectorMetrics.incrementBytes(bytes); - return bytes; - } - - // Convenience main entry-point - public static void main(String[] args) throws RunnerException { - Options opt = new OptionsBuilder() - .include(".*" + MetricsBenchmarks.class.getSimpleName() + ".*") - .threads(40) - .build(); - - new Runner(opt).run(); - } -} diff --git a/benchmarks/src/main/java/zipkin2/elasticsearch/internal/BulkRequestBenchmarks.java b/benchmarks/src/main/java/zipkin2/elasticsearch/internal/BulkRequestBenchmarks.java deleted file mode 100644 index 051d046ccc4..00000000000 --- 
a/benchmarks/src/main/java/zipkin2/elasticsearch/internal/BulkRequestBenchmarks.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.elasticsearch.internal; - -import com.linecorp.armeria.common.HttpRequest; -import com.linecorp.armeria.common.HttpRequestWriter; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.PooledByteBufAllocator; -import java.util.concurrent.TimeUnit; -import org.openjdk.jmh.annotations.Benchmark; -import org.openjdk.jmh.annotations.BenchmarkMode; -import org.openjdk.jmh.annotations.Fork; -import org.openjdk.jmh.annotations.Measurement; -import org.openjdk.jmh.annotations.Mode; -import org.openjdk.jmh.annotations.OutputTimeUnit; -import org.openjdk.jmh.annotations.Scope; -import org.openjdk.jmh.annotations.State; -import org.openjdk.jmh.annotations.Threads; -import org.openjdk.jmh.annotations.Warmup; -import org.openjdk.jmh.runner.Runner; -import org.openjdk.jmh.runner.RunnerException; -import org.openjdk.jmh.runner.options.Options; -import org.openjdk.jmh.runner.options.OptionsBuilder; -import zipkin2.Span; -import zipkin2.codec.SpanBytesDecoder; -import zipkin2.elasticsearch.ElasticsearchStorage; -import zipkin2.elasticsearch.internal.BulkCallBuilder.IndexEntry; -import zipkin2.elasticsearch.internal.client.HttpCall; - -import static java.nio.charset.StandardCharsets.UTF_8; -import static 
zipkin2.elasticsearch.ElasticsearchVersion.V6_0; -import static zipkin2.storage.cassandra.internal.Resources.resourceToString; - -@Measurement(iterations = 5, time = 1) -@Warmup(iterations = 10, time = 1) -@Fork(3) -@BenchmarkMode(Mode.SampleTime) -@OutputTimeUnit(TimeUnit.MICROSECONDS) -@State(Scope.Thread) -@Threads(2) -public class BulkRequestBenchmarks { - static final Span CLIENT_SPAN = - SpanBytesDecoder.JSON_V2.decodeOne(resourceToString("/zipkin2-client.json").getBytes(UTF_8)); - - final ElasticsearchStorage es = ElasticsearchStorage.newBuilder(() -> null).build(); - final long indexTimestamp = CLIENT_SPAN.timestampAsLong() / 1000L; - final String spanIndex = - es.indexNameFormatter().formatTypeAndTimestampForInsert("span", '-', indexTimestamp); - final IndexEntry entry = - BulkCallBuilder.newIndexEntry(spanIndex, "span", CLIENT_SPAN, BulkIndexWriter.SPAN); - - @Benchmark public ByteBuf writeRequest_singleSpan() { - return BulkCallBuilder.serialize(PooledByteBufAllocator.DEFAULT, entry, true); - } - - @Benchmark public HttpRequest buildAndWriteRequest_singleSpan() { - BulkCallBuilder builder = new BulkCallBuilder(es, V6_0, "index-span"); - builder.index(spanIndex, "span", CLIENT_SPAN, BulkIndexWriter.SPAN); - HttpCall.RequestSupplier supplier = builder.build().request; - HttpRequestWriter request = HttpRequest.streaming(supplier.headers()); - supplier.writeBody(request::tryWrite); - return request; - } - - @Benchmark public HttpRequest buildAndWriteRequest_tenSpans() { - BulkCallBuilder builder = new BulkCallBuilder(es, V6_0, "index-span"); - for (int i = 0; i < 10; i++) { - builder.index(spanIndex, "span", CLIENT_SPAN, BulkIndexWriter.SPAN); - } - HttpCall.RequestSupplier supplier = builder.build().request; - HttpRequestWriter request = HttpRequest.streaming(supplier.headers()); - supplier.writeBody(request::tryWrite); - return request; - } - - // Convenience main entry-point - public static void main(String[] args) throws RunnerException { - Options opt = 
new OptionsBuilder() - .addProfiler("gc") - .include(".*" + BulkRequestBenchmarks.class.getSimpleName() + ".*") - .build(); - - new Runner(opt).run(); - } -} diff --git a/benchmarks/src/main/java/zipkin2/internal/DelayLimiterBenchmarks.java b/benchmarks/src/main/java/zipkin2/internal/DelayLimiterBenchmarks.java deleted file mode 100644 index 87c44bdd1e2..00000000000 --- a/benchmarks/src/main/java/zipkin2/internal/DelayLimiterBenchmarks.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.internal; - -import java.util.Random; -import java.util.concurrent.TimeUnit; -import org.openjdk.jmh.annotations.Benchmark; -import org.openjdk.jmh.annotations.BenchmarkMode; -import org.openjdk.jmh.annotations.Fork; -import org.openjdk.jmh.annotations.Measurement; -import org.openjdk.jmh.annotations.Mode; -import org.openjdk.jmh.annotations.OutputTimeUnit; -import org.openjdk.jmh.annotations.Scope; -import org.openjdk.jmh.annotations.State; -import org.openjdk.jmh.annotations.Threads; -import org.openjdk.jmh.annotations.Warmup; -import org.openjdk.jmh.runner.Runner; -import org.openjdk.jmh.runner.RunnerException; -import org.openjdk.jmh.runner.options.Options; -import org.openjdk.jmh.runner.options.OptionsBuilder; - -@Measurement(iterations = 5, time = 1) -@Warmup(iterations = 10, time = 1) -@Fork(3) -@BenchmarkMode(Mode.SampleTime) -@OutputTimeUnit(TimeUnit.MICROSECONDS) -@State(Scope.Thread) -@Threads(2) -public class DelayLimiterBenchmarks { - - final Random rng = new Random(); - final DelayLimiter limiter = DelayLimiter.newBuilder() - .ttl(1L, TimeUnit.HOURS) // legacy default from Cassandra - .cardinality(5 * 4000) // Ex. 
5 site tags with cardinality 4000 each - .build(); - - @Benchmark public boolean shouldInvoke_randomData() { - return limiter.shouldInvoke(rng.nextLong()); - } - - @Benchmark public boolean shouldInvoke_sameData() { - return limiter.shouldInvoke(1L); - } - - // Convenience main entry-point - public static void main(String[] args) throws RunnerException { - Options opt = new OptionsBuilder() - .addProfiler("gc") - .include(".*" + DelayLimiterBenchmarks.class.getSimpleName() + ".*") - .build(); - - new Runner(opt).run(); - } -} diff --git a/benchmarks/src/main/java/zipkin2/internal/ReadBufferBenchmarks.java b/benchmarks/src/main/java/zipkin2/internal/ReadBufferBenchmarks.java deleted file mode 100644 index 7a7ad10b780..00000000000 --- a/benchmarks/src/main/java/zipkin2/internal/ReadBufferBenchmarks.java +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.internal; - -import java.util.concurrent.TimeUnit; -import org.openjdk.jmh.annotations.Benchmark; -import org.openjdk.jmh.annotations.BenchmarkMode; -import org.openjdk.jmh.annotations.Fork; -import org.openjdk.jmh.annotations.Measurement; -import org.openjdk.jmh.annotations.Mode; -import org.openjdk.jmh.annotations.OutputTimeUnit; -import org.openjdk.jmh.annotations.Scope; -import org.openjdk.jmh.annotations.State; -import org.openjdk.jmh.annotations.Threads; -import org.openjdk.jmh.annotations.Warmup; -import org.openjdk.jmh.runner.Runner; -import org.openjdk.jmh.runner.RunnerException; -import org.openjdk.jmh.runner.options.Options; -import org.openjdk.jmh.runner.options.OptionsBuilder; - -@Measurement(iterations = 5, time = 1) -@Warmup(iterations = 10, time = 1) -@Fork(3) -@BenchmarkMode(Mode.SampleTime) -@OutputTimeUnit(TimeUnit.MICROSECONDS) -@State(Scope.Thread) -@Threads(1) -public class ReadBufferBenchmarks { - byte[] longBuff = { - (byte) 0x01, (byte) 0x02, (byte) 0x03, (byte) 0x04, - (byte) 0x05, (byte) 0x06, (byte) 0x07, (byte) 0x08, - }; - - @Benchmark public long readLong() { - int pos = 0; - return (longBuff[pos] & 0xffL) << 56 - | (longBuff[pos + 1] & 0xffL) << 48 - | (longBuff[pos + 2] & 0xffL) << 40 - | (longBuff[pos + 3] & 0xffL) << 32 - | (longBuff[pos + 4] & 0xffL) << 24 - | (longBuff[pos + 5] & 0xffL) << 16 - | (longBuff[pos + 6] & 0xffL) << 8 - | (longBuff[pos + 7] & 0xffL); - } - - @Benchmark public long readLong_localArray() { - int pos = 0; - byte[] longBuff = this.longBuff; - return (longBuff[pos] & 0xffL) << 56 - | (longBuff[pos + 1] & 0xffL) << 48 - | (longBuff[pos + 2] & 0xffL) << 40 - | (longBuff[pos + 3] & 0xffL) << 32 - | (longBuff[pos + 4] & 0xffL) << 24 - | (longBuff[pos + 5] & 0xffL) << 16 - | (longBuff[pos + 6] & 0xffL) << 8 - | (longBuff[pos + 7] & 0xffL); - } - - @Benchmark public long readLong_8arity_localArray() { - int pos = 0; - return readLong( - longBuff[pos] & 0xff, - longBuff[pos + 1] & 0xff, - 
longBuff[pos + 2] & 0xff, - longBuff[pos + 3] & 0xff, - longBuff[pos + 4] & 0xff, - longBuff[pos + 5] & 0xff, - longBuff[pos + 6] & 0xff, - longBuff[pos + 7] & 0xff - ); - } - - @Benchmark public long readLong_8arity() { - int pos = 0; - byte[] longBuff = this.longBuff; - return readLong( - longBuff[pos] & 0xff, - longBuff[pos + 1] & 0xff, - longBuff[pos + 2] & 0xff, - longBuff[pos + 3] & 0xff, - longBuff[pos + 4] & 0xff, - longBuff[pos + 5] & 0xff, - longBuff[pos + 6] & 0xff, - longBuff[pos + 7] & 0xff - ); - } - - static long readLong(int p0, int p1, int p2, int p3, int p4, int p5, int p6, int p7) { - return (p0 & 0xffL) << 56 - | (p1 & 0xffL) << 48 - | (p2 & 0xffL) << 40 - | (p3 & 0xffL) << 32 - | (p4 & 0xffL) << 24 - | (p5 & 0xffL) << 16 - | (p6 & 0xffL) << 8 - | (p7 & 0xffL); - } - - @Benchmark public long readLongReverseBytes() { - return Long.reverseBytes(readLong()); - } - - // Convenience main entry-point - public static void main(String[] args) throws RunnerException { - Options opt = new OptionsBuilder() - .include(".*" + ReadBufferBenchmarks.class.getSimpleName() + ".*") - .addProfiler("gc") - .build(); - - new Runner(opt).run(); - } -} diff --git a/benchmarks/src/main/java/zipkin2/internal/WriteBufferBenchmarks.java b/benchmarks/src/main/java/zipkin2/internal/WriteBufferBenchmarks.java deleted file mode 100644 index b6926778e69..00000000000 --- a/benchmarks/src/main/java/zipkin2/internal/WriteBufferBenchmarks.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. 
See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.internal; - -import java.nio.ByteBuffer; -import java.nio.charset.Charset; -import java.util.concurrent.TimeUnit; -import org.openjdk.jmh.annotations.Benchmark; -import org.openjdk.jmh.annotations.BenchmarkMode; -import org.openjdk.jmh.annotations.Fork; -import org.openjdk.jmh.annotations.Measurement; -import org.openjdk.jmh.annotations.Mode; -import org.openjdk.jmh.annotations.OutputTimeUnit; -import org.openjdk.jmh.annotations.Scope; -import org.openjdk.jmh.annotations.State; -import org.openjdk.jmh.annotations.Threads; -import org.openjdk.jmh.annotations.Warmup; -import org.openjdk.jmh.runner.Runner; -import org.openjdk.jmh.runner.RunnerException; -import org.openjdk.jmh.runner.options.Options; -import org.openjdk.jmh.runner.options.OptionsBuilder; - -@Measurement(iterations = 5, time = 1) -@Warmup(iterations = 10, time = 1) -@Fork(3) -@BenchmarkMode(Mode.AverageTime) -@OutputTimeUnit(TimeUnit.MICROSECONDS) -@State(Scope.Thread) -@Threads(1) -public class WriteBufferBenchmarks { - static final Charset UTF_8 = Charset.forName("UTF-8"); - // Order id = d07c4daa-0fa9-4c03-90b1-e06c4edae250 doesn't exist - static final String CHINESE_UTF8 = "订单d07c4daa-0fa9-4c03-90b1-e06c4edae250不存在"; - static final int CHINESE_UTF8_SIZE = UTF_8.encode(CHINESE_UTF8).remaining(); - /* length-prefixing a 1 KiB span */ - static final int TEST_INT = 1024; - /* epoch micros timestamp */ - static final long TEST_LONG = 1472470996199000L; - byte[] bytes = new byte[8]; - WriteBuffer buffer = WriteBuffer.wrap(bytes); - - @Benchmark public int utf8SizeInBytes_chinese() { - return WriteBuffer.utf8SizeInBytes(CHINESE_UTF8); - } - - @Benchmark public byte[] writeUtf8_chinese() { - byte[] bytesUtf8 = new byte[CHINESE_UTF8_SIZE]; - WriteBuffer.wrap(bytesUtf8, 0).writeUtf8(CHINESE_UTF8); - return bytesUtf8; - } - - @Benchmark public ByteBuffer writeUtf8_chinese_jdk() { - 
return UTF_8.encode(CHINESE_UTF8); - } - - @Benchmark public int varIntSizeInBytes_32() { - return WriteBuffer.varintSizeInBytes(TEST_INT); - } - - @Benchmark public int varIntSizeInBytes_64() { - return WriteBuffer.varintSizeInBytes(TEST_LONG); - } - - @Benchmark public int writeVarint_32() { - buffer.writeVarint(TEST_INT); - return buffer.pos(); - } - - @Benchmark public int writeVarint_64() { - buffer.writeVarint(TEST_LONG); - return buffer.pos(); - } - - @Benchmark public int writeLongLe() { - buffer.writeLongLe(TEST_LONG); - return buffer.pos(); - } - - // Convenience main entry-point - public static void main(String[] args) throws RunnerException { - Options opt = new OptionsBuilder() - .include(".*" + WriteBufferBenchmarks.class.getSimpleName() + ".*") - .build(); - - new Runner(opt).run(); - } -} diff --git a/benchmarks/src/main/java/zipkin2/server/internal/throttle/ThrottledCallBenchmarks.java b/benchmarks/src/main/java/zipkin2/server/internal/throttle/ThrottledCallBenchmarks.java deleted file mode 100644 index 5e9ce58bef1..00000000000 --- a/benchmarks/src/main/java/zipkin2/server/internal/throttle/ThrottledCallBenchmarks.java +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal.throttle; - -import com.linecorp.armeria.common.metric.NoopMeterRegistry; -import com.netflix.concurrency.limits.limit.FixedLimit; -import com.netflix.concurrency.limits.limiter.SimpleLimiter; -import java.io.IOException; -import java.util.concurrent.Executor; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.RejectedExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.function.Predicate; -import org.openjdk.jmh.annotations.Benchmark; -import org.openjdk.jmh.annotations.BenchmarkMode; -import org.openjdk.jmh.annotations.Fork; -import org.openjdk.jmh.annotations.Measurement; -import org.openjdk.jmh.annotations.Mode; -import org.openjdk.jmh.annotations.OutputTimeUnit; -import org.openjdk.jmh.annotations.Scope; -import org.openjdk.jmh.annotations.Setup; -import org.openjdk.jmh.annotations.State; -import org.openjdk.jmh.annotations.TearDown; -import org.openjdk.jmh.annotations.Threads; -import org.openjdk.jmh.annotations.Warmup; -import org.openjdk.jmh.runner.Runner; -import org.openjdk.jmh.runner.RunnerException; -import org.openjdk.jmh.runner.options.Options; -import org.openjdk.jmh.runner.options.OptionsBuilder; -import zipkin2.Call; -import zipkin2.Callback; - -@Measurement(iterations = 5, time = 1) -@Warmup(iterations = 10, time = 1) -@Fork(3) -@BenchmarkMode(Mode.SampleTime) -@OutputTimeUnit(TimeUnit.MICROSECONDS) -@State(Scope.Thread) -@Threads(2) -public class ThrottledCallBenchmarks { - ExecutorService fakeCallExecutor = Executors.newSingleThreadExecutor(); - ExecutorService executor = Executors.newSingleThreadExecutor(); - ThrottledCall call; - - @Setup public void setup() { - executor = Executors.newSingleThreadExecutor(); - fakeCallExecutor = Executors.newSingleThreadExecutor(); - SimpleLimiter limiter = SimpleLimiter.newBuilder().limit(FixedLimit.of(1)).build(); - LimiterMetrics metrics = new 
LimiterMetrics(NoopMeterRegistry.get()); - Predicate isOverCapacity = RejectedExecutionException.class::isInstance; - call = - new ThrottledCall(new FakeCall(fakeCallExecutor), executor, limiter, metrics, isOverCapacity); - } - - @TearDown public void tearDown() { - executor.shutdown(); - fakeCallExecutor.shutdown(); - } - - @Benchmark public Object execute() throws IOException { - return call.clone().execute(); - } - - @Benchmark public void execute_overCapacity() throws IOException { - ThrottledCall overCapacity = (ThrottledCall) call.clone(); - ((FakeCall) overCapacity.delegate).overCapacity = true; - - try { - overCapacity.execute(); - } catch (RejectedExecutionException e) { - assert e == OVER_CAPACITY; - } - } - - @Benchmark public void execute_throttled() throws IOException { - call.limiter.acquire(null); // capacity is 1, so this will overdo it. - call.clone().execute(); - } - - static final RejectedExecutionException OVER_CAPACITY = new RejectedExecutionException(); - - static final class FakeCall extends Call.Base { - final Executor executor; - boolean overCapacity = false; - - FakeCall(Executor executor) { - this.executor = executor; - } - - @Override public Void doExecute() throws IOException { - if (overCapacity) throw OVER_CAPACITY; - return null; - } - - @Override public void doEnqueue(Callback callback) { - executor.execute(() -> { - if (overCapacity) { - callback.onError(OVER_CAPACITY); - } else { - callback.onSuccess(null); - } - }); - } - - @Override public FakeCall clone() { - return new FakeCall(executor); - } - } - - // Convenience main entry-point - public static void main(String[] args) throws RunnerException { - Options opt = new OptionsBuilder() - .addProfiler("gc") - .include(".*" + ThrottledCallBenchmarks.class.getSimpleName()) - .build(); - - new Runner(opt).run(); - } -} diff --git a/build-bin/maven/maven_build b/build-bin/maven/maven_build index edb9069fd02..5c28f0966ec 100755 --- a/build-bin/maven/maven_build +++ 
b/build-bin/maven/maven_build @@ -20,5 +20,5 @@ if [ -x ./mvnw ]; then alias mvn=${PWD}/mvnw; fi ( if [ "${MAVEN_PROJECT_BASEDIR:-.}" != "." ]; then cd ${MAVEN_PROJECT_BASEDIR}; fi - mvn -T1C -q --batch-mode -DskipTests package "$@" + mvn -T1C -q --batch-mode -DskipTests -Dcheckstyle.skip=true package "$@" ) diff --git a/build-bin/maven/maven_deploy b/build-bin/maven/maven_deploy index 2dd2ae2a267..1cde935ca36 100755 --- a/build-bin/maven/maven_deploy +++ b/build-bin/maven/maven_deploy @@ -20,4 +20,4 @@ export MAVEN_OPTS="$($(dirname "$0")/maven_opts)" # This script deploys a SNAPSHOT or release version to Sonatype. # # Note: In CI, `configure_maven_deploy` must be called before invoking this. -./mvnw --batch-mode -s ./.settings.xml -Prelease -nsu -DskipTests clean deploy $@ +./mvnw --batch-mode -s ./.settings.xml -Prelease -nsu -DskipTests -Dcheckstyle.skip=true clean deploy $@ diff --git a/build-bin/maven/maven_release b/build-bin/maven/maven_release index 0ef28e48117..7832ad6f79a 100755 --- a/build-bin/maven/maven_release +++ b/build-bin/maven/maven_release @@ -30,6 +30,7 @@ release_branch=${2:-master} # Checkout master, as we release from master, not a tag ref git fetch --no-tags --prune --depth=1 origin +refs/heads/${release_branch}:refs/remotes/origin/${release_branch} git checkout ${release_branch} +git submodule update --init --recursive # Ensure no one pushed commits since this release tag as it would fail later commands commit_local_release_branch=$(git show --pretty='format:%H' ${release_branch}) @@ -40,4 +41,4 @@ if [ "$commit_local_release_branch" != "$commit_remote_release_branch" ]; then fi # Prepare and push release commits and the version tag (N.N.N), which triggers deployment. 
-./mvnw --batch-mode -nsu -DreleaseVersion=${release_version} -Denforcer.fail=false -Darguments="-DskipTests -Denforcer.fail=false" release:prepare +./mvnw --batch-mode -nsu -DreleaseVersion=${release_version} -Denforcer.fail=false -Darguments="-DskipTests -Denforcer.fail=false -Dcheckstyle.skip=true" release:prepare diff --git a/docker/Dockerfile b/docker/Dockerfile index 75a3813acbc..9cc7ec9a222 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -79,7 +79,7 @@ LABEL org.opencontainers.image.description="Zipkin slim distribution on OpenJDK COPY --from=install --chown=${USER} /install/zipkin-slim/ /zipkin/ -EXPOSE 9411 +EXPOSE 9411 9412 FROM base-server as zipkin LABEL org.opencontainers.image.description="Zipkin full distribution on OpenJDK and Alpine Linux" @@ -90,4 +90,4 @@ ENV MODULE_OPTS= COPY --from=install --chown=${USER} /install/zipkin/ /zipkin/ # Zipkin's full distribution includes Scribe support (albeit disabled) -EXPOSE 9410 9411 +EXPOSE 9411 9412 diff --git a/pom.xml b/pom.xml index 497ea330398..e53edd70b8b 100755 --- a/pom.xml +++ b/pom.xml @@ -25,10 +25,6 @@ zipkin zipkin-tests - zipkin-junit - zipkin-junit5 - zipkin-storage - zipkin-collector zipkin-server @@ -522,7 +518,7 @@ - [1.8,16) + [1.8,18) diff --git a/skywalking b/skywalking new file mode 160000 index 00000000000..8e529ee9560 --- /dev/null +++ b/skywalking @@ -0,0 +1 @@ +Subproject commit 8e529ee95604fb01a8bd31c272763393f3c70525 diff --git a/zipkin-collector/README.md b/zipkin-collector/README.md deleted file mode 100644 index 38e824cfc6c..00000000000 --- a/zipkin-collector/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# zipkin-collector - -Modules here implement popular transport options available by default in -the [server build](../zipkin-server). - -Please note all modules here require JRE 8+ - -These libraries are also usable outside the server, for example in -custom collectors or storage pipelines. While compatibility guarantees -are strong, choices may be dropped over time. 
- -Collector modules ending in `-v1` are discouraged for new sites as they -use an older data model. At some point in the future, we will stop -publishing v1 collector options. diff --git a/zipkin-collector/activemq/RATIONALE.md b/zipkin-collector/activemq/RATIONALE.md deleted file mode 100644 index b4c589a6d65..00000000000 --- a/zipkin-collector/activemq/RATIONALE.md +++ /dev/null @@ -1,32 +0,0 @@ -# Rational for collector-activemq - -## Diverse need -ActiveMQ was formerly requested in April, 2018 through issue #1990 which had two other thumbs-up. An -early draft of this implementation was developed by @IAMTJW and resulting in another user asking for -it. In June of 2019 there were a couple more requests for this on gitter, notably about Amazon MQ. - -## On ActiveMQ 5.x -All users who expressed interest were interestd in ActiveMQ 5.x (aka Classic), not Artemis. -Moreover, at the time of creation Amazon MQ only supported ActiveMQ 5.x. - -Artemis has higher throughput potential, but has more conflicting dependencies and would add 8MiB to -the server. Moreover, no-one has asked for it. - -## On part of the default server -ActiveMQ's client is 2MiB, which will increase the jar size, something that we've been tracking -recently. To be fair, this is not a large module. In comparison, one dependency of Kafka, `zstd-jni` -alone is almost 4MiB. There are no dependencies likely to conflict at runtime, and only one dodgy -dependency, [hawtbuf](https://github.com/fusesource/hawtbuf), on account of it being abandoned since -2014. - -Apart from size, ActiveMQ is a stable integration, included in Spring Boot, and could be useful for -other integrations as an in-memory queue. Moreover, bundling makes integration with zipkin-aws far -easier in the same way as bundling elasticsearch does. - -## On a potential single-transport client - -This package is using the normal activemq-jms client. 
During a [mail thread](http://activemq.2283324.n4.nabble.com/Interest-in-using-ActiveMQ-as-a-trace-data-transport-for-Zipkin-td4749755.html), we learned the -the STOMP and AMQP 1.0 protocol are the more portable options for a portable integration as -ActiveMQ, Artemis and RabbitMQ all support these. On the other hand Kafka does not support these -protocols. Any future portability work could be limited by this. Meanwhile, using the standard JMS -client will make troubleshooting most natural to end users. diff --git a/zipkin-collector/activemq/README.md b/zipkin-collector/activemq/README.md deleted file mode 100644 index f5e6982eb19..00000000000 --- a/zipkin-collector/activemq/README.md +++ /dev/null @@ -1,21 +0,0 @@ -# collector-activemq - -## ActiveMQCollector -This collector consumes an ActiveMQ 5.x queue for messages that contain a list of spans. Underneath -this uses the ActiveMQ 5.x JMS client, which has two notable dependencies `slf4j-api` and `hawtbuf`. - -The message's binary data includes a list of spans. Supported encodings -are the same as the http [POST /spans](https://zipkin.io/zipkin-api/#/paths/%252Fspans) body. - -### Json -The message's binary data is a list of spans in json. The first character must be '[' (decimal 91). - -`Codec.JSON.writeSpans(spans)` performs the correct json encoding. 
- -Here's an example, sending a list of a single span to the zipkin queue: - -```bash -$ curl -u admin:admin -X POST -s localhost:8161/api/message/zipkin?type=queue \ - -H "Content-Type: application/json" \ - -d '[{"traceId":"1","name":"bang","id":"2","timestamp":1470150004071068,"duration":1,"localEndpoint":{"serviceName":"flintstones"},"tags":{"lc":"bamm-bamm"}}]' -``` diff --git a/zipkin-collector/activemq/pom.xml b/zipkin-collector/activemq/pom.xml deleted file mode 100644 index 41ef0468520..00000000000 --- a/zipkin-collector/activemq/pom.xml +++ /dev/null @@ -1,63 +0,0 @@ - - - - 4.0.0 - - - io.zipkin.zipkin2 - zipkin-collector-parent - 2.24.4-SNAPSHOT - - - zipkin-collector-activemq - Collector: ActiveMQ - Zipkin span collector for ActiveMQ transport - - - ${project.basedir}/../.. - 5.16.3 - - - - - ${project.groupId} - zipkin-collector - ${project.version} - - - - org.apache.activemq - activemq-client - ${activemq.version} - - - - org.apache.activemq - activemq-broker - ${activemq.version} - test - - - - org.apache.activemq.tooling - activemq-junit - ${activemq.version} - test - - - - diff --git a/zipkin-collector/activemq/src/main/java/zipkin2/collector/activemq/ActiveMQCollector.java b/zipkin-collector/activemq/src/main/java/zipkin2/collector/activemq/ActiveMQCollector.java deleted file mode 100644 index 9eadcde99b3..00000000000 --- a/zipkin-collector/activemq/src/main/java/zipkin2/collector/activemq/ActiveMQCollector.java +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. 
See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.collector.activemq; - -import java.io.IOException; -import java.io.UncheckedIOException; -import javax.jms.JMSException; -import org.apache.activemq.ActiveMQConnectionFactory; -import zipkin2.CheckResult; -import zipkin2.collector.Collector; -import zipkin2.collector.CollectorComponent; -import zipkin2.collector.CollectorMetrics; -import zipkin2.collector.CollectorSampler; -import zipkin2.storage.StorageComponent; - -/** This collector consumes encoded binary messages from a ActiveMQ queue. */ -public final class ActiveMQCollector extends CollectorComponent { - public static Builder builder() { - return new Builder(); - } - - /** Configuration including defaults needed to consume spans from a ActiveMQ queue. */ - public static final class Builder extends CollectorComponent.Builder { - Collector.Builder delegate = Collector.newBuilder(ActiveMQCollector.class); - CollectorMetrics metrics = CollectorMetrics.NOOP_METRICS; - ActiveMQConnectionFactory connectionFactory; - String queue = "zipkin"; - int concurrency = 1; - - @Override public Builder storage(StorageComponent storage) { - this.delegate.storage(storage); - return this; - } - - @Override public Builder sampler(CollectorSampler sampler) { - this.delegate.sampler(sampler); - return this; - } - - @Override public Builder metrics(CollectorMetrics metrics) { - if (metrics == null) throw new NullPointerException("metrics == null"); - this.metrics = metrics.forTransport("activemq"); - this.delegate.metrics(this.metrics); - return this; - } - - public Builder connectionFactory(ActiveMQConnectionFactory connectionFactory) { - if (connectionFactory == null) throw new NullPointerException("connectionFactory == null"); - this.connectionFactory = connectionFactory; - return this; - } - - /** Queue zipkin spans will be consumed from. Defaults to "zipkin". 
*/ - public Builder queue(String queue) { - if (queue == null) throw new NullPointerException("queue == null"); - this.queue = queue; - return this; - } - - /** Count of concurrent message listeners on the queue. Defaults to 1 */ - public Builder concurrency(int concurrency) { - if (concurrency < 1) throw new IllegalArgumentException("concurrency < 1"); - this.concurrency = concurrency; - return this; - } - - @Override public ActiveMQCollector build() { - if (connectionFactory == null) throw new NullPointerException("connectionFactory == null"); - return new ActiveMQCollector(this); - } - } - - final String queue; - final LazyInit lazyInit; - - ActiveMQCollector(Builder builder) { - this.queue = builder.queue; - this.lazyInit = new LazyInit(builder); - } - - @Override public ActiveMQCollector start() { - lazyInit.init(); - return this; - } - - @Override public CheckResult check() { - if (lazyInit.result == null) { - return CheckResult.failed(new IllegalStateException("Collector not yet started")); - } - return lazyInit.result.checkResult; - } - - @Override public void close() throws IOException { - lazyInit.close(); - } - - @Override public final String toString() { - return "ActiveMQCollector{" - + "brokerURL=" + lazyInit.connectionFactory.getBrokerURL() - + ", queue=" + lazyInit.queue - + "}"; - } - - static RuntimeException uncheckedException(String prefix, JMSException e) { - Exception cause = e.getLinkedException(); - if (cause instanceof IOException) { - return new UncheckedIOException(prefix + message(cause), (IOException) cause); - } - return new RuntimeException(prefix + message(e), e); - } - - static String message(Exception cause) { - return cause.getMessage() != null ? 
cause.getMessage() : cause.getClass().getSimpleName(); - } -} diff --git a/zipkin-collector/activemq/src/main/java/zipkin2/collector/activemq/ActiveMQSpanConsumer.java b/zipkin-collector/activemq/src/main/java/zipkin2/collector/activemq/ActiveMQSpanConsumer.java deleted file mode 100644 index 9cfdb19a41f..00000000000 --- a/zipkin-collector/activemq/src/main/java/zipkin2/collector/activemq/ActiveMQSpanConsumer.java +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.collector.activemq; - -import java.io.Closeable; -import java.io.IOException; -import java.util.LinkedHashMap; -import java.util.Map; -import javax.jms.BytesMessage; -import javax.jms.JMSException; -import javax.jms.Message; -import javax.jms.MessageListener; -import javax.jms.Queue; -import javax.jms.QueueReceiver; -import javax.jms.QueueSession; -import javax.jms.Session; -import javax.jms.TextMessage; -import org.apache.activemq.ActiveMQConnection; -import org.apache.activemq.transport.TransportListener; -import zipkin2.Callback; -import zipkin2.CheckResult; -import zipkin2.collector.Collector; -import zipkin2.collector.CollectorMetrics; - -import static java.nio.charset.StandardCharsets.UTF_8; - -/** - * Consumes spans from messages on a ActiveMQ queue. Malformed messages will be discarded. Errors in - * the storage component will similarly be ignored, with no retry of the message. 
- */ -final class ActiveMQSpanConsumer implements TransportListener, MessageListener, Closeable { - static final Callback NOOP = new Callback() { - @Override public void onSuccess(Void value) { - } - - @Override public void onError(Throwable t) { - } - }; - - static final CheckResult - CLOSED = CheckResult.failed(new IllegalStateException("Collector intentionally closed")), - INTERRUPTION = CheckResult.failed(new IOException("Recoverable error on ActiveMQ connection")); - - final Collector collector; - final CollectorMetrics metrics; - - final ActiveMQConnection connection; - final Map sessionToReceiver = new LinkedHashMap<>(); - - volatile CheckResult checkResult = CheckResult.OK; - - ActiveMQSpanConsumer(Collector collector, CollectorMetrics metrics, ActiveMQConnection conn) { - this.collector = collector; - this.metrics = metrics; - this.connection = conn; - connection.addTransportListener(this); - } - - /** JMS contract is one session per thread: we need a new session up to our concurrency level. 
*/ - void registerInNewSession(ActiveMQConnection connection, String queue) throws JMSException { - // Pass redundant info as we can't use default method in activeMQ - QueueSession session = connection.createQueueSession(false, Session.AUTO_ACKNOWLEDGE); - // No need to do anything on ActiveMQ side as physical queues are created on demand - Queue destination = session.createQueue(queue); - QueueReceiver receiver = session.createReceiver(destination); - receiver.setMessageListener(this); - sessionToReceiver.put(session, receiver); - } - - @Override public void onCommand(Object o) { - } - - @Override public void onException(IOException error) { - checkResult = CheckResult.failed(error); - } - - @Override public void transportInterupted() { - checkResult = INTERRUPTION; - } - - @Override public void transportResumed() { - checkResult = CheckResult.OK; - } - - @Override public void onMessage(Message message) { - metrics.incrementMessages(); - byte[] serialized; // TODO: consider how to reuse buffers here - try { - if (message instanceof BytesMessage) { - BytesMessage bytesMessage = (BytesMessage) message; - serialized = new byte[(int) bytesMessage.getBodyLength()]; - bytesMessage.readBytes(serialized); - } else if (message instanceof TextMessage) { - String text = ((TextMessage) message).getText(); - serialized = text.getBytes(UTF_8); - } else { - metrics.incrementMessagesDropped(); - return; - } - } catch (Exception e) { - metrics.incrementMessagesDropped(); - return; - } - - metrics.incrementBytes(serialized.length); - if (serialized.length == 0) return; // lenient on empty messages - collector.acceptSpans(serialized, NOOP); - } - - @Override public void close() { - if (checkResult == CLOSED) return; - checkResult = CLOSED; - connection.removeTransportListener(this); - try { - for (Map.Entry sessionReceiver : sessionToReceiver.entrySet()) { - sessionReceiver.getValue().setMessageListener(null); // deregister this - sessionReceiver.getKey().close(); - } - 
connection.close(); - } catch (JMSException ignored) { - // EmptyCatch ignored - } - } -} diff --git a/zipkin-collector/activemq/src/main/java/zipkin2/collector/activemq/LazyInit.java b/zipkin-collector/activemq/src/main/java/zipkin2/collector/activemq/LazyInit.java deleted file mode 100644 index 2e518ad6009..00000000000 --- a/zipkin-collector/activemq/src/main/java/zipkin2/collector/activemq/LazyInit.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.collector.activemq; - -import javax.jms.JMSException; -import org.apache.activemq.ActiveMQConnection; -import org.apache.activemq.ActiveMQConnectionFactory; -import zipkin2.collector.Collector; -import zipkin2.collector.CollectorMetrics; - -import static zipkin2.collector.activemq.ActiveMQCollector.uncheckedException; - -/** - * Lazy creates a connection and registers a message listener up to the specified concurrency level. - * This listener will also receive health notifications. 
- */ -final class LazyInit { - final Collector collector; - final CollectorMetrics metrics; - final ActiveMQConnectionFactory connectionFactory; - final String queue; - final int concurrency; - - volatile ActiveMQSpanConsumer result; - - LazyInit(ActiveMQCollector.Builder builder) { - collector = builder.delegate.build(); - metrics = builder.metrics; - connectionFactory = builder.connectionFactory; - queue = builder.queue; - concurrency = builder.concurrency; - } - - ActiveMQSpanConsumer init() { - if (result == null) { - synchronized (this) { - if (result == null) { - result = doInit(); - } - } - } - return result; - } - - void close() { - ActiveMQSpanConsumer maybe = result; - if (maybe != null) result.close(); - } - - ActiveMQSpanConsumer doInit() { - final ActiveMQConnection connection; - try { - connection = (ActiveMQConnection) connectionFactory.createQueueConnection(); - connection.start(); - } catch (JMSException e) { - throw uncheckedException("Unable to establish connection to ActiveMQ broker: ", e); - } - - try { - ActiveMQSpanConsumer result = new ActiveMQSpanConsumer(collector, metrics, connection); - - for (int i = 0; i < concurrency; i++) { - result.registerInNewSession(connection, queue); - } - - return result; - } catch (JMSException e) { - try { - connection.close(); - } catch (JMSException ignored) { - // EmptyCatch ignored - } - throw uncheckedException("Unable to create queueReceiver(" + queue + "): ", e); - } - } -} diff --git a/zipkin-collector/activemq/src/test/java/zipkin2/collector/activemq/ITActiveMQCollector.java b/zipkin-collector/activemq/src/test/java/zipkin2/collector/activemq/ITActiveMQCollector.java deleted file mode 100644 index 72949e4ca78..00000000000 --- a/zipkin-collector/activemq/src/test/java/zipkin2/collector/activemq/ITActiveMQCollector.java +++ /dev/null @@ -1,246 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file 
except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.collector.activemq; - -import java.io.IOException; -import java.io.UncheckedIOException; -import java.util.Arrays; -import java.util.List; -import java.util.concurrent.CopyOnWriteArraySet; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.atomic.AtomicInteger; -import org.apache.activemq.ActiveMQConnectionFactory; -import org.apache.activemq.junit.EmbeddedActiveMQBroker; -import org.junit.After; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.junit.rules.TestName; -import zipkin2.Call; -import zipkin2.Callback; -import zipkin2.Component; -import zipkin2.Span; -import zipkin2.codec.SpanBytesEncoder; -import zipkin2.collector.InMemoryCollectorMetrics; -import zipkin2.storage.ForwardingStorageComponent; -import zipkin2.storage.SpanConsumer; -import zipkin2.storage.StorageComponent; - -import static org.assertj.core.api.Assertions.assertThat; -import static zipkin2.TestObjects.LOTS_OF_SPANS; -import static zipkin2.TestObjects.UTF_8; -import static zipkin2.codec.SpanBytesEncoder.PROTO3; -import static zipkin2.codec.SpanBytesEncoder.THRIFT; - -public class ITActiveMQCollector { - List spans = Arrays.asList(LOTS_OF_SPANS[0], LOTS_OF_SPANS[1]); - - @ClassRule public static EmbeddedActiveMQBroker activemq = new EmbeddedActiveMQBroker(); - @Rule public TestName testName = new TestName(); - @Rule public ExpectedException 
thrown = ExpectedException.none(); - - InMemoryCollectorMetrics metrics = new InMemoryCollectorMetrics(); - InMemoryCollectorMetrics activemqMetrics = metrics.forTransport("activemq"); - - CopyOnWriteArraySet threadsProvidingSpans = new CopyOnWriteArraySet<>(); - LinkedBlockingQueue> receivedSpans = new LinkedBlockingQueue<>(); - SpanConsumer consumer = (spans) -> { - threadsProvidingSpans.add(Thread.currentThread()); - receivedSpans.add(spans); - return Call.create(null); - }; - - ActiveMQCollector collector; - - @Before public void start() { - collector = builder().build().start(); - } - - @After public void stop() throws IOException { - collector.close(); - } - - @Test public void checkPasses() { - assertThat(collector.check().ok()).isTrue(); - } - - @Test public void startFailsWithInvalidActiveMqServer() throws Exception { - collector.close(); - - ActiveMQConnectionFactory connectionFactory = new ActiveMQConnectionFactory(); - // we can be pretty certain ActiveMQ isn't running on localhost port 80 - connectionFactory.setBrokerURL("tcp://localhost:80"); - collector = builder().connectionFactory(connectionFactory).build(); - - thrown.expect(UncheckedIOException.class); - thrown.expectMessage("Unable to establish connection to ActiveMQ broker: Connection refused"); - collector.start(); - } - - /** - * The {@code toString()} of {@link Component} implementations appear in health check endpoints. - * Since these are likely to be exposed in logs and other monitoring tools, care should be taken - * to ensure {@code toString()} output is a reasonable length and does not contain sensitive - * information. 
- */ - @Test public void toStringContainsOnlySummaryInformation() { - assertThat(collector).hasToString(String.format("ActiveMQCollector{brokerURL=%s, queue=%s}", - activemq.getVmURL(), testName.getMethodName()) - ); - } - - /** Ensures list encoding works: a json encoded list of spans */ - @Test public void messageWithMultipleSpans_json() throws Exception { - messageWithMultipleSpans(SpanBytesEncoder.JSON_V1); - } - - /** Ensures list encoding works: a version 2 json list of spans */ - @Test public void messageWithMultipleSpans_json2() throws Exception { - messageWithMultipleSpans(SpanBytesEncoder.JSON_V2); - } - - /** Ensures list encoding works: proto3 ListOfSpans */ - @Test public void messageWithMultipleSpans_proto3() throws Exception { - messageWithMultipleSpans(SpanBytesEncoder.PROTO3); - } - - void messageWithMultipleSpans(SpanBytesEncoder encoder) throws Exception { - byte[] message = encoder.encodeList(spans); - activemq.pushMessage(collector.queue, message); - - assertThat(receivedSpans.take()).isEqualTo(spans); - - assertThat(activemqMetrics.messages()).isEqualTo(1); - assertThat(activemqMetrics.messagesDropped()).isZero(); - assertThat(activemqMetrics.bytes()).isEqualTo(message.length); - assertThat(activemqMetrics.spans()).isEqualTo(spans.size()); - assertThat(activemqMetrics.spansDropped()).isZero(); - } - - /** Ensures malformed spans don't hang the collector */ - @Test public void skipsMalformedData() throws Exception { - byte[] malformed1 = "[\"='".getBytes(UTF_8); // screwed up json - byte[] malformed2 = "malformed".getBytes(UTF_8); - activemq.pushMessage(collector.queue, THRIFT.encodeList(spans)); - activemq.pushMessage(collector.queue, new byte[0]); - activemq.pushMessage(collector.queue, malformed1); - activemq.pushMessage(collector.queue, malformed2); - activemq.pushMessage(collector.queue, THRIFT.encodeList(spans)); - - Thread.sleep(1000); - - assertThat(activemqMetrics.messages()).isEqualTo(5); - 
assertThat(activemqMetrics.messagesDropped()).isEqualTo(2); // only malformed, not empty - assertThat(activemqMetrics.bytes()) - .isEqualTo(THRIFT.encodeList(spans).length * 2 + malformed1.length + malformed2.length); - assertThat(activemqMetrics.spans()).isEqualTo(spans.size() * 2); - assertThat(activemqMetrics.spansDropped()).isZero(); - } - - /** Guards against errors that leak from storage, such as InvalidQueryException */ - @Test public void skipsOnSpanStorageException() throws Exception { - collector.close(); - - AtomicInteger counter = new AtomicInteger(); - consumer = (input) -> new Call.Base() { - @Override protected Void doExecute() { - throw new AssertionError(); - } - - @Override protected void doEnqueue(Callback callback) { - if (counter.getAndIncrement() == 1) { - callback.onError(new RuntimeException("storage fell over")); - } else { - receivedSpans.add(spans); - callback.onSuccess(null); - } - } - - @Override public Call clone() { - throw new AssertionError(); - } - }; - - activemq.pushMessage(collector.queue, PROTO3.encodeList(spans)); - activemq.pushMessage(collector.queue, PROTO3.encodeList(spans)); // tossed on error - activemq.pushMessage(collector.queue, PROTO3.encodeList(spans)); - - collector = builder().storage(buildStorage(consumer)).build().start(); - - assertThat(receivedSpans.take()).containsExactlyElementsOf(spans); - // the only way we could read this, is if the malformed span was skipped. 
- assertThat(receivedSpans.take()).containsExactlyElementsOf(spans); - - assertThat(activemqMetrics.messages()).isEqualTo(3); - assertThat(activemqMetrics.messagesDropped()).isZero(); // storage failure not message failure - assertThat(activemqMetrics.bytes()).isEqualTo(PROTO3.encodeList(spans).length * 3); - assertThat(activemqMetrics.spans()).isEqualTo(spans.size() * 3); - assertThat(activemqMetrics.spansDropped()).isEqualTo(spans.size()); // only one dropped - } - - @Test public void messagesDistributedAcrossMultipleThreadsSuccessfully() throws Exception { - collector.close(); - - CountDownLatch latch = new CountDownLatch(2); - collector = builder().concurrency(2).storage(buildStorage((spans) -> { - latch.countDown(); - try { - latch.await(); // await the other one as this proves 2 threads are in use - } catch (InterruptedException e) { - throw new AssertionError(e); - } - return consumer.accept(spans); - })).build().start(); - - activemq.pushMessage(collector.queue, ""); // empty bodies don't go to storage - activemq.pushMessage(collector.queue, PROTO3.encodeList(spans)); - activemq.pushMessage(collector.queue, PROTO3.encodeList(spans)); - - assertThat(receivedSpans.take()).containsExactlyElementsOf(spans); - latch.countDown(); - assertThat(receivedSpans.take()).containsExactlyElementsOf(spans); - - assertThat(threadsProvidingSpans.size()).isEqualTo(2); - - assertThat(activemqMetrics.messages()).isEqualTo(3); // 2 + empty body for warmup - assertThat(activemqMetrics.messagesDropped()).isZero(); - assertThat(activemqMetrics.bytes()).isEqualTo(PROTO3.encodeList(spans).length * 2); - assertThat(activemqMetrics.spans()).isEqualTo(spans.size() * 2); - assertThat(activemqMetrics.spansDropped()).isZero(); - } - - ActiveMQCollector.Builder builder() { - return ActiveMQCollector.builder() - .connectionFactory(activemq.createConnectionFactory()) - .storage(buildStorage(consumer)) - .metrics(metrics) - // prevent test flakes by having each run in an individual queue - 
.queue(testName.getMethodName()); - } - - static StorageComponent buildStorage(final SpanConsumer spanConsumer) { - return new ForwardingStorageComponent() { - @Override protected StorageComponent delegate() { - throw new AssertionError(); - } - - @Override public SpanConsumer spanConsumer() { - return spanConsumer; - } - }; - } -} diff --git a/zipkin-collector/activemq/src/test/resources/simplelogger.properties b/zipkin-collector/activemq/src/test/resources/simplelogger.properties deleted file mode 100644 index 41089aca77e..00000000000 --- a/zipkin-collector/activemq/src/test/resources/simplelogger.properties +++ /dev/null @@ -1,8 +0,0 @@ -# See https://www.slf4j.org/api/org/slf4j/impl/SimpleLogger.html for the full list of config options - -org.slf4j.simpleLogger.logFile=System.out -org.slf4j.simpleLogger.defaultLogLevel=warn -org.slf4j.simpleLogger.showDateTime=true -org.slf4j.simpleLogger.dateTimeFormat=yyyy-MM-dd HH:mm:ss:SSS - -org.slf4j.simpleLogger.log.zipkin2.collector.activemq=debug diff --git a/zipkin-collector/core/pom.xml b/zipkin-collector/core/pom.xml deleted file mode 100644 index eff1b899b15..00000000000 --- a/zipkin-collector/core/pom.xml +++ /dev/null @@ -1,47 +0,0 @@ - - - - 4.0.0 - - - io.zipkin.zipkin2 - zipkin-collector-parent - 2.24.4-SNAPSHOT - - - zipkin-collector - Collector: Core Library - - - ${project.basedir}/../.. - - - - - org.slf4j - slf4j-api - ${slf4j.version} - - - - uk.org.lidalia - slf4j-test - 1.2.0 - test - - - diff --git a/zipkin-collector/core/src/main/java/zipkin2/collector/Collector.java b/zipkin-collector/core/src/main/java/zipkin2/collector/Collector.java deleted file mode 100644 index 8f07c5e4ef3..00000000000 --- a/zipkin-collector/core/src/main/java/zipkin2/collector/Collector.java +++ /dev/null @@ -1,297 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.collector; - -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import java.util.concurrent.Executor; -import java.util.function.Supplier; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import zipkin2.Callback; -import zipkin2.Span; -import zipkin2.SpanBytesDecoderDetector; -import zipkin2.codec.BytesDecoder; -import zipkin2.codec.SpanBytesDecoder; -import zipkin2.storage.StorageComponent; - -import static java.lang.String.format; -import static java.util.logging.Level.FINE; -import static zipkin2.Call.propagateIfFatal; - -/** - * This component takes action on spans received from a transport. This includes deserializing, - * sampling and scheduling for storage. - * - *

Callbacks passed do not propagate to the storage layer. They only return success or failures - * before storage is attempted. This ensures that calling threads are disconnected from storage - * threads. - */ -public class Collector { // not final for mock - static final Callback NOOP_CALLBACK = new Callback() { - @Override public void onSuccess(Void value) { - } - - @Override public void onError(Throwable t) { - } - }; - - /** Needed to scope this to the correct logging category */ - public static Builder newBuilder(Class loggingClass) { - if (loggingClass == null) throw new NullPointerException("loggingClass == null"); - return new Builder(LoggerFactory.getLogger(loggingClass.getName())); - } - - public static final class Builder { - final Logger logger; - StorageComponent storage; - CollectorSampler sampler; - CollectorMetrics metrics; - - Builder(Logger logger) { - this.logger = logger; - } - - /** Sets {@link {@link CollectorComponent.Builder#storage(StorageComponent)}} */ - public Builder storage(StorageComponent storage) { - if (storage == null) throw new NullPointerException("storage == null"); - this.storage = storage; - return this; - } - - /** Sets {@link {@link CollectorComponent.Builder#metrics(CollectorMetrics)}} */ - public Builder metrics(CollectorMetrics metrics) { - if (metrics == null) throw new NullPointerException("metrics == null"); - this.metrics = metrics; - return this; - } - - /** Sets {@link {@link CollectorComponent.Builder#sampler(CollectorSampler)}} */ - public Builder sampler(CollectorSampler sampler) { - if (sampler == null) throw new NullPointerException("sampler == null"); - this.sampler = sampler; - return this; - } - - public Collector build() { - return new Collector(this); - } - } - - final Logger logger; - final CollectorMetrics metrics; - final CollectorSampler sampler; - final StorageComponent storage; - - Collector(Builder builder) { - if (builder.logger == null) throw new NullPointerException("logger == null"); - 
this.logger = builder.logger; - this.metrics = builder.metrics == null ? CollectorMetrics.NOOP_METRICS : builder.metrics; - if (builder.storage == null) throw new NullPointerException("storage == null"); - this.storage = builder.storage; - this.sampler = builder.sampler == null ? CollectorSampler.ALWAYS_SAMPLE : builder.sampler; - } - - public void accept(List spans, Callback callback) { - accept(spans, callback, Runnable::run); - } - - /** - * Calls to get the storage component could be blocking. This ensures requests that block - * callers (such as http or gRPC) do not add additional load during such events. - * - * @param executor the executor used to enqueue the storage request. - */ - public void accept(List spans, Callback callback, Executor executor) { - if (spans.isEmpty()) { - callback.onSuccess(null); - return; - } - metrics.incrementSpans(spans.size()); - - List sampledSpans = sample(spans); - if (sampledSpans.isEmpty()) { - callback.onSuccess(null); - return; - } - - // In order to ensure callers are not blocked, we swap callbacks when we get to the storage - // phase of this process. Here, we create a callback whose sole purpose is classifying later - // errors on this bundle of spans in the same log category. This allows people to only turn on - // debug logging in one place. - try { - executor.execute(new StoreSpans(sampledSpans)); - callback.onSuccess(null); - } catch (Throwable unexpected) { // ensure if a future is supplied we always set value or error - callback.onError(unexpected); - throw unexpected; - } - } - - /** Like {@link #acceptSpans(byte[], BytesDecoder, Callback)}, except using a byte buffer. 
*/ - public void acceptSpans(ByteBuffer encoded, SpanBytesDecoder decoder, Callback callback, - Executor executor) { - List spans; - try { - spans = decoder.decodeList(encoded); - } catch (RuntimeException | Error e) { - handleDecodeError(e, callback); - return; - } - accept(spans, callback, executor); - } - - /** - * Before calling this, call {@link CollectorMetrics#incrementMessages()}, and {@link - * CollectorMetrics#incrementBytes(int)}. Do not call any other metrics callbacks as those are - * handled internal to this method. - * - * @param serialized not empty message - */ - public void acceptSpans(byte[] serialized, Callback callback) { - BytesDecoder decoder; - try { - decoder = SpanBytesDecoderDetector.decoderForListMessage(serialized); - } catch (RuntimeException | Error e) { - handleDecodeError(e, callback); - return; - } - acceptSpans(serialized, decoder, callback); - } - - /** - * Before calling this, call {@link CollectorMetrics#incrementMessages()}, and {@link - * CollectorMetrics#incrementBytes(int)}. Do not call any other metrics callbacks as those are - * handled internal to this method. 
- * - * @param serializedSpans not empty message - */ - public void acceptSpans( - byte[] serializedSpans, BytesDecoder decoder, Callback callback) { - List spans; - try { - spans = decodeList(decoder, serializedSpans); - } catch (RuntimeException | Error e) { - handleDecodeError(e, callback); - return; - } - accept(spans, callback); - } - - List decodeList(BytesDecoder decoder, byte[] serialized) { - List out = new ArrayList<>(); - decoder.decodeList(serialized, out); - return out; - } - - void store(List sampledSpans, Callback callback) { - storage.spanConsumer().accept(sampledSpans).enqueue(callback); - } - - String idString(Span span) { - return span.traceId() + "/" + span.id(); - } - - List sample(List input) { - List sampled = new ArrayList<>(input.size()); - for (int i = 0, length = input.size(); i < length; i++) { - Span s = input.get(i); - if (sampler.isSampled(s.traceId(), Boolean.TRUE.equals(s.debug()))) { - sampled.add(s); - } - } - int dropped = input.size() - sampled.size(); - if (dropped > 0) metrics.incrementSpansDropped(dropped); - return sampled; - } - - class StoreSpans implements Callback, Runnable { - final List spans; - - StoreSpans(List spans) { - this.spans = spans; - } - - @Override public void run() { - try { - store(spans, this); - } catch (RuntimeException | Error e) { - // While unexpected, invoking the storage command could raise an error synchronously. When - // that's the case, we wouldn't have invoked callback.onSuccess, so we need to handle the - // error here. 
- onError(e); - } - } - - @Override public void onSuccess(Void value) { - } - - @Override public void onError(Throwable t) { - handleStorageError(spans, t, NOOP_CALLBACK); - } - - @Override public String toString() { - return appendSpanIds(spans, new StringBuilder("StoreSpans(")) + ")"; - } - } - - void handleDecodeError(Throwable e, Callback callback) { - metrics.incrementMessagesDropped(); - handleError(e, "Cannot decode spans"::toString, callback); - } - - /** - * When storing spans, an exception can be raised before or after the fact. This adds context of - * span ids to give logs more relevance. - */ - void handleStorageError(List spans, Throwable e, Callback callback) { - metrics.incrementSpansDropped(spans.size()); - // The exception could be related to a span being huge. Instead of filling logs, - // print trace id, span id pairs - handleError(e, () -> appendSpanIds(spans, new StringBuilder("Cannot store spans ")), callback); - } - - void handleError(Throwable e, Supplier defaultLogMessage, Callback callback) { - propagateIfFatal(e); - callback.onError(e); - if (!logger.isDebugEnabled()) return; - - String error = e.getMessage() != null ? e.getMessage() : ""; - // We have specific code that customizes log messages. Use this when the case. - if (error.startsWith("Malformed") || error.startsWith("Truncated")) { - logger.debug(error, e); - } else { // otherwise, beautify the message - String message = - format("%s due to %s(%s)", defaultLogMessage.get(), e.getClass().getSimpleName(), error); - logger.debug(message, e); - } - } - - // TODO: this logic needs to be redone as service names are more important than span IDs. Also, - // span IDs repeat between client and server! 
- String appendSpanIds(List spans, StringBuilder message) { - message.append("["); - int i = 0; - Iterator iterator = spans.iterator(); - while (iterator.hasNext() && i++ < 3) { - message.append(idString(iterator.next())); - if (iterator.hasNext()) message.append(", "); - } - if (iterator.hasNext()) message.append("..."); - - return message.append("]").toString(); - } -} diff --git a/zipkin-collector/core/src/main/java/zipkin2/collector/CollectorComponent.java b/zipkin-collector/core/src/main/java/zipkin2/collector/CollectorComponent.java deleted file mode 100644 index 99d800017a7..00000000000 --- a/zipkin-collector/core/src/main/java/zipkin2/collector/CollectorComponent.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.collector; - -import java.util.List; -import zipkin2.Component; -import zipkin2.storage.SpanConsumer; -import zipkin2.storage.StorageComponent; - -/** - * The collector represents the server-side of a transport. Its job is to take spans from a - * transport and store ones it has sampled. - * - *

Call {@link #start()} to start collecting spans. - */ -public abstract class CollectorComponent extends Component { - - /** - * Starts the server-side of the transport, typically listening or looking up a queue. - * - *

Many implementations block the calling thread until services are available. - */ - public abstract CollectorComponent start(); - - public abstract static class Builder { - /** - * Once spans are sampled, they are {@link SpanConsumer#accept(List)} queued for storage} using - * this component. - */ - public abstract Builder storage(StorageComponent storage); - - /** - * Aggregates and reports collection metrics to a monitoring system. Should be {@link - * CollectorMetrics#forTransport(String) scoped to this transport}. Defaults to no-op. - */ - public abstract Builder metrics(CollectorMetrics metrics); - - /** - * {@link CollectorSampler#isSampled(String, boolean) samples spans} to reduce load on the - * storage system. Defaults to always sample. - */ - public abstract Builder sampler(CollectorSampler sampler); - - public abstract CollectorComponent build(); - } -} diff --git a/zipkin-collector/core/src/main/java/zipkin2/collector/CollectorMetrics.java b/zipkin-collector/core/src/main/java/zipkin2/collector/CollectorMetrics.java deleted file mode 100644 index e55213c9d63..00000000000 --- a/zipkin-collector/core/src/main/java/zipkin2/collector/CollectorMetrics.java +++ /dev/null @@ -1,129 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.collector; - -import java.util.Collection; -import java.util.List; -import zipkin2.codec.SpanBytesDecoder; -import zipkin2.storage.SpanConsumer; - -/** - * Instrumented applications report spans over a transport such as Kafka. Zipkin collectors receive - * these messages, {@link SpanBytesDecoder#decode(byte[], Collection) decoding them into spans}, - * {@link CollectorSampler#isSampled(String, boolean) apply sampling}, and {@link - * SpanConsumer#accept(List) queues them for storage}. - * - *

Callbacks on this type are invoked by zipkin collectors to improve the visibility of the - * system. A typical implementation will report metrics to a telemetry system for analysis and - * reporting. - * - *

Spans Collected vs Queryable Spans

- * - *

A span queried may be comprised of multiple spans collected. While instrumentation should - * report complete spans, Instrumentation often patch the same span twice, ex adding annotations. - * Also, RPC spans include at least 2 messages due to the client and the server reporting - * separately. Finally, some storage components merge patches at ingest. For these reasons, you - * should be cautious to alert on queryable spans vs stored spans, unless you control the - * instrumentation in such a way that queryable spans/message is reliable. - * - *

Key Relationships

- * - *

The following relationships can be used to consider health of the tracing system. - * - *

- * 
    - *
  • Successful Messages = {@link #incrementMessages() Accepted messages} - - * {@link #incrementMessagesDropped() Dropped messages}. Alert when this is less than amount of - * messages sent from instrumentation.
  • - *
  • Stored spans <= {@link #incrementSpans(int) Accepted spans} - {@link - * #incrementSpansDropped(int) Dropped spans}. Alert when this drops below the - * {@link CollectorSampler#isSampled(long, boolean) collection-tier sample rate}. - *
  • - *
- *
- */ -public interface CollectorMetrics { - - /** - * Those who wish to partition metrics by transport can call this method to include the transport - * type in the backend metric key. - * - *

For example, an implementation may by default report {@link #incrementSpans(int) incremented - * spans} to the key "zipkin.collector.span.accepted". When {@code metrics.forTransport("kafka"} - * is called, the counter would report to "zipkin.collector.kafka.span.accepted" - * - * @param transportType ex "http", "rabbitmq", "kafka" - */ - CollectorMetrics forTransport(String transportType); - - /** - * Increments count of messages received, which contain 0 or more spans. Ex POST requests or Kafka - * messages consumed. - */ - void incrementMessages(); - - /** - * Increments count of messages that could not be read. Ex malformed content, or peer disconnect. - */ - void incrementMessagesDropped(); - - /** - * Increments the count of spans read from a successful message. When bundling is used, accepted - * spans will be a larger number than successful messages. - */ - void incrementSpans(int quantity); - - /** - * Increments the number of bytes containing serialized spans in a message. - * - *

Note: this count should relate to the raw data structures, like json or thrift, and discount - * compression, enveloping, etc. - */ - void incrementBytes(int quantity); - - /** - * Increments the count of spans dropped for any reason. For example, failure queueing to storage - * or sampling decisions. - */ - void incrementSpansDropped(int quantity); - - CollectorMetrics NOOP_METRICS = - new CollectorMetrics() { - - @Override - public CollectorMetrics forTransport(String transportType) { - return this; - } - - @Override - public void incrementMessages() {} - - @Override - public void incrementMessagesDropped() {} - - @Override - public void incrementSpans(int quantity) {} - - @Override - public void incrementBytes(int quantity) {} - - @Override - public void incrementSpansDropped(int quantity) {} - - @Override - public String toString() { - return "NoOpCollectorMetrics"; - } - }; -} diff --git a/zipkin-collector/core/src/main/java/zipkin2/collector/CollectorSampler.java b/zipkin-collector/core/src/main/java/zipkin2/collector/CollectorSampler.java deleted file mode 100644 index 152638d0401..00000000000 --- a/zipkin-collector/core/src/main/java/zipkin2/collector/CollectorSampler.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.collector; - -import zipkin2.Span; -import zipkin2.internal.HexCodec; - -/** - * CollectorSampler decides if a particular trace should be "sampled", i.e. recorded in permanent - * storage. This involves a consistent decision based on the span's trace ID with one notable - * exception: {@link Span#debug() Debug} spans are always stored. - * - *

Implementation

- * - *

Accepts a percentage of trace ids by comparing their absolute value against a potentially - * dynamic boundary. eg {@code isSampled == abs(traceId) <= boundary} - * - *

While idempotent, this implementation's sample rate won't exactly match the input rate because - * trace ids are not perfectly distributed across 64bits. For example, tests have shown an error - * rate of 3% when 100K trace ids are {@link java.util.Random#nextLong random}. - */ -public abstract class CollectorSampler { - public static final CollectorSampler ALWAYS_SAMPLE = CollectorSampler.create(1.0f); - - /** - * Returns a trace ID sampler with the indicated rate. - * - * @param rate minimum sample rate is 0.0001, or 0.01% of traces - */ - public static CollectorSampler create(float rate) { - if (rate < 0 || rate > 1) - throw new IllegalArgumentException("rate should be between 0 and 1: was " + rate); - final long boundary = (long) (Long.MAX_VALUE * rate); // safe cast as less <= 1 - return new CollectorSampler() { - @Override - protected long boundary() { - return boundary; - } - }; - } - - protected abstract long boundary(); - - /** - * Returns true if spans with this trace ID should be recorded to storage. - * - *

Zipkin v1 allows storage-layer sampling, which can help prevent spikes in traffic from - * overloading the system. Debug spans are always stored. - * - *

This uses only the lower 64 bits of the trace ID as instrumentation still send mixed trace - * ID width. - * - * @param hexTraceId the lower 64 bits of the span's trace ID are checked against the boundary - * @param debug when true, always passes sampling - */ - public boolean isSampled(String hexTraceId, boolean debug) { - if (Boolean.TRUE.equals(debug)) return true; - long traceId = HexCodec.lowerHexToUnsignedLong(hexTraceId); - // The absolute value of Long.MIN_VALUE is larger than a long, so Math.abs returns identity. - // This converts to MAX_VALUE to avoid always dropping when traceId == Long.MIN_VALUE - long t = traceId == Long.MIN_VALUE ? Long.MAX_VALUE : Math.abs(traceId); - return t <= boundary(); - } - - @Override - public String toString() { - return "CollectorSampler(" + boundary() + ")"; - } - - protected CollectorSampler() {} -} diff --git a/zipkin-collector/core/src/main/java/zipkin2/collector/InMemoryCollectorMetrics.java b/zipkin-collector/core/src/main/java/zipkin2/collector/InMemoryCollectorMetrics.java deleted file mode 100644 index 7555556ba9f..00000000000 --- a/zipkin-collector/core/src/main/java/zipkin2/collector/InMemoryCollectorMetrics.java +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.collector; - -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.atomic.AtomicInteger; - -public final class InMemoryCollectorMetrics implements CollectorMetrics { - - private final ConcurrentHashMap metrics; - private final String messages; - private final String messagesDropped; - private final String bytes; - private final String spans; - private final String spansDropped; - - public InMemoryCollectorMetrics() { - this(new ConcurrentHashMap<>(), null); - } - - InMemoryCollectorMetrics(ConcurrentHashMap metrics, String transport) { - this.metrics = metrics; - this.messages = scope("messages", transport); - this.messagesDropped = scope("messagesDropped", transport); - this.bytes = scope("bytes", transport); - this.spans = scope("spans", transport); - this.spansDropped = scope("spansDropped", transport); - } - - @Override - public InMemoryCollectorMetrics forTransport(String transportType) { - if (transportType == null) throw new NullPointerException("transportType == null"); - return new InMemoryCollectorMetrics(metrics, transportType); - } - - @Override - public void incrementMessages() { - increment(messages, 1); - } - - public int messages() { - return get(messages); - } - - @Override - public void incrementMessagesDropped() { - increment(messagesDropped, 1); - } - - public int messagesDropped() { - return get(messagesDropped); - } - - @Override - public void incrementBytes(int quantity) { - increment(bytes, quantity); - } - - public int bytes() { - return get(bytes); - } - - @Override - public void incrementSpans(int quantity) { - increment(spans, quantity); - } - - public int spans() { - return get(spans); - } - - @Override - public void incrementSpansDropped(int quantity) { - increment(spansDropped, quantity); - } - - public int spansDropped() { - return get(spansDropped); - } - - public void clear() { - metrics.clear(); - } - - private int get(String key) { - AtomicInteger atomic = metrics.get(key); - return 
atomic == null ? 0 : atomic.get(); - } - - private void increment(String key, int quantity) { - if (quantity == 0) return; - while (true) { - AtomicInteger metric = metrics.get(key); - if (metric == null) { - metric = metrics.putIfAbsent(key, new AtomicInteger(quantity)); - if (metric == null) return; // won race creating the entry - } - - while (true) { - int oldValue = metric.get(); - int update = oldValue + quantity; - if (metric.compareAndSet(oldValue, update)) return; // won race updating - } - } - } - - static String scope(String key, String transport) { - return key + (transport == null ? "" : "." + transport); - } -} diff --git a/zipkin-collector/core/src/test/java/zipkin2/collector/CollectorSamplerTest.java b/zipkin-collector/core/src/test/java/zipkin2/collector/CollectorSamplerTest.java deleted file mode 100644 index ebbf04ae224..00000000000 --- a/zipkin-collector/core/src/test/java/zipkin2/collector/CollectorSamplerTest.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.collector; - -import java.util.stream.Stream; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import zipkin2.Span; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.data.Percentage.withPercentage; -import static zipkin2.TestObjects.LOTS_OF_SPANS; - -public class CollectorSamplerTest { - - @Rule public ExpectedException thrown = ExpectedException.none(); - - /** - * Math.abs("8000000000000000") returns a negative, we coerse to "7fffffffffffffff" to avoid - * always dropping when trace_id == "8000000000000000" - */ - @Test - public void mostNegativeNumberDefence() { - CollectorSampler sampler = CollectorSampler.create(0.1f); - - assertThat(sampler.isSampled("8000000000000000", false)) - .isEqualTo(sampler.isSampled("7fffffffffffffff", false)); - } - - @Test - public void debugWins() { - CollectorSampler sampler = CollectorSampler.create(0.0f); - - assertThat(sampler.isSampled("8000000000000000", true)).isTrue(); - } - - @Test - public void retain10Percent() { - float sampleRate = 0.1f; - CollectorSampler sampler = CollectorSampler.create(sampleRate); - - assertThat(lotsOfSpans().filter(s -> sampler.isSampled(s.traceId(), false)).count()) - .isCloseTo((long) (LOTS_OF_SPANS.length * sampleRate), withPercentage(3)); - } - - /** The collector needs to apply the same decision to incremental updates in a trace. 
*/ - @Test - public void idempotent() { - CollectorSampler sampler1 = CollectorSampler.create(0.1f); - CollectorSampler sampler2 = CollectorSampler.create(0.1f); - - assertThat(lotsOfSpans().filter(s -> sampler1.isSampled(s.traceId(), false)).toArray()) - .containsExactly( - lotsOfSpans().filter(s -> sampler2.isSampled(s.traceId(), false)).toArray()); - } - - @Test - public void zeroMeansDropAllTraces() { - CollectorSampler sampler = CollectorSampler.create(0.0f); - - assertThat(lotsOfSpans().filter(s -> sampler.isSampled(s.traceId(), false))).isEmpty(); - } - - @Test - public void oneMeansKeepAllTraces() { - CollectorSampler sampler = CollectorSampler.create(1.0f); - - assertThat(lotsOfSpans().filter(s -> sampler.isSampled(s.traceId(), false))) - .hasSize(LOTS_OF_SPANS.length); - } - - @Test - public void rateCantBeNegative() { - thrown.expect(IllegalArgumentException.class); - thrown.expectMessage("rate should be between 0 and 1: was -1.0"); - - CollectorSampler.create(-1.0f); - } - - @Test - public void rateCantBeOverOne() { - thrown.expect(IllegalArgumentException.class); - thrown.expectMessage("rate should be between 0 and 1: was 1.1"); - - CollectorSampler.create(1.1f); - } - - static Stream lotsOfSpans() { - return Stream.of(LOTS_OF_SPANS).parallel(); - } -} diff --git a/zipkin-collector/core/src/test/java/zipkin2/collector/CollectorTest.java b/zipkin-collector/core/src/test/java/zipkin2/collector/CollectorTest.java deleted file mode 100644 index 22682db5c01..00000000000 --- a/zipkin-collector/core/src/test/java/zipkin2/collector/CollectorTest.java +++ /dev/null @@ -1,258 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.collector; - -import java.util.concurrent.RejectedExecutionException; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.slf4j.LoggerFactory; -import uk.org.lidalia.slf4jext.Level; -import uk.org.lidalia.slf4jtest.TestLogger; -import uk.org.lidalia.slf4jtest.TestLoggerFactory; -import zipkin2.Callback; -import zipkin2.Span; -import zipkin2.codec.SpanBytesDecoder; -import zipkin2.codec.SpanBytesEncoder; -import zipkin2.storage.InMemoryStorage; -import zipkin2.storage.StorageComponent; - -import static java.util.Arrays.asList; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyNoMoreInteractions; -import static org.mockito.Mockito.when; -import static zipkin2.TestObjects.CLIENT_SPAN; -import static zipkin2.TestObjects.TRACE; -import static zipkin2.TestObjects.UTF_8; - -public class CollectorTest { - InMemoryStorage storage = InMemoryStorage.newBuilder().build(); - Callback callback = mock(Callback.class); - CollectorMetrics metrics = mock(CollectorMetrics.class); - Collector collector; - private TestLogger testLogger = TestLoggerFactory.getTestLogger(""); - - @Before - public void setup() { - testLogger.clearAll(); - collector = spy( - new Collector.Builder(testLogger).metrics(metrics).storage(storage).build()); - when(collector.idString(CLIENT_SPAN)).thenReturn("1"); // to make 
expectations easier to read - } - - @After - public void after() { - verifyNoMoreInteractions(metrics, callback); - } - - @Test - public void unsampledSpansArentStored() { - collector = new Collector.Builder(LoggerFactory.getLogger("")) - .sampler(CollectorSampler.create(0.0f)) - .metrics(metrics) - .storage(storage) - .build(); - - collector.accept(TRACE, callback); - - verify(callback).onSuccess(null); - assertThat(testLogger.getLoggingEvents()).isEmpty(); - verify(metrics).incrementSpans(4); - verify(metrics).incrementSpansDropped(4); - assertThat(storage.getTraces()).isEmpty(); - } - - @Test - public void errorDetectingFormat() { - collector.acceptSpans(new byte[] {'f', 'o', 'o'}, callback); - - verify(callback).onError(any(RuntimeException.class)); - verify(metrics).incrementMessagesDropped(); - } - - @Test - public void acceptSpans_jsonV2() { - byte[] bytes = SpanBytesEncoder.JSON_V2.encodeList(TRACE); - collector.acceptSpans(bytes, callback); - - verify(collector).acceptSpans(bytes, SpanBytesDecoder.JSON_V2, callback); - - verify(callback).onSuccess(null); - assertThat(testLogger.getLoggingEvents()).isEmpty(); - verify(metrics).incrementSpans(4); - assertThat(storage.getTraces()).containsOnly(TRACE); - } - - @Test - public void acceptSpans_decodingError() { - byte[] bytes = "[\"='".getBytes(UTF_8); // screwed up json - collector.acceptSpans(bytes, SpanBytesDecoder.JSON_V2, callback); - - verify(callback).onError(any(IllegalArgumentException.class)); - assertDebugLogIs("Malformed reading List from json"); - verify(metrics).incrementMessagesDropped(); - } - - @Test - public void accept_storageError() { - StorageComponent storage = mock(StorageComponent.class); - RuntimeException error = new RuntimeException("storage disabled"); - when(storage.spanConsumer()).thenThrow(error); - collector = new Collector.Builder(LoggerFactory.getLogger("")) - .metrics(metrics) - .storage(storage) - .build(); - - collector.accept(TRACE, callback); - - 
verify(callback).onSuccess(null); // error is async - assertDebugLogIs("Cannot store spans [1, 2, 2, ...] due to RuntimeException(storage disabled)"); - verify(metrics).incrementSpans(4); - verify(metrics).incrementSpansDropped(4); - } - - @Test - public void acceptSpans_emptyMessageOk() { - byte[] bytes = new byte[] {'[', ']'}; - collector.acceptSpans(bytes, callback); - - verify(collector).acceptSpans(bytes, SpanBytesDecoder.JSON_V1, callback); - - verify(callback).onSuccess(null); - assertThat(testLogger.getLoggingEvents()).isEmpty(); - assertThat(storage.getTraces()).isEmpty(); - } - - @Test - public void storeSpansCallback_toStringIncludesSpanIds() { - Span span2 = CLIENT_SPAN.toBuilder().id("3").build(); - when(collector.idString(span2)).thenReturn("3"); - - assertThat(collector.new StoreSpans(asList(CLIENT_SPAN, span2))) - .hasToString("StoreSpans([1, 3])"); - } - - @Test - public void storeSpansCallback_toStringIncludesSpanIds_noMoreThan3() { - assertThat(unprefixIdString(collector.new StoreSpans(TRACE).toString())) - .hasToString("StoreSpans([1, 1, 2, ...])"); - } - - @Test - public void storeSpansCallback_onErrorWithNullMessage() { - RuntimeException error = new RuntimeException(); - - Callback callback = collector.new StoreSpans(TRACE); - callback.onError(error); - - assertDebugLogIs("Cannot store spans [1, 1, 2, ...] due to RuntimeException()"); - verify(metrics).incrementSpansDropped(4); - } - - @Test - public void storeSpansCallback_onErrorWithMessage() { - IllegalArgumentException error = new IllegalArgumentException("no beer"); - Callback callback = collector.new StoreSpans(TRACE); - callback.onError(error); - - assertDebugLogIs("Cannot store spans [1, 1, 2, ...] 
due to IllegalArgumentException(no beer)"); - verify(metrics).incrementSpansDropped(4); - } - - @Test - public void errorAcceptingSpans_onErrorRejectedExecution() { - RuntimeException error = new RejectedExecutionException("slow down"); - collector.handleStorageError(TRACE, error, callback); - - verify(callback).onError(error); - assertDebugLogIs( - "Cannot store spans [1, 1, 2, ...] due to RejectedExecutionException(slow down)"); - verify(metrics).incrementSpansDropped(4); - } - - public void handleStorageError_onErrorWithNullMessage() { - RuntimeException error = new RuntimeException(); - collector.handleStorageError(TRACE, error, callback); - - verify(callback).onError(error); - assertDebugLogIs("Cannot store spans [1, 1, 2, ...] due to RuntimeException()"); - verify(metrics).incrementSpansDropped(4); - } - - @Test - public void handleStorageError_onErrorWithMessage() { - RuntimeException error = new IllegalArgumentException("no beer"); - collector.handleStorageError(TRACE, error, callback); - - verify(callback).onError(error); - assertDebugLogIs("Cannot store spans [1, 1, 2, ...] 
due to IllegalArgumentException(no beer)"); - verify(metrics).incrementSpansDropped(4); - } - - @Test - public void handleDecodeError_onErrorWithNullMessage() { - RuntimeException error = new RuntimeException(); - collector.handleDecodeError(error, callback); - - verify(callback).onError(error); - assertDebugLogIs("Cannot decode spans due to RuntimeException()"); - verify(metrics).incrementMessagesDropped(); - } - - @Test - public void handleDecodeError_onErrorWithMessage() { - IllegalArgumentException error = new IllegalArgumentException("no beer"); - collector.handleDecodeError(error, callback); - - verify(callback).onError(error); - assertDebugLogIs("Cannot decode spans due to IllegalArgumentException(no beer)"); - verify(metrics).incrementMessagesDropped(); - } - - @Test - public void handleDecodeError_doesntWrapMessageOnMalformedException() { - IllegalArgumentException error = new IllegalArgumentException("Malformed reading spans"); - collector.handleDecodeError(error, callback); - - verify(callback).onError(error); - assertDebugLogIs("Malformed reading spans"); - verify(metrics).incrementMessagesDropped(); - } - - @Test - public void handleDecodeError_doesntWrapMessageOnTruncatedException() { - IllegalArgumentException error = new IllegalArgumentException("Truncated reading spans"); - collector.handleDecodeError(error, callback); - - verify(callback).onError(error); - assertDebugLogIs("Truncated reading spans"); - verify(metrics).incrementMessagesDropped(); - } - - private String unprefixIdString(String msg) { - return msg.replaceAll("7180c278b62e8f6a216a2aea45d08fc9/000000000000000", ""); - } - - private void assertDebugLogIs(String message) { - assertThat(testLogger.getLoggingEvents()) - .hasSize(1) - .filteredOn(event -> event.getLevel().equals(Level.DEBUG)) - .extracting(event -> unprefixIdString(event.getMessage())) - .containsOnly(message); - } -} diff --git a/zipkin-collector/kafka/README.md b/zipkin-collector/kafka/README.md deleted file mode 100644 
index c68ae2c1b36..00000000000 --- a/zipkin-collector/kafka/README.md +++ /dev/null @@ -1,61 +0,0 @@ -# collector-kafka - -## KafkaCollector -This collector is implemented as a Kafka consumer supporting Kafka brokers running -version 0.10.0.0 or later. It polls a Kafka [topic](#kafka-configuration) for messages that contain -a list of spans in json or TBinaryProtocol big-endian encoding. These -spans are pushed to a span consumer. - -For information about running this collector as a module in Zipkin server, see -the [Zipkin Server README](../../zipkin-server/README.md#kafka-collector). - -When using this collector as a library outside of Zipkin server, -[zipkin2.collector.kafka.KafkaCollector.Builder](src/main/java/zipkin2/collector/kafka/KafkaCollector.java) -includes defaults that will operate against a Kafka topic name `zipkin`. - -## Encoding spans into Kafka messages -The message's binary data includes a list of spans. Supported encodings -are the same as the http [POST /spans](https://zipkin.io/zipkin-api/#/paths/%252Fspans) body. - -### Json -The message's binary data is a list of spans in json. The first character must be '[' (decimal 91). - -`Codec.JSON.writeSpans(spans)` performs the correct json encoding. 
- -Here's an example, sending a list of a single span to the zipkin topic: - -```bash -$ kafka-console-producer.sh --broker-list $ADVERTISED_HOST:9092 --topic zipkin -[{"traceId":"1","name":"bang","id":"2","timestamp":1470150004071068,"duration":1,"localEndpoint":{"serviceName":"flintstones"},"tags":{"lc":"bamm-bamm"}}] -``` - -### Thrift -The message's binary data includes a list header followed by N spans serialized in TBinaryProtocol - -`Codec.THRIFT.writeSpans(spans)` encodes spans in the following fashion: -``` -write_byte(12) // type of the list elements: 12 == struct -write_i32(count) // count of spans that will follow -for (int i = 0; i < count; i++) { - writeTBinaryProtocol(spans(i)) -} -``` - -### Legacy encoding -Older versions of zipkin accepted a single span per message, as opposed -to a list per message. This practice is deprecated, but still supported. - -## Kafka configuration - -Below are a few guidelines for the Kafka infrastructure used by this collector: -* The collector does not explicitly create the `zipkin` topic itself. If your cluster has auto topic creation enabled then it will be created by Kafka automatically using the broker configured defaults. We recommend therefor creating the topic manually before starting the collector, using configuration parameters adapted for your Zipkin setup. -* The collector will not fail if the `zipkin` topic does not exist, it will instead just wait for the topic to become available. -* A size based retention makes more sense than the default time based (1 week), to safeguard against large bursts of span data. -* The collector starts 1 instance of `KafkaConsumer` by default. We do recommend creating the `zipkin` topic with 6 or more partitions however, as it allows you to easily scale out the collector later by increasing the [KAFKA_STREAMS](../../zipkin-server/README.md#kafka-collector) parameter. 
-* As Zipkin reporter sends batches of spans which do not rely on any kind of ordering guarantee (key=null), you can increase the number of partitions without affecting ordering. It does not make sense however to have more `KafkaConsumer` instances than partitions as the instances will just be idle and not consume anything. -* Monitoring the consumer lag of the collector as well as the size of the topic will help you to decide if scaling up or down is needed. -* Tuning this collector should happen in coordination with the storage backend. Parameters like `max.poll.records`, `fetch.max.bytes` can prevent the collector from overloading the storage backend, or if it's sized properly they could instead be used to increase ingestion rate. -* A large and consistent consumer lag can indicate that the storage has difficulties with the ingestion rate and could be scaled up. - -## Logging -Zipkin by default suppresses all logging output from Kafka client operations as they can get quite verbose. Start Zipkin with `--logging.level.org.apache.kafka=INFO` or similar to override this during troubleshooting for example. diff --git a/zipkin-collector/kafka/pom.xml b/zipkin-collector/kafka/pom.xml deleted file mode 100644 index d6c2a113995..00000000000 --- a/zipkin-collector/kafka/pom.xml +++ /dev/null @@ -1,55 +0,0 @@ - - - - 4.0.0 - - - io.zipkin.zipkin2 - zipkin-collector-parent - 2.24.4-SNAPSHOT - - - zipkin-collector-kafka - Collector: Kafka 0.10+ - - - ${project.basedir}/../.. 
- 3.4.0 - - - - - ${project.groupId} - zipkin-collector - ${project.version} - - - - org.apache.kafka - kafka-clients - ${kafka.version} - - - - - com.fasterxml.jackson.core - jackson-databind - ${jackson.version} - test - - - diff --git a/zipkin-collector/kafka/src/main/java/zipkin2/collector/kafka/KafkaCollector.java b/zipkin-collector/kafka/src/main/java/zipkin2/collector/kafka/KafkaCollector.java deleted file mode 100644 index df4585b88cf..00000000000 --- a/zipkin-collector/kafka/src/main/java/zipkin2/collector/kafka/KafkaCollector.java +++ /dev/null @@ -1,282 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.collector.kafka; - -import java.time.Duration; -import java.util.Map; -import java.util.Properties; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicReference; -import org.apache.kafka.clients.admin.AdminClient; -import org.apache.kafka.clients.producer.ProducerConfig; -import org.apache.kafka.common.KafkaException; -import org.apache.kafka.common.KafkaFuture; -import org.apache.kafka.common.config.ConfigException; -import org.apache.kafka.common.errors.InterruptException; -import org.apache.kafka.common.serialization.ByteArrayDeserializer; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import zipkin2.Call; -import zipkin2.CheckResult; -import zipkin2.collector.Collector; -import zipkin2.collector.CollectorComponent; -import zipkin2.collector.CollectorMetrics; -import zipkin2.collector.CollectorSampler; -import zipkin2.storage.SpanConsumer; -import zipkin2.storage.StorageComponent; - -import static org.apache.kafka.clients.consumer.ConsumerConfig.AUTO_OFFSET_RESET_CONFIG; -import static org.apache.kafka.clients.consumer.ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG; -import static org.apache.kafka.clients.consumer.ConsumerConfig.GROUP_ID_CONFIG; -import static org.apache.kafka.clients.consumer.ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG; -import static org.apache.kafka.clients.consumer.ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG; - -/** - * This collector polls a Kafka topic for messages that contain TBinaryProtocol big-endian encoded - * lists of spans. These spans are pushed to a {@link SpanConsumer#accept span consumer}. - * - *

This collector uses a Kafka 0.10+ consumer. - */ -public final class KafkaCollector extends CollectorComponent { - private static final Logger LOG = LoggerFactory.getLogger(KafkaCollector.class); - - public static Builder builder() { - return new Builder(); - } - - /** Configuration including defaults needed to consume spans from a Kafka topic. */ - public static final class Builder extends CollectorComponent.Builder { - final Properties properties = new Properties(); - final Collector.Builder delegate = Collector.newBuilder(KafkaCollector.class); - CollectorMetrics metrics = CollectorMetrics.NOOP_METRICS; - String topic = "zipkin"; - int streams = 1; - - @Override - public Builder storage(StorageComponent storage) { - delegate.storage(storage); - return this; - } - - @Override - public Builder sampler(CollectorSampler sampler) { - delegate.sampler(sampler); - return this; - } - - @Override - public Builder metrics(CollectorMetrics metrics) { - if (metrics == null) throw new NullPointerException("metrics == null"); - this.metrics = metrics.forTransport("kafka"); - delegate.metrics(this.metrics); - return this; - } - - /** - * Topic zipkin spans will be consumed from. Defaults to "zipkin". Multiple topics may be - * specified if comma delimited. - */ - public Builder topic(String topic) { - if (topic == null) throw new NullPointerException("topic == null"); - this.topic = topic; - return this; - } - - /** The bootstrapServers connect string, ex. 127.0.0.1:9092. No default. */ - public Builder bootstrapServers(String bootstrapServers) { - if (bootstrapServers == null) throw new NullPointerException("bootstrapServers == null"); - properties.put(BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); - return this; - } - - /** The consumer group this process is consuming on behalf of. 
Defaults to "zipkin" */ - public Builder groupId(String groupId) { - if (groupId == null) throw new NullPointerException("groupId == null"); - properties.put(GROUP_ID_CONFIG, groupId); - return this; - } - - /** Count of threads consuming the topic. Defaults to 1 */ - public Builder streams(int streams) { - this.streams = streams; - return this; - } - - /** - * By default, a consumer will be built from properties derived from builder defaults, as well - * as "auto.offset.reset" -> "earliest". Any properties set here will override the consumer - * config. - * - *

For example: Only consume spans since you connected by setting the below. - * - *

{@code
-     * Map overrides = new LinkedHashMap<>();
-     * overrides.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
-     * builder.overrides(overrides);
-     * }
- * - * @see org.apache.kafka.clients.consumer.ConsumerConfig - */ - public final Builder overrides(Map overrides) { - if (overrides == null) throw new NullPointerException("overrides == null"); - properties.putAll(overrides); - return this; - } - - @Override - public KafkaCollector build() { - return new KafkaCollector(this); - } - - Builder() { - // Settings below correspond to "New Consumer Configs" - // https://kafka.apache.org/documentation/#newconsumerconfigs - properties.put(GROUP_ID_CONFIG, "zipkin"); - properties.put(AUTO_OFFSET_RESET_CONFIG, "earliest"); - properties.put(KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName()); - properties.put(VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName()); - } - } - - final LazyKafkaWorkers kafkaWorkers; - final Properties properties; - volatile AdminClient adminClient; - - KafkaCollector(Builder builder) { - kafkaWorkers = new LazyKafkaWorkers(builder); - properties = builder.properties; - } - - @Override - public KafkaCollector start() { - kafkaWorkers.start(); - return this; - } - - @Override - public CheckResult check() { - try { - CheckResult failure = kafkaWorkers.failure.get(); // check the kafka workers didn't quit - if (failure != null) return failure; - KafkaFuture maybeClusterId = getAdminClient().describeCluster().clusterId(); - maybeClusterId.get(1, TimeUnit.SECONDS); - return CheckResult.OK; - } catch (Throwable e) { - Call.propagateIfFatal(e); - return CheckResult.failed(e); - } - } - - AdminClient getAdminClient() { - if (adminClient == null) { - synchronized (this) { - if (adminClient == null) { - adminClient = AdminClient.create(properties); - } - } - } - return adminClient; - } - - @Override - public void close() { - kafkaWorkers.close(); - if (adminClient != null) adminClient.close(Duration.ofSeconds(1)); - } - - @Override public final String toString() { - return "KafkaCollector{" - + "bootstrapServers=" + properties.get(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG) 
- + ", topic=" + kafkaWorkers.builder.topic - + "}"; - } - - static final class LazyKafkaWorkers { - final int streams; - final Builder builder; - final AtomicReference failure = new AtomicReference<>(); - final CopyOnWriteArrayList workers = new CopyOnWriteArrayList<>(); - volatile ExecutorService pool; - - LazyKafkaWorkers(Builder builder) { - this.streams = builder.streams; - this.builder = builder; - } - - void start() { - if (pool == null) { - synchronized (this) { - if (pool == null) { - pool = compute(); - } - } - } - } - - void close() { - ExecutorService maybePool = pool; - if (maybePool == null) return; - for (KafkaCollectorWorker worker : workers) { - worker.stop(); - } - maybePool.shutdown(); - try { - if (!maybePool.awaitTermination(2, TimeUnit.SECONDS)) { - // Timeout exceeded: force shutdown - maybePool.shutdownNow(); - } - } catch (InterruptedException e) { - // at least we tried - } - } - - ExecutorService compute() { - ExecutorService pool = - streams == 1 ? Executors.newSingleThreadExecutor() : Executors.newFixedThreadPool(streams); - - for (int i = 0; i < streams; i++) { - // TODO: bad idea to lazy reference properties from a mutable builder - // copy them here and then pass this to the KafkaCollectorWorker constructor instead - KafkaCollectorWorker worker = new KafkaCollectorWorker(builder); - workers.add(worker); - pool.execute(guardFailures(worker)); - } - - return pool; - } - - Runnable guardFailures(final Runnable delegate) { - return () -> { - try { - delegate.run(); - } catch (InterruptException e) { - // Interrupts are normal on shutdown, intentionally swallow - } catch (KafkaException e) { - if (e.getCause() instanceof ConfigException) e = (KafkaException) e.getCause(); - LOG.error("Kafka worker exited with exception", e); - failure.set(CheckResult.failed(e)); - } catch (RuntimeException e) { - LOG.error("Kafka worker exited with exception", e); - failure.set(CheckResult.failed(e)); - } catch (Error e) { - LOG.error("Kafka worker exited 
with error", e); - failure.set(CheckResult.failed(new RuntimeException(e))); - } - }; - } - } -} diff --git a/zipkin-collector/kafka/src/main/java/zipkin2/collector/kafka/KafkaCollectorWorker.java b/zipkin-collector/kafka/src/main/java/zipkin2/collector/kafka/KafkaCollectorWorker.java deleted file mode 100644 index 98e3178a500..00000000000 --- a/zipkin-collector/kafka/src/main/java/zipkin2/collector/kafka/KafkaCollectorWorker.java +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.collector.kafka; - -import java.time.Duration; -import java.time.temporal.ChronoUnit; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.Properties; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicReference; -import org.apache.kafka.clients.consumer.ConsumerRebalanceListener; -import org.apache.kafka.clients.consumer.ConsumerRecord; -import org.apache.kafka.clients.consumer.ConsumerRecords; -import org.apache.kafka.clients.consumer.KafkaConsumer; -import org.apache.kafka.common.TopicPartition; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import zipkin2.Callback; -import zipkin2.Span; -import zipkin2.codec.SpanBytesDecoder; -import zipkin2.collector.Collector; -import zipkin2.collector.CollectorMetrics; - -/** Consumes spans from Kafka messages, ignoring malformed input */ -final class KafkaCollectorWorker implements Runnable { - static final Logger LOG = LoggerFactory.getLogger(KafkaCollectorWorker.class); - static final Callback NOOP = - new Callback() { - @Override - public void onSuccess(Void value) {} - - @Override - public void onError(Throwable t) {} - }; - - final Properties properties; - final List topics; - final Collector collector; - final CollectorMetrics metrics; - // added for integration tests only, see ITKafkaCollector - final AtomicReference> assignedPartitions = - new AtomicReference<>(Collections.emptyList()); - final AtomicBoolean running = new AtomicBoolean(true); - - KafkaCollectorWorker(KafkaCollector.Builder builder) { - properties = builder.properties; - topics = Arrays.asList(builder.topic.split(",")); - collector = builder.delegate.build(); - metrics = builder.metrics; - } - - @Override - public void run() { - try (KafkaConsumer kafkaConsumer = new KafkaConsumer<>(properties)) { - kafkaConsumer.subscribe( - topics, - // added for integration tests only, 
see ITKafkaCollector - new ConsumerRebalanceListener() { - @Override - public void onPartitionsRevoked(Collection partitions) { - // technically we should remove only the revoked partitions but for test purposes it - // does not matter - assignedPartitions.set(Collections.emptyList()); - } - - @Override - public void onPartitionsAssigned(Collection partitions) { - assignedPartitions.set(Collections.unmodifiableList(new ArrayList<>(partitions))); - } - }); - LOG.debug("Kafka consumer starting polling loop."); - while (running.get()) { - final ConsumerRecords consumerRecords = kafkaConsumer.poll(Duration.of(1000, ChronoUnit.MILLIS)); - LOG.debug("Kafka polling returned batch of {} messages.", consumerRecords.count()); - for (ConsumerRecord record : consumerRecords) { - final byte[] bytes = record.value(); - metrics.incrementMessages(); - metrics.incrementBytes(bytes.length); - - if (bytes.length == 0) continue; // lenient on empty messages - - if (bytes.length < 2) { // need two bytes to check if protobuf - metrics.incrementMessagesDropped(); - } else { - // If we received legacy single-span encoding, decode it into a singleton list - if (!protobuf3(bytes) && bytes[0] <= 16 && bytes[0] != 12 /* thrift, but not list */) { - Span span; - try { - span = SpanBytesDecoder.THRIFT.decodeOne(bytes); - } catch (RuntimeException e) { - metrics.incrementMessagesDropped(); - continue; - } - collector.accept(Collections.singletonList(span), NOOP); - } else { - collector.acceptSpans(bytes, NOOP); - } - } - } - } - } catch (RuntimeException | Error e) { - LOG.warn("Unexpected error in polling loop spans", e); - throw e; - } finally { - LOG.debug("Kafka consumer polling loop stopped. 
Kafka consumer closed."); - } - } - - /** - * Stop the polling loop - */ - public void stop() { - running.set(false); - } - - /* span key or trace ID key */ - static boolean protobuf3(byte[] bytes) { - return bytes[0] == 10 && bytes[1] != 0; // varint follows and won't be zero - } -} diff --git a/zipkin-collector/kafka/src/test/java/zipkin2/collector/kafka/ITKafkaCollector.java b/zipkin-collector/kafka/src/test/java/zipkin2/collector/kafka/ITKafkaCollector.java deleted file mode 100644 index 2511e3753ae..00000000000 --- a/zipkin-collector/kafka/src/test/java/zipkin2/collector/kafka/ITKafkaCollector.java +++ /dev/null @@ -1,367 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.collector.kafka; - -import java.util.Arrays; -import java.util.List; -import java.util.Properties; -import java.util.concurrent.CopyOnWriteArraySet; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import java.util.concurrent.atomic.AtomicInteger; -import org.apache.kafka.clients.producer.KafkaProducer; -import org.apache.kafka.clients.producer.ProducerConfig; -import org.apache.kafka.clients.producer.ProducerRecord; -import org.apache.kafka.common.KafkaException; -import org.apache.kafka.common.serialization.ByteArraySerializer; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.TestInstance; -import org.junit.jupiter.api.Timeout; -import org.junit.jupiter.api.extension.RegisterExtension; -import zipkin2.Call; -import zipkin2.Callback; -import zipkin2.Component; -import zipkin2.Span; -import zipkin2.codec.SpanBytesEncoder; -import zipkin2.collector.InMemoryCollectorMetrics; -import zipkin2.storage.ForwardingStorageComponent; -import zipkin2.storage.SpanConsumer; -import zipkin2.storage.StorageComponent; - -import static org.assertj.core.api.Assertions.assertThat; -import static zipkin2.TestObjects.CLIENT_SPAN; -import static zipkin2.TestObjects.LOTS_OF_SPANS; -import static zipkin2.TestObjects.UTF_8; -import static zipkin2.codec.SpanBytesEncoder.JSON_V2; -import static zipkin2.codec.SpanBytesEncoder.THRIFT; - -@TestInstance(TestInstance.Lifecycle.PER_CLASS) -@Timeout(60) -class ITKafkaCollector { - @RegisterExtension KafkaExtension kafka = new KafkaExtension(); - - List spans = Arrays.asList(LOTS_OF_SPANS[0], LOTS_OF_SPANS[1]); - - InMemoryCollectorMetrics metrics = new InMemoryCollectorMetrics(); - InMemoryCollectorMetrics kafkaMetrics = metrics.forTransport("kafka"); - - CopyOnWriteArraySet threadsProvidingSpans = new CopyOnWriteArraySet<>(); - 
LinkedBlockingQueue> receivedSpans = new LinkedBlockingQueue<>(); - SpanConsumer consumer = (spans) -> { - threadsProvidingSpans.add(Thread.currentThread()); - receivedSpans.add(spans); - return Call.create(null); - }; - KafkaProducer producer; - - @BeforeEach void setup() { - metrics.clear(); - threadsProvidingSpans.clear(); - Properties config = new Properties(); - config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka.bootstrapServer()); - producer = new KafkaProducer<>(config, new ByteArraySerializer(), new ByteArraySerializer()); - } - - @AfterEach void tearDown() { - if (producer != null) producer.close(); - } - - @Test void checkPasses() { - try (KafkaCollector collector = builder("check_passes").build()) { - assertThat(collector.check().ok()).isTrue(); - } - } - - /** - * Don't raise exception (crash process), rather fail status check! This allows the health check - * to report the cause. - */ - @Test void check_failsOnInvalidBootstrapServers() throws Exception { - - KafkaCollector.Builder builder = - builder("fail_invalid_bootstrap_servers").bootstrapServers("1.1.1.1"); - - try (KafkaCollector collector = builder.build()) { - collector.start(); - - Thread.sleep(1000L); // wait for crash - - assertThat(collector.check().error()) - .isInstanceOf(KafkaException.class) - .hasMessage("Invalid url in bootstrap.servers: 1.1.1.1"); - } - } - - /** - * If the Kafka broker(s) specified in the connection string are not available, the Kafka consumer - * library will attempt to reconnect indefinitely. The Kafka consumer will not throw an exception - * and does not expose the status of its connection to the Kafka broker(s) in its API. - *

- * An AdminClient API instance has been added to the connector to validate that connection with - * Kafka is available in every health check. This AdminClient reuses Consumer's properties to - * Connect to the cluster, and request a Cluster description to validate communication with - * Kafka. - */ - @Test void reconnectsIndefinitelyAndReportsUnhealthyWhenKafkaUnavailable() throws Exception { - KafkaCollector.Builder builder = - builder("fail_invalid_bootstrap_servers").bootstrapServers("localhost:" + 9092); - - try (KafkaCollector collector = builder.build()) { - collector.start(); - Thread.sleep(TimeUnit.SECONDS.toMillis(1)); - assertThat(collector.check().error()).isInstanceOf(TimeoutException.class); - } - } - - /** Ensures legacy encoding works: a single TBinaryProtocol encoded span */ - @Test void messageWithSingleThriftSpan() throws Exception { - KafkaCollector.Builder builder = builder("single_span"); - - byte[] bytes = THRIFT.encode(CLIENT_SPAN); - produceSpans(bytes, builder.topic); - - try (KafkaCollector collector = builder.build()) { - collector.start(); - assertThat(receivedSpans.take()).containsExactly(CLIENT_SPAN); - } - - assertThat(kafkaMetrics.messages()).isEqualTo(1); - assertThat(kafkaMetrics.messagesDropped()).isZero(); - assertThat(kafkaMetrics.bytes()).isEqualTo(bytes.length); - assertThat(kafkaMetrics.spans()).isEqualTo(1); - assertThat(kafkaMetrics.spansDropped()).isZero(); - } - - /** Ensures list encoding works: a TBinaryProtocol encoded list of spans */ - @Test void messageWithMultipleSpans_thrift() throws Exception { - messageWithMultipleSpans(builder("multiple_spans_thrift"), THRIFT); - } - - /** Ensures list encoding works: a json encoded list of spans */ - @Test void messageWithMultipleSpans_json() throws Exception { - messageWithMultipleSpans(builder("multiple_spans_json"), SpanBytesEncoder.JSON_V1); - } - - /** Ensures list encoding works: a version 2 json list of spans */ - @Test void messageWithMultipleSpans_json2() throws 
Exception { - messageWithMultipleSpans(builder("multiple_spans_json2"), SpanBytesEncoder.JSON_V2); - } - - /** Ensures list encoding works: proto3 ListOfSpans */ - @Test void messageWithMultipleSpans_proto3() throws Exception { - messageWithMultipleSpans(builder("multiple_spans_proto3"), SpanBytesEncoder.PROTO3); - } - - void messageWithMultipleSpans(KafkaCollector.Builder builder, SpanBytesEncoder encoder) - throws Exception { - byte[] message = encoder.encodeList(spans); - - produceSpans(message, builder.topic); - - try (KafkaCollector collector = builder.build()) { - collector.start(); - assertThat(receivedSpans.take()).containsAll(spans); - } - - assertThat(kafkaMetrics.messages()).isEqualTo(1); - assertThat(kafkaMetrics.messagesDropped()).isZero(); - assertThat(kafkaMetrics.bytes()).isEqualTo(message.length); - assertThat(kafkaMetrics.spans()).isEqualTo(spans.size()); - assertThat(kafkaMetrics.spansDropped()).isZero(); - } - - /** Ensures malformed spans don't hang the collector */ - @Test void skipsMalformedData() throws Exception { - KafkaCollector.Builder builder = builder("decoder_exception"); - - byte[] malformed1 = "[\"='".getBytes(UTF_8); // screwed up json - byte[] malformed2 = "malformed".getBytes(UTF_8); - produceSpans(THRIFT.encodeList(spans), builder.topic); - produceSpans(new byte[0], builder.topic); - produceSpans(malformed1, builder.topic); - produceSpans(malformed2, builder.topic); - produceSpans(THRIFT.encodeList(spans), builder.topic); - - try (KafkaCollector collector = builder.build()) { - collector.start(); - assertThat(receivedSpans.take()).containsExactlyElementsOf(spans); - // the only way we could read this, is if the malformed spans were skipped. 
- assertThat(receivedSpans.take()).containsExactlyElementsOf(spans); - } - - assertThat(kafkaMetrics.messages()).isEqualTo(5); - assertThat(kafkaMetrics.messagesDropped()).isEqualTo(2); // only malformed, not empty - assertThat(kafkaMetrics.bytes()) - .isEqualTo(THRIFT.encodeList(spans).length * 2 + malformed1.length + malformed2.length); - assertThat(kafkaMetrics.spans()).isEqualTo(spans.size() * 2); - assertThat(kafkaMetrics.spansDropped()).isZero(); - } - - /** Guards against errors that leak from storage, such as InvalidQueryException */ - @Test void skipsOnSpanStorageException() throws Exception { - AtomicInteger counter = new AtomicInteger(); - consumer = (input) -> new Call.Base() { - @Override protected Void doExecute() { - throw new AssertionError(); - } - - @Override protected void doEnqueue(Callback callback) { - if (counter.getAndIncrement() == 1) { - callback.onError(new RuntimeException("storage fell over")); - } else { - receivedSpans.add(spans); - callback.onSuccess(null); - } - } - - @Override public Call clone() { - throw new AssertionError(); - } - }; - final StorageComponent storage = buildStorage(consumer); - KafkaCollector.Builder builder = builder("storage_exception").storage(storage); - - produceSpans(THRIFT.encodeList(spans), builder.topic); - produceSpans(THRIFT.encodeList(spans), builder.topic); // tossed on error - produceSpans(THRIFT.encodeList(spans), builder.topic); - - try (KafkaCollector collector = builder.build()) { - collector.start(); - assertThat(receivedSpans.take()).containsExactlyElementsOf(spans); - // the only way we could read this, is if the malformed span was skipped. 
- assertThat(receivedSpans.take()).containsExactlyElementsOf(spans); - } - - assertThat(kafkaMetrics.messages()).isEqualTo(3); - assertThat(kafkaMetrics.messagesDropped()).isZero(); // storage failure isn't a message failure - assertThat(kafkaMetrics.bytes()).isEqualTo(THRIFT.encodeList(spans).length * 3); - assertThat(kafkaMetrics.spans()).isEqualTo(spans.size() * 3); - assertThat(kafkaMetrics.spansDropped()).isEqualTo(spans.size()); // only one dropped - } - - @Test void messagesDistributedAcrossMultipleThreadsSuccessfully() throws Exception { - KafkaCollector.Builder builder = builder("multi_thread", 2); - - kafka.prepareTopics(builder.topic, 2); - warmUpTopic(builder.topic); - - final byte[] traceBytes = JSON_V2.encodeList(spans); - try (KafkaCollector collector = builder.build()) { - collector.start(); - waitForPartitionAssignments(collector); - produceSpans(traceBytes, builder.topic, 0); - assertThat(receivedSpans.take()).containsExactlyElementsOf(spans); - produceSpans(traceBytes, builder.topic, 1); - assertThat(receivedSpans.take()).containsExactlyElementsOf(spans); - } - - assertThat(threadsProvidingSpans.size()).isEqualTo(2); - - assertThat(kafkaMetrics.messages()).isEqualTo(3); // 2 + empty body for warmup - assertThat(kafkaMetrics.messagesDropped()).isZero(); - assertThat(kafkaMetrics.bytes()).isEqualTo(traceBytes.length * 2); - assertThat(kafkaMetrics.spans()).isEqualTo(spans.size() * 2); - assertThat(kafkaMetrics.spansDropped()).isZero(); - } - - @Test void multipleTopicsCommaDelimited() { - try (KafkaCollector collector = builder("topic1,topic2").build()) { - collector.start(); - - assertThat(collector.kafkaWorkers.workers.get(0).topics).containsExactly("topic1", "topic2"); - } - } - - /** - * The {@code toString()} of {@link Component} implementations appear in health check endpoints. 
- * Since these are likely to be exposed in logs and other monitoring tools, care should be taken - * to ensure {@code toString()} output is a reasonable length and does not contain sensitive - * information. - */ - @Test void toStringContainsOnlySummaryInformation() { - try (KafkaCollector collector = builder("muah").build()) { - collector.start(); - - assertThat(collector).hasToString( - String.format("KafkaCollector{bootstrapServers=%s, topic=%s}", kafka.bootstrapServer(), - "muah") - ); - } - } - - /** - * Producing this empty message triggers auto-creation of the topic and gets things "warmed up" on - * the broker before the consumers subscribe. Without this, the topic is auto-created when the - * first consumer subscribes but there appears to be a race condition where the existence of the - * topic is not known to the partition assignor when the consumer group goes through its initial - * re-balance. As a result, no partitions are assigned, there are no further changes to group - * membership to trigger another re-balance, and no messages are consumed. This initial message is - * not necessary if the test broker is re-created for each test, but that increases execution time - * for the suite by a factor of 10x (2-3s to ~25s on my local machine). - */ - void warmUpTopic(String topic) { - produceSpans(new byte[0], topic); - } - - /** - * Wait until all kafka consumers created by the collector have at least one partition assigned. 
- */ - void waitForPartitionAssignments(KafkaCollector collector) throws Exception { - long consumersWithAssignments = 0; - while (consumersWithAssignments < collector.kafkaWorkers.streams) { - Thread.sleep(10); - consumersWithAssignments = - collector - .kafkaWorkers - .workers - .stream() - .filter(w -> !w.assignedPartitions.get().isEmpty()) - .count(); - } - } - - void produceSpans(byte[] spans, String topic) { - produceSpans(spans, topic, 0); - } - - void produceSpans(byte[] spans, String topic, Integer partition) { - producer.send(new ProducerRecord<>(topic, partition, null, spans)); - producer.flush(); - } - - KafkaCollector.Builder builder(String topic) { - return builder(topic, 1); - } - - KafkaCollector.Builder builder(String topic, int streams) { - return kafka.newCollectorBuilder(topic, streams) - .metrics(metrics) - .storage(buildStorage(consumer)); - } - - static StorageComponent buildStorage(final SpanConsumer spanConsumer) { - return new ForwardingStorageComponent() { - @Override protected StorageComponent delegate() { - throw new AssertionError(); - } - - @Override public SpanConsumer spanConsumer() { - return spanConsumer; - } - }; - } -} diff --git a/zipkin-collector/kafka/src/test/java/zipkin2/collector/kafka/KafkaExtension.java b/zipkin-collector/kafka/src/test/java/zipkin2/collector/kafka/KafkaExtension.java deleted file mode 100644 index 38459d495a2..00000000000 --- a/zipkin-collector/kafka/src/test/java/zipkin2/collector/kafka/KafkaExtension.java +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.collector.kafka; - -import java.util.ArrayList; -import java.util.List; -import java.util.Properties; -import java.util.concurrent.ExecutionException; -import org.apache.kafka.clients.admin.AdminClient; -import org.apache.kafka.clients.admin.AdminClientConfig; -import org.apache.kafka.clients.admin.NewTopic; -import org.apache.kafka.common.errors.TopicExistsException; -import org.junit.jupiter.api.extension.AfterAllCallback; -import org.junit.jupiter.api.extension.BeforeAllCallback; -import org.junit.jupiter.api.extension.ExtensionContext; -import org.opentest4j.TestAbortedException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.testcontainers.containers.GenericContainer; -import org.testcontainers.containers.InternetProtocol; -import org.testcontainers.containers.output.Slf4jLogConsumer; -import org.testcontainers.containers.wait.strategy.Wait; - -import static org.testcontainers.utility.DockerImageName.parse; - -class KafkaExtension implements BeforeAllCallback, AfterAllCallback { - static final Logger LOGGER = LoggerFactory.getLogger(KafkaExtension.class); - static final int KAFKA_PORT = 19092; - - final KafkaContainer container = new KafkaContainer(); - - @Override public void beforeAll(ExtensionContext context) { - if (context.getRequiredTestClass().getEnclosingClass() != null) { - // Only run once in outermost scope. 
- return; - } - - container.start(); - LOGGER.info("Using bootstrapServer " + bootstrapServer()); - } - - void prepareTopics(String topics, int partitions) { - Properties config = new Properties(); - config.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServer()); - - List newTopics = new ArrayList<>(); - for (String topic : topics.split(",")) { - if ("".equals(topic)) continue; - newTopics.add(new NewTopic(topic, partitions, (short) 1)); - } - - try (AdminClient adminClient = AdminClient.create(config)) { - adminClient.createTopics(newTopics).all().get(); - } catch (InterruptedException | ExecutionException e) { - if (e.getCause() != null && e.getCause() instanceof TopicExistsException) return; - throw new TestAbortedException( - "Topics could not be created " + newTopics + ": " + e.getMessage(), e); - } - } - - String bootstrapServer() { - return container.getHost() + ":" + container.getMappedPort(KAFKA_PORT); - } - - KafkaCollector.Builder newCollectorBuilder(String topic, int streams) { - prepareTopics(topic, streams); - return KafkaCollector.builder().bootstrapServers(bootstrapServer()) - .topic(topic) - .groupId(topic + "_group") - .streams(streams); - } - - @Override public void afterAll(ExtensionContext context) { - if (context.getRequiredTestClass().getEnclosingClass() != null) { - // Only run once in outermost scope. - return; - } - container.stop(); - } - - // mostly waiting for https://github.com/testcontainers/testcontainers-java/issues/3537 - static final class KafkaContainer extends GenericContainer { - KafkaContainer() { - super(parse("ghcr.io/openzipkin/zipkin-kafka:2.23.2")); - if ("true".equals(System.getProperty("docker.skip"))) { - throw new TestAbortedException("${docker.skip} == true"); - } - waitStrategy = Wait.forHealthcheck(); - // 19092 is for connections from the Docker host and needs to be used as a fixed port. - // TODO: someone who knows Kafka well, make ^^ comment better! 
- addFixedExposedPort(KAFKA_PORT, KAFKA_PORT, InternetProtocol.TCP); - withLogConsumer(new Slf4jLogConsumer(LOGGER)); - } - } -} diff --git a/zipkin-collector/kafka/src/test/resources/simplelogger.properties b/zipkin-collector/kafka/src/test/resources/simplelogger.properties deleted file mode 100644 index 42747f1736c..00000000000 --- a/zipkin-collector/kafka/src/test/resources/simplelogger.properties +++ /dev/null @@ -1,11 +0,0 @@ -# See https://www.slf4j.org/api/org/slf4j/impl/SimpleLogger.html for the full list of config options - -org.slf4j.simpleLogger.logFile=System.out -org.slf4j.simpleLogger.defaultLogLevel=warn -org.slf4j.simpleLogger.showDateTime=true -org.slf4j.simpleLogger.dateTimeFormat=yyyy-MM-dd HH:mm:ss:SSS - -org.slf4j.simpleLogger.log.com.github.charithe.kafka=info -org.slf4j.simpleLogger.log.zipkin2.collector.kafka=debug -# uncomment to include kafka consumer configuration in test logs -#logger.org.apache.kafka.clients.level=info diff --git a/zipkin-collector/pom.xml b/zipkin-collector/pom.xml deleted file mode 100644 index a9f7857acf7..00000000000 --- a/zipkin-collector/pom.xml +++ /dev/null @@ -1,78 +0,0 @@ - - - - 4.0.0 - - - io.zipkin - zipkin-parent - 2.24.4-SNAPSHOT - - - io.zipkin.zipkin2 - zipkin-collector-parent - Collector - pom - - - ${project.basedir}/.. 
- 1.8 - java18 - - - - core - activemq - kafka - rabbitmq - scribe - - - - - ${project.groupId} - zipkin - ${project.version} - - - - - com.google.code.gson - gson - ${gson.version} - test - - - ${project.groupId} - zipkin-tests - ${project.version} - test - - - org.slf4j - slf4j-simple - ${slf4j.version} - test - - - org.testcontainers - testcontainers - ${testcontainers.version} - test - - - diff --git a/zipkin-collector/rabbitmq/README.md b/zipkin-collector/rabbitmq/README.md deleted file mode 100644 index e47f57d1389..00000000000 --- a/zipkin-collector/rabbitmq/README.md +++ /dev/null @@ -1,64 +0,0 @@ -# collector-rabbitmq - -## RabbitMQCollector -This collector consumes a RabbitMQ queue for messages that contain a list of spans. -Its only dependencies besides Zipkin core are the `slf4j-api` and the [RabbitMQ Java Client](https://github.com/rabbitmq/rabbitmq-java-client). - -### Configuration - -The following configuration can be set for the RabbitMQ Collector. - -Property | Environment Variable | Description ---- | --- | --- -`zipkin.collector.rabbitmq.concurrency` | `RABBIT_CONCURRENCY` | Number of concurrent consumers. Defaults to `1` -`zipkin.collector.rabbitmq.connection-timeout` | `RABBIT_CONNECTION_TIMEOUT` | Milliseconds to wait establishing a connection. Defaults to `60000` (1 minute) -`zipkin.collector.rabbitmq.queue` | `RABBIT_QUEUE` | Queue from which to collect span messages. Defaults to `zipkin` -`zipkin.collector.rabbitmq.uri` | `RABBIT_URI` | [RabbitMQ URI spec](https://www.rabbitmq.com/uri-spec.html)-compliant URI, ex. `amqp://user:pass@host:10000/vhost` - -If the URI is set, the following properties will be ignored. - -Property | Environment Variable | Description ---- | --- | --- -`zipkin.collector.rabbitmq.addresses` | `RABBIT_ADDRESSES` | Comma-separated list of RabbitMQ addresses, ex. `localhost:5672,localhost:5673` -`zipkin.collector.rabbitmq.password` | `RABBIT_PASSWORD`| Password to use when connecting to RabbitMQ. 
Defaults to `guest` -`zipkin.collector.rabbitmq.username` | `RABBIT_USER` | Username to use when connecting to RabbitMQ. Defaults to `guest` -`zipkin.collector.rabbitmq.virtual-host` | `RABBIT_VIRTUAL_HOST` | RabbitMQ virtual host to use. Defaults to `/` -`zipkin.collector.rabbitmq.use-ssl` | `RABBIT_USE_SSL` | Set to `true` to use SSL when connecting to RabbitMQ - -### Caveats - -The configured queue will be idempotently declared as a durable queue. - -This collector uses one connection to RabbitMQ, with the configured `concurrency` number of threads -each using one channel to consume messages. - -Consumption is done with `autoAck` on, so messages that fail to process successfully are not retried. - -## Encoding spans into RabbitMQ messages -The message's body should be the bytes of an encoded list of spans. - -### JSON -A list of Spans in JSON. The first character must be '[' (decimal 91). - -`SpanBytesEncoder.JSON_V2.encodeList(spans)` performs the correct JSON encoding. - -## Local testing - -The following assumes you are running an instance of RabbitMQ locally on the default port (5672). -You can download and install RabbitMQ following [instructions available here](https://www.rabbitmq.com/download.html). -With the [RabbitMQ Management CLI](https://www.rabbitmq.com/management-cli.html) you can easily publish -one-off spans to RabbitMQ to be collected by this collector. - -1. Start RabbitMQ server -2. Start Zipkin server -```bash -$ RABBIT_ADDRESSES=localhost java -jar zipkin.jar -``` -3. Save an array of spans to a file like `sample-spans.json` -```json -[{"traceId":"9032b04972e475c5","id":"9032b04972e475c5","kind":"SERVER","name":"get","timestamp":1505990621526000,"duration":612898,"localEndpoint":{"serviceName":"brave-webmvc-example","ipv4":"192.168.1.113"},"remoteEndpoint":{"serviceName":"","ipv4":"127.0.0.1","port":60149},"tags":{"error":"500 Internal Server Error","http.path":"/a"}}] -``` -4. 
Publish them using the CLI -```bash -$ rabbitmqadmin publish exchange=amq.default routing_key=zipkin < sample-spans.json -``` diff --git a/zipkin-collector/rabbitmq/pom.xml b/zipkin-collector/rabbitmq/pom.xml deleted file mode 100644 index 1332cbf9434..00000000000 --- a/zipkin-collector/rabbitmq/pom.xml +++ /dev/null @@ -1,48 +0,0 @@ - - - - 4.0.0 - - - io.zipkin.zipkin2 - zipkin-collector-parent - 2.24.4-SNAPSHOT - - - zipkin-collector-rabbitmq - Collector: RabbitMQ - Zipkin span collector for RabbitMQ transport - - - ${project.basedir}/../.. - 4.12.0 - - - - - ${project.groupId} - zipkin-collector - ${project.version} - - - - com.rabbitmq - amqp-client - ${amqp-client.version} - - - diff --git a/zipkin-collector/rabbitmq/src/main/java/zipkin2/collector/rabbitmq/RabbitMQCollector.java b/zipkin-collector/rabbitmq/src/main/java/zipkin2/collector/rabbitmq/RabbitMQCollector.java deleted file mode 100644 index 973d3cdd99a..00000000000 --- a/zipkin-collector/rabbitmq/src/main/java/zipkin2/collector/rabbitmq/RabbitMQCollector.java +++ /dev/null @@ -1,270 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.collector.rabbitmq; - -import com.rabbitmq.client.AMQP.BasicProperties; -import com.rabbitmq.client.Address; -import com.rabbitmq.client.Channel; -import com.rabbitmq.client.Connection; -import com.rabbitmq.client.ConnectionFactory; -import com.rabbitmq.client.DefaultConsumer; -import com.rabbitmq.client.Envelope; -import java.io.IOException; -import java.io.UncheckedIOException; -import java.util.Arrays; -import java.util.List; -import java.util.concurrent.TimeoutException; -import java.util.concurrent.atomic.AtomicReference; -import zipkin2.Call; -import zipkin2.Callback; -import zipkin2.CheckResult; -import zipkin2.collector.Collector; -import zipkin2.collector.CollectorComponent; -import zipkin2.collector.CollectorMetrics; -import zipkin2.collector.CollectorSampler; -import zipkin2.storage.StorageComponent; - -/** This collector consumes encoded binary messages from a RabbitMQ queue. */ -public final class RabbitMQCollector extends CollectorComponent { - static final Callback NOOP = new Callback() { - @Override public void onSuccess(Void value) { - } - - @Override public void onError(Throwable t) { - } - }; - - public static Builder builder() { - return new Builder(); - } - - /** Configuration including defaults needed to consume spans from a RabbitMQ queue. 
*/ - public static final class Builder extends CollectorComponent.Builder { - Collector.Builder delegate = Collector.newBuilder(RabbitMQCollector.class); - CollectorMetrics metrics = CollectorMetrics.NOOP_METRICS; - String queue = "zipkin"; - ConnectionFactory connectionFactory = new ConnectionFactory(); - Address[] addresses; - int concurrency = 1; - - @Override - public Builder storage(StorageComponent storage) { - this.delegate.storage(storage); - return this; - } - - @Override - public Builder sampler(CollectorSampler sampler) { - this.delegate.sampler(sampler); - return this; - } - - @Override - public Builder metrics(CollectorMetrics metrics) { - if (metrics == null) throw new NullPointerException("metrics == null"); - this.metrics = metrics.forTransport("rabbitmq"); - this.delegate.metrics(this.metrics); - return this; - } - - public Builder addresses(List addresses) { - this.addresses = convertAddresses(addresses); - return this; - } - - public Builder concurrency(int concurrency) { - this.concurrency = concurrency; - return this; - } - - public Builder connectionFactory(ConnectionFactory connectionFactory) { - if (connectionFactory == null) throw new NullPointerException("connectionFactory == null"); - this.connectionFactory = connectionFactory; - return this; - } - - /** Queue zipkin spans will be consumed from. Defaults to "zipkin-spans". 
*/ - public Builder queue(String queue) { - if (queue == null) throw new NullPointerException("queue == null"); - this.queue = queue; - return this; - } - - @Override - public RabbitMQCollector build() { - return new RabbitMQCollector(this); - } - } - - final String queue; - final LazyInit connection; - - RabbitMQCollector(Builder builder) { - this.queue = builder.queue; - this.connection = new LazyInit(builder); - } - - @Override - public RabbitMQCollector start() { - connection.get(); - return this; - } - - @Override - public CheckResult check() { - try { - start(); - CheckResult failure = connection.failure.get(); - if (failure != null) return failure; - return CheckResult.OK; - } catch (Throwable e) { - Call.propagateIfFatal(e); - return CheckResult.failed(e); - } - } - - @Override - public void close() throws IOException { - connection.close(); - } - - @Override public final String toString() { - return "RabbitMQCollector{addresses=" - + Arrays.toString(connection.builder.addresses) - + ", queue=" - + queue - + "}"; - } - - /** Lazy creates a connection and a queue before starting consumers */ - static final class LazyInit { - final Builder builder; - final AtomicReference failure = new AtomicReference<>(); - volatile Connection connection; - - // TODO: bad idea to lazy reference properties from a mutable builder - // copy them here and then pass this to the KafkaCollectorWorker ctor instead - LazyInit(Builder builder) { - this.builder = builder; - } - - Connection get() { - if (connection == null) { - synchronized (this) { - if (connection == null) { - connection = compute(); - } - } - } - return connection; - } - - void close() throws IOException { - Connection maybeConnection = connection; - if (maybeConnection != null) maybeConnection.close(); - } - - Connection compute() { - Connection connection; - try { - connection = - (builder.addresses == null) - ? 
builder.connectionFactory.newConnection() - : builder.connectionFactory.newConnection(builder.addresses); - declareQueueIfMissing(connection); - } catch (IOException e) { - throw new UncheckedIOException( - "Unable to establish connection to RabbitMQ server: " + e.getMessage(), e); - } catch (TimeoutException e) { - throw new RuntimeException( - "Timeout establishing connection to RabbitMQ server: " + e.getMessage(), e); - } - Collector collector = builder.delegate.build(); - CollectorMetrics metrics = builder.metrics; - - for (int i = 0; i < builder.concurrency; i++) { - String consumerTag = "zipkin-rabbitmq." + i; - try { - // this sets up a channel for each consumer thread. - // We don't track channels, as the connection will close its channels implicitly - Channel channel = connection.createChannel(); - RabbitMQSpanConsumer consumer = new RabbitMQSpanConsumer(channel, collector, metrics); - channel.basicConsume(builder.queue, true, consumerTag, consumer); - } catch (IOException e) { - throw new IllegalStateException("Failed to start RabbitMQ consumer " + consumerTag, e); - } - } - return connection; - } - - private void declareQueueIfMissing(Connection connection) throws IOException, TimeoutException { - Channel channel = connection.createChannel(); - try { - // check if queue already exists - channel.queueDeclarePassive(builder.queue); - channel.close(); - } catch (IOException maybeQueueDoesNotExist) { - Throwable cause = maybeQueueDoesNotExist.getCause(); - if (cause != null && cause.getMessage().contains("NOT_FOUND")) { - channel = connection.createChannel(); - channel.queueDeclare(builder.queue, true, false, false, null); - channel.close(); - } else { - throw maybeQueueDoesNotExist; - } - } - } - } - - /** - * Consumes spans from messages on a RabbitMQ queue. Malformed messages will be discarded. Errors - * in the storage component will similarly be ignored, with no retry of the message. 
- */ - static class RabbitMQSpanConsumer extends DefaultConsumer { - final Collector collector; - final CollectorMetrics metrics; - - RabbitMQSpanConsumer(Channel channel, Collector collector, CollectorMetrics metrics) { - super(channel); - this.collector = collector; - this.metrics = metrics; - } - - @Override - public void handleDelivery(String tag, Envelope envelope, BasicProperties props, byte[] body) { - metrics.incrementMessages(); - metrics.incrementBytes(body.length); - - if (body.length == 0) return; // lenient on empty messages - - collector.acceptSpans(body, NOOP); - } - } - - static Address[] convertAddresses(List addresses) { - Address[] addressArray = new Address[addresses.size()]; - for (int i = 0; i < addresses.size(); i++) { - String[] splitAddress = addresses.get(i).split(":", 100); - String host = splitAddress[0]; - int port = -1; - try { - if (splitAddress.length == 2) port = Integer.parseInt(splitAddress[1]); - } catch (NumberFormatException ignore) { - // EmptyCatch ignored - } - addressArray[i] = (port > 0) ? new Address(host, port) : new Address(host); - } - return addressArray; - } -} diff --git a/zipkin-collector/rabbitmq/src/test/java/zipkin2/collector/rabbitmq/ITRabbitMQCollector.java b/zipkin-collector/rabbitmq/src/test/java/zipkin2/collector/rabbitmq/ITRabbitMQCollector.java deleted file mode 100644 index 5e084020e15..00000000000 --- a/zipkin-collector/rabbitmq/src/test/java/zipkin2/collector/rabbitmq/ITRabbitMQCollector.java +++ /dev/null @@ -1,239 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.collector.rabbitmq; - -import com.rabbitmq.client.Channel; -import com.rabbitmq.client.Connection; -import com.rabbitmq.client.ConnectionFactory; -import java.util.List; -import java.util.concurrent.CopyOnWriteArraySet; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.atomic.AtomicInteger; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.TestInstance; -import org.junit.jupiter.api.Timeout; -import org.junit.jupiter.api.extension.RegisterExtension; -import zipkin2.Call; -import zipkin2.Callback; -import zipkin2.Component; -import zipkin2.Span; -import zipkin2.codec.SpanBytesEncoder; -import zipkin2.collector.InMemoryCollectorMetrics; -import zipkin2.storage.ForwardingStorageComponent; -import zipkin2.storage.SpanConsumer; -import zipkin2.storage.StorageComponent; - -import static java.util.Arrays.asList; -import static org.assertj.core.api.Assertions.assertThat; -import static zipkin2.TestObjects.LOTS_OF_SPANS; -import static zipkin2.TestObjects.UTF_8; -import static zipkin2.codec.SpanBytesEncoder.THRIFT; - -@TestInstance(TestInstance.Lifecycle.PER_CLASS) -@Timeout(60) -class ITRabbitMQCollector { - @RegisterExtension RabbitMQExtension rabbit = new RabbitMQExtension(); - - List spans = asList(LOTS_OF_SPANS[0], LOTS_OF_SPANS[1]); - - InMemoryCollectorMetrics metrics = new InMemoryCollectorMetrics(); - InMemoryCollectorMetrics rabbitmqMetrics = metrics.forTransport("rabbitmq"); - - CopyOnWriteArraySet 
threadsProvidingSpans = new CopyOnWriteArraySet<>(); - LinkedBlockingQueue> receivedSpans = new LinkedBlockingQueue<>(); - SpanConsumer consumer = (spans) -> { - threadsProvidingSpans.add(Thread.currentThread()); - receivedSpans.add(spans); - return Call.create(null); - }; - Connection connection; - - @BeforeEach void setup() throws Exception { - metrics.clear(); - ConnectionFactory factory = new ConnectionFactory(); - factory.setHost(rabbit.host()); - factory.setPort(rabbit.port()); - connection = factory.newConnection(); - } - - @AfterEach void tearDown() throws Exception { - if (connection != null) connection.close(); - } - - @Test void checkPasses() throws Exception { - try (RabbitMQCollector collector = builder("check_passes").build()) { - assertThat(collector.check().ok()).isTrue(); - } - } - - /** Ensures list encoding works: a TBinaryProtocol encoded list of spans */ - @Test void messageWithMultipleSpans_thrift() throws Exception { - messageWithMultipleSpans(builder("multiple_spans_thrift"), THRIFT); - } - - /** Ensures list encoding works: a json encoded list of spans */ - @Test void messageWithMultipleSpans_json() throws Exception { - messageWithMultipleSpans(builder("multiple_spans_json"), SpanBytesEncoder.JSON_V1); - } - - /** Ensures list encoding works: a version 2 json list of spans */ - @Test void messageWithMultipleSpans_json2() throws Exception { - messageWithMultipleSpans(builder("multiple_spans_json2"), SpanBytesEncoder.JSON_V2); - } - - /** Ensures list encoding works: proto3 ListOfSpans */ - @Test void messageWithMultipleSpans_proto3() throws Exception { - messageWithMultipleSpans(builder("multiple_spans_proto3"), SpanBytesEncoder.PROTO3); - } - - void messageWithMultipleSpans(RabbitMQCollector.Builder builder, SpanBytesEncoder encoder) - throws Exception { - byte[] message = encoder.encodeList(spans); - - produceSpans(message, builder.queue); - - try (RabbitMQCollector collector = builder.build()) { - collector.start(); - 
assertThat(receivedSpans.take()).containsAll(spans); - } - - assertThat(rabbitmqMetrics.messages()).isEqualTo(1); - assertThat(rabbitmqMetrics.messagesDropped()).isZero(); - assertThat(rabbitmqMetrics.bytes()).isEqualTo(message.length); - assertThat(rabbitmqMetrics.spans()).isEqualTo(spans.size()); - assertThat(rabbitmqMetrics.spansDropped()).isZero(); - } - - /** Ensures malformed spans don't hang the collector */ - @Test void skipsMalformedData() throws Exception { - RabbitMQCollector.Builder builder = builder("decoder_exception"); - - byte[] malformed1 = "[\"='".getBytes(UTF_8); // screwed up json - byte[] malformed2 = "malformed".getBytes(UTF_8); - produceSpans(THRIFT.encodeList(spans), builder.queue); - produceSpans(new byte[0], builder.queue); - produceSpans(malformed1, builder.queue); - produceSpans(malformed2, builder.queue); - produceSpans(THRIFT.encodeList(spans), builder.queue); - - try (RabbitMQCollector collector = builder.build()) { - collector.start(); - assertThat(receivedSpans.take()).containsExactlyElementsOf(spans); - // the only way we could read this, is if the malformed spans were skipped. 
- assertThat(receivedSpans.take()).containsExactlyElementsOf(spans); - } - - assertThat(rabbitmqMetrics.messages()).isEqualTo(5); - assertThat(rabbitmqMetrics.messagesDropped()).isEqualTo(2); // only malformed, not empty - assertThat(rabbitmqMetrics.bytes()) - .isEqualTo(THRIFT.encodeList(spans).length * 2 + malformed1.length + malformed2.length); - assertThat(rabbitmqMetrics.spans()).isEqualTo(spans.size() * 2); - assertThat(rabbitmqMetrics.spansDropped()).isZero(); - } - - @Test void startsWhenConfiguredQueueDoesntExist() throws Exception { - try (RabbitMQCollector collector = builder("ignored").queue("zipkin-test2").build()) { - assertThat(collector.check().ok()).isTrue(); - } - } - - /** Guards against errors that leak from storage, such as InvalidQueryException */ - @Test void skipsOnSpanStorageException() throws Exception { - AtomicInteger counter = new AtomicInteger(); - consumer = (input) -> new Call.Base() { - @Override protected Void doExecute() { - throw new AssertionError(); - } - - @Override protected void doEnqueue(Callback callback) { - if (counter.getAndIncrement() == 1) { - callback.onError(new RuntimeException("storage fell over")); - } else { - receivedSpans.add(spans); - callback.onSuccess(null); - } - } - - @Override public Call clone() { - throw new AssertionError(); - } - }; - final StorageComponent storage = buildStorage(consumer); - RabbitMQCollector.Builder builder = builder("storage_exception").storage(storage); - - produceSpans(THRIFT.encodeList(spans), builder.queue); - produceSpans(THRIFT.encodeList(spans), builder.queue); // tossed on error - produceSpans(THRIFT.encodeList(spans), builder.queue); - - try (RabbitMQCollector collector = builder.build()) { - collector.start(); - assertThat(receivedSpans.take()).containsExactlyElementsOf(spans); - // the only way we could read this, is if the malformed span was skipped. 
- assertThat(receivedSpans.take()).containsExactlyElementsOf(spans); - } - - assertThat(rabbitmqMetrics.messages()).isEqualTo(3); - assertThat(rabbitmqMetrics.messagesDropped()).isZero(); // storage failure isn't a message failure - assertThat(rabbitmqMetrics.bytes()).isEqualTo(THRIFT.encodeList(spans).length * 3); - assertThat(rabbitmqMetrics.spans()).isEqualTo(spans.size() * 3); - assertThat(rabbitmqMetrics.spansDropped()).isEqualTo(spans.size()); // only one dropped - } - - /** - * The {@code toString()} of {@link Component} implementations appear in health check endpoints. - * Since these are likely to be exposed in logs and other monitoring tools, care should be taken - * to ensure {@code toString()} output is a reasonable length and does not contain sensitive - * information. - */ - @Test void toStringContainsOnlySummaryInformation() throws Exception { - try (RabbitMQCollector collector = builder("bugs bunny").build()) { - collector.start(); - - assertThat(collector).hasToString( - String.format("RabbitMQCollector{addresses=[%s:%s], queue=%s}", rabbit.host(), - rabbit.port(), "bugs bunny") - ); - } - } - - void produceSpans(byte[] spans, String queue) throws Exception { - Channel channel = null; - try { - channel = connection.createChannel(); - channel.basicPublish("", queue, null, spans); - } finally { - if (channel != null) channel.close(); - } - } - - RabbitMQCollector.Builder builder(String queue) { - return rabbit.newCollectorBuilder(queue) - .metrics(metrics) - .storage(buildStorage(consumer)); - } - - static StorageComponent buildStorage(final SpanConsumer spanConsumer) { - return new ForwardingStorageComponent() { - @Override protected StorageComponent delegate() { - throw new AssertionError(); - } - - @Override public SpanConsumer spanConsumer() { - return spanConsumer; - } - }; - } -} diff --git a/zipkin-collector/rabbitmq/src/test/java/zipkin2/collector/rabbitmq/RabbitMQCollectorTest.java 
b/zipkin-collector/rabbitmq/src/test/java/zipkin2/collector/rabbitmq/RabbitMQCollectorTest.java deleted file mode 100644 index cb390f1d6bc..00000000000 --- a/zipkin-collector/rabbitmq/src/test/java/zipkin2/collector/rabbitmq/RabbitMQCollectorTest.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.collector.rabbitmq; - -import com.rabbitmq.client.ConnectionFactory; -import java.io.UncheckedIOException; -import org.junit.Before; -import org.junit.Test; -import zipkin2.CheckResult; -import zipkin2.Component; - -import static java.util.Arrays.asList; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; - -public class RabbitMQCollectorTest { - - RabbitMQCollector collector; - - @Before public void before() { - ConnectionFactory connectionFactory = new ConnectionFactory(); - connectionFactory.setConnectionTimeout(100); - // We can be pretty certain RabbitMQ isn't running on localhost port 80 - collector = RabbitMQCollector.builder() - .connectionFactory(connectionFactory).addresses(asList("localhost:80")).build(); - } - - @Test public void checkFalseWhenRabbitMQIsDown() { - CheckResult check = collector.check(); - assertThat(check.ok()).isFalse(); - assertThat(check.error()).isInstanceOf(UncheckedIOException.class); - } - - @Test public void startFailsWhenRabbitMQIsDown() { - // NOTE.. 
This is probably not good as it can crash on transient failure.. - assertThatThrownBy(collector::start) - .isInstanceOf(UncheckedIOException.class) - .hasMessageStartingWith("Unable to establish connection to RabbitMQ server"); - } - - /** - * The {@code toString()} of {@link Component} implementations appear in health check endpoints. - * Since these are likely to be exposed in logs and other monitoring tools, care should be taken - * to ensure {@code toString()} output is a reasonable length and does not contain sensitive - * information. - */ - @Test public void toStringContainsOnlySummaryInformation() { - assertThat(collector).hasToString( - "RabbitMQCollector{addresses=[localhost:80], queue=zipkin}" - ); - } -} diff --git a/zipkin-collector/rabbitmq/src/test/java/zipkin2/collector/rabbitmq/RabbitMQExtension.java b/zipkin-collector/rabbitmq/src/test/java/zipkin2/collector/rabbitmq/RabbitMQExtension.java deleted file mode 100644 index 3944e4ca137..00000000000 --- a/zipkin-collector/rabbitmq/src/test/java/zipkin2/collector/rabbitmq/RabbitMQExtension.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.collector.rabbitmq; - -import java.time.Duration; -import org.junit.jupiter.api.extension.AfterAllCallback; -import org.junit.jupiter.api.extension.BeforeAllCallback; -import org.junit.jupiter.api.extension.ExtensionContext; -import org.opentest4j.TestAbortedException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.testcontainers.containers.Container.ExecResult; -import org.testcontainers.containers.GenericContainer; -import org.testcontainers.containers.wait.strategy.Wait; - -import static java.util.Arrays.asList; -import static org.testcontainers.utility.DockerImageName.parse; -import static zipkin2.Call.propagateIfFatal; - -class RabbitMQExtension implements BeforeAllCallback, AfterAllCallback { - static final Logger LOGGER = LoggerFactory.getLogger(RabbitMQExtension.class); - static final int RABBIT_PORT = 5672; - - RabbitMQContainer container = new RabbitMQContainer(); - - @Override public void beforeAll(ExtensionContext context) { - if (context.getRequiredTestClass().getEnclosingClass() != null) { - // Only run once in outermost scope. - return; - } - - container.start(); - LOGGER.info("Using hostPort " + host() + ":" + port()); - } - - @Override public void afterAll(ExtensionContext context) { - if (context.getRequiredTestClass().getEnclosingClass() != null) { - // Only run once in outermost scope. 
- return; - } - - container.stop(); - } - - RabbitMQCollector.Builder newCollectorBuilder(String queue) { - declareQueue(queue); - return RabbitMQCollector.builder().queue(queue).addresses(asList(host() + ":" + port())); - } - - void declareQueue(String queue) { - ExecResult result; - try { - result = container.execInContainer("rabbitmqadmin", "declare", "queue", "name=" + queue); - } catch (Throwable e) { - propagateIfFatal(e); - throw new TestAbortedException( - "Couldn't declare queue " + queue + ": " + e.getMessage(), e); - } - if (result.getExitCode() != 0) { - throw new TestAbortedException("Couldn't declare queue " + queue + ": " + result); - } - } - - String host() { - return container.getHost(); - } - - int port() { - return container.getMappedPort(RABBIT_PORT); - } - - // mostly waiting for https://github.com/testcontainers/testcontainers-java/issues/3537 - static final class RabbitMQContainer extends GenericContainer { - RabbitMQContainer() { - super(parse("ghcr.io/openzipkin/rabbitmq-management-alpine:latest")); - if ("true".equals(System.getProperty("docker.skip"))) { - throw new TestAbortedException("${docker.skip} == true"); - } - withExposedPorts(RABBIT_PORT); // rabbit's image doesn't expose any port - waitStrategy = Wait.forLogMessage(".*Server startup complete.*", 1); - withStartupTimeout(Duration.ofSeconds(60)); - } - } -} diff --git a/zipkin-collector/rabbitmq/src/test/resources/simplelogger.properties b/zipkin-collector/rabbitmq/src/test/resources/simplelogger.properties deleted file mode 100644 index 722851b466c..00000000000 --- a/zipkin-collector/rabbitmq/src/test/resources/simplelogger.properties +++ /dev/null @@ -1,9 +0,0 @@ -# See https://www.slf4j.org/api/org/slf4j/impl/SimpleLogger.html for the full list of config options - -org.slf4j.simpleLogger.logFile=System.out -org.slf4j.simpleLogger.defaultLogLevel=warn -org.slf4j.simpleLogger.showDateTime=true -org.slf4j.simpleLogger.dateTimeFormat=yyyy-MM-dd HH:mm:ss:SSS - -# stop huge spam 
-org.slf4j.simpleLogger.log.org.testcontainers.dockerclient=off diff --git a/zipkin-collector/scribe/README.md b/zipkin-collector/scribe/README.md deleted file mode 100644 index e9047717137..00000000000 --- a/zipkin-collector/scribe/README.md +++ /dev/null @@ -1,21 +0,0 @@ -# collector-scribe - -## ScribeCollector -This collector accepts Scribe logs in a specified category. Each log -entry is expected to contain a single span, which is TBinaryProtocol -big-endian, then base64 encoded. These spans are then pushed to storage. - -`zipkin2.collector.scribe.ScribeCollector.Builder` includes defaults that will -listen on port 9410, accept log entries in the category "zipkin" - -## Encoding -The scribe message is a TBinaryProtocol big-endian, then Base64 span. -Base64 Basic and MIME schemes are supported. - -Here's what it looks like in pseudocode -``` -serialized = writeTBinaryProtocol(span) -encoded = base64(serialized) - -scribe.log(category = "zipkin", message = encoded) -``` diff --git a/zipkin-collector/scribe/pom.xml b/zipkin-collector/scribe/pom.xml deleted file mode 100644 index e3f0a24efcd..00000000000 --- a/zipkin-collector/scribe/pom.xml +++ /dev/null @@ -1,71 +0,0 @@ - - - - 4.0.0 - - - io.zipkin.zipkin2 - zipkin-collector-parent - 2.24.4-SNAPSHOT - - - zipkin-collector-scribe - Collector: Scribe (Legacy) - - - ${project.basedir}/../.. 
- - - -XepDisableWarningsInGeneratedCode - - - - - ${project.groupId} - zipkin-collector - ${project.version} - - - - ${armeria.groupId} - armeria-thrift0.15 - ${armeria.version} - - - - - javax.annotation - javax.annotation-api - ${javax-annotation-api.version} - provided - - - - ${armeria.groupId} - armeria-junit4 - ${armeria.version} - test - - - - org.awaitility - awaitility - ${awaitility.version} - test - - - diff --git a/zipkin-collector/scribe/src/main/java/zipkin2/collector/scribe/NettyScribeServer.java b/zipkin-collector/scribe/src/main/java/zipkin2/collector/scribe/NettyScribeServer.java deleted file mode 100644 index 0af84b1a680..00000000000 --- a/zipkin-collector/scribe/src/main/java/zipkin2/collector/scribe/NettyScribeServer.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.collector.scribe; - -import com.linecorp.armeria.common.CommonPools; -import com.linecorp.armeria.common.util.EventLoopGroups; -import io.netty.bootstrap.ServerBootstrap; -import io.netty.channel.Channel; -import io.netty.channel.ChannelInitializer; -import io.netty.channel.EventLoopGroup; -import io.netty.channel.socket.SocketChannel; -import io.netty.handler.codec.LengthFieldBasedFrameDecoder; -import java.net.InetSocketAddress; - -import static zipkin2.Call.propagateIfFatal; - -final class NettyScribeServer { - final int port; - final ScribeSpanConsumer scribe; - - volatile EventLoopGroup bossGroup; - volatile Channel channel; - - NettyScribeServer(int port, ScribeSpanConsumer scribe) { - this.port = port; - this.scribe = scribe; - } - - void start() { - bossGroup = EventLoopGroups.newEventLoopGroup(1); - EventLoopGroup workerGroup = CommonPools.workerGroup(); - - ServerBootstrap b = new ServerBootstrap(); - try { - channel = b.group(bossGroup, workerGroup) - .channel(EventLoopGroups.serverChannelType(bossGroup)) - .childHandler(new ChannelInitializer() { - @Override protected void initChannel(SocketChannel ch) { - ch.pipeline() - .addLast(new LengthFieldBasedFrameDecoder(Integer.MAX_VALUE, 0, 4, 0, 4)) - .addLast(new ScribeInboundHandler(scribe)); - } - }) - .bind(port) - .syncUninterruptibly() - .channel(); - } catch (Throwable t) { - propagateIfFatal(t); - throw new RuntimeException("Could not start scribe server.", t); - } - } - - @SuppressWarnings("FutureReturnValueIgnored") - void close() { - if (channel == null) return; - // TODO: chain these futures, and probably block a bit - // https://line-armeria.slack.com/archives/C1NGPBUH2/p1591167918430500 - channel.close(); - bossGroup.shutdownGracefully(); - } - - boolean isRunning() { - return channel != null && channel.isActive(); - } - - int port() { - if (channel == null) return 0; - return ((InetSocketAddress) channel.localAddress()).getPort(); - } -} diff --git 
a/zipkin-collector/scribe/src/main/java/zipkin2/collector/scribe/ScribeCollector.java b/zipkin-collector/scribe/src/main/java/zipkin2/collector/scribe/ScribeCollector.java deleted file mode 100644 index af412941024..00000000000 --- a/zipkin-collector/scribe/src/main/java/zipkin2/collector/scribe/ScribeCollector.java +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.collector.scribe; - -import zipkin2.CheckResult; -import zipkin2.collector.Collector; -import zipkin2.collector.CollectorComponent; -import zipkin2.collector.CollectorMetrics; -import zipkin2.collector.CollectorSampler; -import zipkin2.storage.SpanConsumer; -import zipkin2.storage.StorageComponent; - -/** - * This collector accepts Scribe logs in a specified category. Each log entry is expected to contain - * a single span, which is TBinaryProtocol big-endian, then base64 encoded. These spans are chained - * to an {@link SpanConsumer#accept asynchronous span consumer}. - */ -public final class ScribeCollector extends CollectorComponent { - - public static Builder newBuilder() { - return new Builder(); - } - - /** Configuration including defaults needed to receive spans from a Scribe category. 
*/ - public static final class Builder extends CollectorComponent.Builder { - Collector.Builder delegate = Collector.newBuilder(ScribeCollector.class); - CollectorMetrics metrics = CollectorMetrics.NOOP_METRICS; - String category = "zipkin"; - int port = 9410; - - @Override public Builder storage(StorageComponent storage) { - delegate.storage(storage); - return this; - } - - @Override public Builder metrics(CollectorMetrics metrics) { - if (metrics == null) throw new NullPointerException("metrics == null"); - this.metrics = metrics.forTransport("scribe"); - delegate.metrics(this.metrics); - return this; - } - - @Override public Builder sampler(CollectorSampler sampler) { - delegate.sampler(sampler); - return this; - } - - /** Category zipkin spans will be consumed from. Defaults to "zipkin" */ - public Builder category(String category) { - if (category == null) throw new NullPointerException("category == null"); - this.category = category; - return this; - } - - /** The port to listen on. Defaults to 9410 */ - public Builder port(int port) { - this.port = port; - return this; - } - - @Override public ScribeCollector build() { - return new ScribeCollector(this); - } - } - - final NettyScribeServer server; - - ScribeCollector(Builder builder) { - server = new NettyScribeServer(builder.port, - new ScribeSpanConsumer(builder.delegate.build(), builder.metrics, builder.category)); - } - - /** Will throw an exception if the {@link Builder#port(int) port} is already in use. */ - @Override public ScribeCollector start() { - server.start(); - return this; - } - - @Override public CheckResult check() { - if (!server.isRunning()) { - return CheckResult.failed(new IllegalStateException("server not running")); - } - return CheckResult.OK; - } - - /** Returns zero until {@link #start()} was called. 
*/ - public int port() { - return server.port(); - } - - @Override public final String toString() { - return "ScribeCollector{port=" + port() + ", category=" + server.scribe.category + "}"; - } - - @Override public void close() { - server.close(); - } -} diff --git a/zipkin-collector/scribe/src/main/java/zipkin2/collector/scribe/ScribeInboundHandler.java b/zipkin-collector/scribe/src/main/java/zipkin2/collector/scribe/ScribeInboundHandler.java deleted file mode 100644 index 12e63f889aa..00000000000 --- a/zipkin-collector/scribe/src/main/java/zipkin2/collector/scribe/ScribeInboundHandler.java +++ /dev/null @@ -1,150 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.collector.scribe; - -import com.linecorp.armeria.common.HttpData; -import com.linecorp.armeria.common.HttpHeaderNames; -import com.linecorp.armeria.common.HttpMethod; -import com.linecorp.armeria.common.HttpRequest; -import com.linecorp.armeria.common.HttpResponse; -import com.linecorp.armeria.common.RequestHeaders; -import com.linecorp.armeria.common.util.Exceptions; -import com.linecorp.armeria.common.util.SafeCloseable; -import com.linecorp.armeria.server.ServiceRequestContext; -import com.linecorp.armeria.server.ServiceRequestContextBuilder; -import com.linecorp.armeria.server.thrift.THttpService; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.Unpooled; -import io.netty.channel.Channel; -import io.netty.channel.ChannelFutureListener; -import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelInboundHandlerAdapter; -import io.netty.channel.EventLoop; -import java.util.HashMap; -import java.util.Map; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import static zipkin2.Call.propagateIfFatal; - -@SuppressWarnings("FutureReturnValueIgnored") -// TODO: errorprone wants us to check futures before returning, but what would be a sensible check? -// Say it is somehow canceled, would we take action? Would callback.onError() be redundant? 
-final class ScribeInboundHandler extends ChannelInboundHandlerAdapter { - - static final Logger logger = LoggerFactory.getLogger(ScribeInboundHandler.class); - - // Headers mostly copied from https://github.com/apache/thrift/blob/master/lib/javame/src/org/apache/thrift/transport/THttpClient.java#L130 - static final RequestHeaders THRIFT_HEADERS = RequestHeaders.builder( - HttpMethod.POST, "/internal/zipkin-thriftrpc") - .set(HttpHeaderNames.CONTENT_TYPE, "application/x-thrift") - .set(HttpHeaderNames.ACCEPT, "application/x-thrift") - .set(HttpHeaderNames.USER_AGENT, "Zipkin/ScribeInboundHandler") - .build(); - - final THttpService scribeService; - - ScribeInboundHandler(ScribeSpanConsumer scribe) { - scribeService = THttpService.of(scribe); - } - - Map pendingResponses = new HashMap<>(); - int nextResponseIndex = 0; - int previouslySentResponseIndex = -1; - - @Override public void channelRead(ChannelHandlerContext ctx, Object payload) { - assert payload instanceof ByteBuf; - HttpRequest request = HttpRequest.of(THRIFT_HEADERS, HttpData.wrap((ByteBuf) payload)); - ServiceRequestContextBuilder requestContextBuilder = ServiceRequestContext.builder(request) - .service(scribeService) - .alloc(ctx.alloc()); - - if (ctx.executor() instanceof EventLoop) { - requestContextBuilder.eventLoop((EventLoop) ctx.executor()); - } - - ServiceRequestContext requestContext = requestContextBuilder.build(); - - final HttpResponse response; - try (SafeCloseable unused = requestContext.push()) { - response = HttpResponse.of(scribeService.serve(requestContext, request)); - } catch (Throwable t) { - propagateIfFatal(t); - exceptionCaught(ctx, t); - return; - } - - int responseIndex = nextResponseIndex++; - - response.aggregateWithPooledObjects(ctx.executor(), ctx.alloc()).handle((msg, t) -> { - if (t != null) { - exceptionCaught(ctx, t); - return null; - } - - try (HttpData content = msg.content()) { - ByteBuf returned = ctx.alloc().buffer(content.length() + 4); - 
returned.writeInt(content.length()); - returned.writeBytes(content.byteBuf()); - if (responseIndex == previouslySentResponseIndex + 1) { - ctx.writeAndFlush(returned); - previouslySentResponseIndex++; - - flushResponses(ctx); - } else { - pendingResponses.put(responseIndex, returned); - } - } - - return null; - }); - } - - @Override public void channelInactive(ChannelHandlerContext ctx) { - release(); - } - - @Override public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { - Exceptions.logIfUnexpected(logger, ctx.channel(), cause); - - release(); - closeOnFlush(ctx.channel()); - } - - void flushResponses(ChannelHandlerContext ctx) { - while (!pendingResponses.isEmpty()) { - ByteBuf response = pendingResponses.remove(previouslySentResponseIndex + 1); - if (response == null) { - return; - } - - ctx.writeAndFlush(response); - previouslySentResponseIndex++; - } - } - - void release() { - pendingResponses.values().forEach(ByteBuf::release); - pendingResponses.clear(); - } - - /** - * Closes the specified channel after all queued write requests are flushed. - */ - static void closeOnFlush(Channel ch) { - if (ch.isActive()) { - ch.writeAndFlush(Unpooled.EMPTY_BUFFER).addListener(ChannelFutureListener.CLOSE); - } - } -} diff --git a/zipkin-collector/scribe/src/main/java/zipkin2/collector/scribe/ScribeSpanConsumer.java b/zipkin-collector/scribe/src/main/java/zipkin2/collector/scribe/ScribeSpanConsumer.java deleted file mode 100644 index 2863a855a47..00000000000 --- a/zipkin-collector/scribe/src/main/java/zipkin2/collector/scribe/ScribeSpanConsumer.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.collector.scribe; - -import com.linecorp.armeria.common.CommonPools; -import java.nio.charset.StandardCharsets; -import java.util.ArrayList; -import java.util.Base64; -import java.util.List; -import org.apache.thrift.async.AsyncMethodCallback; -import zipkin2.Callback; -import zipkin2.Span; -import zipkin2.codec.SpanBytesDecoder; -import zipkin2.collector.Collector; -import zipkin2.collector.CollectorMetrics; -import zipkin2.collector.scribe.generated.LogEntry; -import zipkin2.collector.scribe.generated.ResultCode; -import zipkin2.collector.scribe.generated.Scribe; - -final class ScribeSpanConsumer implements Scribe.AsyncIface { - final Collector collector; - final CollectorMetrics metrics; - final String category; - - ScribeSpanConsumer(Collector collector, CollectorMetrics metrics, String category) { - this.collector = collector; - this.metrics = metrics; - this.category = category; - } - - @Override - public void Log(List messages, AsyncMethodCallback resultHandler) { - metrics.incrementMessages(); - List spans = new ArrayList<>(); - int byteCount = 0; - try { - for (LogEntry logEntry : messages) { - if (!category.equals(logEntry.category)) continue; - byte[] bytes = logEntry.message.getBytes(StandardCharsets.ISO_8859_1); - bytes = Base64.getMimeDecoder().decode(bytes); // finagle-zipkin uses mime encoding - byteCount += bytes.length; - spans.add(SpanBytesDecoder.THRIFT.decodeOne(bytes)); - } - } catch (RuntimeException e) { - metrics.incrementMessagesDropped(); - resultHandler.onError(e); - return; - } finally { - 
metrics.incrementBytes(byteCount); - } - - collector.accept(spans, new Callback() { - @Override public void onSuccess(Void value) { - resultHandler.onComplete(ResultCode.OK); - } - - @Override public void onError(Throwable t) { - Exception error = t instanceof Exception ? (Exception) t : new RuntimeException(t); - resultHandler.onError(error); - } - // Collectors may not be asynchronous so switch to blocking executor here. - }, CommonPools.blockingTaskExecutor()); - } -} diff --git a/zipkin-collector/scribe/src/main/java/zipkin2/collector/scribe/generated/LogEntry.java b/zipkin-collector/scribe/src/main/java/zipkin2/collector/scribe/generated/LogEntry.java deleted file mode 100644 index 395867f8f04..00000000000 --- a/zipkin-collector/scribe/src/main/java/zipkin2/collector/scribe/generated/LogEntry.java +++ /dev/null @@ -1,482 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.12.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package zipkin2.collector.scribe.generated; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"}) -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.12.0)", date = "2019-05-07") -public class LogEntry implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("LogEntry"); - - private static final org.apache.thrift.protocol.TField CATEGORY_FIELD_DESC = new org.apache.thrift.protocol.TField("category", org.apache.thrift.protocol.TType.STRING, (short)1); - private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)2); - - private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new LogEntryStandardSchemeFactory(); - private static final org.apache.thrift.scheme.SchemeFactory 
TUPLE_SCHEME_FACTORY = new LogEntryTupleSchemeFactory(); - - public @org.apache.thrift.annotation.Nullable java.lang.String category; // required - public @org.apache.thrift.annotation.Nullable java.lang.String message; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - CATEGORY((short)1, "category"), - MESSAGE((short)2, "message"); - - private static final java.util.Map byName = new java.util.HashMap(); - - static { - for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - @org.apache.thrift.annotation.Nullable - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // CATEGORY - return CATEGORY; - case 2: // MESSAGE - return MESSAGE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - @org.apache.thrift.annotation.Nullable - public static _Fields findByName(java.lang.String name) { - return byName.get(name); - } - - private final short _thriftId; - private final java.lang.String _fieldName; - - _Fields(short thriftId, java.lang.String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public java.lang.String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.CATEGORY, new org.apache.thrift.meta_data.FieldMetaData("category", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.MESSAGE, new org.apache.thrift.meta_data.FieldMetaData("message", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(LogEntry.class, metaDataMap); - } - - public LogEntry() { - } - - public LogEntry( - java.lang.String category, - java.lang.String message) - { - this(); - this.category = category; - this.message = message; - } - - /** - * Performs a deep copy on other. 
- */ - public LogEntry(LogEntry other) { - if (other.isSetCategory()) { - this.category = other.category; - } - if (other.isSetMessage()) { - this.message = other.message; - } - } - - public LogEntry deepCopy() { - return new LogEntry(this); - } - - @Override - public void clear() { - this.category = null; - this.message = null; - } - - @org.apache.thrift.annotation.Nullable - public java.lang.String getCategory() { - return this.category; - } - - public LogEntry setCategory(@org.apache.thrift.annotation.Nullable java.lang.String category) { - this.category = category; - return this; - } - - public void unsetCategory() { - this.category = null; - } - - /** Returns true if field category is set (has been assigned a value) and false otherwise */ - public boolean isSetCategory() { - return this.category != null; - } - - public void setCategoryIsSet(boolean value) { - if (!value) { - this.category = null; - } - } - - @org.apache.thrift.annotation.Nullable - public java.lang.String getMessage() { - return this.message; - } - - public LogEntry setMessage(@org.apache.thrift.annotation.Nullable java.lang.String message) { - this.message = message; - return this; - } - - public void unsetMessage() { - this.message = null; - } - - /** Returns true if field message is set (has been assigned a value) and false otherwise */ - public boolean isSetMessage() { - return this.message != null; - } - - public void setMessageIsSet(boolean value) { - if (!value) { - this.message = null; - } - } - - public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) { - switch (field) { - case CATEGORY: - if (value == null) { - unsetCategory(); - } else { - setCategory((java.lang.String)value); - } - break; - - case MESSAGE: - if (value == null) { - unsetMessage(); - } else { - setMessage((java.lang.String)value); - } - break; - - } - } - - @org.apache.thrift.annotation.Nullable - public java.lang.Object getFieldValue(_Fields field) { - switch (field) 
{ - case CATEGORY: - return getCategory(); - - case MESSAGE: - return getMessage(); - - } - throw new java.lang.IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new java.lang.IllegalArgumentException(); - } - - switch (field) { - case CATEGORY: - return isSetCategory(); - case MESSAGE: - return isSetMessage(); - } - throw new java.lang.IllegalStateException(); - } - - @Override - public boolean equals(java.lang.Object that) { - if (that == null) - return false; - if (that instanceof LogEntry) - return this.equals((LogEntry)that); - return false; - } - - public boolean equals(LogEntry that) { - if (that == null) - return false; - if (this == that) - return true; - - boolean this_present_category = true && this.isSetCategory(); - boolean that_present_category = true && that.isSetCategory(); - if (this_present_category || that_present_category) { - if (!(this_present_category && that_present_category)) - return false; - if (!this.category.equals(that.category)) - return false; - } - - boolean this_present_message = true && this.isSetMessage(); - boolean that_present_message = true && that.isSetMessage(); - if (this_present_message || that_present_message) { - if (!(this_present_message && that_present_message)) - return false; - if (!this.message.equals(that.message)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - int hashCode = 1; - - hashCode = hashCode * 8191 + ((isSetCategory()) ? 131071 : 524287); - if (isSetCategory()) - hashCode = hashCode * 8191 + category.hashCode(); - - hashCode = hashCode * 8191 + ((isSetMessage()) ? 
131071 : 524287); - if (isSetMessage()) - hashCode = hashCode * 8191 + message.hashCode(); - - return hashCode; - } - - @Override - public int compareTo(LogEntry other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = java.lang.Boolean.valueOf(isSetCategory()).compareTo(other.isSetCategory()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetCategory()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.category, other.category); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = java.lang.Boolean.valueOf(isSetMessage()).compareTo(other.isSetMessage()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetMessage()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.message, other.message); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - @org.apache.thrift.annotation.Nullable - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - scheme(iprot).read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - scheme(oprot).write(oprot, this); - } - - @Override - public java.lang.String toString() { - java.lang.StringBuilder sb = new java.lang.StringBuilder("LogEntry("); - boolean first = true; - - sb.append("category:"); - if (this.category == null) { - sb.append("null"); - } else { - sb.append(this.category); - } - first = false; - if (!first) sb.append(", "); - sb.append("message:"); - if (this.message == null) { - sb.append("null"); - } else { - sb.append(this.message); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for 
required fields - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class LogEntryStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public LogEntryStandardScheme getScheme() { - return new LogEntryStandardScheme(); - } - } - - private static class LogEntryStandardScheme extends org.apache.thrift.scheme.StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, LogEntry struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // CATEGORY - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.category = iprot.readString(); - struct.setCategoryIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // MESSAGE - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.message = iprot.readString(); - struct.setMessageIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - 
iprot.readStructEnd(); - - // check for required fields of primitive type, which can't be checked in the validate method - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, LogEntry struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.category != null) { - oprot.writeFieldBegin(CATEGORY_FIELD_DESC); - oprot.writeString(struct.category); - oprot.writeFieldEnd(); - } - if (struct.message != null) { - oprot.writeFieldBegin(MESSAGE_FIELD_DESC); - oprot.writeString(struct.message); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class LogEntryTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public LogEntryTupleScheme getScheme() { - return new LogEntryTupleScheme(); - } - } - - private static class LogEntryTupleScheme extends org.apache.thrift.scheme.TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, LogEntry struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; - java.util.BitSet optionals = new java.util.BitSet(); - if (struct.isSetCategory()) { - optionals.set(0); - } - if (struct.isSetMessage()) { - optionals.set(1); - } - oprot.writeBitSet(optionals, 2); - if (struct.isSetCategory()) { - oprot.writeString(struct.category); - } - if (struct.isSetMessage()) { - oprot.writeString(struct.message); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, LogEntry struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; - java.util.BitSet incoming = iprot.readBitSet(2); - if (incoming.get(0)) { - struct.category = iprot.readString(); - struct.setCategoryIsSet(true); - } - if (incoming.get(1)) { - struct.message = iprot.readString(); - 
struct.setMessageIsSet(true); - } - } - } - - private static S scheme(org.apache.thrift.protocol.TProtocol proto) { - return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme(); - } -} - diff --git a/zipkin-collector/scribe/src/main/java/zipkin2/collector/scribe/generated/ResultCode.java b/zipkin-collector/scribe/src/main/java/zipkin2/collector/scribe/generated/ResultCode.java deleted file mode 100644 index e7f65542c24..00000000000 --- a/zipkin-collector/scribe/src/main/java/zipkin2/collector/scribe/generated/ResultCode.java +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.12.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package zipkin2.collector.scribe.generated; - - -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.12.0)", date = "2019-05-07") -public enum ResultCode implements org.apache.thrift.TEnum { - OK(0), - TRY_LATER(1); - - private final int value; - - private ResultCode(int value) { - this.value = value; - } - - /** - * Get the integer value of this enum value, as defined in the Thrift IDL. - */ - public int getValue() { - return value; - } - - /** - * Find a the enum type by its integer value, as defined in the Thrift IDL. - * @return null if the value is not found. 
- */ - @org.apache.thrift.annotation.Nullable - public static ResultCode findByValue(int value) { - switch (value) { - case 0: - return OK; - case 1: - return TRY_LATER; - default: - return null; - } - } -} diff --git a/zipkin-collector/scribe/src/main/java/zipkin2/collector/scribe/generated/Scribe.java b/zipkin-collector/scribe/src/main/java/zipkin2/collector/scribe/generated/Scribe.java deleted file mode 100644 index 56be78c2195..00000000000 --- a/zipkin-collector/scribe/src/main/java/zipkin2/collector/scribe/generated/Scribe.java +++ /dev/null @@ -1,1045 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.12.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package zipkin2.collector.scribe.generated; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"}) -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.12.0)", date = "2019-05-07") -public class Scribe { - - public interface Iface { - - public ResultCode Log(java.util.List messages) throws org.apache.thrift.TException; - - } - - public interface AsyncIface { - - public void Log(java.util.List messages, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - - } - - public static class Client extends org.apache.thrift.TServiceClient implements Iface { - public static class Factory implements org.apache.thrift.TServiceClientFactory { - public Factory() {} - public Client getClient(org.apache.thrift.protocol.TProtocol prot) { - return new Client(prot); - } - public Client getClient(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TProtocol oprot) { - return new Client(iprot, oprot); - } - } - - public Client(org.apache.thrift.protocol.TProtocol prot) - { - super(prot, prot); - } - - public Client(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TProtocol oprot) { - super(iprot, oprot); - } - - public ResultCode Log(java.util.List messages) 
throws org.apache.thrift.TException - { - send_Log(messages); - return recv_Log(); - } - - public void send_Log(java.util.List messages) throws org.apache.thrift.TException - { - Log_args args = new Log_args(); - args.setMessages(messages); - sendBase("Log", args); - } - - public ResultCode recv_Log() throws org.apache.thrift.TException - { - Log_result result = new Log_result(); - receiveBase(result, "Log"); - if (result.isSetSuccess()) { - return result.success; - } - throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "Log failed: unknown result"); - } - - } - public static class AsyncClient extends org.apache.thrift.async.TAsyncClient implements AsyncIface { - public static class Factory implements org.apache.thrift.async.TAsyncClientFactory { - private org.apache.thrift.async.TAsyncClientManager clientManager; - private org.apache.thrift.protocol.TProtocolFactory protocolFactory; - public Factory(org.apache.thrift.async.TAsyncClientManager clientManager, org.apache.thrift.protocol.TProtocolFactory protocolFactory) { - this.clientManager = clientManager; - this.protocolFactory = protocolFactory; - } - public AsyncClient getAsyncClient(org.apache.thrift.transport.TNonblockingTransport transport) { - return new AsyncClient(protocolFactory, clientManager, transport); - } - } - - public AsyncClient(org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.async.TAsyncClientManager clientManager, org.apache.thrift.transport.TNonblockingTransport transport) { - super(protocolFactory, clientManager, transport); - } - - public void Log(java.util.List messages, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - checkReady(); - Log_call method_call = new Log_call(messages, resultHandler, this, ___protocolFactory, ___transport); - this.___currentMethod = method_call; - ___manager.call(method_call); - } - - public static class Log_call extends 
org.apache.thrift.async.TAsyncMethodCall { - private java.util.List messages; - public Log_call(java.util.List messages, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { - super(client, protocolFactory, transport, resultHandler, false); - this.messages = messages; - } - - public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { - prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("Log", org.apache.thrift.protocol.TMessageType.CALL, 0)); - Log_args args = new Log_args(); - args.setMessages(messages); - args.write(prot); - prot.writeMessageEnd(); - } - - public ResultCode getResult() throws org.apache.thrift.TException { - if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { - throw new java.lang.IllegalStateException("Method call not finished!"); - } - org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); - org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); - return (new Client(prot)).recv_Log(); - } - } - - } - - public static class Processor extends org.apache.thrift.TBaseProcessor implements org.apache.thrift.TProcessor { - private static final org.slf4j.Logger _LOGGER = org.slf4j.LoggerFactory.getLogger(Processor.class.getName()); - public Processor(I iface) { - super(iface, getProcessMap(new java.util.HashMap>())); - } - - protected Processor(I iface, java.util.Map> processMap) { - super(iface, getProcessMap(processMap)); - } - - private static java.util.Map> getProcessMap(java.util.Map> processMap) { - processMap.put("Log", new Log()); - return processMap; - } - - public static class Log extends 
org.apache.thrift.ProcessFunction { - public Log() { - super("Log"); - } - - public Log_args getEmptyArgsInstance() { - return new Log_args(); - } - - protected boolean isOneway() { - return false; - } - - @Override - protected boolean rethrowUnhandledExceptions() { - return false; - } - - public Log_result getResult(I iface, Log_args args) throws org.apache.thrift.TException { - Log_result result = new Log_result(); - result.success = iface.Log(args.messages); - return result; - } - } - - } - - public static class AsyncProcessor extends org.apache.thrift.TBaseAsyncProcessor { - private static final org.slf4j.Logger _LOGGER = org.slf4j.LoggerFactory.getLogger(AsyncProcessor.class.getName()); - public AsyncProcessor(I iface) { - super(iface, getProcessMap(new java.util.HashMap>())); - } - - protected AsyncProcessor(I iface, java.util.Map> processMap) { - super(iface, getProcessMap(processMap)); - } - - private static java.util.Map> getProcessMap(java.util.Map> processMap) { - processMap.put("Log", new Log()); - return processMap; - } - - public static class Log extends org.apache.thrift.AsyncProcessFunction { - public Log() { - super("Log"); - } - - public Log_args getEmptyArgsInstance() { - return new Log_args(); - } - - public org.apache.thrift.async.AsyncMethodCallback getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { - final org.apache.thrift.AsyncProcessFunction fcall = this; - return new org.apache.thrift.async.AsyncMethodCallback() { - public void onComplete(ResultCode o) { - Log_result result = new Log_result(); - result.success = o; - try { - fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); - } catch (org.apache.thrift.transport.TTransportException e) { - _LOGGER.error("TTransportException writing to internal frame buffer", e); - fb.close(); - } catch (java.lang.Exception e) { - _LOGGER.error("Exception writing to internal frame buffer", e); - onError(e); - } 
- } - public void onError(java.lang.Exception e) { - byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; - org.apache.thrift.TSerializable msg; - Log_result result = new Log_result(); - if (e instanceof org.apache.thrift.transport.TTransportException) { - _LOGGER.error("TTransportException inside handler", e); - fb.close(); - return; - } else if (e instanceof org.apache.thrift.TApplicationException) { - _LOGGER.error("TApplicationException inside handler", e); - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; - msg = (org.apache.thrift.TApplicationException)e; - } else { - _LOGGER.error("Exception inside handler", e); - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; - msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); - } - try { - fcall.sendResponse(fb,msg,msgType,seqid); - } catch (java.lang.Exception ex) { - _LOGGER.error("Exception writing to internal frame buffer", ex); - fb.close(); - } - } - }; - } - - protected boolean isOneway() { - return false; - } - - public void start(I iface, Log_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - iface.Log(args.messages,resultHandler); - } - } - - } - - public static class Log_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Log_args"); - - private static final org.apache.thrift.protocol.TField MESSAGES_FIELD_DESC = new org.apache.thrift.protocol.TField("messages", org.apache.thrift.protocol.TType.LIST, (short)1); - - private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new Log_argsStandardSchemeFactory(); - private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new Log_argsTupleSchemeFactory(); - - public @org.apache.thrift.annotation.Nullable 
java.util.List messages; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - MESSAGES((short)1, "messages"); - - private static final java.util.Map byName = new java.util.HashMap(); - - static { - for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - @org.apache.thrift.annotation.Nullable - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // MESSAGES - return MESSAGES; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - @org.apache.thrift.annotation.Nullable - public static _Fields findByName(java.lang.String name) { - return byName.get(name); - } - - private final short _thriftId; - private final java.lang.String _fieldName; - - _Fields(short thriftId, java.lang.String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public java.lang.String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.MESSAGES, new org.apache.thrift.meta_data.FieldMetaData("messages", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, LogEntry.class)))); - metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Log_args.class, metaDataMap); - } - - public Log_args() { - } - - public Log_args( - java.util.List messages) - { - this(); - this.messages = messages; - } - - /** - * Performs a deep copy on other. - */ - public Log_args(Log_args other) { - if (other.isSetMessages()) { - java.util.List __this__messages = new java.util.ArrayList(other.messages.size()); - for (LogEntry other_element : other.messages) { - __this__messages.add(new LogEntry(other_element)); - } - this.messages = __this__messages; - } - } - - public Log_args deepCopy() { - return new Log_args(this); - } - - @Override - public void clear() { - this.messages = null; - } - - public int getMessagesSize() { - return (this.messages == null) ? 
0 : this.messages.size(); - } - - @org.apache.thrift.annotation.Nullable - public java.util.Iterator getMessagesIterator() { - return (this.messages == null) ? null : this.messages.iterator(); - } - - public void addToMessages(LogEntry elem) { - if (this.messages == null) { - this.messages = new java.util.ArrayList(); - } - this.messages.add(elem); - } - - @org.apache.thrift.annotation.Nullable - public java.util.List getMessages() { - return this.messages; - } - - public Log_args setMessages(@org.apache.thrift.annotation.Nullable java.util.List messages) { - this.messages = messages; - return this; - } - - public void unsetMessages() { - this.messages = null; - } - - /** Returns true if field messages is set (has been assigned a value) and false otherwise */ - public boolean isSetMessages() { - return this.messages != null; - } - - public void setMessagesIsSet(boolean value) { - if (!value) { - this.messages = null; - } - } - - public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) { - switch (field) { - case MESSAGES: - if (value == null) { - unsetMessages(); - } else { - setMessages((java.util.List)value); - } - break; - - } - } - - @org.apache.thrift.annotation.Nullable - public java.lang.Object getFieldValue(_Fields field) { - switch (field) { - case MESSAGES: - return getMessages(); - - } - throw new java.lang.IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new java.lang.IllegalArgumentException(); - } - - switch (field) { - case MESSAGES: - return isSetMessages(); - } - throw new java.lang.IllegalStateException(); - } - - @Override - public boolean equals(java.lang.Object that) { - if (that == null) - return false; - if (that instanceof Log_args) - return this.equals((Log_args)that); - return false; - } - - public boolean equals(Log_args that) { - 
if (that == null) - return false; - if (this == that) - return true; - - boolean this_present_messages = true && this.isSetMessages(); - boolean that_present_messages = true && that.isSetMessages(); - if (this_present_messages || that_present_messages) { - if (!(this_present_messages && that_present_messages)) - return false; - if (!this.messages.equals(that.messages)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - int hashCode = 1; - - hashCode = hashCode * 8191 + ((isSetMessages()) ? 131071 : 524287); - if (isSetMessages()) - hashCode = hashCode * 8191 + messages.hashCode(); - - return hashCode; - } - - @Override - public int compareTo(Log_args other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = java.lang.Boolean.valueOf(isSetMessages()).compareTo(other.isSetMessages()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetMessages()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.messages, other.messages); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - @org.apache.thrift.annotation.Nullable - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - scheme(iprot).read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - scheme(oprot).write(oprot, this); - } - - @Override - public java.lang.String toString() { - java.lang.StringBuilder sb = new java.lang.StringBuilder("Log_args("); - boolean first = true; - - sb.append("messages:"); - if (this.messages == null) { - sb.append("null"); - } else { - sb.append(this.messages); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws 
org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class Log_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public Log_argsStandardScheme getScheme() { - return new Log_argsStandardScheme(); - } - } - - private static class Log_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, Log_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // MESSAGES - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list0 = iprot.readListBegin(); - struct.messages = new java.util.ArrayList(_list0.size); - @org.apache.thrift.annotation.Nullable LogEntry _elem1; - for (int _i2 = 0; _i2 < _list0.size; ++_i2) - { - _elem1 = new LogEntry(); - _elem1.read(iprot); - struct.messages.add(_elem1); - } - iprot.readListEnd(); - } - struct.setMessagesIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - 
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - - // check for required fields of primitive type, which can't be checked in the validate method - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, Log_args struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.messages != null) { - oprot.writeFieldBegin(MESSAGES_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.messages.size())); - for (LogEntry _iter3 : struct.messages) - { - _iter3.write(oprot); - } - oprot.writeListEnd(); - } - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class Log_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public Log_argsTupleScheme getScheme() { - return new Log_argsTupleScheme(); - } - } - - private static class Log_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, Log_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; - java.util.BitSet optionals = new java.util.BitSet(); - if (struct.isSetMessages()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetMessages()) { - { - oprot.writeI32(struct.messages.size()); - for (LogEntry _iter4 : struct.messages) - { - _iter4.write(oprot); - } - } - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, Log_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; - java.util.BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - { - 
org.apache.thrift.protocol.TList _list5 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.messages = new java.util.ArrayList(_list5.size); - @org.apache.thrift.annotation.Nullable LogEntry _elem6; - for (int _i7 = 0; _i7 < _list5.size; ++_i7) - { - _elem6 = new LogEntry(); - _elem6.read(iprot); - struct.messages.add(_elem6); - } - } - struct.setMessagesIsSet(true); - } - } - } - - private static S scheme(org.apache.thrift.protocol.TProtocol proto) { - return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme(); - } - } - - public static class Log_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Log_result"); - - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.I32, (short)0); - - private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new Log_resultStandardSchemeFactory(); - private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new Log_resultTupleSchemeFactory(); - - /** - * - * @see ResultCode - */ - public @org.apache.thrift.annotation.Nullable ResultCode success; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - /** - * - * @see ResultCode - */ - SUCCESS((short)0, "success"); - - private static final java.util.Map byName = new java.util.HashMap(); - - static { - for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. 
- */ - @org.apache.thrift.annotation.Nullable - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - @org.apache.thrift.annotation.Nullable - public static _Fields findByName(java.lang.String name) { - return byName.get(name); - } - - private final short _thriftId; - private final java.lang.String _fieldName; - - _Fields(short thriftId, java.lang.String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public java.lang.String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, ResultCode.class))); - metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Log_result.class, metaDataMap); - } - - public Log_result() { - } - - public Log_result( - ResultCode success) - { - this(); - this.success = success; - } - - /** - * Performs a deep copy on other. 
- */ - public Log_result(Log_result other) { - if (other.isSetSuccess()) { - this.success = other.success; - } - } - - public Log_result deepCopy() { - return new Log_result(this); - } - - @Override - public void clear() { - this.success = null; - } - - /** - * - * @see ResultCode - */ - @org.apache.thrift.annotation.Nullable - public ResultCode getSuccess() { - return this.success; - } - - /** - * - * @see ResultCode - */ - public Log_result setSuccess(@org.apache.thrift.annotation.Nullable ResultCode success) { - this.success = success; - return this; - } - - public void unsetSuccess() { - this.success = null; - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; - } - - public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } - } - - public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) { - switch (field) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((ResultCode)value); - } - break; - - } - } - - @org.apache.thrift.annotation.Nullable - public java.lang.Object getFieldValue(_Fields field) { - switch (field) { - case SUCCESS: - return getSuccess(); - - } - throw new java.lang.IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new java.lang.IllegalArgumentException(); - } - - switch (field) { - case SUCCESS: - return isSetSuccess(); - } - throw new java.lang.IllegalStateException(); - } - - @Override - public boolean equals(java.lang.Object that) { - if (that == null) - return false; - if (that instanceof Log_result) - return this.equals((Log_result)that); - return false; - } - - public boolean equals(Log_result that) { - if (that == null) - return false; - if (this == that) - 
return true; - - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (!this.success.equals(that.success)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - int hashCode = 1; - - hashCode = hashCode * 8191 + ((isSetSuccess()) ? 131071 : 524287); - if (isSetSuccess()) - hashCode = hashCode * 8191 + success.getValue(); - - return hashCode; - } - - @Override - public int compareTo(Log_result other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = java.lang.Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - @org.apache.thrift.annotation.Nullable - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - scheme(iprot).read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - scheme(oprot).write(oprot, this); - } - - @Override - public java.lang.String toString() { - java.lang.StringBuilder sb = new java.lang.StringBuilder("Log_result("); - boolean first = true; - - sb.append("success:"); - if (this.success == null) { - sb.append("null"); - } else { - sb.append(this.success); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct 
validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class Log_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public Log_resultStandardScheme getScheme() { - return new Log_resultStandardScheme(); - } - } - - private static class Log_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, Log_result struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.I32) { - struct.success = ResultCode.findByValue(iprot.readI32()); - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - - // check for required fields of primitive type, which can't be checked in the validate method - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, Log_result struct) throws org.apache.thrift.TException { - 
struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - oprot.writeI32(struct.success.getValue()); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class Log_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { - public Log_resultTupleScheme getScheme() { - return new Log_resultTupleScheme(); - } - } - - private static class Log_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, Log_result struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; - java.util.BitSet optionals = new java.util.BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - oprot.writeI32(struct.success.getValue()); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, Log_result struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; - java.util.BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.success = ResultCode.findByValue(iprot.readI32()); - struct.setSuccessIsSet(true); - } - } - } - - private static S scheme(org.apache.thrift.protocol.TProtocol proto) { - return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? 
STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme(); - } - } - -} diff --git a/zipkin-collector/scribe/src/test/java/zipkin2/collector/scribe/ITScribeCollector.java b/zipkin-collector/scribe/src/test/java/zipkin2/collector/scribe/ITScribeCollector.java deleted file mode 100644 index d6cc50c27cc..00000000000 --- a/zipkin-collector/scribe/src/test/java/zipkin2/collector/scribe/ITScribeCollector.java +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Copyright 2015-2021 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.collector.scribe; - -import com.linecorp.armeria.common.CommonPools; -import java.util.Base64; -import java.util.List; -import java.util.stream.Collectors; -import org.apache.thrift.protocol.TBinaryProtocol; -import org.apache.thrift.protocol.TProtocol; -import org.apache.thrift.transport.layered.TFramedTransport; -import org.apache.thrift.transport.TSocket; -import org.apache.thrift.transport.TTransport; -import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.Test; -import zipkin2.Callback; -import zipkin2.Span; -import zipkin2.TestObjects; -import zipkin2.codec.SpanBytesEncoder; -import zipkin2.collector.Collector; -import zipkin2.collector.CollectorMetrics; -import zipkin2.collector.scribe.generated.LogEntry; -import zipkin2.collector.scribe.generated.ResultCode; -import zipkin2.collector.scribe.generated.Scribe; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; - -class ITScribeCollector { - static Collector collector; - static CollectorMetrics metrics; - static NettyScribeServer server; - - @BeforeAll static void startServer() { - collector = mock(Collector.class); - doAnswer(invocation -> { - Callback callback = invocation.getArgument(1); - callback.onSuccess(null); - return null; - }).when(collector).accept(any(), any(), any()); - - metrics = mock(CollectorMetrics.class); - - server = new NettyScribeServer(0, new ScribeSpanConsumer(collector, metrics, "zipkin")); - server.start(); - } - - @AfterAll static void stopServer() { - server.close(); - } - - @Test void normal() throws Exception { - // Java version of this sample code - // https://github.com/facebookarchive/scribe/wiki/Logging-Messages - TTransport 
transport = new TFramedTransport(new TSocket("localhost", server.port())); - TProtocol protocol = new TBinaryProtocol(transport, false, false); - Scribe.Iface client = new Scribe.Client(protocol); - - List entries = TestObjects.TRACE.stream() - .map(ITScribeCollector::logEntry) - .collect(Collectors.toList()); - - transport.open(); - try { - ResultCode code = client.Log(entries); - assertThat(code).isEqualTo(ResultCode.OK); - - code = client.Log(entries); - assertThat(code).isEqualTo(ResultCode.OK); - } finally { - transport.close(); - } - - verify(collector, times(2)).accept(eq(TestObjects.TRACE), any(), - eq(CommonPools.blockingTaskExecutor())); - verify(metrics, times(2)).incrementMessages(); - } - - static LogEntry logEntry(Span span) { - return new LogEntry() - .setCategory("zipkin") - .setMessage(Base64.getMimeEncoder().encodeToString(SpanBytesEncoder.THRIFT.encode(span))); - } -} diff --git a/zipkin-collector/scribe/src/test/java/zipkin2/collector/scribe/ScribeCollectorTest.java b/zipkin-collector/scribe/src/test/java/zipkin2/collector/scribe/ScribeCollectorTest.java deleted file mode 100644 index e950ed324ad..00000000000 --- a/zipkin-collector/scribe/src/test/java/zipkin2/collector/scribe/ScribeCollectorTest.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.collector.scribe; - -import org.junit.jupiter.api.Test; -import zipkin2.CheckResult; -import zipkin2.Component; -import zipkin2.storage.InMemoryStorage; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; - -class ScribeCollectorTest { - InMemoryStorage storage = InMemoryStorage.newBuilder().build(); - - @Test void check_failsWhenNotStarted() { - try (ScribeCollector scribe = ScribeCollector.newBuilder().storage(storage).port(0).build()) { - - CheckResult result = scribe.check(); - assertThat(result.ok()).isFalse(); - assertThat(result.error()).isInstanceOf(IllegalStateException.class); - - scribe.start(); - assertThat(scribe.check().ok()).isTrue(); - } - } - - @Test void anonymousPort() { - try (ScribeCollector scribe = ScribeCollector.newBuilder().storage(storage).port(0).build()) { - - assertThat(scribe.port()).isZero(); - - scribe.start(); - assertThat(scribe.port()).isNotZero(); - } - } - - @Test void start_failsWhenCantBindPort() { - ScribeCollector.Builder builder = ScribeCollector.newBuilder().storage(storage).port(0); - - try (ScribeCollector first = builder.build().start()) { - try (ScribeCollector samePort = builder.port(first.port()).build()) { - assertThatThrownBy(samePort::start) - .isInstanceOf(RuntimeException.class) - .hasMessage("Could not start scribe server."); - } - } - } - - /** - * The {@code toString()} of {@link Component} implementations appear in health check endpoints. - * Since these are likely to be exposed in logs and other monitoring tools, care should be taken - * to ensure {@code toString()} output is a reasonable length and does not contain sensitive - * information. 
- */ - @Test void toStringContainsOnlySummaryInformation() { - try (ScribeCollector scribe = ScribeCollector.newBuilder().storage(storage).port(0).build()) { - - assertThat(scribe.start()) - .hasToString("ScribeCollector{port=" + scribe.port() + ", category=zipkin}"); - } - } -} diff --git a/zipkin-collector/scribe/src/test/java/zipkin2/collector/scribe/ScribeSpanConsumerTest.java b/zipkin-collector/scribe/src/test/java/zipkin2/collector/scribe/ScribeSpanConsumerTest.java deleted file mode 100644 index 1d0ccc493d6..00000000000 --- a/zipkin-collector/scribe/src/test/java/zipkin2/collector/scribe/ScribeSpanConsumerTest.java +++ /dev/null @@ -1,268 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.collector.scribe; - -import java.util.Arrays; -import java.util.Base64; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; -import org.apache.thrift.async.AsyncMethodCallback; -import org.junit.jupiter.api.Test; -import zipkin2.Call; -import zipkin2.Callback; -import zipkin2.Endpoint; -import zipkin2.Span; -import zipkin2.codec.SpanBytesEncoder; -import zipkin2.collector.InMemoryCollectorMetrics; -import zipkin2.collector.scribe.generated.LogEntry; -import zipkin2.collector.scribe.generated.ResultCode; -import zipkin2.storage.ForwardingStorageComponent; -import zipkin2.storage.InMemoryStorage; -import zipkin2.storage.SpanConsumer; -import zipkin2.storage.StorageComponent; -import zipkin2.v1.V1Span; -import zipkin2.v1.V1SpanConverter; - -import static java.nio.charset.StandardCharsets.UTF_8; -import static java.util.Arrays.asList; -import static org.assertj.core.api.Assertions.assertThat; -import static org.awaitility.Awaitility.await; - -class ScribeSpanConsumerTest { - // scope to scribe as we aren't creating the consumer with the builder. 
- InMemoryCollectorMetrics scribeMetrics = new InMemoryCollectorMetrics().forTransport("scribe"); - - InMemoryStorage storage = InMemoryStorage.newBuilder().build(); - SpanConsumer consumer = storage.spanConsumer(); - - static class CaptureAsyncMethodCallback implements AsyncMethodCallback { - - ResultCode resultCode; - Exception error; - - CountDownLatch latch = new CountDownLatch(1); - - @Override public void onComplete(ResultCode resultCode) { - this.resultCode = resultCode; - latch.countDown(); - } - - @Override public void onError(Exception error) { - this.error = error; - latch.countDown(); - } - } - - static String reallyLongAnnotation; - - static { - char[] as = new char[2048]; - Arrays.fill(as, 'a'); - reallyLongAnnotation = new String(as); - } - - Endpoint zipkinQuery = - Endpoint.newBuilder().serviceName("zipkin-query").ip("127.0.0.1").port(9411).build(); - Endpoint zipkinQuery0 = zipkinQuery.toBuilder().port(null).build(); - - V1Span v1 = V1Span.newBuilder() - .traceId(-6054243957716233329L) - .name("getTracesByIds") - .id(-3615651937927048332L) - .parentId(-6054243957716233329L) - .addAnnotation(1442493420635000L, "sr", zipkinQuery) - .addAnnotation(1442493420747000L, reallyLongAnnotation, zipkinQuery) - .addAnnotation( - 1442493422583586L, - "Gc(9,0.PSScavenge,2015-09-17 12:37:02 +0000,304.milliseconds+762.microseconds)", - zipkinQuery) - .addAnnotation(1442493422680000L, "ss", zipkinQuery) - .addBinaryAnnotation("srv/finagle.version", "6.28.0", zipkinQuery0) - .addBinaryAnnotation("sa", zipkinQuery) - .addBinaryAnnotation("ca", zipkinQuery.toBuilder().port(63840).build()) - .debug(false) - .build(); - - Span v2 = V1SpanConverter.create().convert(v1).get(0); - byte[] bytes = SpanBytesEncoder.THRIFT.encode(v2); - String encodedSpan = new String(Base64.getEncoder().encode(bytes), UTF_8); - - @Test void entriesWithSpansAreConsumed() throws Exception { - ScribeSpanConsumer scribe = newScribeSpanConsumer("zipkin", consumer); - - LogEntry entry = new 
LogEntry(); - entry.category = "zipkin"; - entry.message = encodedSpan; - - expectSuccess(scribe, entry); - - // Storage finishes after callback so wait for it. - await().untilAsserted(() -> assertThat(storage.getTraces()).containsExactly(asList(v2))); - - assertThat(scribeMetrics.messages()).isEqualTo(1); - assertThat(scribeMetrics.messagesDropped()).isZero(); - assertThat(scribeMetrics.bytes()).isEqualTo(bytes.length); - assertThat(scribeMetrics.spans()).isEqualTo(1); - assertThat(scribeMetrics.spansDropped()).isZero(); - } - - @Test void entriesWithoutSpansAreSkipped() throws Exception { - SpanConsumer consumer = (callback) -> { - throw new AssertionError(); // as we shouldn't get here. - }; - - ScribeSpanConsumer scribe = newScribeSpanConsumer("zipkin", consumer); - - LogEntry entry = new LogEntry(); - entry.category = "notzipkin"; - entry.message = "hello world"; - - expectSuccess(scribe, entry); - - // Storage finishes after callback so wait for it. - await().untilAsserted(() -> assertThat(scribeMetrics.messages()).isEqualTo(1)); - assertThat(scribeMetrics.messagesDropped()).isZero(); - assertThat(scribeMetrics.bytes()).isZero(); - assertThat(scribeMetrics.spans()).isZero(); - assertThat(scribeMetrics.spansDropped()).isZero(); - } - - private void expectSuccess(ScribeSpanConsumer scribe, LogEntry entry) throws Exception { - CaptureAsyncMethodCallback callback = new CaptureAsyncMethodCallback(); - scribe.Log(asList(entry), callback); - callback.latch.await(10, TimeUnit.SECONDS); - assertThat(callback.resultCode).isEqualTo(ResultCode.OK); - } - - @Test void malformedDataIsDropped() { - ScribeSpanConsumer scribe = newScribeSpanConsumer("zipkin", consumer); - - LogEntry entry = new LogEntry(); - entry.category = "zipkin"; - entry.message = "notbase64"; - - CaptureAsyncMethodCallback callback = new CaptureAsyncMethodCallback(); - scribe.Log(asList(entry), callback); - assertThat(callback.error).isInstanceOf(IllegalArgumentException.class); - - // Storage finishes 
after callback so wait for it. - await().untilAsserted(() -> assertThat(scribeMetrics.messages()).isEqualTo(1)); - assertThat(scribeMetrics.messagesDropped()).isEqualTo(1); - assertThat(scribeMetrics.bytes()).isZero(); - assertThat(scribeMetrics.spans()).isZero(); - assertThat(scribeMetrics.spansDropped()).isZero(); - } - - @Test void consumerExceptionBeforeCallbackDoesntSetFutureException() { - consumer = (input) -> { - throw new NullPointerException("endpoint was null"); - }; - - ScribeSpanConsumer scribe = newScribeSpanConsumer("zipkin", consumer); - - LogEntry entry = new LogEntry(); - entry.category = "zipkin"; - entry.message = encodedSpan; - - CaptureAsyncMethodCallback callback = new CaptureAsyncMethodCallback(); - scribe.Log(asList(entry), callback); - - // Storage related exceptions are not propagated to the caller. Only marshalling ones are. - assertThat(callback.error).isNull(); - - // Storage finishes after callback so wait for it. - await().untilAsserted(() -> assertThat(scribeMetrics.messages()).isEqualTo(1)); - assertThat(scribeMetrics.messagesDropped()).isZero(); - assertThat(scribeMetrics.bytes()).isEqualTo(bytes.length); - assertThat(scribeMetrics.spans()).isEqualTo(1); - assertThat(scribeMetrics.spansDropped()).isEqualTo(1); - } - - /** - * Callbacks are performed asynchronously. 
If they throw, it hints that we are chaining futures - * when we shouldn't - */ - @Test void callbackExceptionDoesntThrow() throws Exception { - consumer = (input) -> new Call.Base() { - @Override protected Void doExecute() { - throw new AssertionError(); - } - - @Override protected void doEnqueue(Callback callback) { - callback.onError(new NullPointerException()); - } - - @Override public Call clone() { - throw new AssertionError(); - } - }; - - ScribeSpanConsumer scribe = newScribeSpanConsumer("zipkin", consumer); - - LogEntry entry = new LogEntry(); - entry.category = "zipkin"; - entry.message = encodedSpan; - - expectSuccess(scribe, entry); - - // Storage finishes after callback so wait for it. - await().untilAsserted(() -> assertThat(scribeMetrics.messages()).isEqualTo(1)); - assertThat(scribeMetrics.messagesDropped()).isZero(); - assertThat(scribeMetrics.bytes()).isEqualTo(bytes.length); - assertThat(scribeMetrics.spans()).isEqualTo(1); - assertThat(scribeMetrics.spansDropped()).isEqualTo(1); - } - - /** Finagle's zipkin tracer breaks on a column width with a trailing newline */ - @Test void decodesSpanGeneratedByFinagle() throws Exception { - LogEntry entry = new LogEntry(); - entry.category = "zipkin"; - entry.message = "" - + 
"CgABq/sBMnzE048LAAMAAAAOZ2V0VHJhY2VzQnlJZHMKAATN0p+4EGfTdAoABav7ATJ8xNOPDwAGDAAAAAQKAAEABR/wq+2DeAsAAgAAAAJzcgwAAwgAAX8AAAEGAAIkwwsAAwAAAAx6aXBraW4tcXVlcnkAAAoAAQAFH/Cr7zj4CwACAAAIAGFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWF
hYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFh\n" - + "YWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhDAADCAABfwAAAQYAAiTDCwADAAAADHppcGtpbi1xdWVyeQAACgABAAUf8KwLPyILAAIAAABOR2MoOSwwLlBTU2NhdmVuZ2UsMjAxNS0wOS0xNyAxMjozNzowMiArMDAwMCwzMDQubWlsbGlzZWNvbmRzKzc2Mi5taWNyb3NlY29uZHMpDAADCAABfwAAAQYAAiTDCwADAAAADHppcGtpbi1xdWVyeQAIAAQABKZ6AAoAAQAFH/CsDLfACwACAAAAAnNzDAADCAABfwAAAQYAAiTDCwADAAAADHppcGtpbi1xdWVyeQAADwAIDAAAAAULAAEAAAATc3J2L2ZpbmFnbGUudmVyc2lvbgsAAgAAAAY2LjI4LjAIAAMAAAAGDAAECAABfwAAAQYAAgAACwADAAAADHppcGtpbi1xdWVyeQAACwABAAAAD3Nydi9tdXgvZW5hYmxlZAsAAgAAAAEBCAADAAAAAAwABAgAAX8AAAEGAAIAAAsAAwAAAAx6aXBraW4tcXVlcnkAAAsAAQAAAAJzYQsAAgAAAAEBCAADAAAAAAwABAgAAX8AAAEGAAIkwwsAAwAAAAx6aXBraW4tcXVlcnkAAAsAAQAAAAJjYQsAAgAAAAEBCAADAAAAAAwABAgAAX8AAAEGAAL5YAsAAwAAAAx6aXBraW4tcXVlcnkAAAsAAQAAAAZudW1JZHMLAAIAAAAEAAAAAQgAAwAAAAMMAAQIAAF/AAABBgACJMMLAAMAAAAMemlwa2luLXF1ZXJ5AAACAAkAAA==\n"; - - ScribeSpanConsumer scribe = newScribeSpanConsumer(entry.category, consumer); - - expectSuccess(scribe, entry); - - // Storage finishes after callback so wait for it. 
- await().untilAsserted(() -> assertThat(storage.getTraces()).containsExactly(asList(v2))); - - assertThat(scribeMetrics.messages()).isEqualTo(1); - assertThat(scribeMetrics.messagesDropped()).isZero(); - assertThat(scribeMetrics.bytes()) - .isEqualTo(Base64.getMimeDecoder().decode(entry.message).length); - assertThat(scribeMetrics.spans()).isEqualTo(1); - assertThat(scribeMetrics.spansDropped()).isZero(); - } - - ScribeSpanConsumer newScribeSpanConsumer(String category, SpanConsumer spanConsumer) { - ScribeCollector.Builder builder = ScribeCollector.newBuilder() - .category(category) - .metrics(scribeMetrics) - .storage(new ForwardingStorageComponent() { - @Override protected StorageComponent delegate() { - throw new AssertionError(); - } - - @Override public SpanConsumer spanConsumer() { - return spanConsumer; - } - }); - return new ScribeSpanConsumer( - builder.delegate.build(), - builder.metrics, - builder.category); - } -} diff --git a/zipkin-collector/scribe/src/test/resources/simplelogger.properties b/zipkin-collector/scribe/src/test/resources/simplelogger.properties deleted file mode 100644 index 54c0b32d99d..00000000000 --- a/zipkin-collector/scribe/src/test/resources/simplelogger.properties +++ /dev/null @@ -1,6 +0,0 @@ -# See https://www.slf4j.org/api/org/slf4j/impl/SimpleLogger.html for the full list of config options -org.slf4j.simpleLogger.logFile=System.out -org.slf4j.simpleLogger.defaultLogLevel=warn -org.slf4j.simpleLogger.showDateTime=true -org.slf4j.simpleLogger.dateTimeFormat=yyyy-MM-dd HH:mm:ss:SSS -org.slf4j.simpleLogger.log.zipkin2.collector.scribe=debug diff --git a/zipkin-junit/README.md b/zipkin-junit/README.md deleted file mode 100644 index 39fc742030b..00000000000 --- a/zipkin-junit/README.md +++ /dev/null @@ -1,58 +0,0 @@ -# zipkin-junit - -This contains `ZipkinRule`, a JUnit rule to spin-up a Zipkin server during tests. - -ZipkinRule aims to emulate a http collector. 
For example, it presents -the v1 and v2 POST apis [Zipkin Api](http://openzipkin.github.io/zipkin-api/#/), and -supports features like gzip compression. - -Usage ------- - -For example, you can write micro-integration tests like so: - -```java -@Rule -public ZipkinRule zipkin = new ZipkinRule(); - -// Pretend we have a traced system under test -TracedService service = new TracedService(zipkin.httpUrl(), ReportingMode.FLUSH_EVERY); - -@Test -public void skipsReportingWhenNotSampled() throws IOException { - zipkin.storeSpans(asList(rootSpan)); - - // send a request to the instrumented server, telling it not to sample. - client.addHeader("X-B3-TraceId", rootSpan.traceId) - .addHeader("X-B3-SpanId", rootSpan.id) - .addHeader("X-B3-Sampled", 0).get(service.httpUrl()); - - // check that zipkin didn't receive any new data in that trace - assertThat(zipkin.getTraces()).containsOnly(asList(rootSpan)); -} -``` - -You can also simulate failures. - -For example, if you want to ensure your instrumentation doesn't retry on http 400. - -```java -@Test -public void doesntAttemptToRetryOn400() throws IOException { - zipkin.enqueueFailure(sendErrorResponse(400, "Invalid Format")); - - reporter.record(span); - reporter.flush(); - - // check that we didn't retry on 400 - assertThat(zipkin.httpRequestCount()).isEqualTo(1); -} -``` - -Besides `httpRequestCount()`, there are two other counters that can -help you assert instrumentation is doing what you think: - -* `collectorMetrics()` - How many spans or bytes were collected on the http transport. - -These counters can validate aspects such if you are grouping spans by id -before reporting them to the server. 
diff --git a/zipkin-junit/pom.xml b/zipkin-junit/pom.xml deleted file mode 100644 index 3f72a95470a..00000000000 --- a/zipkin-junit/pom.xml +++ /dev/null @@ -1,87 +0,0 @@ - - - - 4.0.0 - - - io.zipkin - zipkin-parent - 2.24.4-SNAPSHOT - - - io.zipkin.zipkin2 - zipkin-junit - Zipkin JUnit - JUnit rule to spin-up a Zipkin server during tests - - - ${project.basedir}/.. - - - - - ${project.groupId} - zipkin - ${project.version} - - - ${project.groupId} - zipkin-collector - ${project.version} - - - - com.squareup.okhttp3 - mockwebserver - ${okhttp.version} - - - - junit - junit - ${junit.version} - - - - - org.slf4j - jul-to-slf4j - ${slf4j.version} - test - - - org.slf4j - slf4j-simple - ${slf4j.version} - test - - - - - com.google.code.gson - gson - ${gson.version} - test - - - ${project.groupId} - zipkin-tests - ${project.version} - test - - - diff --git a/zipkin-junit/src/main/java/zipkin2/junit/HttpFailure.java b/zipkin-junit/src/main/java/zipkin2/junit/HttpFailure.java deleted file mode 100644 index 84b2a5c5af6..00000000000 --- a/zipkin-junit/src/main/java/zipkin2/junit/HttpFailure.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.junit; - -import okhttp3.mockwebserver.MockResponse; - -import static okhttp3.mockwebserver.SocketPolicy.DISCONNECT_DURING_REQUEST_BODY; - -/** - * Instrumentation that use {@code POST} endpoints need to survive failures. 
Besides simply not - * starting the zipkin server, you can enqueue failures like this to test edge cases. For example, - * that you log a failure when a 400 code is returned. - */ -public final class HttpFailure { - - /** Ex a network partition occurs in the middle of the POST request */ - public static HttpFailure disconnectDuringBody() { - return new HttpFailure(new MockResponse().setSocketPolicy(DISCONNECT_DURING_REQUEST_BODY)); - } - - /** Ex code 400 when the server cannot read the spans */ - public static HttpFailure sendErrorResponse(int code, String body) { - return new HttpFailure(new MockResponse().setResponseCode(code).setBody(body)); - } - - /** Not exposed publicly in order to not leak okhttp3 types. */ - final MockResponse response; - - HttpFailure(MockResponse response) { - this.response = response; - } -} diff --git a/zipkin-junit/src/main/java/zipkin2/junit/ZipkinDispatcher.java b/zipkin-junit/src/main/java/zipkin2/junit/ZipkinDispatcher.java deleted file mode 100644 index 97082eda087..00000000000 --- a/zipkin-junit/src/main/java/zipkin2/junit/ZipkinDispatcher.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.junit; - -import java.io.IOException; -import okhttp3.HttpUrl; -import okhttp3.mockwebserver.Dispatcher; -import okhttp3.mockwebserver.MockResponse; -import okhttp3.mockwebserver.MockWebServer; -import okhttp3.mockwebserver.RecordedRequest; -import okio.Buffer; -import okio.GzipSource; -import zipkin2.Callback; -import zipkin2.codec.SpanBytesDecoder; -import zipkin2.collector.Collector; -import zipkin2.collector.CollectorMetrics; -import zipkin2.storage.StorageComponent; - -final class ZipkinDispatcher extends Dispatcher { - private final Collector consumer; - private final CollectorMetrics metrics; - private final MockWebServer server; - - ZipkinDispatcher(StorageComponent storage, CollectorMetrics metrics, MockWebServer server) { - this.consumer = Collector.newBuilder(getClass()).storage(storage).metrics(metrics).build(); - this.metrics = metrics; - this.server = server; - } - - @Override - public MockResponse dispatch(RecordedRequest request) { - HttpUrl url = server.url(request.getPath()); - if (request.getMethod().equals("POST")) { - String type = request.getHeader("Content-Type"); - if (url.encodedPath().equals("/api/v1/spans")) { - SpanBytesDecoder decoder = - type != null && type.contains("/x-thrift") - ? SpanBytesDecoder.THRIFT - : SpanBytesDecoder.JSON_V1; - return acceptSpans(request, decoder); - } else if (url.encodedPath().equals("/api/v2/spans")) { - SpanBytesDecoder decoder = - type != null && type.contains("/x-protobuf") - ? 
SpanBytesDecoder.PROTO3 - : SpanBytesDecoder.JSON_V2; - return acceptSpans(request, decoder); - } - } else { // unsupported method - return new MockResponse().setResponseCode(405); - } - return new MockResponse().setResponseCode(404); - } - - MockResponse acceptSpans(RecordedRequest request, SpanBytesDecoder decoder) { - byte[] body = request.getBody().readByteArray(); - metrics.incrementMessages(); - String encoding = request.getHeader("Content-Encoding"); - if (encoding != null && encoding.contains("gzip")) { - try { - Buffer result = new Buffer(); - GzipSource source = new GzipSource(new Buffer().write(body)); - while (source.read(result, Integer.MAX_VALUE) != -1) ; - body = result.readByteArray(); - } catch (IOException e) { - metrics.incrementMessagesDropped(); - return new MockResponse().setResponseCode(400).setBody("Cannot gunzip spans"); - } - } - metrics.incrementBytes(body.length); - - final MockResponse result = new MockResponse(); - if (body.length == 0) return result.setResponseCode(202); // lenient on empty - - consumer.acceptSpans(body, decoder, new Callback() { - @Override public void onSuccess(Void value) { - result.setResponseCode(202); - } - - @Override public void onError(Throwable t) { - String message = t.getMessage(); - result.setBody(message).setResponseCode(message.startsWith("Cannot store") ? 500 : 400); - } - }); - return result; - } -} diff --git a/zipkin-junit/src/main/java/zipkin2/junit/ZipkinRule.java b/zipkin-junit/src/main/java/zipkin2/junit/ZipkinRule.java deleted file mode 100644 index c5981f9fce7..00000000000 --- a/zipkin-junit/src/main/java/zipkin2/junit/ZipkinRule.java +++ /dev/null @@ -1,161 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.junit; - -import java.io.IOException; -import java.io.UncheckedIOException; -import java.util.List; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.atomic.AtomicInteger; -import okhttp3.mockwebserver.Dispatcher; -import okhttp3.mockwebserver.MockResponse; -import okhttp3.mockwebserver.MockWebServer; -import okhttp3.mockwebserver.RecordedRequest; -import org.junit.rules.TestRule; -import org.junit.runner.Description; -import org.junit.runners.model.Statement; -import zipkin2.DependencyLink; -import zipkin2.Span; -import zipkin2.collector.InMemoryCollectorMetrics; -import zipkin2.internal.Nullable; -import zipkin2.storage.InMemoryStorage; - -import static okhttp3.mockwebserver.SocketPolicy.KEEP_OPEN; - -/** - * Starts up a local Zipkin server, listening for http requests on {@link #httpUrl}. - * - *

This can be used to test instrumentation. For example, you can POST spans directly to this - * server. - * - *

See http://openzipkin.github.io/zipkin-api/#/ - */ -public final class ZipkinRule implements TestRule { - private final InMemoryStorage storage = InMemoryStorage.newBuilder().build(); - private final InMemoryCollectorMetrics metrics = new InMemoryCollectorMetrics(); - private final MockWebServer server = new MockWebServer(); - private final BlockingQueue failureQueue = new LinkedBlockingQueue<>(); - private final AtomicInteger receivedSpanBytes = new AtomicInteger(); - - public ZipkinRule() { - final ZipkinDispatcher successDispatch = new ZipkinDispatcher(storage, metrics, server); - Dispatcher dispatcher = new Dispatcher() { - @Override public MockResponse dispatch(RecordedRequest request) { - MockResponse maybeFailure = failureQueue.poll(); - if (maybeFailure != null) return maybeFailure; - MockResponse result = successDispatch.dispatch(request); - if (request.getMethod().equals("POST")) { - receivedSpanBytes.addAndGet((int) request.getBodySize()); - } - return result; - } - - @Override public MockResponse peek() { - MockResponse maybeFailure = failureQueue.peek(); - if (maybeFailure != null) return maybeFailure.clone(); - return new MockResponse().setSocketPolicy(KEEP_OPEN); - } - }; - server.setDispatcher(dispatcher); - } - - /** Use this to connect. The zipkin v1 interface will be under "/api/v1" */ - public String httpUrl() { - return String.format("http://%s:%s", server.getHostName(), server.getPort()); - } - - /** Use this to see how many requests you've sent to any zipkin http endpoint. */ - public int httpRequestCount() { - return server.getRequestCount(); - } - - /** Use this to see how many spans or serialized bytes were collected on the http endpoint. */ - public InMemoryCollectorMetrics collectorMetrics() { - return metrics; - } - - /** - * Stores the given spans directly, to setup preconditions for a test. - * - *

For example, if you are testing what happens when instrumentation adds a child to a trace, - * you'd add the parent here. - */ - public ZipkinRule storeSpans(List spans) { - try { - storage.accept(spans).execute(); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - return this; - } - - /** - * Adds a one-time failure to the http endpoint. - * - *

Ex. If you want to test that you don't repeatedly send bad data, you could send a 400 back. - * - *

{@code
-   * zipkin.enqueueFailure(sendErrorResponse(400, "bad format"));
-   * }
- * - * @param failure type of failure the next call to the http endpoint responds with - */ - public ZipkinRule enqueueFailure(HttpFailure failure) { - failureQueue.add(failure.response); - return this; - } - - /** Retrieves all traces this zipkin server has received. */ - public List> getTraces() { - return storage.spanStore().getTraces(); - } - - /** Retrieves a trace by ID which Zipkin server has received, or null if not present. */ - @Nullable public List getTrace(String traceId) { - List result; - try { - result = storage.traces().getTrace(traceId).execute(); - } catch (IOException e) { - throw new AssertionError("I/O exception in in-memory storage", e); - } - // Note: this is a different behavior than Traces.getTrace() which is not nullable! - return result.isEmpty() ? null : result; - } - - /** Retrieves all service links between traces this zipkin server has received. */ - public List getDependencies() { - return storage.spanStore().getDependencies(); - } - - /** - * Used to manually start the server. - * - * @param httpPort choose 0 to select an available port - */ - public void start(int httpPort) throws IOException { - server.start(httpPort); - } - - /** - * Used to manually stop the server. - */ - public void shutdown() throws IOException { - server.shutdown(); - } - - @Override public Statement apply(Statement base, Description description) { - return server.apply(base, description); - } -} diff --git a/zipkin-junit/src/test/java/zipkin2/junit/ZipkinRuleTest.java b/zipkin-junit/src/test/java/zipkin2/junit/ZipkinRuleTest.java deleted file mode 100644 index 64809dd7e9b..00000000000 --- a/zipkin-junit/src/test/java/zipkin2/junit/ZipkinRuleTest.java +++ /dev/null @@ -1,204 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.junit; - -import java.io.IOException; -import java.util.Arrays; -import java.util.List; -import okhttp3.MediaType; -import okhttp3.OkHttpClient; -import okhttp3.Request; -import okhttp3.RequestBody; -import okhttp3.Response; -import okio.Buffer; -import okio.ByteString; -import okio.GzipSink; -import org.junit.AssumptionViolatedException; -import org.junit.Rule; -import org.junit.Test; -import org.slf4j.bridge.SLF4JBridgeHandler; -import zipkin2.Span; -import zipkin2.codec.SpanBytesEncoder; - -import static java.util.Arrays.asList; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.failBecauseExceptionWasNotThrown; -import static zipkin2.TestObjects.CLIENT_SPAN; -import static zipkin2.TestObjects.LOTS_OF_SPANS; - -public class ZipkinRuleTest { - - static { - // ensure jul-to-slf4j works - SLF4JBridgeHandler.removeHandlersForRootLogger(); - SLF4JBridgeHandler.install(); - } - - @Rule public ZipkinRule zipkin = new ZipkinRule(); - - List spans = Arrays.asList(LOTS_OF_SPANS[0], LOTS_OF_SPANS[1]); - OkHttpClient client = new OkHttpClient(); - - @Test - public void getTraces_storedViaPost() throws IOException { - List trace = asList(CLIENT_SPAN); - // write the span to the zipkin using http - assertThat(postSpansV1(trace).code()).isEqualTo(202); - - // read the traces directly - assertThat(zipkin.getTraces()).containsOnly(trace); - } - - @Test - public void getTraces_storedViaPostVersion2_json() throws IOException { - getTraces_storedViaPostVersion2("application/json", 
SpanBytesEncoder.JSON_V2); - } - - @Test - public void getTraces_storedViaPostVersion2_proto3() throws IOException { - getTraces_storedViaPostVersion2("application/x-protobuf", SpanBytesEncoder.PROTO3); - } - - void getTraces_storedViaPostVersion2(String mediaType, SpanBytesEncoder encoder) - throws IOException { - - byte[] message = encoder.encodeList(spans); - - // write the span to the zipkin using http api v2 - Response response = - client - .newCall( - new Request.Builder() - .url(zipkin.httpUrl() + "/api/v2/spans") - .post(RequestBody.create(MediaType.parse(mediaType), message)) - .build()) - .execute(); - assertThat(response.code()).isEqualTo(202); - - // read the traces directly - assertThat(zipkin.getTraces()).containsOnly(asList(spans.get(0)), asList(spans.get(1))); - } - - /** The rule is here to help debugging. Even partial spans should be returned */ - @Test - public void getTraces_whenMissingTimestamps() throws IOException { - Span span = Span.newBuilder().traceId("1").id("1").name("foo").build(); - // write the span to the zipkin using http - assertThat(postSpansV1(asList(span)).code()).isEqualTo(202); - - // read the traces directly - assertThat(zipkin.getTraces()).containsOnly(asList(span)); - } - - /** The raw query can show affects like redundant rows in the data store. */ - @Test - public void storeSpans_readbackRaw() { - Span missingDuration = LOTS_OF_SPANS[0].toBuilder().duration(null).build(); - Span withDuration = LOTS_OF_SPANS[0]; - - // write the span to zipkin directly - zipkin.storeSpans(asList(missingDuration)); - zipkin.storeSpans(asList(withDuration)); - - assertThat(zipkin.getTrace(missingDuration.traceId())) - .containsExactly(missingDuration, withDuration); - } - - @Test - public void httpRequestCountIncrements() throws IOException { - postSpansV1(spans); - postSpansV1(spans); - - assertThat(zipkin.httpRequestCount()).isEqualTo(2); - } - - /** - * Normally, a span can be reported twice: for client and server. 
However, there are bugs that - * happened where several updates went to the same span id. {@link ZipkinRule#collectorMetrics} - * can be used to help ensure a span isn't reported more times than expected. - */ - @Test - public void collectorMetrics_spans() throws IOException { - postSpansV1(asList(LOTS_OF_SPANS[0])); - postSpansV1(asList(LOTS_OF_SPANS[1], LOTS_OF_SPANS[2])); - - assertThat(zipkin.collectorMetrics().spans()).isEqualTo(3); - } - - @Test - public void postSpans_disconnectDuringBody() { - zipkin.enqueueFailure(HttpFailure.disconnectDuringBody()); - - try { - postSpansV1(spans); - failBecauseExceptionWasNotThrown(IOException.class); - } catch (IOException expected) { // not always a ConnectException! - } - - // Zipkin didn't store the spans, as they shouldn't have been readable, due to disconnect - assertThat(zipkin.getTraces()).isEmpty(); - - try { - // The failure shouldn't affect later requests - assertThat(postSpansV1(spans).code()).isEqualTo(202); - } catch (IOException flake) { - throw new AssumptionViolatedException("test flaked", flake); - } - } - - @Test - public void postSpans_sendErrorResponse400() throws IOException { - zipkin.enqueueFailure(HttpFailure.sendErrorResponse(400, "Invalid Format")); - - Response response = postSpansV1(spans); - assertThat(response.code()).isEqualTo(400); - assertThat(response.body().string()).isEqualTo("Invalid Format"); - - // Zipkin didn't store the spans, as they shouldn't have been readable, due to the error - assertThat(zipkin.getTraces()).isEmpty(); - - // The failure shouldn't affect later requests - assertThat(postSpansV1(spans).code()).isEqualTo(202); - } - - @Test - public void gzippedSpans() throws IOException { - byte[] spansInJson = SpanBytesEncoder.JSON_V1.encodeList(spans); - - Buffer sink = new Buffer(); - GzipSink gzipSink = new GzipSink(sink); - gzipSink.write(new Buffer().write(spansInJson), spansInJson.length); - gzipSink.close(); - ByteString gzippedJson = sink.readByteString(); - - 
client.newCall(new Request.Builder() - .url(zipkin.httpUrl() + "/api/v1/spans") - .addHeader("Content-Encoding", "gzip") - .post(RequestBody.create(MediaType.parse("application/json"), gzippedJson)) - .build()).execute(); - - assertThat(zipkin.collectorMetrics().bytes()).isEqualTo(spansInJson.length); - } - - Response postSpansV1(List spans) throws IOException { - byte[] spansInJson = SpanBytesEncoder.JSON_V1.encodeList(spans); - return client - .newCall( - new Request.Builder() - .url(zipkin.httpUrl() + "/api/v1/spans") - .post(RequestBody.create(MediaType.parse("application/json"), spansInJson)) - .build()) - .execute(); - } -} diff --git a/zipkin-junit/src/test/resources/simplelogger.properties b/zipkin-junit/src/test/resources/simplelogger.properties deleted file mode 100644 index 3c9471563fb..00000000000 --- a/zipkin-junit/src/test/resources/simplelogger.properties +++ /dev/null @@ -1,9 +0,0 @@ -# See https://www.slf4j.org/api/org/slf4j/impl/SimpleLogger.html for the full list of config options - -org.slf4j.simpleLogger.logFile=System.out -org.slf4j.simpleLogger.defaultLogLevel=warn -org.slf4j.simpleLogger.showDateTime=true -org.slf4j.simpleLogger.dateTimeFormat=yyyy-MM-dd HH:mm:ss:SSS - -# see MockWebServer connections -# org.slf4j.simpleLogger.log.okhttp3=info diff --git a/zipkin-junit5/README.md b/zipkin-junit5/README.md deleted file mode 100644 index 4feb125c524..00000000000 --- a/zipkin-junit5/README.md +++ /dev/null @@ -1,58 +0,0 @@ -# zipkin-junit5 - -This contains `ZipkinExtension`, a JUnit5 extension to spin-up a Zipkin server during tests. - -ZipkinExtension aims to emulate a http collector. For example, it presents -the v1 and v2 POST apis [Zipkin Api](http://openzipkin.github.io/zipkin-api/#/), and -supports features like gzip compression. 
- -Usage ------- - -For example, you can write micro-integration tests like so: - -```java -@RegisterExtension -public ZipkinExtension zipkin = new ZipkinExtension(); - -// Pretend we have a traced system under test -TracedService service = new TracedService(zipkin.httpUrl(), ReportingMode.FLUSH_EVERY); - -@Test -public void skipsReportingWhenNotSampled() throws IOException { - zipkin.storeSpans(asList(rootSpan)); - - // send a request to the instrumented server, telling it not to sample. - client.addHeader("X-B3-TraceId", rootSpan.traceId) - .addHeader("X-B3-SpanId", rootSpan.id) - .addHeader("X-B3-Sampled", 0).get(service.httpUrl()); - - // check that zipkin didn't receive any new data in that trace - assertThat(zipkin.getTraces()).containsOnly(asList(rootSpan)); -} -``` - -You can also simulate failures. - -For example, if you want to ensure your instrumentation doesn't retry on http 400. - -```java -@Test -public void doesntAttemptToRetryOn400() throws IOException { - zipkin.enqueueFailure(sendErrorResponse(400, "Invalid Format")); - - reporter.record(span); - reporter.flush(); - - // check that we didn't retry on 400 - assertThat(zipkin.httpRequestCount()).isEqualTo(1); -} -``` - -Besides `httpRequestCount()`, there are two other counters that can -help you assert instrumentation is doing what you think: - -* `collectorMetrics()` - How many spans or bytes were collected on the http transport. - -These counters can validate aspects such if you are grouping spans by id -before reporting them to the server. diff --git a/zipkin-junit5/pom.xml b/zipkin-junit5/pom.xml deleted file mode 100644 index cb03768ecbc..00000000000 --- a/zipkin-junit5/pom.xml +++ /dev/null @@ -1,80 +0,0 @@ - - - - 4.0.0 - - - io.zipkin - zipkin-parent - 2.24.4-SNAPSHOT - - - io.zipkin.zipkin2 - zipkin-junit5 - Zipkin JUnit5 - JUnit5 Extension to spin-up a Zipkin server during tests - - - ${project.basedir}/.. 
- - - - - ${project.groupId} - zipkin - ${project.version} - - - ${project.groupId} - zipkin-collector - ${project.version} - - - - com.squareup.okhttp3 - mockwebserver3-junit5 - ${okhttp5.version} - - - - org.junit.jupiter - junit-jupiter-api - ${junit-jupiter.version} - - - - org.slf4j - slf4j-simple - ${slf4j.version} - test - - - - - com.google.code.gson - gson - ${gson.version} - test - - - ${project.groupId} - zipkin-tests - ${project.version} - test - - - diff --git a/zipkin-junit5/src/main/java/zipkin2/junit5/HttpFailure.java b/zipkin-junit5/src/main/java/zipkin2/junit5/HttpFailure.java deleted file mode 100644 index 03871054dac..00000000000 --- a/zipkin-junit5/src/main/java/zipkin2/junit5/HttpFailure.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright 2015-2023 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.junit5; - -import mockwebserver3.MockResponse; - -import static mockwebserver3.SocketPolicy.DISCONNECT_DURING_REQUEST_BODY; - -/** - * Instrumentation that use {@code POST} endpoints need to survive failures. Besides simply not - * starting the zipkin server, you can enqueue failures like this to test edge cases. For example, - * that you log a failure when a 400 code is returned. 
- */ -public final class HttpFailure { - - /** Ex a network partition occurs in the middle of the POST request */ - public static HttpFailure disconnectDuringBody() { - return new HttpFailure(new MockResponse().setSocketPolicy(DISCONNECT_DURING_REQUEST_BODY)); - } - - /** Ex code 400 when the server cannot read the spans */ - public static HttpFailure sendErrorResponse(int code, String body) { - return new HttpFailure(new MockResponse().setResponseCode(code).setBody(body)); - } - - /** Not exposed publicly in order to not leak okhttp3 types. */ - final MockResponse response; - - HttpFailure(MockResponse response) { - this.response = response; - } -} diff --git a/zipkin-junit5/src/main/java/zipkin2/junit5/ZipkinDispatcher.java b/zipkin-junit5/src/main/java/zipkin2/junit5/ZipkinDispatcher.java deleted file mode 100644 index 8d3d74777bb..00000000000 --- a/zipkin-junit5/src/main/java/zipkin2/junit5/ZipkinDispatcher.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Copyright 2015-2023 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.junit5; - -import okhttp3.HttpUrl; -import mockwebserver3.Dispatcher; -import mockwebserver3.MockResponse; -import mockwebserver3.MockWebServer; -import mockwebserver3.RecordedRequest; -import okio.Buffer; -import okio.GzipSource; -import zipkin2.Callback; -import zipkin2.codec.SpanBytesDecoder; -import zipkin2.collector.Collector; -import zipkin2.collector.CollectorMetrics; -import zipkin2.storage.StorageComponent; - -import java.io.IOException; - -final class ZipkinDispatcher extends Dispatcher { - private final Collector consumer; - private final CollectorMetrics metrics; - private final MockWebServer server; - - ZipkinDispatcher(StorageComponent storage, CollectorMetrics metrics, MockWebServer server) { - this.consumer = Collector.newBuilder(getClass()).storage(storage).metrics(metrics).build(); - this.metrics = metrics; - this.server = server; - } - - @Override - public MockResponse dispatch(RecordedRequest request) { - HttpUrl url = server.url(request.getPath()); - if (request.getMethod().equals("POST")) { - String type = request.getHeader("Content-Type"); - if (url.encodedPath().equals("/api/v1/spans")) { - SpanBytesDecoder decoder = - type != null && type.contains("/x-thrift") - ? SpanBytesDecoder.THRIFT - : SpanBytesDecoder.JSON_V1; - return acceptSpans(request, decoder); - } else if (url.encodedPath().equals("/api/v2/spans")) { - SpanBytesDecoder decoder = - type != null && type.contains("/x-protobuf") - ? 
SpanBytesDecoder.PROTO3 - : SpanBytesDecoder.JSON_V2; - return acceptSpans(request, decoder); - } - } else { // unsupported method - return new MockResponse().setResponseCode(405); - } - return new MockResponse().setResponseCode(404); - } - - MockResponse acceptSpans(RecordedRequest request, SpanBytesDecoder decoder) { - byte[] body = request.getBody().readByteArray(); - metrics.incrementMessages(); - String encoding = request.getHeader("Content-Encoding"); - if (encoding != null && encoding.contains("gzip")) { - try { - Buffer result = new Buffer(); - GzipSource source = new GzipSource(new Buffer().write(body)); - while (source.read(result, Integer.MAX_VALUE) != -1) ; - body = result.readByteArray(); - } catch (IOException e) { - metrics.incrementMessagesDropped(); - return new MockResponse().setResponseCode(400).setBody("Cannot gunzip spans"); - } - } - metrics.incrementBytes(body.length); - - final MockResponse result = new MockResponse(); - if (body.length == 0) return result.setResponseCode(202); // lenient on empty - - consumer.acceptSpans(body, decoder, new Callback() { - @Override public void onSuccess(Void value) { - result.setResponseCode(202); - } - - @Override public void onError(Throwable t) { - String message = t.getMessage(); - result.setBody(message).setResponseCode(message.startsWith("Cannot store") ? 500 : 400); - } - }); - return result; - } -} diff --git a/zipkin-junit5/src/main/java/zipkin2/junit5/ZipkinExtension.java b/zipkin-junit5/src/main/java/zipkin2/junit5/ZipkinExtension.java deleted file mode 100644 index dcf87528d51..00000000000 --- a/zipkin-junit5/src/main/java/zipkin2/junit5/ZipkinExtension.java +++ /dev/null @@ -1,168 +0,0 @@ -/* - * Copyright 2015-2023 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.junit5; - -import mockwebserver3.MockWebServer; -import mockwebserver3.Dispatcher; -import mockwebserver3.MockResponse; -import mockwebserver3.RecordedRequest; -import org.junit.jupiter.api.extension.AfterEachCallback; -import org.junit.jupiter.api.extension.BeforeEachCallback; -import org.junit.jupiter.api.extension.ExtensionContext; -import zipkin2.DependencyLink; -import zipkin2.Span; -import zipkin2.collector.InMemoryCollectorMetrics; -import zipkin2.internal.Nullable; -import zipkin2.storage.InMemoryStorage; - -import java.io.IOException; -import java.io.UncheckedIOException; -import java.util.List; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.atomic.AtomicInteger; - -import static mockwebserver3.SocketPolicy.KEEP_OPEN; - -/** - * Starts up a local Zipkin server, listening for http requests on {@link #httpUrl}. - * - *

This can be used to test instrumentation. For example, you can POST spans directly to this - * server. - * - *

See http://openzipkin.github.io/zipkin-api/#/ - */ -public final class ZipkinExtension implements BeforeEachCallback, AfterEachCallback { - private final InMemoryStorage storage = InMemoryStorage.newBuilder().build(); - private final InMemoryCollectorMetrics metrics = new InMemoryCollectorMetrics(); - private final MockWebServer server = new MockWebServer(); - private final BlockingQueue failureQueue = new LinkedBlockingQueue<>(); - private final AtomicInteger receivedSpanBytes = new AtomicInteger(); - - public ZipkinExtension() { - final ZipkinDispatcher successDispatch = new ZipkinDispatcher(storage, metrics, server); - Dispatcher dispatcher = new Dispatcher() { - @Override public MockResponse dispatch(RecordedRequest request) { - MockResponse maybeFailure = failureQueue.poll(); - if (maybeFailure != null) return maybeFailure; - MockResponse result = successDispatch.dispatch(request); - if (request.getMethod().equals("POST")) { - receivedSpanBytes.addAndGet((int) request.getBodySize()); - } - return result; - } - - @Override public MockResponse peek() { - MockResponse maybeFailure = failureQueue.peek(); - if (maybeFailure != null) return maybeFailure.clone(); - return new MockResponse().setSocketPolicy(KEEP_OPEN); - } - }; - server.setDispatcher(dispatcher); - } - - /** Use this to connect. The zipkin v1 interface will be under "/api/v1" */ - public String httpUrl() { - return String.format("http://%s:%s", server.getHostName(), server.getPort()); - } - - /** Use this to see how many requests you've sent to any zipkin http endpoint. */ - public int httpRequestCount() { - return server.getRequestCount(); - } - - /** Use this to see how many spans or serialized bytes were collected on the http endpoint. */ - public InMemoryCollectorMetrics collectorMetrics() { - return metrics; - } - - /** - * Stores the given spans directly, to setup preconditions for a test. - * - *

For example, if you are testing what happens when instrumentation adds a child to a trace, - * you'd add the parent here. - */ - public ZipkinExtension storeSpans(List spans) { - try { - storage.accept(spans).execute(); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - return this; - } - - /** - * Adds a one-time failure to the http endpoint. - * - *

Ex. If you want to test that you don't repeatedly send bad data, you could send a 400 back. - * - *

{@code
-   * zipkin.enqueueFailure(sendErrorResponse(400, "bad format"));
-   * }
- * - * @param failure type of failure the next call to the http endpoint responds with - */ - public ZipkinExtension enqueueFailure(HttpFailure failure) { - failureQueue.add(failure.response); - return this; - } - - /** Retrieves all traces this zipkin server has received. */ - public List> getTraces() { - return storage.spanStore().getTraces(); - } - - /** Retrieves a trace by ID which Zipkin server has received, or null if not present. */ - @Nullable public List getTrace(String traceId) { - List result; - try { - result = storage.traces().getTrace(traceId).execute(); - } catch (IOException e) { - throw new AssertionError("I/O exception in in-memory storage", e); - } - // Note: this is a different behavior than Traces.getTrace() which is not nullable! - return result.isEmpty() ? null : result; - } - - /** Retrieves all service links between traces this zipkin server has received. */ - public List getDependencies() { - return storage.spanStore().getDependencies(); - } - - /** - * Used to manually start the server. - * - * @param httpPort choose 0 to select an available port - */ - public void start(int httpPort) throws IOException { - server.start(httpPort); - } - - /** - * Used to manually stop the server. 
- */ - public void shutdown() throws IOException { - server.shutdown(); - } - - @Override - public void beforeEach(ExtensionContext extensionContext) throws Exception { - - } - - @Override - public void afterEach(ExtensionContext extensionContext) throws Exception { - - } -} diff --git a/zipkin-junit5/src/test/java/zipkin2/junit5/ZipkinExtensionTest.java b/zipkin-junit5/src/test/java/zipkin2/junit5/ZipkinExtensionTest.java deleted file mode 100644 index 22891772174..00000000000 --- a/zipkin-junit5/src/test/java/zipkin2/junit5/ZipkinExtensionTest.java +++ /dev/null @@ -1,199 +0,0 @@ -/* - * Copyright 2015-2023 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.junit5; - -import okhttp3.MediaType; -import okhttp3.OkHttpClient; -import okhttp3.Request; -import okhttp3.RequestBody; -import okhttp3.Response; -import okio.Buffer; -import okio.ByteString; -import okio.GzipSink; -import org.junit.AssumptionViolatedException; -import org.junit.Test; -import org.junit.jupiter.api.extension.RegisterExtension; -import zipkin2.Span; -import zipkin2.codec.SpanBytesEncoder; - -import java.io.IOException; -import java.util.Arrays; -import java.util.List; - -import static java.util.Arrays.asList; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.failBecauseExceptionWasNotThrown; -import static zipkin2.TestObjects.CLIENT_SPAN; -import static zipkin2.TestObjects.LOTS_OF_SPANS; - -public class ZipkinExtensionTest { - - @RegisterExtension - public ZipkinExtension zipkin = new ZipkinExtension(); - - List spans = Arrays.asList(LOTS_OF_SPANS[0], LOTS_OF_SPANS[1]); - OkHttpClient client = new OkHttpClient(); - - @Test - public void getTraces_storedViaPost() throws IOException { - List trace = asList(CLIENT_SPAN); - // write the span to the zipkin using http - assertThat(postSpansV1(trace).code()).isEqualTo(202); - - // read the traces directly - assertThat(zipkin.getTraces()).containsOnly(trace); - } - - @Test - public void getTraces_storedViaPostVersion2_json() throws IOException { - getTraces_storedViaPostVersion2("application/json", SpanBytesEncoder.JSON_V2); - } - - @Test - public void getTraces_storedViaPostVersion2_proto3() throws IOException { - getTraces_storedViaPostVersion2("application/x-protobuf", SpanBytesEncoder.PROTO3); - } - - void getTraces_storedViaPostVersion2(String mediaType, SpanBytesEncoder encoder) - throws IOException { - - byte[] message = encoder.encodeList(spans); - - // write the span to the zipkin using http api v2 - Response response = - client - .newCall( - new Request.Builder() - .url(zipkin.httpUrl() + "/api/v2/spans") - 
.post(RequestBody.create(MediaType.parse(mediaType), message)) - .build()) - .execute(); - assertThat(response.code()).isEqualTo(202); - - // read the traces directly - assertThat(zipkin.getTraces()).containsOnly(asList(spans.get(0)), asList(spans.get(1))); - } - - /** The rule is here to help debugging. Even partial spans should be returned */ - @Test - public void getTraces_whenMissingTimestamps() throws IOException { - Span span = Span.newBuilder().traceId("1").id("1").name("foo").build(); - // write the span to the zipkin using http - assertThat(postSpansV1(asList(span)).code()).isEqualTo(202); - - // read the traces directly - assertThat(zipkin.getTraces()).containsOnly(asList(span)); - } - - /** The raw query can show affects like redundant rows in the data store. */ - @Test - public void storeSpans_readbackRaw() { - Span missingDuration = LOTS_OF_SPANS[0].toBuilder().duration(null).build(); - Span withDuration = LOTS_OF_SPANS[0]; - - // write the span to zipkin directly - zipkin.storeSpans(asList(missingDuration)); - zipkin.storeSpans(asList(withDuration)); - - assertThat(zipkin.getTrace(missingDuration.traceId())) - .containsExactly(missingDuration, withDuration); - } - - @Test - public void httpRequestCountIncrements() throws IOException { - postSpansV1(spans); - postSpansV1(spans); - - assertThat(zipkin.httpRequestCount()).isEqualTo(2); - } - - /** - * Normally, a span can be reported twice: for client and server. However, there are bugs that - * happened where several updates went to the same span id. {@link ZipkinExtension#collectorMetrics} - * can be used to help ensure a span isn't reported more times than expected. 
- */ - @Test - public void collectorMetrics_spans() throws IOException { - postSpansV1(asList(LOTS_OF_SPANS[0])); - postSpansV1(asList(LOTS_OF_SPANS[1], LOTS_OF_SPANS[2])); - - assertThat(zipkin.collectorMetrics().spans()).isEqualTo(3); - } - - @Test - public void postSpans_disconnectDuringBody() { - zipkin.enqueueFailure(HttpFailure.disconnectDuringBody()); - - try { - postSpansV1(spans); - failBecauseExceptionWasNotThrown(IOException.class); - } catch (IOException expected) { // not always a ConnectException! - } - - // Zipkin didn't store the spans, as they shouldn't have been readable, due to disconnect - assertThat(zipkin.getTraces()).isEmpty(); - - try { - // The failure shouldn't affect later requests - assertThat(postSpansV1(spans).code()).isEqualTo(202); - } catch (IOException flake) { - throw new AssumptionViolatedException("test flaked", flake); - } - } - - @Test - public void postSpans_sendErrorResponse400() throws IOException { - zipkin.enqueueFailure(HttpFailure.sendErrorResponse(400, "Invalid Format")); - - Response response = postSpansV1(spans); - assertThat(response.code()).isEqualTo(400); - assertThat(response.body().string()).isEqualTo("Invalid Format"); - - // Zipkin didn't store the spans, as they shouldn't have been readable, due to the error - assertThat(zipkin.getTraces()).isEmpty(); - - // The failure shouldn't affect later requests - assertThat(postSpansV1(spans).code()).isEqualTo(202); - } - - @Test - public void gzippedSpans() throws IOException { - byte[] spansInJson = SpanBytesEncoder.JSON_V1.encodeList(spans); - - Buffer sink = new Buffer(); - GzipSink gzipSink = new GzipSink(sink); - gzipSink.write(new Buffer().write(spansInJson), spansInJson.length); - gzipSink.close(); - ByteString gzippedJson = sink.readByteString(); - - client.newCall(new Request.Builder() - .url(zipkin.httpUrl() + "/api/v1/spans") - .addHeader("Content-Encoding", "gzip") - .post(RequestBody.create(MediaType.parse("application/json"), gzippedJson)) - 
.build()).execute(); - - assertThat(zipkin.collectorMetrics().bytes()).isEqualTo(spansInJson.length); - } - - Response postSpansV1(List spans) throws IOException { - byte[] spansInJson = SpanBytesEncoder.JSON_V1.encodeList(spans); - return client - .newCall( - new Request.Builder() - .url(zipkin.httpUrl() + "/api/v1/spans") - .post(RequestBody.create(MediaType.parse("application/json"), spansInJson)) - .build()) - .execute(); - } -} diff --git a/zipkin-junit5/src/test/resources/simplelogger.properties b/zipkin-junit5/src/test/resources/simplelogger.properties deleted file mode 100644 index 0407cd2f055..00000000000 --- a/zipkin-junit5/src/test/resources/simplelogger.properties +++ /dev/null @@ -1,6 +0,0 @@ -# See https://www.slf4j.org/api/org/slf4j/impl/SimpleLogger.html for the full list of config options - -org.slf4j.simpleLogger.logFile=System.out -org.slf4j.simpleLogger.defaultLogLevel=warn -org.slf4j.simpleLogger.showDateTime=true -org.slf4j.simpleLogger.dateTimeFormat=yyyy-MM-dd HH:mm:ss:SSS diff --git a/zipkin-server/pom.xml b/zipkin-server/pom.xml index eec206e7366..655dfe144d5 100644 --- a/zipkin-server/pom.xml +++ b/zipkin-server/pom.xml @@ -16,15 +16,15 @@ --> 4.0.0 - + pom io.zipkin zipkin-parent 2.24.4-SNAPSHOT - zipkin-server - Zipkin Server + zipkin-server-parent + Zipkin Server Parent ${project.basedir}/.. 
@@ -35,8 +35,30 @@ 5.13.2 2.17.1 ${project.build.directory}/generated-test-sources/wire + + 9.7.0-SNAPSHOT + + + ../skywalking/apm-protocol + ../skywalking/oap-server-bom + ../skywalking/oap-server/server-core + ../skywalking/oap-server/server-receiver-plugin/receiver-proto + ../skywalking/oap-server/server-receiver-plugin/zipkin-receiver-plugin + ../skywalking/oap-server/server-cluster-plugin/cluster-standalone-plugin + ../skywalking/oap-server/server-storage-plugin + ../skywalking/oap-server/server-library + ../skywalking/oap-server/server-query-plugin/zipkin-query-plugin + ../skywalking/oap-server/server-telemetry + ../skywalking/oap-server/server-testing + ../skywalking/oap-server/server-configuration/configuration-api + ../skywalking/oap-server/ai-pipeline + + server-core + server-starter + + @@ -79,44 +101,6 @@ - - - org.springframework.boot - spring-boot-starter - ${spring-boot.version} - - - org.springframework.boot - spring-boot-starter-logging - - - - - org.springframework.boot - spring-boot-starter-actuator - ${spring-boot.version} - - - org.springframework.boot - spring-boot-starter-logging - - - - - - - org.yaml - snakeyaml - ${snakeyaml.version} - - - - - org.xerial.snappy - snappy-java - ${snappy.version} - - org.apache.logging.log4j @@ -147,70 +131,12 @@ compile - - - org.springframework.boot - spring-boot-starter-log4j2 - ${spring-boot.version} - - - - - ${armeria.groupId} - armeria-spring-boot2-autoconfigure - ${armeria.version} - - - ${armeria.groupId} - armeria-logback - - - javax.validation - validation-api - - - - - ${armeria.groupId} - armeria-brave - ${armeria.version} - - - ${armeria.groupId} - armeria-grpc-protocol - ${armeria.version} - - - - - io.micrometer - micrometer-registry-prometheus - ${micrometer.version} - - - - com.netflix.concurrency-limits - concurrency-limits-core - 0.3.6 - - - io.micrometer - micrometer-core - ${micrometer.version} - - ${project.groupId}.zipkin2 zipkin ${project.version} - - ${project.groupId}.zipkin2 - 
zipkin-collector - ${project.version} - - org.slf4j slf4j-api @@ -230,76 +156,6 @@ provided - - - - - ${project.groupId}.zipkin2 - zipkin-storage-cassandra - ${project.version} - true - - - - - ${project.groupId}.zipkin2 - zipkin-storage-elasticsearch - ${project.version} - true - - - - - ${project.groupId}.zipkin2 - zipkin-storage-mysql-v1 - ${project.version} - true - - - org.mariadb.jdbc - mariadb-java-client - ${mariadb-java-client.version} - true - - - com.zaxxer - HikariCP - ${HikariCP.version} - true - - - - - ${project.groupId}.zipkin2 - zipkin-collector-activemq - ${project.version} - true - - - - - ${project.groupId}.zipkin2 - zipkin-collector-kafka - ${project.version} - true - - - - - ${project.groupId}.zipkin2 - zipkin-collector-rabbitmq - ${project.version} - true - - - - - ${project.groupId}.zipkin2 - zipkin-collector-scribe - ${project.version} - true - - io.zipkin.brave @@ -326,13 +182,6 @@ ${zipkin-proto3.version} test - - - com.google.code.gson - gson - ${gson.version} - test - com.squareup.wire @@ -341,107 +190,15 @@ test - - com.squareup.okhttp3 - okhttp - ${okhttp.version} - test - - - - com.squareup.okio - okio - - - - - - ${armeria.groupId} - armeria-junit5 - ${armeria.version} - test - - - - ${armeria.groupId} - armeria-junit4 - ${armeria.version} - test - - - - - org.springframework.boot - spring-boot-test-autoconfigure - ${spring-boot.version} - - - * - * - - - test - - - org.springframework.boot - spring-boot-test - ${spring-boot.version} - - - * - * - - - test - - - org.springframework - spring-test - ${spring.version} - test - - ${project.groupId}.zipkin2 zipkin-tests ${project.version} test - - - org.awaitility - awaitility - ${awaitility.version} - test - - - - com.jayway.jsonpath - json-path - 2.4.0 - test - - - actuator - - - !skipActuator - - - - - - ${armeria.groupId} - armeria-spring-boot2-actuator-autoconfigure - ${armeria.version} - true - - - include-lens @@ -460,204 +217,4 @@ - - - - src/main/resources - true - - - - - - 
maven-dependency-plugin - - - com.squareup.wire - wire-maven-plugin - - - generate-test-sources - - generate-sources - - - ${proto.generatedSourceDirectory} - - - - - - - org.codehaus.mojo - build-helper-maven-plugin - 3.2.0 - - - add-test-source - generate-test-sources - - add-test-source - - - - ${proto.generatedSourceDirectory} - - - - - - - - pl.project13.maven - git-commit-id-plugin - ${git-commit-id.version} - - - extract-git-info - - revision - - - - - false - - - - - org.springframework.boot - spring-boot-maven-plugin - ${spring-boot.version} - - zipkin.server.ZipkinServer - true - - - - exec - - repackage - - - exec - - - org.slf4j - slf4j-simple - - - - - - - - slim - - repackage - - - slim - - - com.google.auto.value,com.google.guava,io.dropwizard.metrics,com.datastax.oss,com.github.jnr,org.ow2.asm,org.jooq,javax.xml.bind,org.mariadb.jdbc,com.zaxxer,org.apache.activemq,org.apache.geronimo.specs,org.fusesource.hawtbuf,org.apache.kafka,com.github.luben,org.lz4,org.xerial.snappy,com.rabbitmq,jakarta.annotation,org.apache.thrift,org.apache.logging.log4j - - - - - ${armeria.groupId} - armeria-spring-boot2-actuator-autoconfigure - - - org.springframework.boot - spring-boot-actuator-autoconfigure - - - org.springframework.boot - spring-boot-actuator - - - com.fasterxml.jackson.datatype - jackson-datatype-jsr310 - - - - - org.springframework.boot - spring-boot-starter-log4j2 - - - - - io.netty - netty-tcnative-boringssl-static - - - io.netty - netty-codec-haproxy - - - - - - - - - - - - - - - - - - ${project.groupId}.zipkin2 - zipkin-storage-cassandra - - - io.zipkin.brave.cassandra - brave-instrumentation-cassandra-driver - - - - - ${project.groupId}.zipkin2 - zipkin-storage-mysql-v1 - - - - - ${project.groupId}.zipkin2 - zipkin-collector-activemq - - - - - ${project.groupId}.zipkin2 - zipkin-collector-kafka - - - - - ${project.groupId}.zipkin2 - zipkin-collector-rabbitmq - - - - - ${project.groupId}.zipkin2 - zipkin-collector-scribe - - - ${armeria.groupId} - 
armeria-thrift0.15 - - - - - - - - diff --git a/zipkin-server/server-core/pom.xml b/zipkin-server/server-core/pom.xml new file mode 100644 index 00000000000..abb0b4c9fb0 --- /dev/null +++ b/zipkin-server/server-core/pom.xml @@ -0,0 +1,24 @@ + + + 4.0.0 + + + zipkin-server-parent + io.zipkin + 2.24.4-SNAPSHOT + + + zipkin-server-core + Zipkin Server Core + + + + org.apache.skywalking + server-core + ${skywalking.version} + + + + \ No newline at end of file diff --git a/zipkin-server/server-core/src/main/java/zipkin/server/core/CoreModuleConfig.java b/zipkin-server/server-core/src/main/java/zipkin/server/core/CoreModuleConfig.java new file mode 100644 index 00000000000..cba2b402576 --- /dev/null +++ b/zipkin-server/server-core/src/main/java/zipkin/server/core/CoreModuleConfig.java @@ -0,0 +1,97 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package zipkin.server.core; + +import org.apache.skywalking.oap.server.library.module.ModuleConfig; + +public class CoreModuleConfig extends ModuleConfig { + /** + * The max length of the service name. + */ + private int serviceNameMaxLength = 70; + /** + * The max length of the service instance name. 
+ */ + private int instanceNameMaxLength = 70; + /** + * The max length of the endpoint name. + * + *

NOTICE

+ * In the current practice, we don't recommend the length over 190. + */ + private int endpointNameMaxLength = 150; + /** + * The period of L1 aggregation flush. Unit is ms. + */ + private long l1FlushPeriod = 500; + /** + * The threshold of session time. Unit is ms. Default value is 70s. + */ + private long storageSessionTimeout = 70_000; + /** + * The service cache refresh interval, default 10s + */ + private int serviceCacheRefreshInterval = 10; + /** + * The time to live of all metrics data. Unit is day. + */ + private int metricsDataTTL = 3; + /** + * The time to live of all record data, including tracing. Unit is Day. + */ + private int recordDataTTL = 7; + + public int getServiceNameMaxLength() { + return serviceNameMaxLength; + } + + public int getInstanceNameMaxLength() { + return instanceNameMaxLength; + } + + public int getEndpointNameMaxLength() { + return endpointNameMaxLength; + } + + public long getL1FlushPeriod() { + return l1FlushPeriod; + } + + public long getStorageSessionTimeout() { + return storageSessionTimeout; + } + + public int getServiceCacheRefreshInterval() { + return serviceCacheRefreshInterval; + } + + public int getMetricsDataTTL() { + return metricsDataTTL; + } + + public int getRecordDataTTL() { + return recordDataTTL; + } + + public org.apache.skywalking.oap.server.core.CoreModuleConfig toSkyWalkingConfig() { + final org.apache.skywalking.oap.server.core.CoreModuleConfig result = new org.apache.skywalking.oap.server.core.CoreModuleConfig(); + result.setServiceCacheRefreshInterval(serviceCacheRefreshInterval); + return result; + } +} diff --git a/zipkin-server/server-core/src/main/java/zipkin/server/core/CoreModuleProvider.java b/zipkin-server/server-core/src/main/java/zipkin/server/core/CoreModuleProvider.java new file mode 100644 index 00000000000..94ddf289fba --- /dev/null +++ b/zipkin-server/server-core/src/main/java/zipkin/server/core/CoreModuleProvider.java @@ -0,0 +1,202 @@ +/* + * Licensed to the Apache Software Foundation 
(ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package zipkin.server.core; + +import org.apache.skywalking.oap.server.core.CoreModule; +import org.apache.skywalking.oap.server.core.analysis.meter.MeterSystem; +import org.apache.skywalking.oap.server.core.analysis.worker.MetricsStreamProcessor; +import org.apache.skywalking.oap.server.core.cache.NetworkAddressAliasCache; +import org.apache.skywalking.oap.server.core.cache.ProfileTaskCache; +import org.apache.skywalking.oap.server.core.command.CommandService; +import org.apache.skywalking.oap.server.core.config.ConfigService; +import org.apache.skywalking.oap.server.core.config.DownSamplingConfigService; +import org.apache.skywalking.oap.server.core.config.IComponentLibraryCatalogService; +import org.apache.skywalking.oap.server.core.config.NamingControl; +import org.apache.skywalking.oap.server.core.config.group.EndpointNameGrouping; +import org.apache.skywalking.oap.server.core.management.ui.menu.UIMenuManagementService; +import org.apache.skywalking.oap.server.core.management.ui.template.UITemplateManagementService; +import org.apache.skywalking.oap.server.core.oal.rt.OALEngineLoaderService; +import org.apache.skywalking.oap.server.core.profiling.continuous.ContinuousProfilingMutationService; +import 
org.apache.skywalking.oap.server.core.profiling.continuous.ContinuousProfilingQueryService; +import org.apache.skywalking.oap.server.core.profiling.ebpf.EBPFProfilingMutationService; +import org.apache.skywalking.oap.server.core.profiling.ebpf.EBPFProfilingQueryService; +import org.apache.skywalking.oap.server.core.profiling.trace.ProfileTaskMutationService; +import org.apache.skywalking.oap.server.core.profiling.trace.ProfileTaskQueryService; +import org.apache.skywalking.oap.server.core.query.AggregationQueryService; +import org.apache.skywalking.oap.server.core.query.AlarmQueryService; +import org.apache.skywalking.oap.server.core.query.BrowserLogQueryService; +import org.apache.skywalking.oap.server.core.query.EventQueryService; +import org.apache.skywalking.oap.server.core.query.LogQueryService; +import org.apache.skywalking.oap.server.core.query.MetadataQueryService; +import org.apache.skywalking.oap.server.core.query.MetricsMetadataQueryService; +import org.apache.skywalking.oap.server.core.query.MetricsQueryService; +import org.apache.skywalking.oap.server.core.query.RecordQueryService; +import org.apache.skywalking.oap.server.core.query.TagAutoCompleteQueryService; +import org.apache.skywalking.oap.server.core.query.TopNRecordsQueryService; +import org.apache.skywalking.oap.server.core.query.TopologyQueryService; +import org.apache.skywalking.oap.server.core.query.TraceQueryService; +import org.apache.skywalking.oap.server.core.remote.RemoteSenderService; +import org.apache.skywalking.oap.server.core.remote.client.RemoteClientManager; +import org.apache.skywalking.oap.server.core.server.GRPCHandlerRegister; +import org.apache.skywalking.oap.server.core.server.HTTPHandlerRegister; +import org.apache.skywalking.oap.server.core.source.SourceReceiver; +import org.apache.skywalking.oap.server.core.source.SourceReceiverImpl; +import org.apache.skywalking.oap.server.core.status.ServerStatusService; +import 
org.apache.skywalking.oap.server.core.storage.model.IModelManager; +import org.apache.skywalking.oap.server.core.storage.model.ModelCreator; +import org.apache.skywalking.oap.server.core.storage.model.ModelManipulator; +import org.apache.skywalking.oap.server.core.storage.model.StorageModels; +import org.apache.skywalking.oap.server.core.worker.IWorkerInstanceGetter; +import org.apache.skywalking.oap.server.core.worker.IWorkerInstanceSetter; +import org.apache.skywalking.oap.server.core.worker.WorkerInstancesService; +import org.apache.skywalking.oap.server.library.module.ModuleConfig; +import org.apache.skywalking.oap.server.library.module.ModuleDefine; +import org.apache.skywalking.oap.server.library.module.ModuleProvider; +import org.apache.skywalking.oap.server.library.module.ModuleStartException; +import org.apache.skywalking.oap.server.library.module.ServiceNotProvidedException; +import zipkin.server.core.services.EmptyComponentLibraryCatalogService; +import zipkin.server.core.services.EmptyGRPCHandlerRegister; +import zipkin.server.core.services.EmptyHTTPHandlerRegister; +import zipkin.server.core.services.EmptyNetworkAddressAliasCache; +import zipkin.server.core.services.ZipkinConfigService; + +import java.util.Collections; + +public class CoreModuleProvider extends ModuleProvider { + private CoreModuleConfig moduleConfig; + + private EndpointNameGrouping endpointNameGrouping; + private final SourceReceiverImpl receiver; + private final StorageModels storageModels; + + public CoreModuleProvider() { + this.receiver = new SourceReceiverImpl(); + this.storageModels = new StorageModels(); + } + + @Override + public String name() { + return "zipkin"; + } + + @Override + public Class module() { + return CoreModule.class; + } + + @Override + public ConfigCreator newConfigCreator() { + return new ConfigCreator() { + @Override + public Class type() { + return CoreModuleConfig.class; + } + + @Override + public void onInitialized(CoreModuleConfig initialized) { + 
moduleConfig = initialized; + } + }; + } + + @Override + public void prepare() throws ServiceNotProvidedException, ModuleStartException { + endpointNameGrouping = new EndpointNameGrouping(); + final NamingControl namingControl = new NamingControl( + moduleConfig.getServiceNameMaxLength(), + moduleConfig.getInstanceNameMaxLength(), + moduleConfig.getEndpointNameMaxLength(), + endpointNameGrouping + ); + this.registerServiceImplementation(NamingControl.class, namingControl); + + final org.apache.skywalking.oap.server.core.CoreModuleConfig swConfig = this.moduleConfig.toSkyWalkingConfig(); + this.registerServiceImplementation(MeterSystem.class, new MeterSystem(getManager())); + this.registerServiceImplementation(ConfigService.class, new ZipkinConfigService(moduleConfig, this)); + this.registerServiceImplementation(ServerStatusService.class, new ServerStatusService(getManager())); + this.registerServiceImplementation(DownSamplingConfigService.class, new DownSamplingConfigService(Collections.emptyList())); + this.registerServiceImplementation(GRPCHandlerRegister.class, new EmptyGRPCHandlerRegister()); + this.registerServiceImplementation(HTTPHandlerRegister.class, new EmptyHTTPHandlerRegister()); + this.registerServiceImplementation(IComponentLibraryCatalogService.class, new EmptyComponentLibraryCatalogService()); + this.registerServiceImplementation(SourceReceiver.class, receiver); + final WorkerInstancesService instancesService = new WorkerInstancesService(); + this.registerServiceImplementation(IWorkerInstanceGetter.class, instancesService); + this.registerServiceImplementation(IWorkerInstanceSetter.class, instancesService); + this.registerServiceImplementation(RemoteSenderService.class, new RemoteSenderService(getManager())); + this.registerServiceImplementation(RemoteSenderService.class, new RemoteSenderService(getManager())); + this.registerServiceImplementation(ModelCreator.class, storageModels); + this.registerServiceImplementation(IModelManager.class, 
storageModels); + this.registerServiceImplementation(ModelManipulator.class, storageModels); + this.registerServiceImplementation(NetworkAddressAliasCache.class, new EmptyNetworkAddressAliasCache()); + this.registerServiceImplementation(TopologyQueryService.class, new TopologyQueryService(getManager(), storageModels)); + this.registerServiceImplementation(MetricsMetadataQueryService.class, new MetricsMetadataQueryService()); + this.registerServiceImplementation(MetricsQueryService.class, new MetricsQueryService(getManager())); + this.registerServiceImplementation(TraceQueryService.class, new TraceQueryService(getManager())); + this.registerServiceImplementation(BrowserLogQueryService.class, new BrowserLogQueryService(getManager())); + this.registerServiceImplementation(LogQueryService.class, new LogQueryService(getManager())); + this.registerServiceImplementation(MetadataQueryService.class, new MetadataQueryService(getManager(), swConfig)); + this.registerServiceImplementation(AggregationQueryService.class, new AggregationQueryService(getManager())); + this.registerServiceImplementation(AlarmQueryService.class, new AlarmQueryService(getManager())); + this.registerServiceImplementation(TopNRecordsQueryService.class, new TopNRecordsQueryService(getManager())); + this.registerServiceImplementation(EventQueryService.class, new EventQueryService(getManager())); + this.registerServiceImplementation(TagAutoCompleteQueryService.class, new TagAutoCompleteQueryService(getManager(), swConfig)); + this.registerServiceImplementation(RecordQueryService.class, new RecordQueryService(getManager())); + this.registerServiceImplementation(ProfileTaskMutationService.class, new ProfileTaskMutationService(getManager())); + this.registerServiceImplementation(ProfileTaskQueryService.class, new ProfileTaskQueryService(getManager(), swConfig)); + this.registerServiceImplementation(ProfileTaskCache.class, new ProfileTaskCache(getManager(), swConfig)); + 
this.registerServiceImplementation(EBPFProfilingMutationService.class, new EBPFProfilingMutationService(getManager())); + this.registerServiceImplementation(EBPFProfilingQueryService.class, new EBPFProfilingQueryService(getManager(), swConfig, this.storageModels)); + this.registerServiceImplementation(ContinuousProfilingMutationService.class, new ContinuousProfilingMutationService(getManager())); + this.registerServiceImplementation(ContinuousProfilingQueryService.class, new ContinuousProfilingQueryService(getManager())); + this.registerServiceImplementation(CommandService.class, new CommandService(getManager())); + this.registerServiceImplementation(OALEngineLoaderService.class, new OALEngineLoaderService(getManager())); + this.registerServiceImplementation(RemoteClientManager.class, new RemoteClientManager(getManager(), 0)); + this.registerServiceImplementation(UITemplateManagementService.class, new UITemplateManagementService(getManager())); + this.registerServiceImplementation(UIMenuManagementService.class, new UIMenuManagementService(getManager(), swConfig)); + + if (moduleConfig.getMetricsDataTTL() < 2) { + throw new ModuleStartException( + "Metric TTL should be at least 2 days, current value is " + moduleConfig.getMetricsDataTTL()); + } + if (moduleConfig.getRecordDataTTL() < 2) { + throw new ModuleStartException( + "Record TTL should be at least 2 days, current value is " + moduleConfig.getRecordDataTTL()); + } + + final MetricsStreamProcessor metricsStreamProcessor = MetricsStreamProcessor.getInstance(); + metricsStreamProcessor.setL1FlushPeriod(moduleConfig.getL1FlushPeriod()); + metricsStreamProcessor.setStorageSessionTimeout(moduleConfig.getStorageSessionTimeout()); + metricsStreamProcessor.setMetricsDataTTL(moduleConfig.getMetricsDataTTL()); + } + + @Override + public void start() throws ServiceNotProvidedException, ModuleStartException { + + } + + @Override + public void notifyAfterCompleted() throws ServiceNotProvidedException, ModuleStartException { 
+ + } + + @Override + public String[] requiredModules() { + return new String[0]; + } +} diff --git a/zipkin-server/server-core/src/main/java/zipkin/server/core/services/EmptyComponentLibraryCatalogService.java b/zipkin-server/server-core/src/main/java/zipkin/server/core/services/EmptyComponentLibraryCatalogService.java new file mode 100644 index 00000000000..1212ce5792b --- /dev/null +++ b/zipkin-server/server-core/src/main/java/zipkin/server/core/services/EmptyComponentLibraryCatalogService.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package zipkin.server.core.services; + +import org.apache.skywalking.oap.server.core.config.IComponentLibraryCatalogService; + +public class EmptyComponentLibraryCatalogService implements IComponentLibraryCatalogService { + @Override + public int getComponentId(String componentName) { + return 0; + } + + @Override + public int getServerIdBasedOnComponent(int componentId) { + return 0; + } + + @Override + public String getComponentName(int componentId) { + return null; + } + + @Override + public String getServerNameBasedOnComponent(int componentId) { + return null; + } + + @Override + public boolean compare(int componentA, int componentB) { + return false; + } +} diff --git a/zipkin-server/server-core/src/main/java/zipkin/server/core/services/EmptyGRPCHandlerRegister.java b/zipkin-server/server-core/src/main/java/zipkin/server/core/services/EmptyGRPCHandlerRegister.java new file mode 100644 index 00000000000..353d14b5d08 --- /dev/null +++ b/zipkin-server/server-core/src/main/java/zipkin/server/core/services/EmptyGRPCHandlerRegister.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package zipkin.server.core.services; + +import io.grpc.BindableService; +import io.grpc.ServerInterceptor; +import io.grpc.ServerServiceDefinition; +import org.apache.skywalking.oap.server.core.server.GRPCHandlerRegister; + +public class EmptyGRPCHandlerRegister implements GRPCHandlerRegister { + @Override + public void addHandler(BindableService handler) { + } + + @Override + public void addHandler(ServerServiceDefinition definition) { + } + + @Override + public void addFilter(ServerInterceptor interceptor) { + } +} diff --git a/zipkin-server/server-core/src/main/java/zipkin/server/core/services/EmptyHTTPHandlerRegister.java b/zipkin-server/server-core/src/main/java/zipkin/server/core/services/EmptyHTTPHandlerRegister.java new file mode 100644 index 00000000000..882e3175463 --- /dev/null +++ b/zipkin-server/server-core/src/main/java/zipkin/server/core/services/EmptyHTTPHandlerRegister.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package zipkin.server.core.services; + +import com.linecorp.armeria.common.HttpMethod; +import org.apache.skywalking.oap.server.core.server.HTTPHandlerRegister; + +import java.util.List; + +public class EmptyHTTPHandlerRegister implements HTTPHandlerRegister { + @Override + public void addHandler(Object httpService, List httpMethods) { + + } +} diff --git a/zipkin-server/server-core/src/main/java/zipkin/server/core/services/EmptyNetworkAddressAliasCache.java b/zipkin-server/server-core/src/main/java/zipkin/server/core/services/EmptyNetworkAddressAliasCache.java new file mode 100644 index 00000000000..779e85d2318 --- /dev/null +++ b/zipkin-server/server-core/src/main/java/zipkin/server/core/services/EmptyNetworkAddressAliasCache.java @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package zipkin.server.core.services; + +import org.apache.skywalking.oap.server.core.CoreModuleConfig; +import org.apache.skywalking.oap.server.core.cache.NetworkAddressAliasCache; + +public class EmptyNetworkAddressAliasCache extends NetworkAddressAliasCache { + public EmptyNetworkAddressAliasCache() { + super(new CoreModuleConfig()); + } +} diff --git a/zipkin-server/server-core/src/main/java/zipkin/server/core/services/ZipkinConfigService.java b/zipkin-server/server-core/src/main/java/zipkin/server/core/services/ZipkinConfigService.java new file mode 100644 index 00000000000..e60ae1d967b --- /dev/null +++ b/zipkin-server/server-core/src/main/java/zipkin/server/core/services/ZipkinConfigService.java @@ -0,0 +1,30 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package zipkin.server.core.services; + +import org.apache.skywalking.oap.server.core.config.ConfigService; +import org.apache.skywalking.oap.server.library.module.ModuleProvider; +import zipkin.server.core.CoreModuleConfig; + +public class ZipkinConfigService extends ConfigService { + + public ZipkinConfigService(CoreModuleConfig moduleConfig, ModuleProvider provider) { + super(new org.apache.skywalking.oap.server.core.CoreModuleConfig(), provider); + } +} diff --git a/zipkin-server/server-core/src/main/resources/META-INF/services/org.apache.skywalking.oap.server.library.module.ModuleProvider b/zipkin-server/server-core/src/main/resources/META-INF/services/org.apache.skywalking.oap.server.library.module.ModuleProvider new file mode 100755 index 00000000000..1baafe94b46 --- /dev/null +++ b/zipkin-server/server-core/src/main/resources/META-INF/services/org.apache.skywalking.oap.server.library.module.ModuleProvider @@ -0,0 +1,19 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# + +zipkin.server.core.CoreModuleProvider \ No newline at end of file diff --git a/zipkin-server/server-starter/pom.xml b/zipkin-server/server-starter/pom.xml new file mode 100644 index 00000000000..d280d7ee030 --- /dev/null +++ b/zipkin-server/server-starter/pom.xml @@ -0,0 +1,242 @@ + + + 4.0.0 + + + zipkin-server-parent + io.zipkin + 2.24.4-SNAPSHOT + + + zipkin-server + Zipkin Server + + + ${project.basedir}/src/main/resources/version.properties + + + + + + + io.zipkin + zipkin-server-core + ${project.version} + + + + + org.apache.skywalking + cluster-standalone-plugin + ${skywalking.version} + + + + org.apache.skywalking + telemetry-prometheus + ${skywalking.version} + + + + + org.apache.skywalking + storage-jdbc-hikaricp-plugin + ${skywalking.version} + + + org.apache.skywalking + storage-banyandb-plugin + ${skywalking.version} + + + org.apache.skywalking + storage-elasticsearch-plugin + ${skywalking.version} + + + + + org.apache.skywalking + zipkin-receiver-plugin + ${skywalking.version} + + + + + org.apache.skywalking + zipkin-query-plugin + ${skywalking.version} + + + + + org.apache.skywalking + library-server + ${skywalking.version} + + + + com.google.code.gson + gson + ${gson.version} + + + org.slf4j + slf4j-api + ${slf4j.version} + + + + + + + src/main/resources + true + + + + + + maven-dependency-plugin + + + com.squareup.wire + wire-maven-plugin + + + generate-test-sources + + generate-sources + + + ${proto.generatedSourceDirectory} + + + + + + + org.codehaus.mojo + build-helper-maven-plugin + 3.2.0 + + + add-test-source + generate-test-sources + + add-test-source + + + + ${proto.generatedSourceDirectory} + + + + + + + + pl.project13.maven + git-commit-id-plugin + ${git-commit-id.version} + + + extract-git-info + + revision + + + + + false + + + + + org.springframework.boot + spring-boot-maven-plugin + ${spring-boot.version} + + zipkin.server.ZipkinServer + true + + + + exec + + repackage + + + exec + + + org.slf4j + slf4j-simple + + + + + + + 
slim + + repackage + + + slim + + + com.google.auto.value,io.dropwizard.metrics,com.datastax.oss,com.github.jnr,org.ow2.asm,org.jooq,javax.xml.bind,org.mariadb.jdbc,org.apache.activemq,org.apache.geronimo.specs,org.fusesource.hawtbuf,org.apache.kafka,com.github.luben,org.lz4,org.xerial.snappy,com.rabbitmq,jakarta.annotation,org.apache.thrift,org.apache.skywalking.banyandb,org.postgresql + + + + com.fasterxml.jackson.datatype + jackson-datatype-jsr310 + + + + + io.netty + netty-tcnative-boringssl-static + + + io.netty + netty-codec-haproxy + + + + + + + + + pl.project13.maven + git-commit-id-plugin + 4.9.10 + + + get-the-git-information + + revision + + initialize + + + + false + true + ${generateGitPropertiesFilename} + UTC + yyyyMMddHHmmss + false + + ^git.build.version$ + ^git.commit.id$ + + + + + + \ No newline at end of file diff --git a/zipkin-server/server-starter/src/main/java/zipkin/server/ZipkinServer.java b/zipkin-server/server-starter/src/main/java/zipkin/server/ZipkinServer.java new file mode 100644 index 00000000000..8936165347c --- /dev/null +++ b/zipkin-server/server-starter/src/main/java/zipkin/server/ZipkinServer.java @@ -0,0 +1,25 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package zipkin.server; + +public class ZipkinServer { + public static void main(String[] args) { + ZipkinServerBootstrap.start(); + } +} diff --git a/zipkin-server/server-starter/src/main/java/zipkin/server/ZipkinServerBootstrap.java b/zipkin-server/server-starter/src/main/java/zipkin/server/ZipkinServerBootstrap.java new file mode 100644 index 00000000000..ed8027965b7 --- /dev/null +++ b/zipkin-server/server-starter/src/main/java/zipkin/server/ZipkinServerBootstrap.java @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package zipkin.server; + +import org.apache.skywalking.oap.server.core.CoreModule; +import org.apache.skywalking.oap.server.core.RunningMode; +import org.apache.skywalking.oap.server.core.status.ServerStatusService; +import org.apache.skywalking.oap.server.core.version.Version; +import org.apache.skywalking.oap.server.library.module.ApplicationConfiguration; +import org.apache.skywalking.oap.server.library.module.ModuleManager; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import zipkin.server.config.ApplicationConfigLoader; + +/** + * Starter core. Load the core configuration file, and initialize the startup sequence through {@link ModuleManager}. 
+ */ +public class ZipkinServerBootstrap { + private static final Logger log = LoggerFactory.getLogger(ZipkinServerBootstrap.class); + + public static void start() { + String mode = System.getProperty("mode"); + RunningMode.setMode(mode); + + ApplicationConfigLoader configLoader = new ApplicationConfigLoader(); + ModuleManager manager = new ModuleManager(); + try { + ApplicationConfiguration applicationConfiguration = configLoader.load(); + manager.init(applicationConfiguration); + + manager.find(CoreModule.NAME) + .provider() + .getService(ServerStatusService.class) + .bootedNow(System.currentTimeMillis()); + + log.info("Version of Zipkin: {}", Version.CURRENT); + + if (RunningMode.isInitMode()) { + log.info("Zipkin starts up in init mode successfully, exit now..."); + System.exit(0); + } + } catch (Throwable t) { + log.error(t.getMessage(), t); + System.exit(1); + } + } +} diff --git a/zipkin-server/server-starter/src/main/java/zipkin/server/config/ApplicationConfigLoader.java b/zipkin-server/server-starter/src/main/java/zipkin/server/config/ApplicationConfigLoader.java new file mode 100644 index 00000000000..6963e3175ae --- /dev/null +++ b/zipkin-server/server-starter/src/main/java/zipkin/server/config/ApplicationConfigLoader.java @@ -0,0 +1,221 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package zipkin.server.config; + +import org.apache.skywalking.oap.server.library.module.ApplicationConfiguration; +import org.apache.skywalking.oap.server.library.module.ProviderNotFoundException; +import org.apache.skywalking.oap.server.library.util.CollectionUtils; +import org.apache.skywalking.oap.server.library.util.PropertyPlaceholderHelper; +import org.apache.skywalking.oap.server.library.util.ResourceUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.yaml.snakeyaml.Yaml; + +import java.io.FileNotFoundException; +import java.io.Reader; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.Map; +import java.util.Properties; + +/** + * Initialize collector settings with following sources. Use application.yml as primary setting, and fix missing setting + * by default settings in application-default.yml. + *

+ * At last, override setting by system.properties and system.envs if the key matches moduleName.provideName.settingKey. + */ +public class ApplicationConfigLoader implements ConfigLoader { + static final Logger log = LoggerFactory.getLogger(ApplicationConfigLoader.class.getName()); + + private static final String DISABLE_SELECTOR = "-"; + private static final String SELECTOR = "selector"; + + private final Yaml yaml = new Yaml(); + + @Override + public ApplicationConfiguration load() throws ConfigFileNotFoundException { + ApplicationConfiguration configuration = new ApplicationConfiguration(); + this.loadConfig(configuration); + this.overrideConfigBySystemEnv(configuration); + return configuration; + } + + @SuppressWarnings("unchecked") + private void loadConfig(ApplicationConfiguration configuration) throws ConfigFileNotFoundException { + try { + Reader applicationReader = ResourceUtils.read("application.yml"); + Map> moduleConfig = yaml.loadAs(applicationReader, Map.class); + if (CollectionUtils.isNotEmpty(moduleConfig)) { + selectConfig(moduleConfig); + moduleConfig.forEach((moduleName, providerConfig) -> { + if (providerConfig.size() > 0) { + log.info("Get a module define from application.yml, module name: {}", moduleName); + ApplicationConfiguration.ModuleConfiguration moduleConfiguration = configuration.addModule( + moduleName); + providerConfig.forEach((providerName, config) -> { + log.info( + "Get a provider define belong to {} module, provider name: {}", moduleName, + providerName + ); + final Map propertiesConfig = (Map) config; + final Properties properties = new Properties(); + if (propertiesConfig != null) { + propertiesConfig.forEach((propertyName, propertyValue) -> { + if (propertyValue instanceof Map) { + Properties subProperties = new Properties(); + ((Map) propertyValue).forEach((key, value) -> { + subProperties.put(key, value); + replacePropertyAndLog(key, value, subProperties, providerName); + }); + properties.put(propertyName, subProperties); 
+ } else { + properties.put(propertyName, propertyValue); + replacePropertyAndLog(propertyName, propertyValue, properties, providerName); + } + }); + } + moduleConfiguration.addProviderConfiguration(providerName, properties); + }); + } else { + log.warn( + "Get a module define from application.yml, but no provider define, use default, module name: {}", + moduleName + ); + } + }); + } + } catch (FileNotFoundException e) { + throw new ConfigFileNotFoundException(e.getMessage(), e); + } + } + + private void replacePropertyAndLog(final String propertyName, final Object propertyValue, final Properties target, + final Object providerName) { + final String valueString = PropertyPlaceholderHelper.INSTANCE + .replacePlaceholders(String.valueOf(propertyValue), target); + if (valueString.trim().length() == 0) { + target.replace(propertyName, valueString); + log.info("Provider={} config={} has been set as an empty string", providerName, propertyName); + } else { + // Use YAML to do data type conversion. + final Object replaceValue = convertValueString(valueString); + if (replaceValue != null) { + target.replace(propertyName, replaceValue); + } + } + } + + private Object convertValueString(String valueString) { + try { + Object replaceValue = yaml.load(valueString); + if (replaceValue instanceof String || replaceValue instanceof Integer || replaceValue instanceof Long || replaceValue instanceof Boolean || replaceValue instanceof ArrayList) { + return replaceValue; + } else { + return valueString; + } + } catch (Exception e) { + log.warn("yaml convert value type error, use origin values string. 
valueString={}", valueString, e); + return valueString; + } + } + + private void overrideConfigBySystemEnv(ApplicationConfiguration configuration) { + for (Map.Entry prop : System.getProperties().entrySet()) { + overrideModuleSettings(configuration, prop.getKey().toString(), prop.getValue().toString()); + } + } + + private void selectConfig(final Map> moduleConfiguration) { + Iterator>> moduleIterator = moduleConfiguration.entrySet().iterator(); + while (moduleIterator.hasNext()) { + Map.Entry> entry = moduleIterator.next(); + final String moduleName = entry.getKey(); + final Map providerConfig = entry.getValue(); + if (!providerConfig.containsKey(SELECTOR)) { + continue; + } + final String selector = (String) providerConfig.get(SELECTOR); + final String resolvedSelector = PropertyPlaceholderHelper.INSTANCE.replacePlaceholders( + selector, System.getProperties() + ); + providerConfig.entrySet().removeIf(e -> !resolvedSelector.equals(e.getKey())); + + if (!providerConfig.isEmpty()) { + continue; + } + + if (!DISABLE_SELECTOR.equals(resolvedSelector)) { + throw new ProviderNotFoundException( + "no provider found for module " + moduleName + ", " + + "if you're sure it's not required module and want to remove it, " + + "set the selector to -" + ); + } + + // now the module can be safely removed + moduleIterator.remove(); + log.info("Remove module {} without any provider", moduleName); + } + } + + private void overrideModuleSettings(ApplicationConfiguration configuration, String key, String value) { + int moduleAndConfigSeparator = key.indexOf('.'); + if (moduleAndConfigSeparator <= 0) { + return; + } + String moduleName = key.substring(0, moduleAndConfigSeparator); + String providerSettingSubKey = key.substring(moduleAndConfigSeparator + 1); + ApplicationConfiguration.ModuleConfiguration moduleConfiguration = configuration.getModuleConfiguration( + moduleName); + if (moduleConfiguration == null) { + return; + } + int providerAndConfigSeparator = 
providerSettingSubKey.indexOf('.'); + if (providerAndConfigSeparator <= 0) { + return; + } + String providerName = providerSettingSubKey.substring(0, providerAndConfigSeparator); + String settingKey = providerSettingSubKey.substring(providerAndConfigSeparator + 1); + if (!moduleConfiguration.has(providerName)) { + return; + } + Properties providerSettings = moduleConfiguration.getProviderConfiguration(providerName); + if (!providerSettings.containsKey(settingKey)) { + return; + } + Object originValue = providerSettings.get(settingKey); + Class type = originValue.getClass(); + if (type.equals(int.class) || type.equals(Integer.class)) + providerSettings.put(settingKey, Integer.valueOf(value)); + else if (type.equals(String.class)) + providerSettings.put(settingKey, value); + else if (type.equals(long.class) || type.equals(Long.class)) + providerSettings.put(settingKey, Long.valueOf(value)); + else if (type.equals(boolean.class) || type.equals(Boolean.class)) { + providerSettings.put(settingKey, Boolean.valueOf(value)); + } else { + return; + } + + log.info( + "The setting has been override by key: {}, value: {}, in {} provider of {} module through {}", settingKey, + value, providerName, moduleName, "System.properties" + ); + } +} diff --git a/zipkin-server/server-starter/src/main/java/zipkin/server/config/ConfigFileNotFoundException.java b/zipkin-server/server-starter/src/main/java/zipkin/server/config/ConfigFileNotFoundException.java new file mode 100644 index 00000000000..e5fd40768b7 --- /dev/null +++ b/zipkin-server/server-starter/src/main/java/zipkin/server/config/ConfigFileNotFoundException.java @@ -0,0 +1,29 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package zipkin.server.config; + +public class ConfigFileNotFoundException extends Exception { + public ConfigFileNotFoundException(String message) { + super(message); + } + + public ConfigFileNotFoundException(String message, Throwable cause) { + super(message, cause); + } +} diff --git a/zipkin-server/server-starter/src/main/java/zipkin/server/config/ConfigLoader.java b/zipkin-server/server-starter/src/main/java/zipkin/server/config/ConfigLoader.java new file mode 100644 index 00000000000..2a210b60f3a --- /dev/null +++ b/zipkin-server/server-starter/src/main/java/zipkin/server/config/ConfigLoader.java @@ -0,0 +1,23 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package zipkin.server.config; + +public interface ConfigLoader { + T load() throws ConfigFileNotFoundException; +} diff --git a/zipkin-server/server-starter/src/main/resources/application.yml b/zipkin-server/server-starter/src/main/resources/application.yml new file mode 100644 index 00000000000..2cc2bedc646 --- /dev/null +++ b/zipkin-server/server-starter/src/main/resources/application.yml @@ -0,0 +1,185 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +core: + selector: zipkin + zipkin: + # The max length of service + instance names should be less than 200 + serviceNameMaxLength: ${ZIPKIN_SERVICE_NAME_MAX_LENGTH:70} + # The period(in seconds) of refreshing the service cache. Default value is 10s. 
+ serviceCacheRefreshInterval: ${ZIPKIN_SERVICE_CACHE_REFRESH_INTERVAL:10} + instanceNameMaxLength: ${ZIPKIN_INSTANCE_NAME_MAX_LENGTH:70} + # The max length of service + endpoint names should be less than 240 + endpointNameMaxLength: ${ZIPKIN_ENDPOINT_NAME_MAX_LENGTH:150} + recordDataTTL: ${ZIPKIN_CORE_RECORD_DATA_TTL:3} # Unit is day + metricsDataTTL: ${ZIPKIN_CORE_METRICS_DATA_TTL:7} # Unit is day + # The period of L1 aggregation flush to L2 aggregation. Unit is ms. + l1FlushPeriod: ${ZIPKIN_CORE_L1_AGGREGATION_FLUSH_PERIOD:500} + # The threshold of session time. Unit is ms. Default value is 70s. + storageSessionTimeout: ${ZIPKIN_CORE_STORAGE_SESSION_TIMEOUT:70000} + +storage: + selector: ${ZIPKIN_STORAGE:h2} + elasticsearch: + namespace: ${ZIPKIN_NAMESPACE:""} + clusterNodes: ${ZIPKIN_STORAGE_ES_CLUSTER_NODES:localhost:9200} + protocol: ${ZIPKIN_STORAGE_ES_HTTP_PROTOCOL:"http"} + connectTimeout: ${ZIPKIN_STORAGE_ES_CONNECT_TIMEOUT:3000} + socketTimeout: ${ZIPKIN_STORAGE_ES_SOCKET_TIMEOUT:30000} + responseTimeout: ${ZIPKIN_STORAGE_ES_RESPONSE_TIMEOUT:15000} + numHttpClientThread: ${ZIPKIN_STORAGE_ES_NUM_HTTP_CLIENT_THREAD:0} + user: ${ZIPKIN_ES_USER:""} + password: ${ZIPKIN_ES_PASSWORD:""} + trustStorePath: ${ZIPKIN_STORAGE_ES_SSL_JKS_PATH:""} + trustStorePass: ${ZIPKIN_STORAGE_ES_SSL_JKS_PASS:""} + secretsManagementFile: ${ZIPKIN_ES_SECRETS_MANAGEMENT_FILE:""} # Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool. + dayStep: ${ZIPKIN_STORAGE_DAY_STEP:1} # Represent the number of days in the one minute/hour/day index. + indexShardsNumber: ${ZIPKIN_STORAGE_ES_INDEX_SHARDS_NUMBER:1} # Shard number of new indexes + indexReplicasNumber: ${ZIPKIN_STORAGE_ES_INDEX_REPLICAS_NUMBER:1} # Replicas number of new indexes + # Specify the settings for each index individually. + # If configured, this setting has the highest priority and overrides the generic settings. 
+ specificIndexSettings: ${ZIPKIN_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:""} + # Super data set has been defined in the codes, such as trace segments. The following 3 configs can improve ES performance when storing super-size data in ES. + superDatasetDayStep: ${ZIPKIN_STORAGE_ES_SUPER_DATASET_DAY_STEP:-1} # Represent the number of days in the super size dataset record index, the default value is the same as dayStep when the value is less than 0 + superDatasetIndexShardsFactor: ${ZIPKIN_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5} # This factor provides more shards for the super data set, shards number = indexShardsNumber * superDatasetIndexShardsFactor. Also, this factor affects Zipkin traces. + superDatasetIndexReplicasNumber: ${ZIPKIN_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0} # Represent the replicas number in the super size dataset record index, the default value is 0. + indexTemplateOrder: ${ZIPKIN_STORAGE_ES_INDEX_TEMPLATE_ORDER:0} # the order of index template + bulkActions: ${ZIPKIN_STORAGE_ES_BULK_ACTIONS:5000} # Execute the async bulk record data every ${ZIPKIN_STORAGE_ES_BULK_ACTIONS} requests + batchOfBytes: ${ZIPKIN_STORAGE_ES_BATCH_OF_BYTES:10485760} # A threshold to control the max body size of ElasticSearch Bulk flush. 
+ # flush the bulk every 5 seconds whatever the number of requests + flushInterval: ${ZIPKIN_STORAGE_ES_FLUSH_INTERVAL:5} + concurrentRequests: ${ZIPKIN_STORAGE_ES_CONCURRENT_REQUESTS:2} # the number of concurrent requests + resultWindowMaxSize: ${ZIPKIN_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000} + metadataQueryMaxSize: ${ZIPKIN_STORAGE_ES_QUERY_MAX_SIZE:10000} + scrollingBatchSize: ${ZIPKIN_STORAGE_ES_SCROLLING_BATCH_SIZE:5000} + segmentQueryMaxSize: ${ZIPKIN_STORAGE_ES_QUERY_SEGMENT_SIZE:200} + profileTaskQueryMaxSize: ${ZIPKIN_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200} + profileDataQueryBatchSize: ${ZIPKIN_STORAGE_ES_QUERY_PROFILE_DATA_BATCH_SIZE:100} + oapAnalyzer: ${ZIPKIN_STORAGE_ES_OAP_ANALYZER:"{\"analyzer\":{\"oap_analyzer\":{\"type\":\"stop\"}}}"} # the oap analyzer. + oapLogAnalyzer: ${ZIPKIN_STORAGE_ES_OAP_LOG_ANALYZER:"{\"analyzer\":{\"oap_log_analyzer\":{\"type\":\"standard\"}}}"} # the oap log analyzer. It could be customized by the ES analyzer configuration to support more language log formats, such as Chinese log, Japanese log and etc. + advanced: ${ZIPKIN_STORAGE_ES_ADVANCED:""} + # Enable shard metrics and records indices into multi-physical indices, one index template per metric/meter aggregation function or record. + logicSharding: ${ZIPKIN_STORAGE_ES_LOGIC_SHARDING:false} + # Custom routing can reduce the impact of searches. Instead of having to fan out a search request to all the shards in an index, the request can be sent to just the shard that matches the specific routing value (or values). 
+ enableCustomRouting: ${ZIPKIN_STORAGE_ES_ENABLE_CUSTOM_ROUTING:false} + h2: + properties: + jdbcUrl: ${ZIPKIN_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db;DB_CLOSE_DELAY=-1;DATABASE_TO_UPPER=FALSE} + dataSource.user: ${ZIPKIN_STORAGE_H2_USER:sa} + metadataQueryMaxSize: ${ZIPKIN_STORAGE_H2_QUERY_MAX_SIZE:5000} + maxSizeOfBatchSql: ${ZIPKIN_STORAGE_MAX_SIZE_OF_BATCH_SQL:100} + asyncBatchPersistentPoolSize: ${ZIPKIN_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:1} + mysql: + properties: + jdbcUrl: ${ZIPKIN_JDBC_URL:"jdbc:mysql://localhost:3306/swtest?rewriteBatchedStatements=true&allowMultiQueries=true"} + dataSource.user: ${ZIPKIN_DATA_SOURCE_USER:root} + dataSource.password: ${ZIPKIN_DATA_SOURCE_PASSWORD:root@1234} + dataSource.cachePrepStmts: ${ZIPKIN_DATA_SOURCE_CACHE_PREP_STMTS:true} + dataSource.prepStmtCacheSize: ${ZIPKIN_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250} + dataSource.prepStmtCacheSqlLimit: ${ZIPKIN_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048} + dataSource.useServerPrepStmts: ${ZIPKIN_DATA_SOURCE_USE_SERVER_PREP_STMTS:true} + metadataQueryMaxSize: ${ZIPKIN_STORAGE_MYSQL_QUERY_MAX_SIZE:5000} + maxSizeOfBatchSql: ${ZIPKIN_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000} + asyncBatchPersistentPoolSize: ${ZIPKIN_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4} + postgresql: + properties: + jdbcUrl: ${ZIPKIN_JDBC_URL:"jdbc:postgresql://localhost:5432/skywalking"} + dataSource.user: ${ZIPKIN_DATA_SOURCE_USER:postgres} + dataSource.password: ${ZIPKIN_DATA_SOURCE_PASSWORD:123456} + dataSource.cachePrepStmts: ${ZIPKIN_DATA_SOURCE_CACHE_PREP_STMTS:true} + dataSource.prepStmtCacheSize: ${ZIPKIN_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250} + dataSource.prepStmtCacheSqlLimit: ${ZIPKIN_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048} + dataSource.useServerPrepStmts: ${ZIPKIN_DATA_SOURCE_USE_SERVER_PREP_STMTS:true} + metadataQueryMaxSize: ${ZIPKIN_STORAGE_MYSQL_QUERY_MAX_SIZE:5000} + maxSizeOfBatchSql: ${ZIPKIN_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000} + asyncBatchPersistentPoolSize: 
${ZIPKIN_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4} + banyandb: + host: ${ZIPKIN_STORAGE_BANYANDB_HOST:127.0.0.1} + port: ${ZIPKIN_STORAGE_BANYANDB_PORT:17912} + maxBulkSize: ${ZIPKIN_STORAGE_BANYANDB_MAX_BULK_SIZE:5000} + flushInterval: ${ZIPKIN_STORAGE_BANYANDB_FLUSH_INTERVAL:15} + metricsShardsNumber: ${ZIPKIN_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER:1} + recordShardsNumber: ${ZIPKIN_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER:1} + superDatasetShardsFactor: ${ZIPKIN_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR:2} + concurrentWriteThreads: ${ZIPKIN_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS:15} + profileTaskQueryMaxSize: ${ZIPKIN_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE:200} # the max number of fetch task in a request + blockIntervalHours: ${ZIPKIN_STORAGE_BANYANDB_BLOCK_INTERVAL_HOURS:24} # Unit is hour + segmentIntervalDays: ${ZIPKIN_STORAGE_BANYANDB_SEGMENT_INTERVAL_DAYS:1} # Unit is day + superDatasetBlockIntervalHours: ${ZIPKIN_STORAGE_BANYANDB_SUPER_DATASET_BLOCK_INTERVAL_HOURS:4} # Unit is hour + superDatasetSegmentIntervalDays: ${ZIPKIN_STORAGE_BANYANDB_SUPER_DATASET_SEGMENT_INTERVAL_DAYS:1} # Unit is day + specificGroupSettings: ${ZIPKIN_STORAGE_BANYANDB_SPECIFIC_GROUP_SETTINGS:""} # For example, {"group1": {"blockIntervalHours": 4, "segmentIntervalDays": 1}} + +receiver-zipkin: + selector: ${ZIPKIN_RECEIVER_ZIPKIN:default} + default: + # Defines a set of span tag keys which are searchable. + # The max length of key=value should be less than 256 or will be dropped. 
+ searchableTracesTags: ${ZIPKIN_ZIPKIN_SEARCHABLE_TAG_KEYS:http.method} + # The sample rate precision is 1/10000, should be between 0 and 10000 + sampleRate: ${ZIPKIN_ZIPKIN_SAMPLE_RATE:10000} + ## The below configs are for the OAP to collect Zipkin traces over HTTP + enableHttpCollector: ${ZIPKIN_ZIPKIN_HTTP_COLLECTOR_ENABLED:true} + restHost: ${ZIPKIN_RECEIVER_ZIPKIN_REST_HOST:0.0.0.0} + restPort: ${ZIPKIN_RECEIVER_ZIPKIN_REST_PORT:9411} + restContextPath: ${ZIPKIN_RECEIVER_ZIPKIN_REST_CONTEXT_PATH:/} + restMaxThreads: ${ZIPKIN_RECEIVER_ZIPKIN_REST_MAX_THREADS:200} + restIdleTimeOut: ${ZIPKIN_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT:30000} + restAcceptQueueSize: ${ZIPKIN_RECEIVER_ZIPKIN_REST_QUEUE_SIZE:0} + ## The below configs are for the OAP to collect Zipkin traces from Kafka + enableKafkaCollector: ${ZIPKIN_ZIPKIN_KAFKA_COLLECTOR_ENABLED:false} + kafkaBootstrapServers: ${ZIPKIN_ZIPKIN_KAFKA_SERVERS:localhost:9092} + kafkaGroupId: ${ZIPKIN_ZIPKIN_KAFKA_GROUP_ID:zipkin} + kafkaTopic: ${ZIPKIN_ZIPKIN_KAFKA_TOPIC:zipkin} + # Kafka consumer config, JSON format as Properties. If it contains the same key as above, it overrides. 
+ kafkaConsumerConfig: ${ZIPKIN_ZIPKIN_KAFKA_CONSUMER_CONFIG:"{\"auto.offset.reset\":\"earliest\",\"enable.auto.commit\":true}"} + # The Count of the topic consumers + kafkaConsumers: ${ZIPKIN_ZIPKIN_KAFKA_CONSUMERS:1} + kafkaHandlerThreadPoolSize: ${ZIPKIN_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE:-1} + kafkaHandlerThreadPoolQueueSize: ${ZIPKIN_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE:-1} + +## This module is for Zipkin query API and support zipkin-lens UI +query-zipkin: + selector: ${ZIPKIN_QUERY_ZIPKIN:default} + default: + # For HTTP server + restHost: ${ZIPKIN_QUERY_ZIPKIN_REST_HOST:0.0.0.0} + restPort: ${ZIPKIN_QUERY_ZIPKIN_REST_PORT:9412} + restContextPath: ${ZIPKIN_QUERY_ZIPKIN_REST_CONTEXT_PATH:/zipkin} + restMaxThreads: ${ZIPKIN_QUERY_ZIPKIN_REST_MAX_THREADS:200} + restIdleTimeOut: ${ZIPKIN_QUERY_ZIPKIN_REST_IDLE_TIMEOUT:30000} + restAcceptQueueSize: ${ZIPKIN_QUERY_ZIPKIN_REST_QUEUE_SIZE:0} + # Default look back for traces and autocompleteTags, 1 day in millis + lookback: ${ZIPKIN_QUERY_ZIPKIN_LOOKBACK:86400000} + # The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNames + namesMaxAge: ${ZIPKIN_QUERY_ZIPKIN_NAMES_MAX_AGE:300} + ## The below config are OAP support for zipkin-lens UI + # Default traces query max size + uiQueryLimit: ${ZIPKIN_QUERY_ZIPKIN_UI_QUERY_LIMIT:10} + # Default look back on the UI for search traces, 15 minutes in millis + uiDefaultLookback: ${ZIPKIN_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK:900000} + +telemetry: + selector: ${ZIPKIN_TELEMETRY:none} + none: + prometheus: + host: ${ZIPKIN_TELEMETRY_PROMETHEUS_HOST:0.0.0.0} + port: ${ZIPKIN_TELEMETRY_PROMETHEUS_PORT:1234} + sslEnabled: ${ZIPKIN_TELEMETRY_PROMETHEUS_SSL_ENABLED:false} + sslKeyPath: ${ZIPKIN_TELEMETRY_PROMETHEUS_SSL_KEY_PATH:""} + sslCertChainPath: ${ZIPKIN_TELEMETRY_PROMETHEUS_SSL_CERT_CHAIN_PATH:""} + +cluster: + selector: standalone + standalone: \ No newline at end of file diff --git a/zipkin-server/server-starter/src/main/resources/log4j2.xml 
b/zipkin-server/server-starter/src/main/resources/log4j2.xml new file mode 100644 index 00000000000..58ea7479d55 --- /dev/null +++ b/zipkin-server/server-starter/src/main/resources/log4j2.xml @@ -0,0 +1,43 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/zipkin-server/src/main/java/zipkin/server/EnableZipkinServer.java b/zipkin-server/src/main/java/zipkin/server/EnableZipkinServer.java deleted file mode 100644 index b50c00cbf76..00000000000 --- a/zipkin-server/src/main/java/zipkin/server/EnableZipkinServer.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin.server; - -import java.lang.annotation.Documented; -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; -import org.springframework.context.annotation.Import; -import zipkin2.server.internal.InternalZipkinConfiguration; - -/** - * @deprecated Custom servers are possible, but not supported by the community. Please use our - * default server build first. - * If you find something missing, please gitter us - * about it before making a custom server. - * - *

If you decide to make a custom server, you accept responsibility for troubleshooting your - * build or configuration problems, even if such problems are a reaction to a change made by the - * Zipkin maintainers. In other words, custom servers are possible, but not supported. - */ -@Target(ElementType.TYPE) -@Retention(RetentionPolicy.RUNTIME) -@Documented -@Import(InternalZipkinConfiguration.class) -@Deprecated -public @interface EnableZipkinServer { - -} diff --git a/zipkin-server/src/main/java/zipkin/server/ZipkinServer.java b/zipkin-server/src/main/java/zipkin/server/ZipkinServer.java deleted file mode 100644 index 50acebb127f..00000000000 --- a/zipkin-server/src/main/java/zipkin/server/ZipkinServer.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin.server; - -import org.slf4j.bridge.SLF4JBridgeHandler; -import org.springframework.boot.SpringBootConfiguration; -import org.springframework.boot.autoconfigure.EnableAutoConfiguration; -import org.springframework.boot.builder.SpringApplicationBuilder; -import zipkin2.server.internal.EnableZipkinServer; -import zipkin2.server.internal.ZipkinActuatorImporter; -import zipkin2.server.internal.ZipkinModuleImporter; -import zipkin2.server.internal.banner.ZipkinBanner; - -/** - * This adds the {@link EnableAutoConfiguration} annotation, but disables it by default to save - * startup time. - * - *

Supported Zipkin modules like zipkin-gcp need to explicitly configure themselves. - * - *

For example, add the following to {@code src/main/resources/zipkin-server-stackdriver.yml}: - *

{@code
- * zipkin:
- *   internal:
- *     module:
- *       stackdriver: zipkin.module.storage.stackdriver.ZipkinStackdriverStorageModule
- * }
- */ -@SpringBootConfiguration -@EnableAutoConfiguration -@EnableZipkinServer -public class ZipkinServer { - static { - SLF4JBridgeHandler.removeHandlersForRootLogger(); - SLF4JBridgeHandler.install(); - } - - public static void main(String[] args) { - new SpringApplicationBuilder(ZipkinServer.class) - .banner(new ZipkinBanner()) - .initializers(new ZipkinModuleImporter(), new ZipkinActuatorImporter()) - // Avoids potentially expensive DNS lookup and inaccurate startup timing - .logStartupInfo(false) - .properties( - EnableAutoConfiguration.ENABLED_OVERRIDE_PROPERTY + "=false", - "spring.config.name=zipkin-server").run(args); - } -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/BodyIsExceptionMessage.java b/zipkin-server/src/main/java/zipkin2/server/internal/BodyIsExceptionMessage.java deleted file mode 100644 index 8dbca63887f..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/BodyIsExceptionMessage.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal; - -import com.linecorp.armeria.common.HttpMethod; -import com.linecorp.armeria.common.HttpRequest; -import com.linecorp.armeria.common.HttpResponse; -import com.linecorp.armeria.server.ServiceRequestContext; -import com.linecorp.armeria.server.annotation.ExceptionHandlerFunction; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import static com.linecorp.armeria.common.HttpStatus.BAD_REQUEST; -import static com.linecorp.armeria.common.HttpStatus.INTERNAL_SERVER_ERROR; -import static com.linecorp.armeria.common.MediaType.ANY_TEXT_TYPE; - -final class BodyIsExceptionMessage implements ExceptionHandlerFunction { - static final Logger LOGGER = LoggerFactory.getLogger(BodyIsExceptionMessage.class); - @Override - public HttpResponse handleException(ServiceRequestContext ctx, HttpRequest req, Throwable cause) { - if (req.method() == HttpMethod.POST && req.path().startsWith("/api/v")) { - ZipkinHttpCollector.metrics.incrementMessagesDropped(); - } - - String message = cause.getMessage(); - if (message == null) message = cause.getClass().getSimpleName(); - if (cause instanceof IllegalArgumentException) { - return HttpResponse.of(BAD_REQUEST, ANY_TEXT_TYPE, message); - } else { - LOGGER.warn("Unexpected error handling request.", cause); - - return HttpResponse.of(INTERNAL_SERVER_ERROR, ANY_TEXT_TYPE, message); - } - } -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/ConditionalOnSelfTracing.java b/zipkin-server/src/main/java/zipkin2/server/internal/ConditionalOnSelfTracing.java deleted file mode 100644 index dcc218d61d4..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/ConditionalOnSelfTracing.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.server.internal; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; -import org.springframework.boot.autoconfigure.condition.ConditionOutcome; -import org.springframework.boot.autoconfigure.condition.SpringBootCondition; -import org.springframework.context.annotation.ConditionContext; -import org.springframework.context.annotation.Conditional; -import org.springframework.core.annotation.AnnotationAttributes; -import org.springframework.core.type.AnnotatedTypeMetadata; - -/** - * This helps solve a number of problems including the following, which sometimes only break in an - * alpine JRE. The solution is to go with pure properties instead. - * - *
- * 

ConditionalOnClass(name = "brave.Tracing") - *

ConditionalOnBean(Brave.class) - *

- */ -@Conditional(ConditionalOnSelfTracing.SelfTracingCondition.class) -@Retention(RetentionPolicy.RUNTIME) -@Target({ElementType.TYPE, ElementType.METHOD}) -public @interface ConditionalOnSelfTracing { - String storageType() default ""; - - class SelfTracingCondition extends SpringBootCondition { - static final boolean BRAVE_PRESENT = checkForBrave(); - - static boolean checkForBrave() { - try { - Class.forName("brave.Tracing"); - return true; - } catch (ClassNotFoundException e) { - return false; - } - } - - @Override - public ConditionOutcome getMatchOutcome(ConditionContext context, AnnotatedTypeMetadata a) { - if (!BRAVE_PRESENT) { - return ConditionOutcome.noMatch("Brave must be in the classpath"); - } - - String selfTracingEnabled = context.getEnvironment() - .getProperty("zipkin.self-tracing.enabled"); - - if (!Boolean.valueOf(selfTracingEnabled)) { - return ConditionOutcome.noMatch("zipkin.self-tracing.enabled isn't true"); - } - - String expectedStorageType = AnnotationAttributes.fromMap( - a.getAnnotationAttributes(ConditionalOnSelfTracing.class.getName()) - ).getString("storageType"); - - if (expectedStorageType.equals("")) { - return ConditionOutcome.match(); - } - - String storageType = context.getEnvironment().getProperty("zipkin.storage.type"); - return expectedStorageType.equals(storageType) ? 
- ConditionOutcome.match() : - ConditionOutcome.noMatch( - "zipkin.storage.type was: " + storageType + " expected " + expectedStorageType); - } - } -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/ConditionalOnThrottledStorage.java b/zipkin-server/src/main/java/zipkin2/server/internal/ConditionalOnThrottledStorage.java deleted file mode 100644 index 542f629ccaf..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/ConditionalOnThrottledStorage.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; -import org.springframework.boot.autoconfigure.condition.ConditionOutcome; -import org.springframework.boot.autoconfigure.condition.SpringBootCondition; -import org.springframework.context.annotation.ConditionContext; -import org.springframework.context.annotation.Conditional; -import org.springframework.core.type.AnnotatedTypeMetadata; - -@Conditional(ConditionalOnThrottledStorage.ThrottledStorageCondition.class) -@Retention(RetentionPolicy.RUNTIME) -@Target({ElementType.TYPE, ElementType.METHOD}) -@interface ConditionalOnThrottledStorage { - class ThrottledStorageCondition extends SpringBootCondition { - @Override - public ConditionOutcome getMatchOutcome(ConditionContext context, AnnotatedTypeMetadata a) { - String throttleEnabled = context.getEnvironment() - .getProperty("zipkin.storage.throttle.enabled"); - - if (!Boolean.valueOf(throttleEnabled)) { - return ConditionOutcome.noMatch("zipkin.storage.throttle.enabled isn't true"); - } - - return ConditionOutcome.match(); - } - } -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/EnableZipkinServer.java b/zipkin-server/src/main/java/zipkin2/server/internal/EnableZipkinServer.java deleted file mode 100644 index f7766a6a4a5..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/EnableZipkinServer.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.server.internal; - -import java.lang.annotation.Documented; -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; -import org.springframework.context.annotation.Import; - -@Target(ElementType.TYPE) -@Retention(RetentionPolicy.RUNTIME) -@Documented -@Import(InternalZipkinConfiguration.class) -public @interface EnableZipkinServer { - -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/InternalZipkinConfiguration.java b/zipkin-server/src/main/java/zipkin2/server/internal/InternalZipkinConfiguration.java deleted file mode 100644 index a9090f1a947..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/InternalZipkinConfiguration.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal; - -import com.linecorp.armeria.spring.ArmeriaAutoConfiguration; -import org.springframework.context.annotation.Import; -import zipkin2.server.internal.activemq.ZipkinActiveMQCollectorConfiguration; -import zipkin2.server.internal.brave.ZipkinSelfTracingConfiguration; -import zipkin2.server.internal.cassandra3.ZipkinCassandra3StorageConfiguration; -import zipkin2.server.internal.elasticsearch.ZipkinElasticsearchStorageConfiguration; -import zipkin2.server.internal.health.ZipkinHealthController; -import zipkin2.server.internal.kafka.ZipkinKafkaCollectorConfiguration; -import zipkin2.server.internal.mysql.ZipkinMySQLStorageConfiguration; -import zipkin2.server.internal.prometheus.ZipkinMetricsController; -import zipkin2.server.internal.prometheus.ZipkinPrometheusMetricsConfiguration; -import zipkin2.server.internal.rabbitmq.ZipkinRabbitMQCollectorConfiguration; -import zipkin2.server.internal.scribe.ZipkinScribeCollectorConfiguration; -import zipkin2.server.internal.ui.ZipkinUiConfiguration; - -@Import({ - ArmeriaAutoConfiguration.class, - ZipkinConfiguration.class, - ZipkinHttpConfiguration.class, - ZipkinUiConfiguration.class, - ZipkinCassandra3StorageConfiguration.class, - ZipkinElasticsearchStorageConfiguration.class, - ZipkinMySQLStorageConfiguration.class, - ZipkinScribeCollectorConfiguration.class, - ZipkinSelfTracingConfiguration.class, - ZipkinQueryApiV2.class, - ZipkinHttpCollector.class, - ZipkinGrpcCollector.class, - ZipkinActiveMQCollectorConfiguration.class, - ZipkinKafkaCollectorConfiguration.class, - ZipkinRabbitMQCollectorConfiguration.class, - ZipkinMetricsController.class, - ZipkinHealthController.class, - ZipkinPrometheusMetricsConfiguration.class -}) -public class InternalZipkinConfiguration { -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/JsonUtil.java b/zipkin-server/src/main/java/zipkin2/server/internal/JsonUtil.java deleted file mode 100644 index cf8b7a5ac30..00000000000 --- 
a/zipkin-server/src/main/java/zipkin2/server/internal/JsonUtil.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.server.internal; - -import com.fasterxml.jackson.core.JsonFactory; -import com.fasterxml.jackson.core.JsonGenerator; -import com.fasterxml.jackson.core.util.DefaultIndenter; -import com.fasterxml.jackson.core.util.DefaultPrettyPrinter; -import java.io.IOException; -import java.io.Writer; - -/** - * Utilities for working with JSON. - */ -public final class JsonUtil { - - static final JsonFactory JSON_FACTORY = new JsonFactory(); - static final DefaultPrettyPrinter.Indenter TWOSPACES_LF_INDENTER = - new DefaultIndenter(" ", "\n"); - - /** - * Creates a new {@link JsonGenerator} with pretty-printing enabled forcing {@code '\n'} - * between lines, as opposed to Jackson's default which uses the system line separator. 
- */ - public static JsonGenerator createGenerator(Writer writer) throws IOException { - JsonGenerator generator = JSON_FACTORY.createGenerator(writer); - DefaultPrettyPrinter prettyPrinter = new DefaultPrettyPrinter(); - prettyPrinter.indentArraysWith(TWOSPACES_LF_INDENTER); - prettyPrinter.indentObjectsWith(TWOSPACES_LF_INDENTER); - generator.setPrettyPrinter(prettyPrinter); - return generator; - } -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/MicrometerCollectorMetrics.java b/zipkin-server/src/main/java/zipkin2/server/internal/MicrometerCollectorMetrics.java deleted file mode 100644 index e599302ea8c..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/MicrometerCollectorMetrics.java +++ /dev/null @@ -1,144 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.server.internal; - -import io.micrometer.core.instrument.Counter; -import io.micrometer.core.instrument.Gauge; -import io.micrometer.core.instrument.MeterRegistry; -import java.util.concurrent.atomic.AtomicInteger; -import zipkin2.collector.CollectorMetrics; -import zipkin2.internal.Nullable; - -/** - * This is a simple metric service that exports the following to the "/metrics" endpoint: - * - *
- * 
    - *
  • counter.zipkin_collector.messages.$transport - cumulative messages received; should - * relate to messages reported by instrumented apps
  • - *
  • counter.zipkin_collector.messages_dropped.$transport - cumulative messages dropped; - * reasons include client disconnects or malformed content
  • - *
  • counter.zipkin_collector.bytes.$transport - cumulative message bytes
  • - *
  • counter.zipkin_collector.spans.$transport - cumulative spans read; should relate to - * messages reported by instrumented apps
  • - *
  • counter.zipkin_collector.spans_dropped.$transport - cumulative spans dropped; reasons - * include sampling or storage failures
  • - *
  • gauge.zipkin_collector.message_spans.$transport - last count of spans in a message
  • - *
  • gauge.zipkin_collector.message_bytes.$transport - last count of bytes in a message
  • - *
- *
- * - * See https://docs.spring.io/spring-boot/docs/current/reference/html/production-ready-metrics.html - */ -public final class MicrometerCollectorMetrics implements CollectorMetrics { - final MeterRegistry registryInstance; - final Counter messages, messagesDropped, bytes, spans, spansDropped; - final AtomicInteger messageBytes, messageSpans; - - public MicrometerCollectorMetrics(MeterRegistry registry) { - this(null, registry); - } - - MicrometerCollectorMetrics(@Nullable String transport, MeterRegistry meterRegistry) { - this.registryInstance = meterRegistry; - if (transport == null) { - messages = messagesDropped = bytes = spans = spansDropped = null; - messageBytes = messageSpans = null; - return; - } - this.messages = - Counter.builder("zipkin_collector.messages") - .description("cumulative amount of messages received") - .tag("transport", transport) - .register(registryInstance); - this.messagesDropped = - Counter.builder("zipkin_collector.messages_dropped") - .description("cumulative amount of messages received that were later dropped") - .tag("transport", transport) - .register(registryInstance); - - this.bytes = - Counter.builder("zipkin_collector.bytes") - .description("cumulative amount of bytes received") - .tag("transport", transport) - .baseUnit("bytes") - .register(registryInstance); - this.spans = - Counter.builder("zipkin_collector.spans") - .description("cumulative amount of spans received") - .tag("transport", transport) - .register(registryInstance); - this.spansDropped = - Counter.builder("zipkin_collector.spans_dropped") - .description("cumulative amount of spans received that were later dropped") - .tag("transport", transport) - .register(registryInstance); - - this.messageSpans = new AtomicInteger(0); - Gauge.builder("zipkin_collector.message_spans", messageSpans, AtomicInteger::get) - .description("count of spans per message") - .tag("transport", transport) - .register(registryInstance); - this.messageBytes = new AtomicInteger(0); - 
Gauge.builder("zipkin_collector.message_bytes", messageBytes, AtomicInteger::get) - .description("size of a message containing serialized spans") - .tag("transport", transport) - .baseUnit("bytes") - .register(registryInstance); - } - - @Override - public MicrometerCollectorMetrics forTransport(String transportType) { - if (transportType == null) throw new NullPointerException("transportType == null"); - return new MicrometerCollectorMetrics(transportType, registryInstance); - } - - @Override - public void incrementMessages() { - checkScoped(); - messages.increment(); - } - - @Override - public void incrementMessagesDropped() { - checkScoped(); - messagesDropped.increment(); - } - - @Override - public void incrementSpans(int quantity) { - checkScoped(); - messageSpans.set(quantity); - spans.increment(quantity); - } - - @Override - public void incrementBytes(int quantity) { - checkScoped(); - messageBytes.set(quantity); - bytes.increment(quantity); - } - - @Override - public void incrementSpansDropped(int quantity) { - checkScoped(); - spansDropped.increment(quantity); - } - - void checkScoped() { - if (messages == null) { - throw new IllegalStateException("always scope with ActuateCollectorMetrics.forTransport"); - } - } -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/WrappingExecutorService.java b/zipkin-server/src/main/java/zipkin2/server/internal/WrappingExecutorService.java deleted file mode 100644 index 5b1163e5256..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/WrappingExecutorService.java +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.server.internal; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.concurrent.Callable; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -/** Used to implement a context propagating executor service which wraps tasks */ -// copy/pasted from Brave -public abstract class WrappingExecutorService implements ExecutorService { - protected WrappingExecutorService() { - } - - protected abstract ExecutorService delegate(); - - protected abstract Callable wrap(Callable task); - - protected abstract Runnable wrap(Runnable task); - - @Override public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException { - return delegate().awaitTermination(timeout, unit); - } - - @Override public List> invokeAll(Collection> tasks) - throws InterruptedException { - return delegate().invokeAll(wrap(tasks)); - } - - @Override public List> invokeAll(Collection> tasks, long timeout, - TimeUnit unit) throws InterruptedException { - return delegate().invokeAll(wrap(tasks), timeout, unit); - } - - @Override public T invokeAny(Collection> tasks) - throws InterruptedException, ExecutionException { - return delegate().invokeAny(wrap(tasks)); - } - - @Override public T invokeAny(Collection> tasks, long timeout, TimeUnit unit) - throws InterruptedException, ExecutionException, TimeoutException { - return 
delegate().invokeAny(wrap(tasks), timeout, unit); - } - - @Override public boolean isShutdown() { - return delegate().isShutdown(); - } - - @Override public boolean isTerminated() { - return delegate().isTerminated(); - } - - @Override public void shutdown() { - delegate().shutdown(); - } - - @Override public List shutdownNow() { - return delegate().shutdownNow(); - } - - @Override public void execute(Runnable task) { - delegate().execute(wrap(task)); - } - - @Override public Future submit(Callable task) { - return delegate().submit(wrap(task)); - } - - @Override public Future submit(Runnable task) { - return delegate().submit(wrap(task)); - } - - @Override public Future submit(Runnable task, T result) { - return delegate().submit(wrap(task), result); - } - - Collection> wrap(Collection> tasks) { - ArrayList> result = new ArrayList<>(tasks.size()); - for (Callable task : tasks) { - result.add(wrap(task)); - } - return result; - } -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/ZipkinActuatorImporter.java b/zipkin-server/src/main/java/zipkin2/server/internal/ZipkinActuatorImporter.java deleted file mode 100644 index 8af33d2e19a..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/ZipkinActuatorImporter.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal; - -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.springframework.boot.context.properties.bind.Binder; -import org.springframework.context.ApplicationContextInitializer; -import org.springframework.context.annotation.ImportSelector; -import org.springframework.context.support.GenericApplicationContext; -import org.springframework.core.env.ConfigurableEnvironment; - -/** - * When auto-configuration is enabled, actuator and all of its subordinate endpoints such as {@code - * BeansEndpointAutoConfiguration} load, subject to further conditions like {@code - * management.endpoint.health.enabled=false}. When auto-configuration is disabled, these - * configuration discovered indirectly via {@code META-INF/spring.factories} are no longer loaded. - * This type helps load the actuator functionality we currently support without a compilation - * dependency on actuator, and without relying on auto-configuration being enabled. - * - *

Implementation note

- *

It may be possible to re-implement this as {@link ImportSelector} to provide {@link - * #ACTUATOR_IMPL_CLASS} and the endpoint configuration types from {@link - * #PROPERTY_NAME_ACTUATOR_INCLUDE}. - */ -// look at RATIONALE.md and update if relevant when changing this file -public final class ZipkinActuatorImporter - implements ApplicationContextInitializer { - static final Logger LOG = LoggerFactory.getLogger(ZipkinActuatorImporter.class); - - static final String ACTUATOR_IMPL_CLASS = - "com.linecorp.armeria.spring.actuate.ArmeriaSpringActuatorAutoConfiguration"; - static final String PROPERTY_NAME_ACTUATOR_ENABLED = "zipkin.internal.actuator.enabled"; - static final String PROPERTY_NAME_ACTUATOR_INCLUDE = "zipkin.internal.actuator.include"; - - final String actuatorImplClass; - - public ZipkinActuatorImporter() { - this(ACTUATOR_IMPL_CLASS); - } - - ZipkinActuatorImporter(String actuatorImplClass) { // visible for testing - this.actuatorImplClass = actuatorImplClass; - } - - @Override public void initialize(GenericApplicationContext context) { - ConfigurableEnvironment env = context.getEnvironment(); - if ("false".equalsIgnoreCase(env.getProperty(PROPERTY_NAME_ACTUATOR_ENABLED))) { - LOG.debug("skipping actuator as it is disabled"); - return; - } - - // At this point in the life-cycle, env can directly resolve plain properties, like the boolean - // above. If you tried to resolve a property bound by a yaml list, it returns null, as they are - // not yet bound. - // - // As we are in a configurable environment, we can bind lists properties. We expect this to take - // includes from PROPERTY_NAME_ACTUATOR_INCLUDE yaml path of zipkin-server-shared.yml. 
- String[] includes = - Binder.get(env).bind(PROPERTY_NAME_ACTUATOR_INCLUDE, String[].class).orElse(null); - if (includes == null || includes.length == 0) { - LOG.debug("no actuator configuration found under path " + PROPERTY_NAME_ACTUATOR_INCLUDE); - return; - } - - LOG.debug("attempting to load actuator configuration: " + Arrays.toString(includes)); - try { - context.registerBean(Class.forName(actuatorImplClass)); - } catch (Exception e) { - LOG.debug("skipping actuator as implementation is not available", e); - return; - } - - for (String include : includes) { - try { - context.registerBean(Class.forName(include)); - } catch (Exception e) { - // Skip any classes that didn't match due to drift - LOG.debug("skipping unloadable actuator config " + include, e); - } - } - } -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/ZipkinConfiguration.java b/zipkin-server/src/main/java/zipkin2/server/internal/ZipkinConfiguration.java deleted file mode 100644 index d42e0aa69e0..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/ZipkinConfiguration.java +++ /dev/null @@ -1,147 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal; - -import brave.Tracing; -import io.micrometer.core.instrument.MeterRegistry; -import java.util.List; -import org.springframework.beans.BeansException; -import org.springframework.beans.factory.BeanFactory; -import org.springframework.beans.factory.BeanFactoryAware; -import org.springframework.beans.factory.annotation.Value; -import org.springframework.beans.factory.config.BeanPostProcessor; -import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean; -import org.springframework.boot.context.properties.EnableConfigurationProperties; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Condition; -import org.springframework.context.annotation.ConditionContext; -import org.springframework.context.annotation.Conditional; -import org.springframework.context.annotation.Import; -import org.springframework.core.type.AnnotatedTypeMetadata; -import zipkin2.collector.CollectorMetrics; -import zipkin2.collector.CollectorSampler; -import zipkin2.server.internal.brave.TracingStorageComponent; -import zipkin2.server.internal.throttle.ThrottledStorageComponent; -import zipkin2.server.internal.throttle.ZipkinStorageThrottleProperties; -import zipkin2.storage.InMemoryStorage; -import zipkin2.storage.StorageComponent; - -/** Base collector and storage configurations needed for higher-level integrations */ -@Import({ - ZipkinConfiguration.InMemoryConfiguration.class, - ZipkinConfiguration.ThrottledStorageComponentEnhancer.class, - ZipkinConfiguration.TracingStorageComponentEnhancer.class -}) -public class ZipkinConfiguration { - - @Bean CollectorSampler traceIdSampler(@Value("${zipkin.collector.sample-rate:1.0}") float rate) { - return CollectorSampler.create(rate); - } - - @Bean CollectorMetrics metrics(MeterRegistry registry) { - return new MicrometerCollectorMetrics(registry); - } - - @EnableConfigurationProperties(ZipkinStorageThrottleProperties.class) - 
@ConditionalOnThrottledStorage - static class ThrottledStorageComponentEnhancer implements BeanPostProcessor, BeanFactoryAware { - - /** - * Need this to resolve cyclic instantiation issue with spring when instantiating with metrics - * and tracing. - * - *

Ref: Tracking down cause of Spring's "not - * eligible for auto-proxying"

- */ - BeanFactory beanFactory; - - @Override public Object postProcessAfterInitialization(Object bean, String beanName) { - if (bean instanceof StorageComponent) { - ZipkinStorageThrottleProperties throttleProperties = - beanFactory.getBean(ZipkinStorageThrottleProperties.class); - return new ThrottledStorageComponent((StorageComponent) bean, - beanFactory.getBean(MeterRegistry.class), - beanFactory.containsBean("tracing") ? beanFactory.getBean(Tracing.class) : null, - throttleProperties.getMinConcurrency(), - throttleProperties.getMaxConcurrency(), - throttleProperties.getMaxQueueSize()); - } - return bean; - } - - @Override public void setBeanFactory(BeanFactory beanFactory) throws BeansException { - this.beanFactory = beanFactory; - } - } - - @ConditionalOnSelfTracing - static class TracingStorageComponentEnhancer implements BeanPostProcessor, BeanFactoryAware { - /** - * Need this to resolve cyclic instantiation issue with spring when instantiating with tracing. - * - *

Ref: Tracking down cause of Spring's "not - * eligible for auto-proxying"

- */ - BeanFactory beanFactory; - - @Override public Object postProcessBeforeInitialization(Object bean, String beanName) { - return bean; - } - - @Override public Object postProcessAfterInitialization(Object bean, String beanName) { - if (bean instanceof StorageComponent && beanFactory.containsBean("tracing")) { - Tracing tracing = beanFactory.getBean(Tracing.class); - return new TracingStorageComponent(tracing, (StorageComponent) bean); - } - return bean; - } - - @Override public void setBeanFactory(BeanFactory beanFactory) throws BeansException { - this.beanFactory = beanFactory; - } - } - - /** - * This is a special-case configuration if there's no StorageComponent of any kind. In-Mem can - * supply both read apis, so we add two beans here. - */ - @Conditional(StorageTypeMemAbsentOrEmpty.class) - @ConditionalOnMissingBean(StorageComponent.class) - static class InMemoryConfiguration { - @Bean - StorageComponent storage( - @Value("${zipkin.storage.strict-trace-id:true}") boolean strictTraceId, - @Value("${zipkin.storage.search-enabled:true}") boolean searchEnabled, - @Value("${zipkin.storage.mem.max-spans:500000}") int maxSpans, - @Value("${zipkin.storage.autocomplete-keys:}") List autocompleteKeys) { - return InMemoryStorage.newBuilder() - .strictTraceId(strictTraceId) - .searchEnabled(searchEnabled) - .maxSpanCount(maxSpans) - .autocompleteKeys(autocompleteKeys) - .build(); - } - } - - static final class StorageTypeMemAbsentOrEmpty implements Condition { - @Override - public boolean matches(ConditionContext condition, AnnotatedTypeMetadata ignored) { - String storageType = condition.getEnvironment().getProperty("zipkin.storage.type"); - if (storageType == null) return true; - storageType = storageType.trim(); - if (storageType.isEmpty()) return true; - return storageType.equals("mem"); - } - } -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/ZipkinGrpcCollector.java 
b/zipkin-server/src/main/java/zipkin2/server/internal/ZipkinGrpcCollector.java deleted file mode 100644 index f04b15e4840..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/ZipkinGrpcCollector.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Copyright 2015-2021 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.server.internal; - -import com.linecorp.armeria.common.CommonPools; -import com.linecorp.armeria.server.ServiceRequestContext; -import com.linecorp.armeria.server.grpc.protocol.AbstractUnsafeUnaryGrpcService; -import com.linecorp.armeria.spring.ArmeriaServerConfigurator; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.Unpooled; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.Executor; -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; -import org.springframework.context.annotation.Bean; -import zipkin2.Callback; -import zipkin2.codec.SpanBytesDecoder; -import zipkin2.collector.Collector; -import zipkin2.collector.CollectorMetrics; -import zipkin2.collector.CollectorSampler; -import zipkin2.storage.StorageComponent; - -/** Collector for receiving spans on a gRPC endpoint. 
*/ -@ConditionalOnProperty(name = "zipkin.collector.grpc.enabled") // disabled by default -final class ZipkinGrpcCollector { - - @Bean ArmeriaServerConfigurator grpcCollectorConfigurator(StorageComponent storage, - CollectorSampler sampler, CollectorMetrics metrics) { - CollectorMetrics grpcMetrics = metrics.forTransport("grpc"); - Collector collector = Collector.newBuilder(getClass()) - .storage(storage) - .sampler(sampler) - .metrics(grpcMetrics) - .build(); - - return sb -> - sb.service("/zipkin.proto3.SpanService/Report", new SpanService(collector, grpcMetrics)); - } - - static final class SpanService extends AbstractUnsafeUnaryGrpcService { - - final Collector collector; - final CollectorMetrics metrics; - - SpanService(Collector collector, CollectorMetrics metrics) { - this.collector = collector; - this.metrics = metrics; - } - - @Override protected CompletionStage handleMessage(ServiceRequestContext srCtx, ByteBuf bytes) { - metrics.incrementMessages(); - metrics.incrementBytes(bytes.readableBytes()); - - if (!bytes.isReadable()) { - return CompletableFuture.completedFuture(bytes); // lenient on empty messages - } - - try { - CompletableFutureCallback result = new CompletableFutureCallback(); - - // collector.accept might block so need to move off the event loop. We make sure the - // callback is context aware to continue the trace. 
- Executor executor = ServiceRequestContext.mapCurrent( - ctx -> ctx.makeContextAware(ctx.blockingTaskExecutor()), - CommonPools::blockingTaskExecutor); - - collector.acceptSpans(bytes.nioBuffer(), SpanBytesDecoder.PROTO3, result, executor); - - return result; - } finally { - bytes.release(); - } - } - } - - static final class CompletableFutureCallback extends CompletableFuture - implements Callback { - - @Override public void onSuccess(Void value) { - complete(Unpooled.EMPTY_BUFFER); - } - - @Override public void onError(Throwable t) { - completeExceptionally(t); - } - } -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/ZipkinHttpCollector.java b/zipkin-server/src/main/java/zipkin2/server/internal/ZipkinHttpCollector.java deleted file mode 100644 index 5545578675d..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/ZipkinHttpCollector.java +++ /dev/null @@ -1,261 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal; - -import com.linecorp.armeria.client.encoding.StreamDecoderFactory; -import com.linecorp.armeria.common.AggregatedHttpRequest; -import com.linecorp.armeria.common.HttpData; -import com.linecorp.armeria.common.HttpHeaderNames; -import com.linecorp.armeria.common.HttpRequest; -import com.linecorp.armeria.common.HttpResponse; -import com.linecorp.armeria.common.HttpStatus; -import com.linecorp.armeria.common.ResponseHeaders; -import com.linecorp.armeria.server.ServiceRequestContext; -import com.linecorp.armeria.server.annotation.Consumes; -import com.linecorp.armeria.server.annotation.ConsumesJson; -import com.linecorp.armeria.server.annotation.ExceptionHandler; -import com.linecorp.armeria.server.annotation.Post; -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; -import java.nio.ByteBuffer; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.Executor; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; -import zipkin2.Callback; -import zipkin2.Span; -import zipkin2.SpanBytesDecoderDetector; -import zipkin2.codec.BytesDecoder; -import zipkin2.codec.SpanBytesDecoder; -import zipkin2.collector.Collector; -import zipkin2.collector.CollectorMetrics; -import zipkin2.collector.CollectorSampler; -import zipkin2.storage.StorageComponent; - -import static zipkin2.Call.propagateIfFatal; - -@ConditionalOnProperty(name = "zipkin.collector.http.enabled", matchIfMissing = true) -@ExceptionHandler(BodyIsExceptionMessage.class) -public class ZipkinHttpCollector { - static final Logger LOGGER = LoggerFactory.getLogger(ZipkinHttpCollector.class); - static volatile CollectorMetrics metrics; - final Collector collector; - - @SuppressWarnings("StaticAssignmentInConstructor") - ZipkinHttpCollector( - StorageComponent storage, 
CollectorSampler sampler, CollectorMetrics metrics) { - metrics = metrics.forTransport("http"); - collector = - Collector.newBuilder(getClass()).storage(storage).sampler(sampler).metrics(metrics).build(); - ZipkinHttpCollector.metrics = metrics; // converter instances aren't injected by Spring - } - - @Post("/api/v2/spans") - public HttpResponse uploadSpans(ServiceRequestContext ctx, HttpRequest req) { - return validateAndStoreSpans(SpanBytesDecoder.JSON_V2, ctx, req); - } - - @Post("/api/v2/spans") - @ConsumesJson - public HttpResponse uploadSpansJson(ServiceRequestContext ctx, HttpRequest req) { - return validateAndStoreSpans(SpanBytesDecoder.JSON_V2, ctx, req); - } - - @Post("/api/v2/spans") - @ConsumesProtobuf - public HttpResponse uploadSpansProtobuf(ServiceRequestContext ctx, HttpRequest req) { - return validateAndStoreSpans(SpanBytesDecoder.PROTO3, ctx, req); - } - - @Post("/api/v1/spans") - public HttpResponse uploadSpansV1(ServiceRequestContext ctx, HttpRequest req) { - return validateAndStoreSpans(SpanBytesDecoder.JSON_V1, ctx, req); - } - - @Post("/api/v1/spans") - @ConsumesJson - public HttpResponse uploadSpansV1Json(ServiceRequestContext ctx, HttpRequest req) { - return validateAndStoreSpans(SpanBytesDecoder.JSON_V1, ctx, req); - } - - @Post("/api/v1/spans") - @ConsumesThrift - public HttpResponse uploadSpansV1Thrift(ServiceRequestContext ctx, HttpRequest req) { - return validateAndStoreSpans(SpanBytesDecoder.THRIFT, ctx, req); - } - - /** This synchronously decodes the message so that users can see data errors. */ - @SuppressWarnings("FutureReturnValueIgnored") - // TODO: errorprone wants us to check this future before returning, but what would be a sensible - // check? Say it is somehow canceled, would we take action? Would callback.onError() be redundant? 
- HttpResponse validateAndStoreSpans(SpanBytesDecoder decoder, ServiceRequestContext ctx, - HttpRequest req) { - CompletableCallback result = new CompletableCallback(); - - req.aggregateWithPooledObjects(ctx.eventLoop(), ctx.alloc()).handle((msg, t) -> { - if (t != null) { - result.onError(t); - return null; - } - - final HttpData requestContent; - try { - requestContent = UnzippingBytesRequestConverter.convertRequest(ctx, msg); - } catch (Throwable t1) { - propagateIfFatal(t1); - result.onError(t1); - return null; - } - - try (HttpData content = requestContent) { - // logging already handled upstream in UnzippingBytesRequestConverter where request context exists - if (content.isEmpty()) { - result.onSuccess(null); - return null; - } - - final ByteBuffer nioBuffer = content.byteBuf().nioBuffer(); - - try { - SpanBytesDecoderDetector.decoderForListMessage(nioBuffer); - } catch (IllegalArgumentException e) { - result.onError(new IllegalArgumentException("Expected a " + decoder + " encoded list\n")); - return null; - } catch (Throwable t1) { - result.onError(t1); - return null; - } - - SpanBytesDecoder unexpectedDecoder = testForUnexpectedFormat(decoder, nioBuffer); - if (unexpectedDecoder != null) { - result.onError(new IllegalArgumentException( - "Expected a " + decoder + " encoded list, but received: " + unexpectedDecoder + "\n")); - return null; - } - - // collector.accept might block so need to move off the event loop. We make sure the - // callback is context aware to continue the trace. 
- Executor executor = ctx.makeContextAware(ctx.blockingTaskExecutor()); - try { - collector.acceptSpans(nioBuffer, decoder, result, executor); - } catch (Throwable t1) { - result.onError(t1); - return null; - } - } - - return null; - }); - - return HttpResponse.from(result); - } - - static void maybeLog(String prefix, ServiceRequestContext ctx, AggregatedHttpRequest request) { - if (!LOGGER.isDebugEnabled()) return; - LOGGER.debug("{} sent by clientAddress->{}, userAgent->{}", - prefix, ctx.clientAddress(), request.headers().get(HttpHeaderNames.USER_AGENT) - ); - } - - /** - * Some formats clash on partial data. For example, a v1 and v2 span is identical if only the span - * name is sent. This looks for unexpected data format. - */ - static SpanBytesDecoder testForUnexpectedFormat(BytesDecoder decoder, ByteBuffer body) { - if (decoder == SpanBytesDecoder.JSON_V2) { - if (contains(body, BINARY_ANNOTATION_FIELD_SUFFIX)) { - return SpanBytesDecoder.JSON_V1; - } - } else if (decoder == SpanBytesDecoder.JSON_V1) { - if (contains(body, ENDPOINT_FIELD_SUFFIX) || contains(body, TAGS_FIELD)) { - return SpanBytesDecoder.JSON_V2; - } - } - return null; - } - - static final byte[] BINARY_ANNOTATION_FIELD_SUFFIX = - {'y', 'A', 'n', 'n', 'o', 't', 'a', 't', 'i', 'o', 'n', 's', '"'}; - // copy-pasted from SpanBytesDecoderDetector, to avoid making it public - static final byte[] ENDPOINT_FIELD_SUFFIX = {'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"'}; - static final byte[] TAGS_FIELD = {'"', 't', 'a', 'g', 's', '"'}; - - static boolean contains(ByteBuffer bytes, byte[] subsequence) { - bytes: - for (int i = 0; i < bytes.remaining() - subsequence.length + 1; i++) { - for (int j = 0; j < subsequence.length; j++) { - if (bytes.get(bytes.position() + i + j) != subsequence[j]) { - continue bytes; - } - } - return true; - } - return false; - } -} - -@Retention(RetentionPolicy.RUNTIME) -@Target({ElementType.TYPE, ElementType.METHOD}) -@Consumes("application/x-thrift") @interface 
ConsumesThrift { -} - -@Retention(RetentionPolicy.RUNTIME) -@Target({ElementType.TYPE, ElementType.METHOD}) -@Consumes("application/x-protobuf") @interface ConsumesProtobuf { -} - -final class CompletableCallback extends CompletableFuture - implements Callback { - - static final ResponseHeaders ACCEPTED_RESPONSE = ResponseHeaders.of(HttpStatus.ACCEPTED); - - @Override public void onSuccess(Void value) { - complete(HttpResponse.of(ACCEPTED_RESPONSE)); - } - - @Override public void onError(Throwable t) { - completeExceptionally(t); - } -} - -final class UnzippingBytesRequestConverter { - - static HttpData convertRequest(ServiceRequestContext ctx, AggregatedHttpRequest request) { - ZipkinHttpCollector.metrics.incrementMessages(); - String encoding = request.headers().get(HttpHeaderNames.CONTENT_ENCODING); - HttpData content = request.content(); - if (!content.isEmpty() && encoding != null && encoding.contains("gzip")) { - content = StreamDecoderFactory.gzip().newDecoder(ctx.alloc()).decode(content); - // The implementation of the armeria decoder is to return an empty body on failure - if (content.isEmpty()) { - ZipkinHttpCollector.maybeLog("Malformed gzip body", ctx, request); - content.close(); - throw new IllegalArgumentException("Cannot gunzip spans"); - } - } - - if (content.isEmpty()) ZipkinHttpCollector.maybeLog("Empty POST body", ctx, request); - if (content.length() == 2 && "[]".equals(content.toStringAscii())) { - ZipkinHttpCollector.maybeLog("Empty JSON list POST body", ctx, request); - content.close(); - content = HttpData.empty(); - } - - ZipkinHttpCollector.metrics.incrementBytes(content.length()); - return content; - } -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/ZipkinHttpConfiguration.java b/zipkin-server/src/main/java/zipkin2/server/internal/ZipkinHttpConfiguration.java deleted file mode 100644 index 9d8feea6c7c..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/ZipkinHttpConfiguration.java +++ /dev/null @@ 
-1,110 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.server.internal; - -import com.linecorp.armeria.common.HttpHeaderNames; -import com.linecorp.armeria.common.HttpMethod; -import com.linecorp.armeria.common.HttpResponse; -import com.linecorp.armeria.common.HttpStatus; -import com.linecorp.armeria.common.MediaType; -import com.linecorp.armeria.server.HttpService; -import com.linecorp.armeria.server.cors.CorsService; -import com.linecorp.armeria.server.cors.CorsServiceBuilder; -import com.linecorp.armeria.server.file.HttpFile; -import com.linecorp.armeria.server.metric.PrometheusExpositionService; -import com.linecorp.armeria.spring.ArmeriaServerConfigurator; -import io.micrometer.core.instrument.MeterRegistry; -import io.prometheus.client.CollectorRegistry; -import java.time.Duration; -import java.util.Optional; -import java.util.function.Function; -import org.springframework.beans.factory.annotation.Value; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Configuration; -import org.springframework.core.annotation.Order; -import zipkin2.server.internal.health.ZipkinHealthController; -import zipkin2.server.internal.prometheus.ZipkinMetricsController; - -@Configuration(proxyBeanMethods = false) -public class ZipkinHttpConfiguration { - public static final MediaType MEDIA_TYPE_ACTUATOR = - 
MediaType.parse("application/vnd.spring-boot.actuator.v2+json;charset=UTF-8"); - - @Bean ArmeriaServerConfigurator serverConfigurator( - Optional httpQuery, - Optional httpCollector, - Optional healthController, - Optional metricsController, - Optional meterRegistry, - Optional collectorRegistry, - @Value("${zipkin.query.timeout:11s}") Duration queryTimeout) { - return sb -> { - httpQuery.ifPresent(h -> { - Function - timeoutDecorator = service -> (ctx, req) -> { - ctx.setRequestTimeout(queryTimeout); - return service.serve(ctx, req); - }; - sb.annotatedService(httpQuery.get(), timeoutDecorator); - sb.annotatedService("/zipkin", httpQuery.get(), timeoutDecorator); // For UI. - }); - httpCollector.ifPresent(sb::annotatedService); - healthController.ifPresent(sb::annotatedService); - metricsController.ifPresent(sb::annotatedService); - collectorRegistry.ifPresent(registry -> { - PrometheusExpositionService prometheusService = new PrometheusExpositionService(registry); - sb.service("/actuator/prometheus", prometheusService); - sb.service("/prometheus", prometheusService); - }); - - // Directly implement info endpoint, but use different content type for the /actuator path - sb.service("/actuator/info", infoService(MEDIA_TYPE_ACTUATOR)); - sb.service("/info", infoService(MediaType.JSON_UTF_8)); - - // It's common for backend requests to have timeouts of the magic number 10s, so we go ahead - // and default to a slightly longer timeout on the server to be able to handle these with - // better error messages where possible. - sb.requestTimeout(Duration.ofSeconds(11)); - - // Block TRACE requests because https://github.com/openzipkin/zipkin/issues/2286 - sb.routeDecorator().trace("prefix:/") - .build((delegate, ctx, req) -> HttpResponse.of(HttpStatus.METHOD_NOT_ALLOWED)); - }; - } - - /** Configures the server at the last because of the specified {@link Order} annotation. 
*/ - @Order @Bean ArmeriaServerConfigurator corsConfigurator( - @Value("${zipkin.query.allowed-origins:*}") String allowedOrigins) { - CorsServiceBuilder corsBuilder = CorsService.builder(allowedOrigins.split(",")) - // NOTE: The property says query, and the UI does not use POST, but we allow POST? - // - // The reason is that our former CORS implementation accidentally allowed POST. People doing - // browser-based tracing relied on this, so we can't remove it by default. In the future, we - // could split the collector's CORS policy into a different property, still allowing POST - // with content-type by default. - .allowRequestMethods(HttpMethod.GET, HttpMethod.POST) - .allowRequestHeaders(HttpHeaderNames.CONTENT_TYPE, - // Use literals to avoid a runtime dependency on armeria-grpc types - HttpHeaderNames.of("X-GRPC-WEB")) - .exposeHeaders("grpc-status", "grpc-message", "armeria.grpc.ThrowableProto-bin"); - return builder -> builder.decorator(corsBuilder::build); - } - - HttpService infoService(MediaType mediaType) { - return HttpFile.builder(getClass().getClassLoader(), "info.json") - .contentType(mediaType) - .build() - .asService(); - } -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/ZipkinModuleImporter.java b/zipkin-server/src/main/java/zipkin2/server/internal/ZipkinModuleImporter.java deleted file mode 100644 index 32dee3b2fbc..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/ZipkinModuleImporter.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.server.internal; - -import java.util.Map; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.springframework.boot.context.properties.bind.Binder; -import org.springframework.context.ApplicationContextInitializer; -import org.springframework.context.annotation.ImportSelector; -import org.springframework.context.support.GenericApplicationContext; -import org.springframework.core.env.ConfigurableEnvironment; - -/** - * This loads configuration needed for modules like zipkin-aws, but without relying on - * AutoConfiguration via spring.factories. Instead, this uses a configuration path defined in our - * yaml here and any profiles loaded by the modules themselves. - * - *

To use this, move autoconfiguration values from {@code src/main/resources/META-INF/spring.factories} - * to map entries under the yaml path {@link #PROPERTY_NAME_MODULE}. - * - *

For example, add the following to {@code src/main/resources/zipkin-server-stackdriver.yml}: - *

{@code
- * zipkin:
- *   internal:
- *     module:
- *       stackdriver: zipkin.autoconfigure.storage.stackdriver.ZipkinStackdriverModule
- * }
- * - *

Implementation note

* - *

It may be possible to re-implement this as {@link ImportSelector} to provide configuration - * types from {@link #PROPERTY_NAME_MODULE}. - */ -// look at RATIONALE.md and update if relevant when changing this file -public final class ZipkinModuleImporter implements ApplicationContextInitializer { - static final Logger LOG = LoggerFactory.getLogger(ZipkinModuleImporter.class); - - static final String PROPERTY_NAME_MODULE = "zipkin.internal.module"; - - @Override public void initialize(GenericApplicationContext context) { - ConfigurableEnvironment env = context.getEnvironment(); - - // At this point in the life-cycle, env can directly resolve plain properties, like the boolean - // above. If you tried to resolve a property bound by a yaml map, it returns null, as they are - // not yet bound. - // - // As we are in a configurable environment, we can bind lists properties. We expect this to take - // includes from PROPERTY_NAME_MODULE yaml path from all modules. - Map modules = - Binder.get(env).bind(PROPERTY_NAME_MODULE, Map.class).orElse(null); - if (modules == null || modules.isEmpty()) { - LOG.debug("no modules found under path " + PROPERTY_NAME_MODULE); - return; - } - - LOG.debug("attempting to load modules: " + modules.keySet()); - for (Map.Entry module : modules.entrySet()) { - try { - context.registerBean(Class.forName(module.getValue())); - } catch (Exception e) { - // Skip any classes that didn't match due to drift - LOG.debug("skipping unloadable module " + module.getKey(), e); - } - } - } -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/ZipkinQueryApiV2.java b/zipkin-server/src/main/java/zipkin2/server/internal/ZipkinQueryApiV2.java deleted file mode 100644 index 542d3ec14b6..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/ZipkinQueryApiV2.java +++ /dev/null @@ -1,267 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use 
this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.server.internal; - -import com.fasterxml.jackson.core.JsonGenerator; -import com.linecorp.armeria.common.AggregatedHttpResponse; -import com.linecorp.armeria.common.HttpData; -import com.linecorp.armeria.common.HttpHeaderNames; -import com.linecorp.armeria.common.MediaType; -import com.linecorp.armeria.common.ResponseHeaders; -import com.linecorp.armeria.common.ResponseHeadersBuilder; -import com.linecorp.armeria.server.ServiceRequestContext; -import com.linecorp.armeria.server.annotation.Blocking; -import com.linecorp.armeria.server.annotation.Default; -import com.linecorp.armeria.server.annotation.ExceptionHandler; -import com.linecorp.armeria.server.annotation.Get; -import com.linecorp.armeria.server.annotation.Param; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufAllocator; -import io.netty.buffer.ByteBufOutputStream; -import java.io.IOException; -import java.io.OutputStream; -import java.io.UncheckedIOException; -import java.util.Collections; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Optional; -import java.util.Set; -import org.springframework.beans.factory.annotation.Value; -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; -import zipkin2.Call; -import zipkin2.DependencyLink; -import zipkin2.Span; -import zipkin2.codec.DependencyLinkBytesEncoder; -import zipkin2.codec.SpanBytesEncoder; -import zipkin2.storage.QueryRequest; -import zipkin2.storage.StorageComponent; - -import static 
com.linecorp.armeria.common.HttpHeaderNames.CACHE_CONTROL; -import static com.linecorp.armeria.common.HttpStatus.BAD_REQUEST; -import static com.linecorp.armeria.common.HttpStatus.NOT_FOUND; -import static com.linecorp.armeria.common.MediaType.ANY_TEXT_TYPE; - -@ConditionalOnProperty(name = "zipkin.query.enabled", matchIfMissing = true) -@ExceptionHandler(BodyIsExceptionMessage.class) -public class ZipkinQueryApiV2 { - final String storageType; - final StorageComponent storage; // don't cache spanStore here as it can cause the app to crash! - final long defaultLookback; - /** - * The Cache-Control max-age (seconds) for /api/v2/services /api/v2/remoteServices and - * /api/v2/spans - */ - final int namesMaxAge; - final List autocompleteKeys; - - volatile int serviceCount; // used as a threshold to start returning cache-control headers - - ZipkinQueryApiV2( - StorageComponent storage, - @Value("${zipkin.storage.type:mem}") String storageType, - @Value("${zipkin.query.lookback:86400000}") long defaultLookback, // 1 day in millis - @Value("${zipkin.query.names-max-age:300}") int namesMaxAge, // 5 minutes - @Value("${zipkin.storage.autocomplete-keys:}") List autocompleteKeys - ) { - this.storage = storage; - this.storageType = storageType; - this.defaultLookback = defaultLookback; - this.namesMaxAge = namesMaxAge; - this.autocompleteKeys = autocompleteKeys; - } - - @Get("/api/v2/dependencies") - @Blocking - public AggregatedHttpResponse getDependencies( - @Param("endTs") long endTs, - @Param("lookback") Optional lookback) throws IOException { - Call> call = - storage.spanStore().getDependencies(endTs, lookback.orElse(defaultLookback)); - return jsonResponse(DependencyLinkBytesEncoder.JSON_V1.encodeList(call.execute())); - } - - @Get("/api/v2/services") - @Blocking - public AggregatedHttpResponse getServiceNames(ServiceRequestContext ctx) throws IOException { - List serviceNames = storage.serviceAndSpanNames().getServiceNames().execute(); - serviceCount = 
serviceNames.size(); - return maybeCacheNames(serviceCount > 3, serviceNames, ctx.alloc()); - } - - @Get("/api/v2/spans") - @Blocking - public AggregatedHttpResponse getSpanNames( - @Param("serviceName") String serviceName, ServiceRequestContext ctx) - throws IOException { - List spanNames = storage.serviceAndSpanNames().getSpanNames(serviceName).execute(); - return maybeCacheNames(serviceCount > 3, spanNames, ctx.alloc()); - } - - @Get("/api/v2/remoteServices") - @Blocking - public AggregatedHttpResponse getRemoteServiceNames( - @Param("serviceName") String serviceName, ServiceRequestContext ctx) - throws IOException { - List remoteServiceNames = - storage.serviceAndSpanNames().getRemoteServiceNames(serviceName).execute(); - return maybeCacheNames(serviceCount > 3, remoteServiceNames, ctx.alloc()); - } - - @Get("/api/v2/traces") - @Blocking - public AggregatedHttpResponse getTraces( - @Param("serviceName") Optional serviceName, - @Param("remoteServiceName") Optional remoteServiceName, - @Param("spanName") Optional spanName, - @Param("annotationQuery") Optional annotationQuery, - @Param("minDuration") Optional minDuration, - @Param("maxDuration") Optional maxDuration, - @Param("endTs") Optional endTs, - @Param("lookback") Optional lookback, - @Default("10") @Param("limit") int limit) - throws IOException { - QueryRequest queryRequest = - QueryRequest.newBuilder() - .serviceName(serviceName.orElse(null)) - .remoteServiceName(remoteServiceName.orElse(null)) - .spanName(spanName.orElse(null)) - .parseAnnotationQuery(annotationQuery.orElse(null)) - .minDuration(minDuration.orElse(null)) - .maxDuration(maxDuration.orElse(null)) - .endTs(endTs.orElse(System.currentTimeMillis())) - .lookback(lookback.orElse(defaultLookback)) - .limit(limit) - .build(); - - List> traces = storage.spanStore().getTraces(queryRequest).execute(); - return jsonResponse(writeTraces(SpanBytesEncoder.JSON_V2, traces)); - } - - @Get("/api/v2/trace/{traceId}") - @Blocking - public 
AggregatedHttpResponse getTrace(@Param("traceId") String traceId) throws IOException { - traceId = traceId != null ? traceId.trim() : null; - traceId = Span.normalizeTraceId(traceId); - List trace = storage.traces().getTrace(traceId).execute(); - if (trace.isEmpty()) { - return AggregatedHttpResponse.of(NOT_FOUND, ANY_TEXT_TYPE, traceId + " not found"); - } - return jsonResponse(SpanBytesEncoder.JSON_V2.encodeList(trace)); - } - - @Get("/api/v2/traceMany") - @Blocking - public AggregatedHttpResponse getTraces(@Param("traceIds") String traceIds) throws IOException { - if (traceIds.isEmpty()) { - return AggregatedHttpResponse.of(BAD_REQUEST, ANY_TEXT_TYPE, "traceIds parameter is empty"); - } - - Set normalized = new LinkedHashSet<>(); - for (String traceId : traceIds.split(",", 1000)) { - if (normalized.add(Span.normalizeTraceId(traceId))) continue; - return AggregatedHttpResponse.of(BAD_REQUEST, ANY_TEXT_TYPE, "redundant traceId: " + traceId); - } - - if (normalized.size() == 1) { - return AggregatedHttpResponse.of(BAD_REQUEST, ANY_TEXT_TYPE, - "Use /api/v2/trace/{traceId} endpoint to retrieve a single trace"); - } - - List> traces = storage.traces().getTraces(normalized).execute(); - return jsonResponse(writeTraces(SpanBytesEncoder.JSON_V2, traces)); - } - - static AggregatedHttpResponse jsonResponse(byte[] body) { - return AggregatedHttpResponse.of(ResponseHeaders.builder(200) - .contentType(MediaType.JSON) - .setInt(HttpHeaderNames.CONTENT_LENGTH, body.length).build(), HttpData.wrap(body)); - } - - @Get("/api/v2/autocompleteKeys") - @Blocking - public AggregatedHttpResponse getAutocompleteKeys(ServiceRequestContext ctx) { - return maybeCacheNames(true, autocompleteKeys, ctx.alloc()); - } - - @Get("/api/v2/autocompleteValues") - @Blocking - public AggregatedHttpResponse getAutocompleteValues( - @Param("key") String key, ServiceRequestContext ctx) throws IOException { - List values = storage.autocompleteTags().getValues(key).execute(); - return 
maybeCacheNames(values.size() > 3, values, ctx.alloc()); - } - - /** - * We cache names if there are more than 3 names. This helps people getting started: if we cache - * empty results, users have more questions. We assume caching becomes a concern when zipkin is in - * active use, and active use usually implies more than 3 services. - */ - AggregatedHttpResponse maybeCacheNames( - boolean shouldCacheControl, List values, ByteBufAllocator alloc) { - Collections.sort(values); - int sizeEstimate = 2; // Two brackets. - for (String value : values) { - sizeEstimate += value.length() + 1 /* comma */; - } - sizeEstimate -= 1; // Last element doesn't have a comma. - // If the values don't require escaping, this buffer will not be resized. - ByteBuf buf = alloc.buffer(sizeEstimate); - try (JsonGenerator gen = - JsonUtil.JSON_FACTORY.createGenerator((OutputStream) new ByteBufOutputStream(buf))) { - gen.writeStartArray(values.size()); - for (String value : values) { - gen.writeString(value); - } - gen.writeEndArray(); - } catch (IOException e) { - buf.release(); - throw new UncheckedIOException(e); - } - ResponseHeadersBuilder headers = ResponseHeaders.builder(200) - .contentType(MediaType.JSON) - .setInt(HttpHeaderNames.CONTENT_LENGTH, buf.readableBytes()); - if (shouldCacheControl) { - headers = headers.add(CACHE_CONTROL, "max-age=" + namesMaxAge + ", must-revalidate"); - } - return AggregatedHttpResponse.of(headers.build(), HttpData.wrap(buf)); - } - - // This is inlined here as there isn't enough re-use to warrant it being in the zipkin2 library - static byte[] writeTraces(SpanBytesEncoder codec, List> traces) { - // Get the encoded size of the nested list so that we don't need to grow the buffer - int length = traces.size(); - int sizeInBytes = 2; // [] - if (length > 1) sizeInBytes += length - 1; // comma to join elements - - for (int i = 0; i < length; i++) { - List spans = traces.get(i); - int jLength = spans.size(); - sizeInBytes += 2; // [] - if (jLength > 1) 
sizeInBytes += jLength - 1; // comma to join elements - for (int j = 0; j < jLength; j++) { - sizeInBytes += codec.sizeInBytes(spans.get(j)); - } - } - - byte[] out = new byte[sizeInBytes]; - int pos = 0; - out[pos++] = '['; // start list of traces - for (int i = 0; i < length; i++) { - pos += codec.encodeList(traces.get(i), out, pos); - if (i + 1 < length) out[pos++] = ','; - } - out[pos] = ']'; // stop list of traces - return out; - } -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/activemq/ZipkinActiveMQCollectorConfiguration.java b/zipkin-server/src/main/java/zipkin2/server/internal/activemq/ZipkinActiveMQCollectorConfiguration.java deleted file mode 100644 index 837ca04e48e..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/activemq/ZipkinActiveMQCollectorConfiguration.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal.activemq; - -import org.springframework.boot.autoconfigure.condition.ConditionalOnClass; -import org.springframework.boot.context.properties.EnableConfigurationProperties; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Condition; -import org.springframework.context.annotation.ConditionContext; -import org.springframework.context.annotation.Conditional; -import org.springframework.core.type.AnnotatedTypeMetadata; -import zipkin2.collector.CollectorMetrics; -import zipkin2.collector.CollectorSampler; -import zipkin2.collector.activemq.ActiveMQCollector; -import zipkin2.storage.StorageComponent; - -/** Auto-configuration for {@link ActiveMQCollector}. */ -@ConditionalOnClass(ActiveMQCollector.class) -@EnableConfigurationProperties(ZipkinActiveMQCollectorProperties.class) -@Conditional(ZipkinActiveMQCollectorConfiguration.ActiveMQUrlSet.class) -public class ZipkinActiveMQCollectorConfiguration { - - @Bean(initMethod = "start") - ActiveMQCollector activeMq( - ZipkinActiveMQCollectorProperties properties, - CollectorSampler sampler, - CollectorMetrics metrics, - StorageComponent storage) { - return properties.toBuilder().sampler(sampler).metrics(metrics).storage(storage).build(); - } - - /** - * This condition passes when {@link ZipkinActiveMQCollectorProperties#getUrl()}} is set to - * non-empty. - * - *

This is here because the yaml defaults this property to empty like this, and spring-boot - * doesn't have an option to treat empty properties as unset. - * - *

{@code
-   * url: ${ACTIVEMQ_URL:}
-   * }
- */ - static final class ActiveMQUrlSet implements Condition { - @Override public boolean matches(ConditionContext context, AnnotatedTypeMetadata a) { - return !isEmpty( - context.getEnvironment().getProperty("zipkin.collector.activemq.url")) && - notFalse(context.getEnvironment().getProperty("zipkin.collector.activemq.enabled")); - } - - private static boolean isEmpty(String s) { - return s == null || s.isEmpty(); - } - - private static boolean notFalse(String s){ - return s == null || !s.equals("false"); - } - } -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/activemq/ZipkinActiveMQCollectorProperties.java b/zipkin-server/src/main/java/zipkin2/server/internal/activemq/ZipkinActiveMQCollectorProperties.java deleted file mode 100644 index 19f677649ec..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/activemq/ZipkinActiveMQCollectorProperties.java +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.server.internal.activemq; - -import org.apache.activemq.ActiveMQConnectionFactory; -import org.springframework.boot.context.properties.ConfigurationProperties; -import zipkin2.collector.activemq.ActiveMQCollector; - -/** Properties for configuring and building a {@link ActiveMQCollector}. */ -@ConfigurationProperties("zipkin.collector.activemq") -class ZipkinActiveMQCollectorProperties { - /** URL of the ActiveMQ broker. 
*/ - private String url; - - /** ActiveMQ queue from which to collect the Zipkin spans */ - private String queue; - - /** Client ID prefix for queue consumers */ - private String clientIdPrefix = "zipkin"; - - /** Connection ID prefix for queue consumers */ - private String connectionIdPrefix = "zipkin"; - - /** Number of concurrent span consumers */ - private Integer concurrency; - - /** Login user of the broker. */ - private String username; - - /** Login password of the broker. */ - private String password; - - public String getUrl() { - return url; - } - - public void setUrl(String url) { - this.url = emptyToNull(url); - } - - public String getQueue() { - return queue; - } - - public void setQueue(String queue) { - this.queue = emptyToNull(queue); - } - - public String getClientIdPrefix() { - return clientIdPrefix; - } - - public void setClientIdPrefix(String clientIdPrefix) { - this.clientIdPrefix = clientIdPrefix; - } - - public String getConnectionIdPrefix() { - return connectionIdPrefix; - } - - public void setConnectionIdPrefix(String connectionIdPrefix) { - this.connectionIdPrefix = connectionIdPrefix; - } - - public Integer getConcurrency() { - return concurrency; - } - - public void setConcurrency(Integer concurrency) { - this.concurrency = concurrency; - } - - public String getUsername() { - return username; - } - - public void setUsername(String username) { - this.username = emptyToNull(username); - } - - public String getPassword() { - return password; - } - - public void setPassword(String password) { - this.password = emptyToNull(password); - } - - public ActiveMQCollector.Builder toBuilder() { - final ActiveMQCollector.Builder result = ActiveMQCollector.builder(); - if (concurrency != null) result.concurrency(concurrency); - if (queue != null) result.queue(queue); - - ActiveMQConnectionFactory connectionFactory; - if (username != null) { - connectionFactory = new ActiveMQConnectionFactory(username, password, url); - } else { - connectionFactory = 
new ActiveMQConnectionFactory(url); - } - connectionFactory.setClientIDPrefix(clientIdPrefix); - connectionFactory.setConnectionIDPrefix(connectionIdPrefix); - result.connectionFactory(connectionFactory); - return result; - } - - private static String emptyToNull(String s) { - return "".equals(s) ? null : s; - } -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/banner/ZipkinBanner.java b/zipkin-server/src/main/java/zipkin2/server/internal/banner/ZipkinBanner.java deleted file mode 100644 index 1faef9df384..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/banner/ZipkinBanner.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.server.internal.banner; - -import java.io.InputStream; -import java.io.PrintStream; -import org.springframework.boot.Banner; -import org.springframework.boot.ansi.AnsiElement; -import org.springframework.boot.ansi.AnsiOutput; -import org.springframework.boot.ansi.AnsiStyle; -import org.springframework.core.env.Environment; -import org.springframework.core.io.ClassPathResource; -import org.springframework.util.StreamUtils; - -import static java.nio.charset.StandardCharsets.UTF_8; - -/** - * More efficient Banner implementation which doesn't use property sources as variables are expanded - * at compile time using Maven resource filtering. 
- */ -public class ZipkinBanner implements Banner { - static final AnsiElement ZIPKIN_ORANGE = new AnsiElement() { - @Override public String toString() { - return "38;5;208"; // Ansi 256 color code 208 (orange) - } - }; - - @Override - public void printBanner(Environment environment, Class sourceClass, PrintStream out) { - try (InputStream stream = new ClassPathResource("zipkin.txt").getInputStream()) { - String banner = StreamUtils.copyToString(stream, UTF_8); - - // Instead of use property expansion for only 2 ansi codes, inline them - banner = banner.replace("${AnsiOrange}", AnsiOutput.encode(ZIPKIN_ORANGE)); - banner = banner.replace("${AnsiNormal}", AnsiOutput.encode(AnsiStyle.NORMAL)); - - out.println(banner); - } catch (Exception ex) { - // who cares - } - } -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/brave/SelfTracingProperties.java b/zipkin-server/src/main/java/zipkin2/server/internal/brave/SelfTracingProperties.java deleted file mode 100644 index 9a2a47c3e2b..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/brave/SelfTracingProperties.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal.brave; - -import java.time.Duration; -import java.time.temporal.ChronoUnit; -import org.springframework.boot.context.properties.ConfigurationProperties; -import org.springframework.boot.convert.DurationUnit; - -@ConfigurationProperties("zipkin.self-tracing") -class SelfTracingProperties { - - /** Whether self-tracing is enabled. Defaults to {@code false}. */ - private boolean enabled = false; - /** - * The percentage of traces retained when self-tracing. If 1.0 (i.e., all traces are sampled), the - * value of {@link #getTracesPerSecond()} will be used for sampling traces. Defaults to {@code - * 1.0}, sampling all traces. - */ - private float sampleRate = 1.0f; - /** - * The number of traces per second to retain. If 0, an unlimited number of traces will be - * retained. This value has no effect if {@link #getSampleRate()} is set to something other than - * {@code 1.0}. Defaults to 1 trace per second. - */ - private int tracesPerSecond = 1; - /** Timeout to flush self-tracing data to storage. 
*/ - @DurationUnit(ChronoUnit.SECONDS) - private Duration messageTimeout = Duration.ofSeconds(1); - - public boolean isEnabled() { - return enabled; - } - - public void setEnabled(boolean enabled) { - this.enabled = enabled; - } - - public float getSampleRate() { - return sampleRate; - } - - public void setSampleRate(float sampleRate) { - this.sampleRate = sampleRate; - } - - public int getTracesPerSecond() { - return tracesPerSecond; - } - - public void setTracesPerSecond(int tracesPerSecond) { - this.tracesPerSecond = tracesPerSecond; - } - - public Duration getMessageTimeout() { - return messageTimeout; - } - - public void setMessageTimeout(Duration messageTimeout) { - this.messageTimeout = messageTimeout; - } -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/brave/TracedCall.java b/zipkin-server/src/main/java/zipkin2/server/internal/brave/TracedCall.java deleted file mode 100644 index c9d62444aa0..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/brave/TracedCall.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal.brave; - -import brave.ScopedSpan; -import brave.Span; -import brave.Tracer; -import java.io.IOException; -import zipkin2.Call; -import zipkin2.Callback; - -public final class TracedCall extends Call { - final Tracer tracer; - final Call delegate; - final String name; - - public TracedCall(Tracer tracer, Call delegate, String name) { - this.tracer = tracer; - this.delegate = delegate; - this.name = name; - } - - @Override public V execute() throws IOException { - ScopedSpan span = tracer.startScopedSpan(name); - try { - return delegate.execute(); - } catch (RuntimeException | IOException | Error e) { - span.error(e); - throw e; - } finally { - span.finish(); - } - } - - @Override public void enqueue(Callback callback) { - Span span = tracer.nextSpan().name(name).start(); - try { - if (span.isNoop()) { - delegate.enqueue(callback); - } else { - delegate.enqueue(new SpanFinishingCallback<>(callback, span)); - } - } catch (RuntimeException | Error e) { - span.error(e); - span.finish(); - throw e; - } - } - - @Override public void cancel() { - delegate.cancel(); - } - - @Override public boolean isCanceled() { - return delegate.isCanceled(); - } - - @Override public Call clone() { - return new TracedCall<>(tracer, delegate, name); - } - - @Override public String toString() { - return "Traced(" + delegate + ")"; - } - - static final class SpanFinishingCallback implements Callback { - private final Callback delegate; - private final Span span; - - SpanFinishingCallback(Callback delegate, Span span) { - this.delegate = delegate; - this.span = span; - } - - @Override public void onSuccess(V value) { - delegate.onSuccess(value); - span.finish(); - } - - @Override public void onError(Throwable t) { - delegate.onError(t); - span.error(t).finish(); - } - - @Override public String toString() { - return "Traced(" + delegate + ")"; - } - } -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/brave/TracingStorageComponent.java 
b/zipkin-server/src/main/java/zipkin2/server/internal/brave/TracingStorageComponent.java deleted file mode 100644 index bc458b0df20..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/brave/TracingStorageComponent.java +++ /dev/null @@ -1,202 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.server.internal.brave; - -import brave.Tracer; -import brave.Tracing; -import java.io.IOException; -import java.util.List; -import zipkin2.Call; -import zipkin2.CheckResult; -import zipkin2.DependencyLink; -import zipkin2.Span; -import zipkin2.storage.AutocompleteTags; -import zipkin2.storage.ForwardingStorageComponent; -import zipkin2.storage.QueryRequest; -import zipkin2.storage.ServiceAndSpanNames; -import zipkin2.storage.SpanConsumer; -import zipkin2.storage.SpanStore; -import zipkin2.storage.StorageComponent; -import zipkin2.storage.Traces; - -// public for use in ZipkinServerConfiguration -public final class TracingStorageComponent extends ForwardingStorageComponent { - final Tracing tracing; - final StorageComponent delegate; - - public TracingStorageComponent(Tracing tracing, StorageComponent delegate) { - this.tracing = tracing; - this.delegate = delegate; - } - - @Override protected StorageComponent delegate() { - return delegate; - } - - @Override public ServiceAndSpanNames serviceAndSpanNames() { - return new TracingServiceAndSpanNames(tracing, 
delegate.serviceAndSpanNames()); - } - - @Override public Traces traces() { - return new TracingTraces(tracing, delegate.traces()); - } - - @Override public SpanStore spanStore() { - return new TracingSpanStore(tracing, delegate.spanStore()); - } - - @Override public AutocompleteTags autocompleteTags() { - return new TracingAutocompleteTags(tracing, delegate.autocompleteTags()); - } - - @Override public SpanConsumer spanConsumer() { - return new TracingSpanConsumer(tracing, delegate.spanConsumer()); - } - - @Override public CheckResult check() { - return delegate.check(); - } - - @Override public void close() throws IOException { - delegate.close(); - } - - @Override public String toString() { - return "Traced{" + delegate + "}"; - } - - static final class TracingTraces implements Traces { - final Tracer tracer; - final Traces delegate; - - TracingTraces(Tracing tracing, Traces delegate) { - this.tracer = tracing.tracer(); - this.delegate = delegate; - } - - @Override public Call> getTrace(String traceId) { - return new TracedCall<>(tracer, delegate.getTrace(traceId), "get-trace"); - } - - @Override public Call>> getTraces(Iterable traceIds) { - return new TracedCall<>(tracer, delegate.getTraces(traceIds), "get-traces"); - } - - @Override public String toString() { - return "Traced{" + delegate + "}"; - } - } - - static final class TracingSpanStore implements SpanStore { - final Tracer tracer; - final SpanStore delegate; - - TracingSpanStore(Tracing tracing, SpanStore delegate) { - this.tracer = tracing.tracer(); - this.delegate = delegate; - } - - @Override public Call>> getTraces(QueryRequest request) { - return new TracedCall<>(tracer, delegate.getTraces(request), "get-traces"); - } - - @Override @Deprecated public Call> getTrace(String traceId) { - return new TracedCall<>(tracer, delegate.getTrace(traceId), "get-trace"); - } - - @Override @Deprecated public Call> getServiceNames() { - return new TracedCall<>(tracer, delegate.getServiceNames(), 
"get-service-names"); - } - - @Override @Deprecated public Call> getSpanNames(String serviceName) { - return new TracedCall<>(tracer, delegate.getSpanNames(serviceName), "get-span-names"); - } - - @Override public Call> getDependencies(long endTs, long lookback) { - return new TracedCall<>( - tracer, delegate.getDependencies(endTs, lookback), "get-dependencies"); - } - - @Override public String toString() { - return "Traced{" + delegate + "}"; - } - } - - static final class TracingAutocompleteTags implements AutocompleteTags { - final Tracer tracer; - final AutocompleteTags delegate; - - TracingAutocompleteTags(Tracing tracing, AutocompleteTags delegate) { - this.tracer = tracing.tracer(); - this.delegate = delegate; - } - - @Override public Call> getKeys() { - return new TracedCall<>(tracer, delegate.getKeys(), "get-keys"); - } - - @Override public Call> getValues(String key) { - return new TracedCall<>(tracer, delegate.getValues(key), "get-values"); - } - - @Override public String toString() { - return "Traced{" + delegate + "}"; - } - } - - static final class TracingServiceAndSpanNames implements ServiceAndSpanNames { - final Tracer tracer; - final ServiceAndSpanNames delegate; - - TracingServiceAndSpanNames(Tracing tracing, ServiceAndSpanNames delegate) { - this.tracer = tracing.tracer(); - this.delegate = delegate; - } - - @Override public Call> getServiceNames() { - return new TracedCall<>(tracer, delegate.getServiceNames(), "get-service-names"); - } - - @Override public Call> getRemoteServiceNames(String serviceName) { - return new TracedCall<>(tracer, delegate.getRemoteServiceNames(serviceName), - "get-remote-service-names"); - } - - @Override public Call> getSpanNames(String serviceName) { - return new TracedCall<>(tracer, delegate.getSpanNames(serviceName), "get-span-names"); - } - - @Override public String toString() { - return "Traced{" + delegate + "}"; - } - } - - static final class TracingSpanConsumer implements SpanConsumer { - final Tracer tracer; 
- final SpanConsumer delegate; - - TracingSpanConsumer(Tracing tracing, SpanConsumer delegate) { - this.tracer = tracing.tracer(); - this.delegate = delegate; - } - - @Override public Call accept(List spans) { - return new TracedCall<>(tracer, delegate.accept(spans), "accept-spans"); - } - - @Override public String toString() { - return "Traced{" + delegate + "}"; - } - } -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/brave/ZipkinSelfTracingConfiguration.java b/zipkin-server/src/main/java/zipkin2/server/internal/brave/ZipkinSelfTracingConfiguration.java deleted file mode 100644 index 0d4d8462da9..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/brave/ZipkinSelfTracingConfiguration.java +++ /dev/null @@ -1,237 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal.brave; - -import brave.Tracing; -import brave.context.slf4j.MDCScopeDecorator; -import brave.http.HttpTracing; -import brave.propagation.B3Propagation; -import brave.propagation.CurrentTraceContext; -import brave.propagation.ThreadLocalSpan; -import brave.sampler.BoundarySampler; -import brave.sampler.RateLimitingSampler; -import brave.sampler.Sampler; -import com.linecorp.armeria.common.brave.RequestContextCurrentTraceContext; -import com.linecorp.armeria.server.brave.BraveService; -import com.linecorp.armeria.spring.ArmeriaServerConfigurator; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.TimeUnit; -import org.springframework.beans.factory.BeanFactory; -import org.springframework.boot.context.properties.EnableConfigurationProperties; -import org.springframework.context.annotation.Bean; -import zipkin2.Call; -import zipkin2.CheckResult; -import zipkin2.Span; -import zipkin2.codec.Encoding; -import zipkin2.codec.SpanBytesDecoder; -import zipkin2.collector.CollectorMetrics; -import zipkin2.reporter.ReporterMetrics; -import zipkin2.reporter.Sender; -import zipkin2.reporter.brave.AsyncZipkinSpanHandler; -import zipkin2.server.internal.ConditionalOnSelfTracing; -import zipkin2.storage.StorageComponent; - -@EnableConfigurationProperties(SelfTracingProperties.class) -@ConditionalOnSelfTracing -public class ZipkinSelfTracingConfiguration { - /** Configuration for how to buffer spans into messages for Zipkin */ - @Bean AsyncZipkinSpanHandler reporter(BeanFactory factory, SelfTracingProperties config) { - return AsyncZipkinSpanHandler.newBuilder(new LocalSender(factory)) - .threadFactory((runnable) -> new Thread(new Runnable() { - @Override public void run() { - RequestContextCurrentTraceContext.setCurrentThreadNotRequestThread(true); - runnable.run(); - } - - @Override public String toString() { - return runnable.toString(); - } - })) - .messageTimeout(config.getMessageTimeout().toNanos(), 
TimeUnit.NANOSECONDS) - .metrics(new ReporterMetricsAdapter(factory)) - .build(); - } - - @Bean CurrentTraceContext currentTraceContext() { - return RequestContextCurrentTraceContext.builder() - .addScopeDecorator(MDCScopeDecorator.get()) // puts trace IDs into logs - .build(); - } - - /** - * There's no attribute namespace shared across request and response. Hence, we need to save off a - * reference to the span in scope, so that we can close it in the response. - */ - @Bean ThreadLocalSpan threadLocalSpan(Tracing tracing) { - return ThreadLocalSpan.create(tracing.tracer()); - } - - /** - * This controls the general rate. In order to not accidentally start traces started from the - * tracer itself, this isn't used as {@link Tracing.Builder#sampler(Sampler)}. The impact of this - * is that we can't currently start traces from Kafka or Rabbit (until we use a messaging - * sampler). - * - * See https://github.com/openzipkin/brave/pull/914 for the messaging abstraction - */ - @Bean Sampler sampler(SelfTracingProperties config) { - if (config.getSampleRate() != 1.0) { - if (config.getSampleRate() < 0.01) { - return BoundarySampler.create(config.getSampleRate()); - } else { - return Sampler.create(config.getSampleRate()); - } - } else if (config.getTracesPerSecond() != 0) { - return RateLimitingSampler.create(config.getTracesPerSecond()); - } - return Sampler.ALWAYS_SAMPLE; - } - - /** Controls aspects of tracing such as the name that shows up in the UI */ - @Bean Tracing tracing(AsyncZipkinSpanHandler zipkinSpanHandler, CurrentTraceContext currentTraceContext) { - return Tracing.newBuilder() - .localServiceName("zipkin-server") - .sampler(Sampler.NEVER_SAMPLE) // don't sample traces at this abstraction - .currentTraceContext(currentTraceContext) - // Reduce the impact on untraced downstream http services such as Elasticsearch - .propagationFactory(B3Propagation.newFactoryBuilder() - .injectFormat(brave.Span.Kind.CLIENT, B3Propagation.Format.SINGLE) - .build()) - 
.addSpanHandler(zipkinSpanHandler) - .build(); - } - - @Bean HttpTracing httpTracing(Tracing tracing, Sampler sampler) { - return HttpTracing.newBuilder(tracing) - // server starts traces for read requests under the path /api - .serverSampler(request -> { - String path = request.path(); - if (path.startsWith("/api") || path.startsWith("/zipkin/api")) { - return sampler.isSampled(0L); // use the global rate limit - } - return false; - } - ) - .build(); - } - - @Bean ArmeriaServerConfigurator tracingConfigurator(HttpTracing tracing) { - return server -> server.decorator(BraveService.newDecorator(tracing)); - } - - /** Lazily looks up the storage component in order to to avoid proxying. */ - static final class LocalSender extends Sender { - final BeanFactory factory; - volatile StorageComponent delegate; // volatile to prevent stale reads - - LocalSender(BeanFactory factory) { - this.factory = factory; - } - - @Override public Encoding encoding() { - // TODO: less memory efficient, but not a huge problem for self-tracing which is rarely on - // https://github.com/openzipkin/zipkin-reporter-java/issues/178 - return Encoding.JSON; - } - - @Override public int messageMaxBytes() { - return 5 * 1024 * 1024; // arbitrary - } - - @Override public int messageSizeInBytes(List list) { - return Encoding.JSON.listSizeInBytes(list); - } - - @Override public Call sendSpans(List encodedSpans) { - List spans = new ArrayList<>(encodedSpans.size()); - for (byte[] encodedSpan : encodedSpans) { - Span v2Span = SpanBytesDecoder.JSON_V2.decodeOne(encodedSpan); - spans.add(v2Span); - } - return delegate().spanConsumer().accept(spans); - } - - @Override public CheckResult check() { - return delegate().check(); - } - - @Override public String toString() { - // Avoid using the delegate to avoid eagerly loading the bean during initialization - return "StorageComponent"; - } - - @Override public void close() { - // don't close delegate as we didn't open it! 
- } - - /** Lazy lookup to avoid proxying */ - StorageComponent delegate() { - StorageComponent result = delegate; - if (result != null) return delegate; - // synchronization is not needed as redundant calls have no ill effects - result = factory.getBean(StorageComponent.class); - if (result instanceof TracingStorageComponent) { - result = ((TracingStorageComponent) result).delegate; - } - return delegate = result; - } - } - - static final class ReporterMetricsAdapter implements ReporterMetrics { - final BeanFactory factory; - volatile CollectorMetrics delegate; // volatile to prevent stale reads - - ReporterMetricsAdapter(BeanFactory factory) { - this.factory = factory; - } - - @Override public void incrementMessages() { - delegate().incrementMessages(); - } - - @Override public void incrementMessagesDropped(Throwable throwable) { - delegate().incrementMessagesDropped(); - } - - @Override public void incrementSpans(int i) { - delegate().incrementSpans(i); - } - - @Override public void incrementSpanBytes(int i) { - delegate().incrementBytes(i); - } - - @Override public void incrementMessageBytes(int i) { - } - - @Override public void incrementSpansDropped(int i) { - delegate().incrementMessagesDropped(); - } - - @Override public void updateQueuedSpans(int i) { - } - - @Override public void updateQueuedBytes(int i) { - } - - /** Lazy lookup to avoid proxying */ - CollectorMetrics delegate() { - CollectorMetrics result = delegate; - if (result != null) return delegate; - // synchronization is not needed as redundant calls have no ill effects - return delegate = factory.getBean(CollectorMetrics.class).forTransport("local"); - } - } -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/cassandra3/ZipkinCassandra3StorageConfiguration.java b/zipkin-server/src/main/java/zipkin2/server/internal/cassandra3/ZipkinCassandra3StorageConfiguration.java deleted file mode 100644 index 97557d0813b..00000000000 --- 
a/zipkin-server/src/main/java/zipkin2/server/internal/cassandra3/ZipkinCassandra3StorageConfiguration.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.server.internal.cassandra3; - -import java.util.List; -import org.springframework.beans.BeansException; -import org.springframework.beans.factory.BeanFactory; -import org.springframework.beans.factory.BeanFactoryAware; -import org.springframework.beans.factory.annotation.Value; -import org.springframework.beans.factory.config.BeanPostProcessor; -import org.springframework.boot.autoconfigure.condition.ConditionalOnClass; -import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean; -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; -import org.springframework.boot.context.properties.EnableConfigurationProperties; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Import; -import zipkin2.server.internal.ConditionalOnSelfTracing; -import zipkin2.storage.StorageComponent; -import zipkin2.storage.cassandra.CassandraStorage; -import zipkin2.storage.cassandra.CassandraStorage.SessionFactory; - -/** - * This storage accepts Cassandra logs in a specified category. Each log entry is expected to - * contain a single span, which is TBinaryProtocol big-endian, then base64 encoded. Decoded spans - * are stored asynchronously. 
- */ -@ConditionalOnClass(CassandraStorage.class) -@EnableConfigurationProperties(ZipkinCassandra3StorageProperties.class) -@ConditionalOnProperty(name = "zipkin.storage.type", havingValue = "cassandra3") -@ConditionalOnMissingBean(StorageComponent.class) -@Import(ZipkinCassandra3StorageConfiguration.TracingSessionFactoryEnhancer.class) -// This component is named .*Cassandra3.* even though the package already says cassandra3 because -// Spring Boot configuration endpoints only printout the simple name of the class -public class ZipkinCassandra3StorageConfiguration { - - @Bean SessionFactory sessionFactory() { - return SessionFactory.DEFAULT; - } - - @Bean - @ConditionalOnMissingBean - StorageComponent storage( - ZipkinCassandra3StorageProperties properties, - SessionFactory sessionFactory, - @Value("${zipkin.storage.strict-trace-id:true}") boolean strictTraceId, - @Value("${zipkin.storage.search-enabled:true}") boolean searchEnabled, - @Value("${zipkin.storage.autocomplete-keys:}") List autocompleteKeys, - @Value("${zipkin.storage.autocomplete-ttl:3600000}") int autocompleteTtl, - @Value("${zipkin.storage.autocomplete-cardinality:20000}") int autocompleteCardinality) { - return properties.toBuilder() - .strictTraceId(strictTraceId) - .searchEnabled(searchEnabled) - .autocompleteKeys(autocompleteKeys) - .autocompleteTtl(autocompleteTtl) - .autocompleteCardinality(autocompleteCardinality) - .sessionFactory(sessionFactory).build(); - } - - @ConditionalOnSelfTracing - static class TracingSessionFactoryEnhancer implements BeanPostProcessor, BeanFactoryAware { - /** - * Need this to resolve cyclic instantiation issue with spring when instantiating with tracing. - * - *

Ref: Tracking down cause of Spring's "not - * eligible for auto-proxying"

- */ - BeanFactory beanFactory; - - @Override public Object postProcessBeforeInitialization(Object bean, String beanName) { - return bean; - } - - @Override public Object postProcessAfterInitialization(Object bean, String beanName) { - //if (bean instanceof SessionFactory && beanFactory.containsBean("tracing")) { - // SessionFactory delegate = (SessionFactory) bean; - // Tracing tracing = beanFactory.getBean(Tracing.class); - // return (SessionFactory) storage -> TracingSession.create(tracing, delegate.create(storage)); - //} - return bean; - } - - @Override public void setBeanFactory(BeanFactory beanFactory) throws BeansException { - this.beanFactory = beanFactory; - } - } -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/cassandra3/ZipkinCassandra3StorageProperties.java b/zipkin-server/src/main/java/zipkin2/server/internal/cassandra3/ZipkinCassandra3StorageProperties.java deleted file mode 100644 index 6cc9aaf3d4b..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/cassandra3/ZipkinCassandra3StorageProperties.java +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal.cassandra3; - -import java.io.Serializable; -import org.springframework.boot.context.properties.ConfigurationProperties; -import zipkin2.storage.cassandra.CassandraStorage; - -@ConfigurationProperties("zipkin.storage.cassandra3") -class ZipkinCassandra3StorageProperties implements Serializable { // for Spark jobs - private static final long serialVersionUID = 0L; - - private String keyspace = "zipkin3"; - private String contactPoints = "localhost"; - private String localDc = "datacenter1"; - private int maxConnections = 8; - private boolean ensureSchema = true; - private boolean useSsl = false; - private String username; - private String password; - /** See {@link CassandraStorage.Builder#indexFetchMultiplier(int)} */ - private int indexFetchMultiplier = 3; - - public String getKeyspace() { - return keyspace; - } - - public void setKeyspace(String keyspace) { - this.keyspace = keyspace; - } - - public String getContactPoints() { - return contactPoints; - } - - public void setContactPoints(String contactPoints) { - this.contactPoints = contactPoints; - } - - public String getLocalDc() { - return localDc; - } - - public void setLocalDc(String localDc) { - this.localDc = "".equals(localDc) ? null : localDc; - } - - public int getMaxConnections() { - return maxConnections; - } - - public void setMaxConnections(int maxConnections) { - this.maxConnections = maxConnections; - } - - public boolean isEnsureSchema() { - return ensureSchema; - } - - public void setEnsureSchema(boolean ensureSchema) { - this.ensureSchema = ensureSchema; - } - - public boolean isUseSsl() { - return useSsl; - } - - public void setUseSsl(boolean useSsl) { - this.useSsl = useSsl; - } - - public String getUsername() { - return username; - } - - public void setUsername(String username) { - this.username = "".equals(username) ? 
null : username; - } - - public String getPassword() { - return password; - } - - public void setPassword(String password) { - this.password = "".equals(password) ? null : password; - } - - public int getIndexFetchMultiplier() { - return indexFetchMultiplier; - } - - public void setIndexFetchMultiplier(int indexFetchMultiplier) { - this.indexFetchMultiplier = indexFetchMultiplier; - } - - public CassandraStorage.Builder toBuilder() { - return CassandraStorage.newBuilder() - .keyspace(keyspace) - .contactPoints(contactPoints) - .localDc(localDc) - .maxConnections(maxConnections) - .ensureSchema(ensureSchema) - .useSsl(useSsl) - .username(username) - .password(password) - .indexFetchMultiplier(indexFetchMultiplier); - } -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/elasticsearch/BasicAuthInterceptor.java b/zipkin-server/src/main/java/zipkin2/server/internal/elasticsearch/BasicAuthInterceptor.java deleted file mode 100644 index 81e6bf60c92..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/elasticsearch/BasicAuthInterceptor.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal.elasticsearch; - -import com.linecorp.armeria.client.ClientRequestContext; -import com.linecorp.armeria.client.HttpClient; -import com.linecorp.armeria.client.SimpleDecoratingHttpClient; -import com.linecorp.armeria.common.HttpHeaderNames; -import com.linecorp.armeria.common.HttpRequest; -import com.linecorp.armeria.common.HttpResponse; -import java.util.Objects; - -/** - * Adds basic auth username and password to every request. - * - *

Ref: How - * Elasticsearch security works

- */ -final class BasicAuthInterceptor extends SimpleDecoratingHttpClient { - - final BasicCredentials basicCredentials; - - BasicAuthInterceptor(HttpClient client, BasicCredentials basicCredentials) { - super(client); - this.basicCredentials = basicCredentials; - } - - @Override - public HttpResponse execute(ClientRequestContext ctx, HttpRequest req) throws Exception { - String credentials = basicCredentials.getCredentials(); - if (credentials != null) { - ctx.addAdditionalRequestHeader(HttpHeaderNames.AUTHORIZATION, credentials); - } - return unwrap().execute(ctx, req); - } -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/elasticsearch/BasicCredentials.java b/zipkin-server/src/main/java/zipkin2/server/internal/elasticsearch/BasicCredentials.java deleted file mode 100644 index 55c8e14da6a..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/elasticsearch/BasicCredentials.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.server.internal.elasticsearch; - -import java.util.Base64; -import java.util.Optional; -import zipkin2.internal.Nullable; - -import static java.nio.charset.StandardCharsets.UTF_8; - -/** - * Generate Elasticsearch basic user credentials. - * - *

Ref: How - * Elasticsearch security works

- */ -final class BasicCredentials { - - private volatile String basicCredentials; - - BasicCredentials() { - - } - - BasicCredentials(String username, String password) { - updateCredentials(username, password); - } - - void updateCredentials(String username, String password) { - String token = username + ':' + password; - basicCredentials = "Basic " + Base64.getEncoder().encodeToString(token.getBytes(UTF_8)); - } - - @Nullable - String getCredentials() { - return basicCredentials; - } -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/elasticsearch/DynamicCredentialsFileLoader.java b/zipkin-server/src/main/java/zipkin2/server/internal/elasticsearch/DynamicCredentialsFileLoader.java deleted file mode 100644 index 68ddbe52a79..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/elasticsearch/DynamicCredentialsFileLoader.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal.elasticsearch; - -import java.io.FileInputStream; -import java.io.IOException; -import java.util.Properties; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import zipkin2.internal.Nullable; - -import static zipkin2.server.internal.elasticsearch.ZipkinElasticsearchStorageConfiguration.PASSWORD; -import static zipkin2.server.internal.elasticsearch.ZipkinElasticsearchStorageConfiguration.USERNAME; - -/** - * Loads username/password from credentials file. - * - *

NOTE: This implementation loops instead of using {@link java.nio.file.WatchService}. - * This means that spans will drop and api failures will occur for any time remaining in the refresh - * interval. A future version can tighten this by also using poll events. - */ -class DynamicCredentialsFileLoader implements Runnable { - static final Logger LOGGER = LoggerFactory.getLogger(DynamicCredentialsFileLoader.class); - - private final String credentialsFile; - - private final BasicCredentials basicCredentials; - - public DynamicCredentialsFileLoader(BasicCredentials basicCredentials, - String credentialsFile) { - this.basicCredentials = basicCredentials; - this.credentialsFile = credentialsFile; - } - - @Override public void run() { - try { - updateCredentialsFromProperties(); - } catch (Exception e) { - LOGGER.error("Error loading elasticsearch credentials", e); - } - } - - void updateCredentialsFromProperties() throws IOException { - Properties properties = new Properties(); - try (FileInputStream is = new FileInputStream(credentialsFile)) { - properties.load(is); - } - String username = ensureNotEmptyOrNull(properties, credentialsFile, USERNAME); - String password = ensureNotEmptyOrNull(properties, credentialsFile, PASSWORD); - basicCredentials.updateCredentials(username, password); - } - - @Nullable static String ensureNotEmptyOrNull(Properties properties, String fileName, String name) { - String value = properties.getProperty(name); - if (value == null) { - throw new IllegalStateException("no " + name + " property in " + fileName); - } - value = value.trim(); - if ("".equals(value)) { - throw new IllegalStateException("empty " + name + " property in " + fileName); - } - return value; - } -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/elasticsearch/HttpClientFactory.java b/zipkin-server/src/main/java/zipkin2/server/internal/elasticsearch/HttpClientFactory.java deleted file mode 100644 index 64fe18272fc..00000000000 --- 
a/zipkin-server/src/main/java/zipkin2/server/internal/elasticsearch/HttpClientFactory.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.server.internal.elasticsearch; - -import com.linecorp.armeria.client.ClientFactory; -import com.linecorp.armeria.client.ClientOptions; -import com.linecorp.armeria.client.ClientOptionsBuilder; -import com.linecorp.armeria.client.WebClient; -import com.linecorp.armeria.client.encoding.DecodingClient; -import com.linecorp.armeria.client.endpoint.EndpointGroup; -import com.linecorp.armeria.client.logging.ContentPreviewingClient; -import com.linecorp.armeria.client.logging.LoggingClient; -import com.linecorp.armeria.client.logging.LoggingClientBuilder; -import com.linecorp.armeria.client.metric.MetricCollectingClient; -import com.linecorp.armeria.common.HttpHeaderNames; -import com.linecorp.armeria.common.HttpHeaders; -import com.linecorp.armeria.common.SessionProtocol; -import com.linecorp.armeria.common.logging.LogLevel; -import com.linecorp.armeria.common.metric.MeterIdPrefixFunction; -import java.io.Closeable; -import java.util.List; -import java.util.function.Consumer; -import java.util.function.Function; -import zipkin2.server.internal.elasticsearch.ZipkinElasticsearchStorageProperties.HttpLogging; - -// Exposed as a bean so that zipkin-aws can use this for api requests to get initial endpoints. 
-public class HttpClientFactory implements Function, Closeable { - final SessionProtocol protocol; - final ClientOptions options; - final ClientFactory clientFactory; - final int timeout; - final List> customizers; - - HttpClientFactory(ZipkinElasticsearchStorageProperties es, ClientFactory factory, - SessionProtocol protocol, List> customizers - ) { - this.clientFactory = factory; - this.protocol = protocol; - this.customizers = customizers; - this.timeout = es.getTimeout(); - HttpLogging httpLogging = es.getHttpLogging(); - ClientOptionsBuilder options = ClientOptions.builder() - .decorator(MetricCollectingClient.newDecorator( - MeterIdPrefixFunction.ofDefault("elasticsearch"))) - .decorator(DecodingClient.newDecorator()); - - configureHttpLogging(httpLogging, options); - this.options = configureOptionsExceptHttpLogging(options).build(); - } - - void configureHttpLogging(HttpLogging httpLogging, ClientOptionsBuilder options) { - if (httpLogging == HttpLogging.NONE) return; - LoggingClientBuilder loggingBuilder = LoggingClient.builder() - .requestLogLevel(LogLevel.INFO) - .successfulResponseLogLevel(LogLevel.INFO) - .requestHeadersSanitizer((ctx, headers) -> { - if (!headers.contains(HttpHeaderNames.AUTHORIZATION)) { - return headers; - } - // TODO(anuraaga): Add unit tests after https://github.com/line/armeria/issues/2220 - return headers.toBuilder().set(HttpHeaderNames.AUTHORIZATION, "****").build(); - }); - switch (httpLogging) { - case HEADERS: - loggingBuilder.contentSanitizer((ctx, unused) -> ""); - break; - case BASIC: - loggingBuilder.contentSanitizer((ctx, unused) -> ""); - loggingBuilder.headersSanitizer((ctx, unused) -> HttpHeaders.of()); - break; - case BODY: - default: - break; - } - options.decorator(loggingBuilder.newDecorator()); - if (httpLogging == HttpLogging.BODY) { - options.decorator(ContentPreviewingClient.newDecorator(Integer.MAX_VALUE)); - } - } - - @Override public WebClient apply(EndpointGroup endpoint) { - return 
WebClient.builder(protocol, endpoint) - .options(options) - .build(); - } - - @Override public void close() { - clientFactory.close(); - } - - /** This takes care to not expose health checks into wire level logging */ - ClientOptionsBuilder configureOptionsExceptHttpLogging(ClientOptionsBuilder options) { - options.factory(clientFactory).responseTimeoutMillis(timeout).writeTimeoutMillis(timeout); - customizers.forEach(c -> c.accept(options)); - return options; - } -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/elasticsearch/InitialEndpointSupplier.java b/zipkin-server/src/main/java/zipkin2/server/internal/elasticsearch/InitialEndpointSupplier.java deleted file mode 100644 index c90600c8e69..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/elasticsearch/InitialEndpointSupplier.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal.elasticsearch; - -import com.linecorp.armeria.client.Endpoint; -import com.linecorp.armeria.client.endpoint.EndpointGroup; -import com.linecorp.armeria.client.endpoint.dns.DnsAddressEndpointGroup; -import com.linecorp.armeria.common.SessionProtocol; -import java.net.URI; -import java.util.ArrayList; -import java.util.List; -import java.util.function.Supplier; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import zipkin2.internal.Nullable; - -final class InitialEndpointSupplier implements Supplier { - static final Logger LOGGER = LoggerFactory.getLogger(InitialEndpointSupplier.class); - - final String hosts; - final SessionProtocol sessionProtocol; - - InitialEndpointSupplier(SessionProtocol sessionProtocol, @Nullable String hosts) { - if (sessionProtocol == null) throw new NullPointerException("sessionProtocol == null"); - this.sessionProtocol = sessionProtocol; - this.hosts = - hosts == null || hosts.isEmpty() ? sessionProtocol.uriText() + "://localhost:9200" : hosts; - } - - @Override public EndpointGroup get() { - List endpointGroups = new ArrayList<>(); - for (String hostText : hosts.split(",", 100)) { - if ("".equals(hostText)) continue; // possibly extra comma - - URI url; - if (hostText.startsWith("http://") || hostText.startsWith("https://")) { - url = URI.create(hostText); - } else if (!sessionProtocol.isTls() && hostText.indexOf(':') == -1) { - url = URI.create(sessionProtocol.uriText() + "://" + hostText + ":9200"); - } else { - url = URI.create(sessionProtocol.uriText() + "://" + hostText); - } - - String host = url.getHost(); - if (host == null) { - LOGGER.warn("Skipping invalid ES host {}", url); - continue; - } - - int port = getPort(url); - - if (port == 9300) { - LOGGER.warn("Native transport no longer supported. 
Changing {} to http port 9200", host); - port = 9200; - } - - if (isIpAddress(host) || host.equals("localhost")) { - endpointGroups.add(EndpointGroup.of(Endpoint.of(host, port))); - } else { - // A host that isn't an IP may resolve to multiple IP addresses, so we use a endpoint - // group to round-robin over them. Users can mix addresses that resolve to multiple IPs - // with single IPs freely, they'll all get used. - endpointGroups.add(DnsAddressEndpointGroup.builder(host).port(port).build()); - } - } - - if (endpointGroups.isEmpty()) { - throw new IllegalArgumentException("No valid endpoints found in ES hosts: " + hosts); - } - - return EndpointGroup.of(endpointGroups); - } - - int getPort(URI url) { - int port = url.getPort(); - return port != -1 ? port : sessionProtocol.defaultPort(); - } - - static boolean isIpAddress(String address) { - return zipkin2.Endpoint.newBuilder().parseIp(address); - } - - @Override public String toString() { - return hosts; - } -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/elasticsearch/LazyHttpClientImpl.java b/zipkin-server/src/main/java/zipkin2/server/internal/elasticsearch/LazyHttpClientImpl.java deleted file mode 100644 index 2f385a109c3..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/elasticsearch/LazyHttpClientImpl.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal.elasticsearch; - -import com.linecorp.armeria.client.Endpoint; -import com.linecorp.armeria.client.WebClient; -import com.linecorp.armeria.client.endpoint.EndpointGroup; -import com.linecorp.armeria.client.endpoint.healthcheck.HealthCheckedEndpointGroup; -import com.linecorp.armeria.client.metric.MetricCollectingClient; -import com.linecorp.armeria.common.SessionProtocol; -import com.linecorp.armeria.common.metric.MeterIdPrefixFunction; -import io.micrometer.core.instrument.MeterRegistry; -import java.util.function.Supplier; -import zipkin2.elasticsearch.ElasticsearchStorage.LazyHttpClient; - -final class LazyHttpClientImpl implements LazyHttpClient { - final HttpClientFactory factory; - final SessionProtocol protocol; - final Supplier initialEndpoints; - final ZipkinElasticsearchStorageProperties.HealthCheck healthCheck; - final int timeoutMillis; - final MeterRegistry meterRegistry; - - volatile WebClient result; - - LazyHttpClientImpl(HttpClientFactory factory, SessionProtocol protocol, - Supplier initialEndpoints, ZipkinElasticsearchStorageProperties es, - MeterRegistry meterRegistry) { - this.factory = factory; - this.protocol = protocol; - this.initialEndpoints = initialEndpoints; - this.healthCheck = es.getHealthCheck(); - this.timeoutMillis = es.getTimeout(); - this.meterRegistry = meterRegistry; - } - - @Override public WebClient get() { - if (result == null) { - synchronized (this) { - if (result == null) { - result = factory.apply(getEndpoint()); - } - } - } - return result; - } - - EndpointGroup getEndpoint() { - EndpointGroup initial = initialEndpoints.get(); - // Only health-check when there are alternative endpoints. There aren't when instanceof Endpoint - if (initial instanceof Endpoint || !healthCheck.isEnabled()) return initial; - - // Wrap the result when health checking is enabled. 
- return decorateHealthCheck(initial); - } - - // Enables health-checking of an endpoint group, so we only send requests to endpoints that are up - HealthCheckedEndpointGroup decorateHealthCheck(EndpointGroup endpointGroup) { - HealthCheckedEndpointGroup healthChecked = - HealthCheckedEndpointGroup.builder(endpointGroup, "/_cluster/health") - .protocol(protocol) - .useGet(true) - .clientFactory(factory.clientFactory) - .withClientOptions(options -> { - factory.configureHttpLogging(healthCheck.getHttpLogging(), options); - factory.configureOptionsExceptHttpLogging(options); - options.decorator(MetricCollectingClient.newDecorator( - MeterIdPrefixFunction.ofDefault("elasticsearch-healthcheck"))); - options.decorator((delegate, ctx, req) -> { - ctx.logBuilder().name("health-check"); - return delegate.execute(ctx, req); - }); - return options; - }) - .retryInterval(healthCheck.getInterval()) - .build(); - healthChecked.newMeterBinder("elasticsearch").bindTo(meterRegistry); - return healthChecked; - } - - @Override public final String toString() { - return initialEndpoints.toString(); - } -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/elasticsearch/SslUtil.java b/zipkin-server/src/main/java/zipkin2/server/internal/elasticsearch/SslUtil.java deleted file mode 100644 index 108ff7598d7..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/elasticsearch/SslUtil.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. 
See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.server.internal.elasticsearch; - -import java.net.URL; -import java.security.KeyStore; -import javax.net.ssl.KeyManagerFactory; -import javax.net.ssl.TrustManagerFactory; -import org.springframework.util.ResourceUtils; -import zipkin2.server.internal.elasticsearch.ZipkinElasticsearchStorageProperties.Ssl; - -// snippets adapted from com.linecorp.armeria.internal.spring.ArmeriaConfigurationUtil -final class SslUtil { - - static KeyManagerFactory getKeyManagerFactory(Ssl ssl) throws Exception { - KeyStore store = - loadKeyStore(ssl.getKeyStoreType(), ssl.getKeyStore(), ssl.getKeyStorePassword()); - - KeyManagerFactory keyManagerFactory = - KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); - - String keyPassword = ssl.getKeyStorePassword(); - keyManagerFactory.init(store, keyPassword != null ? keyPassword.toCharArray() : null); - return keyManagerFactory; - } - - static TrustManagerFactory getTrustManagerFactory(Ssl ssl) throws Exception { - KeyStore store = - loadKeyStore(ssl.getTrustStoreType(), ssl.getTrustStore(), ssl.getTrustStorePassword()); - - TrustManagerFactory trustManagerFactory = - TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); - trustManagerFactory.init(store); - return trustManagerFactory; - } - - static KeyStore loadKeyStore(String type, String resource, String password) throws Exception { - if (resource == null) return null; - KeyStore store = KeyStore.getInstance(type != null ? type : "JKS"); - URL url = ResourceUtils.getURL(resource); - store.load(url.openStream(), password != null ? 
password.toCharArray() : null); - return store; - } -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/elasticsearch/ZipkinElasticsearchStorageConfiguration.java b/zipkin-server/src/main/java/zipkin2/server/internal/elasticsearch/ZipkinElasticsearchStorageConfiguration.java deleted file mode 100644 index 13d66bbb13c..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/elasticsearch/ZipkinElasticsearchStorageConfiguration.java +++ /dev/null @@ -1,238 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal.elasticsearch; - -import brave.CurrentSpanCustomizer; -import brave.SpanCustomizer; -import brave.http.HttpTracing; -import com.linecorp.armeria.client.ClientFactory; -import com.linecorp.armeria.client.ClientFactoryBuilder; -import com.linecorp.armeria.client.ClientOptionsBuilder; -import com.linecorp.armeria.client.brave.BraveClient; -import com.linecorp.armeria.client.endpoint.EndpointGroup; -import com.linecorp.armeria.common.SessionProtocol; -import com.linecorp.armeria.common.logging.RequestLog; -import com.linecorp.armeria.common.logging.RequestLogProperty; -import io.micrometer.core.instrument.MeterRegistry; -import io.micrometer.core.instrument.util.NamedThreadFactory; -import java.io.IOException; -import java.util.List; -import java.util.Optional; -import java.util.concurrent.Executors; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.ScheduledFuture; -import java.util.concurrent.TimeUnit; -import java.util.function.Consumer; -import java.util.function.Supplier; -import javax.net.ssl.KeyManagerFactory; -import javax.net.ssl.TrustManagerFactory; -import org.springframework.beans.factory.annotation.Qualifier; -import org.springframework.beans.factory.annotation.Value; -import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean; -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; -import org.springframework.boot.context.properties.EnableConfigurationProperties; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Condition; -import org.springframework.context.annotation.ConditionContext; -import org.springframework.context.annotation.Conditional; -import org.springframework.context.annotation.Configuration; -import org.springframework.core.type.AnnotatedTypeMetadata; -import zipkin2.elasticsearch.ElasticsearchStorage; -import zipkin2.server.internal.ConditionalOnSelfTracing; -import 
zipkin2.storage.StorageComponent; - -import static zipkin2.server.internal.elasticsearch.ZipkinElasticsearchStorageProperties.Ssl; - -@Configuration(proxyBeanMethods = false) -@EnableConfigurationProperties(ZipkinElasticsearchStorageProperties.class) -@ConditionalOnProperty(name = "zipkin.storage.type", havingValue = "elasticsearch") -@ConditionalOnMissingBean(StorageComponent.class) -public class ZipkinElasticsearchStorageConfiguration { - static final String QUALIFIER = "zipkinElasticsearch"; - static final String USERNAME = "zipkin.storage.elasticsearch.username"; - static final String PASSWORD = "zipkin.storage.elasticsearch.password"; - static final String CREDENTIALS_FILE = - "zipkin.storage.elasticsearch.credentials-file"; - static final String CREDENTIALS_REFRESH_INTERVAL = - "zipkin.storage.elasticsearch.credentials-refresh-interval"; - - // Exposed as a bean so that zipkin-aws can override this as sourced from the AWS endpoints api - @Bean @Qualifier(QUALIFIER) @ConditionalOnMissingBean - Supplier esInitialEndpoints( - SessionProtocol esSessionProtocol, ZipkinElasticsearchStorageProperties es) { - return new InitialEndpointSupplier(esSessionProtocol, es.getHosts()); - } - - // Exposed as a bean so that zipkin-aws can override this to always be SSL - @Bean @Qualifier(QUALIFIER) @ConditionalOnMissingBean - SessionProtocol esSessionProtocol(ZipkinElasticsearchStorageProperties es) { - if (es.getHosts() == null) return SessionProtocol.HTTP; - if (es.getHosts().contains("https://")) return SessionProtocol.HTTPS; - return SessionProtocol.HTTP; - } - - // exposed as a bean so that we can test TLS by swapping it out. - // TODO: see if we can override the TLS via properties instead as that has less surface area. 
- @Bean @Qualifier(QUALIFIER) @ConditionalOnMissingBean ClientFactory esClientFactory( - ZipkinElasticsearchStorageProperties es, - MeterRegistry meterRegistry) throws Exception { - ClientFactoryBuilder builder = ClientFactory.builder(); - - Ssl ssl = es.getSsl(); - if (ssl.isNoVerify()) builder.tlsNoVerify(); - // Allow use of a custom KeyStore or TrustStore when connecting to Elasticsearch - if (ssl.getKeyStore() != null || ssl.getTrustStore() != null) configureSsl(builder, ssl); - - // Elasticsearch 7 never returns a response when receiving an HTTP/2 preface instead of the more - // valid behavior of returning a bad request response, so we can't use the preface. - // TODO: find or raise a bug with Elastic - return builder.useHttp2Preface(false) - .connectTimeoutMillis(es.getTimeout()) - .meterRegistry(meterRegistry) - .build(); - } - - @Bean HttpClientFactory esHttpClientFactory(ZipkinElasticsearchStorageProperties es, - @Qualifier(QUALIFIER) ClientFactory factory, - @Qualifier(QUALIFIER) SessionProtocol protocol, - @Qualifier(QUALIFIER) List> options - ) { - return new HttpClientFactory(es, factory, protocol, options); - } - - @Bean @ConditionalOnMissingBean StorageComponent storage( - ZipkinElasticsearchStorageProperties es, - HttpClientFactory esHttpClientFactory, - MeterRegistry meterRegistry, - @Qualifier(QUALIFIER) SessionProtocol protocol, - @Qualifier(QUALIFIER) Supplier initialEndpoints, - @Value("${zipkin.query.lookback:86400000}") int namesLookback, - @Value("${zipkin.storage.strict-trace-id:true}") boolean strictTraceId, - @Value("${zipkin.storage.search-enabled:true}") boolean searchEnabled, - @Value("${zipkin.storage.autocomplete-keys:}") List autocompleteKeys, - @Value("${zipkin.storage.autocomplete-ttl:3600000}") int autocompleteTtl, - @Value("${zipkin.storage.autocomplete-cardinality:20000}") int autocompleteCardinality) { - ElasticsearchStorage.Builder builder = es - .toBuilder(new LazyHttpClientImpl(esHttpClientFactory, protocol, 
initialEndpoints, es, - meterRegistry)) - .namesLookback(namesLookback) - .strictTraceId(strictTraceId) - .searchEnabled(searchEnabled) - .autocompleteKeys(autocompleteKeys) - .autocompleteTtl(autocompleteTtl) - .autocompleteCardinality(autocompleteCardinality); - - return builder.build(); - } - - @Bean @Qualifier(QUALIFIER) @Conditional(BasicAuthRequired.class) - Consumer esBasicAuth( - @Qualifier(QUALIFIER) BasicCredentials basicCredentials) { - return new Consumer() { - @Override public void accept(ClientOptionsBuilder client) { - client.decorator( - delegate -> new BasicAuthInterceptor(delegate, basicCredentials)); - } - - @Override public String toString() { - return "BasicAuthCustomizer{basicCredentials=}"; - } - }; - } - - @Bean @Qualifier(QUALIFIER) @Conditional(BasicAuthRequired.class) - BasicCredentials basicCredentials(ZipkinElasticsearchStorageProperties es) { - if (isEmpty(es.getUsername()) || isEmpty(es.getPassword())) { - return new BasicCredentials(); - } - return new BasicCredentials(es.getUsername(), es.getPassword()); - } - - @Bean(destroyMethod = "shutdown") @Qualifier(QUALIFIER) @Conditional(DynamicRefreshRequired.class) - ScheduledExecutorService dynamicCredentialsScheduledExecutorService( - @Value("${" + CREDENTIALS_FILE + "}") String credentialsFile, - @Value("${" + CREDENTIALS_REFRESH_INTERVAL + "}") Integer credentialsRefreshInterval, - @Qualifier(QUALIFIER) BasicCredentials basicCredentials) throws IOException { - ScheduledExecutorService ses = Executors.newSingleThreadScheduledExecutor( - new NamedThreadFactory("zipkin-load-es-credentials")); - DynamicCredentialsFileLoader credentialsFileLoader = - new DynamicCredentialsFileLoader(basicCredentials, credentialsFile); - credentialsFileLoader.updateCredentialsFromProperties(); - ScheduledFuture future = ses.scheduleAtFixedRate(credentialsFileLoader, - 0, credentialsRefreshInterval, TimeUnit.SECONDS); - if (future.isDone()) throw new RuntimeException("credential refresh thread didn't 
start"); - return ses; - } - - @Bean @Qualifier(QUALIFIER) @ConditionalOnSelfTracing - Consumer esTracing(Optional maybeHttpTracing) { - if (!maybeHttpTracing.isPresent()) { - // TODO: is there a special cased empty consumer we can use here? I suspect debug is cluttered - // Alternatively, check why we would ever get here if ConditionalOnSelfTracing matches - return client -> { - }; - } - - HttpTracing httpTracing = maybeHttpTracing.get().clientOf("elasticsearch"); - SpanCustomizer spanCustomizer = CurrentSpanCustomizer.create(httpTracing.tracing()); - - return client -> { - client.decorator((delegate, ctx, req) -> { - // We only need the name if it's available and can unsafely access the partially filled log. - RequestLog log = ctx.log().partial(); - if (log.isAvailable(RequestLogProperty.NAME)) { - String name = log.name(); - if (name != null) { - // override the span name if set - spanCustomizer.name(name); - } - } - return delegate.execute(ctx, req); - }); - // the tracing decorator is added last so that it encloses the attempt to overwrite the name. 
- client.decorator(BraveClient.newDecorator(httpTracing)); - }; - } - - static final class BasicAuthRequired implements Condition { - @Override public boolean matches(ConditionContext condition, AnnotatedTypeMetadata ignored) { - String userName = - condition.getEnvironment().getProperty(USERNAME); - String password = - condition.getEnvironment().getProperty(PASSWORD); - String credentialsFile = - condition.getEnvironment().getProperty(CREDENTIALS_FILE); - return (!isEmpty(userName) && !isEmpty(password)) || !isEmpty(credentialsFile); - } - } - - static final class DynamicRefreshRequired implements Condition { - @Override public boolean matches(ConditionContext condition, AnnotatedTypeMetadata ignored) { - return !isEmpty(condition.getEnvironment().getProperty(CREDENTIALS_FILE)); - } - } - - static ClientFactoryBuilder configureSsl(ClientFactoryBuilder builder, Ssl ssl) throws Exception { - final KeyManagerFactory keyManagerFactory = SslUtil.getKeyManagerFactory(ssl); - final TrustManagerFactory trustManagerFactory = SslUtil.getTrustManagerFactory(ssl); - return builder.tlsCustomizer(sslContextBuilder -> { - sslContextBuilder.keyManager(keyManagerFactory); - sslContextBuilder.trustManager(trustManagerFactory); - }); - } - - private static boolean isEmpty(String s) { - return s == null || s.isEmpty(); - } -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/elasticsearch/ZipkinElasticsearchStorageProperties.java b/zipkin-server/src/main/java/zipkin2/server/internal/elasticsearch/ZipkinElasticsearchStorageProperties.java deleted file mode 100644 index 5685b920b16..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/elasticsearch/ZipkinElasticsearchStorageProperties.java +++ /dev/null @@ -1,377 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.server.internal.elasticsearch; - -import java.io.Serializable; -import java.time.Duration; -import java.time.temporal.ChronoUnit; -import java.util.logging.Logger; -import org.springframework.boot.context.properties.ConfigurationProperties; -import org.springframework.boot.convert.DurationUnit; -import zipkin2.elasticsearch.ElasticsearchStorage; -import zipkin2.elasticsearch.ElasticsearchStorage.LazyHttpClient; - -/** - * Settings for Elasticsearch client connection - *

{@code
- * zipkin.storage.elasticsearch:
- *   hosts: localhost:9200
- *   pipeline: my_pipeline
- *   timeout: 10000
- *   index: zipkin
- *   date-separator: -
- *   index-shards: 5
- *   index-replicas: 1
- *   ensure-templates: true
- *   username: username
- *   password: password
- *   credentials-file: credentialsFile
- *   credentials-refresh-interval: 1
- *   http-logging: HEADERS
- *   ssl:
- *     key-store: keystore.p12
- *     key-store-password: changeme
- *     key-store-type: PKCS12
- *     trust-store: truststore.p12
- *     trust-store-password: changeme
- *     trust-store-type: PKCS12
- *   health-check:
- *     enabled: true
- *     http-logging: HEADERS
- *     interval: 3s
- *   template-priority: 0
- * }
- */ -@ConfigurationProperties("zipkin.storage.elasticsearch") -class ZipkinElasticsearchStorageProperties implements Serializable { // for Spark jobs - /** - * Sets the level of logging for HTTP requests made by the Elasticsearch client. If not set or - * none, logging will be disabled. - */ - enum HttpLogging { - NONE, - BASIC, - HEADERS, - BODY - } - - public static class Ssl { - private String keyStore = emptyToNull(System.getProperty("javax.net.ssl.keyStore")); - private String keyStorePassword = - emptyToNull(System.getProperty("javax.net.ssl.keyStorePassword")); - private String keyStoreType = emptyToNull(System.getProperty("javax.net.ssl.keyStoreType")); - private String trustStore = emptyToNull(System.getProperty("javax.net.ssl.trustStore")); - private String trustStorePassword = - emptyToNull(System.getProperty("javax.net.ssl.trustStorePassword")); - private String trustStoreType = emptyToNull(System.getProperty("javax.net.ssl.trustStoreType")); - /** Disables the verification of server's key certificate chain. 
*/ - boolean noVerify = false; - - public String getKeyStore() { - return keyStore; - } - - public void setKeyStore(String keyStore) { - this.keyStore = keyStore; - } - - public String getKeyStorePassword() { - return keyStorePassword; - } - - public void setKeyStorePassword(String keyStorePassword) { - this.keyStorePassword = keyStorePassword; - } - - public String getKeyStoreType() { - return keyStoreType; - } - - public void setKeyStoreType(String keyStoreType) { - this.keyStoreType = keyStoreType; - } - - public String getTrustStore() { - return trustStore; - } - - public void setTrustStore(String trustStore) { - this.trustStore = trustStore; - } - - public String getTrustStorePassword() { - return trustStorePassword; - } - - public void setTrustStorePassword(String trustStorePassword) { - this.trustStorePassword = trustStorePassword; - } - - public String getTrustStoreType() { - return trustStoreType; - } - - public void setTrustStoreType(String trustStoreType) { - this.trustStoreType = trustStoreType; - } - - public boolean isNoVerify() { - return noVerify; - } - - public void setNoVerify(boolean noVerify) { - this.noVerify = noVerify; - } - } - - /** - * Configures the health-checking of endpoints by the Elasticsearch client. - */ - public static class HealthCheck { - /** Indicates health checking is enabled. */ - private boolean enabled = true; - /** When set, controls the volume of HTTP logging of the Elasticsearch API. */ - private HttpLogging httpLogging = HttpLogging.NONE; - - /** The time to wait between sending health check requests. 
*/ - @DurationUnit(ChronoUnit.MILLIS) - private Duration interval = Duration.ofSeconds(3); - - public boolean isEnabled() { - return enabled; - } - - public void setEnabled(boolean enabled) { - this.enabled = enabled; - } - - public HttpLogging getHttpLogging() { - return httpLogging; - } - - public void setHttpLogging(HttpLogging httpLogging) { - this.httpLogging = httpLogging; - } - - public Duration getInterval() { - return interval; - } - - public void setInterval(Duration interval) { - this.interval = interval; - } - } - - static final Logger log = Logger.getLogger(ZipkinElasticsearchStorageProperties.class.getName()); - - private static final long serialVersionUID = 0L; - - /** Indicates the ingest pipeline used before spans are indexed. */ - private String pipeline; - /** A comma separated list of base urls to connect to. */ - private String hosts = "http://localhost:9200"; - /** The index prefix to use when generating daily index names. */ - private String index; - /** The date separator used to create the index name. */ - private String dateSeparator; - /** Number of shards (horizontal scaling factor) per index. */ - private Integer indexShards; - /** Number of replicas (redundancy factor) per index. */ - private Integer indexReplicas; - /** False disables automatic index template creation. */ - private Boolean ensureTemplates; - /** username used for basic auth. Needed when Shield or X-Pack security is enabled */ - private String username; - /** password used for basic auth. Needed when Shield or X-Pack security is enabled */ - private String password; - /** - * credentialsFile is an absolute path refers to a properties-file used to store username and - * password - */ - private String credentialsFile; - /** Credentials refresh interval (in seconds) */ - private Integer credentialsRefreshInterval = 1; - /** When set, controls the volume of HTTP logging of the Elasticsearch API. 
*/ - private HttpLogging httpLogging = HttpLogging.NONE; - /** Connect, read and write socket timeouts (in milliseconds) for Elasticsearch API requests. */ - private Integer timeout = 10_000; - /** Overrides ssl configuration relating to the Elasticsearch client connection. */ - private Ssl ssl = new Ssl(); - - private Integer maxRequests; // unused - - private HealthCheck healthCheck = new HealthCheck(); - - private Integer templatePriority; - - public String getPipeline() { - return pipeline; - } - - public void setPipeline(String pipeline) { - this.pipeline = emptyToNull(pipeline); - } - - public String getHosts() { - return hosts; - } - - public void setHosts(String hosts) { - this.hosts = emptyToNull(hosts); - } - - public String getIndex() { - return index; - } - - public Integer getMaxRequests() { - return maxRequests; - } - - public void setMaxRequests(Integer maxRequests) { - this.maxRequests = maxRequests; - } - - public void setIndex(String index) { - this.index = emptyToNull(index); - } - - public Integer getIndexShards() { - return indexShards; - } - - public void setIndexShards(Integer indexShards) { - this.indexShards = indexShards; - } - - public Boolean isEnsureTemplates() { - return ensureTemplates; - } - - public void setEnsureTemplates(Boolean ensureTemplates) { - this.ensureTemplates = ensureTemplates; - } - - public String getDateSeparator() { - return dateSeparator; - } - - public void setDateSeparator(String dateSeparator) { - String trimmed = dateSeparator.trim(); - if (trimmed.length() > 1) { - throw new IllegalArgumentException("dateSeparator must be empty or a single character"); - } - this.dateSeparator = dateSeparator; - } - - public Integer getIndexReplicas() { - return indexReplicas; - } - - public void setIndexReplicas(Integer indexReplicas) { - this.indexReplicas = indexReplicas; - } - - public String getUsername() { - return username; - } - - public void setUsername(String username) { - this.username = emptyToNull(username); - } - 
- public String getPassword() { - return password; - } - - public void setPassword(String password) { - this.password = emptyToNull(password); - } - - public String getCredentialsFile() { - return credentialsFile; - } - - public void setCredentialsFile(final String credentialsFile) { - this.credentialsFile = credentialsFile; - } - - public Integer getCredentialsRefreshInterval() { - return credentialsRefreshInterval; - } - - public void setCredentialsRefreshInterval( - Integer credentialsRefreshInterval) { - this.credentialsRefreshInterval = credentialsRefreshInterval; - } - - public HttpLogging getHttpLogging() { - return httpLogging; - } - - public void setHttpLogging(HttpLogging httpLogging) { - this.httpLogging = httpLogging; - } - - public Integer getTimeout() { - return timeout; - } - - public void setTimeout(Integer timeout) { - this.timeout = timeout; - } - - public HealthCheck getHealthCheck() { - return healthCheck; - } - - public void setHealthCheck( - HealthCheck healthCheck) { - this.healthCheck = healthCheck; - } - - public Ssl getSsl() { - return ssl; - } - - public void setSsl(Ssl ssl) { - this.ssl = ssl; - } - - public Integer getTemplatePriority() { return templatePriority; } - - public void setTemplatePriority(Integer templatePriority) { this.templatePriority = templatePriority; } - - public ElasticsearchStorage.Builder toBuilder(LazyHttpClient httpClient) { - ElasticsearchStorage.Builder builder = ElasticsearchStorage.newBuilder(httpClient); - if (index != null) builder.index(index); - if (dateSeparator != null) { - builder.dateSeparator(dateSeparator.isEmpty() ? 0 : dateSeparator.charAt(0)); - } - if (pipeline != null) builder.pipeline(pipeline); - if (indexShards != null) builder.indexShards(indexShards); - if (indexReplicas != null) builder.indexReplicas(indexReplicas); - if (ensureTemplates != null) builder.ensureTemplates(ensureTemplates); - - if (maxRequests != null) { - log.warning("ES_MAX_REQUESTS is no longer honored. 
Use STORAGE_THROTTLE_ENABLED instead"); - } - if (templatePriority != null) builder.templatePriority(templatePriority); - return builder; - } - - private static String emptyToNull(String s) { - return "".equals(s) ? null : s; - } -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/health/ComponentHealth.java b/zipkin-server/src/main/java/zipkin2/server/internal/health/ComponentHealth.java deleted file mode 100644 index a499fbb07cf..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/health/ComponentHealth.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.server.internal.health; - -import zipkin2.Call; -import zipkin2.CheckResult; -import zipkin2.Component; -import zipkin2.internal.Nullable; - -final class ComponentHealth { - static final String STATUS_UP = "UP", STATUS_DOWN = "DOWN"; - - static ComponentHealth ofComponent(Component component) { - Throwable t = null; - try { - CheckResult check = component.check(); - if (!check.ok()) t = check.error(); - } catch (Throwable unexpected) { - Call.propagateIfFatal(unexpected); - t = unexpected; - } - if (t == null) return new ComponentHealth(component.toString(), STATUS_UP, null); - String message = t.getMessage(); - String error = t.getClass().getName() + (message != null ? 
": " + message : ""); - return new ComponentHealth(component.toString(), STATUS_DOWN, error); - } - - final String name; - final String status; - @Nullable final String error; - - ComponentHealth(String name, String status, String error) { - this.name = name; - this.status = status; - this.error = error; - } -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/health/ZipkinHealthController.java b/zipkin-server/src/main/java/zipkin2/server/internal/health/ZipkinHealthController.java deleted file mode 100644 index b0889071fc2..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/health/ZipkinHealthController.java +++ /dev/null @@ -1,158 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal.health; - -import com.fasterxml.jackson.core.JsonGenerator; -import com.linecorp.armeria.common.HttpResponse; -import com.linecorp.armeria.common.HttpStatus; -import com.linecorp.armeria.common.MediaType; -import com.linecorp.armeria.server.ServiceRequestContext; -import com.linecorp.armeria.server.annotation.Get; -import java.io.IOException; -import java.io.StringWriter; -import java.util.List; -import java.util.concurrent.CompletableFuture; -import java.util.stream.Collectors; -import zipkin2.Component; -import zipkin2.server.internal.JsonUtil; - -import static zipkin2.server.internal.ZipkinHttpConfiguration.MEDIA_TYPE_ACTUATOR; -import static zipkin2.server.internal.health.ComponentHealth.STATUS_DOWN; -import static zipkin2.server.internal.health.ComponentHealth.STATUS_UP; - -public class ZipkinHealthController { - final List components; - - ZipkinHealthController(List components) { - this.components = components; - } - - @Get("/actuator/health") - public CompletableFuture getActuatorHealth(ServiceRequestContext ctx) { - return health(ctx, MEDIA_TYPE_ACTUATOR); - } - - @Get("/health") - public CompletableFuture getHealth(ServiceRequestContext ctx) { - return health(ctx, MediaType.JSON_UTF_8); - } - - @SuppressWarnings("FutureReturnValueIgnored") - CompletableFuture health(ServiceRequestContext ctx, MediaType mediaType) { - CompletableFuture responseFuture = new CompletableFuture<>(); - ctx.whenRequestTimingOut().handle((unused, unused2) -> { - try { - String healthJson = writeJsonError("Timed out computing health status. " - + "This often means your storage backend is unreachable."); - responseFuture.complete(newHealthResponse(STATUS_DOWN, mediaType, healthJson)); - } catch (Throwable e) { - // Shouldn't happen since we serialize to an array. 
- responseFuture.completeExceptionally(e); - } - return null; - }); - - List> futures = components.stream() - .map(component -> - CompletableFuture.supplyAsync( - () -> ComponentHealth.ofComponent(component), - // Computing health of a component may block so we make sure to invoke in the blocking - // executor. - ctx.blockingTaskExecutor())) - .collect(Collectors.toList()); - - CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])) - .handle((unused, t) -> { - if (t != null) { - responseFuture.completeExceptionally(t); - } else { - responseFuture.complete(newHealthResponse( - futures.stream() - .map(CompletableFuture::join) - .collect(Collectors.toList()), - mediaType)); - } - return null; - }); - - return responseFuture; - } - - static HttpResponse newHealthResponse(List healths, MediaType mediaType) { - - String overallStatus = STATUS_UP; - for (ComponentHealth health : healths) { - if (health.status.equals(STATUS_DOWN)) overallStatus = STATUS_DOWN; - } - - final String healthJson; - try { - healthJson = writeJson(overallStatus, healths); - } catch (IOException e) { - // Can't have an exception writing to a string. - throw new Error(e); - } - return newHealthResponse(overallStatus, mediaType, healthJson); - } - - static HttpResponse newHealthResponse(String status, MediaType mediaType, String healthJson) { - HttpStatus code = status.equals(STATUS_UP) ? 
HttpStatus.OK : HttpStatus.SERVICE_UNAVAILABLE; - return HttpResponse.of(code, mediaType, healthJson); - } - - static String writeJsonError(String error) throws IOException { - StringWriter writer = new StringWriter(); - try (JsonGenerator generator = JsonUtil.createGenerator(writer)) { - generator.writeStartObject(); - generator.writeStringField("status", STATUS_DOWN); - generator.writeObjectFieldStart("zipkin"); - generator.writeStringField("status", STATUS_DOWN); - generator.writeObjectFieldStart("details"); - generator.writeStringField("error", error); - generator.writeEndObject(); // .zipkin.details - generator.writeEndObject(); // .zipkin - generator.writeEndObject(); // . - } - return writer.toString(); - } - - static String writeJson(String overallStatus, List healths) throws IOException { - StringWriter writer = new StringWriter(); - try (JsonGenerator generator = JsonUtil.createGenerator(writer)) { - generator.writeStartObject(); - generator.writeStringField("status", overallStatus); - generator.writeObjectFieldStart("zipkin"); - generator.writeStringField("status", overallStatus); - generator.writeObjectFieldStart("details"); - - for (ComponentHealth health : healths) { - generator.writeObjectFieldStart(health.name); - generator.writeStringField("status", health.status); - - if (health.status.equals(STATUS_DOWN)) { - generator.writeObjectFieldStart("details"); - generator.writeStringField("error", health.error); - generator.writeEndObject(); // .zipkin.details.healthName.details - } - - generator.writeEndObject(); // .zipkin.details.healthName - } - - generator.writeEndObject(); // .zipkin.details - generator.writeEndObject(); // .zipkin - generator.writeEndObject(); // . 
- } - return writer.toString(); - } -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/kafka/ZipkinKafkaCollectorConfiguration.java b/zipkin-server/src/main/java/zipkin2/server/internal/kafka/ZipkinKafkaCollectorConfiguration.java deleted file mode 100644 index 93fdbacb3bb..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/kafka/ZipkinKafkaCollectorConfiguration.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.server.internal.kafka; - -import org.springframework.boot.autoconfigure.condition.ConditionalOnClass; -import org.springframework.boot.context.properties.EnableConfigurationProperties; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Condition; -import org.springframework.context.annotation.ConditionContext; -import org.springframework.context.annotation.Conditional; -import org.springframework.core.type.AnnotatedTypeMetadata; -import zipkin2.collector.CollectorMetrics; -import zipkin2.collector.CollectorSampler; -import zipkin2.collector.kafka.KafkaCollector; -import zipkin2.storage.StorageComponent; - -/** - * This collector consumes a topic, decodes spans from thrift messages and stores them subject to - * sampling policy. 
- */ -@ConditionalOnClass(KafkaCollector.class) -@Conditional(ZipkinKafkaCollectorConfiguration.KafkaBootstrapServersSet.class) -@EnableConfigurationProperties(ZipkinKafkaCollectorProperties.class) -public class ZipkinKafkaCollectorConfiguration { // makes simple type name unique for /actuator/conditions - - @Bean(initMethod = "start") - KafkaCollector kafka( - ZipkinKafkaCollectorProperties properties, - CollectorSampler sampler, - CollectorMetrics metrics, - StorageComponent storage) { - return properties.toBuilder().sampler(sampler).metrics(metrics).storage(storage).build(); - } - /** - * This condition passes when {@link ZipkinKafkaCollectorProperties#getBootstrapServers()} is set - * to non-empty. - * - *

This is here because the yaml defaults this property to empty like this, and spring-boot - * doesn't have an option to treat empty properties as unset. - * - *

{@code
-   * bootstrap-servers: ${KAFKA_BOOTSTRAP_SERVERS:}
-   * }
- */ - static final class KafkaBootstrapServersSet implements Condition { - @Override - public boolean matches(ConditionContext context, AnnotatedTypeMetadata a) { - return !isEmpty( - context.getEnvironment().getProperty("zipkin.collector.kafka.bootstrap-servers")) && - notFalse(context.getEnvironment().getProperty("zipkin.collector.kafka.enabled")); - } - - private static boolean isEmpty(String s) { - return s == null || s.isEmpty(); - } - - private static boolean notFalse(String s){ - return s == null || !s.equals("false"); - } - } -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/kafka/ZipkinKafkaCollectorProperties.java b/zipkin-server/src/main/java/zipkin2/server/internal/kafka/ZipkinKafkaCollectorProperties.java deleted file mode 100644 index 18ce1133041..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/kafka/ZipkinKafkaCollectorProperties.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.server.internal.kafka; - -import java.util.LinkedHashMap; -import java.util.Map; -import org.springframework.boot.context.properties.ConfigurationProperties; -import zipkin2.collector.kafka.KafkaCollector; - -@ConfigurationProperties("zipkin.collector.kafka") -class ZipkinKafkaCollectorProperties { - /** Comma-separated list of Kafka bootstrap servers in the form [host]:[port],... 
*/ - private String bootstrapServers; - /** Kafka consumer group id used by the collector. */ - private String groupId; - /** Kafka topic span data will be retrieved from. */ - private String topic; - /** Number of Kafka consumer threads to run. */ - private Integer streams; - /** Additional Kafka consumer configuration. */ - private Map overrides = new LinkedHashMap<>(); - - public String getBootstrapServers() { - return bootstrapServers; - } - - public void setBootstrapServers(String bootstrapServers) { - this.bootstrapServers = emptyToNull(bootstrapServers); - } - - public String getGroupId() { - return groupId; - } - - public void setGroupId(String groupId) { - this.groupId = emptyToNull(groupId); - } - - public String getTopic() { - return topic; - } - - public void setTopic(String topic) { - this.topic = emptyToNull(topic); - } - - public Integer getStreams() { - return streams; - } - - public void setStreams(Integer streams) { - this.streams = streams; - } - - public Map getOverrides() { - return overrides; - } - - public void setOverrides(Map overrides) { - this.overrides = overrides; - } - - public KafkaCollector.Builder toBuilder() { - final KafkaCollector.Builder result = KafkaCollector.builder(); - if (bootstrapServers != null) result.bootstrapServers(bootstrapServers); - if (groupId != null) result.groupId(groupId); - if (topic != null) result.topic(topic); - if (streams != null) result.streams(streams); - if (overrides != null) result.overrides(overrides); - return result; - } - - private static String emptyToNull(String s) { - return "".equals(s) ? 
null : s; - } -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/mysql/ZipkinMySQLStorageConfiguration.java b/zipkin-server/src/main/java/zipkin2/server/internal/mysql/ZipkinMySQLStorageConfiguration.java deleted file mode 100644 index e3a6f538544..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/mysql/ZipkinMySQLStorageConfiguration.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal.mysql; - -import java.util.List; -import java.util.concurrent.Executor; -import javax.sql.DataSource; -import org.jooq.ExecuteListenerProvider; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.beans.factory.annotation.Value; -import org.springframework.boot.autoconfigure.condition.ConditionalOnClass; -import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean; -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; -import org.springframework.boot.context.properties.EnableConfigurationProperties; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Import; -import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor; -import zipkin2.storage.StorageComponent; -import zipkin2.storage.mysql.v1.MySQLStorage; - -@EnableConfigurationProperties(ZipkinMySQLStorageProperties.class) -@ConditionalOnClass(MySQLStorage.class) -@ConditionalOnProperty(name = "zipkin.storage.type", havingValue = "mysql") -@ConditionalOnMissingBean(StorageComponent.class) -@Import(ZipkinSelfTracingMySQLStorageConfiguration.class) -public class ZipkinMySQLStorageConfiguration { - @Autowired(required = false) ZipkinMySQLStorageProperties mysql; - @Autowired(required = false) ExecuteListenerProvider mysqlListener; - - @Bean @ConditionalOnMissingBean - Executor mysqlExecutor() { - ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); - executor.setThreadNamePrefix("ZipkinMySQLStorage-"); - executor.initialize(); - return executor; - } - - @Bean @ConditionalOnMissingBean - DataSource mysqlDataSource() { - return mysql.toDataSource(); - } - - @Bean StorageComponent storage( - Executor mysqlExecutor, - DataSource mysqlDataSource, - @Value("${zipkin.storage.strict-trace-id:true}") boolean strictTraceId, - @Value("${zipkin.storage.search-enabled:true}") boolean searchEnabled, - 
@Value("${zipkin.storage.autocomplete-keys:}") List autocompleteKeys) { - return MySQLStorage.newBuilder() - .strictTraceId(strictTraceId) - .searchEnabled(searchEnabled) - .autocompleteKeys(autocompleteKeys) - .executor(mysqlExecutor) - .datasource(mysqlDataSource) - .listenerProvider(mysqlListener) - .build(); - } -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/mysql/ZipkinMySQLStorageProperties.java b/zipkin-server/src/main/java/zipkin2/server/internal/mysql/ZipkinMySQLStorageProperties.java deleted file mode 100644 index 858d11765a7..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/mysql/ZipkinMySQLStorageProperties.java +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal.mysql; - -import com.zaxxer.hikari.HikariDataSource; -import java.io.Serializable; -import javax.sql.DataSource; -import org.springframework.boot.context.properties.ConfigurationProperties; -import org.springframework.util.StringUtils; - -@ConfigurationProperties("zipkin.storage.mysql") -class ZipkinMySQLStorageProperties implements Serializable { // for Spark jobs - private static final long serialVersionUID = 0L; - - private String jdbcUrl; - private String host = "localhost"; - private int port = 3306; - private String username; - private String password; - private String db = "zipkin"; - private int maxActive = 10; - private boolean useSsl; - - public String getJdbcUrl() { - return jdbcUrl; - } - - public void setJdbcUrl(String jdbcUrl) { - this.jdbcUrl = jdbcUrl; - } - - public String getHost() { - return host; - } - - public void setHost(String host) { - this.host = host; - } - - public int getPort() { - return port; - } - - public void setPort(int port) { - this.port = port; - } - - public String getUsername() { - return username; - } - - public void setUsername(String username) { - this.username = "".equals(username) ? null : username; - } - - public String getPassword() { - return password; - } - - public void setPassword(String password) { - this.password = "".equals(password) ? 
null : password; - } - - public String getDb() { - return db; - } - - public void setDb(String db) { - this.db = db; - } - - public int getMaxActive() { - return maxActive; - } - - public void setMaxActive(int maxActive) { - this.maxActive = maxActive; - } - - public boolean isUseSsl() { - return useSsl; - } - - public void setUseSsl(boolean useSsl) { - this.useSsl = useSsl; - } - - public DataSource toDataSource() { - HikariDataSource result = new HikariDataSource(); - result.setDriverClassName("org.mariadb.jdbc.Driver"); - result.setJdbcUrl(determineJdbcUrl()); - result.setMaximumPoolSize(getMaxActive()); - result.setUsername(getUsername()); - result.setPassword(getPassword()); - return result; - } - - private String determineJdbcUrl() { - if (StringUtils.hasText(getJdbcUrl())) { - return getJdbcUrl(); - } - - StringBuilder url = new StringBuilder(); - url.append("jdbc:mysql://"); - url.append(getHost()).append(":").append(getPort()); - url.append("/").append(getDb()); - url.append("?autoReconnect=true"); - url.append("&useSSL=").append(isUseSsl()); - url.append("&useUnicode=yes&characterEncoding=UTF-8"); - return url.toString(); - } -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/mysql/ZipkinSelfTracingMySQLStorageConfiguration.java b/zipkin-server/src/main/java/zipkin2/server/internal/mysql/ZipkinSelfTracingMySQLStorageConfiguration.java deleted file mode 100644 index d307b08023a..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/mysql/ZipkinSelfTracingMySQLStorageConfiguration.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.server.internal.mysql; - -import brave.Span; -import brave.propagation.CurrentTraceContext; -import brave.propagation.ThreadLocalSpan; -import com.linecorp.armeria.common.RequestContext; -import java.util.concurrent.Executor; -import org.jooq.ExecuteContext; -import org.jooq.ExecuteListenerProvider; -import org.jooq.impl.DefaultExecuteListener; -import org.jooq.impl.DefaultExecuteListenerProvider; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; -import org.springframework.context.annotation.Bean; -import zipkin2.server.internal.ConditionalOnSelfTracing; - -/** Sets up the MySQL tracing in Brave as an initialization. */ -@ConditionalOnSelfTracing -@ConditionalOnProperty(name = "zipkin.storage.type", havingValue = "mysql") -class ZipkinSelfTracingMySQLStorageConfiguration extends DefaultExecuteListener { - - @Autowired ZipkinMySQLStorageProperties mysql; - @Autowired CurrentTraceContext currentTraceContext; - @Autowired ThreadLocalSpan threadLocalSpan; - - @Bean ExecuteListenerProvider mysqlListener() { - return new DefaultExecuteListenerProvider(this); - } - - @Bean Executor mysqlExecutor() { - return makeContextAware( - new ZipkinMySQLStorageConfiguration().mysqlExecutor(), - currentTraceContext - ); - } - - /** - * Decorates the input such that the {@link RequestContext#current() current request context} and - * the and the {@link CurrentTraceContext#get() current trace context} at assembly time is made - * current when task is executed. 
- */ - static Executor makeContextAware(Executor delegate, CurrentTraceContext currentTraceContext) { - class TracingCurrentRequestContextExecutor implements Executor { - @Override public void execute(Runnable task) { - delegate.execute(RequestContext.current().makeContextAware(currentTraceContext.wrap(task))); - } - } - return new TracingCurrentRequestContextExecutor(); - } - - @Override public void renderEnd(ExecuteContext ctx) { - // don't start new traces (to prevent amplifying writes to local storage) - if (currentTraceContext.get() == null) return; - - // Gets the next span (and places it in scope) so code between here and postProcess can read it - Span span = threadLocalSpan.next(); - if (span == null || span.isNoop()) return; - - String sql = ctx.sql(); - int spaceIndex = sql.indexOf(' '); // Allow span names of single-word statements like COMMIT - span.kind(Span.Kind.CLIENT).name(spaceIndex == -1 ? sql : sql.substring(0, spaceIndex)); - span.tag("sql.query", sql); - span.remoteServiceName("mysql"); - span.remoteIpAndPort(mysql.getHost(), mysql.getPort()); - span.start(); - } - - @Override public void executeEnd(ExecuteContext ctx) { - Span span = ThreadLocalSpan.CURRENT_TRACER.remove(); - if (span == null || span.isNoop()) return; - if (ctx.sqlException() != null) span.error(ctx.sqlException()); - span.finish(); - } -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/package-info.java b/zipkin-server/src/main/java/zipkin2/server/internal/package-info.java deleted file mode 100644 index ad6179805f5..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/package-info.java +++ /dev/null @@ -1,18 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -/** - * Classes in this package are considered internal details to Zipkin's server and are unsupported - * unless integrated with our server build. - */ -package zipkin2.server.internal; diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/prometheus/ZipkinMetricsController.java b/zipkin-server/src/main/java/zipkin2/server/internal/prometheus/ZipkinMetricsController.java deleted file mode 100644 index 0a31e924a18..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/prometheus/ZipkinMetricsController.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal.prometheus; - -import com.fasterxml.jackson.core.JsonGenerator; -import com.linecorp.armeria.common.HttpResponse; -import com.linecorp.armeria.common.HttpStatus; -import com.linecorp.armeria.common.MediaType; -import com.linecorp.armeria.server.annotation.Get; -import io.micrometer.core.instrument.Counter; -import io.micrometer.core.instrument.Gauge; -import io.micrometer.core.instrument.Meter; -import io.micrometer.core.instrument.MeterRegistry; -import io.prometheus.client.CollectorRegistry; -import java.io.IOException; -import java.io.StringWriter; -import zipkin2.server.internal.JsonUtil; - -public class ZipkinMetricsController { - - final MeterRegistry meterRegistry; - final CollectorRegistry collectorRegistry; - - ZipkinMetricsController(MeterRegistry meterRegistry, CollectorRegistry collectorRegistry) { - this.meterRegistry = meterRegistry; - this.collectorRegistry = collectorRegistry; - } - - // Extracts Zipkin metrics to provide backward compatibility - @Get("/metrics") - public HttpResponse fetchMetricsFromMicrometer() throws IOException { - StringWriter writer = new StringWriter(); - JsonGenerator generator = JsonUtil.createGenerator(writer); - generator.writeStartObject(); - // Get the Zipkin Custom meters for constructing the Metrics endpoint - for (Meter meter : meterRegistry.getMeters()) { - String name = meter.getId().getName(); - if (!name.startsWith("zipkin_collector")) continue; - String transport = meter.getId().getTag("transport"); - if (transport == null) continue; - - Meter.Type type = meter.getId().getType(); - if (type == Meter.Type.COUNTER) { - generator.writeNumberField("counter." + name + "." + transport, ((Counter) meter).count()); - } else if (type == Meter.Type.GAUGE) { - generator.writeNumberField("gauge." + name + "." 
+ transport, ((Gauge) meter).value()); - } // We only use counters and gauges - } - generator.writeEndObject(); - generator.flush(); // instead of using try/finally as extra indent causes lines to wrap - return HttpResponse.of(HttpStatus.OK, MediaType.JSON, writer.toString()); - } -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/prometheus/ZipkinPrometheusMetricsConfiguration.java b/zipkin-server/src/main/java/zipkin2/server/internal/prometheus/ZipkinPrometheusMetricsConfiguration.java deleted file mode 100644 index bba70bde148..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/prometheus/ZipkinPrometheusMetricsConfiguration.java +++ /dev/null @@ -1,186 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal.prometheus; - -import com.linecorp.armeria.common.HttpRequest; -import com.linecorp.armeria.common.HttpResponse; -import com.linecorp.armeria.common.HttpStatus; -import com.linecorp.armeria.common.RequestContext; -import com.linecorp.armeria.common.logging.RequestLog; -import com.linecorp.armeria.server.HttpService; -import com.linecorp.armeria.server.Route; -import com.linecorp.armeria.server.ServiceRequestContext; -import com.linecorp.armeria.server.SimpleDecoratingHttpService; -import com.linecorp.armeria.spring.ArmeriaServerConfigurator; -import io.micrometer.core.instrument.Clock; -import io.micrometer.core.instrument.MeterRegistry; -import io.micrometer.core.instrument.Tag; -import io.micrometer.core.instrument.Timer; -import io.micrometer.core.instrument.binder.jvm.ClassLoaderMetrics; -import io.micrometer.core.instrument.binder.jvm.JvmGcMetrics; -import io.micrometer.core.instrument.binder.jvm.JvmMemoryMetrics; -import io.micrometer.core.instrument.binder.jvm.JvmThreadMetrics; -import io.micrometer.core.instrument.binder.system.ProcessorMetrics; -import io.micrometer.prometheus.PrometheusConfig; -import io.micrometer.prometheus.PrometheusMeterRegistry; -import io.netty.util.AttributeKey; -import io.prometheus.client.CollectorRegistry; -import java.util.Arrays; -import java.util.concurrent.TimeUnit; -import org.springframework.beans.factory.annotation.Value; -import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Configuration; -import org.springframework.core.annotation.Order; -import org.springframework.util.StringUtils; - -@Configuration(proxyBeanMethods=false) -public class ZipkinPrometheusMetricsConfiguration { - // from io.micrometer.spring.web.servlet.WebMvcTags - private static final Tag URI_NOT_FOUND = Tag.of("uri", "NOT_FOUND"); - private static final Tag URI_REDIRECTION = Tag.of("uri", 
"REDIRECTION"); - private static final Tag URI_TRACE_V2 = Tag.of("uri", "/api/v2/trace/{traceId}"); - // single-page app requests are forwarded to index: ZipkinUiConfiguration.forwardUiEndpoints - private static final Tag URI_CROSSROADS = Tag.of("uri", "/zipkin/index.html"); - - final String metricName; - - // https://docs.spring.io/spring-boot/docs/current/reference/htmlsingle/#production-ready-metrics-spring-mvc - ZipkinPrometheusMetricsConfiguration( - @Value("${management.metrics.web.server.requests-metric-name:http.server.requests}") - String metricName - ) { - this.metricName = metricName; - } - - @Bean @ConditionalOnMissingBean public Clock clock() { - return Clock.SYSTEM; - } - - @Bean @ConditionalOnMissingBean public PrometheusConfig config() { - return PrometheusConfig.DEFAULT; - } - - @Bean @ConditionalOnMissingBean public CollectorRegistry registry() { - return new CollectorRegistry(true); - } - - @Bean @ConditionalOnMissingBean public PrometheusMeterRegistry prometheusMeterRegistry( - PrometheusConfig config, CollectorRegistry registry, Clock clock) { - PrometheusMeterRegistry meterRegistry = new PrometheusMeterRegistry(config, registry, clock); - new JvmMemoryMetrics().bindTo(meterRegistry); - new JvmGcMetrics().bindTo(meterRegistry); - new JvmThreadMetrics().bindTo(meterRegistry); - new ClassLoaderMetrics().bindTo(meterRegistry); - new ProcessorMetrics().bindTo(meterRegistry); - return meterRegistry; - } - - // https://docs.spring.io/spring-boot/docs/current/reference/htmlsingle/#production-ready-metrics-spring-mvc - @Bean ArmeriaServerConfigurator httpRequestDurationConfigurator(MeterRegistry registry) { - return serverBuilder -> serverBuilder.routeDecorator() - .pathPrefix("/zipkin/api") - .pathPrefix("/api") - .build(s -> new MetricCollectingService(s, registry, metricName)); - } - - // We need to make sure not-found requests are still handled by a service to be decorated for - // adding metrics. 
We add a lower precedence path mapping so anything not mapped by another - // service is handled by this. - @Bean - @Order(1) - ArmeriaServerConfigurator notFoundMetricCollector() { - // Use glob instead of catch-all to avoid adding it to the trie router. - return sb -> sb.service(Route.builder().glob("/**").build(), - (ctx, req) -> HttpResponse.of(HttpStatus.NOT_FOUND)); - } - - static final class MetricCollectingService extends SimpleDecoratingHttpService { - final MeterRegistry registry; - final String metricName; - - MetricCollectingService(HttpService delegate, MeterRegistry registry, String metricName) { - super(delegate); - this.registry = registry; - this.metricName = metricName; - } - - @Override - public HttpResponse serve(ServiceRequestContext ctx, HttpRequest req) throws Exception { - setup(ctx, registry, metricName); - return unwrap().serve(ctx, req); - } - } - - // A variable to make sure setup method is not called twice. - private static final AttributeKey PROMETHEUS_METRICS_SET = - AttributeKey.valueOf(Boolean.class, "PROMETHEUS_METRICS_SET"); - - @SuppressWarnings("FutureReturnValueIgnored") // no known action to take following .thenAccept - public static void setup(RequestContext ctx, MeterRegistry registry, String metricName) { - if (ctx.hasAttr(PROMETHEUS_METRICS_SET)) { - return; - } - ctx.setAttr(PROMETHEUS_METRICS_SET, true); - ctx.log().whenComplete().thenAccept(log -> getTimeBuilder(log, metricName).register(registry) - .record(log.totalDurationNanos(), TimeUnit.NANOSECONDS)); - } - - private static Timer.Builder getTimeBuilder(RequestLog requestLog, String metricName) { - return Timer.builder(metricName) - .tags(getTags(requestLog)) - .description("Response time histogram") - .publishPercentileHistogram(); - } - - private static Iterable getTags(RequestLog requestLog) { - return Arrays.asList(Tag.of("method", requestLog.requestHeaders().method().toString()) - , uri(requestLog) - , Tag.of("status", 
Integer.toString(requestLog.responseHeaders().status().code())) - ); - } - - /** Ensure metrics cardinality doesn't blow up on variables */ - private static Tag uri(RequestLog requestLog) { - int status = requestLog.responseHeaders().status().code(); - if (status > 299 && status < 400) return URI_REDIRECTION; - if (status == 404) return URI_NOT_FOUND; - - String uri = getPathInfo(requestLog); - if (uri.startsWith("/zipkin")) { - if (uri.equals("/zipkin/") || uri.equals("/zipkin") - || uri.startsWith("/zipkin/traces/") - || uri.equals("/zipkin/dependency") - || uri.equals("/zipkin/traceViewer")) { - return URI_CROSSROADS; // single-page app route - } - - // un-map UI's api route - if (uri.startsWith("/zipkin/api")) { - uri = uri.replaceFirst("/zipkin", ""); - } - } - // handle templated routes instead of exploding on trace ID cardinality - if (uri.startsWith("/api/v2/trace/")) return URI_TRACE_V2; - return Tag.of("uri", uri); - } - - // from io.micrometer.spring.web.servlet.WebMvcTags - static String getPathInfo(RequestLog requestLog) { - String uri = requestLog.context().path(); - if (!StringUtils.hasText(uri)) return "/"; - return uri.replaceAll("//+", "/") - .replaceAll("/$", ""); - } -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/rabbitmq/ZipkinRabbitMQCollectorConfiguration.java b/zipkin-server/src/main/java/zipkin2/server/internal/rabbitmq/ZipkinRabbitMQCollectorConfiguration.java deleted file mode 100644 index 888798e4f42..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/rabbitmq/ZipkinRabbitMQCollectorConfiguration.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.server.internal.rabbitmq; - -import java.net.URISyntaxException; -import java.security.KeyManagementException; -import java.security.NoSuchAlgorithmException; -import org.springframework.boot.autoconfigure.condition.ConditionalOnClass; -import org.springframework.boot.context.properties.EnableConfigurationProperties; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Condition; -import org.springframework.context.annotation.ConditionContext; -import org.springframework.context.annotation.Conditional; -import org.springframework.core.type.AnnotatedTypeMetadata; -import zipkin2.collector.CollectorMetrics; -import zipkin2.collector.CollectorSampler; -import zipkin2.collector.rabbitmq.RabbitMQCollector; -import zipkin2.storage.StorageComponent; - -/** Auto-configuration for {@link RabbitMQCollector}. 
*/ -@ConditionalOnClass(RabbitMQCollector.class) -@Conditional(ZipkinRabbitMQCollectorConfiguration.RabbitMQAddressesOrUriSet.class) -@EnableConfigurationProperties(ZipkinRabbitMQCollectorProperties.class) -public class ZipkinRabbitMQCollectorConfiguration { - - @Bean(initMethod = "start") - RabbitMQCollector rabbitMq( - ZipkinRabbitMQCollectorProperties properties, - CollectorSampler sampler, - CollectorMetrics metrics, - StorageComponent storage) - throws NoSuchAlgorithmException, KeyManagementException, URISyntaxException { - return properties.toBuilder().sampler(sampler).metrics(metrics).storage(storage).build(); - } - /** - * This condition passes when {@link ZipkinRabbitMQCollectorProperties#getAddresses()} or {@link - * ZipkinRabbitMQCollectorProperties#getUri()} is set to a non-empty value. - * - *

This is here because the yaml defaults this property to empty like this, and Spring Boot - * doesn't have an option to treat empty properties as unset. - * - *

{@code
-   * addresses: ${RABBIT_ADDRESSES:}
-   * uri: ${RABBIT_URI:}
-   * }
- */ - static final class RabbitMQAddressesOrUriSet implements Condition { - @Override - public boolean matches(ConditionContext context, AnnotatedTypeMetadata a) { - return (!isEmpty(context.getEnvironment().getProperty("zipkin.collector.rabbitmq.addresses")) - || !isEmpty(context.getEnvironment().getProperty("zipkin.collector.rabbitmq.uri"))) && - notFalse(context.getEnvironment().getProperty("zipkin.collector.rabbitmq.enabled")); - } - - private static boolean isEmpty(String s) { - return s == null || s.isEmpty(); - } - - private static boolean notFalse(String s){ - return s == null || !s.equals("false"); - } - } -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/rabbitmq/ZipkinRabbitMQCollectorProperties.java b/zipkin-server/src/main/java/zipkin2/server/internal/rabbitmq/ZipkinRabbitMQCollectorProperties.java deleted file mode 100644 index c823b4f1d19..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/rabbitmq/ZipkinRabbitMQCollectorProperties.java +++ /dev/null @@ -1,145 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal.rabbitmq; - -import com.rabbitmq.client.ConnectionFactory; -import java.net.URI; -import java.net.URISyntaxException; -import java.security.KeyManagementException; -import java.security.NoSuchAlgorithmException; -import java.util.List; -import org.springframework.boot.context.properties.ConfigurationProperties; -import zipkin2.collector.rabbitmq.RabbitMQCollector; - -/** Properties for configuring and building a {@link RabbitMQCollector}. */ -@ConfigurationProperties("zipkin.collector.rabbitmq") -class ZipkinRabbitMQCollectorProperties { - static final URI EMPTY_URI = URI.create(""); - - /** RabbitMQ server addresses in the form of a (comma-separated) list of host:port pairs */ - private List addresses; - /** Number of concurrent consumers */ - private Integer concurrency = 1; - /** TCP connection timeout in milliseconds */ - private Integer connectionTimeout; - /** RabbitMQ user password */ - private String password; - /** RabbitMQ queue from which to collect the Zipkin spans */ - private String queue; - /** RabbitMQ username */ - private String username; - /** RabbitMQ virtual host */ - private String virtualHost; - /** Flag to use SSL */ - private Boolean useSsl; - /** - * RabbitMQ URI spec-compliant URI to connect to the RabbitMQ server. When used, other connection - * properties will be ignored. 
- */ - private URI uri; - - public List getAddresses() { - return addresses; - } - - public void setAddresses(List addresses) { - this.addresses = addresses; - } - - public int getConcurrency() { - return concurrency; - } - - public void setConcurrency(int concurrency) { - this.concurrency = concurrency; - } - - public Integer getConnectionTimeout() { - return connectionTimeout; - } - - public void setConnectionTimeout(Integer connectionTimeout) { - this.connectionTimeout = connectionTimeout; - } - - public String getPassword() { - return password; - } - - public void setPassword(String password) { - this.password = password; - } - - public String getQueue() { - return queue; - } - - public void setQueue(String queue) { - this.queue = queue; - } - - public String getUsername() { - return username; - } - - public void setUsername(String username) { - this.username = username; - } - - public String getVirtualHost() { - return virtualHost; - } - - public void setVirtualHost(String virtualHost) { - this.virtualHost = virtualHost; - } - - public Boolean getUseSsl() { - return useSsl; - } - - public void setUseSsl(Boolean useSsl) { - this.useSsl = useSsl; - } - - public URI getUri() { - return uri; - } - - public void setUri(URI uri) { - if (EMPTY_URI.equals(uri)) return; - this.uri = uri; - } - - public RabbitMQCollector.Builder toBuilder() - throws KeyManagementException, NoSuchAlgorithmException, URISyntaxException { - final RabbitMQCollector.Builder result = RabbitMQCollector.builder(); - ConnectionFactory connectionFactory = new ConnectionFactory(); - if (concurrency != null) result.concurrency(concurrency); - if (connectionTimeout != null) connectionFactory.setConnectionTimeout(connectionTimeout); - if (queue != null) result.queue(queue); - - if (uri != null) { - connectionFactory.setUri(uri); - } else { - if (addresses != null) result.addresses(addresses); - if (password != null) connectionFactory.setPassword(password); - if (username != null) 
connectionFactory.setUsername(username); - if (virtualHost != null) connectionFactory.setVirtualHost(virtualHost); - if (useSsl != null && useSsl) connectionFactory.useSslProtocol(); - } - result.connectionFactory(connectionFactory); - return result; - } -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/scribe/ZipkinScribeCollectorConfiguration.java b/zipkin-server/src/main/java/zipkin2/server/internal/scribe/ZipkinScribeCollectorConfiguration.java deleted file mode 100644 index 87cee440d02..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/scribe/ZipkinScribeCollectorConfiguration.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.server.internal.scribe; - -import org.springframework.beans.factory.annotation.Value; -import org.springframework.boot.autoconfigure.condition.ConditionalOnClass; -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; -import org.springframework.context.annotation.Bean; -import zipkin2.collector.CollectorMetrics; -import zipkin2.collector.CollectorSampler; -import zipkin2.collector.scribe.ScribeCollector; -import zipkin2.storage.StorageComponent; - -/** - * This collector accepts Scribe logs in a specified category. Each log entry is expected to contain - * a single span, which is TBinaryProtocol big-endian, then base64 encoded. 
Decoded spans are stored - * asynchronously. - */ -@ConditionalOnClass(ScribeCollector.class) -@ConditionalOnProperty(value = "zipkin.collector.scribe.enabled", havingValue = "true") -public class ZipkinScribeCollectorConfiguration { - /** The init method will block until the scribe port is listening, or crash on port conflict */ - @Bean(initMethod = "start") - ScribeCollector scribe( - @Value("${zipkin.collector.scribe.category:zipkin}") String category, - @Value("${zipkin.collector.scribe.port:9410}") int port, - CollectorSampler sampler, - CollectorMetrics metrics, - StorageComponent storage) { - return ScribeCollector.newBuilder() - .category(category) - .port(port) - .sampler(sampler) - .metrics(metrics) - .storage(storage) - .build(); - } -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/throttle/LimiterMetrics.java b/zipkin-server/src/main/java/zipkin2/server/internal/throttle/LimiterMetrics.java deleted file mode 100644 index 3ca66ff97a2..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/throttle/LimiterMetrics.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal.throttle; - -import io.micrometer.core.instrument.Counter; -import io.micrometer.core.instrument.MeterRegistry; -import zipkin2.collector.CollectorMetrics; - -/** Follows the same naming convention as {@link CollectorMetrics} */ -final class LimiterMetrics { - final Counter requests, requestsSucceeded, requestsIgnored, requestsDropped; - - LimiterMetrics(MeterRegistry registry) { - requests = Counter.builder("zipkin_storage.throttle.requests") - .description("cumulative amount of limiter requests acquired") - .register(registry); - requestsSucceeded = Counter.builder("zipkin_storage.throttle.requests_succeeded") - .description("cumulative amount of limiter requests acquired that later succeeded") - .register(registry); - requestsDropped = - Counter.builder("zipkin_storage.throttle.requests_dropped") - .description( - "cumulative amount of limiter requests acquired that later dropped due to capacity") - .register(registry); - requestsIgnored = - Counter.builder("zipkin_storage.throttle.requests_ignored") - .description( - "cumulative amount of limiter requests acquired that later dropped not due to capacity") - .register(registry); - } -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/throttle/MicrometerThrottleMetrics.java b/zipkin-server/src/main/java/zipkin2/server/internal/throttle/MicrometerThrottleMetrics.java deleted file mode 100644 index f91c171db4c..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/throttle/MicrometerThrottleMetrics.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.server.internal.throttle; - -import com.netflix.concurrency.limits.limiter.AbstractLimiter; -import io.micrometer.core.instrument.Gauge; -import io.micrometer.core.instrument.MeterRegistry; -import java.util.concurrent.ThreadPoolExecutor; -import zipkin2.server.internal.MicrometerCollectorMetrics; - -/** Follows the same naming convention as {@link MicrometerCollectorMetrics} */ -final class MicrometerThrottleMetrics { - final MeterRegistry registryInstance; - - MicrometerThrottleMetrics(MeterRegistry registryInstance) { - this.registryInstance = registryInstance; - } - - void bind(ThreadPoolExecutor pool) { - Gauge.builder("zipkin_storage.throttle.concurrency", pool::getCorePoolSize) - .description("number of threads running storage requests") - .register(registryInstance); - Gauge.builder("zipkin_storage.throttle.queue_size", pool.getQueue()::size) - .description("number of items queued waiting for access to storage") - .register(registryInstance); - } - - void bind(AbstractLimiter limiter) { - // This value should parallel (zipkin_storage.throttle.queue_size + zipkin_storage.throttle.concurrency) - // It is tracked to make sure it doesn't perpetually increase. If it does then we're not resolving LimitListeners. 
- Gauge.builder("zipkin_storage.throttle.in_flight_requests", limiter::getInflight) - .description("number of requests the limiter thinks are active") - .register(registryInstance); - } -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/throttle/ThrottledCall.java b/zipkin-server/src/main/java/zipkin2/server/internal/throttle/ThrottledCall.java deleted file mode 100644 index e4210ea5cb4..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/throttle/ThrottledCall.java +++ /dev/null @@ -1,205 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.server.internal.throttle; - -import com.linecorp.armeria.common.util.Exceptions; -import com.netflix.concurrency.limits.Limiter; -import com.netflix.concurrency.limits.Limiter.Listener; -import java.io.IOException; -import java.io.InterruptedIOException; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.Executor; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.RejectedExecutionException; -import java.util.function.Predicate; -import zipkin2.Call; -import zipkin2.Callback; - -import static com.linecorp.armeria.common.util.Exceptions.clearTrace; - -/** - * {@link Call} implementation that is backed by an {@link ExecutorService}. The ExecutorService - * serves two purposes: - *
    - *
  1. Limits the number of requests that can run in parallel.
  2. - *
  3. Depending on configuration, can queue up requests to make sure we don't aggressively drop - * requests that would otherwise succeed if given a moment. Bounded queues are safest for this as - * unbounded ones can lead to heap exhaustion and {@link OutOfMemoryError OOM errors}.
  4. - *
- * - * @see ThrottledStorageComponent - */ -final class ThrottledCall extends Call.Base { - /** - *

This reduces allocations when concurrency reached by always returning the same instance. - * This is only thrown in one location, and a stack trace starting from static initialization - * isn't useful. Hence, we {@link Exceptions#clearTrace clear the trace}. - */ - static final RejectedExecutionException STORAGE_THROTTLE_MAX_CONCURRENCY = - clearTrace(new RejectedExecutionException("STORAGE_THROTTLE_MAX_CONCURRENCY reached")); - - static final Callback NOOP_CALLBACK = new Callback() { - @Override public void onSuccess(Void value) { - } - - @Override public void onError(Throwable t) { - } - }; - - final Call delegate; - final Executor executor; - final Limiter limiter; - final LimiterMetrics limiterMetrics; - final Predicate isOverCapacity; - final CountDownLatch latch = new CountDownLatch(1); - Throwable throwable; // thread visibility guaranteed by the countdown latch - - ThrottledCall(Call delegate, Executor executor, Limiter limiter, - LimiterMetrics limiterMetrics, Predicate isOverCapacity) { - this.delegate = delegate; - this.executor = executor; - this.limiter = limiter; - this.limiterMetrics = limiterMetrics; - this.isOverCapacity = isOverCapacity; - } - - /** - * To simplify code, this doesn't actually invoke the underlying {@link #execute()} method. This - * is ok because in almost all cases, doing so would imply invoking {@link #enqueue(Callback)} - * anyway. - */ - @Override protected Void doExecute() throws IOException { - // Enqueue the call invocation on the executor and block until it completes. 
- doEnqueue(NOOP_CALLBACK); - if (!await(latch)) throw new InterruptedIOException(); - - // Check if the run resulted in an exception - Throwable t = this.throwable; - if (t == null) return null; // success - - // Coerce the throwable to the signature of Call.execute() - if (t instanceof Error) throw (Error) t; - if (t instanceof IOException) throw (IOException) t; - if (t instanceof RuntimeException) throw (RuntimeException) t; - throw new RuntimeException(t); - } - - // When handling enqueue, we don't block the calling thread. Any exception goes to the callback. - @Override protected void doEnqueue(Callback callback) { - Listener limiterListener = - limiter.acquire(null).orElseThrow(() -> STORAGE_THROTTLE_MAX_CONCURRENCY); - - limiterMetrics.requests.increment(); - EnqueueAndAwait enqueueAndAwait = new EnqueueAndAwait(callback, limiterListener); - - try { - executor.execute(enqueueAndAwait); - } catch (RuntimeException | Error t) { // possibly rejected, but from the executor, not storage! - propagateIfFatal(t); - callback.onError(t); - // Ignoring in all cases here because storage itself isn't saying we need to throttle. Though - // we may still be write bound, but a drop in concurrency won't necessarily help. - limiterListener.onIgnore(); - throw t; // allows blocking calls to see the exception - } - } - - @Override public Call clone() { - return new ThrottledCall(delegate.clone(), executor, limiter, limiterMetrics, isOverCapacity); - } - - @Override public String toString() { - return "Throttled(" + delegate + ")"; - } - - /** When run, this enqueues a call with a given callback, and awaits its completion. 
*/ - final class EnqueueAndAwait implements Runnable, Callback { - final Callback callback; - final Listener limiterListener; - - EnqueueAndAwait(Callback callback, Listener limiterListener) { - this.callback = callback; - this.limiterListener = limiterListener; - } - - /** - * This waits until completion to ensure the number of executing calls doesn't surpass the - * concurrency limit of the executor. - * - *

The {@link Listener} isn't affected during run

- * There could be an error enqueuing the call or an interruption during shutdown of the - * executor. We do not affect the {@link Listener} here because it would be redundant to - * handling already done in callbacks. For example, if shutting down, the storage layer would - * also invoke {@link #onError(Throwable)}. - */ - @Override public void run() { - if (delegate.isCanceled()) return; - try { - delegate.enqueue(this); - - // Need to wait here since the callback call will run asynchronously also. - // This ensures we don't exceed our throttle/queue limits. - await(latch); - } catch (Throwable t) { // edge case: error during enqueue! - propagateIfFatal(t); - callback.onError(t); - } - } - - @Override public void onSuccess(Void value) { - try { - // usually we don't add metrics like this, - // but for now it is helpful to sanity check acquired vs erred. - limiterMetrics.requestsSucceeded.increment(); - limiterListener.onSuccess(); // NOTE: limiter could block and delay the caller's callback - callback.onSuccess(value); - } finally { - latch.countDown(); - } - } - - @Override public void onError(Throwable t) { - try { - throwable = t; // catch the throwable in case the invocation is blocking (Call.execute()) - if (isOverCapacity.test(t)) { - limiterMetrics.requestsDropped.increment(); - limiterListener.onDropped(); - } else { - limiterMetrics.requestsIgnored.increment(); - limiterListener.onIgnore(); - } - - // NOTE: the above limiter could block and delay the caller's callback - callback.onError(t); - } finally { - latch.countDown(); - } - } - - @Override public String toString() { - return "EnqueueAndAwait{call=" + delegate + ", callback=" + callback + "}"; - } - } - - /** Returns true if uninterrupted waiting for the latch */ - static boolean await(CountDownLatch latch) { - try { - latch.await(); - return true; - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - return false; - } - } -} diff --git 
a/zipkin-server/src/main/java/zipkin2/server/internal/throttle/ThrottledStorageComponent.java b/zipkin-server/src/main/java/zipkin2/server/internal/throttle/ThrottledStorageComponent.java deleted file mode 100644 index 7153ff11955..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/throttle/ThrottledStorageComponent.java +++ /dev/null @@ -1,240 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.server.internal.throttle; - -import brave.Tracer; -import brave.Tracing; -import brave.propagation.CurrentTraceContext; -import com.linecorp.armeria.common.brave.RequestContextCurrentTraceContext; -import com.netflix.concurrency.limits.Limit; -import com.netflix.concurrency.limits.Limiter; -import com.netflix.concurrency.limits.limit.Gradient2Limit; -import com.netflix.concurrency.limits.limiter.AbstractLimiter; -import io.micrometer.core.instrument.MeterRegistry; -import io.micrometer.core.instrument.util.NamedThreadFactory; -import java.io.IOException; -import java.util.List; -import java.util.Objects; -import java.util.Optional; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.Executor; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.RejectedExecutionException; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; -import java.util.function.Consumer; -import java.util.function.Predicate; 
-import zipkin2.Call; -import zipkin2.Span; -import zipkin2.internal.Nullable; -import zipkin2.server.internal.brave.TracedCall; -import zipkin2.storage.ForwardingStorageComponent; -import zipkin2.storage.SpanConsumer; -import zipkin2.storage.StorageComponent; - -import static com.linecorp.armeria.common.util.Exceptions.clearTrace; - -/** - * Delegating implementation that limits requests to the {@link #spanConsumer()} of another {@link - * StorageComponent}. The theory here is that this class can be used to: - *
    - *
  • Prevent spamming the storage engine with excessive, spike requests when they come in; thus - * preserving it's life.
  • - *
  • Optionally act as a buffer so that a fixed number requests can be queued for execution when - * the throttle allows for it. This optional queue must be bounded in order to avoid running out of - * memory from infinitely queueing.
  • - *
- * - * @see ThrottledSpanConsumer - */ -public final class ThrottledStorageComponent extends ForwardingStorageComponent { - /** - * See {@link ThrottledCall#STORAGE_THROTTLE_MAX_CONCURRENCY} if unfamiliar with clearing trace on - * exceptions only thrown from one spot. - */ - static final RejectedExecutionException STORAGE_THROTTLE_MAX_QUEUE_SIZE = - clearTrace(new RejectedExecutionException("STORAGE_THROTTLE_MAX_QUEUE_SIZE reached")); - - final StorageComponent delegate; - final @Nullable Tracer tracer; - final @Nullable CurrentTraceContext currentTraceContext; - final AbstractLimiter limiter; - final ThreadPoolExecutor executor; - final LimiterMetrics limiterMetrics; - - public ThrottledStorageComponent(StorageComponent delegate, MeterRegistry registry, - @Nullable Tracing tracing, int minConcurrency, int maxConcurrency, int maxQueueSize) { - this.delegate = Objects.requireNonNull(delegate); - this.tracer = tracing != null ? tracing.tracer() : null; - this.currentTraceContext = tracing != null ? tracing.currentTraceContext() : null; - - Limit limit = Gradient2Limit.newBuilder() - .minLimit(minConcurrency) - // Limiter will trend towards min until otherwise necessary so may as well start there - .initialLimit(minConcurrency) - .maxConcurrency(maxConcurrency) - .queueSize(0) - .build(); - this.limiter = new Builder().limit(limit).build(); - - // The size of the thread pool is managed by the limiter, so we initialize it with the lower - // bound (current limit), and later use change notification to resize it. 
- executor = new ThreadPoolExecutor( - limit.getLimit(), - limit.getLimit(), - 0, - TimeUnit.DAYS, - createQueue(maxQueueSize), - new NamedThreadFactory("zipkin-throttle-pool") { - @Override public Thread newThread(Runnable runnable) { - return super.newThread(new Runnable() { - @Override public void run() { - RequestContextCurrentTraceContext.setCurrentThreadNotRequestThread(true); - runnable.run(); - } - - @Override public String toString() { - return runnable.toString(); - } - }); - } - }, - (r, e) -> { - throw STORAGE_THROTTLE_MAX_QUEUE_SIZE; - }); - limit.notifyOnChange(new ThreadPoolExecutorResizer(executor)); - - MicrometerThrottleMetrics metrics = new MicrometerThrottleMetrics(registry); - metrics.bind(executor); - metrics.bind(limiter); - - limiterMetrics = new LimiterMetrics(registry); - } - - @Override protected StorageComponent delegate() { - return delegate; - } - - @Override public SpanConsumer spanConsumer() { - return new ThrottledSpanConsumer(this); - } - - @Override public void close() throws IOException { - executor.shutdownNow(); - delegate.close(); - } - - @Override public String toString() { - return "Throttled{" + delegate.toString() + "}"; - } - - static final class ThrottledSpanConsumer implements SpanConsumer { - final SpanConsumer delegate; - final Executor executor; - final Limiter limiter; - final LimiterMetrics limiterMetrics; - final Predicate isOverCapacity; - @Nullable final Tracer tracer; - - ThrottledSpanConsumer(ThrottledStorageComponent throttledStorage) { - this.delegate = throttledStorage.delegate.spanConsumer(); - this.executor = throttledStorage.currentTraceContext != null - ? 
throttledStorage.currentTraceContext.executor(throttledStorage.executor) - : throttledStorage.executor; - this.limiter = throttledStorage.limiter; - this.limiterMetrics = throttledStorage.limiterMetrics; - this.isOverCapacity = throttledStorage::isOverCapacity; - this.tracer = throttledStorage.tracer; - } - - @Override public Call accept(List spans) { - Call result = new ThrottledCall( - delegate.accept(spans), executor, limiter, limiterMetrics, isOverCapacity); - - return tracer != null ? new TracedCall<>(tracer, result, "throttled-accept-spans") : result; - } - - @Override public String toString() { - return "Throttled(" + delegate + ")"; - } - } - - static BlockingQueue createQueue(int maxSize) { - if (maxSize < 0) throw new IllegalArgumentException("maxSize < 0"); - - if (maxSize == 0) { - // 0 means we should be bounded but we can't create a queue with that size so use 1 instead. - maxSize = 1; - } - - return new LinkedBlockingQueue<>(maxSize); - } - - static final class ThreadPoolExecutorResizer implements Consumer { - final ThreadPoolExecutor executor; - - ThreadPoolExecutorResizer(ThreadPoolExecutor executor) { - this.executor = executor; - } - - /** - * This is {@code synchronized} to ensure that we don't let the core/max pool sizes get out of - * sync; even for an instant. The two need to be tightly coupled together to ensure that when - * our queue fills up we don't spin up extra Threads beyond our calculated limit. - * - *

There is also an unfortunate aspect where the {@code max} has to always be greater than - * {@code core} or an exception will be thrown. So they have to be adjust appropriately - * relative to the direction the size is going. - */ - @Override public synchronized void accept(Integer newValue) { - int previousValue = executor.getCorePoolSize(); - - int newValueInt = newValue; - if (previousValue < newValueInt) { - executor.setMaximumPoolSize(newValueInt); - executor.setCorePoolSize(newValueInt); - } else if (previousValue > newValueInt) { - executor.setCorePoolSize(newValueInt); - executor.setMaximumPoolSize(newValueInt); - } - // Note: no case for equals. Why modify something that doesn't need modified? - } - } - - static final class Builder extends AbstractLimiter.Builder { - NonLimitingLimiter build() { - return new NonLimitingLimiter(this); - } - - @Override protected Builder self() { - return this; - } - } - - /** - * Unlike a normal Limiter, this will actually not prevent the creation of a {@link Listener} in - * {@link #acquire(java.lang.Void)}. The point of this is to ensure that we can always derive an - * appropriate {@link Limit#getLimit() Limit} while the {@link #executor} handles actually - * limiting running requests. 
- */ - static final class NonLimitingLimiter extends AbstractLimiter { - NonLimitingLimiter(AbstractLimiter.Builder builder) { - super(builder); - } - - @Override public Optional acquire(Void context) { - return Optional.of(createListener()); - } - } -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/throttle/ZipkinStorageThrottleProperties.java b/zipkin-server/src/main/java/zipkin2/server/internal/throttle/ZipkinStorageThrottleProperties.java deleted file mode 100644 index c8407c38210..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/throttle/ZipkinStorageThrottleProperties.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.server.internal.throttle; - -import org.springframework.boot.context.properties.ConfigurationProperties; - -@ConfigurationProperties("zipkin.storage.throttle") -public final class ZipkinStorageThrottleProperties { - /** Should we throttle at all? */ - private boolean enabled; - /** Minimum number of storage requests to allow through at a given time. */ - private int minConcurrency; - /** - * Maximum number of storage requests to allow through at a given time. Should be tuned to - * (bulk_index_pool_size / num_servers_in_cluster). e.g. 200 (default pool size in Elasticsearch) - * / 2 (number of load balanced zipkin-server instances) = 100. 
- */ - private int maxConcurrency; - /** - * Maximum number of storage requests to buffer while waiting for open Thread. 0 = no buffering. - */ - private int maxQueueSize; - - public boolean isEnabled() { - return enabled; - } - - public void setEnabled(boolean enabled) { - this.enabled = enabled; - } - - public int getMinConcurrency() { - return minConcurrency; - } - - public void setMinConcurrency(int minConcurrency) { - this.minConcurrency = minConcurrency; - } - - public int getMaxConcurrency() { - return maxConcurrency; - } - - public void setMaxConcurrency(int maxConcurrency) { - this.maxConcurrency = maxConcurrency; - } - - public int getMaxQueueSize() { - return maxQueueSize; - } - - public void setMaxQueueSize(int maxQueueSize) { - this.maxQueueSize = maxQueueSize; - } -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/ui/CompressionProperties.java b/zipkin-server/src/main/java/zipkin2/server/internal/ui/CompressionProperties.java deleted file mode 100644 index f579aa6bf4b..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/ui/CompressionProperties.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal.ui; - -import org.springframework.boot.context.properties.ConfigurationProperties; -import org.springframework.boot.web.server.Compression; - -@ConfigurationProperties("server") -class CompressionProperties { - public Compression getCompression() { - return compression; - } - - public void setCompression(Compression compression) { - this.compression = compression; - } - - private Compression compression; -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/ui/ZipkinUiConfiguration.java b/zipkin-server/src/main/java/zipkin2/server/internal/ui/ZipkinUiConfiguration.java deleted file mode 100644 index 3b41b975a30..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/ui/ZipkinUiConfiguration.java +++ /dev/null @@ -1,183 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal.ui; - -import com.fasterxml.jackson.core.JsonGenerator; -import com.linecorp.armeria.common.HttpData; -import com.linecorp.armeria.common.HttpStatus; -import com.linecorp.armeria.common.MediaType; -import com.linecorp.armeria.common.ServerCacheControl; -import com.linecorp.armeria.server.HttpService; -import com.linecorp.armeria.server.RedirectService; -import com.linecorp.armeria.server.file.FileService; -import com.linecorp.armeria.server.file.HttpFile; -import com.linecorp.armeria.spring.ArmeriaServerConfigurator; -import io.micrometer.core.instrument.MeterRegistry; -import io.micrometer.core.instrument.config.MeterFilter; -import java.io.IOException; -import java.io.InputStream; -import java.io.StringWriter; -import java.util.Optional; -import java.util.concurrent.TimeUnit; -import org.springframework.beans.factory.BeanCreationException; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.beans.factory.annotation.Value; -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; -import org.springframework.boot.context.properties.EnableConfigurationProperties; -import org.springframework.context.annotation.Bean; -import org.springframework.core.io.Resource; -import org.springframework.util.StreamUtils; -import zipkin2.server.internal.JsonUtil; - -import static java.nio.charset.StandardCharsets.UTF_8; -import static zipkin2.server.internal.ui.ZipkinUiProperties.DEFAULT_BASEPATH; - -/** - * Zipkin-UI is a single-page application mounted at /zipkin. For simplicity, assume paths mentioned - * below are relative to that. For example, the UI reads config.json, from the absolute path - * /zipkin/config.json - * - *

When looking at a trace, the browser is sent to the path "/traces/{id}". For the single-page - * app to serve that route, the server needs to forward the request to "/index.html". The same - * forwarding applies to "/dependencies" and any other routes the UI controls. - * - *

Under the scenes the JavaScript code looks at {@code window.location} to figure out what the - * UI should do. This is handled by a route api defined in the crossroads library. - * - *

Caching

- *

This includes a hard-coded cache policy, consistent with zipkin-scala. - *

    - *
  • 1 minute for index.html
  • - *
  • 10 minute for /config.json
  • - *
  • 365 days for hashed resources (ex /app-e12b3bbb7e5a572f270d.min.js)
  • - *
- * Since index.html links to hashed resource names, any change to it will orphan old resources. - * That's why hashed resource age can be 365 days. - */ -@EnableConfigurationProperties({ZipkinUiProperties.class, CompressionProperties.class}) -@ConditionalOnProperty(name = "zipkin.ui.enabled", matchIfMissing = true) -public class ZipkinUiConfiguration { - @Autowired ZipkinUiProperties ui; - @Value("classpath:zipkin-lens/index.html") Resource lensIndexHtml; - - @Bean - HttpService indexService() throws Exception { - HttpService lensIndex = maybeIndexService(ui.getBasepath(), lensIndexHtml); - if (lensIndex != null) return lensIndex; - throw new BeanCreationException("Could not load Lens UI from " + lensIndexHtml); - } - - @Bean ArmeriaServerConfigurator uiServerConfigurator( - HttpService indexService, - Optional meterRegistry - ) throws IOException { - ServerCacheControl maxAgeYear = - ServerCacheControl.builder().maxAgeSeconds(TimeUnit.DAYS.toSeconds(365)).build(); - - HttpService uiFileService = FileService.builder(getClass().getClassLoader(), "zipkin-lens") - .cacheControl(maxAgeYear) - .build(); - - String config = writeConfig(ui); - return sb -> { - sb.service("/zipkin/config.json", HttpFile.builder(HttpData.ofUtf8(config)) - .cacheControl(ServerCacheControl.builder().maxAgeSeconds(600).build()) - .contentType(MediaType.JSON_UTF_8) - .build() - .asService()); - - sb.serviceUnder("/zipkin/", uiFileService); - - // TODO This approach requires maintenance when new UI routes are added. Change to the following: - // If the path is a a file w/an extension, treat normally. - // Otherwise instead of returning 404, forward to the index. 
- // See https://github.com/twitter/finatra/blob/458c6b639c3afb4e29873d123125eeeb2b02e2cd/http/src/main/scala/com/twitter/finatra/http/response/ResponseBuilder.scala#L321 - sb.service("/zipkin/", indexService) - .service("/zipkin/index.html", indexService) - .service("/zipkin/traces/{id}", indexService) - .service("/zipkin/dependency", indexService) - .service("/zipkin/traceViewer", indexService); - - sb.service("/favicon.ico", new RedirectService(HttpStatus.FOUND, "/zipkin/favicon.ico")) - .service("/", new RedirectService(HttpStatus.FOUND, "/zipkin/")) - .service("/zipkin", new RedirectService(HttpStatus.FOUND, "/zipkin/")); - - // don't add metrics for favicon - meterRegistry.ifPresent(m -> m.config().meterFilter(MeterFilter.deny(id -> { - String uri = id.getTag("uri"); - return uri != null && uri.startsWith("/favicon.ico"); - }))); - }; - } - - // - // environment: '', - // queryLimit: 10, - // defaultLookback: 15 * 60 * 1000, // 15 minutes - // searchEnabled: true, - // dependency: { - // enabled: true, - // lowErrorRate: 0.5, // 50% of calls in error turns line yellow - // highErrorRate: 0.75 // 75% of calls in error turns line red - // } - static String writeConfig(ZipkinUiProperties ui) throws IOException { - StringWriter writer = new StringWriter(); - try (JsonGenerator generator = JsonUtil.createGenerator(writer)) { - generator.useDefaultPrettyPrinter(); - generator.writeStartObject(); - generator.writeStringField("environment", ui.getEnvironment()); - generator.writeNumberField("queryLimit", ui.getQueryLimit()); - generator.writeNumberField("defaultLookback", ui.getDefaultLookback()); - generator.writeBooleanField("searchEnabled", ui.isSearchEnabled()); - generator.writeStringField("logsUrl", ui.getLogsUrl()); - generator.writeStringField("supportUrl", ui.getSupportUrl()); - generator.writeStringField("archivePostUrl", ui.getArchivePostUrl()); - generator.writeStringField("archiveUrl", ui.getArchiveUrl()); - generator.writeObjectFieldStart("dependency"); 
- generator.writeBooleanField("enabled", ui.getDependency().isEnabled()); - generator.writeNumberField("lowErrorRate", ui.getDependency().getLowErrorRate()); - generator.writeNumberField("highErrorRate", ui.getDependency().getHighErrorRate()); - generator.writeEndObject(); // .dependency - generator.writeEndObject(); // . - } - return writer.toString(); - } - - static HttpService maybeIndexService(String basePath, Resource resource) throws IOException { - String maybeContent = maybeResource(basePath, resource); - if (maybeContent == null) return null; - - ServerCacheControl maxAgeMinute = ServerCacheControl.builder().maxAgeSeconds(60).build(); - - return HttpFile.builder(HttpData.ofUtf8(maybeContent)) - .contentType(MediaType.HTML_UTF_8).cacheControl(maxAgeMinute) - .build().asService(); - } - - static String maybeResource(String basePath, Resource resource) throws IOException { - if (!resource.isReadable()) return null; - - try (InputStream stream = resource.getInputStream()) { - String content = StreamUtils.copyToString(stream, UTF_8); - if (DEFAULT_BASEPATH.equals(basePath)) return content; - - String baseTagValue = "/".equals(basePath) ? "/" : basePath + "/"; - // html-webpack-plugin seems to strip out quotes from the base tag when compiling so be - // careful with this matcher. - return content.replaceAll( - "]+>", "" - ); - } - } -} diff --git a/zipkin-server/src/main/java/zipkin2/server/internal/ui/ZipkinUiProperties.java b/zipkin-server/src/main/java/zipkin2/server/internal/ui/ZipkinUiProperties.java deleted file mode 100644 index 53faa09e3cb..00000000000 --- a/zipkin-server/src/main/java/zipkin2/server/internal/ui/ZipkinUiProperties.java +++ /dev/null @@ -1,164 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.server.internal.ui; - -import java.util.concurrent.TimeUnit; -import org.springframework.boot.context.properties.ConfigurationProperties; -import org.springframework.util.StringUtils; - -@ConfigurationProperties("zipkin.ui") -class ZipkinUiProperties { - // TODO: this isn't honored in lens https://github.com/openzipkin/zipkin/issues/2519 - static final String DEFAULT_BASEPATH = "/zipkin"; - - private String environment = ""; - private int queryLimit = 10; - private int defaultLookback = (int) TimeUnit.DAYS.toMillis(7); - private String instrumented = ".*"; - private String logsUrl = null; - private String supportUrl = null; - private String archivePostUrl = null; - private String archiveUrl = null; - private String basepath = DEFAULT_BASEPATH; - private boolean searchEnabled = true; - private Dependency dependency = new Dependency(); - - public int getDefaultLookback() { - return defaultLookback; - } - - public void setDefaultLookback(int defaultLookback) { - this.defaultLookback = defaultLookback; - } - - public String getEnvironment() { - return environment; - } - - public void setEnvironment(String environment) { - this.environment = environment; - } - - public int getQueryLimit() { - return queryLimit; - } - - public void setQueryLimit(int queryLimit) { - this.queryLimit = queryLimit; - } - - public String getInstrumented() { - return instrumented; - } - - public void setInstrumented(String instrumented) { - this.instrumented = instrumented; - } - - public String getLogsUrl() { - return logsUrl; - } - - public String 
getArchivePostUrl() { - return archivePostUrl; - } - - - public String getArchiveUrl() { - return archiveUrl; - } - - public void setLogsUrl(String logsUrl) { - if (!StringUtils.isEmpty(logsUrl)) { - this.logsUrl = logsUrl; - } - } - - public String getSupportUrl() { - return supportUrl; - } - - public void setSupportUrl(String supportUrl) { - if (!StringUtils.isEmpty(supportUrl)) { - this.supportUrl = supportUrl; - } - - } - - public void setArchivePostUrl(String archivePostUrl) { - if (!StringUtils.isEmpty(archivePostUrl)) { - this.archivePostUrl = archivePostUrl; - } - } - - public void setArchiveUrl(String archiveUrl) { - if (!StringUtils.isEmpty(archiveUrl)) { - this.archiveUrl = archiveUrl; - } - } - - public boolean isSearchEnabled() { - return searchEnabled; - } - - public void setSearchEnabled(boolean searchEnabled) { - this.searchEnabled = searchEnabled; - } - - public Dependency getDependency() { - return dependency; - } - - public void setDependency(Dependency dependency) { - this.dependency = dependency; - } - - public String getBasepath() { - return basepath; - } - - public void setBasepath(String basepath) { - this.basepath = basepath; - } - - public static class Dependency { - private boolean enabled = true; - private float lowErrorRate = 0.5f; // 50% of calls in error turns line yellow - private float highErrorRate = 0.75f; // 75% of calls in error turns line red - - public boolean isEnabled() { - return enabled; - } - - public void setEnabled(boolean enabled) { - this.enabled = enabled; - } - - public float getLowErrorRate() { - return lowErrorRate; - } - - public void setLowErrorRate(float lowErrorRate) { - this.lowErrorRate = lowErrorRate; - } - - public float getHighErrorRate() { - return highErrorRate; - } - - public void setHighErrorRate(float highErrorRate) { - this.highErrorRate = highErrorRate; - } - } -} diff --git a/zipkin-server/src/main/resources/info.json b/zipkin-server/src/main/resources/info.json deleted file mode 100644 index 
e5b384f835e..00000000000 --- a/zipkin-server/src/main/resources/info.json +++ /dev/null @@ -1 +0,0 @@ -{"zipkin":{"version":"@project.version@","commit":"@git.commit.id.abbrev@"}} diff --git a/zipkin-server/src/main/resources/simplelogger.properties b/zipkin-server/src/main/resources/simplelogger.properties deleted file mode 100644 index 8bb407b9899..00000000000 --- a/zipkin-server/src/main/resources/simplelogger.properties +++ /dev/null @@ -1,15 +0,0 @@ -# SLF4J's SimpleLogger configuration file -# See https://www.slf4j.org/api/org/slf4j/impl/SimpleLogger.html for the full list of config options - -org.slf4j.simpleLogger.defaultLogLevel=info -org.slf4j.simpleLogger.showDateTime=true -org.slf4j.simpleLogger.dateTimeFormat=yyyy-MM-dd HH:mm:ss:SSS -org.slf4j.simpleLogger.showShortLogName=true - -# this mirrors the logging configuration applied in zipkin-server-shared.yml , logging.level -# This only includes Armeria as for example Kafka and Cassandra are not in the slim dist - -# Unless it's serious we don't want to know. -org.slf4j.simpleLogger.log.com.linecorp.armeria=WARN -# But allow to say it's ready to serve requests -org.slf4j.simpleLogger.log.com.linecorp.armeria.server.Server=INFO diff --git a/zipkin-server/src/main/resources/zipkin-server-shared.yml b/zipkin-server/src/main/resources/zipkin-server-shared.yml deleted file mode 100644 index 60121a65b86..00000000000 --- a/zipkin-server/src/main/resources/zipkin-server-shared.yml +++ /dev/null @@ -1,297 +0,0 @@ -zipkin: - self-tracing: - # Set to true to enable self-tracing. - enabled: ${SELF_TRACING_ENABLED:false} - # percentage of self-traces to retain. If set to a value other than 1.0, traces-per-second will - # not be used. - sample-rate: ${SELF_TRACING_SAMPLE_RATE:1.0} - # Number of traces per second to retain. sample-rate must be set to 1.0 to use this value. If - # set to 0, an unlimited number of traces per second will be retained. 
- traces-per-second: ${SELF_TRACING_TRACES_PER_SECOND:1} - # Timeout in seconds to flush self-tracing data to storage. - message-timeout: ${SELF_TRACING_FLUSH_INTERVAL:1} - collector: - # percentage to traces to retain - sample-rate: ${COLLECTOR_SAMPLE_RATE:1.0} - activemq: - enabled: ${COLLECTOR_ACTIVEMQ_ENABLED:true} - # ActiveMQ broker url. Ex. tcp://localhost:61616 or failover:(tcp://localhost:61616,tcp://remotehost:61616) - url: ${ACTIVEMQ_URL:} - # Queue from which to collect span messages. - queue: ${ACTIVEMQ_QUEUE:zipkin} - # Number of concurrent span consumers. - concurrency: ${ACTIVEMQ_CONCURRENCY:1} - # Optional username to connect to the broker - username: ${ACTIVEMQ_USERNAME:} - # Optional password to connect to the broker - password: ${ACTIVEMQ_PASSWORD:} - http: - # Set to false to disable creation of spans via HTTP collector API - enabled: ${COLLECTOR_HTTP_ENABLED:${HTTP_COLLECTOR_ENABLED:true}} - grpc: - # Set to true to enable the GRPC collector - enabled: ${COLLECTOR_GRPC_ENABLED:false} - kafka: - enabled: ${COLLECTOR_KAFKA_ENABLED:true} - # Kafka bootstrap broker list, comma-separated host:port values. Setting this activates the - # Kafka 0.10+ collector. - bootstrap-servers: ${KAFKA_BOOTSTRAP_SERVERS:} - # Name of topic to poll for spans - topic: ${KAFKA_TOPIC:zipkin} - # Consumer group this process is consuming on behalf of. 
- group-id: ${KAFKA_GROUP_ID:zipkin} - # Count of consumer threads consuming the topic - streams: ${KAFKA_STREAMS:1} - rabbitmq: - enabled: ${COLLECTOR_RABBITMQ_ENABLED:true} - # RabbitMQ server address list (comma-separated list of host:port) - addresses: ${RABBIT_ADDRESSES:} - concurrency: ${RABBIT_CONCURRENCY:1} - # TCP connection timeout in milliseconds - connection-timeout: ${RABBIT_CONNECTION_TIMEOUT:60000} - password: ${RABBIT_PASSWORD:guest} - queue: ${RABBIT_QUEUE:zipkin} - username: ${RABBIT_USER:guest} - virtual-host: ${RABBIT_VIRTUAL_HOST:/} - useSsl: ${RABBIT_USE_SSL:false} - uri: ${RABBIT_URI:} - scribe: - enabled: ${COLLECTOR_SCRIBE_ENABLED:${SCRIBE_ENABLED:false}} - category: ${SCRIBE_CATEGORY:zipkin} - port: ${COLLECTOR_PORT:9410} - query: - enabled: ${QUERY_ENABLED:true} - # Timeout for requests to the query API - timeout: ${QUERY_TIMEOUT:11s} - # 1 day in millis - lookback: ${QUERY_LOOKBACK:86400000} - # The Cache-Control max-age (seconds) for /api/v2/services, /api/v2/remoteServices and /api/v2/spans - names-max-age: 300 - # CORS allowed-origins. - allowed-origins: "*" - - # Internal properties that end users should never try to use - internal: - actuator: - enabled: true - # auto-configuration to include when ArmeriaSpringActuatorAutoConfiguration is present. - # Note: These are still subject to endpoint conditions. The list must be checked for drift - # upgrading Spring Boot. 
- include: - - org.springframework.boot.actuate.autoconfigure.beans.BeansEndpointAutoConfiguration - - org.springframework.boot.actuate.autoconfigure.condition.ConditionsReportEndpointAutoConfiguration - - org.springframework.boot.actuate.autoconfigure.context.properties.ConfigurationPropertiesReportEndpointAutoConfiguration - - org.springframework.boot.actuate.autoconfigure.endpoint.EndpointAutoConfiguration - - org.springframework.boot.actuate.autoconfigure.env.EnvironmentEndpointAutoConfiguration - - org.springframework.boot.actuate.autoconfigure.management.HeapDumpWebEndpointAutoConfiguration - - org.springframework.boot.actuate.autoconfigure.logging.LoggersEndpointAutoConfiguration - - org.springframework.boot.actuate.autoconfigure.management.ThreadDumpEndpointAutoConfiguration - - storage: - strict-trace-id: ${STRICT_TRACE_ID:true} - search-enabled: ${SEARCH_ENABLED:true} - autocomplete-keys: ${AUTOCOMPLETE_KEYS:} - autocomplete-ttl: ${AUTOCOMPLETE_TTL:3600000} - autocomplete-cardinality: 20000 - type: ${STORAGE_TYPE:mem} - throttle: - enabled: ${STORAGE_THROTTLE_ENABLED:false} - min-concurrency: ${STORAGE_THROTTLE_MIN_CONCURRENCY:10} - max-concurrency: ${STORAGE_THROTTLE_MAX_CONCURRENCY:200} - max-queue-size: ${STORAGE_THROTTLE_MAX_QUEUE_SIZE:1000} - mem: - # Maximum number of spans to keep in memory. When exceeded, oldest traces (and their spans) will be purged. - max-spans: ${MEM_MAX_SPANS:500000} - cassandra: - # Comma separated list of host addresses part of Cassandra cluster. Ports default to 9042 but you can also specify a custom port with 'host:port'. - contact-points: ${CASSANDRA_CONTACT_POINTS:localhost} - # Name of the datacenter that will be considered "local" for load balancing. - local-dc: ${CASSANDRA_LOCAL_DC:datacenter1} - # Will throw an exception on startup if authentication fails. 
- username: ${CASSANDRA_USERNAME:} - password: ${CASSANDRA_PASSWORD:} - keyspace: ${CASSANDRA_KEYSPACE:zipkin} - # Max pooled connections per datacenter-local host. - max-connections: ${CASSANDRA_MAX_CONNECTIONS:8} - # Ensuring that schema exists, if enabled tries to execute script /zipkin-cassandra-core/resources/cassandra-schema.cql. - ensure-schema: ${CASSANDRA_ENSURE_SCHEMA:true} - # 7 days in seconds - span-ttl: ${CASSANDRA_SPAN_TTL:604800} - # 3 days in seconds - index-ttl: ${CASSANDRA_INDEX_TTL:259200} - # the maximum trace index metadata entries to cache - index-cache-max: ${CASSANDRA_INDEX_CACHE_MAX:100000} - # how long to cache index metadata about a trace. 1 minute in seconds - index-cache-ttl: ${CASSANDRA_INDEX_CACHE_TTL:60} - # how many more index rows to fetch than the user-supplied query limit - index-fetch-multiplier: ${CASSANDRA_INDEX_FETCH_MULTIPLIER:3} - # Using ssl for connection, rely on Keystore - use-ssl: ${CASSANDRA_USE_SSL:false} - cassandra3: - # Comma separated list of host addresses part of Cassandra cluster. Ports default to 9042 but you can also specify a custom port with 'host:port'. - contact-points: ${CASSANDRA_CONTACT_POINTS:localhost} - # Name of the datacenter that will be considered "local" for load balancing. - local-dc: ${CASSANDRA_LOCAL_DC:datacenter1} - # Will throw an exception on startup if authentication fails. - username: ${CASSANDRA_USERNAME:} - password: ${CASSANDRA_PASSWORD:} - keyspace: ${CASSANDRA_KEYSPACE:zipkin2} - # Max pooled connections per datacenter-local host. 
- max-connections: ${CASSANDRA_MAX_CONNECTIONS:8} - # Ensuring that schema exists, if enabled tries to execute script /zipkin2-schema.cql - ensure-schema: ${CASSANDRA_ENSURE_SCHEMA:true} - # how many more index rows to fetch than the user-supplied query limit - index-fetch-multiplier: ${CASSANDRA_INDEX_FETCH_MULTIPLIER:3} - # Using ssl for connection, rely on Keystore - use-ssl: ${CASSANDRA_USE_SSL:false} - elasticsearch: - # host is left unset intentionally, to defer the decision - hosts: ${ES_HOSTS:} - pipeline: ${ES_PIPELINE:} - timeout: ${ES_TIMEOUT:10000} - index: ${ES_INDEX:zipkin} - ensure-templates: ${ES_ENSURE_TEMPLATES:true} - date-separator: ${ES_DATE_SEPARATOR:-} - index-shards: ${ES_INDEX_SHARDS:5} - index-replicas: ${ES_INDEX_REPLICAS:1} - username: ${ES_USERNAME:} - password: ${ES_PASSWORD:} - credentials-file: ${ES_CREDENTIALS_FILE:} - credentials-refresh-interval: ${ES_CREDENTIALS_REFRESH_INTERVAL:5} - http-logging: ${ES_HTTP_LOGGING:} - ssl: - no-verify: ${ES_SSL_NO_VERIFY:false} - health-check: - enabled: ${ES_HEALTH_CHECK_ENABLED:true} - interval: ${ES_HEALTH_CHECK_INTERVAL:3s} - template-priority: ${ES_TEMPLATE_PRIORITY:} - mysql: - jdbc-url: ${MYSQL_JDBC_URL:} - host: ${MYSQL_HOST:localhost} - port: ${MYSQL_TCP_PORT:3306} - username: ${MYSQL_USER:} - password: ${MYSQL_PASS:} - db: ${MYSQL_DB:zipkin} - max-active: ${MYSQL_MAX_CONNECTIONS:10} - use-ssl: ${MYSQL_USE_SSL:false} - ui: - enabled: ${QUERY_ENABLED:true} - ## Values below here are mapped to ZipkinUiProperties, served as /config.json - # Default limit for Find Traces - query-limit: 10 - # The value here becomes a label in the top-right corner - environment: - # Default duration to look back when finding traces. - # Affects the "Start time" element in the UI. 15 minutes in millis - default-lookback: 900000 - # When false, disables the "Discover" screen - search-enabled: ${SEARCH_ENABLED:true} - # Which sites this Zipkin UI covers. Regex syntax. (e.g. 
http:\/\/example.com\/.*) - # Multiple sites can be specified, e.g. - # - .*example1.com - # - .*example2.com - # Default is "match all websites" - instrumented: .* - # URL placed into the tag in the HTML - base-path: /zipkin - -# We are using Armeria instead of Tomcat. Have it inherit the default configuration from Spring -spring.main.web-application-type: none -# These defaults are not used directly. They are used via armeria namespacing -server: - port: ${QUERY_PORT:9411} - use-forward-headers: true - compression: - enabled: true - # compresses any response over min-response-size (default is 2KiB) - # Includes dynamic json content and large static assets from zipkin-ui - mime-types: application/json,application/javascript,text/css,image/svg - min-response-size: 2048 - -armeria: - ports: - - port: ${server.port} - protocols: - - http - compression: - enabled: ${server.compression.enabled} - mime-types: ${server.compression.mime-types} - min-response-size: ${server.compression.min-response-size} - gracefulShutdownQuietPeriodMillis: -1 - gracefulShutdownTimeoutMillis: -1 - -spring: - jmx: - # reduce startup time by excluding unexposed JMX service - enabled: false - mvc: - favicon: - # zipkin has its own favicon - enabled: false - autoconfigure: - # NOTE: These exclusions can drift between Spring Boot minor versions. Audit accordingly. - # Ex. 
curl -s localhost:9411/actuator/beans|jq '.contexts.application.beans|keys_unsorted[]'|sort - exclude: - # JMX is disabled - - org.springframework.boot.actuate.autoconfigure.endpoint.jmx.JmxEndpointAutoConfiguration - # /health and /actuator/health served directly by Armeria - - org.springframework.boot.actuate.autoconfigure.health.HealthEndpointAutoConfiguration - - org.springframework.boot.actuate.autoconfigure.health.HealthContributorAutoConfiguration - # /info and /actuator/info served directly by Armeria (content is /info.json) - - org.springframework.boot.autoconfigure.info.ProjectInfoAutoConfiguration - - org.springframework.boot.actuate.autoconfigure.info.InfoContributorAutoConfiguration - # /prometheus and /actuator/prometheus are served directly by Armeria - - org.springframework.boot.actuate.autoconfigure.metrics.export.prometheus.PrometheusMetricsExportAutoConfiguration - # Remove unused auto-configuration - - org.springframework.boot.autoconfigure.cassandra.CassandraAutoConfiguration - - org.springframework.boot.autoconfigure.jackson.JacksonAutoConfiguration - - org.springframework.boot.autoconfigure.jdbc.DataSourceAutoConfiguration - - org.springframework.boot.autoconfigure.jooq.JooqAutoConfiguration - - org.springframework.boot.autoconfigure.jms.activemq.ActiveMQAutoConfiguration - - org.springframework.boot.autoconfigure.task.TaskExecutionAutoConfiguration - - org.springframework.boot.autoconfigure.task.TaskSchedulingAutoConfiguration -logging: - pattern: - level: "%clr{%5p} %clr{[%X{traceId}/%X{spanId}]}{yellow}" - level: - # Hush MySQL related logs - org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor: 'WARN' - com.zaxxer.hikari.HikariDataSource: 'WARN' - # Don't print driver version in console output - com.datastax.oss.driver.internal.core.DefaultMavenCoordinates: 'WARN' - # We exclude Geo codec and Graph extensions to keep size down - com.datastax.oss.driver.internal.core.context.InternalDriverContext: 'WARN' - # Use of native 
clocks in Cassandra is not insightful - com.datastax.oss.driver.internal.core.time.Clock: 'WARN' - # Unless it's serious we don't want to know - com.linecorp.armeria: 'WARN' - # # But allow to say it's ready to serve requests - com.linecorp.armeria.server.Server: 'INFO' - # kafka is quite chatty so we switch everything off by default - org.apache.kafka: 'OFF' -# # investigate /api/v2/dependencies -# zipkin2.internal.DependencyLinker: 'DEBUG' -# # log reason behind http collector dropped messages -# zipkin2.server.ZipkinHttpCollector: 'DEBUG' -# zipkin2.collector.kafka.KafkaCollector: 'DEBUG' -# zipkin2.collector.rabbitmq.RabbitMQCollector: 'DEBUG' -# zipkin2.collector.scribe.ScribeCollector: 'DEBUG' -management: - endpoints: - web: - exposure: - include: '*' - # Below are served directly without actuator. - endpoint: - health: - enabled: false - prometheus: - enabled: false - info: - enabled: false -# Disabling auto time http requests since it is added in ZipkinPrometheusMetricsConfiguration -# In Zipkin we use different naming for the http requests duration - metrics: - web: - server: - auto-time-requests: false diff --git a/zipkin-server/src/main/resources/zipkin-server.yml b/zipkin-server/src/main/resources/zipkin-server.yml deleted file mode 100644 index dc72ed35b50..00000000000 --- a/zipkin-server/src/main/resources/zipkin-server.yml +++ /dev/null @@ -1,2 +0,0 @@ -spring.profiles.include: shared -armeria.enableMetrics: false diff --git a/zipkin-server/src/main/resources/zipkin.txt b/zipkin-server/src/main/resources/zipkin.txt deleted file mode 100644 index c326ed2b270..00000000000 --- a/zipkin-server/src/main/resources/zipkin.txt +++ /dev/null @@ -1,28 +0,0 @@ -${AnsiOrange} - oo - oooo - oooooo - oooooooo - oooooooooo - oooooooooooo - ooooooo ooooooo - oooooo ooooooo - oooooo ooooooo - oooooo o o oooooo - oooooo oo oo oooooo - ooooooo oooo oooo ooooooo - oooooo ooooo ooooo ooooooo - oooooo oooooo oooooo ooooooo - oooooooo oo oo oooooooo - ooooooooooooo oo oo 
ooooooooooooo - oooooooooooo oooooooooooo - oooooooo oooooooo - oooo oooo -${AnsiNormal} - ________ ____ _ _____ _ _ - |__ /_ _| _ \| |/ /_ _| \ | | - / / | || |_) | ' / | || \| | - / /_ | || __/| . \ | || |\ | - |____|___|_| |_|\_\___|_| \_| - -:: version @project.version@ :: commit @git.commit.id.abbrev@ :: diff --git a/zipkin-server/src/test/java/zipkin/server/ITEnableZipkinServer.java b/zipkin-server/src/test/java/zipkin/server/ITEnableZipkinServer.java deleted file mode 100644 index 7b8a317644e..00000000000 --- a/zipkin-server/src/test/java/zipkin/server/ITEnableZipkinServer.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin.server; - -import com.linecorp.armeria.server.Server; -import java.io.IOException; -import okhttp3.OkHttpClient; -import okhttp3.Request; -import okhttp3.Response; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.boot.autoconfigure.SpringBootApplication; -import org.springframework.boot.test.context.SpringBootTest; -import org.springframework.test.context.junit4.SpringRunner; - -import static org.assertj.core.api.Assertions.assertThat; -import static zipkin2.server.internal.ITZipkinServer.url; - -@SpringBootTest( - classes = CustomServer.class, - webEnvironment = SpringBootTest.WebEnvironment.NONE, // RANDOM_PORT requires spring-web - properties = { - "server.port=0", - "spring.config.name=zipkin-server" - } -) -@RunWith(SpringRunner.class) -public class ITEnableZipkinServer { - - @Autowired Server server; - - OkHttpClient client = new OkHttpClient.Builder().followRedirects(false).build(); - - @Test public void writeSpans_noContentTypeIsJson() throws Exception { - Response response = get("/api/v2/services"); - - assertThat(response.code()) - .isEqualTo(200); - } - - Response get(String path) throws IOException { - return client.newCall(new Request.Builder() - .url(url(server, path)) - .build()).execute(); - } -} -@SpringBootApplication -@EnableZipkinServer -class CustomServer { - -} diff --git a/zipkin-server/src/test/java/zipkin2/collector/activemq/ZipkinActiveMQCollectorPropertiesOverrideTest.java b/zipkin-server/src/test/java/zipkin2/collector/activemq/ZipkinActiveMQCollectorPropertiesOverrideTest.java deleted file mode 100644 index f41f4717159..00000000000 --- a/zipkin-server/src/test/java/zipkin2/collector/activemq/ZipkinActiveMQCollectorPropertiesOverrideTest.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this 
file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.collector.activemq; - -import java.util.Arrays; -import java.util.function.Function; -import org.assertj.core.api.Assertions; -import org.junit.After; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameter; -import org.junit.runners.Parameterized.Parameters; -import org.springframework.boot.test.util.TestPropertyValues; -import org.springframework.context.annotation.AnnotationConfigApplicationContext; -import zipkin2.server.internal.activemq.Access; - -@RunWith(Parameterized.class) -public class ZipkinActiveMQCollectorPropertiesOverrideTest { - - AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext(); - - @After public void close() { - context.close(); - } - - @Parameter(0) - public String property; - - @Parameter(1) - public Object value; - - @Parameter(2) - public Function builderExtractor; - - @Parameters(name = "{0}") - public static Iterable data() { - return Arrays.asList( - parameters("url", "failover:(tcp://localhost:61616,tcp://remotehost:61616)", - b -> b.connectionFactory.getBrokerURL()), - parameters("client-id-prefix", "zipkin-prod", b -> b.connectionFactory.getClientIDPrefix()), - parameters("queue", "zapkin", b -> b.queue), - parameters("concurrency", 2, b -> b.concurrency), - parameters("username", "u", b -> b.connectionFactory.getUserName()), - parameters("password", "p", b -> b.connectionFactory.getPassword()) - ); - } - - /** to allow us to 
define with a lambda */ - static Object[] parameters( - String propertySuffix, T value, Function builderExtractor) { - return new Object[] {"zipkin.collector.activemq." + propertySuffix, value, builderExtractor}; - } - - @Test public void propertyTransferredToCollectorBuilder() { - if (!property.endsWith("url")) { - TestPropertyValues.of("zipkin.collector.activemq.url:tcp://localhost:61616").applyTo(context); - } - - TestPropertyValues.of("zipkin.collector.activemq.$property:$value").applyTo(context); - - if (property.endsWith("username")) { - TestPropertyValues.of("zipkin.collector.activemq.password:p").applyTo(context); - } - - if (property.endsWith("password")) { - TestPropertyValues.of("zipkin.collector.activemq.username:u").applyTo(context); - } - - TestPropertyValues.of(property + ":" + value).applyTo(context); - Access.registerActiveMQProperties(context); - context.refresh(); - - Assertions.assertThat(Access.collectorBuilder(context)) - .extracting(builderExtractor) - .isEqualTo(value); - } -} diff --git a/zipkin-server/src/test/java/zipkin2/collector/kafka/ZipkinKafkaCollectorPropertiesOverrideTest.java b/zipkin-server/src/test/java/zipkin2/collector/kafka/ZipkinKafkaCollectorPropertiesOverrideTest.java deleted file mode 100644 index d22c04b12b0..00000000000 --- a/zipkin-server/src/test/java/zipkin2/collector/kafka/ZipkinKafkaCollectorPropertiesOverrideTest.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. 
See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.collector.kafka; - -import java.util.Arrays; -import java.util.function.Function; -import org.assertj.core.api.Assertions; -import org.junit.After; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.springframework.boot.test.util.TestPropertyValues; -import org.springframework.context.annotation.AnnotationConfigApplicationContext; -import zipkin2.server.internal.kafka.Access; - -@RunWith(Parameterized.class) -public class ZipkinKafkaCollectorPropertiesOverrideTest { - - AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext(); - - @After - public void close() { - if (context != null) context.close(); - } - - @Parameterized.Parameter(0) - public String property; - - @Parameterized.Parameter(1) - public Object value; - - @Parameterized.Parameter(2) - public Function builderExtractor; - - @Parameterized.Parameters(name = "{0}") - public static Iterable data() { - return Arrays.asList( - parameters( - "bootstrap-servers", - "127.0.0.1:9092", - b -> b.properties.getProperty("bootstrap.servers")), - parameters("group-id", "zapkin", b -> b.properties.getProperty("group.id")), - parameters("topic", "zapkin", b -> b.topic), - parameters("streams", 2, b -> b.streams), - parameters( - "overrides.auto.offset.reset", - "latest", - b -> b.properties.getProperty("auto.offset.reset"))); - } - - /** to allow us to define with a lambda */ - static Object[] parameters( - String propertySuffix, T value, Function builderExtractor) { - return new Object[] {"zipkin.collector.kafka." 
+ propertySuffix, value, builderExtractor}; - } - - @Test - public void propertyTransferredToCollectorBuilder() { - TestPropertyValues.of(property + ":" + value).applyTo(context); - Access.registerKafkaProperties(context); - context.refresh(); - - Assertions.assertThat(Access.collectorBuilder(context)) - .extracting(builderExtractor) - .isEqualTo(value); - } -} diff --git a/zipkin-server/src/test/java/zipkin2/collector/rabbitmq/ZipkinRabbitMQCollectorPropertiesOverrideTest.java b/zipkin-server/src/test/java/zipkin2/collector/rabbitmq/ZipkinRabbitMQCollectorPropertiesOverrideTest.java deleted file mode 100644 index 25e79aa554d..00000000000 --- a/zipkin-server/src/test/java/zipkin2/collector/rabbitmq/ZipkinRabbitMQCollectorPropertiesOverrideTest.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.collector.rabbitmq; - -import java.net.URI; -import java.util.Arrays; -import java.util.function.Function; -import org.junit.After; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.springframework.boot.test.util.TestPropertyValues; -import org.springframework.context.annotation.AnnotationConfigApplicationContext; -import zipkin2.server.internal.rabbitmq.Access; - -import static org.assertj.core.api.Assertions.assertThat; - -@RunWith(Parameterized.class) -public class ZipkinRabbitMQCollectorPropertiesOverrideTest { - - AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext(); - - @After - public void close() { - if (context != null) context.close(); - } - - @Parameterized.Parameter(0) - public String property; - - @Parameterized.Parameter(1) - public Object value; - - @Parameterized.Parameter(2) - public Function builderExtractor; - - @Parameterized.Parameters(name = "{0}") - public static Iterable data() { - return Arrays.asList( - // intentionally punting on comma-separated form of a list of addresses as it doesn't fit - // this unit test. Better to make a separate one than force-fit! 
- parameters("addresses", "localhost:5671", builder -> builder.addresses[0].toString()), - parameters("concurrency", 2, builder -> builder.concurrency), - parameters( - "connectionTimeout", - 30_000, - builder -> builder.connectionFactory.getConnectionTimeout()), - parameters("password", "admin", builder -> builder.connectionFactory.getPassword()), - parameters("queue", "zapkin", builder -> builder.queue), - parameters("username", "admin", builder -> builder.connectionFactory.getUsername()), - parameters("virtualHost", "/hello", builder -> builder.connectionFactory.getVirtualHost()), - parameters("useSsl", true, builder -> builder.connectionFactory.isSSL()), - parameters( - "uri", - URI.create("amqp://localhost"), - builder -> URI.create("amqp://" + builder.connectionFactory.getHost()))); - } - - /** to allow us to define with a lambda */ - static Object[] parameters( - String propertySuffix, T value, Function builderExtractor) { - return new Object[] {"zipkin.collector.rabbitmq." + propertySuffix, value, builderExtractor}; - } - - @Test - public void propertyTransferredToCollectorBuilder() throws Exception { - TestPropertyValues.of(property + ":" + value).applyTo(context); - Access.registerRabbitMQProperties(context); - context.refresh(); - - assertThat(Access.collectorBuilder(context)) - .extracting(builderExtractor) - .isEqualTo(value); - } -} diff --git a/zipkin-server/src/test/java/zipkin2/collector/scribe/ZipkinScribeCollectorConfigurationTest.java b/zipkin-server/src/test/java/zipkin2/collector/scribe/ZipkinScribeCollectorConfigurationTest.java deleted file mode 100644 index b61104f3320..00000000000 --- a/zipkin-server/src/test/java/zipkin2/collector/scribe/ZipkinScribeCollectorConfigurationTest.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.collector.scribe; - -import org.junit.After; -import org.junit.Test; -import org.springframework.beans.factory.NoSuchBeanDefinitionException; -import org.springframework.boot.autoconfigure.context.PropertyPlaceholderAutoConfiguration; -import org.springframework.boot.test.util.TestPropertyValues; -import org.springframework.context.annotation.AnnotationConfigApplicationContext; -import zipkin2.server.internal.InMemoryConfiguration; -import zipkin2.server.internal.scribe.ZipkinScribeCollectorConfiguration; - -import static org.assertj.core.api.Assertions.assertThat; - -public class ZipkinScribeCollectorConfigurationTest { - AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext(); - - @After public void close() { - context.close(); - } - - @Test(expected = NoSuchBeanDefinitionException.class) - public void doesntProvidesCollectorComponent_byDefault() { - refreshContext(); - - context.getBean(ScribeCollector.class); - } - - /** Note: this will flake if you happen to be running a server on port 9410! 
*/ - @Test public void providesCollectorComponent_whenEnabled() { - TestPropertyValues.of("zipkin.collector.scribe.enabled:true").applyTo(context); - refreshContext(); - - assertThat(context.getBean(ScribeCollector.class)).isNotNull(); - } - - @Test public void canOverrideProperty_port() { - TestPropertyValues.of( - "zipkin.collector.scribe.enabled:true", - "zipkin.collector.scribe.port:9999") - .applyTo(context); - refreshContext(); - - assertThat(context.getBean(ScribeCollector.class).server.port) - .isEqualTo(9999); - } - - public void refreshContext() { - context.register( - PropertyPlaceholderAutoConfiguration.class, - ZipkinScribeCollectorConfiguration.class, - InMemoryConfiguration.class); - context.refresh(); - } -} diff --git a/zipkin-server/src/test/java/zipkin2/server/internal/ITActuatorMappings.java b/zipkin-server/src/test/java/zipkin2/server/internal/ITActuatorMappings.java deleted file mode 100644 index dc7ecf50fab..00000000000 --- a/zipkin-server/src/test/java/zipkin2/server/internal/ITActuatorMappings.java +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal; - -import com.linecorp.armeria.server.Server; -import io.micrometer.prometheus.PrometheusMeterRegistry; -import java.io.IOException; -import java.io.InterruptedIOException; -import okhttp3.OkHttpClient; -import okhttp3.Request; -import okhttp3.Response; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.boot.test.context.SpringBootTest; -import org.springframework.test.context.junit4.SpringRunner; -import zipkin.server.ZipkinServer; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assumptions.assumeThat; -import static zipkin2.server.internal.ITZipkinServer.url; - -@SpringBootTest( - classes = ZipkinServer.class, - webEnvironment = SpringBootTest.WebEnvironment.NONE, // RANDOM_PORT requires spring-web - properties = { - "server.port=0", - "spring.config.name=zipkin-server" - } -) -@RunWith(SpringRunner.class) -public class ITActuatorMappings { - @Autowired PrometheusMeterRegistry registry; - @Autowired Server server; - - OkHttpClient client = new OkHttpClient.Builder().followRedirects(true).build(); - - @Test public void actuatorIsOK() throws Exception { - assumeThat(get("/actuator").isSuccessful()) // actuator is optional - .isTrue(); - - // ensure we don't track actuator in prometheus - assertThat(scrape()) - .doesNotContain("actuator"); - } - - @Test public void actuatorInfoEndpointHasDifferentContentType() throws IOException { - Response info = get("/info"); - Response actuatorInfo = get("/actuator/info"); - - // Different content type - assertThat(actuatorInfo.isSuccessful()).isTrue(); - assertThat(actuatorInfo.body().contentType()) - .isNotEqualTo(info.body().contentType()) - .hasToString("application/vnd.spring-boot.actuator.v2+json; charset=utf-8"); - - // Same content - assertThat(actuatorInfo.body().string()) - .isEqualTo(info.body().string()); - - // ensure we don't track 
info in prometheus - assertThat(scrape()) - .doesNotContain("/info"); - } - - @Test public void actuatorHealthEndpointHasDifferentContentType() throws IOException { - Response health = get("/health"); - Response actuatorHealth = get("/actuator/health"); - - // Different content type - assertThat(actuatorHealth.isSuccessful()).isTrue(); - assertThat(actuatorHealth.body().contentType()) - .isNotEqualTo(health.body().contentType()) - .hasToString("application/vnd.spring-boot.actuator.v2+json; charset=utf-8"); - - // Same content - assertThat(actuatorHealth.body().string()) - .isEqualTo(health.body().string()); - - // ensure we don't track health in prometheus - assertThat(scrape()) - .doesNotContain("/health"); - } - - Response get(String path) throws IOException { - return client.newCall(new Request.Builder() - .url(url(server, path)) - .build()).execute(); - } - - String scrape() throws IOException { - try { - Thread.sleep(100); - } catch (InterruptedException e) { - throw new InterruptedIOException(e.getMessage()); - } - return registry.scrape(); - } -} diff --git a/zipkin-server/src/test/java/zipkin2/server/internal/ITZipkinGrpcCollector.java b/zipkin-server/src/test/java/zipkin2/server/internal/ITZipkinGrpcCollector.java deleted file mode 100644 index 09586c4109a..00000000000 --- a/zipkin-server/src/test/java/zipkin2/server/internal/ITZipkinGrpcCollector.java +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. 
See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.server.internal; - -import com.linecorp.armeria.server.Server; -import java.io.IOException; -import okhttp3.MediaType; -import okhttp3.OkHttpClient; -import okhttp3.Request; -import okhttp3.RequestBody; -import okhttp3.Response; -import okio.Buffer; -import okio.BufferedSource; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.boot.test.context.SpringBootTest; -import org.springframework.test.context.junit4.SpringRunner; -import zipkin.server.ZipkinServer; -import zipkin2.TestObjects; -import zipkin2.codec.SpanBytesDecoder; -import zipkin2.codec.SpanBytesEncoder; -import zipkin2.proto3.ListOfSpans; -import zipkin2.proto3.ReportResponse; -import zipkin2.storage.InMemoryStorage; - -import static java.util.Arrays.asList; -import static okhttp3.Protocol.H2_PRIOR_KNOWLEDGE; -import static org.assertj.core.api.Assertions.assertThat; -import static org.awaitility.Awaitility.await; -import static zipkin2.server.internal.ITZipkinServer.url; - -/** This tests that we accept messages constructed by other clients. 
*/ -@SpringBootTest( - classes = ZipkinServer.class, - webEnvironment = SpringBootTest.WebEnvironment.NONE, // RANDOM_PORT requires spring-web - properties = { - "server.port=0", - "spring.config.name=zipkin-server", - "zipkin.collector.grpc.enabled=true" - } -) -@RunWith(SpringRunner.class) -public class ITZipkinGrpcCollector { - @Autowired InMemoryStorage storage; - @Autowired Server server; - - @Before public void init() { - storage.clear(); - } - - OkHttpClient client = new OkHttpClient.Builder().protocols(asList(H2_PRIOR_KNOWLEDGE)).build(); - - ListOfSpans request; - - @Before public void sanityCheckCodecCompatible() throws IOException { - request = ListOfSpans.ADAPTER.decode(SpanBytesEncoder.PROTO3.encodeList(TestObjects.TRACE)); - - assertThat(SpanBytesDecoder.PROTO3.decodeList(request.encode())) - .containsExactlyElementsOf(TestObjects.TRACE); // sanity check codec compatible - } - - @Test public void report_trace() throws IOException { - callReport(request); // Result is effectively void - - awaitSpans(); - - assertThat(storage.getTraces()) - .containsExactly(TestObjects.TRACE); - } - - @Test public void report_emptyIsOk() throws IOException { - - callReport(new ListOfSpans.Builder().build()); - } - - ReportResponse callReport(ListOfSpans spans) throws IOException { - Buffer requestBody = new Buffer(); - requestBody.writeByte(0 /* compressedFlag */); - Buffer encodedMessage = new Buffer(); - ListOfSpans.ADAPTER.encode(encodedMessage, spans); - requestBody.writeInt((int) encodedMessage.size()); - requestBody.writeAll(encodedMessage); - - Response response = client.newCall(new Request.Builder() - .url(url(server, "/zipkin.proto3.SpanService/Report")) - .addHeader("te", "trailers") - .post(RequestBody.create(requestBody.snapshot(), MediaType.get("application/grpc"))) - .build()) - .execute(); - - BufferedSource responseBody = response.body().source(); - assertThat((int) responseBody.readByte()).isEqualTo(0); // uncompressed - long encodedLength = 
responseBody.readInt() & 0xffffffffL; - - return ReportResponse.ADAPTER.decode(responseBody); - } - - void awaitSpans() { - await().untilAsserted(// wait for spans - () -> assertThat(storage.acceptedSpanCount()).isGreaterThanOrEqualTo(1)); - } -} diff --git a/zipkin-server/src/test/java/zipkin2/server/internal/ITZipkinServer.java b/zipkin-server/src/test/java/zipkin2/server/internal/ITZipkinServer.java deleted file mode 100644 index 49f0208cded..00000000000 --- a/zipkin-server/src/test/java/zipkin2/server/internal/ITZipkinServer.java +++ /dev/null @@ -1,297 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal; - -import com.linecorp.armeria.common.HttpStatus; -import com.linecorp.armeria.server.Server; -import java.io.IOException; -import java.io.InputStream; -import java.net.URL; -import java.util.Arrays; -import java.util.List; -import okhttp3.OkHttpClient; -import okhttp3.Request; -import okhttp3.Response; -import okio.Okio; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.boot.test.context.SpringBootTest; -import org.springframework.test.context.junit4.SpringRunner; -import zipkin.server.ZipkinServer; -import zipkin2.Endpoint; -import zipkin2.Span; -import zipkin2.TestObjects; -import zipkin2.codec.SpanBytesEncoder; -import zipkin2.storage.InMemoryStorage; - -import static java.util.Arrays.asList; -import static org.assertj.core.api.Assertions.assertThat; -import static zipkin2.TestObjects.CLIENT_SPAN; -import static zipkin2.TestObjects.FRONTEND; -import static zipkin2.TestObjects.TODAY; -import static zipkin2.TestObjects.UTF_8; - -@SpringBootTest( - classes = ZipkinServer.class, - webEnvironment = SpringBootTest.WebEnvironment.NONE, // RANDOM_PORT requires spring-web - properties = { - "server.port=0", - "spring.config.name=zipkin-server" - } -) -@RunWith(SpringRunner.class) -public class ITZipkinServer { - static final List TRACE = asList(TestObjects.CLIENT_SPAN); - - @Autowired InMemoryStorage storage; - @Autowired Server server; - - OkHttpClient client = new OkHttpClient.Builder().followRedirects(true).build(); - - @Before public void init() { - storage.clear(); - } - - @Test public void getTrace() throws Exception { - storage.accept(TRACE).execute(); - - Response response = get("/api/v2/trace/" + TRACE.get(0).traceId()); - assertThat(response.isSuccessful()).isTrue(); - - assertThat(response.body().bytes()) - .containsExactly(SpanBytesEncoder.JSON_V2.encodeList(TRACE)); - } - - @Test public void 
getTrace_notFound() throws Exception { - Response response = get("/api/v2/trace/" + TRACE.get(0).traceId()); - assertThat(response.code()).isEqualTo(404); - - assertThat(response.body().string()) - .isEqualTo(TRACE.get(0).traceId() + " not found"); - } - - @Test public void getTrace_malformed() throws Exception { - storage.accept(TRACE).execute(); - - Response response = get("/api/v2/trace/0e8b46e1-81b"); - assertThat(response.code()).isEqualTo(400); - - assertThat(response.body().string()) - .isEqualTo("0e8b46e1-81b should be lower-hex encoded with no prefix"); - } - - @Test public void getTraces() throws Exception { - storage.accept(TRACE).execute(); - - Response response = get("/api/v2/traceMany?traceIds=abcd," + TRACE.get(0).traceId()); - assertThat(response.isSuccessful()).isTrue(); - - assertThat(response.body().string()) - .isEqualTo("[" + new String(SpanBytesEncoder.JSON_V2.encodeList(TRACE), UTF_8) + "]"); - } - - @Test public void getTraces_emptyNotOk() throws Exception { - storage.accept(TRACE).execute(); - - Response response = get("/api/v2/traceMany?traceIds="); - assertThat(response.code()).isEqualTo(400); - - assertThat(response.body().string()) - .isEqualTo("traceIds parameter is empty"); - } - - @Test public void getTraces_singleNotOk() throws Exception { - storage.accept(TRACE).execute(); - - Response response = get("/api/v2/traceMany?traceIds=" + TRACE.get(0).traceId()); - assertThat(response.code()).isEqualTo(400); - - assertThat(response.body().string()) - .isEqualTo("Use /api/v2/trace/{traceId} endpoint to retrieve a single trace"); - } - - @Test public void getTraces_malformed() throws Exception { - storage.accept(TRACE).execute(); - - Response response = get("/api/v2/traceMany?traceIds=abcd,0e8b46e1-81b"); - assertThat(response.code()).isEqualTo(400); - - assertThat(response.body().string()) - .isEqualTo("0e8b46e1-81b should be lower-hex encoded with no prefix"); - } - - @Test public void tracesQueryRequiresNoParameters() throws Exception { 
- storage.accept(TRACE).execute(); - - Response response = get("/api/v2/traces"); - assertThat(response.isSuccessful()).isTrue(); - assertThat(response.body().string()) - .isEqualTo("[" + new String(SpanBytesEncoder.JSON_V2.encodeList(TRACE), UTF_8) + "]"); - } - - @Test public void v2WiresUp() throws Exception { - assertThat(get("/api/v2/services").isSuccessful()) - .isTrue(); - } - - @Test public void doesntSetCacheControlOnNameEndpointsWhenLessThan4Services() throws Exception { - storage.accept(TRACE).execute(); - - assertThat(get("/api/v2/services").header("Cache-Control")) - .isNull(); - - assertThat(get("/api/v2/spans?serviceName=web").header("Cache-Control")) - .isNull(); - - assertThat(get("/api/v2/remoteServices?serviceName=web").header("Cache-Control")) - .isNull(); - } - - @Test public void spanNameQueryWorksWithNonAsciiServiceName() throws Exception { - assertThat(get("/api/v2/spans?serviceName=个人信息服务").code()) - .isEqualTo(200); - } - - @Test public void remoteServiceNameQueryWorksWithNonAsciiServiceName() throws Exception { - assertThat(get("/api/v2/remoteServices?serviceName=个人信息服务").code()) - .isEqualTo(200); - } - - @Test public void remoteServiceNameReturnsCorrectJsonForEscapedWhitespaceInName() - throws Exception { - storage.accept(Arrays.asList(CLIENT_SPAN.toBuilder() - .localEndpoint(FRONTEND.toBuilder().serviceName("foo\tbar").build()) - .build())) - .execute(); - Response response = get("/api/v2/services"); - assertThat(response.isSuccessful()).isTrue(); - assertThat(response.body().string()).isEqualTo("[\"foo\\tbar\"]"); - } - - @Test public void setsCacheControlOnNameEndpointsWhenMoreThan3Services() throws Exception { - List services = asList("foo", "bar", "baz", "quz"); - for (int i = 0; i < services.size(); i++) { - storage.accept(asList( - Span.newBuilder().traceId("a").id(i + 1).timestamp(TODAY).name("whopper") - .localEndpoint(Endpoint.newBuilder().serviceName(services.get(i)).build()) - 
.remoteEndpoint(Endpoint.newBuilder().serviceName(services.get(i) + 1).build()) - .build() - )).execute(); - } - - assertThat(get("/api/v2/services").header("Cache-Control")) - .isEqualTo("max-age=300, must-revalidate"); - - assertThat(get("/api/v2/spans?serviceName=web").header("Cache-Control")) - .isEqualTo("max-age=300, must-revalidate"); - - assertThat(get("/api/v2/remoteServices?serviceName=web").header("Cache-Control")) - .isEqualTo("max-age=300, must-revalidate"); - - // Check that the response is alphabetically sorted. - assertThat(get("/api/v2/services").body().string()) - .isEqualTo("[\"bar\",\"baz\",\"foo\",\"quz\"]"); - } - - @Test public void shouldAllowAnyOriginByDefault() throws Exception { - Response response = client.newCall(new Request.Builder() - .url(url(server, "/api/v2/traces")) - .header("Origin", "http://foo.example.com") - .build()).execute(); - - assertThat(response.isSuccessful()).isTrue(); - assertThat(response.header("vary")).isNull(); - assertThat(response.header("access-control-allow-credentials")).isNull(); - assertThat(response.header("access-control-allow-origin")).contains("*"); - } - - @Test public void forwardsApiForUi() throws Exception { - assertThat(get("/zipkin/api/v2/traces").isSuccessful()).isTrue(); - assertThat(get("/zipkin/api/v2/traces").isSuccessful()).isTrue(); - } - - /** Simulate a proxy which forwards / to zipkin as opposed to resolving / -> /zipkin first */ - @Test public void redirectedHeaderUsesOriginalHostAndPort() throws Exception { - Request forwarded = new Request.Builder() - .url(url(server, "/")) - .addHeader("Host", "zipkin.com") - .addHeader("X-Forwarded-Proto", "https") - .addHeader("X-Forwarded-Port", "444") - .build(); - - Response response = client.newBuilder().followRedirects(false).build() - .newCall(forwarded).execute(); - - // Redirect header should be the proxy, not the backed IP/port - assertThat(response.header("Location")) - .isEqualTo("/zipkin/"); - } - - @Test public void 
infoEndpointIsAvailable() throws IOException { - Response info = get("/info"); - assertThat(info.isSuccessful()).isTrue(); - assertThat(info.body().contentType().toString()) - .isEqualTo("application/json; charset=utf-8"); - assertThat(info.body().string()) - .isEqualToIgnoringWhitespace(stringFromClasspath(getClass(), "info.json")); - } - - @Test public void getTrace_spaceAfterTraceId() throws Exception { - storage.accept(TRACE).execute(); - - Response response = get("/api/v2/trace/" + TRACE.get(0).traceId() + " "); - assertThat(response.isSuccessful()).isTrue(); - - assertThat(response.body().bytes()) - .containsExactly(SpanBytesEncoder.JSON_V2.encodeList(TRACE)); - } - - @Test public void traceMethodDisallowed() { - // trace method is disallowed for any route but we just test couple of paths here, can't test them all - Arrays.stream(new String[]{"/", "/api/v2/traces", "/whatever/and/not"}).forEach(path -> { - final Response response; - try { - response = client.newCall(new Request.Builder().url(url(server, path)) - .method("TRACE", null).build()) - .execute(); - assertThat(response.isSuccessful()).isFalse(); - assertThat(response.code()).isEqualTo(HttpStatus.METHOD_NOT_ALLOWED.code()); - } catch (IOException e) { - throw new RuntimeException(e.getMessage(), e); - } - }); - } - - - private Response get(String path) throws IOException { - return client.newCall(new Request.Builder() - .url(url(server, path)) - .build()).execute(); - } - - public static String url(Server server, String path) { - return "http://localhost:" + server.activeLocalPort() + path; - } - - public static String stringFromClasspath(Class thisClass, String path) throws IOException { - URL url = thisClass.getClassLoader().getResource(path); - assertThat(url).isNotNull(); - - try (InputStream fromClasspath = url.openStream()) { - return Okio.buffer(Okio.source(fromClasspath)).readUtf8(); - } - } -} diff --git a/zipkin-server/src/test/java/zipkin2/server/internal/ITZipkinServerAutocomplete.java 
b/zipkin-server/src/test/java/zipkin2/server/internal/ITZipkinServerAutocomplete.java deleted file mode 100644 index 330b6195193..00000000000 --- a/zipkin-server/src/test/java/zipkin2/server/internal/ITZipkinServerAutocomplete.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.server.internal; - -import com.linecorp.armeria.server.Server; -import java.io.IOException; -import okhttp3.OkHttpClient; -import okhttp3.Request; -import okhttp3.RequestBody; -import okhttp3.Response; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.boot.test.context.SpringBootTest; -import org.springframework.test.context.junit4.SpringRunner; -import zipkin.server.ZipkinServer; -import zipkin2.Span; -import zipkin2.codec.SpanBytesEncoder; - -import static java.util.Arrays.asList; -import static org.assertj.core.api.Assertions.assertThat; -import static zipkin2.TestObjects.TODAY; -import static zipkin2.server.internal.ITZipkinServer.url; - -/** - * Integration test suite for autocomplete tags. - * - * Verifies that the whitelist of key can be configured via "zipkin.storage.autocomplete-keys". 
- */ -@SpringBootTest( - classes = ZipkinServer.class, - webEnvironment = SpringBootTest.WebEnvironment.NONE, // RANDOM_PORT requires spring-web - properties = { - "server.port=0", - "spring.config.name=zipkin-server", - "zipkin.storage.autocomplete-keys=environment,clnt/finagle.version" - } -) -@RunWith(SpringRunner.class) -public class ITZipkinServerAutocomplete { - - @Autowired Server server; - OkHttpClient client = new OkHttpClient.Builder().followRedirects(false).build(); - - @Test public void setsCacheControlOnAutocompleteKeysEndpoint() throws Exception { - assertThat(get("/api/v2/autocompleteKeys").header("Cache-Control")) - .isEqualTo("max-age=300, must-revalidate"); - } - - @Test public void setsCacheControlOnAutocompleteEndpointWhenMoreThan3Values() throws Exception { - assertThat(get("/api/v2/autocompleteValues?key=environment").header("Cache-Control")) - .isNull(); - assertThat(get("/api/v2/autocompleteValues?key=clnt/finagle.version").header("Cache-Control")) - .isNull(); - - for (int i = 0; i < 4; i++) { - post("/api/v2/spans", SpanBytesEncoder.JSON_V2.encodeList(asList( - Span.newBuilder().traceId("a").id(i + 1).timestamp(TODAY).name("whopper") - .putTag("clnt/finagle.version", "6.45." 
+ i).build() - ))); - } - - assertThat(get("/api/v2/autocompleteValues?key=environment").header("Cache-Control")) - .isNull(); - assertThat(get("/api/v2/autocompleteValues?key=clnt/finagle.version").header("Cache-Control")) - .isEqualTo("max-age=300, must-revalidate"); - } - - private Response get(String path) throws IOException { - return client.newCall(new Request.Builder() - .url(url(server, path)) - .build()).execute(); - } - - private Response post(String path, byte[] body) throws IOException { - return client.newCall(new Request.Builder() - .url(url(server, path)) - .post(RequestBody.create(body)) - .build()).execute(); - } -} diff --git a/zipkin-server/src/test/java/zipkin2/server/internal/ITZipkinServerCORS.java b/zipkin-server/src/test/java/zipkin2/server/internal/ITZipkinServerCORS.java deleted file mode 100644 index a5623e0119f..00000000000 --- a/zipkin-server/src/test/java/zipkin2/server/internal/ITZipkinServerCORS.java +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal; - -import com.linecorp.armeria.server.Server; -import java.io.IOException; -import okhttp3.MediaType; -import okhttp3.OkHttpClient; -import okhttp3.Request; -import okhttp3.RequestBody; -import okhttp3.Response; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.boot.test.context.SpringBootTest; -import org.springframework.test.context.junit4.SpringRunner; -import zipkin.server.ZipkinServer; - -import static org.assertj.core.api.Assertions.assertThat; -import static zipkin2.server.internal.ITZipkinServer.url; - -/** - * Integration test suite for CORS configuration. - * - * Verifies that allowed-origins can be configured via properties (zipkin.query.allowed-origins). - */ -@SpringBootTest( - classes = ZipkinServer.class, - webEnvironment = SpringBootTest.WebEnvironment.NONE, // RANDOM_PORT requires spring-web - properties = { - "server.port=0", - "spring.config.name=zipkin-server", - "zipkin.query.allowed-origins=" + ITZipkinServerCORS.ALLOWED_ORIGIN - } -) -@RunWith(SpringRunner.class) -public class ITZipkinServerCORS { - static final String ALLOWED_ORIGIN = "http://foo.example.com"; - static final String DISALLOWED_ORIGIN = "http://bar.example.com"; - - @Autowired Server server; - OkHttpClient client = new OkHttpClient.Builder().followRedirects(false).build(); - - /** Notably, javascript makes pre-flight requests, and won't POST spans if disallowed! 
*/ - @Test public void shouldAllowConfiguredOrigin_preflight() throws Exception { - shouldPermitPreflight(optionsForOrigin("GET", "/api/v2/traces", ALLOWED_ORIGIN)); - shouldPermitPreflight(optionsForOrigin("POST", "/api/v2/spans", ALLOWED_ORIGIN)); - } - - static void shouldPermitPreflight(Response response) { - assertThat(response.isSuccessful()) - .withFailMessage(response.toString()) - .isTrue(); - assertThat(response.header("vary")).contains("origin"); - assertThat(response.header("access-control-allow-origin")).contains(ALLOWED_ORIGIN); - assertThat(response.header("access-control-allow-methods")) - .contains(response.request().header("access-control-request-method")); - assertThat(response.header("access-control-allow-credentials")).isNull(); - assertThat(response.header("access-control-allow-headers")).contains("content-type"); - } - - @Test public void shouldAllowConfiguredOrigin() throws Exception { - shouldAllowConfiguredOrigin(getTracesFromOrigin(ALLOWED_ORIGIN)); - shouldAllowConfiguredOrigin(postSpansFromOrigin(ALLOWED_ORIGIN)); - } - - static void shouldAllowConfiguredOrigin(Response response) { - assertThat(response.header("vary")).contains("origin"); - assertThat(response.header("access-control-allow-origin")) - .contains(response.request().header("origin")); - assertThat(response.header("access-control-allow-credentials")).isNull(); - assertThat(response.header("access-control-allow-headers")).contains("content-type"); - } - - @Test public void shouldDisallowOrigin() throws Exception { - shouldDisallowOrigin(getTracesFromOrigin(DISALLOWED_ORIGIN)); - shouldDisallowOrigin(postSpansFromOrigin(DISALLOWED_ORIGIN)); - } - - static void shouldDisallowOrigin(Response response) { - assertThat(response.header("vary")).isNull(); - assertThat(response.header("access-control-allow-credentials")).isNull(); - assertThat(response.header("access-control-allow-origin")).isNull(); - assertThat(response.header("access-control-allow-headers")).isNull(); - } - - 
private Response optionsForOrigin(String method, String path, String origin) throws IOException { - return client.newCall(new Request.Builder() - .url(url(server, path)) - .header("Origin", origin) - .header("access-control-request-method", method) - .header("access-control-request-headers", "content-type") - .method("OPTIONS", null) - .build()).execute(); - } - - private Response getTracesFromOrigin(String origin) throws IOException { - return client.newCall(new Request.Builder() - .url(url(server, "/api/v2/traces")) - .header("Origin", origin) - .build()).execute(); - } - - private Response postSpansFromOrigin(String origin) throws IOException { - return client.newCall(new Request.Builder() - .url(url(server, "/api/v2/spans")) - .header("Origin", origin) - .post(RequestBody.create("[]", MediaType.parse("application/json"))) - .build()).execute(); - } -} diff --git a/zipkin-server/src/test/java/zipkin2/server/internal/ITZipkinServerHttpCollectorDisabled.java b/zipkin-server/src/test/java/zipkin2/server/internal/ITZipkinServerHttpCollectorDisabled.java deleted file mode 100644 index ba585efcf20..00000000000 --- a/zipkin-server/src/test/java/zipkin2/server/internal/ITZipkinServerHttpCollectorDisabled.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal; - -import com.linecorp.armeria.server.Server; -import okhttp3.MediaType; -import okhttp3.OkHttpClient; -import okhttp3.Request; -import okhttp3.RequestBody; -import okhttp3.Response; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.boot.test.context.SpringBootTest; -import org.springframework.test.context.junit4.SpringRunner; -import zipkin.server.ZipkinServer; - -import static org.assertj.core.api.Assertions.assertThat; -import static zipkin2.server.internal.ITZipkinServer.url; - -/** - * Query-only builds should be able to disable the HTTP collector, so that associated assets 404 - * instead of allowing creation of spans. - */ -@SpringBootTest( - classes = ZipkinServer.class, - webEnvironment = SpringBootTest.WebEnvironment.NONE, // RANDOM_PORT requires spring-web - properties = { - "server.port=0", - "spring.config.name=zipkin-server", - "zipkin.storage.type=", // cheat and test empty storage type - "zipkin.collector.http.enabled=false" - }) -@RunWith(SpringRunner.class) -public class ITZipkinServerHttpCollectorDisabled { - - @Autowired Server server; - OkHttpClient client = new OkHttpClient.Builder().followRedirects(false).build(); - - @Test public void httpCollectorEndpointReturns404() throws Exception { - Response response = client.newCall(new Request.Builder() - .url(url(server, "/api/v2/spans")) - .post(RequestBody.create("[]", MediaType.parse("application/json"))) - .build()).execute(); - - assertThat(response.code()).isEqualTo(404); - } - - /** Shows the same http path still works for GET */ - @Test public void getOnSpansEndpointReturnsOK() throws Exception { - Response response = client.newCall(new Request.Builder() - .url(url(server, "/api/v2/spans?serviceName=unknown")) - .build()).execute(); - - assertThat(response.isSuccessful()).isTrue(); - } -} diff --git 
a/zipkin-server/src/test/java/zipkin2/server/internal/ITZipkinServerQueryDisabled.java b/zipkin-server/src/test/java/zipkin2/server/internal/ITZipkinServerQueryDisabled.java deleted file mode 100644 index 90f696b1c5a..00000000000 --- a/zipkin-server/src/test/java/zipkin2/server/internal/ITZipkinServerQueryDisabled.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.server.internal; - -import com.linecorp.armeria.server.Server; -import java.io.IOException; -import okhttp3.OkHttpClient; -import okhttp3.Request; -import okhttp3.Response; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.boot.test.context.SpringBootTest; -import org.springframework.test.context.junit4.SpringRunner; -import zipkin.server.ZipkinServer; - -import static org.assertj.core.api.Assertions.assertThat; -import static zipkin2.server.internal.ITZipkinServer.url; - -/** - * Collector-only builds should be able to disable the query (and indirectly the UI), so that - * associated assets 404 vs throw exceptions. 
- */ -@SpringBootTest( - classes = ZipkinServer.class, - webEnvironment = SpringBootTest.WebEnvironment.NONE, // RANDOM_PORT requires spring-web - properties = { - "server.port=0", - "spring.config.name=zipkin-server", - "zipkin.query.enabled=false", - "zipkin.ui.enabled=false" - } -) -@RunWith(SpringRunner.class) -public class ITZipkinServerQueryDisabled { - @Autowired Server server; - OkHttpClient client = new OkHttpClient.Builder().followRedirects(false).build(); - - @Test public void queryRelatedEndpoints404() throws Exception { - assertThat(get("/api/v2/traces").code()).isEqualTo(404); - assertThat(get("/index.html").code()).isEqualTo(404); - - // but other endpoints are ok - assertThat(get("/health").isSuccessful()).isTrue(); - } - - private Response get(String path) throws IOException { - return client.newCall(new Request.Builder().url(url(server, path)).build()).execute(); - } -} diff --git a/zipkin-server/src/test/java/zipkin2/server/internal/ITZipkinServerSsl.java b/zipkin-server/src/test/java/zipkin2/server/internal/ITZipkinServerSsl.java deleted file mode 100644 index 6fe2075a55f..00000000000 --- a/zipkin-server/src/test/java/zipkin2/server/internal/ITZipkinServerSsl.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal; - -import com.linecorp.armeria.client.ClientFactory; -import com.linecorp.armeria.client.WebClient; -import com.linecorp.armeria.common.AggregatedHttpResponse; -import com.linecorp.armeria.common.HttpStatus; -import com.linecorp.armeria.common.SessionProtocol; -import com.linecorp.armeria.server.Server; -import com.linecorp.armeria.spring.ArmeriaSettings; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.boot.test.context.SpringBootTest; -import org.springframework.test.context.junit4.SpringRunner; -import zipkin.server.ZipkinServer; - -import static org.assertj.core.api.Assertions.assertThat; -import static zipkin2.server.internal.elasticsearch.Access.configureSsl; - -/** - * This code ensures you can setup SSL. Look at {@link ArmeriaSettings} for property names. - * - *

This is inspired by com.linecorp.armeria.spring.ArmeriaSslConfigurationTest - */ -@SpringBootTest( - classes = ZipkinServer.class, - webEnvironment = SpringBootTest.WebEnvironment.NONE, // RANDOM_PORT requires spring-web - properties = { - "server.port=0", - "spring.config.name=zipkin-server", - // TODO: use normal spring.server properties after https://github.com/line/armeria/issues/1834 - "armeria.ssl.enabled=true", - "armeria.ssl.key-store=classpath:keystore.p12", - "armeria.ssl.key-store-password=password", - "armeria.ssl.key-store-type=PKCS12", - "armeria.ssl.trust-store=classpath:keystore.p12", - "armeria.ssl.trust-store-password=password", - "armeria.ssl.trust-store-type=PKCS12", - "armeria.ports[1].port=0", - "armeria.ports[1].protocols[0]=https", - // redundant in zipkin-server-shared https://github.com/spring-projects/spring-boot/issues/16394 - "armeria.ports[0].port=${server.port}", - "armeria.ports[0].protocols[0]=http", - }) -@RunWith(SpringRunner.class) -public class ITZipkinServerSsl { - @Autowired Server server; - @Autowired ArmeriaSettings armeriaSettings; - - ClientFactory clientFactory; - - @Before public void configureClientFactory() { - clientFactory = configureSsl(ClientFactory.builder(), armeriaSettings.getSsl()).build(); - } - - @Test public void callHealthEndpoint_HTTP() { - callHealthEndpoint(SessionProtocol.HTTP); - } - - @Test public void callHealthEndpoint_HTTPS() { - callHealthEndpoint(SessionProtocol.HTTPS); - } - - void callHealthEndpoint(SessionProtocol http) { - AggregatedHttpResponse response = - WebClient.builder(baseUrl(server, http)).factory(clientFactory).build() - .get("/health") - .aggregate().join(); - - assertThat(response.status()).isEqualTo(HttpStatus.OK); - } - - static String baseUrl(Server server, SessionProtocol protocol) { - return server.activePorts().values().stream() - .filter(p -> p.hasProtocol(protocol)).findAny() - .map(p -> protocol.uriText() + "://localhost:" + p.localAddress().getPort()) - .orElseThrow(() 
-> new AssertionError(protocol + " port not open")); - } -} diff --git a/zipkin-server/src/test/java/zipkin2/server/internal/ITZipkinServerTimeout.java b/zipkin-server/src/test/java/zipkin2/server/internal/ITZipkinServerTimeout.java deleted file mode 100644 index f35aaffe5e3..00000000000 --- a/zipkin-server/src/test/java/zipkin2/server/internal/ITZipkinServerTimeout.java +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Copyright 2015-2021 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal; - -import com.linecorp.armeria.server.Server; -import java.io.IOException; -import java.util.List; -import okhttp3.OkHttpClient; -import okhttp3.Request; -import okhttp3.Response; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.boot.test.context.SpringBootTest; -import org.springframework.boot.test.mock.mockito.MockBean; -import org.springframework.test.context.junit4.SpringRunner; -import zipkin.server.ZipkinServer; -import zipkin2.Call; -import zipkin2.DependencyLink; -import zipkin2.Span; -import zipkin2.TestObjects; -import zipkin2.internal.TracesAdapter; -import zipkin2.storage.InMemoryStorage; -import zipkin2.storage.QueryRequest; -import zipkin2.storage.SpanStore; -import zipkin2.storage.StorageComponent; - -import static java.util.Arrays.asList; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.when; - -@SpringBootTest( - classes = ZipkinServer.class, - webEnvironment = SpringBootTest.WebEnvironment.NONE, // RANDOM_PORT requires spring-web - properties = { - "server.port=0", - "zipkin.query.timeout=1ms", - "spring.config.name=zipkin-server" - } -) -@RunWith(SpringRunner.class) -public class ITZipkinServerTimeout { - static final List TRACE = asList(TestObjects.CLIENT_SPAN); - - SlowSpanStore spanStore; - - @MockBean StorageComponent storage; - @Autowired Server server; - - OkHttpClient client = new OkHttpClient.Builder().followRedirects(true).build(); - - @Before public void init() { - spanStore = new SlowSpanStore(); - when(storage.spanStore()).thenReturn(spanStore); - when(storage.traces()).thenReturn(new TracesAdapter(spanStore)); - } - - @Test public void getTrace() throws Exception { - spanStore.storage.accept(TRACE).execute(); - - Response response = get("/api/v2/trace/" + TRACE.get(0).traceId()); - 
assertThat(response.isSuccessful()).isFalse(); - - assertThat(response.code()).isEqualTo(500); - } - - Response get(String path) throws IOException { - return client.newCall(new Request.Builder() - .url(url(server, path)) - .build()).execute(); - } - - static String url(Server server, String path) { - return "http://localhost:" + server.activeLocalPort() + path; - } - - static class SlowSpanStore implements SpanStore { - final InMemoryStorage storage = InMemoryStorage.newBuilder().build(); - - @Override public Call>> getTraces(QueryRequest request) { - sleep(); - return storage.spanStore().getTraces(request); - } - - @Override public Call> getTrace(String traceId) { - sleep(); - return storage.spanStore().getTrace(traceId); - } - - @Override public Call> getServiceNames() { - sleep(); - return storage.spanStore().getServiceNames(); - } - - @Override public Call> getSpanNames(String serviceName) { - sleep(); - return storage.spanStore().getSpanNames(serviceName); - } - - @Override public Call> getDependencies(long endTs, long lookback) { - sleep(); - return storage.spanStore().getDependencies(endTs, lookback); - } - - static void sleep() { - try { - Thread.sleep(500); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - throw new Error(e); - } - } - } -} diff --git a/zipkin-server/src/test/java/zipkin2/server/internal/InMemoryConfiguration.java b/zipkin-server/src/test/java/zipkin2/server/internal/InMemoryConfiguration.java deleted file mode 100644 index 6b75277220a..00000000000 --- a/zipkin-server/src/test/java/zipkin2/server/internal/InMemoryConfiguration.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.server.internal; - -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Configuration; -import zipkin2.collector.CollectorMetrics; -import zipkin2.collector.CollectorSampler; -import zipkin2.storage.InMemoryStorage; -import zipkin2.storage.StorageComponent; - -@Configuration -public class InMemoryConfiguration { - @Bean public CollectorSampler sampler() { - return CollectorSampler.ALWAYS_SAMPLE; - } - - @Bean public CollectorMetrics metrics() { - return CollectorMetrics.NOOP_METRICS; - } - - @Bean public StorageComponent storage() { - return InMemoryStorage.newBuilder().build(); - } -} diff --git a/zipkin-server/src/test/java/zipkin2/server/internal/NoOpMeterRegistryConfiguration.java b/zipkin-server/src/test/java/zipkin2/server/internal/NoOpMeterRegistryConfiguration.java deleted file mode 100644 index 4ce6bca58a8..00000000000 --- a/zipkin-server/src/test/java/zipkin2/server/internal/NoOpMeterRegistryConfiguration.java +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. 
See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.server.internal; - -import com.linecorp.armeria.common.metric.NoopMeterRegistry; -import io.micrometer.core.instrument.MeterRegistry; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Configuration; - -@Configuration -public class NoOpMeterRegistryConfiguration { - @Bean public MeterRegistry noOpMeterRegistry() { - return NoopMeterRegistry.get(); - } -} diff --git a/zipkin-server/src/test/java/zipkin2/server/internal/ZipkinActuatorImporterTest.java b/zipkin-server/src/test/java/zipkin2/server/internal/ZipkinActuatorImporterTest.java deleted file mode 100644 index 1837655f23d..00000000000 --- a/zipkin-server/src/test/java/zipkin2/server/internal/ZipkinActuatorImporterTest.java +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal; - -import org.junit.After; -import org.junit.Test; -import org.springframework.beans.factory.NoSuchBeanDefinitionException; -import org.springframework.boot.test.util.TestPropertyValues; -import org.springframework.context.annotation.Configuration; -import org.springframework.context.support.GenericApplicationContext; - -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static zipkin2.server.internal.ZipkinActuatorImporter.PROPERTY_NAME_ACTUATOR_ENABLED; - -// This tests actuator integration without actually requiring a compile dep on actuator -public class ZipkinActuatorImporterTest { - ZipkinActuatorImporter zipkinActuatorImporter = - new ZipkinActuatorImporter(ActuatorImpl.class.getName()); - GenericApplicationContext context = new GenericApplicationContext(); - - @After public void close() { - context.close(); - } - - @Test public void doesntCrashWhenNoIncludes() { - zipkinActuatorImporter.initialize(context); - - context.refresh(); - } - - @Test public void configuresInclude() { - TestPropertyValues.of( - "zipkin.internal.actuator.include[0]=" + Include1.class.getName() - ).applyTo(context); - - zipkinActuatorImporter.initialize(context); - - context.refresh(); - context.getBean(Include1.class); - } - - @Test public void doesntCrashOnBadActuatorImpl() { - TestPropertyValues.of( - "zipkin.internal.actuator.include[0]=" + Include1.class.getName() - ).applyTo(context); - - new ZipkinActuatorImporter("tomatoes").initialize(context); - - context.refresh(); - assertThatThrownBy(() -> context.getBean(Include1.class)) - .isInstanceOf(NoSuchBeanDefinitionException.class); - } - - @Test public void skipsWhenDisabled() { - TestPropertyValues.of( - PROPERTY_NAME_ACTUATOR_ENABLED + "=false", - "zipkin.internal.actuator.include[1]=" + Include2.class.getName() - ).applyTo(context); - - zipkinActuatorImporter.initialize(context); - - context.refresh(); - - assertThatThrownBy(() -> context.getBean(Include1.class)) 
- .isInstanceOf(NoSuchBeanDefinitionException.class); - } - - @Test public void doesntCrashWhenBadInclude() { - TestPropertyValues.of( - "zipkin.internal.actuator.include[0]=tomatoes" - ).applyTo(context); - - zipkinActuatorImporter.initialize(context); - - context.refresh(); - } - - @Test public void configuresIncludes() { - TestPropertyValues.of( - "zipkin.internal.actuator.include[0]=" + Include1.class.getName(), - "zipkin.internal.actuator.include[1]=" + Include2.class.getName() - ).applyTo(context); - - zipkinActuatorImporter.initialize(context); - - context.refresh(); - context.getBean(Include1.class); - context.getBean(Include2.class); - } - - @Configuration - static class ActuatorImpl { - } - - @Configuration - static class Include1 { - } - - @Configuration - static class Include2 { - } -} diff --git a/zipkin-server/src/test/java/zipkin2/server/internal/ZipkinHttpConfigurationTest.java b/zipkin-server/src/test/java/zipkin2/server/internal/ZipkinHttpConfigurationTest.java deleted file mode 100644 index 4243637faa7..00000000000 --- a/zipkin-server/src/test/java/zipkin2/server/internal/ZipkinHttpConfigurationTest.java +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal; - -import brave.Tracing; -import io.micrometer.core.instrument.MeterRegistry; -import io.micrometer.prometheus.PrometheusConfig; -import io.micrometer.prometheus.PrometheusMeterRegistry; -import org.junit.After; -import org.junit.Test; -import org.springframework.beans.factory.NoSuchBeanDefinitionException; -import org.springframework.boot.autoconfigure.context.PropertyPlaceholderAutoConfiguration; -import org.springframework.boot.convert.ApplicationConversionService; -import org.springframework.boot.test.util.TestPropertyValues; -import org.springframework.context.annotation.AnnotationConfigApplicationContext; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Configuration; -import org.springframework.core.convert.ConversionService; -import zipkin2.server.internal.brave.ZipkinSelfTracingConfiguration; -import zipkin2.storage.StorageComponent; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; - -public class ZipkinHttpConfigurationTest { - AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext(); - - @After public void close() { - context.close(); - } - - @Test public void httpCollector_enabledByDefault() { - registerBaseConfig(context); - context.register(ZipkinHttpCollector.class); - context.refresh(); - - assertThat(context.getBean(ZipkinHttpCollector.class)).isNotNull(); - } - - @Test(expected = NoSuchBeanDefinitionException.class) - public void httpCollector_canDisable() { - TestPropertyValues.of("zipkin.collector.http.enabled:false").applyTo(context); - registerBaseConfig(context); - context.register(ZipkinHttpCollector.class); - context.refresh(); - - context.getBean(ZipkinHttpCollector.class); - } - - @Test public void query_enabledByDefault() { - registerBaseConfig(context); - context.register(ZipkinQueryApiV2.class); - context.refresh(); - - 
assertThat(context.getBean(ZipkinQueryApiV2.class)).isNotNull(); - } - - @Test public void query_canDisable() { - TestPropertyValues.of("zipkin.query.enabled:false").applyTo(context); - registerBaseConfig(context); - context.register(ZipkinQueryApiV2.class); - context.refresh(); - - assertThatThrownBy(() -> context.getBean(ZipkinQueryApiV2.class)) - .isInstanceOf(NoSuchBeanDefinitionException.class); - } - - @Test public void selfTracing_canEnable() { - TestPropertyValues.of("zipkin.self-tracing.enabled:true").applyTo(context); - registerBaseConfig(context); - context.register(ZipkinSelfTracingConfiguration.class); - context.refresh(); - - context.getBean(Tracing.class).close(); - } - - @Test public void search_canDisable() { - TestPropertyValues.of("zipkin.storage.search-enabled:false").applyTo(context); - registerBaseConfig(context); - context.refresh(); - - StorageComponent v2Storage = context.getBean(StorageComponent.class); - assertThat(v2Storage) - .extracting("searchEnabled") - .isEqualTo(false); - } - - @Configuration - public static class Config { - @Bean MeterRegistry registry() { - return new PrometheusMeterRegistry(PrometheusConfig.DEFAULT); - } - - @Bean ConversionService conversionService() { - return ApplicationConversionService.getSharedInstance(); - } - } - - static void registerBaseConfig(AnnotationConfigApplicationContext context) { - context.register( - PropertyPlaceholderAutoConfiguration.class, - Config.class, - ZipkinConfiguration.class, - ZipkinHttpConfiguration.class - ); - } -} diff --git a/zipkin-server/src/test/java/zipkin2/server/internal/ZipkinModuleImporterTest.java b/zipkin-server/src/test/java/zipkin2/server/internal/ZipkinModuleImporterTest.java deleted file mode 100644 index 417fd3491c0..00000000000 --- a/zipkin-server/src/test/java/zipkin2/server/internal/ZipkinModuleImporterTest.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the 
"License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.server.internal; - -import org.junit.After; -import org.junit.Test; -import org.springframework.boot.test.util.TestPropertyValues; -import org.springframework.context.annotation.Configuration; -import org.springframework.context.support.GenericApplicationContext; - -public class ZipkinModuleImporterTest { - ZipkinModuleImporter zipkinModuleImporter = new ZipkinModuleImporter(); - GenericApplicationContext context = new GenericApplicationContext(); - - @After public void close() { - context.close(); - } - - @Test public void doesntCrashWhenNoModules() { - zipkinModuleImporter.initialize(context); - - context.refresh(); - } - - @Test public void configuresModule() { - TestPropertyValues.of( - "zipkin.internal.module.module1=" + Module1.class.getName() - ).applyTo(context); - - zipkinModuleImporter.initialize(context); - - context.refresh(); - context.getBean(Module1.class); - } - - @Test public void doesntCrashWhenBadModule() { - TestPropertyValues.of( - "zipkin.internal.module.module1=tomatoes" - ).applyTo(context); - - zipkinModuleImporter.initialize(context); - - context.refresh(); - } - - @Test public void configuresModules() { - TestPropertyValues.of( - "zipkin.internal.module.module1=" + Module1.class.getName(), - "zipkin.internal.module.module2=" + Module2.class.getName() - ).applyTo(context); - - zipkinModuleImporter.initialize(context); - - context.refresh(); - context.getBean(Module1.class); - context.getBean(Module2.class); - } - - @Configuration 
- static class Module1 { - } - - @Configuration - static class Module2 { - } -} diff --git a/zipkin-server/src/test/java/zipkin2/server/internal/activemq/Access.java b/zipkin-server/src/test/java/zipkin2/server/internal/activemq/Access.java deleted file mode 100644 index cfb7741b8b1..00000000000 --- a/zipkin-server/src/test/java/zipkin2/server/internal/activemq/Access.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal.activemq; - -import org.springframework.boot.autoconfigure.context.PropertyPlaceholderAutoConfiguration; -import org.springframework.boot.context.properties.EnableConfigurationProperties; -import org.springframework.context.annotation.AnnotationConfigApplicationContext; -import org.springframework.context.annotation.Configuration; -import zipkin2.collector.activemq.ActiveMQCollector; - -/** opens package access for testing */ -public final class Access { - - /** Just registering properties to avoid automatically connecting to a ActiveMQ server */ - public static void registerActiveMQProperties(AnnotationConfigApplicationContext context) { - context.register( - PropertyPlaceholderAutoConfiguration.class, EnableActiveMQCollectorProperties.class); - } - - @Configuration - @EnableConfigurationProperties(ZipkinActiveMQCollectorProperties.class) - static class EnableActiveMQCollectorProperties {} - - public static ActiveMQCollector.Builder collectorBuilder( - AnnotationConfigApplicationContext context) { - return context.getBean(ZipkinActiveMQCollectorProperties.class).toBuilder(); - } -} diff --git a/zipkin-server/src/test/java/zipkin2/server/internal/activemq/ZipkinActiveMQCollectorConfigurationTest.java b/zipkin-server/src/test/java/zipkin2/server/internal/activemq/ZipkinActiveMQCollectorConfigurationTest.java deleted file mode 100644 index bd37b450632..00000000000 --- a/zipkin-server/src/test/java/zipkin2/server/internal/activemq/ZipkinActiveMQCollectorConfigurationTest.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.server.internal.activemq; - -import org.junit.After; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.springframework.beans.factory.BeanCreationException; -import org.springframework.beans.factory.NoSuchBeanDefinitionException; -import org.springframework.boot.autoconfigure.context.PropertyPlaceholderAutoConfiguration; -import org.springframework.boot.test.util.TestPropertyValues; -import org.springframework.context.annotation.AnnotationConfigApplicationContext; -import zipkin2.collector.activemq.ActiveMQCollector; -import zipkin2.server.internal.InMemoryConfiguration; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.failBecauseExceptionWasNotThrown; - -public class ZipkinActiveMQCollectorConfigurationTest { - - @Rule public ExpectedException thrown = ExpectedException.none(); - - AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext(); - - @After public void close() { - context.close(); - } - - @Test public void doesNotProvideCollectorComponent_whenAddressAndUriNotSet() { - context.register( - PropertyPlaceholderAutoConfiguration.class, - ZipkinActiveMQCollectorConfiguration.class, - InMemoryConfiguration.class); - context.refresh(); - - thrown.expect(NoSuchBeanDefinitionException.class); - context.getBean(ActiveMQCollector.class); - } - - @Test public void providesCollectorComponent_whenUrlSet() { - TestPropertyValues.of("zipkin.collector.activemq.url=vm://localhost") - .applyTo(context); - 
context.register( - PropertyPlaceholderAutoConfiguration.class, - ZipkinActiveMQCollectorConfiguration.class, - InMemoryConfiguration.class); - - try { - context.refresh(); - failBecauseExceptionWasNotThrown(BeanCreationException.class); - } catch (BeanCreationException e) { - assertThat(e.getCause()).hasMessage( - "Unable to establish connection to ActiveMQ broker: Transport scheme NOT recognized: [vm]"); - } - } -} diff --git a/zipkin-server/src/test/java/zipkin2/server/internal/activemq/ZipkinActiveMQCollectorPropertiesTest.java b/zipkin-server/src/test/java/zipkin2/server/internal/activemq/ZipkinActiveMQCollectorPropertiesTest.java deleted file mode 100644 index 4c91a2fca45..00000000000 --- a/zipkin-server/src/test/java/zipkin2/server/internal/activemq/ZipkinActiveMQCollectorPropertiesTest.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal.activemq; - -import org.junit.Test; -import org.springframework.beans.factory.BeanCreationException; -import org.springframework.beans.factory.NoSuchBeanDefinitionException; -import org.springframework.boot.autoconfigure.context.PropertyPlaceholderAutoConfiguration; -import org.springframework.boot.test.util.TestPropertyValues; -import org.springframework.context.annotation.AnnotationConfigApplicationContext; -import zipkin2.collector.activemq.ActiveMQCollector; -import zipkin2.server.internal.InMemoryConfiguration; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; - -public class ZipkinActiveMQCollectorPropertiesTest { - AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext(); - - /** This prevents an empty ACTIVEMQ_URL variable from being mistaken as a real one */ - @Test public void ignoresEmptyURL() { - ZipkinActiveMQCollectorProperties properties = new ZipkinActiveMQCollectorProperties(); - properties.setUrl(""); - - assertThat(properties.getUrl()).isNull(); - } - - @Test public void providesCollectorComponent_whenUrlSet() { - TestPropertyValues.of("zipkin.collector.activemq.url:tcp://localhost:61611") // wrong port - .applyTo(context); - context.register( - PropertyPlaceholderAutoConfiguration.class, - ZipkinActiveMQCollectorConfiguration.class, - InMemoryConfiguration.class); - - assertThatThrownBy(context::refresh) - .isInstanceOf(BeanCreationException.class) - .hasMessageContaining("Unable to establish connection to ActiveMQ broker"); - } - - @Test public void doesNotProvidesCollectorComponent_whenUrlSetAndDisabled() { - TestPropertyValues.of("zipkin.collector.activemq.url:tcp://localhost:61616") - .applyTo(context); - TestPropertyValues.of("zipkin.collector.activemq.enabled:false").applyTo(context); - context.register( - PropertyPlaceholderAutoConfiguration.class, - ZipkinActiveMQCollectorConfiguration.class, - 
InMemoryConfiguration.class); - context.refresh(); - - assertThatThrownBy(() -> context.getBean(ActiveMQCollector.class)) - .isInstanceOf(NoSuchBeanDefinitionException.class); - } -} diff --git a/zipkin-server/src/test/java/zipkin2/server/internal/banner/ZipkinBannerTest.java b/zipkin-server/src/test/java/zipkin2/server/internal/banner/ZipkinBannerTest.java deleted file mode 100644 index 0fa07a612f3..00000000000 --- a/zipkin-server/src/test/java/zipkin2/server/internal/banner/ZipkinBannerTest.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal.banner; - -import java.io.ByteArrayOutputStream; -import java.io.PrintStream; -import org.junit.After; -import org.junit.Test; -import org.springframework.boot.ansi.AnsiOutput; - -import static java.nio.charset.StandardCharsets.UTF_8; -import static org.assertj.core.api.Assertions.assertThat; - -public class ZipkinBannerTest { - @After public void tearDown() { - AnsiOutput.setEnabled(AnsiOutput.Enabled.DETECT); - } - - @Test public void shouldReplaceWhenAnsiEnabled() { - AnsiOutput.setEnabled(AnsiOutput.Enabled.ALWAYS); - - ZipkinBanner banner = new ZipkinBanner(); - ByteArrayOutputStream out = new ByteArrayOutputStream(); - banner.printBanner(null, null, new PrintStream(out)); - - assertThat(new String(out.toByteArray(), UTF_8)) - .doesNotContain("${") - .contains("\033"); // ansi codes - } - - @Test public void shouldReplaceWhenAnsiDisabled() { - AnsiOutput.setEnabled(AnsiOutput.Enabled.NEVER); - - ZipkinBanner banner = new ZipkinBanner(); - ByteArrayOutputStream out = new ByteArrayOutputStream(); - banner.printBanner(null, null, new PrintStream(out)); - - assertThat(new String(out.toByteArray(), UTF_8)) - .doesNotContain("${") - .doesNotContain("\033"); // ansi codes - } -} diff --git a/zipkin-server/src/test/java/zipkin2/server/internal/brave/ITZipkinSelfTracing.java b/zipkin-server/src/test/java/zipkin2/server/internal/brave/ITZipkinSelfTracing.java deleted file mode 100644 index cd1efbcfc9a..00000000000 --- a/zipkin-server/src/test/java/zipkin2/server/internal/brave/ITZipkinSelfTracing.java +++ /dev/null @@ -1,176 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.server.internal.brave; - -import com.linecorp.armeria.server.Server; -import java.io.IOException; -import java.util.Collections; -import java.util.List; -import okhttp3.OkHttpClient; -import okhttp3.Request; -import okhttp3.RequestBody; -import okhttp3.Response; -import org.junit.Before; -import org.junit.Ignore; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.boot.test.context.SpringBootTest; -import org.springframework.test.context.junit4.SpringRunner; -import zipkin.server.ZipkinServer; -import zipkin2.Component; -import zipkin2.Span; -import zipkin2.codec.SpanBytesEncoder; -import zipkin2.reporter.brave.AsyncZipkinSpanHandler; -import zipkin2.storage.InMemoryStorage; -import zipkin2.storage.QueryRequest; - -import static java.util.Collections.singletonMap; -import static org.assertj.core.api.Assertions.assertThat; -import static org.awaitility.Awaitility.await; -import static zipkin2.TestObjects.DAY; -import static zipkin2.TestObjects.TODAY; -import static zipkin2.server.internal.ITZipkinServer.url; - -/** - * This class is flaky for as yet unknown reasons. For example, in CI, sometimes assertions fail - * due to incomplete traces. Hence, it includes more assertion customization than normal. 
- */ -@SpringBootTest( - classes = ZipkinServer.class, - webEnvironment = SpringBootTest.WebEnvironment.NONE, // RANDOM_PORT requires spring-web - properties = { - "server.port=0", - "spring.config.name=zipkin-server", - "zipkin.self-tracing.enabled=true", - "zipkin.self-tracing.message-timeout=100ms", - "zipkin.self-tracing.traces-per-second=100" - }) -@RunWith(SpringRunner.class) -public class ITZipkinSelfTracing { - @Autowired TracingStorageComponent storage; - @Autowired AsyncZipkinSpanHandler zipkinSpanHandler; - @Autowired Server server; - - OkHttpClient client = new OkHttpClient.Builder().followRedirects(false).build(); - - @Before public void clear() { - inMemoryStorage().clear(); - } - - InMemoryStorage inMemoryStorage() { - return (InMemoryStorage) storage.delegate; - } - - @Test public void getIsTraced_v2() throws Exception { - assertThat(getServices("v2").body().string()).isEqualTo("[]"); - - List> traces = awaitSpans(2); - - assertQueryReturnsResults(QueryRequest.newBuilder() - .annotationQuery(singletonMap("http.path", "/api/v2/services")), traces); - - assertQueryReturnsResults(QueryRequest.newBuilder().spanName("get-service-names"), traces); - } - - @Test @Ignore("https://github.com/openzipkin/zipkin/issues/2781") - public void postIsTraced_v1() throws Exception { - postSpan("v1"); - - List> traces = awaitSpans(3); // test span + POST + accept-spans - - assertQueryReturnsResults(QueryRequest.newBuilder() - .annotationQuery(singletonMap("http.path", "/api/v1/spans")), traces); - - assertQueryReturnsResults(QueryRequest.newBuilder().spanName("accept-spans"), traces); - } - - @Test @Ignore("https://github.com/openzipkin/zipkin/issues/2781") - public void postIsTraced_v2() throws Exception { - postSpan("v2"); - - List> traces = awaitSpans(3); // test span + POST + accept-spans - - assertQueryReturnsResults(QueryRequest.newBuilder() - .annotationQuery(singletonMap("http.path", "/api/v2/spans")), traces); - - 
assertQueryReturnsResults(QueryRequest.newBuilder().spanName("accept-spans"), traces); - } - - /** - * The {@code toString()} of {@link Component} implementations appear in health check endpoints. - * Since these are likely to be exposed in logs and other monitoring tools, care should be taken - * to ensure {@code toString()} output is a reasonable length and does not contain sensitive - * information. - */ - @Test public void toStringContainsOnlySummaryInformation() { - assertThat(storage).hasToString("Traced{InMemoryStorage{}}"); - assertThat(zipkinSpanHandler).hasToString("AsyncReporter{StorageComponent}"); - } - - List> awaitSpans(int count) { - await().untilAsserted(() -> { // wait for spans - List> traces = inMemoryStorage().getTraces(); - long received = traces.stream().flatMap(List::stream).count(); - assertThat(inMemoryStorage().acceptedSpanCount()) - .withFailMessage("Wanted %s spans: got %s. Current traces: %s", count, received, traces) - .isGreaterThanOrEqualTo(count); - }); - return inMemoryStorage().getTraces(); - } - - void assertQueryReturnsResults(QueryRequest.Builder builder, List> traces) - throws IOException { - QueryRequest query = builder.endTs(System.currentTimeMillis()).lookback(DAY).limit(2).build(); - assertThat(inMemoryStorage().getTraces(query).execute()) - .withFailMessage("Expected results from %s. Current traces: %s", query, traces) - .isNotEmpty(); - } - - /** - * This POSTs a single span. Afterwards, we expect this trace in storage, and also the self-trace - * of POSTing it. - */ - void postSpan(String version) throws IOException { - SpanBytesEncoder encoder = - "v1".equals(version) ? 
SpanBytesEncoder.JSON_V1 : SpanBytesEncoder.JSON_V2; - - List testTrace = Collections.singletonList( - Span.newBuilder().timestamp(TODAY).traceId("1").id("2").name("test-trace").build() - ); - - Response response = client.newCall(new Request.Builder() - .url(url(server, "/api/" + version + "/spans")) - .post(RequestBody.create(encoder.encodeList(testTrace))) - .build()) - .execute(); - assertSuccessful(response); - } - - Response getServices(String version) throws IOException { - Response response = client.newCall(new Request.Builder() - .url(url(server, "/api/" + version + "/services")) - .build()) - .execute(); - assertSuccessful(response); - return response; - } - - static void assertSuccessful(Response response) throws IOException { - assertThat(response.isSuccessful()) - .withFailMessage("unsuccessful %s: %s", response.request(), - response.peekBody(Long.MAX_VALUE).string()) - .isTrue(); - } -} diff --git a/zipkin-server/src/test/java/zipkin2/server/internal/cassandra3/Access.java b/zipkin-server/src/test/java/zipkin2/server/internal/cassandra3/Access.java deleted file mode 100644 index 14ad324d08c..00000000000 --- a/zipkin-server/src/test/java/zipkin2/server/internal/cassandra3/Access.java +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal.cassandra3; - -import org.springframework.boot.autoconfigure.context.PropertyPlaceholderAutoConfiguration; -import org.springframework.context.annotation.AnnotationConfigApplicationContext; - -/** opens package access for testing */ -public final class Access { - - public static void registerCassandra3(AnnotationConfigApplicationContext context) { - context.register( - PropertyPlaceholderAutoConfiguration.class, ZipkinCassandra3StorageConfiguration.class); - } -} diff --git a/zipkin-server/src/test/java/zipkin2/server/internal/elasticsearch/Access.java b/zipkin-server/src/test/java/zipkin2/server/internal/elasticsearch/Access.java deleted file mode 100644 index c361a2c2345..00000000000 --- a/zipkin-server/src/test/java/zipkin2/server/internal/elasticsearch/Access.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal.elasticsearch; - -import com.linecorp.armeria.client.ClientFactoryBuilder; -import com.linecorp.armeria.spring.Ssl; -import org.springframework.boot.autoconfigure.context.PropertyPlaceholderAutoConfiguration; -import org.springframework.context.annotation.AnnotationConfigApplicationContext; -import zipkin2.server.internal.NoOpMeterRegistryConfiguration; - -/** opens package access for testing */ -public final class Access { - - public static void registerElasticsearch(AnnotationConfigApplicationContext context) { - context.register( - PropertyPlaceholderAutoConfiguration.class, - NoOpMeterRegistryConfiguration.class, - ZipkinElasticsearchStorageConfiguration.class); - } - - public static ClientFactoryBuilder configureSsl(ClientFactoryBuilder builder, Ssl ssl) { - ZipkinElasticsearchStorageProperties.Ssl eSsl = new ZipkinElasticsearchStorageProperties.Ssl(); - eSsl.setKeyStore(ssl.getKeyStore()); - eSsl.setKeyStorePassword(ssl.getKeyStorePassword()); - eSsl.setKeyStoreType(ssl.getKeyStoreType()); - eSsl.setTrustStore(ssl.getTrustStore()); - eSsl.setTrustStorePassword(ssl.getTrustStorePassword()); - eSsl.setTrustStoreType(ssl.getTrustStoreType()); - try { - return ZipkinElasticsearchStorageConfiguration.configureSsl(builder, eSsl); - } catch (Exception e) { - throw new AssertionError(e); - } - } -} diff --git a/zipkin-server/src/test/java/zipkin2/server/internal/elasticsearch/ITElasticsearchAuth.java b/zipkin-server/src/test/java/zipkin2/server/internal/elasticsearch/ITElasticsearchAuth.java deleted file mode 100644 index d37c6f2b79f..00000000000 --- a/zipkin-server/src/test/java/zipkin2/server/internal/elasticsearch/ITElasticsearchAuth.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.server.internal.elasticsearch; - -import com.linecorp.armeria.common.AggregatedHttpRequest; -import com.linecorp.armeria.server.ServerBuilder; -import com.linecorp.armeria.testing.junit5.server.mock.MockWebServerExtension; -import javax.net.ssl.KeyManagerFactory; -import javax.net.ssl.TrustManagerFactory; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.RegisterExtension; -import org.springframework.boot.test.util.TestPropertyValues; -import org.springframework.context.annotation.AnnotationConfigApplicationContext; -import zipkin2.elasticsearch.ElasticsearchStorage; - -import static org.assertj.core.api.Assertions.assertThat; -import static zipkin2.server.internal.elasticsearch.TestResponses.VERSION_RESPONSE; -import static zipkin2.server.internal.elasticsearch.TestResponses.YELLOW_RESPONSE; -import static zipkin2.server.internal.elasticsearch.ZipkinElasticsearchStorageProperties.Ssl; - -class ITElasticsearchAuth { - - @RegisterExtension static MockWebServerExtension server = new MockWebServerExtension() { - @Override protected void configureServer(ServerBuilder sb) throws Exception { - sb.https(0); - Ssl ssl = new Ssl(); - ssl.setKeyStore("classpath:keystore.jks"); - ssl.setKeyStorePassword("password"); - ssl.setTrustStore("classpath:keystore.jks"); - ssl.setTrustStorePassword("password"); - - final KeyManagerFactory keyManagerFactory = SslUtil.getKeyManagerFactory(ssl); - final TrustManagerFactory trustManagerFactory = 
SslUtil.getTrustManagerFactory(ssl); - sb.tls(keyManagerFactory) - .tlsCustomizer(sslContextBuilder -> { - sslContextBuilder.keyManager(keyManagerFactory); - sslContextBuilder.trustManager(trustManagerFactory); - }); - } - }; - - AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext(); - ElasticsearchStorage storage; - - @BeforeEach void init() { - TestPropertyValues.of( - "spring.config.name=zipkin-server", - "zipkin.storage.type=elasticsearch", - "zipkin.storage.elasticsearch.ensure-templates=false", - "zipkin.storage.elasticsearch.username=Aladdin", - "zipkin.storage.elasticsearch.password=OpenSesame", - "zipkin.storage.elasticsearch.hosts=https://localhost:" + server.httpsPort(), - "zipkin.storage.elasticsearch.ssl.key-store=classpath:keystore.jks", - "zipkin.storage.elasticsearch.ssl.key-store-password=password", - "zipkin.storage.elasticsearch.ssl.trust-store=classpath:keystore.jks", - "zipkin.storage.elasticsearch.ssl.trust-store-password=password") - .applyTo(context); - Access.registerElasticsearch(context); - context.refresh(); - storage = context.getBean(ElasticsearchStorage.class); - } - - @AfterEach void close() { - storage.close(); - } - - @Test void healthcheck_usesAuthAndTls() { - server.enqueue(VERSION_RESPONSE.toHttpResponse()); - server.enqueue(YELLOW_RESPONSE.toHttpResponse()); - - assertThat(storage.check().ok()).isTrue(); - - AggregatedHttpRequest next = server.takeRequest().request(); - // hard coded for sanity taken from https://en.wikipedia.org/wiki/Basic_access_authentication - assertThat(next.headers().get("Authorization")) - .isEqualTo("Basic QWxhZGRpbjpPcGVuU2VzYW1l"); - } -} diff --git a/zipkin-server/src/test/java/zipkin2/server/internal/elasticsearch/ITElasticsearchClientInitialization.java b/zipkin-server/src/test/java/zipkin2/server/internal/elasticsearch/ITElasticsearchClientInitialization.java deleted file mode 100644 index f79a7b766f5..00000000000 --- 
a/zipkin-server/src/test/java/zipkin2/server/internal/elasticsearch/ITElasticsearchClientInitialization.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.server.internal.elasticsearch; - -import java.io.IOException; -import org.junit.Test; -import org.springframework.boot.test.util.TestPropertyValues; -import org.springframework.context.annotation.AnnotationConfigApplicationContext; -import zipkin2.CheckResult; -import zipkin2.elasticsearch.ElasticsearchStorage; - -import static org.assertj.core.api.Assertions.assertThat; - -public class ITElasticsearchClientInitialization { - - AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext(); - - /** - * This blocks for less than the timeout of 2 second to prove we defer i/o until first use of the - * storage component. - */ - @Test(timeout = 1900L) public void defersIOUntilFirstUse() throws IOException { - TestPropertyValues.of( - "spring.config.name=zipkin-server", - "zipkin.storage.type:elasticsearch", - "zipkin.storage.elasticsearch.timeout:2000", - "zipkin.storage.elasticsearch.hosts:127.0.0.1:1234,127.0.0.1:5678") - .applyTo(context); - Access.registerElasticsearch(context); - context.refresh(); - - context.getBean(ElasticsearchStorage.class).close(); - } - - /** blocking a little is ok, but blocking forever is not. 
*/ - @Test(timeout = 3000L) public void doesntHangWhenAllDown() throws IOException { - TestPropertyValues.of( - "spring.config.name=zipkin-server", - "zipkin.storage.type:elasticsearch", - "zipkin.storage.elasticsearch.timeout:1000", - "zipkin.storage.elasticsearch.hosts:127.0.0.1:1234,127.0.0.1:5678") - .applyTo(context); - Access.registerElasticsearch(context); - context.refresh(); - - try (ElasticsearchStorage storage = context.getBean(ElasticsearchStorage.class)) { - CheckResult result = storage.check(); - assertThat(result.ok()).isFalse(); - } - } -} diff --git a/zipkin-server/src/test/java/zipkin2/server/internal/elasticsearch/ITElasticsearchDynamicCredentials.java b/zipkin-server/src/test/java/zipkin2/server/internal/elasticsearch/ITElasticsearchDynamicCredentials.java deleted file mode 100644 index 94e10111b42..00000000000 --- a/zipkin-server/src/test/java/zipkin2/server/internal/elasticsearch/ITElasticsearchDynamicCredentials.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal.elasticsearch; - -import com.linecorp.armeria.common.AggregatedHttpRequest; -import com.linecorp.armeria.server.ServerBuilder; -import com.linecorp.armeria.testing.junit5.server.mock.MockWebServerExtension; -import java.io.File; -import javax.net.ssl.KeyManagerFactory; -import javax.net.ssl.TrustManagerFactory; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.RegisterExtension; -import org.springframework.boot.test.util.TestPropertyValues; -import org.springframework.context.annotation.AnnotationConfigApplicationContext; -import zipkin2.elasticsearch.ElasticsearchStorage; - -import static org.assertj.core.api.Assertions.assertThat; -import static zipkin2.server.internal.elasticsearch.TestResponses.VERSION_RESPONSE; -import static zipkin2.server.internal.elasticsearch.TestResponses.YELLOW_RESPONSE; -import static zipkin2.server.internal.elasticsearch.ZipkinElasticsearchStorageProperties.Ssl; - -class ITElasticsearchDynamicCredentials { - @RegisterExtension static MockWebServerExtension server = new MockWebServerExtension() { - @Override protected void configureServer(ServerBuilder sb) throws Exception { - sb.https(0); - Ssl ssl = new Ssl(); - ssl.setKeyStore("classpath:keystore.jks"); - ssl.setKeyStorePassword("password"); - ssl.setTrustStore("classpath:keystore.jks"); - ssl.setTrustStorePassword("password"); - - final KeyManagerFactory keyManagerFactory = SslUtil.getKeyManagerFactory(ssl); - final TrustManagerFactory trustManagerFactory = SslUtil.getTrustManagerFactory(ssl); - sb.tls(keyManagerFactory) - .tlsCustomizer(sslContextBuilder -> { - sslContextBuilder.keyManager(keyManagerFactory); - sslContextBuilder.trustManager(trustManagerFactory); - }); - } - }; - - AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext(); - ElasticsearchStorage storage; - String credentialsFile; - - 
@BeforeEach void init() { - credentialsFile = pathOfResource("es-credentials"); - TestPropertyValues.of( - "spring.config.name=zipkin-server", - "zipkin.storage.type=elasticsearch", - "zipkin.storage.elasticsearch.ensure-templates=false", - "zipkin.storage.elasticsearch.hosts=https://localhost:" + server.httpsPort(), - "zipkin.storage.elasticsearch.credentials-file=" + credentialsFile, - "zipkin.storage.elasticsearch.credentials-refresh-interval=3", - "zipkin.storage.elasticsearch.ssl.key-store=classpath:keystore.jks", - "zipkin.storage.elasticsearch.ssl.key-store-password=password", - "zipkin.storage.elasticsearch.ssl.trust-store=classpath:keystore.jks", - "zipkin.storage.elasticsearch.ssl.trust-store-password=password") - .applyTo(context); - Access.registerElasticsearch(context); - context.refresh(); - storage = context.getBean(ElasticsearchStorage.class); - } - - @AfterEach void close() { - storage.close(); - } - - @Test void healthcheck_usesDynamicCredentialsAndTls() { - server.enqueue(VERSION_RESPONSE.toHttpResponse()); - server.enqueue(YELLOW_RESPONSE.toHttpResponse()); - assertThat(storage.check().ok()).isTrue(); - AggregatedHttpRequest next = server.takeRequest().request(); - assertThat(next.headers().get("Authorization")).isEqualTo("Basic Zm9vOmJhcg=="); - } - - static String pathOfResource(String resource) { - File file = new File( - ITElasticsearchDynamicCredentials.class.getClassLoader().getResource(resource).getFile()); - return file.getAbsolutePath(); - } -} diff --git a/zipkin-server/src/test/java/zipkin2/server/internal/elasticsearch/ITElasticsearchHealthCheck.java b/zipkin-server/src/test/java/zipkin2/server/internal/elasticsearch/ITElasticsearchHealthCheck.java deleted file mode 100644 index 27462589e65..00000000000 --- a/zipkin-server/src/test/java/zipkin2/server/internal/elasticsearch/ITElasticsearchHealthCheck.java +++ /dev/null @@ -1,208 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, 
Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.server.internal.elasticsearch; - -import com.linecorp.armeria.client.endpoint.EmptyEndpointGroupException; -import com.linecorp.armeria.client.endpoint.EndpointSelectionTimeoutException; -import com.linecorp.armeria.server.ServerBuilder; -import com.linecorp.armeria.server.healthcheck.HealthCheckService; -import com.linecorp.armeria.server.healthcheck.SettableHealthChecker; -import com.linecorp.armeria.testing.junit4.server.ServerRule; -import java.util.concurrent.TimeUnit; -import javax.net.ssl.SSLException; -import org.awaitility.core.ConditionFactory; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.springframework.boot.test.util.TestPropertyValues; -import org.springframework.context.annotation.AnnotationConfigApplicationContext; -import zipkin2.CheckResult; -import zipkin2.elasticsearch.ElasticsearchStorage; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.awaitility.Awaitility.await; -import static zipkin2.server.internal.elasticsearch.TestResponses.GREEN_RESPONSE; -import static zipkin2.server.internal.elasticsearch.TestResponses.VERSION_RESPONSE; - -/** - * These tests focus on http client health checks not currently in zipkin-storage-elasticsearch. 
- */ -public class ITElasticsearchHealthCheck { - static final Logger logger = LoggerFactory.getLogger(ITElasticsearchHealthCheck.class.getName()); - // Health check interval is 100ms, but in-flight requests in CI might take a few hundred ms - static final ConditionFactory awaitTimeout = await().timeout(1, TimeUnit.SECONDS); - - static final SettableHealthChecker server1Health = new SettableHealthChecker(true); - - static { - // Gives better context when there's an exception such as AbortedStreamException - System.setProperty("com.linecorp.armeria.verboseExceptions", "always"); - } - - @ClassRule public static ServerRule server1 = new ServerRule() { - @Override protected void configure(ServerBuilder sb) { - sb.service("/", (ctx, req) -> VERSION_RESPONSE.toHttpResponse()); - sb.service("/_cluster/health", HealthCheckService.of(server1Health)); - sb.serviceUnder("/_cluster/health/", (ctx, req) -> GREEN_RESPONSE.toHttpResponse()); - } - }; - - static final SettableHealthChecker server2Health = new SettableHealthChecker(true); - - @ClassRule public static ServerRule server2 = new ServerRule() { - @Override protected void configure(ServerBuilder sb) { - sb.service("/", (ctx, req) -> VERSION_RESPONSE.toHttpResponse()); - sb.service("/_cluster/health", HealthCheckService.of(server2Health)); - sb.serviceUnder("/_cluster/health/", (ctx, req) -> GREEN_RESPONSE.toHttpResponse()); - } - }; - - AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext(); - - @Before public void setUp() { - server1Health.setHealthy(true); - server2Health.setHealthy(true); - - logger.info("server 1: {}, server 2: {}", server1.httpUri(), server2.httpUri()); - - initWithHosts("127.0.0.1:" + server1.httpPort() + ",127.0.0.1:" + server2.httpPort()); - } - - private void initWithHosts(String hosts) { - TestPropertyValues.of( - "spring.config.name=zipkin-server", - "zipkin.storage.type=elasticsearch", - "zipkin.storage.elasticsearch.ensure-templates=false", - 
"zipkin.storage.elasticsearch.timeout=200", - "zipkin.storage.elasticsearch.health-check.enabled=true", - // uncomment (and also change log4j2.properties) to see health-checks requests in the console - //"zipkin.storage.elasticsearch.health-check.http-logging=headers", - "zipkin.storage.elasticsearch.health-check.interval=100ms", - "zipkin.storage.elasticsearch.hosts=" + hosts) - .applyTo(context); - Access.registerElasticsearch(context); - context.refresh(); - } - - @Test public void allHealthy() { - try (ElasticsearchStorage storage = context.getBean(ElasticsearchStorage.class)) { - assertOk(storage.check()); - } - } - - @Test public void oneHealthy() { - server1Health.setHealthy(false); - - try (ElasticsearchStorage storage = context.getBean(ElasticsearchStorage.class)) { - assertOk(storage.check()); - } - } - - @Test public void wrongScheme() { - context.close(); - context = new AnnotationConfigApplicationContext(); - initWithHosts("https://localhost:" + server1.httpPort()); - - try (ElasticsearchStorage storage = context.getBean(ElasticsearchStorage.class)) { - CheckResult result = storage.check(); - assertThat(result.ok()).isFalse(); - // Test this is not wrapped in a rejection exception, as health check is not throttled - // Depending on JDK this is SSLHandshakeException or NotSslRecordException - assertThat(result.error()).isInstanceOf(SSLException.class); - } - } - - @Test public void noneHealthy() { - server1Health.setHealthy(false); - server2Health.setHealthy(false); - - try (ElasticsearchStorage storage = context.getBean(ElasticsearchStorage.class)) { - CheckResult result = storage.check(); - assertThat(result.ok()).isFalse(); - assertThat(result.error()) - .isInstanceOf(EndpointSelectionTimeoutException.class); - } - } - - // If this flakes, uncomment in initWithHosts and log4j2.properties - @Test public void healthyThenNotHealthyThenHealthy() { - try (ElasticsearchStorage storage = context.getBean(ElasticsearchStorage.class)) { - 
assertOk(storage.check()); - - logger.info("setting server 1 and 2 unhealthy"); - server1Health.setHealthy(false); - server2Health.setHealthy(false); - - awaitTimeout.untilAsserted(() -> assertThat(storage.check().ok()).isFalse()); - - logger.info("setting server 1 healthy"); - server1Health.setHealthy(true); - - awaitTimeout.untilAsserted(() -> assertThat(storage.check().ok()).isTrue()); - } - } - - @Test public void notHealthyThenHealthyThenNotHealthy() { - server1Health.setHealthy(false); - server2Health.setHealthy(false); - - try (ElasticsearchStorage storage = context.getBean(ElasticsearchStorage.class)) { - CheckResult result = storage.check(); - assertThat(result.ok()).isFalse(); - - server2Health.setHealthy(true); - - awaitTimeout.untilAsserted(() -> assertThat(storage.check().ok()).isTrue()); - - server2Health.setHealthy(false); - - awaitTimeout.untilAsserted(() -> assertThat(storage.check().ok()).isFalse()); - } - } - - @Test public void healthCheckDisabled() { - AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext(); - - TestPropertyValues.of( - "spring.config.name=zipkin-server", - "zipkin.storage.type:elasticsearch", - "zipkin.storage.elasticsearch.ensure-templates=false", - "zipkin.storage.elasticsearch.timeout=200", - "zipkin.storage.elasticsearch.health-check.enabled=false", - "zipkin.storage.elasticsearch.health-check.interval=100ms", - "zipkin.storage.elasticsearch.hosts=127.0.0.1:" + - server1.httpPort() + ",127.0.0.1:" + server2.httpPort()) - .applyTo(context); - Access.registerElasticsearch(context); - context.refresh(); - - server1Health.setHealthy(false); - server2Health.setHealthy(false); - - try (ElasticsearchStorage storage = context.getBean(ElasticsearchStorage.class)) { - // Even though cluster health is false, we ignore that and continue to check index health, - // which is correctly returned by our mock server. 
- assertOk(storage.check()); - } - } - - static void assertOk(CheckResult result) { - if (!result.ok()) { - Throwable error = result.error(); - throw new AssertionError("Health check failed with message: " + error.getMessage(), error); - } - } -} diff --git a/zipkin-server/src/test/java/zipkin2/server/internal/elasticsearch/ITElasticsearchNoVerify.java b/zipkin-server/src/test/java/zipkin2/server/internal/elasticsearch/ITElasticsearchNoVerify.java deleted file mode 100644 index bfb03cb570c..00000000000 --- a/zipkin-server/src/test/java/zipkin2/server/internal/elasticsearch/ITElasticsearchNoVerify.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal.elasticsearch; - -import com.linecorp.armeria.common.AggregatedHttpResponse; -import com.linecorp.armeria.common.HttpData; -import com.linecorp.armeria.common.HttpStatus; -import com.linecorp.armeria.common.ResponseHeaders; -import com.linecorp.armeria.testing.junit5.server.mock.MockWebServerExtension; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.RegisterExtension; -import org.springframework.boot.test.util.TestPropertyValues; -import org.springframework.context.annotation.AnnotationConfigApplicationContext; -import zipkin2.elasticsearch.ElasticsearchStorage; - -import static org.assertj.core.api.Assertions.assertThat; -import static zipkin2.server.internal.elasticsearch.TestResponses.VERSION_RESPONSE; -import static zipkin2.server.internal.elasticsearch.TestResponses.YELLOW_RESPONSE; - -class ITElasticsearchNoVerify { - @RegisterExtension - static MockWebServerExtension server = new MockWebServerExtension(); - - AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext(); - ElasticsearchStorage storage; - - @BeforeEach void init() { - TestPropertyValues.of( - "spring.config.name=zipkin-server", - "zipkin.storage.type=elasticsearch", - "zipkin.storage.elasticsearch.ensure-templates=false", - "zipkin.storage.elasticsearch.hosts=https://localhost:" + server.httpsPort(), - "zipkin.storage.elasticsearch.ssl.no-verify=true") - .applyTo(context); - Access.registerElasticsearch(context); - context.refresh(); - storage = context.getBean(ElasticsearchStorage.class); - } - - @AfterEach void close() { - storage.close(); - } - - @Test void healthcheck_no_tls_verify() { - server.enqueue(VERSION_RESPONSE.toHttpResponse()); - server.enqueue(YELLOW_RESPONSE.toHttpResponse()); - - assertThat(storage.check().ok()).isTrue(); - } - - @Test void service_no_tls_verify() throws Exception { - server.enqueue( - 
AggregatedHttpResponse.of(ResponseHeaders.of(HttpStatus.OK), HttpData.ofUtf8("{}"))); - - assertThat(storage.serviceAndSpanNames().getServiceNames().execute()).isEmpty(); - } -} diff --git a/zipkin-server/src/test/java/zipkin2/server/internal/elasticsearch/ITElasticsearchSelfTracing.java b/zipkin-server/src/test/java/zipkin2/server/internal/elasticsearch/ITElasticsearchSelfTracing.java deleted file mode 100644 index 17cc413e76a..00000000000 --- a/zipkin-server/src/test/java/zipkin2/server/internal/elasticsearch/ITElasticsearchSelfTracing.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal.elasticsearch; - -import com.linecorp.armeria.testing.junit5.server.mock.MockWebServerExtension; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.RegisterExtension; -import org.springframework.boot.test.util.TestPropertyValues; -import org.springframework.context.annotation.AnnotationConfigApplicationContext; -import zipkin2.elasticsearch.ElasticsearchStorage; -import zipkin2.server.internal.brave.ZipkinSelfTracingConfiguration; - -import static org.assertj.core.api.Assertions.assertThat; -import static zipkin2.server.internal.elasticsearch.TestResponses.VERSION_RESPONSE; -import static zipkin2.server.internal.elasticsearch.TestResponses.YELLOW_RESPONSE; - -class ITElasticsearchSelfTracing { - - @RegisterExtension static MockWebServerExtension server = new MockWebServerExtension(); - - AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext(); - ElasticsearchStorage storage; - - @BeforeEach void init() { - TestPropertyValues.of( - "spring.config.name=zipkin-server", - "zipkin.self-tracing.enabled=true", - "zipkin.self-tracing.message-timeout=1ms", - "zipkin.self-tracing.traces-per-second=10", - "zipkin.storage.type=elasticsearch", - "zipkin.storage.elasticsearch.ensure-templates=false", - "zipkin.storage.elasticsearch.hosts=" + server.httpUri()).applyTo(context); - Access.registerElasticsearch(context); - context.register(ZipkinSelfTracingConfiguration.class); - context.refresh(); - storage = context.getBean(ElasticsearchStorage.class); - } - - @AfterEach void close() { - storage.close(); - } - - /** - * We currently don't have a nice way to mute outbound propagation in Brave. This just makes sure - * we are nicer. 
- */ - @Test void healthcheck_usesB3Single() { - server.enqueue(VERSION_RESPONSE.toHttpResponse()); - server.enqueue(YELLOW_RESPONSE.toHttpResponse()); - - assertThat(storage.check().ok()).isTrue(); - - assertThat(server.takeRequest().request().headers()) - .extracting(e -> e.getKey().toString()) - .contains("b3") - .doesNotContain("x-b3-traceid"); - } -} diff --git a/zipkin-server/src/test/java/zipkin2/server/internal/elasticsearch/InitialEndpointSupplierTest.java b/zipkin-server/src/test/java/zipkin2/server/internal/elasticsearch/InitialEndpointSupplierTest.java deleted file mode 100644 index fe8c2b2f03b..00000000000 --- a/zipkin-server/src/test/java/zipkin2/server/internal/elasticsearch/InitialEndpointSupplierTest.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal.elasticsearch; - -import com.linecorp.armeria.client.Endpoint; -import org.junit.jupiter.api.Test; - -import static com.linecorp.armeria.common.SessionProtocol.HTTP; -import static com.linecorp.armeria.common.SessionProtocol.HTTPS; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; - -class InitialEndpointSupplierTest { - - @Test void defaultIsLocalhost9200RegardlessOfSessionProtocol() { - assertThat(new InitialEndpointSupplier(HTTP, null).get()) - .isEqualTo(Endpoint.of("localhost", 9200)) - .isEqualTo(new InitialEndpointSupplier(HTTPS, null).get()); - } - - @Test void usesNaturalHttpPortsWhenUrls() { - assertThat(new InitialEndpointSupplier(HTTP, "http://localhost").get()) - .isEqualTo(Endpoint.of("localhost", 80)); - assertThat(new InitialEndpointSupplier(HTTPS, "https://localhost").get()) - .isEqualTo(Endpoint.of("localhost", 443)); - } - - @Test void defaultsPlainHostsToPort9200() { - assertThat(new InitialEndpointSupplier(HTTP, "localhost").get()) - .isEqualTo(Endpoint.of("localhost", 9200)); - assertThat(new InitialEndpointSupplier(HTTPS, "localhost").get()) - .isEqualTo(Endpoint.of("localhost", 443)); - } - - /** This helps ensure old setups don't break (provided they have http port 9200 open) */ - @Test public void coersesPort9300To9200() { - assertThat(new InitialEndpointSupplier(HTTP, "localhost:9300").get()) - .isEqualTo(Endpoint.of("localhost", 9200)); - } - - @Test void parsesListOfLocalhosts() { - String hostList = "localhost:9201,localhost:9202"; - assertThat(new InitialEndpointSupplier(HTTP, hostList).get().endpoints()) - .containsExactly(Endpoint.of("localhost", 9201), Endpoint.of("localhost", 9202)) - .containsExactlyElementsOf(new InitialEndpointSupplier(HTTPS, hostList).get().endpoints()); - } - - @Test void parsesListOfLocalhosts_skipsBlankEntry() { - String hostList = "localhost:9201,,localhost:9202"; - assertThat(new 
InitialEndpointSupplier(HTTP, hostList).get().endpoints()) - .containsExactly(Endpoint.of("localhost", 9201), Endpoint.of("localhost", 9202)) - .containsExactlyElementsOf(new InitialEndpointSupplier(HTTPS, hostList).get().endpoints()); - } - - @Test void parsesEmptyListOfHosts_toDefault() { - assertThat(new InitialEndpointSupplier(HTTP, "").get().endpoints()) - .containsExactly(Endpoint.of("localhost", 9200)); - } - - @Test void parsesListOfLocalhosts_failsWhenAllInvalid() { - InitialEndpointSupplier supplier = new InitialEndpointSupplier(HTTP, ","); - assertThatThrownBy(supplier::get) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage("No valid endpoints found in ES hosts: ,"); - } -} diff --git a/zipkin-server/src/test/java/zipkin2/server/internal/elasticsearch/TestResponses.java b/zipkin-server/src/test/java/zipkin2/server/internal/elasticsearch/TestResponses.java deleted file mode 100644 index 3a614b0e9f1..00000000000 --- a/zipkin-server/src/test/java/zipkin2/server/internal/elasticsearch/TestResponses.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal.elasticsearch; - -import com.linecorp.armeria.common.AggregatedHttpResponse; - -import static com.linecorp.armeria.common.HttpStatus.OK; -import static com.linecorp.armeria.common.MediaType.JSON; - -final class TestResponses { - static final AggregatedHttpResponse VERSION_RESPONSE = AggregatedHttpResponse.of(OK, JSON, "" - + "{\n" - + " \"name\" : \"PV-NhJd\",\n" - + " \"cluster_name\" : \"CollectorDBCluster\",\n" - + " \"cluster_uuid\" : \"UjZaM0fQRC6tkHINCg9y8w\",\n" - + " \"version\" : {\n" - + " \"number\" : \"6.7.0\",\n" - + " \"build_flavor\" : \"oss\",\n" - + " \"build_type\" : \"tar\",\n" - + " \"build_hash\" : \"8453f77\",\n" - + " \"build_date\" : \"2019-03-21T15:32:29.844721Z\",\n" - + " \"build_snapshot\" : false,\n" - + " \"lucene_version\" : \"7.7.0\",\n" - + " \"minimum_wire_compatibility_version\" : \"5.6.0\",\n" - + " \"minimum_index_compatibility_version\" : \"5.0.0\"\n" - + " },\n" - + " \"tagline\" : \"You Know, for Search\"\n" - + "}"); - static final AggregatedHttpResponse YELLOW_RESPONSE = AggregatedHttpResponse.of(OK, JSON, "" - + "{\n" - + " \"cluster_name\": \"CollectorDBCluster\",\n" - + " \"status\": \"yellow\",\n" - + " \"timed_out\": false,\n" - + " \"number_of_nodes\": 1,\n" - + " \"number_of_data_nodes\": 1,\n" - + " \"active_primary_shards\": 5,\n" - + " \"active_shards\": 5,\n" - + " \"relocating_shards\": 0,\n" - + " \"initializing_shards\": 0,\n" - + " \"unassigned_shards\": 5,\n" - + " \"delayed_unassigned_shards\": 0,\n" - + " \"number_of_pending_tasks\": 0,\n" - + " \"number_of_in_flight_fetch\": 0,\n" - + " \"task_max_waiting_in_queue_millis\": 0,\n" - + " \"active_shards_percent_as_number\": 50\n" - + "}\n"); - static final AggregatedHttpResponse GREEN_RESPONSE = AggregatedHttpResponse.of(OK, JSON, - "{\n" - + " \"cluster_name\": \"CollectorDBCluster\",\n" - + " \"status\": \"green\",\n" - + " \"timed_out\": false,\n" - + " \"number_of_nodes\": 1,\n" - + " \"number_of_data_nodes\": 1,\n" 
- + " \"active_primary_shards\": 5,\n" - + " \"active_shards\": 5,\n" - + " \"relocating_shards\": 0,\n" - + " \"initializing_shards\": 0,\n" - + " \"unassigned_shards\": 5,\n" - + " \"delayed_unassigned_shards\": 0,\n" - + " \"number_of_pending_tasks\": 0,\n" - + " \"number_of_in_flight_fetch\": 0,\n" - + " \"task_max_waiting_in_queue_millis\": 0,\n" - + " \"active_shards_percent_as_number\": 50\n" - + "}\n"); - - private TestResponses() { - } -} diff --git a/zipkin-server/src/test/java/zipkin2/server/internal/elasticsearch/ZipkinElasticsearchStorageConfigurationTest.java b/zipkin-server/src/test/java/zipkin2/server/internal/elasticsearch/ZipkinElasticsearchStorageConfigurationTest.java deleted file mode 100644 index c6272a6b7c4..00000000000 --- a/zipkin-server/src/test/java/zipkin2/server/internal/elasticsearch/ZipkinElasticsearchStorageConfigurationTest.java +++ /dev/null @@ -1,450 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal.elasticsearch; - -import com.linecorp.armeria.client.ClientOptions; -import com.linecorp.armeria.client.ClientOptionsBuilder; -import com.linecorp.armeria.client.WebClient; -import com.linecorp.armeria.common.SessionProtocol; -import java.util.Arrays; -import java.util.Objects; -import java.util.concurrent.TimeUnit; -import java.util.function.Consumer; -import org.junit.After; -import org.junit.Test; -import org.springframework.beans.factory.BeanCreationException; -import org.springframework.beans.factory.NoSuchBeanDefinitionException; -import org.springframework.beans.factory.UnsatisfiedDependencyException; -import org.springframework.beans.factory.annotation.Qualifier; -import org.springframework.boot.test.util.TestPropertyValues; -import org.springframework.context.annotation.AnnotationConfigApplicationContext; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Configuration; -import zipkin2.elasticsearch.ElasticsearchStorage; - -import static org.assertj.core.api.Assertions.assertThat; -import static zipkin2.server.internal.elasticsearch.ITElasticsearchDynamicCredentials.pathOfResource; - -public class ZipkinElasticsearchStorageConfigurationTest { - final AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext(); - - @After public void close() { - context.close(); - } - - @Test(expected = NoSuchBeanDefinitionException.class) - public void doesntProvideStorageComponent_whenStorageTypeNotElasticsearch() { - TestPropertyValues.of("zipkin.storage.type:cassandra").applyTo(context); - Access.registerElasticsearch(context); - context.refresh(); - - es(); - } - - @Test public void providesStorageComponent_whenStorageTypeElasticsearchAndHostsAreUrls() { - TestPropertyValues.of( - "zipkin.storage.type:elasticsearch", - "zipkin.storage.elasticsearch.hosts:http://host1:9200") - .applyTo(context); - Access.registerElasticsearch(context); - context.refresh(); - 
- assertThat(es()).isNotNull(); - } - - @Test public void canOverridesProperty_hostsWithList() { - TestPropertyValues.of( - "zipkin.storage.type:elasticsearch", - "zipkin.storage.elasticsearch.hosts:http://host1:9200,http://host2:9200") - .applyTo(context); - Access.registerElasticsearch(context); - context.refresh(); - - assertThat(context.getBean(ZipkinElasticsearchStorageProperties.class).getHosts()) - .isEqualTo("http://host1:9200,http://host2:9200"); - } - - @Test public void decentToString_whenUnresolvedOrUnhealthy() { - TestPropertyValues.of( - "zipkin.storage.type:elasticsearch", - "zipkin.storage.elasticsearch.hosts:http://127.0.0.1:9200,http://127.0.0.1:9201") - .applyTo(context); - Access.registerElasticsearch(context); - context.refresh(); - - assertThat(es()).hasToString( - "ElasticsearchStorage{initialEndpoints=http://127.0.0.1:9200,http://127.0.0.1:9201, index=zipkin}"); - } - - @Test public void configuresPipeline() { - TestPropertyValues.of( - "zipkin.storage.type:elasticsearch", - "zipkin.storage.elasticsearch.hosts:http://host1:9200", - "zipkin.storage.elasticsearch.pipeline:zipkin") - .applyTo(context); - Access.registerElasticsearch(context); - context.refresh(); - - assertThat(es().pipeline()).isEqualTo("zipkin"); - } - - @Test public void httpPrefixOptional() { - TestPropertyValues.of( - "zipkin.storage.type:elasticsearch", - "zipkin.storage.elasticsearch.hosts:host1:9200") - .applyTo(context); - Access.registerElasticsearch(context); - context.refresh(); - - assertThat(context.getBean(SessionProtocol.class)) - .isEqualTo(SessionProtocol.HTTP); - } - - @Test public void https() { - TestPropertyValues.of( - "zipkin.storage.type:elasticsearch", - "zipkin.storage.elasticsearch.hosts:https://localhost") - .applyTo(context); - Access.registerElasticsearch(context); - context.refresh(); - - assertThat(context.getBean(SessionProtocol.class)) - .isEqualTo(SessionProtocol.HTTPS); - 
assertThat(context.getBean(InitialEndpointSupplier.class).get().endpoints().get(0).port()) - .isEqualTo(443); - } - - @Configuration - static class CustomizerConfiguration { - - @Bean @Qualifier("zipkinElasticsearch") public Consumer one() { - return one; - } - - @Bean @Qualifier("zipkinElasticsearch") public Consumer two() { - return two; - } - - Consumer one = client -> client.maxResponseLength(12345L); - Consumer two = - client -> client.addHeader("test", "bar"); - } - - /** Ensures we can wire up network interceptors, such as for logging or authentication */ - @Test public void usesInterceptorsQualifiedWith_zipkinElasticsearchHttp() { - TestPropertyValues.of("zipkin.storage.type:elasticsearch").applyTo(context); - Access.registerElasticsearch(context); - context.register(CustomizerConfiguration.class); - context.refresh(); - - HttpClientFactory factory = context.getBean(HttpClientFactory.class); - assertThat(factory.options.maxResponseLength()).isEqualTo(12345L); - assertThat(factory.options.headers().get("test")).isEqualTo("bar"); - } - - @Test public void timeout_defaultsTo10Seconds() { - TestPropertyValues.of("zipkin.storage.type:elasticsearch").applyTo(context); - Access.registerElasticsearch(context); - context.refresh(); - - HttpClientFactory factory = context.getBean(HttpClientFactory.class); - // TODO(anuraaga): Verify connect timeout after https://github.com/line/armeria/issues/1890 - assertThat(factory.options.responseTimeoutMillis()).isEqualTo(10000L); - assertThat(factory.options.writeTimeoutMillis()).isEqualTo(10000L); - } - - @Test public void timeout_override() { - long timeout = 30000L; - TestPropertyValues.of( - "zipkin.storage.type:elasticsearch", - "zipkin.storage.elasticsearch.hosts:127.0.0.1:1234", - "zipkin.storage.elasticsearch.timeout:" + timeout) - .applyTo(context); - Access.registerElasticsearch(context); - context.refresh(); - - HttpClientFactory factory = context.getBean(HttpClientFactory.class); - // TODO(anuraaga): Verify connect 
timeout after https://github.com/line/armeria/issues/1890 - assertThat(factory.options.responseTimeoutMillis()).isEqualTo(timeout); - assertThat(factory.options.writeTimeoutMillis()).isEqualTo(timeout); - } - - @Test public void strictTraceId_defaultsToTrue() { - TestPropertyValues.of( - "zipkin.storage.type:elasticsearch", - "zipkin.storage.elasticsearch.hosts:http://host1:9200") - .applyTo(context); - Access.registerElasticsearch(context); - context.refresh(); - assertThat(es().strictTraceId()).isTrue(); - } - - @Test public void strictTraceId_canSetToFalse() { - TestPropertyValues.of( - "zipkin.storage.type:elasticsearch", - "zipkin.storage.elasticsearch.hosts:http://host1:9200", - "zipkin.storage.strict-trace-id:false") - .applyTo(context); - Access.registerElasticsearch(context); - context.refresh(); - - assertThat(es().strictTraceId()).isFalse(); - } - - @Test public void dailyIndexFormat() { - TestPropertyValues.of( - "zipkin.storage.type:elasticsearch", - "zipkin.storage.elasticsearch.hosts:http://host1:9200") - .applyTo(context); - Access.registerElasticsearch(context); - context.refresh(); - - assertThat(es().indexNameFormatter().formatTypeAndTimestamp("span", 0)) - .isEqualTo("zipkin*span-1970-01-01"); - } - - @Test public void dailyIndexFormat_overridingPrefix() { - TestPropertyValues.of( - "zipkin.storage.type:elasticsearch", - "zipkin.storage.elasticsearch.hosts:http://host1:9200", - "zipkin.storage.elasticsearch.index:zipkin_prod") - .applyTo(context); - Access.registerElasticsearch(context); - context.refresh(); - - assertThat(es().indexNameFormatter().formatTypeAndTimestamp("span", 0)) - .isEqualTo("zipkin_prod*span-1970-01-01"); - } - - @Test public void dailyIndexFormat_overridingDateSeparator() { - TestPropertyValues.of( - "zipkin.storage.type:elasticsearch", - "zipkin.storage.elasticsearch.hosts:http://host1:9200", - "zipkin.storage.elasticsearch.date-separator:.") - .applyTo(context); - Access.registerElasticsearch(context); - 
context.refresh(); - - assertThat(es().indexNameFormatter().formatTypeAndTimestamp("span", 0)) - .isEqualTo("zipkin*span-1970.01.01"); - } - - @Test public void dailyIndexFormat_overridingDateSeparator_empty() { - TestPropertyValues.of( - "zipkin.storage.type:elasticsearch", - "zipkin.storage.elasticsearch.hosts:http://host1:9200", - "zipkin.storage.elasticsearch.date-separator:") - .applyTo(context); - Access.registerElasticsearch(context); - context.refresh(); - - assertThat(es().indexNameFormatter().formatTypeAndTimestamp("span", 0)) - .isEqualTo("zipkin*span-19700101"); - } - - @Test(expected = BeanCreationException.class) - public void dailyIndexFormat_overridingDateSeparator_invalidToBeMultiChar() { - TestPropertyValues.of( - "zipkin.storage.type:elasticsearch", - "zipkin.storage.elasticsearch.hosts:http://host1:9200", - "zipkin.storage.elasticsearch.date-separator:blagho") - .applyTo(context); - Access.registerElasticsearch(context); - - context.refresh(); - } - - @Test public void namesLookbackAssignedFromQueryLookback() { - TestPropertyValues.of( - "zipkin.storage.type:elasticsearch", - "zipkin.storage.elasticsearch.hosts:http://host1:9200", - "zipkin.query.lookback:" + TimeUnit.DAYS.toMillis(2)) - .applyTo(context); - Access.registerElasticsearch(context); - context.refresh(); - - assertThat(es().namesLookback()).isEqualTo((int) TimeUnit.DAYS.toMillis(2)); - } - - @Test - public void doesntProvideBasicAuthInterceptor_whenBasicAuthUserNameandPasswordNotConfigured() { - TestPropertyValues.of( - "zipkin.storage.type:elasticsearch", - "zipkin.storage.elasticsearch.hosts:127.0.0.1:1234") - .applyTo(context); - Access.registerElasticsearch(context); - context.refresh(); - - HttpClientFactory factory = context.getBean(HttpClientFactory.class); - WebClient client = WebClient.builder("http://127.0.0.1:1234") - .option(ClientOptions.DECORATION, factory.options.decoration()) - .build(); - assertThat(client.as(BasicAuthInterceptor.class)).isNull(); - } - - @Test 
public void providesBasicAuthInterceptor_whenBasicAuthUserNameAndPasswordConfigured() { - TestPropertyValues.of( - "zipkin.storage.type:elasticsearch", - "zipkin.storage.elasticsearch.hosts:127.0.0.1:1234", - "zipkin.storage.elasticsearch.username:somename", - "zipkin.storage.elasticsearch.password:pass") - .applyTo(context); - Access.registerElasticsearch(context); - context.refresh(); - - HttpClientFactory factory = context.getBean(HttpClientFactory.class); - - WebClient client = WebClient.builder("http://127.0.0.1:1234") - .option(ClientOptions.DECORATION, factory.options.decoration()) - .build(); - assertThat(client.as(BasicAuthInterceptor.class)).isNotNull(); - } - - @Test - public void providesBasicAuthInterceptor_whenDynamicCredentialsConfigured() { - String credentialsFile = pathOfResource("es-credentials"); - TestPropertyValues.of( - "zipkin.storage.type:elasticsearch", - "zipkin.storage.elasticsearch.hosts:127.0.0.1:1234", - "zipkin.storage.elasticsearch.credentials-file:" + credentialsFile, - "zipkin.storage.elasticsearch.credentials-refresh-interval:2") - .applyTo(context); - Access.registerElasticsearch(context); - context.refresh(); - - HttpClientFactory factory = context.getBean(HttpClientFactory.class); - - WebClient client = WebClient.builder("http://127.0.0.1:1234") - .option(ClientOptions.DECORATION, factory.options.decoration()) - .build(); - assertThat(client.as(BasicAuthInterceptor.class)).isNotNull(); - BasicCredentials basicCredentials = - Objects.requireNonNull(client.as(BasicAuthInterceptor.class)).basicCredentials; - String credentials = basicCredentials.getCredentials(); - assertThat(credentials).isEqualTo("Basic Zm9vOmJhcg=="); - } - - @Test(expected = BeanCreationException.class) - public void providesBasicAuthInterceptor_whenInvalidDynamicCredentialsConfigured() { - String credentialsFile = pathOfResource("es-credentials-invalid"); - TestPropertyValues.of( - "zipkin.storage.type:elasticsearch", - 
"zipkin.storage.elasticsearch.hosts:127.0.0.1:1234", - "zipkin.storage.elasticsearch.credentials-file:" + credentialsFile, - "zipkin.storage.elasticsearch.credentials-refresh-interval:2") - .applyTo(context); - Access.registerElasticsearch(context); - context.refresh(); - } - - @Test(expected = BeanCreationException.class) - public void providesBasicAuthInterceptor_whenDynamicCredentialsConfiguredButFileAbsent() { - TestPropertyValues.of( - "zipkin.storage.type:elasticsearch", - "zipkin.storage.elasticsearch.hosts:127.0.0.1:1234", - "zipkin.storage.elasticsearch.credentials-file:no-this-file", - "zipkin.storage.elasticsearch.credentials-refresh-interval:2") - .applyTo(context); - Access.registerElasticsearch(context); - context.refresh(); - } - - @Test public void searchEnabled_false() { - TestPropertyValues.of( - "zipkin.storage.type:elasticsearch", - "zipkin.storage.search-enabled:false") - .applyTo(context); - Access.registerElasticsearch(context); - context.refresh(); - - assertThat(es()).extracting("searchEnabled") - .isEqualTo(false); - } - - @Test public void autocompleteKeys_list() { - TestPropertyValues.of( - "zipkin.storage.type:elasticsearch", - "zipkin.storage.autocomplete-keys:environment") - .applyTo(context); - Access.registerElasticsearch(context); - context.refresh(); - - assertThat(es()).extracting("autocompleteKeys") - .isEqualTo(Arrays.asList("environment")); - } - - @Test public void autocompleteTtl() { - TestPropertyValues.of( - "zipkin.storage.type:elasticsearch", - "zipkin.storage.autocomplete-ttl:60000") - .applyTo(context); - Access.registerElasticsearch(context); - context.refresh(); - - assertThat(es()).extracting("autocompleteTtl") - .isEqualTo(60000); - } - - @Test public void autocompleteCardinality() { - TestPropertyValues.of( - "zipkin.storage.type:elasticsearch", - "zipkin.storage.autocomplete-cardinality:5000") - .applyTo(context); - Access.registerElasticsearch(context); - context.refresh(); - - 
assertThat(es()).extracting("autocompleteCardinality") - .isEqualTo(5000); - } - - @Test public void templatePriority_valid() { - TestPropertyValues.of( - "zipkin.storage.type:elasticsearch", - "zipkin.storage.elasticsearch.template-priority:0") - .applyTo(context); - Access.registerElasticsearch(context); - context.refresh(); - - assertThat(es()).extracting("templatePriority") - .isEqualTo(0); - } - - @Test public void templatePriority_null() { - TestPropertyValues.of( - "zipkin.storage.type:elasticsearch", - "zipkin.storage.elasticsearch.template-priority:") - .applyTo(context); - Access.registerElasticsearch(context); - context.refresh(); - - assertThat(es()).extracting("templatePriority") - .isNull(); - } - - @Test(expected = UnsatisfiedDependencyException.class) - public void templatePriority_Invalid() { - TestPropertyValues.of( - "zipkin.storage.type:elasticsearch", - "zipkin.storage.elasticsearch.template-priority:string") - .applyTo(context); - Access.registerElasticsearch(context); - context.refresh(); - - es(); - } - - ElasticsearchStorage es() { - return context.getBean(ElasticsearchStorage.class); - } -} diff --git a/zipkin-server/src/test/java/zipkin2/server/internal/health/ComponentHealthTest.java b/zipkin-server/src/test/java/zipkin2/server/internal/health/ComponentHealthTest.java deleted file mode 100644 index f80136e1e90..00000000000 --- a/zipkin-server/src/test/java/zipkin2/server/internal/health/ComponentHealthTest.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. 
See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.server.internal.health; - -import com.linecorp.armeria.common.ClosedSessionException; -import java.io.IOException; -import org.junit.Test; -import zipkin2.CheckResult; -import zipkin2.Component; - -import static org.assertj.core.api.Assertions.assertThat; - -public class ComponentHealthTest { - @Test public void addsMessageToDetails() { - ComponentHealth health = ComponentHealth.ofComponent(new Component() { - @Override public CheckResult check() { - return CheckResult.failed(new IOException("socket disconnect")); - } - }); - - assertThat(health.error) - .isEqualTo("java.io.IOException: socket disconnect"); - } - - @Test public void doesntAddNullMessageToDetails() { - ComponentHealth health = ComponentHealth.ofComponent(new Component() { - @Override public CheckResult check() { - return CheckResult.failed(ClosedSessionException.get()); - } - }); - - assertThat(health.error) - .isEqualTo("com.linecorp.armeria.common.ClosedSessionException"); - } -} diff --git a/zipkin-server/src/test/java/zipkin2/server/internal/health/ITZipkinHealth.java b/zipkin-server/src/test/java/zipkin2/server/internal/health/ITZipkinHealth.java deleted file mode 100644 index f30e4fc165a..00000000000 --- a/zipkin-server/src/test/java/zipkin2/server/internal/health/ITZipkinHealth.java +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. 
See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.server.internal.health; - -import com.jayway.jsonpath.JsonPath; -import com.linecorp.armeria.server.Server; -import io.micrometer.prometheus.PrometheusMeterRegistry; -import java.io.IOException; -import okhttp3.OkHttpClient; -import okhttp3.Request; -import okhttp3.Response; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.boot.test.context.SpringBootTest; -import org.springframework.test.context.junit4.SpringRunner; -import zipkin.server.ZipkinServer; -import zipkin2.storage.InMemoryStorage; - -import static org.assertj.core.api.Assertions.assertThat; -import static zipkin2.server.internal.ITZipkinServer.url; - -@SpringBootTest( - classes = ZipkinServer.class, - webEnvironment = SpringBootTest.WebEnvironment.NONE, // RANDOM_PORT requires spring-web - properties = { - "server.port=0", - "spring.config.name=zipkin-server" - } -) -@RunWith(SpringRunner.class) -public class ITZipkinHealth { - @Autowired InMemoryStorage storage; - @Autowired PrometheusMeterRegistry registry; - @Autowired Server server; - - OkHttpClient client = new OkHttpClient.Builder().followRedirects(true).build(); - - @Before public void init() { - storage.clear(); - } - - @Test public void healthIsOK() throws Exception { - Response health = get("/health"); - assertThat(health.isSuccessful()).isTrue(); - assertThat(health.body().contentType()) - .hasToString("application/json; charset=utf-8"); - assertThat(health.body().string()).isEqualTo("" - + "{\n" - + " \"status\" : \"UP\",\n" - + " \"zipkin\" : {\n" - + " \"status\" : \"UP\",\n" - + " \"details\" : {\n" - + " \"InMemoryStorage{}\" : {\n" - + " \"status\" : \"UP\"\n" - + " }\n" - + " }\n" - + " }\n" - + "}" - ); - - // ensure we don't track health in prometheus - assertThat(scrape()) - 
.doesNotContain("health"); - } - - String scrape() throws InterruptedException { - Thread.sleep(100); - return registry.scrape(); - } - - @Test public void readsHealth() throws Exception { - String json = getAsString("/health"); - assertThat(readString(json, "$.status")) - .isIn("UP", "DOWN", "UNKNOWN"); - assertThat(readString(json, "$.zipkin.status")) - .isIn("UP", "DOWN", "UNKNOWN"); - } - - private String getAsString(String path) throws IOException { - Response response = get(path); - assertThat(response.isSuccessful()) - .withFailMessage(response.toString()) - .isTrue(); - return response.body().string(); - } - - private Response get(String path) throws IOException { - return client.newCall(new Request.Builder().url(url(server, path)).build()).execute(); - } - - static String readString(String json, String jsonPath) { - return JsonPath.compile(jsonPath).read(json); - } -} diff --git a/zipkin-server/src/test/java/zipkin2/server/internal/health/ITZipkinHealthDown.java b/zipkin-server/src/test/java/zipkin2/server/internal/health/ITZipkinHealthDown.java deleted file mode 100644 index 3c8834ff389..00000000000 --- a/zipkin-server/src/test/java/zipkin2/server/internal/health/ITZipkinHealthDown.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal.health; - -import com.linecorp.armeria.server.Server; -import java.io.IOException; -import okhttp3.OkHttpClient; -import okhttp3.Request; -import okhttp3.Response; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.boot.test.context.SpringBootTest; -import org.springframework.test.context.junit4.SpringRunner; -import zipkin.server.ZipkinServer; - -import static org.assertj.core.api.Assertions.assertThat; -import static zipkin2.server.internal.ITZipkinServer.url; - -@SpringBootTest( - classes = ZipkinServer.class, - webEnvironment = SpringBootTest.WebEnvironment.NONE, // RANDOM_PORT requires spring-web - properties = { - "server.port=0", - "spring.config.name=zipkin-server", - "zipkin.storage.type=elasticsearch", - "zipkin.storage.elasticsearch.hosts=127.0.0.1:9999" - } -) -@RunWith(SpringRunner.class) -public class ITZipkinHealthDown { - @Autowired Server server; - - OkHttpClient client = new OkHttpClient.Builder().followRedirects(true).build(); - - @Test public void downHasCorrectCode() throws Exception { - Response check = get("/health"); - assertThat(check.code()).isEqualTo(503); - } - - private Response get(String path) throws IOException { - return client.newCall(new Request.Builder().url(url(server, path)).build()).execute(); - } -} diff --git a/zipkin-server/src/test/java/zipkin2/server/internal/health/ZipkinHealthControllerTest.java b/zipkin-server/src/test/java/zipkin2/server/internal/health/ZipkinHealthControllerTest.java deleted file mode 100644 index 424783a999b..00000000000 --- a/zipkin-server/src/test/java/zipkin2/server/internal/health/ZipkinHealthControllerTest.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.server.internal.health; - -import java.util.List; -import org.junit.Test; - -import static java.util.Arrays.asList; -import static org.assertj.core.api.Assertions.assertThat; -import static zipkin2.server.internal.health.ComponentHealth.STATUS_DOWN; -import static zipkin2.server.internal.health.ComponentHealth.STATUS_UP; - -public class ZipkinHealthControllerTest { - @Test public void writeJsonError_writesNestedError() throws Exception { - assertThat(ZipkinHealthController.writeJsonError("robots")).isEqualTo("" - + "{\n" - + " \"status\" : \"DOWN\",\n" - + " \"zipkin\" : {\n" - + " \"status\" : \"DOWN\",\n" - + " \"details\" : {\n" - + " \"error\" : \"robots\"\n" - + " }\n" - + " }\n" - + "}" - ); - } - - @Test public void writeJson_mappedByName() throws Exception { - List healths = asList( - new ComponentHealth("foo", STATUS_UP, null), - new ComponentHealth("bar", STATUS_DOWN, "java.io.IOException: socket disconnect") - ); - assertThat(ZipkinHealthController.writeJson(STATUS_DOWN, healths)).isEqualTo("" - + "{\n" - + " \"status\" : \"DOWN\",\n" - + " \"zipkin\" : {\n" - + " \"status\" : \"DOWN\",\n" - + " \"details\" : {\n" - + " \"foo\" : {\n" - + " \"status\" : \"UP\"\n" - + " },\n" - + " \"bar\" : {\n" - + " \"status\" : \"DOWN\",\n" - + " \"details\" : {\n" - + " \"error\" : \"java.io.IOException: socket disconnect\"\n" - + " }\n" - + " }\n" - + " }\n" - + " }\n" - + "}" - ); - } -} diff --git a/zipkin-server/src/test/java/zipkin2/server/internal/kafka/Access.java 
b/zipkin-server/src/test/java/zipkin2/server/internal/kafka/Access.java deleted file mode 100644 index a0cc5b90a39..00000000000 --- a/zipkin-server/src/test/java/zipkin2/server/internal/kafka/Access.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.server.internal.kafka; - -import org.springframework.boot.autoconfigure.context.PropertyPlaceholderAutoConfiguration; -import org.springframework.boot.context.properties.EnableConfigurationProperties; -import org.springframework.context.annotation.AnnotationConfigApplicationContext; -import org.springframework.context.annotation.Configuration; -import zipkin2.collector.kafka.KafkaCollector; - -/** opens package access for testing */ -public final class Access { - - /** Just registering properties to avoid automatically connecting to a Kafka server */ - public static void registerKafkaProperties(AnnotationConfigApplicationContext context) { - context.register( - PropertyPlaceholderAutoConfiguration.class, EnableKafkaCollectorProperties.class); - } - - @Configuration - @EnableConfigurationProperties(ZipkinKafkaCollectorProperties.class) - static class EnableKafkaCollectorProperties {} - - public static KafkaCollector.Builder collectorBuilder( - AnnotationConfigApplicationContext context) { - return context.getBean(ZipkinKafkaCollectorProperties.class).toBuilder(); - } -} diff --git 
a/zipkin-server/src/test/java/zipkin2/server/internal/kafka/ZipkinKafkaCollectorConfigurationTest.java b/zipkin-server/src/test/java/zipkin2/server/internal/kafka/ZipkinKafkaCollectorConfigurationTest.java deleted file mode 100644 index 5bf293b26a0..00000000000 --- a/zipkin-server/src/test/java/zipkin2/server/internal/kafka/ZipkinKafkaCollectorConfigurationTest.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal.kafka; - -import org.junit.After; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.springframework.beans.factory.NoSuchBeanDefinitionException; -import org.springframework.boot.autoconfigure.context.PropertyPlaceholderAutoConfiguration; -import org.springframework.boot.test.util.TestPropertyValues; -import org.springframework.context.annotation.AnnotationConfigApplicationContext; -import zipkin2.collector.kafka.KafkaCollector; -import zipkin2.server.internal.InMemoryConfiguration; - -import static org.assertj.core.api.Assertions.assertThat; - -public class ZipkinKafkaCollectorConfigurationTest { - - @Rule public ExpectedException thrown = ExpectedException.none(); - - AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext(); - - @After public void close() { - context.close(); - } - - @Test public void doesNotProvideCollectorComponent_whenBootstrapServersUnset() { - context.register( - PropertyPlaceholderAutoConfiguration.class, - ZipkinKafkaCollectorConfiguration.class, - InMemoryConfiguration.class); - context.refresh(); - - thrown.expect(NoSuchBeanDefinitionException.class); - context.getBean(KafkaCollector.class); - } - - @Test public void providesCollectorComponent_whenBootstrapServersEmptyString() { - TestPropertyValues.of("zipkin.collector.kafka.bootstrap-servers:").applyTo(context); - context.register( - PropertyPlaceholderAutoConfiguration.class, - ZipkinKafkaCollectorConfiguration.class, - InMemoryConfiguration.class); - context.refresh(); - - thrown.expect(NoSuchBeanDefinitionException.class); - context.getBean(KafkaCollector.class); - } - - @Test public void providesCollectorComponent_whenBootstrapServersSet() { - TestPropertyValues.of("zipkin.collector.kafka.bootstrap-servers:localhost:9092") - .applyTo(context); - context.register( - PropertyPlaceholderAutoConfiguration.class, - ZipkinKafkaCollectorConfiguration.class, - 
InMemoryConfiguration.class); - context.refresh(); - - assertThat(context.getBean(KafkaCollector.class)).isNotNull(); - } - - @Test public void doesNotProvidesCollectorComponent_whenBootstrapServersSetAndDisabled() { - TestPropertyValues.of("zipkin.collector.kafka.bootstrap-servers:localhost:9092") - .applyTo(context); - TestPropertyValues.of("zipkin.collector.kafka.enabled:false").applyTo(context); - context.register( - PropertyPlaceholderAutoConfiguration.class, - ZipkinKafkaCollectorConfiguration.class, - InMemoryConfiguration.class); - context.refresh(); - - thrown.expect(NoSuchBeanDefinitionException.class); - context.getBean(KafkaCollector.class); - } -} diff --git a/zipkin-server/src/test/java/zipkin2/server/internal/kafka/ZipkinKafkaCollectorPropertiesTest.java b/zipkin-server/src/test/java/zipkin2/server/internal/kafka/ZipkinKafkaCollectorPropertiesTest.java deleted file mode 100644 index 3d7f444d378..00000000000 --- a/zipkin-server/src/test/java/zipkin2/server/internal/kafka/ZipkinKafkaCollectorPropertiesTest.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal.kafka; - -import org.junit.Test; - -import static org.assertj.core.api.Assertions.assertThat; - -public class ZipkinKafkaCollectorPropertiesTest { - @Test public void stringPropertiesConvertEmptyStringsToNull() { - final ZipkinKafkaCollectorProperties properties = new ZipkinKafkaCollectorProperties(); - properties.setBootstrapServers(""); - properties.setGroupId(""); - properties.setTopic(""); - assertThat(properties.getBootstrapServers()).isNull(); - assertThat(properties.getGroupId()).isNull(); - assertThat(properties.getTopic()).isNull(); - } -} diff --git a/zipkin-server/src/test/java/zipkin2/server/internal/mysql/Access.java b/zipkin-server/src/test/java/zipkin2/server/internal/mysql/Access.java deleted file mode 100644 index b0ab1a09d6e..00000000000 --- a/zipkin-server/src/test/java/zipkin2/server/internal/mysql/Access.java +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal.mysql; - -import org.springframework.boot.autoconfigure.context.PropertyPlaceholderAutoConfiguration; -import org.springframework.context.annotation.AnnotationConfigApplicationContext; - -/** opens package access for testing */ -public final class Access { - - public static void registerMySQL(AnnotationConfigApplicationContext context) { - context.register( - PropertyPlaceholderAutoConfiguration.class, ZipkinMySQLStorageConfiguration.class); - } -} diff --git a/zipkin-server/src/test/java/zipkin2/server/internal/prometheus/ITZipkinMetrics.java b/zipkin-server/src/test/java/zipkin2/server/internal/prometheus/ITZipkinMetrics.java deleted file mode 100644 index d9be1abbecd..00000000000 --- a/zipkin-server/src/test/java/zipkin2/server/internal/prometheus/ITZipkinMetrics.java +++ /dev/null @@ -1,186 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal.prometheus; - -import com.fasterxml.jackson.core.JsonFactory; -import com.fasterxml.jackson.core.JsonParser; -import com.fasterxml.jackson.core.JsonToken; -import com.linecorp.armeria.server.Server; -import io.micrometer.prometheus.PrometheusMeterRegistry; -import java.io.IOException; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import okhttp3.OkHttpClient; -import okhttp3.Request; -import okhttp3.RequestBody; -import okhttp3.Response; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.boot.test.context.SpringBootTest; -import zipkin.server.ZipkinServer; -import zipkin2.Span; -import zipkin2.codec.SpanBytesEncoder; -import zipkin2.storage.InMemoryStorage; - -import static java.util.Arrays.asList; -import static org.assertj.core.api.Assertions.assertThat; -import static zipkin2.TestObjects.LOTS_OF_SPANS; -import static zipkin2.server.internal.ITZipkinServer.url; - -/** - * Only add tests that do not consider the value of a counter or gauge, as these will flake and so - * should only exist in {@link ITZipkinMetricsDirty}. 
- */ -@SpringBootTest( - classes = ZipkinServer.class, - webEnvironment = SpringBootTest.WebEnvironment.NONE, // RANDOM_PORT requires spring-web - properties = { - "server.port=0", - "spring.config.name=zipkin-server" - } -) -public class ITZipkinMetrics { - @Autowired InMemoryStorage storage; - @Autowired PrometheusMeterRegistry registry; - @Autowired Server server; - - OkHttpClient client = new OkHttpClient.Builder().followRedirects(true).build(); - - @BeforeEach void init() { - storage.clear(); - } - - @Test void metricsIsOK() throws Exception { - assertThat(get("/metrics").isSuccessful()) - .isTrue(); - - // ensure we don't track metrics in prometheus - assertThat(scrape()) - .doesNotContain("metrics"); - } - - @Test void prometheusIsOK() throws Exception { - assertThat(get("/prometheus").isSuccessful()) - .isTrue(); - - // ensure we don't track prometheus, UI requests in prometheus - assertThat(scrape()) - .doesNotContain("uri=\"/prometheus") - .doesNotContain("uri=\"/zipkin") - .doesNotContain("uri=\"/\""); - } - - @Test void apiTemplate_prometheus() throws Exception { - List spans = asList(LOTS_OF_SPANS[0]); - byte[] body = SpanBytesEncoder.JSON_V2.encodeList(spans); - assertThat(post("/api/v2/spans", body).isSuccessful()) - .isTrue(); - - assertThat(get("/api/v2/trace/" + LOTS_OF_SPANS[0].traceId()).isSuccessful()) - .isTrue(); - - assertThat(get("/api/v2/traceMany?traceIds=abcde," + LOTS_OF_SPANS[0].traceId()).isSuccessful()) - .isTrue(); - - assertThat(scrape()) - .contains("uri=\"/api/v2/traceMany\"") // sanity check - .contains("uri=\"/api/v2/trace/{traceId}\"") - .doesNotContain(LOTS_OF_SPANS[0].traceId()); - } - - @Test void forwardedRoute_prometheus() throws Exception { - assertThat(get("/zipkin/api/v2/services").isSuccessful()) - .isTrue(); - - assertThat(scrape()) - .contains("uri=\"/api/v2/services\"") - .doesNotContain("uri=\"/zipkin/api/v2/services\""); - } - - @Test void jvmMetrics_prometheus() throws Exception { - assertThat(scrape()) - 
.contains("jvm_memory_max_bytes") - .contains("jvm_memory_used_bytes") - .contains("jvm_memory_committed_bytes") - .contains("jvm_buffer_count_buffers") - .contains("jvm_buffer_memory_used_bytes") - .contains("jvm_buffer_total_capacity_bytes") - .contains("jvm_classes_loaded_classes") - .contains("jvm_classes_unloaded_classes_total") - .contains("jvm_threads_live_threads") - .contains("jvm_threads_states_threads") - .contains("jvm_threads_peak_threads") - .contains("jvm_threads_daemon_threads"); - // gc metrics are not tested as are not present during test running - } - - String scrape() throws Exception { - Thread.sleep(100); - return registry.scrape(); - } - - @Test void writesSpans_readMetricsFormat() throws Exception { - byte[] span = {'z', 'i', 'p', 'k', 'i', 'n'}; - List spans = asList(LOTS_OF_SPANS[0], LOTS_OF_SPANS[1], LOTS_OF_SPANS[2]); - byte[] body = SpanBytesEncoder.JSON_V2.encodeList(spans); - post("/api/v2/spans", body); - post("/api/v2/spans", body); - post("/api/v2/spans", span); - Thread.sleep(1500); - - String metrics = getAsString("/metrics"); - - assertThat(readJson(metrics)).containsOnlyKeys( - "gauge.zipkin_collector.message_spans.http" - , "gauge.zipkin_collector.message_bytes.http" - , "counter.zipkin_collector.messages.http" - , "counter.zipkin_collector.bytes.http" - , "counter.zipkin_collector.spans.http" - , "counter.zipkin_collector.messages_dropped.http" - , "counter.zipkin_collector.spans_dropped.http" - ); - } - - private String getAsString(String path) throws IOException { - Response response = get(path); - assertThat(response.isSuccessful()) - .withFailMessage(response.toString()) - .isTrue(); - return response.body().string(); - } - - private Response get(String path) throws IOException { - return client.newCall(new Request.Builder().url(url(server, path)).build()).execute(); - } - - private Response post(String path, byte[] body) throws IOException { - return client.newCall(new Request.Builder() - .url(url(server, path)) - 
.post(RequestBody.create(body)) - .build()).execute(); - } - - static Map readJson(String json) throws Exception { - Map result = new LinkedHashMap<>(); - JsonParser parser = new JsonFactory().createParser(json); - assertThat(parser.nextToken()).isEqualTo(JsonToken.START_OBJECT); - String nextField; - while ((nextField = parser.nextFieldName()) != null) { - result.put(nextField, parser.nextIntValue(0)); - } - return result; - } -} diff --git a/zipkin-server/src/test/java/zipkin2/server/internal/prometheus/ITZipkinMetricsDirty.java b/zipkin-server/src/test/java/zipkin2/server/internal/prometheus/ITZipkinMetricsDirty.java deleted file mode 100644 index 5d56fb2fe6b..00000000000 --- a/zipkin-server/src/test/java/zipkin2/server/internal/prometheus/ITZipkinMetricsDirty.java +++ /dev/null @@ -1,189 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal.prometheus; - -import com.jayway.jsonpath.JsonPath; -import com.linecorp.armeria.server.Server; -import io.micrometer.prometheus.PrometheusMeterRegistry; -import java.io.IOException; -import java.util.List; -import okhttp3.OkHttpClient; -import okhttp3.Request; -import okhttp3.RequestBody; -import okhttp3.Response; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.boot.test.context.SpringBootTest; -import org.springframework.test.annotation.DirtiesContext; -import zipkin.server.ZipkinServer; -import zipkin2.Span; -import zipkin2.codec.SpanBytesEncoder; -import zipkin2.storage.InMemoryStorage; - -import static java.util.Arrays.asList; -import static org.assertj.core.api.Assertions.assertThat; -import static org.springframework.test.annotation.DirtiesContext.ClassMode.BEFORE_EACH_TEST_METHOD; -import static zipkin2.TestObjects.LOTS_OF_SPANS; -import static zipkin2.server.internal.ITZipkinServer.url; - -/** - * Tests here look at values based on counter values, so need to run independently. It would seem - * correct to {@link PrometheusMeterRegistry#clear() clear the registry} to isolate counters - * incremented in one test from interfering with another. However, this clears the metrics - * themselves, resulting in an empty {@link PrometheusMeterRegistry#scrape()}. - * - *

Currently, the only way we know how to reset the whole registry is to recreate the Spring - * context, via {@link DirtiesContext} on each test. This is extremely slow, so please only add - * tests that require isolation here! - */ -@SpringBootTest( - classes = ZipkinServer.class, - webEnvironment = SpringBootTest.WebEnvironment.NONE, // RANDOM_PORT requires spring-web - properties = { - "server.port=0", - "spring.config.name=zipkin-server" - } -) -// Clearing the prometheus registry also clears the metrics themselves, not just the values, so we -// have to use dirties context so that each test runs in a separate instance of Spring Boot. -@DirtiesContext(classMode = BEFORE_EACH_TEST_METHOD) -public class ITZipkinMetricsDirty { - - @Autowired InMemoryStorage storage; - @Autowired PrometheusMeterRegistry registry; - @Autowired Server server; - - OkHttpClient client = new OkHttpClient.Builder().followRedirects(true).build(); - - @BeforeEach void init() { - // We use DirtiesContext, not registry.clear(), as the latter would cause an empty scrape - storage.clear(); - } - - @Test void writeSpans_updatesMetrics() throws Exception { - List spans = asList(LOTS_OF_SPANS[0], LOTS_OF_SPANS[1], LOTS_OF_SPANS[2]); - byte[] body = SpanBytesEncoder.JSON_V2.encodeList(spans); - double messagesCount = - registry.counter("zipkin_collector.messages", "transport", "http").count(); - double bytesCount = registry.counter("zipkin_collector.bytes", "transport", "http").count(); - double spansCount = registry.counter("zipkin_collector.spans", "transport", "http").count(); - post("/api/v2/spans", body); - post("/api/v2/spans", body); - - String json = getAsString("/metrics"); - - assertThat(readDouble(json, "$.['counter.zipkin_collector.messages.http']")) - .isEqualTo(messagesCount + 2.0); - assertThat(readDouble(json, "$.['counter.zipkin_collector.bytes.http']")) - .isEqualTo(bytesCount + (body.length * 2)); - assertThat(readDouble(json, "$.['gauge.zipkin_collector.message_bytes.http']")) - 
.isEqualTo(body.length); - assertThat(readDouble(json, "$.['counter.zipkin_collector.spans.http']")) - .isEqualTo(spansCount + (spans.size() * 2)); - assertThat(readDouble(json, "$.['gauge.zipkin_collector.message_spans.http']")) - .isEqualTo(spans.size()); - } - - @Test void writeSpans_malformedUpdatesMetrics() throws Exception { - byte[] body = {'h', 'e', 'l', 'l', 'o'}; - double messagesCount = - registry.counter("zipkin_collector.messages", "transport", "http").count(); - double messagesDroppedCount = - registry.counter("zipkin_collector.messages_dropped", "transport", "http").count(); - post("/api/v2/spans", body); - - String json = getAsString("/metrics"); - - assertThat(readDouble(json, "$.['counter.zipkin_collector.messages.http']")) - .isEqualTo(messagesCount + 1); - assertThat(readDouble(json, "$.['counter.zipkin_collector.messages_dropped.http']")) - .isEqualTo(messagesDroppedCount + 1); - } - - /** This tests logic in {@code BodyIsExceptionMessage} is scoped to POST requests. 
*/ - @Test void getTrace_malformedDoesntUpdateCollectorMetrics() throws Exception { - double messagesCount = - registry.counter("zipkin_collector.messages", "transport", "http").count(); - double messagesDroppedCount = - registry.counter("zipkin_collector.messages_dropped", "transport", "http").count(); - - Response response = get("/api/v2/trace/0e8b46e1-81b"); - assertThat(response.code()).isEqualTo(400); - - String json = getAsString("/metrics"); - - assertThat(readDouble(json, "$.['counter.zipkin_collector.messages.http']")) - .isEqualTo(messagesCount); - assertThat(readDouble(json, "$.['counter.zipkin_collector.messages_dropped.http']")) - .isEqualTo(messagesDroppedCount); - } - - /** - * Makes sure the prometheus filter doesn't count twice - */ - @Test void writeSpans_updatesPrometheusMetrics() throws Exception { - List spans = asList(LOTS_OF_SPANS[0], LOTS_OF_SPANS[1], LOTS_OF_SPANS[2]); - byte[] body = SpanBytesEncoder.JSON_V2.encodeList(spans); - - post("/api/v2/spans", body); - post("/api/v2/spans", body); - - Thread.sleep(100); // sometimes CI flakes getting the "http.server.requests" timer - double messagesCount = registry.counter("zipkin_collector.spans", "transport", "http").count(); - // Get the http count from the registry and it should match the summation previous count - // and count of calls below - long httpCount = registry - .find("http.server.requests") - .tag("uri", "/api/v2/spans") - .timer() - .count(); - - // ensure unscoped counter does not exist - assertThat(scrape()) - .doesNotContain("zipkin_collector_spans_total " + messagesCount) - .contains("zipkin_collector_spans_total{transport=\"http\",} " + messagesCount) - .contains( - "http_server_requests_seconds_count{method=\"POST\",status=\"202\",uri=\"/api/v2/spans\",} " - + httpCount); - } - - String getAsString(String path) throws IOException { - Response response = get(path); - assertThat(response.isSuccessful()) - .withFailMessage(response.toString()) - .isTrue(); - return 
response.body().string(); - } - - Response get(String path) throws IOException { - return client.newCall(new Request.Builder().url(url(server, path)).build()).execute(); - } - - Response post(String path, byte[] body) throws IOException { - return client.newCall(new Request.Builder() - .url(url(server, path)) - .post(RequestBody.create(body)) - .build()).execute(); - } - - String scrape() throws Exception { - Thread.sleep(100); - return registry.scrape(); - } - - static double readDouble(String json, String jsonPath) { - return JsonPath.compile(jsonPath).read(json); - } -} diff --git a/zipkin-server/src/test/java/zipkin2/server/internal/prometheus/ZipkinPrometheusMetricsConfigurationTest.java b/zipkin-server/src/test/java/zipkin2/server/internal/prometheus/ZipkinPrometheusMetricsConfigurationTest.java deleted file mode 100644 index 0c847e57289..00000000000 --- a/zipkin-server/src/test/java/zipkin2/server/internal/prometheus/ZipkinPrometheusMetricsConfigurationTest.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal.prometheus; - -import com.linecorp.armeria.spring.ArmeriaServerConfigurator; -import org.junit.After; -import org.junit.Test; -import org.springframework.boot.autoconfigure.context.PropertyPlaceholderAutoConfiguration; -import org.springframework.boot.test.util.TestPropertyValues; -import org.springframework.context.annotation.AnnotationConfigApplicationContext; - -import static org.assertj.core.api.Assertions.assertThat; - -public class ZipkinPrometheusMetricsConfigurationTest { - AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext(); - - public void refresh() { - context.register( - PropertyPlaceholderAutoConfiguration.class, - ZipkinPrometheusMetricsConfiguration.class - ); - context.refresh(); - } - - @After public void close() { - context.close(); - } - - @Test public void providesHttpRequestDurationCustomizer() { - refresh(); - - context.getBeansOfType(ArmeriaServerConfigurator.class); - } - - @Test public void defaultMetricName() { - refresh(); - - assertThat(context.getBean(ZipkinPrometheusMetricsConfiguration.class).metricName) - .isEqualTo("http.server.requests"); - } - - @Test public void overrideMetricName() { - TestPropertyValues.of("management.metrics.web.server.requests-metric-name:foo").applyTo(context); - refresh(); - - assertThat(context.getBean(ZipkinPrometheusMetricsConfiguration.class).metricName) - .isEqualTo("foo"); - } -} diff --git a/zipkin-server/src/test/java/zipkin2/server/internal/rabbitmq/Access.java b/zipkin-server/src/test/java/zipkin2/server/internal/rabbitmq/Access.java deleted file mode 100644 index eca2a468c48..00000000000 --- a/zipkin-server/src/test/java/zipkin2/server/internal/rabbitmq/Access.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.server.internal.rabbitmq; - -import org.springframework.boot.autoconfigure.context.PropertyPlaceholderAutoConfiguration; -import org.springframework.boot.context.properties.EnableConfigurationProperties; -import org.springframework.context.annotation.AnnotationConfigApplicationContext; -import org.springframework.context.annotation.Configuration; -import zipkin2.collector.rabbitmq.RabbitMQCollector; - -/** opens package access for testing */ -public final class Access { - - /** Just registering properties to avoid automatically connecting to a Rabbit MQ server */ - public static void registerRabbitMQProperties(AnnotationConfigApplicationContext context) { - context.register( - PropertyPlaceholderAutoConfiguration.class, EnableRabbitMQCollectorProperties.class); - } - - @Configuration - @EnableConfigurationProperties(ZipkinRabbitMQCollectorProperties.class) - static class EnableRabbitMQCollectorProperties {} - - public static RabbitMQCollector.Builder collectorBuilder( - AnnotationConfigApplicationContext context) throws Exception { - return context.getBean(ZipkinRabbitMQCollectorProperties.class).toBuilder(); - } -} diff --git a/zipkin-server/src/test/java/zipkin2/server/internal/rabbitmq/ZipkinRabbitMQCollectorConfigurationTest.java b/zipkin-server/src/test/java/zipkin2/server/internal/rabbitmq/ZipkinRabbitMQCollectorConfigurationTest.java deleted file mode 100644 index e29ab7c46c6..00000000000 --- a/zipkin-server/src/test/java/zipkin2/server/internal/rabbitmq/ZipkinRabbitMQCollectorConfigurationTest.java +++ /dev/null @@ -1,98 +0,0 
@@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.server.internal.rabbitmq; - -import org.junit.After; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.springframework.beans.factory.BeanCreationException; -import org.springframework.beans.factory.NoSuchBeanDefinitionException; -import org.springframework.boot.autoconfigure.context.PropertyPlaceholderAutoConfiguration; -import org.springframework.boot.test.util.TestPropertyValues; -import org.springframework.context.annotation.AnnotationConfigApplicationContext; -import zipkin2.collector.rabbitmq.RabbitMQCollector; -import zipkin2.server.internal.InMemoryConfiguration; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.failBecauseExceptionWasNotThrown; - -public class ZipkinRabbitMQCollectorConfigurationTest { - - @Rule public ExpectedException thrown = ExpectedException.none(); - - AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext(); - - @After public void close() { - context.close(); - } - - @Test public void doesNotProvideCollectorComponent_whenAddressAndUriNotSet() { - context.register( - PropertyPlaceholderAutoConfiguration.class, - ZipkinRabbitMQCollectorConfiguration.class, - InMemoryConfiguration.class); - context.refresh(); - - thrown.expect(NoSuchBeanDefinitionException.class); - 
context.getBean(RabbitMQCollector.class); - } - - @Test public void doesNotProvideCollectorComponent_whenAddressesAndUriIsEmptyString() { - context = new AnnotationConfigApplicationContext(); - TestPropertyValues.of( - "zipkin.collector.rabbitmq.addresses:", - "zipkin.collector.rabbitmq.uri:") - .applyTo(context); - context.register( - PropertyPlaceholderAutoConfiguration.class, - ZipkinRabbitMQCollectorConfiguration.class, - InMemoryConfiguration.class); - context.refresh(); - - thrown.expect(NoSuchBeanDefinitionException.class); - context.getBean(RabbitMQCollector.class); - } - - @Test public void providesCollectorComponent_whenAddressesSet() { - context = new AnnotationConfigApplicationContext(); - TestPropertyValues.of("zipkin.collector.rabbitmq.addresses:localhost:1234").applyTo(context); - context.register( - PropertyPlaceholderAutoConfiguration.class, - ZipkinRabbitMQCollectorConfiguration.class, - InMemoryConfiguration.class); - - try { - context.refresh(); - failBecauseExceptionWasNotThrown(BeanCreationException.class); - } catch (BeanCreationException e) { - assertThat(e.getCause()).hasMessageContaining( - "Unable to establish connection to RabbitMQ server: Connection refused"); - } - } - - @Test public void doesNotProvidesCollectorComponent_whenAddressesSetAndDisabled() { - context = new AnnotationConfigApplicationContext(); - TestPropertyValues.of("zipkin.collector.rabbitmq.addresses:localhost:1234").applyTo(context); - TestPropertyValues.of("zipkin.collector.rabbitmq.enabled:false").applyTo(context); - context.register( - PropertyPlaceholderAutoConfiguration.class, - ZipkinRabbitMQCollectorConfiguration.class, - InMemoryConfiguration.class); - context.refresh(); - - thrown.expect(NoSuchBeanDefinitionException.class); - context.getBean(RabbitMQCollector.class); - } -} diff --git a/zipkin-server/src/test/java/zipkin2/server/internal/rabbitmq/ZipkinRabbitMQCollectorPropertiesTest.java 
b/zipkin-server/src/test/java/zipkin2/server/internal/rabbitmq/ZipkinRabbitMQCollectorPropertiesTest.java deleted file mode 100644 index bda359b6101..00000000000 --- a/zipkin-server/src/test/java/zipkin2/server/internal/rabbitmq/ZipkinRabbitMQCollectorPropertiesTest.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.server.internal.rabbitmq; - -import com.rabbitmq.client.ConnectionFactory; -import java.net.URI; -import java.util.Collections; -import org.junit.Test; - -import static org.assertj.core.api.Assertions.assertThat; - -public class ZipkinRabbitMQCollectorPropertiesTest { - ZipkinRabbitMQCollectorProperties properties = new ZipkinRabbitMQCollectorProperties(); - - @Test public void uriProperlyParsedAndIgnoresOtherProperties_whenUriSet() throws Exception { - properties.setUri(URI.create("amqp://admin:admin@localhost:5678/myv")); - properties.setAddresses(Collections.singletonList("will_not^work!")); - properties.setUsername("bob"); - properties.setPassword("letmein"); - properties.setVirtualHost("drwho"); - - assertThat(properties.toBuilder()) - .extracting("connectionFactory") - .satisfies(object -> { - ConnectionFactory connFactory = (ConnectionFactory) object; - assertThat(connFactory.getHost()).isEqualTo("localhost"); - assertThat(connFactory.getPort()).isEqualTo(5678); - assertThat(connFactory.getUsername()).isEqualTo("admin"); - 
assertThat(connFactory.getPassword()).isEqualTo("admin"); - assertThat(connFactory.getVirtualHost()).isEqualTo("myv"); - }); - } - - /** This prevents an empty RABBIT_URI variable from being mistaken as a real one */ - @Test public void ignoresEmptyURI() { - properties.setUri(URI.create("")); - - assertThat(properties.getUri()).isNull(); - } -} diff --git a/zipkin-server/src/test/java/zipkin2/server/internal/throttle/FakeCall.java b/zipkin-server/src/test/java/zipkin2/server/internal/throttle/FakeCall.java deleted file mode 100644 index 1d8b7d317fd..00000000000 --- a/zipkin-server/src/test/java/zipkin2/server/internal/throttle/FakeCall.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal.throttle; - -import java.util.concurrent.RejectedExecutionException; -import zipkin2.Call; -import zipkin2.Callback; - -class FakeCall extends Call { - boolean overCapacity = false; - - void setOverCapacity(boolean isOverCapacity) { - this.overCapacity = isOverCapacity; - } - - @Override public Void execute() { - if (overCapacity) throw new RejectedExecutionException(); - return null; - } - - @Override public void enqueue(Callback callback) { - if (overCapacity) { - callback.onError(new RejectedExecutionException()); - } else { - callback.onSuccess(null); - } - } - - @Override public void cancel() { - } - - @Override public boolean isCanceled() { - return false; - } - - @Override public Call clone() { - return null; - } -} diff --git a/zipkin-server/src/test/java/zipkin2/server/internal/throttle/ThrottledCallTest.java b/zipkin-server/src/test/java/zipkin2/server/internal/throttle/ThrottledCallTest.java deleted file mode 100644 index 7e980642841..00000000000 --- a/zipkin-server/src/test/java/zipkin2/server/internal/throttle/ThrottledCallTest.java +++ /dev/null @@ -1,283 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal.throttle; - -import com.linecorp.armeria.common.metric.NoopMeterRegistry; -import com.netflix.concurrency.limits.Limiter; -import com.netflix.concurrency.limits.Limiter.Listener; -import com.netflix.concurrency.limits.limit.SettableLimit; -import com.netflix.concurrency.limits.limiter.SimpleLimiter; -import java.io.IOException; -import java.io.UncheckedIOException; -import java.util.Optional; -import java.util.concurrent.Callable; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; -import java.util.concurrent.RejectedExecutionException; -import java.util.concurrent.Semaphore; -import java.util.function.Predicate; -import org.junit.After; -import org.junit.Test; -import zipkin2.Call; -import zipkin2.Callback; -import zipkin2.reporter.AwaitableCallback; - -import static com.linecorp.armeria.common.util.Exceptions.clearTrace; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.assertj.core.api.Assertions.failBecauseExceptionWasNotThrown; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; -import static zipkin2.server.internal.throttle.ThrottledCall.NOOP_CALLBACK; -import static zipkin2.server.internal.throttle.ThrottledCall.STORAGE_THROTTLE_MAX_CONCURRENCY; -import static zipkin2.server.internal.throttle.ThrottledStorageComponent.STORAGE_THROTTLE_MAX_QUEUE_SIZE; - -public class ThrottledCallTest { - SettableLimit limit = SettableLimit.startingAt(0); - SimpleLimiter limiter = SimpleLimiter.newBuilder().limit(limit).build(); - LimiterMetrics limiterMetrics = new LimiterMetrics(NoopMeterRegistry.get()); - Predicate isOverCapacity = 
RejectedExecutionException.class::isInstance; - - int numThreads = 1; - ExecutorService executor = Executors.newSingleThreadExecutor(); - - @After public void shutdownExecutor() { - executor.shutdown(); - } - - @Test public void niceToString() { - Call delegate = mock(Call.class); - when(delegate.toString()).thenReturn("StoreSpansCall{}"); - - assertThat(new ThrottledCall(delegate, executor, limiter, limiterMetrics, isOverCapacity)) - .hasToString("Throttled(StoreSpansCall{})"); - } - - @Test public void execute_isThrottled() throws Exception { - int queueSize = 1; - int totalTasks = numThreads + queueSize; - limit.setLimit(totalTasks); - - Semaphore startLock = new Semaphore(numThreads); - Semaphore waitLock = new Semaphore(totalTasks); - Semaphore failLock = new Semaphore(1); - ThrottledCall throttled = throttle(new LockedCall(startLock, waitLock)); - - // Step 1: drain appropriate locks - startLock.drainPermits(); - waitLock.drainPermits(); - failLock.drainPermits(); - - // Step 2: saturate threads and fill queue - ExecutorService backgroundPool = Executors.newCachedThreadPool(); - for (int i = 0; i < totalTasks; i++) { - backgroundPool.submit(() -> throttled.clone().execute()); - } - - try { - // Step 3: make sure the threads actually started - startLock.acquire(numThreads); - - // Step 4: submit something beyond our limits - Future future = backgroundPool.submit(() -> { - try { - throttled.execute(); - } catch (IOException e) { - throw new UncheckedIOException(e); - } finally { - // Step 6: signal that we tripped the limit - failLock.release(); - } - }); - - // Step 5: wait to make sure our limit actually tripped - failLock.acquire(); - - future.get(); - - // Step 7: Expect great things - failBecauseExceptionWasNotThrown(ExecutionException.class); - } catch (ExecutionException t) { - assertThat(t) - .isInstanceOf(ExecutionException.class) // from future.get - .hasCauseInstanceOf(RejectedExecutionException.class); - } finally { - waitLock.release(totalTasks); - 
startLock.release(totalTasks); - backgroundPool.shutdownNow(); - } - } - - @Test public void execute_throttlesBack_whenStorageRejects() throws Exception { - Listener listener = mock(Listener.class); - FakeCall call = new FakeCall(); - call.overCapacity = true; - - ThrottledCall throttle = - new ThrottledCall(call, executor, mockLimiter(listener), limiterMetrics, isOverCapacity); - - try { - throttle.execute(); - assertThat(true).isFalse(); // should raise a RejectedExecutionException - } catch (RejectedExecutionException e) { - verify(listener).onDropped(); - } - } - - @Test public void execute_ignoresLimit_whenPoolFull() throws Exception { - Listener listener = mock(Listener.class); - - ThrottledCall throttle = new ThrottledCall(new FakeCall(), mockExhaustedPool(), - mockLimiter(listener), limiterMetrics, isOverCapacity); - - try { - throttle.execute(); - assertThat(true).isFalse(); // should raise a RejectedExecutionException - } catch (RejectedExecutionException e) { - verify(listener).onIgnore(); - } - } - - @Test public void enqueue_isThrottled() throws Exception { - int queueSize = 1; - int totalTasks = numThreads + queueSize; - limit.setLimit(totalTasks); - - Semaphore startLock = new Semaphore(numThreads); - Semaphore waitLock = new Semaphore(totalTasks); - ThrottledCall throttle = throttle(new LockedCall(startLock, waitLock)); - - // Step 1: drain appropriate locks - startLock.drainPermits(); - waitLock.drainPermits(); - - // Step 2: saturate threads and fill queue - Callback callback = mock(Callback.class); - for (int i = 0; i < totalTasks; i++) { - throttle.clone().enqueue(callback); - } - - // Step 3: make sure the threads actually started - startLock.acquire(numThreads); - - // Step 4: submit something beyond our limits and make sure it fails - assertThatThrownBy(() -> throttle.clone().enqueue(callback)) - .isEqualTo(STORAGE_THROTTLE_MAX_CONCURRENCY); - } - - @Test public void enqueue_throttlesBack_whenStorageRejects() { - Listener listener = 
mock(Listener.class); - FakeCall call = new FakeCall(); - call.overCapacity = true; - - ThrottledCall throttle = - new ThrottledCall(call, executor, mockLimiter(listener), limiterMetrics, isOverCapacity); - - AwaitableCallback callback = new AwaitableCallback(); - throttle.enqueue(callback); - - assertThatThrownBy(callback::await).isEqualTo(OVER_CAPACITY); - - verify(listener).onDropped(); - } - - @Test public void enqueue_ignoresLimit_whenPoolFull() { - Listener listener = mock(Listener.class); - - ThrottledCall throttle = new ThrottledCall(new FakeCall(), mockExhaustedPool(), - mockLimiter(listener), limiterMetrics, isOverCapacity); - - assertThatThrownBy(() -> throttle.enqueue(NOOP_CALLBACK)) - .isEqualTo(STORAGE_THROTTLE_MAX_QUEUE_SIZE); - - verify(listener).onIgnore(); - } - - ThrottledCall throttle(Call delegate) { - return new ThrottledCall(delegate, executor, limiter, limiterMetrics, isOverCapacity); - } - - static final class LockedCall extends Call.Base { - final Semaphore startLock, waitLock; - - LockedCall(Semaphore startLock, Semaphore waitLock) { - this.startLock = startLock; - this.waitLock = waitLock; - } - - @Override public Void doExecute() { - try { - startLock.release(); - waitLock.acquire(); - return null; - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - throw new AssertionError(e); - } - } - - @Override public void doEnqueue(Callback callback) { - try { - callback.onSuccess(doExecute()); - } catch (Throwable t) { - propagateIfFatal(t); - callback.onError(t); - } - } - - @Override public LockedCall clone() { - return new LockedCall(startLock, waitLock); - } - } - - ExecutorService mockExhaustedPool() { - ExecutorService mock = mock(ExecutorService.class); - doThrow(STORAGE_THROTTLE_MAX_QUEUE_SIZE).when(mock).execute(any()); - doThrow(STORAGE_THROTTLE_MAX_QUEUE_SIZE).when(mock).submit(any(Callable.class)); - return mock; - } - - Limiter mockLimiter(Listener listener) { - Limiter mock = mock(Limiter.class); - 
when(mock.acquire(any())).thenReturn(Optional.of(listener)); - return mock; - } - - static final Exception OVER_CAPACITY = clearTrace(new RejectedExecutionException("overCapacity")); - - static final class FakeCall extends Call.Base { - boolean overCapacity = false; - - @Override public Void doExecute() { - throw new AssertionError("throttling never uses execute"); - } - - @Override public void doEnqueue(Callback callback) { - if (overCapacity) { - callback.onError(OVER_CAPACITY); - } else { - callback.onSuccess(null); - } - } - - @Override public FakeCall clone() { - return new FakeCall(); - } - } -} diff --git a/zipkin-server/src/test/java/zipkin2/server/internal/throttle/ThrottledStorageComponentTest.java b/zipkin-server/src/test/java/zipkin2/server/internal/throttle/ThrottledStorageComponentTest.java deleted file mode 100644 index c2fb01c6889..00000000000 --- a/zipkin-server/src/test/java/zipkin2/server/internal/throttle/ThrottledStorageComponentTest.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal.throttle; - -import brave.Tracing; -import com.linecorp.armeria.common.metric.NoopMeterRegistry; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import zipkin2.Component; -import zipkin2.internal.Nullable; -import zipkin2.server.internal.throttle.ThrottledStorageComponent.ThrottledSpanConsumer; -import zipkin2.storage.InMemoryStorage; -import zipkin2.storage.StorageComponent; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; - -public class ThrottledStorageComponentTest { - @Rule public ExpectedException expectedException = ExpectedException.none(); - InMemoryStorage delegate = InMemoryStorage.newBuilder().build(); - @Nullable Tracing tracing; - NoopMeterRegistry registry = NoopMeterRegistry.get(); - - @Test public void spanConsumer_isProxied() { - ThrottledStorageComponent throttle = - new ThrottledStorageComponent(delegate, registry, tracing, 1, 2, 1); - - assertThat(ThrottledSpanConsumer.class) - .isSameAs(throttle.spanConsumer().getClass()); - } - - @Test public void createComponent_withZeroSizedQueue() { - int queueSize = 0; - new ThrottledStorageComponent(delegate, registry, tracing, 1, 2, queueSize); - // no exception == pass - } - - @Test public void createComponent_withNegativeQueue() { - expectedException.expect(IllegalArgumentException.class); - int queueSize = -1; - new ThrottledStorageComponent(delegate, registry, tracing, 1, 2, queueSize); - } - - /** - * The {@code toString()} of {@link Component} implementations appear in health check endpoints. - * Since these are likely to be exposed in logs and other monitoring tools, care should be taken - * to ensure {@code toString()} output is a reasonable length and does not contain sensitive - * information. 
- */ - @Test public void toStringContainsOnlySummaryInformation() { - assertThat(new ThrottledStorageComponent(delegate, registry, tracing, 1, 2, 1)) - .hasToString("Throttled{InMemoryStorage{}}"); - } - - @Test public void delegatesCheck() { - StorageComponent mock = mock(StorageComponent.class); - - new ThrottledStorageComponent(mock, registry, tracing, 1, 2, 1).check(); - verify(mock, times(1)).check(); - } -} diff --git a/zipkin-server/src/test/java/zipkin2/server/internal/ui/ITZipkinUiConfiguration.java b/zipkin-server/src/test/java/zipkin2/server/internal/ui/ITZipkinUiConfiguration.java deleted file mode 100644 index b4f8d1b97ec..00000000000 --- a/zipkin-server/src/test/java/zipkin2/server/internal/ui/ITZipkinUiConfiguration.java +++ /dev/null @@ -1,156 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal.ui; - -import com.linecorp.armeria.client.WebClient; -import com.linecorp.armeria.common.AggregatedHttpResponse; -import com.linecorp.armeria.common.HttpHeaderNames; -import com.linecorp.armeria.common.HttpMethod; -import com.linecorp.armeria.common.RequestHeaders; -import com.linecorp.armeria.server.Server; -import java.io.IOException; -import java.io.UncheckedIOException; -import java.util.stream.Stream; -import okhttp3.OkHttpClient; -import okhttp3.Request; -import okhttp3.Response; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.boot.test.context.SpringBootTest; -import org.springframework.test.context.junit4.SpringRunner; -import zipkin.server.ZipkinServer; - -import static org.assertj.core.api.Assertions.assertThat; -import static zipkin2.server.internal.ITZipkinServer.stringFromClasspath; -import static zipkin2.server.internal.ITZipkinServer.url; - -@SpringBootTest( - classes = ZipkinServer.class, - webEnvironment = SpringBootTest.WebEnvironment.NONE, // RANDOM_PORT requires spring-web - properties = { - "server.port=0", - "spring.config.name=zipkin-server", - "zipkin.ui.base-path=/foozipkin", - "server.compression.enabled=true", - "server.compression.min-response-size=128" - }) -@RunWith(SpringRunner.class) -public class ITZipkinUiConfiguration { - @Autowired Server server; - OkHttpClient client = new OkHttpClient.Builder().followRedirects(false).build(); - - @Test public void configJson() throws Exception { - assertThat(get("/zipkin/config.json").body().string()).isEqualTo("" - + "{\n" - + " \"environment\" : \"\",\n" - + " \"queryLimit\" : 10,\n" - + " \"defaultLookback\" : 900000,\n" - + " \"searchEnabled\" : true,\n" - + " \"logsUrl\" : null,\n" - + " \"supportUrl\" : null,\n" - + " \"archivePostUrl\" : null,\n" - + " \"archiveUrl\" : null,\n" - + " \"dependency\" : {\n" - + " \"enabled\" : true,\n" - + " 
\"lowErrorRate\" : 0.5,\n" - + " \"highErrorRate\" : 0.75\n" - + " }\n" - + "}" - ); - } - - /** The zipkin-lens is a single-page app. This prevents reloading all resources on each click. */ - @Test public void setsMaxAgeOnUiResources() throws Exception { - assertThat(get("/zipkin/config.json").header("Cache-Control")) - .isEqualTo("max-age=600"); - assertThat(get("/zipkin/index.html").header("Cache-Control")) - .isEqualTo("max-age=60"); - assertThat(get("/zipkin/test.txt").header("Cache-Control")) - .isEqualTo("max-age=31536000"); - } - - @Test public void redirectsIndex() throws Exception { - String index = get("/zipkin/index.html").body().string(); - - client = new OkHttpClient.Builder().followRedirects(true).build(); - - Stream.of("/zipkin", "/").forEach(path -> { - try { - assertThat(get(path).body().string()).isEqualTo(index); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - }); - } - - /** Browsers honor conditional requests such as eTag. Let's make sure the server does */ - @Test public void conditionalRequests() { - Stream.of("/zipkin/config.json", "/zipkin/index.html", "/zipkin/test.txt").forEach(path -> { - try { - String etag = get(path).header("etag"); - assertThat(conditionalGet(path, etag).code()) - .isEqualTo(304); - assertThat(conditionalGet(path, "aargh").code()) - .isEqualTo(200); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - }); - } - - /** Some assets are pretty big. ensure they use compression. 
*/ - @Test public void supportsCompression() { - assertThat(getContentEncodingFromRequestThatAcceptsGzip("/zipkin/test.txt")) - .isNull(); // too small to compress - assertThat(getContentEncodingFromRequestThatAcceptsGzip("/zipkin/config.json")) - .isEqualTo("gzip"); - } - - /** - * The test sets the property {@code zipkin.ui.base-path=/foozipkin}, which should reflect in - * index.html - */ - @Test public void replacesBaseTag() throws Exception { - assertThat(get("/zipkin/index.html").body().string()) - .isEqualToIgnoringWhitespace(stringFromClasspath(getClass(), "zipkin-lens/index.html") - .replace("", "")); - } - - /** index.html is served separately. This tests other content is also loaded from the classpath. */ - @Test public void servesOtherContentFromClasspath() throws Exception { - assertThat(get("/zipkin/test.txt").body().string()) - .isEqualToIgnoringWhitespace(stringFromClasspath(getClass(), "zipkin-lens/test.txt")); - } - - private Response get(String path) throws IOException { - return client.newCall(new Request.Builder().url(url(server, path)).build()).execute(); - } - - private Response conditionalGet(String path, String etag) throws IOException { - return client.newCall(new Request.Builder() - .url(url(server, path)) - .header("If-None-Match", etag) - .build()).execute(); - } - - private String getContentEncodingFromRequestThatAcceptsGzip(String path) { - // We typically use OkHttp in our tests, but that automatically unzips.. 
- AggregatedHttpResponse response = WebClient.of(url(server, "/")) - .execute(RequestHeaders.of(HttpMethod.GET, path, HttpHeaderNames.ACCEPT_ENCODING, "gzip")) - .aggregate().join(); - - return response.headers().get(HttpHeaderNames.CONTENT_ENCODING); - } -} diff --git a/zipkin-server/src/test/java/zipkin2/server/internal/ui/ZipkinUiConfigurationTest.java b/zipkin-server/src/test/java/zipkin2/server/internal/ui/ZipkinUiConfigurationTest.java deleted file mode 100644 index c51c099316f..00000000000 --- a/zipkin-server/src/test/java/zipkin2/server/internal/ui/ZipkinUiConfigurationTest.java +++ /dev/null @@ -1,223 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.server.internal.ui; - -import com.linecorp.armeria.common.AggregatedHttpResponse; -import com.linecorp.armeria.common.HttpHeaderNames; -import com.linecorp.armeria.common.HttpMethod; -import com.linecorp.armeria.common.HttpRequest; -import com.linecorp.armeria.common.MediaType; -import com.linecorp.armeria.common.RequestHeaders; -import com.linecorp.armeria.server.HttpService; -import com.linecorp.armeria.server.ServiceRequestContext; -import io.netty.handler.codec.http.cookie.ClientCookieEncoder; -import io.netty.handler.codec.http.cookie.Cookie; -import io.netty.handler.codec.http.cookie.DefaultCookie; -import java.io.ByteArrayInputStream; -import org.junit.After; -import org.junit.Test; -import org.springframework.beans.factory.BeanCreationException; -import org.springframework.beans.factory.NoSuchBeanDefinitionException; -import org.springframework.boot.autoconfigure.context.PropertyPlaceholderAutoConfiguration; -import org.springframework.boot.test.util.TestPropertyValues; -import org.springframework.context.annotation.AnnotationConfigApplicationContext; -import org.springframework.core.io.ClassPathResource; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; - -public class ZipkinUiConfigurationTest { - - AnnotationConfigApplicationContext context; - - @After - public void close() { - if (context != null) { - context.close(); - } - } - - @Test - public void indexContentType() { - context = createContext(); - assertThat( - serveIndex().headers().contentType()) - .isEqualTo(MediaType.HTML_UTF_8); - } - - @Test - public void indexHtml() throws Exception { - // Instantiate directly so that spring doesn't cache it - ZipkinUiConfiguration ui = new ZipkinUiConfiguration(); - ui.ui = new ZipkinUiProperties(); - ui.lensIndexHtml = new ClassPathResource("does-not-exist.html"); - assertThatThrownBy(ui::indexService) - .isInstanceOf(BeanCreationException.class); - } - - 
@Test - public void canOverridesProperty_defaultLookback() { - context = createContextWithOverridenProperty("zipkin.ui.defaultLookback:100"); - - assertThat(context.getBean(ZipkinUiProperties.class).getDefaultLookback()) - .isEqualTo(100); - } - - @Test - public void canOverrideProperty_logsUrl() { - final String url = "http://mycompany.com/kibana"; - context = createContextWithOverridenProperty("zipkin.ui.logs-url:" + url); - - assertThat(context.getBean(ZipkinUiProperties.class).getLogsUrl()).isEqualTo(url); - } - - @Test - public void canOverrideProperty_archivePostUrl() { - final String url = "http://zipkin.archive.com/api/v2/spans"; - context = createContextWithOverridenProperty("zipkin.ui.archive-post-url:" + url); - - assertThat(context.getBean(ZipkinUiProperties.class).getArchivePostUrl()).isEqualTo(url); - } - - @Test - public void canOverrideProperty_archiveUrl() { - final String url = "http://zipkin.archive.com/zipkin/traces/{traceId}"; - context = createContextWithOverridenProperty("zipkin.ui.archive-url:" + url); - - assertThat(context.getBean(ZipkinUiProperties.class).getArchiveUrl()).isEqualTo(url); - } - - @Test - public void canOverrideProperty_supportUrl() { - final String url = "http://mycompany.com/file-a-bug"; - context = createContextWithOverridenProperty("zipkin.ui.support-url:" + url); - - assertThat(context.getBean(ZipkinUiProperties.class).getSupportUrl()).isEqualTo(url); - } - - @Test - public void logsUrlIsNullIfOverridenByEmpty() { - context = createContextWithOverridenProperty("zipkin.ui.logs-url:"); - - assertThat(context.getBean(ZipkinUiProperties.class).getLogsUrl()).isNull(); - } - - @Test - public void logsUrlIsNullByDefault() { - context = createContext(); - - assertThat(context.getBean(ZipkinUiProperties.class).getLogsUrl()).isNull(); - } - - @Test(expected = NoSuchBeanDefinitionException.class) - public void canOverridesProperty_disable() { - context = createContextWithOverridenProperty("zipkin.ui.enabled:false"); - - 
context.getBean(ZipkinUiProperties.class); - } - - @Test - public void canOverridesProperty_searchEnabled() { - context = createContextWithOverridenProperty("zipkin.ui.search-enabled:false"); - - assertThat(context.getBean(ZipkinUiProperties.class).isSearchEnabled()).isFalse(); - } - - @Test - public void canOverridesProperty_dependenciesEnabled() { - context = createContextWithOverridenProperty("zipkin.ui.dependency.enabled:false"); - - assertThat(context.getBean(ZipkinUiProperties.class).getDependency().isEnabled()).isFalse(); - } - - @Test - public void canOverrideProperty_dependencyLowErrorRate() { - context = createContextWithOverridenProperty("zipkin.ui.dependency.low-error-rate:0.1"); - - assertThat(context.getBean(ZipkinUiProperties.class).getDependency().getLowErrorRate()) - .isEqualTo(0.1f); - } - - @Test - public void canOverrideProperty_dependencyHighErrorRate() { - context = createContextWithOverridenProperty("zipkin.ui.dependency.high-error-rate:0.1"); - - assertThat(context.getBean(ZipkinUiProperties.class).getDependency().getHighErrorRate()) - .isEqualTo(0.1f); - } - - @Test - public void defaultBaseUrl_doesNotChangeResource() { - context = createContext(); - - assertThat(new ByteArrayInputStream(serveIndex().content().array())) - .hasSameContentAs(getClass().getResourceAsStream("/zipkin-lens/index.html")); - } - - @Test - public void canOverrideProperty_basePath() { - context = createContextWithOverridenProperty("zipkin.ui.basepath:/foo/bar"); - - assertThat(serveIndex().contentUtf8()) - .contains(""); - } - - @Test - public void lensCookieOverridesIndex() { - context = createContext(); - - assertThat(serveIndex(new DefaultCookie("lens", "true")).contentUtf8()) - .contains("zipkin-lens"); - } - - @Test - public void canOverrideProperty_specialCaseRoot() { - context = createContextWithOverridenProperty("zipkin.ui.basepath:/"); - - assertThat(serveIndex().contentUtf8()) - .contains(""); - } - - AggregatedHttpResponse serveIndex(Cookie... 
cookies) { - RequestHeaders headers = RequestHeaders.of(HttpMethod.GET, "/"); - String encodedCookies = ClientCookieEncoder.LAX.encode(cookies); - if (encodedCookies != null) { - headers = headers.toBuilder().set(HttpHeaderNames.COOKIE, encodedCookies).build(); - } - HttpRequest req = HttpRequest.of(headers); - try { - return context.getBean(HttpService.class) - .serve(ServiceRequestContext.of(req), req).aggregate() - .get(); - } catch (Exception e) { - throw new RuntimeException(e); - } - } - - private static AnnotationConfigApplicationContext createContext() { - AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext(); - context.register(PropertyPlaceholderAutoConfiguration.class, ZipkinUiConfiguration.class); - context.refresh(); - return context; - } - - private static AnnotationConfigApplicationContext createContextWithOverridenProperty( - String pair) { - AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext(); - TestPropertyValues.of(pair).applyTo(context); - context.register(PropertyPlaceholderAutoConfiguration.class, ZipkinUiConfiguration.class); - context.refresh(); - return context; - } -} diff --git a/zipkin-server/src/test/java/zipkin2/storage/cassandra/ZipkinCassandraStorageAutoConfigurationTest.java b/zipkin-server/src/test/java/zipkin2/storage/cassandra/ZipkinCassandraStorageAutoConfigurationTest.java deleted file mode 100644 index d5bc592c4cb..00000000000 --- a/zipkin-server/src/test/java/zipkin2/storage/cassandra/ZipkinCassandraStorageAutoConfigurationTest.java +++ /dev/null @@ -1,127 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.storage.cassandra; - -import org.junit.After; -import org.junit.Test; -import org.springframework.beans.factory.NoSuchBeanDefinitionException; -import org.springframework.boot.test.util.TestPropertyValues; -import org.springframework.context.annotation.AnnotationConfigApplicationContext; -import zipkin2.server.internal.cassandra3.Access; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; - -public class ZipkinCassandraStorageAutoConfigurationTest { - - AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext(); - - @After public void close() { - context.close(); - } - - @Test public void doesntProvidesStorageComponent_whenStorageTypeNotCassandra() { - TestPropertyValues.of("zipkin.storage.type:elasticsearch").applyTo(context); - Access.registerCassandra3(context); - context.refresh(); - - assertThatThrownBy(() -> context.getBean(CassandraStorage.class)) - .isInstanceOf(NoSuchBeanDefinitionException.class); - } - - @Test public void providesStorageComponent_whenStorageTypeCassandra() { - TestPropertyValues.of("zipkin.storage.type:cassandra3").applyTo(context); - Access.registerCassandra3(context); - context.refresh(); - - assertThat(context.getBean(CassandraStorage.class)).isNotNull(); - } - - @Test public void canOverridesProperty_contactPoints() { - TestPropertyValues.of( - "zipkin.storage.type:cassandra3", - "zipkin.storage.cassandra3.contact-points:host1,host2" // note snake-case supported - ).applyTo(context); - 
Access.registerCassandra3(context); - context.refresh(); - - assertThat(context.getBean(CassandraStorage.class).contactPoints).isEqualTo("host1,host2"); - } - - @Test public void strictTraceId_defaultsToTrue() { - TestPropertyValues.of("zipkin.storage.type:cassandra3").applyTo(context); - Access.registerCassandra3(context); - context.refresh(); - - assertThat(context.getBean(CassandraStorage.class).strictTraceId).isTrue(); - } - - @Test public void strictTraceId_canSetToFalse() { - TestPropertyValues.of( - "zipkin.storage.type:cassandra3", - "zipkin.storage.strict-trace-id:false") - .applyTo(context); - Access.registerCassandra3(context); - context.refresh(); - - assertThat(context.getBean(CassandraStorage.class).strictTraceId).isFalse(); - } - - @Test public void searchEnabled_canSetToFalse() { - TestPropertyValues.of( - "zipkin.storage.type:cassandra3", - "zipkin.storage.search-enabled:false") - .applyTo(context); - Access.registerCassandra3(context); - context.refresh(); - - assertThat(context.getBean(CassandraStorage.class).searchEnabled).isFalse(); - } - - @Test public void autocompleteKeys_list() { - TestPropertyValues.of( - "zipkin.storage.type:cassandra3", - "zipkin.storage.autocomplete-keys:environment") - .applyTo(context); - Access.registerCassandra3(context); - context.refresh(); - - assertThat(context.getBean(CassandraStorage.class).autocompleteKeys) - .containsOnly("environment"); - } - - @Test public void autocompleteTtl() { - TestPropertyValues.of( - "zipkin.storage.type:cassandra3", - "zipkin.storage.autocomplete-ttl:60000") - .applyTo(context); - Access.registerCassandra3(context); - context.refresh(); - - assertThat(context.getBean(CassandraStorage.class).autocompleteTtl) - .isEqualTo(60000); - } - - @Test public void autocompleteCardinality() { - TestPropertyValues.of( - "zipkin.storage.type:cassandra3", - "zipkin.storage.autocomplete-cardinality:5000") - .applyTo(context); - Access.registerCassandra3(context); - context.refresh(); - - 
assertThat(context.getBean(CassandraStorage.class).autocompleteCardinality) - .isEqualTo(5000); - } -} diff --git a/zipkin-server/src/test/java/zipkin2/storage/mysql/v1/ZipkinMySQLStorageConfigurationTest.java b/zipkin-server/src/test/java/zipkin2/storage/mysql/v1/ZipkinMySQLStorageConfigurationTest.java deleted file mode 100644 index 5c5e8b91a79..00000000000 --- a/zipkin-server/src/test/java/zipkin2/storage/mysql/v1/ZipkinMySQLStorageConfigurationTest.java +++ /dev/null @@ -1,155 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.storage.mysql.v1; - -import com.zaxxer.hikari.HikariDataSource; -import org.junit.After; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.springframework.beans.factory.NoSuchBeanDefinitionException; -import org.springframework.boot.test.util.TestPropertyValues; -import org.springframework.context.annotation.AnnotationConfigApplicationContext; -import zipkin2.server.internal.mysql.Access; - -import static org.assertj.core.api.Assertions.assertThat; - -public class ZipkinMySQLStorageConfigurationTest { - - @Rule public ExpectedException thrown = ExpectedException.none(); - - AnnotationConfigApplicationContext context; - - @After - public void close() { - if (context != null) { - context.close(); - } - } - - @Test - public void doesntProvidesStorageComponent_whenStorageTypeNotMySQL() { - context = new AnnotationConfigApplicationContext(); - TestPropertyValues.of("zipkin.storage.type:cassandra").applyTo(context); - Access.registerMySQL(context); - context.refresh(); - - thrown.expect(NoSuchBeanDefinitionException.class); - context.getBean(MySQLStorage.class); - } - - @Test - public void providesStorageComponent_whenStorageTypeMySQL() { - context = new AnnotationConfigApplicationContext(); - TestPropertyValues.of("zipkin.storage.type:mysql").applyTo(context); - Access.registerMySQL(context); - context.refresh(); - - assertThat(context.getBean(MySQLStorage.class)).isNotNull(); - } - - @Test - public void canOverridesProperty_username() { - context = new AnnotationConfigApplicationContext(); - TestPropertyValues.of( - "zipkin.storage.type:mysql", - "zipkin.storage.mysql.username:robot") - .applyTo(context); - Access.registerMySQL(context); - context.refresh(); - - assertThat(context.getBean(HikariDataSource.class).getUsername()).isEqualTo("robot"); - } - - @Test - public void strictTraceId_defaultsToTrue() { - context = new AnnotationConfigApplicationContext(); - 
TestPropertyValues.of("zipkin.storage.type:mysql").applyTo(context); - Access.registerMySQL(context); - context.refresh(); - assertThat(context.getBean(MySQLStorage.class).strictTraceId).isTrue(); - } - - @Test - public void strictTraceId_canSetToFalse() { - context = new AnnotationConfigApplicationContext(); - TestPropertyValues.of( - "zipkin.storage.type:mysql", - "zipkin.storage.strict-trace-id:false") - .applyTo(context); - Access.registerMySQL(context); - context.refresh(); - - assertThat(context.getBean(MySQLStorage.class).strictTraceId).isFalse(); - } - - @Test - public void searchEnabled_canSetToFalse() { - context = new AnnotationConfigApplicationContext(); - TestPropertyValues.of( - "zipkin.storage.type:mysql", - "zipkin.storage.search-enabled:false") - .applyTo(context); - Access.registerMySQL(context); - context.refresh(); - - assertThat(context.getBean(MySQLStorage.class).searchEnabled).isFalse(); - } - - @Test - public void autocompleteKeys_list() { - context = new AnnotationConfigApplicationContext(); - TestPropertyValues.of( - "zipkin.storage.type:mysql", - "zipkin.storage.autocomplete-keys:environment") - .applyTo(context); - Access.registerMySQL(context); - context.refresh(); - - assertThat(context.getBean(MySQLStorage.class).autocompleteKeys) - .containsOnly("environment"); - } - - @Test - public void usesJdbcUrl_whenPresent() { - context = new AnnotationConfigApplicationContext(); - TestPropertyValues.of( - "zipkin.storage.type:mysql", - "zipkin.storage.mysql" - + ".jdbc-url:jdbc:mysql://host1,host2,host3/zipkin") - .applyTo(context); - Access.registerMySQL(context); - context.refresh(); - - assertThat(context.getBean(HikariDataSource.class).getJdbcUrl()).isEqualTo("jdbc:mysql://host1,host2,host3/zipkin"); - } - - @Test - public void usesRegularConfig_whenBlank() { - context = new AnnotationConfigApplicationContext(); - TestPropertyValues.of( - "zipkin.storage.type:mysql", - "zipkin.storage.mysql.jdbc-url:", - "zipkin.storage.mysql.host:host", - 
"zipkin.storage.mysql.port:3306", - "zipkin.storage.mysql.username:root", - "zipkin.storage.mysql.password:secret", - "zipkin.storage.mysql.db:zipkin") - .applyTo(context); - Access.registerMySQL(context); - context.refresh(); - - assertThat(context.getBean(HikariDataSource.class).getJdbcUrl()).isEqualTo("jdbc:mysql://host:3306/zipkin?autoReconnect=true&useSSL=false&useUnicode=yes&characterEncoding=UTF-8"); - } -} diff --git a/zipkin-server/src/test/resources/application.yml b/zipkin-server/src/test/resources/application.yml deleted file mode 100644 index 7ff554e4215..00000000000 --- a/zipkin-server/src/test/resources/application.yml +++ /dev/null @@ -1,8 +0,0 @@ -spring.main.web-application-type: none - -# We are using Armeria instead of Tomcat. Have it inherit the default configuration from Spring -armeria: - ports: - - port: ${server.port} - protocols: - - http diff --git a/zipkin-server/src/test/resources/es-credentials b/zipkin-server/src/test/resources/es-credentials deleted file mode 100644 index 2f33f79ea1c..00000000000 --- a/zipkin-server/src/test/resources/es-credentials +++ /dev/null @@ -1,2 +0,0 @@ -zipkin.storage.elasticsearch.username=foo -zipkin.storage.elasticsearch.password=bar diff --git a/zipkin-server/src/test/resources/es-credentials-invalid b/zipkin-server/src/test/resources/es-credentials-invalid deleted file mode 100644 index 7c1a4b9d22b..00000000000 --- a/zipkin-server/src/test/resources/es-credentials-invalid +++ /dev/null @@ -1,2 +0,0 @@ -zipkin.storage.elasticsearch.username -bar diff --git a/zipkin-server/src/test/resources/keystore.jks b/zipkin-server/src/test/resources/keystore.jks deleted file mode 100644 index 19108e3579c..00000000000 Binary files a/zipkin-server/src/test/resources/keystore.jks and /dev/null differ diff --git a/zipkin-server/src/test/resources/keystore.p12 b/zipkin-server/src/test/resources/keystore.p12 deleted file mode 100644 index b47d7be0ed4..00000000000 Binary files 
a/zipkin-server/src/test/resources/keystore.p12 and /dev/null differ diff --git a/zipkin-server/src/test/resources/log4j2.properties b/zipkin-server/src/test/resources/log4j2.properties deleted file mode 100755 index a3f199a7124..00000000000 --- a/zipkin-server/src/test/resources/log4j2.properties +++ /dev/null @@ -1,17 +0,0 @@ -# Maven configuration conflicts on simplelogger vs Log4J2, but IntelliJ unit tests use Log4J2 -appenders=console -appender.console.type=Console -appender.console.name=STDOUT -appender.console.layout.type=PatternLayout -appender.console.layout.pattern=%d{ABSOLUTE} %-5p [%t] %C{2} (%F:%L) - %m%n -rootLogger.level=warn -rootLogger.appenderRefs=stdout -rootLogger.appenderRef.stdout.ref=STDOUT - -# uncomment to see outbound client connections (useful in Elasticsearch troubleshooting) -#logger.client.name=com.linecorp.armeria.client -#logger.client.level=info - -# example of enabling logging for a unit test -#logger.healthchecktest.name=zipkin2.server.internal.elasticsearch.ITElasticsearchHealthCheck -#logger.healthchecktest.level=info diff --git a/zipkin-server/src/test/resources/zipkin-lens/index.html b/zipkin-server/src/test/resources/zipkin-lens/index.html deleted file mode 100644 index f1540e86dbd..00000000000 --- a/zipkin-server/src/test/resources/zipkin-lens/index.html +++ /dev/null @@ -1,6 +0,0 @@ - - - - - zipkin-lens - diff --git a/zipkin-server/src/test/resources/zipkin-lens/test.txt b/zipkin-server/src/test/resources/zipkin-lens/test.txt deleted file mode 100644 index 3b18e512dba..00000000000 --- a/zipkin-server/src/test/resources/zipkin-lens/test.txt +++ /dev/null @@ -1 +0,0 @@ -hello world diff --git a/zipkin-storage/README.md b/zipkin-storage/README.md deleted file mode 100644 index bebfdc2c811..00000000000 --- a/zipkin-storage/README.md +++ /dev/null @@ -1,15 +0,0 @@ -# zipkin-storage - -Modules here implement popular storage options available by default in -the [server build](../zipkin-server). 
- -Please note all modules here require JRE 8+ eventhough `InMemoryStorage` -will run on JRE 6+. - -These libraries are also usable outside the server, for example in -custom collectors or storage pipelines. While compatibility guarantees -are strong, choices may be dropped over time. - -Storage modules ending in `-v1` are discouraged for new sites as they -use an older data model. At some point in the future, we will stop -publishing v1 storage options. diff --git a/zipkin-storage/cassandra/RATIONALE.md b/zipkin-storage/cassandra/RATIONALE.md deleted file mode 100644 index 54f35aec49e..00000000000 --- a/zipkin-storage/cassandra/RATIONALE.md +++ /dev/null @@ -1,15 +0,0 @@ -# zipkin-storage-cassandra rationale - -## Why do we use prepared statements? - -We use prepared statements (instead of simple statements) for anything executed more than once. -This reduces load on the server, as the CQL query does not have to parsed server-side again and -again. - -This applies even for health checks and querying for service names, which have only constant -parameters and do not select partition keys. - -When partition keys are in use, ex `SELECT * FROM span WHERE trace_id = ?`, prepared statements -offer a second advantage in that you get automatic token-aware routing. - -The above was distilled from https://groups.google.com/a/lists.datastax.com/d/msg/java-driver-user/d6wLkH3xDLI/jUWOokKVAgAJ diff --git a/zipkin-storage/cassandra/README.md b/zipkin-storage/cassandra/README.md deleted file mode 100644 index 04ce517523e..00000000000 --- a/zipkin-storage/cassandra/README.md +++ /dev/null @@ -1,180 +0,0 @@ -# zipkin-storage-cassandra - -This is a CQL-based Cassandra storage component, built upon the [Zipkin v2 api and model](https://zipkin.io/zipkin-api/#/default/post_spans). -This uses Cassandra 3.11.3+ features, but is tested against the latest patch of Cassandra 3.11. 
- -`CassandraSpanStore.getDependencies()` returns pre-aggregated dependency links (ex via [zipkin-dependencies](https://github.com/openzipkin/zipkin-dependencies)). - -The implementation uses the [Datastax Java Driver 4.x](https://github.com/datastax/java-driver). - -`zipkin2.storage.cassandra.CassandraStorage.Builder` includes defaults that will operate against a local Cassandra installation. - -## Logging -Since the underlying driver uses SLF4J, Zipkin's storage layer also uses -this (note SLF4J is supported out-of-the-box with no configuration in -zipkin-server). - -Zipkin's storage layer logs to the category "zipkin2.storage.cassandra", -but you may wish to see the entire "zipkin2" when troubleshooting. - -If you want to see requests and latency, set the logging category -"com.datastax.oss.driver.internal.core.tracker.RequestLogger" to DEBUG. -TRACE includes query values. - -See [Request Logger](https://docs.datastax.com/en/developer/java-driver/4.9/manual/core/request_tracker/#request-logger) for more details. - -## Testing -This module conditionally runs integration tests against a local Cassandra instance. - -This starts a docker container or attempts to re-use an existing cassandra node running on localhost. - -If you run tests via Maven or otherwise when Cassandra is not running, -you'll notice tests are silently skipped. -``` -Results : - -Tests run: 62, Failures: 0, Errors: 0, Skipped: 48 -``` - -This behaviour is intentional: We don't want to burden developers with -installing and running all storage options to test unrelated change. -That said, all integration tests run on pull request. - -### Running a single test - -To run a single integration test, use the following syntax: - -```bash -$ ./mvnw -Dit.test='ITCassandraStorage$ITSpanStore#getTraces_duration' -pl zipkin-storage/cassandra clean verify -``` - -## Strict trace ID -By default, trace identifiers are written at the length received to indexes and span tables. 
This -means if instrumentation downgraded a 128-bit trace ID to 64-bit, it will appear in a search as two -traces. This situation is possible when using unmaintained or out-of-date trace instrumentation. - -By setting strict trace ID to false, indexes only consider the right-most 16 chars, allowing mixed -trace length lookup at a slight collision risk. Retrieval of the 32-character trace ID is retained -by concatenating two columns in the span table like so: - -``` -trace_id text, // when strictTraceId=false, only contains right-most 16 chars -trace_id_high text, // when strictTraceId=false, contains left-most 16 chars if present -``` - -It is important to only set strict trace ID false during a transition and revert once complete, as -data written during this period is less intuitive for those using CQL, and contains a small -collision risk. - -## Tuning -This component is tuned to help reduce the size of indexes needed to -perform query operations. The most important aspects are described below. -See [CassandraStorage](src/main/java/zipkin2/storage/cassandra/CassandraStorage.java) for details. - -### Autocomplete indexing -Redundant requests to store autocomplete values are ignored for an hour -to reduce load. This is implemented by -[DelayLimiter](../../zipkin/src/main/java/zipkin2/internal/DelayLimiter.java) - -### Trace indexing -Indexing in CQL is simplified by SASI, for example, reducing the number -of tables from 7 down to 4 (from the original cassandra schema). SASI -also moves some write-amplification from CassandraSpanConsumer into C*. - -CassandraSpanConsumer directly writes to the tables `span`, -`trace_by_service_remote_service` `trace_by_service_span` and -`span_by_service`. The latter service based indexes amplify writes by a -factor of the distinct service names (`Span.localServiceName`). - -Other amplification happens internally to C*, visible in the increase -write latency (although write latency remains performant at single digit -milliseconds). 
- -#### `span` indexing -When queries only include a time range, trace ids are returned from a `ts_uuid` -range. This means no indexes are used when `GET /api/v2/traces` includes no -parameters or only `endTs` or `lookback`. - -Two secondary (SASI) indexes support `annotationQuery` with `serviceName`: -* `annotation_query` supports LIKE (substring match) in `░error░error=500░` -* `l_service` in used in conjunction with annotation_query searches. - -Ex, `GET /api/v2/traces?serviceName=tweetiebird&annotationQuery=error` results -in a single trace ID query against the above two indexes. - -Note: annotations with values longer than 256 characters are not written to the -`annotation_query` SASI, as they aren't intended for use in user queries. - -#### `trace_by_service_X` indexing - -`trace_by_service_X` rows are answers to a shard of trace query. A query -request is broken down into possibly multiple shards based on our index -implementation. - -Ex. `GET /api/v2/traces?serviceName=tweetiebird%remoteService=s3` - -Breaks down into two query shards (this example omits time range and limit) -* `(service=tweetiebird, span=)` -* `(service=tweetiebird, remote_service=s3)` - -The results intersect prioritizing on timestamp to return the distinct -trace IDs needed for a follow-up fetch. - -#### `trace_by_service_remote_service` indexing - -For example, a span in trace ID 1 named "get" created by "tweetiebird", -accessing the remote service "s3" results in the following row: - -* `service=service1, span=remote_service, ts=timestamp_millis, trace_id=1` - -This index is only used when the `remoteServiceName` query is used. Ex. -1. `GET /api/v2/traces?serviceName=tweetiebird&remoteServiceName=s3` -1. `GET /api/v2/traces?serviceName=tweetiebird&maxDuration=199500&remoteServiceName=s3` - -#### `trace_by_service_span` indexing - -For example, a span in trace ID 1 named "get" created by "service1", -taking 20 milliseconds results in the following rows: - -1. 
`service=service1, span=get, trace_id=1, ts=timestamp_millis, duration=200` -2. `service=service1, span=, trace_id=1, ts=timestamp_millis, duration=200` - -Here are corresponding queries that relate to the above rows: -1. `GET /api/v2/traces?serviceName=service1&spanName=get` -1. `GET /api/v2/traces?serviceName=service1&spanName=get&minDuration=200000` -1. `GET /api/v2/traces?serviceName=service1&minDuration=200000` -1. `GET /api/v2/traces?spanName=get` -1. `GET /api/v2/traces?maxDuration=199500` - -As you'll notice, the duration component is optional, and stored in -millisecond resolution as opposed to microsecond (which the query represents). -The final query shows that the input is rounded up to the nearest millisecond. - -The reason we can query on `duration` is due to a SASI index. Eventhough the -search granularity is millisecond, original duration data remains microsecond -granularity. Meanwhile, write performance is dramatically better than writing -discrete values, due to fewer distinct writes. - -#### Disabling indexing -Indexing is a good default, but some sites who don't use Zipkin UI's -"Find a Trace" screen may want to disable indexing. This means [indexing schema](src/main/resources/zipkin2-schema-indexes.cql) -won't be setup, nor written at runtime. This increases write throughput -and reduces size on disk by not amplifying writes with index data. - -[Disabling search](../../README.md#disabling-search) disables indexing. - -### Time-To_live -Time-To-Live is default now at the table level. It cannot be overridden in write requests. - -There's a different default TTL for trace data and indexes, 7 days vs 3 days respectively. The impact is that you can -retrieve a trace by ID for up to 7 days, but you can only search the last 3 days of traces (ex by service name). - -### Compaction -Time-series data is compacted using TimeWindowCompactionStrategy, a known improved over DateTieredCompactionStrategy. Data is -optimised for queries within a single day. 
The penalty of reading multiple days is small, a few disk seeks, compared to the -otherwise overhead of reading a significantly larger amount of data. - -### Benchmarking -Benchmarking the new datamodel demonstrates a significant performance improvement on reads. How much of this translates to the -Zipkin UI is hard to tell due to the complexity of CassandraSpanConsumer and how searches are possible. Benchmarking stress -profiles are found in traces-stress.yaml and trace_by_service_span-stress.yaml and span_by_service-stress.yaml. diff --git a/zipkin-storage/cassandra/pom.xml b/zipkin-storage/cassandra/pom.xml deleted file mode 100644 index c73d2b11967..00000000000 --- a/zipkin-storage/cassandra/pom.xml +++ /dev/null @@ -1,82 +0,0 @@ - - - - 4.0.0 - - - io.zipkin.zipkin2 - zipkin-storage-parent - 2.24.4-SNAPSHOT - - - zipkin-storage-cassandra - Storage: Cassandra - - - ${project.basedir}/../.. - - - - -Xep:CheckReturnValue:OFF -Xep:AutoValueImmutableFields:OFF - - - - - - io.netty - netty-bom - ${netty.version} - pom - import - - - - - - - com.google.auto.value - auto-value-annotations - ${auto-value.version} - - - com.google.auto.value - auto-value - ${auto-value.version} - provided - - - - com.datastax.oss - java-driver-core - ${java-driver.version} - - - - com.esri.geometry - * - - - org.apache.tinkerpop - * - - - - - - diff --git a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/AnnotationCodec.java b/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/AnnotationCodec.java deleted file mode 100644 index 68d3cb3528f..00000000000 --- a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/AnnotationCodec.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.storage.cassandra; - -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.api.core.type.codec.MappingCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import zipkin2.Annotation; -import zipkin2.internal.Nullable; - -/** - * For better performance, this relies on ordinals instead of name lookups. - * - *

0 = "ts" = {@link Annotation#timestamp()} - *

0 = "v" = {@link Annotation#value()} - */ -final class AnnotationCodec extends MappingCodec { - AnnotationCodec(TypeCodec innerCodec) { - super(innerCodec, GenericType.of(Annotation.class)); - } - - @Override public UserDefinedType getCqlType() { - return (UserDefinedType) super.getCqlType(); - } - - @Nullable @Override protected Annotation innerToOuter(@Nullable UdtValue value) { - if (value == null || value.isNull(0) || value.isNull(1)) return null; - return Annotation.create(value.getLong(0), value.getString(1)); - } - - @Nullable @Override protected UdtValue outerToInner(@Nullable Annotation value) { - if (value == null) return null; - return getCqlType().newValue().setLong(0, value.timestamp()).setString(1, value.value()); - } -} diff --git a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/CassandraAutocompleteTags.java b/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/CassandraAutocompleteTags.java deleted file mode 100644 index b3add5aac73..00000000000 --- a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/CassandraAutocompleteTags.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.storage.cassandra; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import zipkin2.Call; -import zipkin2.storage.AutocompleteTags; - -class CassandraAutocompleteTags implements AutocompleteTags { - final boolean enabled; - final Call> keysCall; - final SelectAutocompleteValues.Factory valuesCallFactory; - - CassandraAutocompleteTags(CassandraStorage storage) { - enabled = storage.searchEnabled - && !storage.autocompleteKeys.isEmpty() - && storage.metadata().hasAutocompleteTags; - keysCall = Call.create(Collections.unmodifiableList(new ArrayList<>(storage.autocompleteKeys))); - valuesCallFactory = enabled ? new SelectAutocompleteValues.Factory(storage.session()) : null; - } - - @Override public Call> getKeys() { - if (!enabled) return Call.emptyList(); - return keysCall.clone(); - } - - @Override public Call> getValues(String key) { - if (key == null) throw new NullPointerException("key == null"); - if (key.isEmpty()) throw new IllegalArgumentException("key was empty"); - if (!enabled) return Call.emptyList(); - return valuesCallFactory.create(key); - } -} diff --git a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/CassandraSpanConsumer.java b/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/CassandraSpanConsumer.java deleted file mode 100644 index e0666ea497f..00000000000 --- a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/CassandraSpanConsumer.java +++ /dev/null @@ -1,206 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.storage.cassandra; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.uuid.Uuids; -import java.util.AbstractMap.SimpleImmutableEntry; -import java.util.ArrayList; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; -import zipkin2.Annotation; -import zipkin2.Call; -import zipkin2.Span; -import zipkin2.internal.AggregateCall; -import zipkin2.internal.Nullable; -import zipkin2.storage.SpanConsumer; -import zipkin2.storage.cassandra.internal.call.InsertEntry; - -import static zipkin2.storage.cassandra.CassandraUtil.durationIndexBucket; -import static zipkin2.storage.cassandra.Schema.TABLE_AUTOCOMPLETE_TAGS; -import static zipkin2.storage.cassandra.Schema.TABLE_SERVICE_REMOTE_SERVICES; -import static zipkin2.storage.cassandra.Schema.TABLE_SERVICE_SPANS; - -class CassandraSpanConsumer implements SpanConsumer { // not final for testing - final CqlSession session; - final boolean strictTraceId, searchEnabled; - final InsertSpan.Factory insertSpan; - final Set autocompleteKeys; - - // Everything below here is null when search is disabled - @Nullable final InsertTraceByServiceRemoteService.Factory insertTraceByServiceRemoteService; - @Nullable final InsertTraceByServiceSpan.Factory insertTraceByServiceSpan; - @Nullable final InsertEntry.Factory insertServiceSpan; - @Nullable final InsertEntry.Factory insertServiceRemoteService; - @Nullable final InsertEntry.Factory insertAutocompleteValue; - - void clear() { - if 
(insertServiceSpan != null) insertServiceSpan.clear(); - if (insertServiceRemoteService != null) insertServiceRemoteService.clear(); - if (insertAutocompleteValue != null) insertAutocompleteValue.clear(); - } - - CassandraSpanConsumer(CassandraStorage storage) { - this( - storage.session(), storage.metadata(), - storage.strictTraceId, storage.searchEnabled, - storage.autocompleteKeys, storage.autocompleteTtl, storage.autocompleteCardinality - ); - } - - // Exposed to allow tests to switch from strictTraceId to not - CassandraSpanConsumer(CqlSession session, Schema.Metadata metadata, boolean strictTraceId, - boolean searchEnabled, Set autocompleteKeys, int autocompleteTtl, - int autocompleteCardinality) { - this.session = session; - this.strictTraceId = strictTraceId; - this.searchEnabled = searchEnabled; - this.autocompleteKeys = autocompleteKeys; - - insertSpan = new InsertSpan.Factory(session, strictTraceId, searchEnabled); - - if (!searchEnabled) { - insertTraceByServiceRemoteService = null; - insertTraceByServiceSpan = null; - insertServiceRemoteService = null; - insertServiceSpan = null; - insertAutocompleteValue = null; - return; - } - - insertTraceByServiceSpan = new InsertTraceByServiceSpan.Factory(session, strictTraceId); - if (metadata.hasRemoteService) { - insertTraceByServiceRemoteService = - new InsertTraceByServiceRemoteService.Factory(session, strictTraceId); - insertServiceRemoteService = new InsertEntry.Factory( - "INSERT INTO " + TABLE_SERVICE_REMOTE_SERVICES + " (service, remote_service) VALUES (?,?)", - session, autocompleteTtl, autocompleteCardinality - ); - } else { - insertTraceByServiceRemoteService = null; - insertServiceRemoteService = null; - } - insertServiceSpan = new InsertEntry.Factory( - "INSERT INTO " + TABLE_SERVICE_SPANS + " (service, span) VALUES (?,?)", - session, autocompleteTtl, autocompleteCardinality - ); - if (metadata.hasAutocompleteTags && !autocompleteKeys.isEmpty()) { - insertAutocompleteValue = new InsertEntry.Factory( 
- "INSERT INTO " + TABLE_AUTOCOMPLETE_TAGS + " (key, value) VALUES (?,?)", - session, autocompleteTtl, autocompleteCardinality - ); - } else { - insertAutocompleteValue = null; - } - } - - /** - * This fans out into many requests, last count was 2 * spans.size. If any of these fail, the - * returned future will fail. Most callers drop or log the result. - */ - @Override public Call accept(List input) { - if (input.isEmpty()) return Call.create(null); - - Set spans = new LinkedHashSet<>(); - Set> serviceRemoteServices = new LinkedHashSet<>(); - Set> serviceSpans = new LinkedHashSet<>(); - Set traceByServiceRemoteServices = - new LinkedHashSet<>(); - Set traceByServiceSpans = new LinkedHashSet<>(); - Set> autocompleteTags = new LinkedHashSet<>(); - - for (Span s : input) { - // indexing occurs by timestamp, so derive one if not present. - long ts_micro = s.timestampAsLong(); - if (ts_micro == 0L) ts_micro = guessTimestamp(s); - - // fallback to current time on the ts_uuid for span data, so we know when it was inserted - UUID ts_uuid = - new UUID( - Uuids.startOf(ts_micro != 0L ? (ts_micro / 1000L) : System.currentTimeMillis()) - .getMostSignificantBits(), - Uuids.random().getLeastSignificantBits()); - - spans.add(insertSpan.newInput(s, ts_uuid)); - - if (!searchEnabled) continue; - - // Empty values allow for api queries with blank service or span name - String service = s.localServiceName() != null ? s.localServiceName() : ""; - String span = - null != s.name() ? 
s.name() : ""; // Empty value allows for api queries without span name - - if (null == s.localServiceName()) continue; // don't index further w/o a service name - - // service span and remote service indexes is refreshed regardless of timestamp - String remoteService = s.remoteServiceName(); - if (insertServiceRemoteService != null && remoteService != null) { - serviceRemoteServices.add(new SimpleImmutableEntry<>(service, remoteService)); - } - serviceSpans.add(new SimpleImmutableEntry<>(service, span)); - - if (ts_micro == 0L) continue; // search is only valid with a timestamp, don't index w/o it! - int bucket = durationIndexBucket(ts_micro); // duration index is milliseconds not microseconds - long duration = s.durationAsLong() / 1000L; - traceByServiceSpans.add( - insertTraceByServiceSpan.newInput(service, span, bucket, ts_uuid, s.traceId(), duration)); - if (span.isEmpty()) continue; - - if (insertServiceRemoteService != null && remoteService != null) { - traceByServiceRemoteServices.add( - insertTraceByServiceRemoteService.newInput(service, remoteService, bucket, ts_uuid, - s.traceId())); - } - traceByServiceSpans.add( // Allows lookup without the span name - insertTraceByServiceSpan.newInput(service, "", bucket, ts_uuid, s.traceId(), duration)); - - if (insertAutocompleteValue != null) { - for (Map.Entry entry : s.tags().entrySet()) { - if (autocompleteKeys.contains(entry.getKey())) autocompleteTags.add(entry); - } - } - } - List> calls = new ArrayList<>(); - for (InsertSpan.Input span : spans) { - calls.add(insertSpan.create(span)); - } - for (Map.Entry serviceSpan : serviceSpans) { - insertServiceSpan.maybeAdd(serviceSpan, calls); - } - for (Map.Entry serviceRemoteService : serviceRemoteServices) { - insertServiceRemoteService.maybeAdd(serviceRemoteService, calls); - } - for (InsertTraceByServiceSpan.Input serviceSpan : traceByServiceSpans) { - calls.add(insertTraceByServiceSpan.create(serviceSpan)); - } - for (InsertTraceByServiceRemoteService.Input 
serviceRemoteService : traceByServiceRemoteServices) { - calls.add(insertTraceByServiceRemoteService.create(serviceRemoteService)); - } - for (Map.Entry autocompleteTag : autocompleteTags) { - insertAutocompleteValue.maybeAdd(autocompleteTag, calls); - } - return calls.isEmpty() ? Call.create(null) : AggregateCall.newVoidCall(calls); - } - - static long guessTimestamp(Span span) { - assert 0L == span.timestampAsLong() : "method only for when span has no timestamp"; - for (Annotation annotation : span.annotations()) { - if (0L < annotation.timestamp()) return annotation.timestamp(); - } - return 0L; // return a timestamp that won't match a query - } -} diff --git a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/CassandraSpanStore.java b/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/CassandraSpanStore.java deleted file mode 100644 index 10014282495..00000000000 --- a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/CassandraSpanStore.java +++ /dev/null @@ -1,317 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.storage.cassandra; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; -import com.datastax.oss.driver.api.core.uuid.Uuids; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import zipkin2.Call; -import zipkin2.Call.FlatMapper; -import zipkin2.DependencyLink; -import zipkin2.Span; -import zipkin2.internal.Nullable; -import zipkin2.storage.QueryRequest; -import zipkin2.storage.ServiceAndSpanNames; -import zipkin2.storage.SpanStore; -import zipkin2.storage.Traces; -import zipkin2.storage.cassandra.internal.KeyspaceMetadataUtil; -import zipkin2.storage.cassandra.internal.call.IntersectKeySets; -import zipkin2.storage.cassandra.internal.call.IntersectMaps; - -import static java.util.Arrays.asList; -import static zipkin2.storage.cassandra.CassandraUtil.durationIndexBucket; -import static zipkin2.storage.cassandra.CassandraUtil.traceIdsSortedByDescTimestamp; -import static zipkin2.storage.cassandra.Schema.TABLE_SERVICE_REMOTE_SERVICES; -import static zipkin2.storage.cassandra.Schema.TABLE_TRACE_BY_SERVICE_SPAN; - -class CassandraSpanStore implements SpanStore, Traces, ServiceAndSpanNames { //not final for testing - static final Logger LOG = LoggerFactory.getLogger(CassandraSpanStore.class); - - final int indexFetchMultiplier; - final boolean searchEnabled; - final SelectFromSpan.Factory spans; - final SelectDependencies.Factory dependencies; - - // Everything below here is null when search is disabled - final int indexTtl; // zero when disabled - @Nullable final Call> serviceNames; - @Nullable final SelectRemoteServiceNames.Factory remoteServiceNames; - @Nullable final SelectSpanNames.Factory spanNames; - @Nullable final SelectTraceIdsFromSpan.Factory spanTable; - @Nullable final 
SelectTraceIdsFromServiceSpan.Factory traceIdsFromServiceSpan; - @Nullable final SelectTraceIdsFromServiceRemoteService.Factory traceIdsFromServiceRemoteService; - - CassandraSpanStore(CassandraStorage storage) { - CqlSession session = storage.session(); - Schema.Metadata metadata = storage.metadata(); - int maxTraceCols = storage.maxTraceCols; - indexFetchMultiplier = storage.indexFetchMultiplier; - boolean strictTraceId = storage.strictTraceId; - searchEnabled = storage.searchEnabled; - - spans = new SelectFromSpan.Factory(session, strictTraceId, maxTraceCols); - dependencies = new SelectDependencies.Factory(session); - - if (!searchEnabled) { - indexTtl = 0; - serviceNames = null; - remoteServiceNames = null; - spanNames = null; - spanTable = null; - traceIdsFromServiceSpan = null; - traceIdsFromServiceRemoteService = null; - return; - } - - KeyspaceMetadata md = Schema.ensureKeyspaceMetadata(session, storage.keyspace); - indexTtl = KeyspaceMetadataUtil.getDefaultTtl(md, TABLE_TRACE_BY_SERVICE_SPAN); - serviceNames = new SelectServiceNames.Factory(session).create(); - if (metadata.hasRemoteService) { - remoteServiceNames = new SelectRemoteServiceNames.Factory(session); - traceIdsFromServiceRemoteService = - new SelectTraceIdsFromServiceRemoteService.Factory(session); - } else { - remoteServiceNames = null; - traceIdsFromServiceRemoteService = null; - } - spanNames = new SelectSpanNames.Factory(session); - traceIdsFromServiceSpan = new SelectTraceIdsFromServiceSpan.Factory(session); - spanTable = initialiseSelectTraceIdsFromSpan(session); - } - - /** - * This makes it possible to safely drop the annotations_query SASI. - * - *

If dropped, trying to search by annotation in the UI will throw an IllegalStateException. - */ - static SelectTraceIdsFromSpan.Factory initialiseSelectTraceIdsFromSpan(CqlSession session) { - try { - return new SelectTraceIdsFromSpan.Factory(session); - } catch (DriverException ex) { - LOG.warn("failed to prepare annotation_query index statements: " + ex.getMessage()); - return null; - } - } - - /** - * This fans out into a number of requests corresponding to query input. In simplest case, there - * is less than a day of data queried, and only one expression. This implies one call to fetch - * trace IDs and another to retrieve the span details. - * - *

The amount of backend calls increase in dimensions of query complexity, days of data, and - * limit of traces requested. For example, a query like "http.path=/foo and error" will be two - * select statements for the expression, possibly follow-up calls for pagination (when over 5K - * rows match). Once IDs are parsed, there's one call for each 5K rows of span data. This means - * "http.path=/foo and error" is minimally 3 network calls, the first two in parallel. - */ - @Override public Call>> getTraces(QueryRequest request) { - if (!searchEnabled) return Call.emptyList(); - - TimestampRange timestampRange = timestampRange(request); - // If we have to make multiple queries, over fetch on indexes as they don't return distinct - // (trace id, timestamp) rows. This mitigates intersection resulting in < limit traces - final int traceIndexFetchSize = request.limit() * indexFetchMultiplier; - List>> callsToIntersect = new ArrayList<>(); - - List annotationKeys = CassandraUtil.annotationKeys(request); - for (String annotationKey : annotationKeys) { - if (spanTable == null) { - throw new IllegalArgumentException(request.annotationQueryString() - + " query unsupported due to missing annotation_query index"); - } - callsToIntersect.add( - spanTable.newCall(request.serviceName(), annotationKey, timestampRange, traceIndexFetchSize) - ); - } - - // Bucketed calls can be expensive when service name isn't specified. This guards against abuse. - if (request.remoteServiceName() != null - || request.spanName() != null - || request.minDuration() != null - || callsToIntersect.isEmpty()) { - callsToIntersect.add(newBucketedTraceIdCall(request, timestampRange, traceIndexFetchSize)); - } - - if (callsToIntersect.size() == 1) { - return callsToIntersect.get(0) - .map(traceIdsSortedByDescTimestamp()) - .flatMap(spans.newFlatMapper(request)); - } - - // We achieve the AND goal, by intersecting each of the key sets. 
- IntersectKeySets intersectedTraceIds = new IntersectKeySets(callsToIntersect); - // @xxx the sorting by timestamp desc is broken here^ - return intersectedTraceIds.flatMap(spans.newFlatMapper(request)); - } - - /** - * Creates a call representing one or more queries against {@link Schema#TABLE_TRACE_BY_SERVICE_SPAN} - * and possibly {@link Schema#TABLE_TRACE_BY_SERVICE_REMOTE_SERVICE}. - * - *

The result will be an aggregate if the input requests's serviceName is null, both span name - * and remote service name are supplied, or there's more than one day of data in the timestamp - * range. - * - *

Note that when {@link QueryRequest#serviceName()} is null, the returned query composes over - * {@link #getServiceNames()}. This means that if you have 1000 service names, you will end up - * with a composition of at least 1000 calls. - */ - // TODO: smartly handle when serviceName is null. For example, rank recently written serviceNames - // and speculatively query those first. - Call> newBucketedTraceIdCall( - QueryRequest request, TimestampRange timestampRange, int traceIndexFetchSize) { - // trace_by_service_span adds special empty-string span name in order to search by all - String spanName = null != request.spanName() ? request.spanName() : ""; - Long minDuration = request.minDuration(), maxDuration = request.maxDuration(); - int startBucket = durationIndexBucket(timestampRange.startMillis * 1000); - int endBucket = durationIndexBucket(timestampRange.endMillis * 1000); - if (startBucket > endBucket) { - throw new IllegalArgumentException( - "Start bucket (" + startBucket + ") > end bucket (" + endBucket + ")"); - } - - // "" isn't a real value. it is used to template bucketed calls and replaced later - String serviceName = null != request.serviceName() ? request.serviceName() : ""; - - // TODO: ideally, the buckets are traversed backwards, only spawning queries for older buckets - // if younger buckets are empty. This will be an async continuation, punted for now. 
- List serviceSpans = new ArrayList<>(); - List serviceRemoteServices = new ArrayList<>(); - String remoteService = request.remoteServiceName(); - for (int bucket = endBucket; bucket >= startBucket; bucket--) { - boolean addSpanQuery = true; - if (remoteService != null) { - if (traceIdsFromServiceRemoteService == null) { - throw new IllegalArgumentException("remoteService=" + remoteService - + " unsupported due to missing table " + TABLE_SERVICE_REMOTE_SERVICES); - } - serviceRemoteServices.add( - traceIdsFromServiceRemoteService.newInput( - serviceName, - remoteService, - bucket, - timestampRange, - traceIndexFetchSize)); - // If the remote service query can satisfy the request, don't make a redundant span query - addSpanQuery = !spanName.isEmpty() || minDuration != null; - } - if (!addSpanQuery) continue; - - serviceSpans.add( - traceIdsFromServiceSpan.newInput( - serviceName, - spanName, - bucket, - minDuration, - maxDuration, - timestampRange, - traceIndexFetchSize)); - } - - if ("".equals(serviceName)) { - // If we have no service name, we have to lookup service names before running trace ID queries - Call> serviceNames = getServiceNames(); - if (serviceRemoteServices.isEmpty()) { - return serviceNames.flatMap(traceIdsFromServiceSpan.newFlatMapper(serviceSpans)); - } else if (serviceSpans.isEmpty()) { - return serviceNames.flatMap( - traceIdsFromServiceRemoteService.newFlatMapper(serviceRemoteServices)); - } - return serviceNames.flatMap(new AggregateFlatMapper<>( - traceIdsFromServiceSpan.newFlatMapper(serviceSpans), - traceIdsFromServiceRemoteService.newFlatMapper(serviceRemoteServices) - )); - } - if (serviceRemoteServices.isEmpty()) { - return traceIdsFromServiceSpan.newCall(serviceSpans); - } else if (serviceSpans.isEmpty()) { - return traceIdsFromServiceRemoteService.newCall(serviceRemoteServices); - } else { - return new IntersectMaps<>(asList( - traceIdsFromServiceSpan.newCall(serviceSpans), - 
traceIdsFromServiceRemoteService.newCall(serviceRemoteServices) - )); - } - } - - static class AggregateFlatMapper implements FlatMapper, Map> { - final FlatMapper, Map> left, right; - - AggregateFlatMapper(FlatMapper, Map> left, FlatMapper, Map> right) { - this.left = left; - this.right = right; - } - - @Override public Call> map(List input) { - return new IntersectMaps<>(asList(left.map(input), right.map(input))); - } - } - - @Override public Call> getTrace(String traceId) { - // make sure we have a 16 or 32 character trace ID - String normalizedTraceId = Span.normalizeTraceId(traceId); - return spans.newCall(normalizedTraceId); - } - - @Override public Call>> getTraces(Iterable traceIds) { - return spans.newCall(traceIds); - } - - @Override public Call> getServiceNames() { - if (!searchEnabled) return Call.emptyList(); - return serviceNames.clone(); - } - - @Override public Call> getRemoteServiceNames(String serviceName) { - if (serviceName.isEmpty() || !searchEnabled || remoteServiceNames == null) { - return Call.emptyList(); - } - return remoteServiceNames.create(serviceName); - } - - @Override public Call> getSpanNames(String serviceName) { - if (serviceName.isEmpty() || !searchEnabled) return Call.emptyList(); - return spanNames.create(serviceName); - } - - @Override public Call> getDependencies(long endTs, long lookback) { - if (endTs <= 0) throw new IllegalArgumentException("endTs <= 0"); - if (lookback <= 0) throw new IllegalArgumentException("lookback <= 0"); - return dependencies.create(endTs, lookback); - } - - static final class TimestampRange { - long startMillis; - UUID startUUID; - long endMillis; - UUID endUUID; - } - - TimestampRange timestampRange(QueryRequest request) { - long oldestData = Math.max(System.currentTimeMillis() - indexTtl * 1000, 0); // >= 1970 - TimestampRange result = new TimestampRange(); - result.startMillis = Math.max((request.endTs() - request.lookback()), oldestData); - result.startUUID = Uuids.startOf(result.startMillis); 
- result.endMillis = Math.max(request.endTs(), oldestData); - result.endUUID = Uuids.endOf(result.endMillis); - return result; - } -} diff --git a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/CassandraStorage.java b/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/CassandraStorage.java deleted file mode 100644 index 1cd3a37a691..00000000000 --- a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/CassandraStorage.java +++ /dev/null @@ -1,226 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.storage.cassandra; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.auth.AuthProvider; -import com.datastax.oss.driver.api.core.config.DriverOption; -import com.datastax.oss.driver.internal.core.auth.ProgrammaticPlainTextAuthProvider; -import java.util.Map; -import java.util.Set; -import zipkin2.Call; -import zipkin2.CheckResult; -import zipkin2.internal.Nullable; -import zipkin2.storage.AutocompleteTags; -import zipkin2.storage.ServiceAndSpanNames; -import zipkin2.storage.SpanConsumer; -import zipkin2.storage.SpanStore; -import zipkin2.storage.StorageComponent; -import zipkin2.storage.Traces; -import zipkin2.storage.cassandra.internal.CassandraStorageBuilder; -import zipkin2.storage.cassandra.internal.call.ResultSetFutureCall; - -/** - * CQL3 implementation of zipkin storage. - * - *

Queries are logged to the category "com.datastax.oss.driver.api.core.cql.QueryLogger" when - * debug or trace is enabled via SLF4J. Trace level includes bound values. - * - *

Schema is installed by default from "/zipkin2-schema.cql" - * - *

When {@link StorageComponent.Builder#strictTraceId(boolean)} is disabled, span and index data - * are uniformly written with 64-bit trace ID length. When retrieving data, an extra "trace_id_high" - * field clarifies if a 128-bit trace ID was sent. - */ -public final class CassandraStorage extends StorageComponent { - // @FunctionalInterface, except safe for lower language levels - public interface SessionFactory { - SessionFactory DEFAULT = new DefaultSessionFactory(); - - CqlSession create(CassandraStorage storage); - } - - public static Builder newBuilder() { - return new Builder(); - } - - public static final class Builder extends CassandraStorageBuilder { - SessionFactory sessionFactory = SessionFactory.DEFAULT; - - Builder() { - super(Schema.DEFAULT_KEYSPACE); - } - - /** Keyspace to store span and index data. Defaults to "zipkin2" */ - @Override public Builder keyspace(String keyspace) { - return super.keyspace(keyspace); - } - - /** - * Ensures that schema exists, if enabled tries to execute: - *

    - *
  1. io.zipkin.zipkin2:zipkin-storage-cassandra/zipkin2-schema.cql
  2. - *
  3. io.zipkin.zipkin2:zipkin-storage-cassandra/zipkin2-indexes.cql
  4. - *
- * Defaults to true. - */ - @Override public Builder ensureSchema(boolean ensureSchema) { - return super.ensureSchema(ensureSchema); - } - - /** Override to control how sessions are created. */ - public Builder sessionFactory(SessionFactory sessionFactory) { - if (sessionFactory == null) throw new NullPointerException("sessionFactory == null"); - this.sessionFactory = sessionFactory; - return this; - } - - @Override public CassandraStorage build() { - AuthProvider authProvider = null; - if (username != null) { - authProvider = new ProgrammaticPlainTextAuthProvider(username, password); - } - return new CassandraStorage(strictTraceId, searchEnabled, autocompleteKeys, autocompleteTtl, - autocompleteCardinality, contactPoints, localDc, poolingOptions(), authProvider, useSsl, - sessionFactory, keyspace, ensureSchema, maxTraceCols, indexFetchMultiplier); - } - } - - final boolean strictTraceId, searchEnabled; - final Set autocompleteKeys; - final int autocompleteTtl, autocompleteCardinality; - - final String contactPoints, localDc; - final Map poolingOptions; - @Nullable final AuthProvider authProvider; - final boolean useSsl; - final String keyspace; - final boolean ensureSchema; - - final int maxTraceCols, indexFetchMultiplier; - - final LazySession session; - - CassandraStorage(boolean strictTraceId, boolean searchEnabled, Set autocompleteKeys, - int autocompleteTtl, int autocompleteCardinality, String contactPoints, String localDc, - Map poolingOptions, AuthProvider authProvider, boolean useSsl, - SessionFactory sessionFactory, String keyspace, boolean ensureSchema, int maxTraceCols, - int indexFetchMultiplier) { - // Assign generic configuration for all storage components - this.strictTraceId = strictTraceId; - this.searchEnabled = searchEnabled; - this.autocompleteKeys = autocompleteKeys; - this.autocompleteTtl = autocompleteTtl; - this.autocompleteCardinality = autocompleteCardinality; - - // Assign configuration used to create a session - this.contactPoints = 
contactPoints; - this.localDc = localDc; - this.poolingOptions = poolingOptions; - this.authProvider = authProvider; - this.useSsl = useSsl; - this.ensureSchema = ensureSchema; - this.keyspace = keyspace; - - // Assign configuration used to control queries - this.maxTraceCols = maxTraceCols; - this.indexFetchMultiplier = indexFetchMultiplier; - - this.session = new LazySession(sessionFactory, this); - } - - /** close is typically called from a different thread */ - volatile boolean closeCalled; - - volatile CassandraSpanConsumer spanConsumer; - volatile CassandraSpanStore spanStore; - volatile CassandraAutocompleteTags tagStore; - - /** Lazy initializes or returns the session in use by this storage component. */ - CqlSession session() { - return session.get(); - } - - Schema.Metadata metadata() { - return session.metadata(); - } - - /** {@inheritDoc} Memoized in order to avoid re-preparing statements */ - @Override public SpanStore spanStore() { - if (spanStore == null) { - synchronized (this) { - if (spanStore == null) { - spanStore = new CassandraSpanStore(this); - } - } - } - return spanStore; - } - - @Override public Traces traces() { - return (Traces) spanStore(); - } - - @Override public ServiceAndSpanNames serviceAndSpanNames() { - return (ServiceAndSpanNames) spanStore(); - } - - @Override public AutocompleteTags autocompleteTags() { - if (tagStore == null) { - synchronized (this) { - if (tagStore == null) { - tagStore = new CassandraAutocompleteTags(this); - } - } - } - return tagStore; - } - - // Memoized in order to avoid re-preparing statements - @Override public SpanConsumer spanConsumer() { - if (spanConsumer == null) { - synchronized (this) { - if (spanConsumer == null) { - spanConsumer = new CassandraSpanConsumer(this); - } - } - } - return spanConsumer; - } - - @Override public boolean isOverCapacity(Throwable e) { - return ResultSetFutureCall.isOverCapacity(e); - } - - @Override public final String toString() { - return 
"CassandraStorage{contactPoints=" + contactPoints + ", keyspace=" + keyspace + "}"; - } - - @Override public CheckResult check() { - if (closeCalled) throw new IllegalStateException("closed"); - try { - session.healthCheck(); - } catch (Throwable e) { - Call.propagateIfFatal(e); - return CheckResult.failed(e); - } - return CheckResult.OK; - } - - @Override public void close() { - if (closeCalled) return; - session.close(); - closeCalled = true; - } -} diff --git a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/CassandraUtil.java b/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/CassandraUtil.java deleted file mode 100644 index a6dfa8df3fa..00000000000 --- a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/CassandraUtil.java +++ /dev/null @@ -1,144 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.storage.cassandra; - -import java.math.BigInteger; -import java.net.InetAddress; -import java.net.UnknownHostException; -import java.time.Instant; -import java.time.LocalDate; -import java.time.ZoneOffset; -import java.util.ArrayList; -import java.util.Collections; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Map; -import java.util.Random; -import java.util.Set; -import java.util.TreeMap; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import zipkin2.Annotation; -import zipkin2.Call; -import zipkin2.Span; -import zipkin2.internal.DateUtil; -import zipkin2.internal.Nullable; -import zipkin2.internal.RecyclableBuffers; -import zipkin2.storage.QueryRequest; - -import static zipkin2.internal.RecyclableBuffers.SHORT_STRING_LENGTH; - -final class CassandraUtil { - static final Logger LOG = LoggerFactory.getLogger(CassandraUtil.class); - - /** - * Time window covered by a single bucket of the {@link Schema#TABLE_TRACE_BY_SERVICE_SPAN} and - * {@link Schema#TABLE_TRACE_BY_SERVICE_REMOTE_SERVICE}, in seconds. Default: 1 day - */ - private static final long DURATION_INDEX_BUCKET_WINDOW_SECONDS = - Long.getLong("zipkin.store.cassandra.internal.durationIndexBucket", 24 * 60 * 60); - - public static int durationIndexBucket(long ts_micro) { - // if the window constant has microsecond precision, the division produces negative getValues - return (int) (ts_micro / (DURATION_INDEX_BUCKET_WINDOW_SECONDS * 1_000_000)); - } - - /** - * Returns a set of annotation getValues and tags joined on equals, delimited by ░ - * - *

Values over {@link RecyclableBuffers#SHORT_STRING_LENGTH} are not considered. Zipkin's - * {@link QueryRequest#annotationQuery()} are equals match. Not all values are lookup values. For - * example, {@code sql.query} isn't something that is likely to be looked up by value and indexing - * that could add a potentially kilobyte partition key on {@link Schema#TABLE_SPAN} - * - * @see QueryRequest#annotationQuery() - */ - @Nullable static String annotationQuery(Span span) { - if (span.annotations().isEmpty() && span.tags().isEmpty()) return null; - - char delimiter = '░'; // as very unlikely to be in the query - StringBuilder result = new StringBuilder().append(delimiter); - for (Annotation a : span.annotations()) { - if (a.value().length() > SHORT_STRING_LENGTH) continue; - - result.append(a.value()).append(delimiter); - } - - for (Map.Entry tag : span.tags().entrySet()) { - if (tag.getValue().length() > SHORT_STRING_LENGTH) continue; - - result.append(tag.getKey()).append(delimiter); // search is possible by key alone - result.append(tag.getKey()).append('=').append(tag.getValue()).append(delimiter); - } - return result.length() == 1 ? 
null : result.toString(); - } - - static List annotationKeys(QueryRequest request) { - Set annotationKeys = new LinkedHashSet<>(); - for (Map.Entry e : request.annotationQuery().entrySet()) { - if (e.getValue().isEmpty()) { - annotationKeys.add(e.getKey()); - } else { - annotationKeys.add(e.getKey() + "=" + e.getValue()); - } - } - return new ArrayList<>(annotationKeys); - } - - static Call.Mapper, Set> traceIdsSortedByDescTimestamp() { - return TraceIdsSortedByDescTimestamp.INSTANCE; - } - - enum TraceIdsSortedByDescTimestamp implements Call.Mapper, Set> { - INSTANCE; - - @Override public Set map(Map map) { - // timestamps can collide, so we need to add some random digits on end before using them as - // serviceSpanKeys - TreeMap sorted = new TreeMap<>(Collections.reverseOrder()); - for (Map.Entry entry : map.entrySet()) { - BigInteger uncollided = - BigInteger.valueOf(entry.getValue()) - .multiply(OFFSET) - .add(BigInteger.valueOf(RAND.nextInt() & Integer.MAX_VALUE)); - sorted.put(uncollided, entry.getKey()); - } - return new LinkedHashSet<>(sorted.values()); - } - - @Override public String toString() { - return "TraceIdsSortedByDescTimestamp"; - } - - private static final Random RAND = new Random(System.nanoTime()); - private static final BigInteger OFFSET = BigInteger.valueOf(Integer.MAX_VALUE); - } - - static List getDays(long endTs, @Nullable Long lookback) { - List result = new ArrayList<>(); - for (long epochMillis : DateUtil.epochDays(endTs, lookback)) { - result.add(Instant.ofEpochMilli(epochMillis).atZone(ZoneOffset.UTC).toLocalDate()); - } - return result; - } - - @Nullable static InetAddress inetAddressOrNull(@Nullable String string, @Nullable byte[] bytes) { - try { - return bytes == null ? 
null : InetAddress.getByAddress(bytes); - } catch (UnknownHostException e) { - LOG.debug("InetAddress.getByAddress failed with input {}: {}", string, e.getMessage()); - return null; - } - } -} diff --git a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/DefaultSessionFactory.java b/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/DefaultSessionFactory.java deleted file mode 100644 index 82f623bfdd9..00000000000 --- a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/DefaultSessionFactory.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.storage.cassandra; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.codec.registry.MutableCodecRegistry; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import zipkin2.storage.cassandra.internal.SessionBuilder; - -import static zipkin2.Call.propagateIfFatal; - -/** - * Creates a session and ensures schema if configured. Closes the cluster and session if any - * exception occurred. 
- */ -final class DefaultSessionFactory implements CassandraStorage.SessionFactory { - static final Logger LOG = LoggerFactory.getLogger(Schema.class); - - /** - * Creates a session and ensures schema if configured. Closes the cluster and session if any - * exception occurred. - */ - @Override public CqlSession create(CassandraStorage cassandra) { - CqlSession session = null; - try { - session = buildSession(cassandra); - - String keyspace = cassandra.keyspace; - if (cassandra.ensureSchema) { - Schema.ensureExists(keyspace, cassandra.searchEnabled, session); - } else { - LOG.debug("Skipping schema check on keyspace {} as ensureSchema was false", keyspace); - } - - session.execute("USE " + keyspace); - initializeUDTs(session, keyspace); - - return session; - } catch (RuntimeException | Error e) { // don't leak on unexpected exception! - propagateIfFatal(e); - if (session != null) session.close(); - throw e; - } - } - - static CqlSession buildSession(CassandraStorage cassandra) { - return SessionBuilder.buildSession( - cassandra.contactPoints, - cassandra.localDc, - cassandra.poolingOptions, - cassandra.authProvider, - cassandra.useSsl - ); - } - - static void initializeUDTs(CqlSession session, String keyspace) { - KeyspaceMetadata ks = session.getMetadata().getKeyspace(keyspace).get(); - MutableCodecRegistry codecRegistry = - (MutableCodecRegistry) session.getContext().getCodecRegistry(); - - TypeCodec annotationUDTCodec = - codecRegistry.codecFor(ks.getUserDefinedType("annotation").get()); - codecRegistry.register(new AnnotationCodec(annotationUDTCodec)); - - LOG.debug("Registering endpoint and annotation UDTs to keyspace {}", keyspace); - TypeCodec endpointUDTCodec = - codecRegistry.codecFor(ks.getUserDefinedType("endpoint").get()); - codecRegistry.register(new EndpointCodec(endpointUDTCodec)); - } -} diff --git a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/EndpointCodec.java 
b/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/EndpointCodec.java deleted file mode 100644 index a97f1d69e18..00000000000 --- a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/EndpointCodec.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.storage.cassandra; - -import com.datastax.oss.driver.api.core.data.UdtValue; -import com.datastax.oss.driver.api.core.type.UserDefinedType; -import com.datastax.oss.driver.api.core.type.codec.MappingCodec; -import com.datastax.oss.driver.api.core.type.codec.TypeCodec; -import com.datastax.oss.driver.api.core.type.reflect.GenericType; -import zipkin2.Endpoint; -import zipkin2.internal.Nullable; - -import static zipkin2.storage.cassandra.CassandraUtil.inetAddressOrNull; - -final class EndpointCodec extends MappingCodec { - - EndpointCodec(TypeCodec innerCodec) { - super(innerCodec, GenericType.of(Endpoint.class)); - } - - @Override public UserDefinedType getCqlType() { - return (UserDefinedType) super.getCqlType(); - } - - @Nullable @Override protected Endpoint innerToOuter(@Nullable UdtValue value) { - if (value == null) return null; - Endpoint.Builder builder = - Endpoint.newBuilder().serviceName(value.getString("service")).port(value.getInt("port")); - builder.parseIp(value.getInetAddress("ipv4")); - builder.parseIp(value.getInetAddress("ipv6")); - return builder.build(); - } - - 
@Nullable @Override protected UdtValue outerToInner(@Nullable Endpoint endpoint) { - if (endpoint == null) return null; - UdtValue result = getCqlType().newValue(); - result.setString("service", endpoint.serviceName()); - result.setInetAddress("ipv4", inetAddressOrNull(endpoint.ipv4(), endpoint.ipv4Bytes())); - result.setInetAddress("ipv6", inetAddressOrNull(endpoint.ipv6(), endpoint.ipv6Bytes())); - result.setInt("port", endpoint.portAsInt()); - return result; - } -} diff --git a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/InsertSpan.java b/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/InsertSpan.java deleted file mode 100644 index e624fb64410..00000000000 --- a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/InsertSpan.java +++ /dev/null @@ -1,191 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.storage.cassandra; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.BoundStatementBuilder; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.google.auto.value.AutoValue; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.CompletionStage; -import zipkin2.Annotation; -import zipkin2.Call; -import zipkin2.Endpoint; -import zipkin2.Span; -import zipkin2.internal.Nullable; -import zipkin2.storage.cassandra.internal.call.ResultSetFutureCall; - -import static zipkin2.storage.cassandra.Schema.TABLE_SPAN; - -final class InsertSpan extends ResultSetFutureCall { - @AutoValue abstract static class Input { - abstract UUID ts_uuid(); - - @Nullable abstract String trace_id_high(); - - abstract String trace_id(); - - @Nullable abstract String parent_id(); - - abstract String id(); - - @Nullable abstract String kind(); - - @Nullable abstract String span(); - - abstract long ts(); - - abstract long duration(); - - @Nullable abstract Endpoint l_ep(); - - @Nullable abstract Endpoint r_ep(); - - abstract List annotations(); - - abstract Map tags(); - - @Nullable abstract String annotation_query(); - - abstract boolean debug(); - - abstract boolean shared(); - } - - static final class Factory { - final CqlSession session; - final PreparedStatement preparedStatement; - final boolean strictTraceId, searchEnabled; - - Factory(CqlSession session, boolean strictTraceId, boolean searchEnabled) { - this.session = session; - String insertQuery = "INSERT INTO " + TABLE_SPAN - + " (trace_id,trace_id_high,ts_uuid,parent_id,id,kind,span,ts,duration,l_ep,r_ep,annotations,tags,debug,shared)" - + " VALUES (:trace_id,:trace_id_high,:ts_uuid,:parent_id,:id,:kind,:span,:ts,:duration,:l_ep,:r_ep,:annotations,:tags,:debug,:shared)"; - - if (searchEnabled) { - insertQuery = 
insertQuery.replace(",shared)", ",shared, l_service, annotation_query)"); - insertQuery = insertQuery.replace(",:shared)", ",:shared, :l_service, :annotation_query)"); - } - - this.preparedStatement = session.prepare(insertQuery); - this.strictTraceId = strictTraceId; - this.searchEnabled = searchEnabled; - } - - Input newInput(Span span, UUID ts_uuid) { - boolean traceIdHigh = !strictTraceId && span.traceId().length() == 32; - String annotation_query = searchEnabled ? CassandraUtil.annotationQuery(span) : null; - return new AutoValue_InsertSpan_Input( - ts_uuid, - traceIdHigh ? span.traceId().substring(0, 16) : null, - traceIdHigh ? span.traceId().substring(16) : span.traceId(), - span.parentId(), - span.id(), - span.kind() != null ? span.kind().name() : null, - span.name(), - span.timestampAsLong(), - span.durationAsLong(), - span.localEndpoint(), - span.remoteEndpoint(), - span.annotations(), - span.tags(), - annotation_query, - Boolean.TRUE.equals(span.debug()), - Boolean.TRUE.equals(span.shared())); - } - - Call create(Input span) { - return new InsertSpan(this, span); - } - } - - final Factory factory; - final Input input; - - InsertSpan(Factory factory, Input input) { - this.factory = factory; - this.input = input; - } - - /** - * TLDR: we are guarding against setting null, as doing so implies tombstones. We are dodging setX - * to keep code simpler than other alternatives described below. - * - *

If there's consistently 8 tombstones (nulls) per row, then we'll only need 125 spans in a - * trace (rows in a partition) to trigger the `tombstone_warn_threshold warnings being logged in - * the C* nodes. And if we go to 12500 spans in a trace then that whole trace partition would - * become unreadable. Cassandra warns at a 1000 tombstones in any query, and fails on 100000 - * tombstones. - * - *

There's also a small question about disk usage efficiency. Each tombstone is a cell name - * and basically empty cell value entry stored on disk. Given that the cells are, apart from tags - * and annotations, generally very small then this could be proportionally an unnecessary waste of - * disk. - * - *

To avoid this relying upon a number of variant prepared statements for inserting a span is - * the normal practice. - * - *

Another popular practice is to insert those potentially null columns as separate statements - * (and optionally put them together into UNLOGGED batches). This works as multiple writes to the - * same partition has little overhead, and here we're not worried about lack of isolation between - * those writes, as the write is asynchronous anyway. An example of this approach is in the - * cassandra-reaper project here: https://github.com/thelastpickle/cassandra-reaper/blob/master/src/server/src/main/java/io/cassandrareaper/storage/CassandraStorage.java#L622-L642 - */ - @Override protected CompletionStage newCompletionStage() { - BoundStatementBuilder bound = factory.preparedStatement.boundStatementBuilder() - .setUuid("ts_uuid", input.ts_uuid()) - .setString("trace_id", input.trace_id()) - .setString("id", input.id()); - - // Don't set null as we don't want to add tombstones - if (null != input.trace_id_high()) bound.setString("trace_id_high", input.trace_id_high()); - if (null != input.parent_id()) bound.setString("parent_id", input.parent_id()); - if (null != input.kind()) bound.setString("kind", input.kind()); - if (null != input.span()) bound.setString("span", input.span()); - if (0L != input.ts()) bound.setLong("ts", input.ts()); - if (0L != input.duration()) bound.setLong("duration", input.duration()); - if (null != input.l_ep()) bound.set("l_ep", input.l_ep(), Endpoint.class); - if (null != input.r_ep()) bound.set("r_ep", input.r_ep(), Endpoint.class); - if (!input.annotations().isEmpty()) { - bound.setList("annotations", input.annotations(), Annotation.class); - } - if (!input.tags().isEmpty()) bound.setMap("tags", input.tags(), String.class, String.class); - if (input.debug()) bound.setBoolean("debug", true); - if (input.shared()) bound.setBoolean("shared", true); - - if (factory.searchEnabled) { - if (null != input.l_ep()) bound.setString("l_service", input.l_ep().serviceName()); - if (null != input.annotation_query()) { - bound.setString("annotation_query", 
input.annotation_query()); - } - } - return factory.session.executeAsync(bound.build()); - } - - @Override public Void map(AsyncResultSet input) { - return null; - } - - @Override public String toString() { - return input.toString().replace("Input", "InsertSpan"); - } - - @Override public InsertSpan clone() { - return new InsertSpan(factory, input); - } -} diff --git a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/InsertTraceByServiceRemoteService.java b/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/InsertTraceByServiceRemoteService.java deleted file mode 100644 index bf93b75eb63..00000000000 --- a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/InsertTraceByServiceRemoteService.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.storage.cassandra; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.google.auto.value.AutoValue; -import java.util.UUID; -import java.util.concurrent.CompletionStage; -import zipkin2.Call; -import zipkin2.storage.cassandra.internal.call.ResultSetFutureCall; - -import static zipkin2.storage.cassandra.Schema.TABLE_TRACE_BY_SERVICE_REMOTE_SERVICE; - -final class InsertTraceByServiceRemoteService extends ResultSetFutureCall { - @AutoValue abstract static class Input { - abstract String service(); - - abstract String remote_service(); - - abstract int bucket(); - - abstract UUID ts(); - - abstract String trace_id(); - } - - static final class Factory { - final CqlSession session; - final PreparedStatement preparedStatement; - final boolean strictTraceId; - - Factory(CqlSession session, boolean strictTraceId) { - this.session = session; - this.preparedStatement = - session.prepare("INSERT INTO " + TABLE_TRACE_BY_SERVICE_REMOTE_SERVICE - + " (service,remote_service,bucket,ts,trace_id)" - + " VALUES (?,?,?,?,?)"); - this.strictTraceId = strictTraceId; - } - - Input newInput(String service, String remote_service, int bucket, UUID ts, String trace_id) { - return new AutoValue_InsertTraceByServiceRemoteService_Input( - service, - remote_service, - bucket, - ts, - !strictTraceId && trace_id.length() == 32 ? 
trace_id.substring(16) : trace_id); - } - - Call create(Input input) { - return new InsertTraceByServiceRemoteService(this, input); - } - } - - final Factory factory; - final Input input; - - InsertTraceByServiceRemoteService(Factory factory, Input input) { - this.factory = factory; - this.input = input; - } - - @Override protected CompletionStage newCompletionStage() { - return factory.session.executeAsync(factory.preparedStatement.boundStatementBuilder() - .setString(0, input.service()) - .setString(1, input.remote_service()) - .setInt(2, input.bucket()) - .setUuid(3, input.ts()) - .setString(4, input.trace_id()).build()); - } - - @Override public Void map(AsyncResultSet input) { - return null; - } - - @Override public String toString() { - return input.toString().replace("Input", "InsertTraceByServiceRemoteService"); - } - - @Override public InsertTraceByServiceRemoteService clone() { - return new InsertTraceByServiceRemoteService(factory, input); - } -} diff --git a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/InsertTraceByServiceSpan.java b/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/InsertTraceByServiceSpan.java deleted file mode 100644 index 5d7e7d1eec5..00000000000 --- a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/InsertTraceByServiceSpan.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.storage.cassandra; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.BoundStatementBuilder; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.google.auto.value.AutoValue; -import java.util.UUID; -import java.util.concurrent.CompletionStage; -import zipkin2.Call; -import zipkin2.storage.cassandra.internal.call.ResultSetFutureCall; - -import static zipkin2.storage.cassandra.Schema.TABLE_TRACE_BY_SERVICE_SPAN; - -final class InsertTraceByServiceSpan extends ResultSetFutureCall { - @AutoValue abstract static class Input { - abstract String service(); - - abstract String span(); - - abstract int bucket(); - - abstract UUID ts(); - - abstract String trace_id(); - - abstract long duration(); - } - - static final class Factory { - final CqlSession session; - final PreparedStatement preparedStatement; - final boolean strictTraceId; - - Factory(CqlSession session, boolean strictTraceId) { - this.session = session; - this.preparedStatement = session.prepare("INSERT INTO " + TABLE_TRACE_BY_SERVICE_SPAN - + " (service,span,bucket,ts,trace_id,duration)" - + " VALUES (?,?,?,?,?,?)"); - this.strictTraceId = strictTraceId; - } - - /** - * While {@link zipkin2.Span#duration()} cannot be zero, zero duration in milliseconds is - * permitted, as it implies the span took less than 1 millisecond (1-999us). - */ - Input newInput( - String service, String span, int bucket, UUID ts, String trace_id, long durationMillis) { - return new AutoValue_InsertTraceByServiceSpan_Input( - service, - span, - bucket, - ts, - !strictTraceId && trace_id.length() == 32 ? 
trace_id.substring(16) : trace_id, - durationMillis); - } - - Call create(Input input) { - return new InsertTraceByServiceSpan(this, input); - } - } - - final Factory factory; - final Input input; - - InsertTraceByServiceSpan(Factory factory, Input input) { - this.factory = factory; - this.input = input; - } - - @Override protected CompletionStage newCompletionStage() { - BoundStatementBuilder bound = factory.preparedStatement.boundStatementBuilder() - .setString(0, input.service()) - .setString(1, input.span()) - .setInt(2, input.bucket()) - .setUuid(3, input.ts()) - .setString(4, input.trace_id()); - - if (0L != input.duration()) bound.setLong(5, input.duration()); - - return factory.session.executeAsync(bound.build()); - } - - @Override public Void map(AsyncResultSet input) { - return null; - } - - @Override public String toString() { - return input.toString().replace("Input", "InsertTraceByServiceSpan"); - } - - @Override public InsertTraceByServiceSpan clone() { - return new InsertTraceByServiceSpan(factory, input); - } -} diff --git a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/LazySession.java b/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/LazySession.java deleted file mode 100644 index 2614543ad5f..00000000000 --- a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/LazySession.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. 
See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.storage.cassandra; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import zipkin2.storage.cassandra.CassandraStorage.SessionFactory; - -import static zipkin2.storage.cassandra.Schema.TABLE_SPAN; - -final class LazySession { - final SessionFactory sessionFactory; - final CassandraStorage storage; - volatile CqlSession session; - volatile PreparedStatement healthCheck; // guarded by session - volatile Schema.Metadata metadata; // guarded by session - - LazySession(SessionFactory sessionFactory, CassandraStorage storage) { - this.sessionFactory = sessionFactory; - this.storage = storage; - } - - CqlSession get() { - if (session == null) { - synchronized (this) { - if (session == null) { - session = sessionFactory.create(storage); - // cached here to warn only once when schema problems exist - metadata = Schema.readMetadata(session, storage.keyspace); - healthCheck = session.prepare("SELECT trace_id FROM " + TABLE_SPAN + " limit 1"); - } - } - } - return session; - } - - Schema.Metadata metadata() { - get(); - return metadata; - } - - ResultSet healthCheck() { - get(); - return session.execute(healthCheck.bind()); - } - - void close() { - CqlSession maybeSession = session; - if (maybeSession != null) { - session.close(); - session = null; - } - } -} diff --git a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/Schema.java b/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/Schema.java deleted file mode 100644 index 709f0fa4ae5..00000000000 --- a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/Schema.java +++ /dev/null @@ -1,160 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use 
this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.storage.cassandra; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; -import java.util.Map; -import java.util.UUID; -import java.util.regex.Pattern; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import static zipkin2.storage.cassandra.internal.Resources.resourceToString; - -final class Schema { - static final Logger LOG = LoggerFactory.getLogger(Schema.class); - - static final String TABLE_SPAN = "span"; - static final String TABLE_TRACE_BY_SERVICE_SPAN = "trace_by_service_span"; - static final String TABLE_TRACE_BY_SERVICE_REMOTE_SERVICE = "trace_by_service_remote_service"; - static final String TABLE_SERVICE_SPANS = "span_by_service"; - static final String TABLE_SERVICE_REMOTE_SERVICES = "remote_service_by_service"; - static final String TABLE_DEPENDENCY = "dependency"; - static final String TABLE_AUTOCOMPLETE_TAGS = "autocomplete_tags"; - - static final String DEFAULT_KEYSPACE = "zipkin2"; - static final String SCHEMA_RESOURCE = "/zipkin2-schema.cql"; - static final String INDEX_RESOURCE = "/zipkin2-schema-indexes.cql"; - static final String UPGRADE_1 = "/zipkin2-schema-upgrade-1.cql"; - static final String UPGRADE_2 = "/zipkin2-schema-upgrade-2.cql"; - - static Metadata readMetadata(CqlSession session, String keyspace) { - KeyspaceMetadata keyspaceMetadata = 
ensureKeyspaceMetadata(session, keyspace); - - Map replication = keyspaceMetadata.getReplication(); - if ("SimpleStrategy".equals(replication.get("class"))) { - if ("1".equals(replication.get("replication_factor"))) { - LOG.warn("running with RF=1, this is not suitable for production. Optimal is 3+"); - } - } - - boolean hasAutocompleteTags = hasUpgrade1_autocompleteTags(keyspaceMetadata); - if (!hasAutocompleteTags) { - LOG.warn( - "schema lacks autocomplete indexing: apply {}, or set CassandraStorage.ensureSchema=true", - UPGRADE_1); - } - - boolean hasRemoteService = hasUpgrade2_remoteService(keyspaceMetadata); - if (!hasRemoteService) { - LOG.warn( - "schema lacks remote service indexing: apply {}, or set CassandraStorage.ensureSchema=true", - UPGRADE_2); - } - - return new Metadata(hasAutocompleteTags, hasRemoteService); - } - - static final class Metadata { - final boolean hasAutocompleteTags, hasRemoteService; - - Metadata(boolean hasAutocompleteTags, boolean hasRemoteService) { - this.hasAutocompleteTags = hasAutocompleteTags; - this.hasRemoteService = hasRemoteService; - } - } - - static KeyspaceMetadata ensureKeyspaceMetadata(CqlSession session, String keyspace) { - ensureVersion(session.getMetadata()); - KeyspaceMetadata keyspaceMetadata = session.getMetadata().getKeyspace(keyspace).orElse(null); - if (keyspaceMetadata == null) { - throw new IllegalStateException( - String.format( - "Cannot read keyspace metadata for keyspace: %s and cluster: %s", - keyspace, session.getMetadata().getClusterName())); - } - return keyspaceMetadata; - } - - static Version ensureVersion(com.datastax.oss.driver.api.core.metadata.Metadata metadata) { - Version version = null; - for (Map.Entry entry : metadata.getNodes().entrySet()) { - version = entry.getValue().getCassandraVersion(); - if (version == null) throw new RuntimeException("node had no version: " + entry.getValue()); - if (Version.parse("3.11.3").compareTo(version) > 0) { - throw new RuntimeException(String.format( 
- "Node %s is running Cassandra %s, but minimum version is 3.11.3", - entry.getKey(), entry.getValue().getCassandraVersion())); - } - } - if (version == null) throw new RuntimeException("No nodes in the cluster"); - return version; - } - - static KeyspaceMetadata ensureExists(String keyspace, boolean searchEnabled, CqlSession session) { - KeyspaceMetadata result = session.getMetadata().getKeyspace(keyspace).orElse(null); - if (result == null || !result.getTable(Schema.TABLE_SPAN).isPresent()) { - LOG.info("Installing schema {} for keyspace {}", SCHEMA_RESOURCE, keyspace); - applyCqlFile(keyspace, session, SCHEMA_RESOURCE); - if (searchEnabled) { - LOG.info("Installing indexes {} for keyspace {}", INDEX_RESOURCE, keyspace); - applyCqlFile(keyspace, session, INDEX_RESOURCE); - } - // refresh metadata since we've installed the schema - result = ensureKeyspaceMetadata(session, keyspace); - } - if (searchEnabled && !hasUpgrade1_autocompleteTags(result)) { - LOG.info("Upgrading schema {}", UPGRADE_1); - applyCqlFile(keyspace, session, UPGRADE_1); - } - if (searchEnabled && !hasUpgrade2_remoteService(result)) { - LOG.info("Upgrading schema {}", UPGRADE_2); - applyCqlFile(keyspace, session, UPGRADE_2); - } - return result; - } - - static boolean hasUpgrade1_autocompleteTags(KeyspaceMetadata keyspaceMetadata) { - return keyspaceMetadata.getTable(TABLE_AUTOCOMPLETE_TAGS).isPresent(); - } - - static boolean hasUpgrade2_remoteService(KeyspaceMetadata keyspaceMetadata) { - return keyspaceMetadata.getTable(TABLE_SERVICE_REMOTE_SERVICES).isPresent(); - } - - static void applyCqlFile(String keyspace, CqlSession session, String resource) { - Version version = ensureVersion(session.getMetadata()); - for (String cmd : resourceToString(resource).split(";", 100)) { - cmd = cmd.trim().replace(" " + DEFAULT_KEYSPACE, " " + keyspace); - if (cmd.isEmpty()) continue; - cmd = reviseCQL(version, cmd); - session.execute(cmd); - } - } - - static String reviseCQL(Version version, String cql) { - 
if (version.getMajor() == 4) { - // read_repair_chance options were removed and make Cassandra crash starting in v4 - // See https://cassandra.apache.org/doc/latest/operating/read_repair.html#background-read-repair - cql = cql.replaceAll(" *AND [^\\s]*read_repair_chance = 0\n", ""); - } - return cql; - } - - Schema() { - } -} diff --git a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/SelectAutocompleteValues.java b/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/SelectAutocompleteValues.java deleted file mode 100644 index 6ee5b59a15e..00000000000 --- a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/SelectAutocompleteValues.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.storage.cassandra; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import java.util.List; -import java.util.concurrent.CompletionStage; -import zipkin2.Call; -import zipkin2.storage.cassandra.internal.call.DistinctSortedStrings; -import zipkin2.storage.cassandra.internal.call.ResultSetFutureCall; - -import static zipkin2.storage.cassandra.Schema.TABLE_AUTOCOMPLETE_TAGS; - -final class SelectAutocompleteValues extends ResultSetFutureCall { - static final class Factory { - final CqlSession session; - final PreparedStatement preparedStatement; - - Factory(CqlSession session) { - this.session = session; - this.preparedStatement = session.prepare("SELECT value" - + " FROM " + TABLE_AUTOCOMPLETE_TAGS - + " WHERE key=?" - + " LIMIT " + 10000); - } - - Call> create(String key) { - return new SelectAutocompleteValues(this, key).flatMap(DistinctSortedStrings.get()); - } - } - - final SelectAutocompleteValues.Factory factory; - final String key; - - SelectAutocompleteValues(SelectAutocompleteValues.Factory factory, String key) { - this.factory = factory; - this.key = key; - } - - @Override protected CompletionStage newCompletionStage() { - return factory.session.executeAsync(factory.preparedStatement.boundStatementBuilder() - .setString(0, key).build()); - } - - @Override public AsyncResultSet map(AsyncResultSet input) { - return input; - } - - @Override public Call clone() { - return new SelectAutocompleteValues(factory, key); - } -} diff --git a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/SelectDependencies.java b/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/SelectDependencies.java deleted file mode 100644 index 3f2d4cfba58..00000000000 --- a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/SelectDependencies.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright 
2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.storage.cassandra; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.Row; -import java.time.LocalDate; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.CompletionStage; -import zipkin2.Call; -import zipkin2.DependencyLink; -import zipkin2.internal.DependencyLinker; -import zipkin2.storage.cassandra.internal.call.ResultSetFutureCall; - -import static zipkin2.storage.cassandra.Schema.TABLE_DEPENDENCY; - -final class SelectDependencies extends ResultSetFutureCall> { - static final class Factory { - final CqlSession session; - final PreparedStatement preparedStatement; - - Factory(CqlSession session) { - this.session = session; - this.preparedStatement = session.prepare("SELECT parent,child,errors,calls" - + " FROM " + TABLE_DEPENDENCY - + " WHERE day IN ?"); - } - - Call> create(long endTs, long lookback) { - List days = CassandraUtil.getDays(endTs, lookback); - return new SelectDependencies(this, days); - } - } - - final Factory factory; - final List days; - - SelectDependencies(Factory factory, List days) { - this.factory = factory; - this.days = days; - } - - @Override protected CompletionStage newCompletionStage() { - return 
factory.session.executeAsync(factory.preparedStatement.boundStatementBuilder() - .setList(0, days, LocalDate.class).build()); - } - - @Override public String toString() { - return "SelectDependencies{days=" + days + "}"; - } - - @Override public SelectDependencies clone() { - return new SelectDependencies(factory, days); - } - - @Override public List map(AsyncResultSet rs) { - List unmerged = new ArrayList<>(); - for (Row row : rs.currentPage()) { - unmerged.add(DependencyLink.newBuilder() - .parent(row.getString("parent")) - .child(row.getString("child")) - .errorCount(row.getLong("errors")) - .callCount(row.getLong("calls")) - .build()); - } - return DependencyLinker.merge(unmerged); - } -} diff --git a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/SelectFromSpan.java b/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/SelectFromSpan.java deleted file mode 100644 index cb36a1ac1d4..00000000000 --- a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/SelectFromSpan.java +++ /dev/null @@ -1,220 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.storage.cassandra; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.Row; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Iterator; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.CompletionStage; -import java.util.function.BiConsumer; -import java.util.function.Supplier; -import zipkin2.Annotation; -import zipkin2.Call; -import zipkin2.Endpoint; -import zipkin2.Span; -import zipkin2.internal.FilterTraces; -import zipkin2.internal.Nullable; -import zipkin2.storage.GroupByTraceId; -import zipkin2.storage.QueryRequest; -import zipkin2.storage.StrictTraceId; -import zipkin2.storage.cassandra.internal.call.AccumulateAllResults; -import zipkin2.storage.cassandra.internal.call.ResultSetFutureCall; - -import static zipkin2.storage.cassandra.Schema.TABLE_SPAN; - -final class SelectFromSpan extends ResultSetFutureCall { - static final class Factory { - final CqlSession session; - final PreparedStatement preparedStatement; - final Call.Mapper, List>> groupByTraceId; - final boolean strictTraceId; - final int maxTraceCols; - - Factory(CqlSession session, boolean strictTraceId, int maxTraceCols) { - this.session = session; - this.preparedStatement = session.prepare( - "SELECT trace_id_high,trace_id,parent_id,id,kind,span,ts,duration,l_ep,r_ep,annotations,tags,debug,shared" - + " FROM " + TABLE_SPAN - + " WHERE trace_id IN ?" 
- + " LIMIT ?"); - this.strictTraceId = strictTraceId; - this.maxTraceCols = maxTraceCols; - this.groupByTraceId = GroupByTraceId.create(strictTraceId); - } - - Call> newCall(String hexTraceId) { - // Unless we are strict, truncate the trace ID to 64bit (encoded as 16 characters) - Set traceIds; - if (!strictTraceId && hexTraceId.length() == 32) { - traceIds = new LinkedHashSet<>(); - traceIds.add(hexTraceId); - traceIds.add(hexTraceId.substring(16)); - } else { - traceIds = Collections.singleton(hexTraceId); - } - - Call> result = - new SelectFromSpan(this, traceIds, maxTraceCols).flatMap(READ_SPANS); - return strictTraceId ? result.map(StrictTraceId.filterSpans(hexTraceId)) : result; - } - - Call>> newCall(Iterable traceIds) { - Set normalizedTraceIds = new LinkedHashSet<>(); - for (String traceId : traceIds) { - // make sure we have a 16 or 32 character trace ID - traceId = Span.normalizeTraceId(traceId); - // Unless we are strict, truncate the trace ID to 64bit (encoded as 16 characters) - if (!strictTraceId && traceId.length() == 32) traceId = traceId.substring(16); - normalizedTraceIds.add(traceId); - } - - if (normalizedTraceIds.isEmpty()) return Call.emptyList(); - Call>> result = new SelectFromSpan(this, normalizedTraceIds, maxTraceCols) - .flatMap(READ_SPANS) - .map(groupByTraceId); - return strictTraceId ? 
result.map(StrictTraceId.filterTraces(normalizedTraceIds)) : result; - } - - FlatMapper, List>> newFlatMapper(QueryRequest request) { - return new SelectSpansByTraceIds(this, request); - } - } - - final Factory factory; - final Set trace_id; - final int limit_; - - /** @param limit_ amount of spans per trace is almost always larger than trace IDs */ - SelectFromSpan(Factory factory, Set trace_id, int limit_) { - this.factory = factory; - this.trace_id = trace_id; - this.limit_ = limit_; - } - - @Override protected CompletionStage newCompletionStage() { - return factory.session.executeAsync(factory.preparedStatement.boundStatementBuilder() - // Switched Set to List which is higher overhead, as have to copy into it, but avoids this: - // com.datastax.oss.driver.api.core.type.codec.CodecNotFoundException: Codec not found for requested operation: [List(TEXT, not frozen) <-> java.util.Set] - .setList(0, new ArrayList<>(trace_id), String.class) - .setInt(1, limit_).build()); - } - - @Override public AsyncResultSet map(AsyncResultSet input) { - return input; - } - - @Override public String toString() { - return "SelectFromSpan{trace_id=" + trace_id + ", limit_=" + limit_ + "}"; - } - - @Override public SelectFromSpan clone() { - return new SelectFromSpan(factory, trace_id, limit_); - } - - static final class SelectSpansByTraceIds implements FlatMapper, List>> { - final Factory factory; - final int limit; - @Nullable final Call.Mapper>, List>> filter; - - SelectSpansByTraceIds(Factory factory, QueryRequest request) { - this.factory = factory; - this.limit = request.limit(); - // Cassandra always looks up traces by 64-bit trace ID, so we have to unconditionally filter - // when strict trace ID is enabled. - this.filter = factory.strictTraceId ? 
FilterTraces.create(request) : null; - } - - @Override public Call>> map(Set input) { - if (input.isEmpty()) return Call.emptyList(); - Set traceIds; - if (input.size() > limit) { - traceIds = new LinkedHashSet<>(); - Iterator iterator = input.iterator(); - for (int i = 0; i < limit; i++) { - traceIds.add(iterator.next()); - } - } else { - traceIds = input; - } - Call>> result = new SelectFromSpan(factory, traceIds, factory.maxTraceCols) - .flatMap(READ_SPANS) - .map(factory.groupByTraceId); - return filter != null ? result.map(filter) : result; - } - - @Override public String toString() { - return "SelectSpansByTraceIds{limit=" + limit + "}"; - } - } - - static final AccumulateAllResults> READ_SPANS = new ReadSpans(); - - static final class ReadSpans extends AccumulateAllResults> { - - @Override protected Supplier> supplier() { - return ArrayList::new; - } - - @Override protected BiConsumer> accumulator() { - return (row, result) -> { - String traceId = row.getString("trace_id"); - String traceIdHigh = row.getString("trace_id_high"); - if (traceIdHigh != null) traceId = traceIdHigh + traceId; - Span.Builder builder = Span.newBuilder() - .traceId(traceId) - .parentId(row.getString("parent_id")) - .id(row.getString("id")) - .name(row.getString("span")); - - if (!row.isNull("ts")) builder.timestamp(row.getLong("ts")); - if (!row.isNull("duration")) builder.duration(row.getLong("duration")); - - if (!row.isNull("kind")) { - try { - builder.kind(Span.Kind.valueOf(row.getString("kind"))); - } catch (IllegalArgumentException ignored) { - // EmptyCatch ignored - } - } - - if (!row.isNull("l_ep")) builder.localEndpoint(row.get("l_ep", Endpoint.class)); - if (!row.isNull("r_ep")) builder.remoteEndpoint(row.get("r_ep", Endpoint.class)); - - if (!row.isNull("debug")) builder.debug(row.getBoolean("debug")); - if (!row.isNull("shared")) builder.shared(row.getBoolean("shared")); - - for (Annotation annotation : row.getList("annotations", Annotation.class)) { - 
builder.addAnnotation(annotation.timestamp(), annotation.value()); - } - for (Map.Entry tag : - row.getMap("tags", String.class, String.class).entrySet()) { - builder.putTag(tag.getKey(), tag.getValue()); - } - result.add(builder.build()); - }; - } - - @Override public String toString() { - return "ReadSpans{}"; - } - } -} diff --git a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/SelectRemoteServiceNames.java b/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/SelectRemoteServiceNames.java deleted file mode 100644 index 2f0774022c7..00000000000 --- a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/SelectRemoteServiceNames.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.storage.cassandra; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import java.util.List; -import java.util.Locale; -import java.util.concurrent.CompletionStage; -import zipkin2.Call; -import zipkin2.storage.cassandra.internal.call.DistinctSortedStrings; -import zipkin2.storage.cassandra.internal.call.ResultSetFutureCall; - -import static zipkin2.storage.cassandra.Schema.TABLE_SERVICE_REMOTE_SERVICES; - -final class SelectRemoteServiceNames extends ResultSetFutureCall { - static final class Factory { - final CqlSession session; - final PreparedStatement preparedStatement; - - Factory(CqlSession session) { - this.session = session; - this.preparedStatement = session.prepare("SELECT remote_service" - + " FROM " + TABLE_SERVICE_REMOTE_SERVICES - + " WHERE service=?" - + " LIMIT " + 1000); - } - - Call> create(String serviceName) { - if (serviceName == null || serviceName.isEmpty()) return Call.emptyList(); - String service = serviceName.toLowerCase(Locale.ROOT); // service names are always lowercase! 
- return new SelectRemoteServiceNames(this, service).flatMap(DistinctSortedStrings.get()); - } - } - - final Factory factory; - final String service; - - SelectRemoteServiceNames(Factory factory, String service) { - this.factory = factory; - this.service = service; - } - - @Override protected CompletionStage newCompletionStage() { - return factory.session.executeAsync(factory.preparedStatement.boundStatementBuilder() - .setString(0, service).build()); - } - - @Override public AsyncResultSet map(AsyncResultSet input) { - return input; - } - - @Override public String toString() { - return "SelectSpanNames{service=" + service + "}"; - } - - @Override public SelectRemoteServiceNames clone() { - return new SelectRemoteServiceNames(factory, service); - } -} diff --git a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/SelectServiceNames.java b/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/SelectServiceNames.java deleted file mode 100644 index 0efef5aff34..00000000000 --- a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/SelectServiceNames.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.storage.cassandra; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import java.util.List; -import java.util.concurrent.CompletionStage; -import zipkin2.Call; -import zipkin2.storage.cassandra.internal.call.DistinctSortedStrings; -import zipkin2.storage.cassandra.internal.call.ResultSetFutureCall; - -import static zipkin2.storage.cassandra.Schema.TABLE_SERVICE_SPANS; - -final class SelectServiceNames extends ResultSetFutureCall { - static final class Factory { - final CqlSession session; - final PreparedStatement preparedStatement; - - Factory(CqlSession session) { - this.session = session; - this.preparedStatement = session.prepare("SELECT DISTINCT service" - + " FROM " + TABLE_SERVICE_SPANS); - } - - Call> create() { - return new SelectServiceNames(this).flatMap(DistinctSortedStrings.get()); - } - } - - final Factory factory; - - SelectServiceNames(Factory factory) { - this.factory = factory; - } - - @Override protected CompletionStage newCompletionStage() { - return factory.session.executeAsync(factory.preparedStatement.bind()); - } - - @Override public AsyncResultSet map(AsyncResultSet input) { - return input; - } - - @Override public String toString() { - return "SelectServiceNames{}"; - } - - @Override public SelectServiceNames clone() { - return new SelectServiceNames(factory); - } -} diff --git a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/SelectSpanNames.java b/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/SelectSpanNames.java deleted file mode 100644 index 415b3bde3dc..00000000000 --- a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/SelectSpanNames.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in 
compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.storage.cassandra; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import java.util.List; -import java.util.Locale; -import java.util.concurrent.CompletionStage; -import zipkin2.Call; -import zipkin2.storage.cassandra.internal.call.DistinctSortedStrings; -import zipkin2.storage.cassandra.internal.call.ResultSetFutureCall; - -import static zipkin2.storage.cassandra.Schema.TABLE_SERVICE_SPANS; - -final class SelectSpanNames extends ResultSetFutureCall { - static final class Factory { - final CqlSession session; - final PreparedStatement preparedStatement; - - Factory(CqlSession session) { - this.session = session; - this.preparedStatement = session.prepare("SELECT span" - + " FROM " + TABLE_SERVICE_SPANS - + " WHERE service=?" - + " LIMIT " + 10000); - } - - Call> create(String serviceName) { - if (serviceName == null || serviceName.isEmpty()) return Call.emptyList(); - String service = serviceName.toLowerCase(Locale.ROOT); // service names are always lowercase! 
- return new SelectSpanNames(this, service).flatMap(DistinctSortedStrings.get()); - } - } - - final Factory factory; - final String service; - - SelectSpanNames(Factory factory, String service) { - this.factory = factory; - this.service = service; - } - - @Override protected CompletionStage newCompletionStage() { - return factory.session.executeAsync(factory.preparedStatement.boundStatementBuilder() - .setString(0, service).build()); - } - - @Override public AsyncResultSet map(AsyncResultSet input) { - return input; - } - - @Override public String toString() { - return "SelectSpanNames{service=" + service + "}"; - } - - @Override public SelectSpanNames clone() { - return new SelectSpanNames(factory, service); - } -} diff --git a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/SelectTraceIdsFromServiceRemoteService.java b/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/SelectTraceIdsFromServiceRemoteService.java deleted file mode 100644 index 03a0827261a..00000000000 --- a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/SelectTraceIdsFromServiceRemoteService.java +++ /dev/null @@ -1,176 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.storage.cassandra; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.google.auto.value.AutoValue; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.CompletionStage; -import zipkin2.Call; -import zipkin2.storage.cassandra.CassandraSpanStore.TimestampRange; -import zipkin2.storage.cassandra.internal.call.AccumulateTraceIdTsUuid; -import zipkin2.storage.cassandra.internal.call.AggregateIntoMap; -import zipkin2.storage.cassandra.internal.call.ResultSetFutureCall; - -import static zipkin2.storage.cassandra.Schema.TABLE_TRACE_BY_SERVICE_REMOTE_SERVICE; - -final class SelectTraceIdsFromServiceRemoteService extends ResultSetFutureCall { - @AutoValue abstract static class Input { - abstract String service(); - - abstract String remote_service(); - - abstract int bucket(); - - abstract UUID start_ts(); - - abstract UUID end_ts(); - - abstract int limit_(); - - Input withService(String service) { - return new AutoValue_SelectTraceIdsFromServiceRemoteService_Input( - service, - remote_service(), - bucket(), - start_ts(), - end_ts(), - limit_()); - } - } - - static final class Factory { - final CqlSession session; - final PreparedStatement preparedStatement; - - Factory(CqlSession session) { - this.session = session; - this.preparedStatement = session.prepare("SELECT trace_id,ts" - + " FROM " + TABLE_TRACE_BY_SERVICE_REMOTE_SERVICE - + " WHERE service=? AND remote_service=?" - + " AND bucket=?" - + " AND ts>=?" - + " AND ts<=?" 
- + " LIMIT ?"); - } - - Input newInput( - String serviceName, - String remoteServiceName, - int bucket, - TimestampRange timestampRange, - int limit) { - return new AutoValue_SelectTraceIdsFromServiceRemoteService_Input( - serviceName, - remoteServiceName, - bucket, - timestampRange.startUUID, - timestampRange.endUUID, - limit); - } - - Call> newCall(List inputs) { - if (inputs.isEmpty()) return Call.create(Collections.emptyMap()); - if (inputs.size() == 1) return newCall(inputs.get(0)); - - List>> bucketedTraceIdCalls = new ArrayList<>(); - for (SelectTraceIdsFromServiceRemoteService.Input input : inputs) { - bucketedTraceIdCalls.add(newCall(input)); - } - return new AggregateIntoMap<>(bucketedTraceIdCalls); - } - - Call> newCall(Input input) { - return new SelectTraceIdsFromServiceRemoteService(this, preparedStatement, input) - .flatMap(AccumulateTraceIdTsUuid.get()); - } - - /** Applies all deferred service names to all input templates */ - FlatMapper, Map> newFlatMapper(List inputTemplates) { - return new FlatMapServicesToInputs(inputTemplates); - } - - class FlatMapServicesToInputs implements FlatMapper, Map> { - final List inputTemplates; - - FlatMapServicesToInputs(List inputTemplates) { - this.inputTemplates = inputTemplates; - } - - @Override public Call> map(List serviceNames) { - List>> bucketedTraceIdCalls = new ArrayList<>(); - - for (String service : serviceNames) { // fan out every input for each service name - List scopedInputs = new ArrayList<>(); - for (Input input : inputTemplates) { - scopedInputs.add(input.withService(service)); - } - bucketedTraceIdCalls.add(newCall(scopedInputs)); - } - - if (bucketedTraceIdCalls.isEmpty()) return Call.create(Collections.emptyMap()); - if (bucketedTraceIdCalls.size() == 1) return bucketedTraceIdCalls.get(0); - return new AggregateIntoMap<>(bucketedTraceIdCalls); - } - - @Override public String toString() { - List inputs = new ArrayList<>(); - for (Input input : inputTemplates) { - 
inputs.add(input.toString().replace("Input", "SelectTraceIdsFromServiceRemoteService")); - } - return "FlatMapServicesToInputs{" + inputs + "}"; - } - } - } - - final Factory factory; - final PreparedStatement preparedStatement; - final Input input; - - SelectTraceIdsFromServiceRemoteService(Factory factory, PreparedStatement preparedStatement, - Input input) { - this.factory = factory; - this.preparedStatement = preparedStatement; - this.input = input; - } - - @Override protected CompletionStage newCompletionStage() { - return factory.session.executeAsync(preparedStatement.boundStatementBuilder() - .setString(0, input.service()) - .setString(1, input.remote_service()) - .setInt(2, input.bucket()) - .setUuid(3, input.start_ts()) - .setUuid(4, input.end_ts()) - .setInt(5, input.limit_()) - .setPageSize(input.limit_()).build()); - } - - @Override public AsyncResultSet map(AsyncResultSet input) { - return input; - } - - @Override public String toString() { - return input.toString().replace("Input", "SelectTraceIdsFromServiceRemoteService"); - } - - @Override public SelectTraceIdsFromServiceRemoteService clone() { - return new SelectTraceIdsFromServiceRemoteService(factory, preparedStatement, input); - } -} diff --git a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/SelectTraceIdsFromServiceSpan.java b/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/SelectTraceIdsFromServiceSpan.java deleted file mode 100644 index aa8a7799daa..00000000000 --- a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/SelectTraceIdsFromServiceSpan.java +++ /dev/null @@ -1,212 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.storage.cassandra; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.BoundStatementBuilder; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.google.auto.value.AutoValue; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.CompletionStage; -import zipkin2.Call; -import zipkin2.internal.Nullable; -import zipkin2.storage.cassandra.CassandraSpanStore.TimestampRange; -import zipkin2.storage.cassandra.internal.call.AccumulateTraceIdTsUuid; -import zipkin2.storage.cassandra.internal.call.AggregateIntoMap; -import zipkin2.storage.cassandra.internal.call.ResultSetFutureCall; - -import static zipkin2.storage.cassandra.Schema.TABLE_TRACE_BY_SERVICE_SPAN; - -final class SelectTraceIdsFromServiceSpan extends ResultSetFutureCall { - @AutoValue abstract static class Input { - abstract String service(); - - abstract String span(); - - abstract int bucket(); - - @Nullable abstract Long start_duration(); - - @Nullable abstract Long end_duration(); - - abstract UUID start_ts(); - - abstract UUID end_ts(); - - abstract int limit_(); - - Input withService(String service) { - return new AutoValue_SelectTraceIdsFromServiceSpan_Input( - service, - span(), - bucket(), - start_duration(), - end_duration(), - start_ts(), - end_ts(), - limit_()); - } - } - - static final class Factory { - final CqlSession session; - 
final PreparedStatement selectTraceIdsByServiceSpanName; - final PreparedStatement selectTraceIdsByServiceSpanNameAndDuration; - - Factory(CqlSession session) { - this.session = session; - String baseQuery = "SELECT trace_id,ts" - + " FROM " + TABLE_TRACE_BY_SERVICE_SPAN - + " WHERE service=?" - + " AND span=?" - + " AND bucket=?" - + " AND ts>=?" - + " AND ts<=?"; - this.selectTraceIdsByServiceSpanName = session.prepare(baseQuery - + " LIMIT ?"); - this.selectTraceIdsByServiceSpanNameAndDuration = session.prepare(baseQuery - + " AND duration>=?" - + " AND duration<=?" - + " LIMIT ?"); - } - - Input newInput( - String serviceName, - String spanName, - int bucket, - @Nullable Long minDurationMicros, - @Nullable Long maxDurationMicros, - TimestampRange timestampRange, - int limit) { - Long start_duration = null, end_duration = null; - if (minDurationMicros != null) { - start_duration = minDurationMicros / 1000L; - end_duration = maxDurationMicros != null ? maxDurationMicros / 1000L : Long.MAX_VALUE; - } - return new AutoValue_SelectTraceIdsFromServiceSpan_Input( - serviceName, - spanName, - bucket, - start_duration, - end_duration, - timestampRange.startUUID, - timestampRange.endUUID, - limit); - } - - Call> newCall(List inputs) { - if (inputs.isEmpty()) return Call.create(Collections.emptyMap()); - if (inputs.size() == 1) return newCall(inputs.get(0)); - - List>> bucketedTraceIdCalls = new ArrayList<>(); - for (SelectTraceIdsFromServiceSpan.Input input : inputs) { - bucketedTraceIdCalls.add(newCall(input)); - } - return new AggregateIntoMap<>(bucketedTraceIdCalls); - } - - Call> newCall(Input input) { - PreparedStatement preparedStatement = input.start_duration() != null - ? 
selectTraceIdsByServiceSpanNameAndDuration - : selectTraceIdsByServiceSpanName; - return new SelectTraceIdsFromServiceSpan(this, preparedStatement, input) - .flatMap(AccumulateTraceIdTsUuid.get()); - } - - /** Applies all deferred service names to all input templates */ - FlatMapper, Map> newFlatMapper(List inputTemplates) { - return new FlatMapServicesToInputs(inputTemplates); - } - - class FlatMapServicesToInputs implements FlatMapper, Map> { - final List inputTemplates; - - FlatMapServicesToInputs(List inputTemplates) { - this.inputTemplates = inputTemplates; - } - - @Override public Call> map(List serviceNames) { - List>> bucketedTraceIdCalls = new ArrayList<>(); - - for (String service : serviceNames) { // fan out every input for each service name - List scopedInputs = new ArrayList<>(); - for (SelectTraceIdsFromServiceSpan.Input input : inputTemplates) { - scopedInputs.add(input.withService(service)); - } - bucketedTraceIdCalls.add(newCall(scopedInputs)); - } - - if (bucketedTraceIdCalls.isEmpty()) return Call.create(Collections.emptyMap()); - if (bucketedTraceIdCalls.size() == 1) return bucketedTraceIdCalls.get(0); - return new AggregateIntoMap<>(bucketedTraceIdCalls); - } - - @Override public String toString() { - List inputs = new ArrayList<>(); - for (Input input : inputTemplates) { - inputs.add(input.toString().replace("Input", "SelectTraceIdsFromServiceSpan")); - } - return "FlatMapServicesToInputs{" + inputs + "}"; - } - } - } - - final Factory factory; - final PreparedStatement preparedStatement; - final Input input; - - SelectTraceIdsFromServiceSpan(Factory factory, PreparedStatement preparedStatement, Input input) { - this.factory = factory; - this.preparedStatement = preparedStatement; - this.input = input; - } - - @Override protected CompletionStage newCompletionStage() { - int i = 0; - BoundStatementBuilder bound = preparedStatement.boundStatementBuilder() - .setString(i++, input.service()) - .setString(i++, input.span()) - .setInt(i++, 
input.bucket()) - .setUuid(i++, input.start_ts()) - .setUuid(i++, input.end_ts()); - - if (input.start_duration() != null) { - bound.setLong(i++, input.start_duration()); - bound.setLong(i++, input.end_duration()); - } - - bound - .setInt(i, input.limit_()) - .setPageSize(input.limit_()); - - return factory.session.executeAsync(bound.build()); - } - - @Override public AsyncResultSet map(AsyncResultSet input) { - return input; - } - - @Override public String toString() { - return input.toString().replace("Input", "SelectTraceIdsFromServiceSpan"); - } - - @Override public SelectTraceIdsFromServiceSpan clone() { - return new SelectTraceIdsFromServiceSpan(factory, preparedStatement, input); - } -} diff --git a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/SelectTraceIdsFromSpan.java b/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/SelectTraceIdsFromSpan.java deleted file mode 100644 index d384b497f72..00000000000 --- a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/SelectTraceIdsFromSpan.java +++ /dev/null @@ -1,162 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.storage.cassandra; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.BoundStatementBuilder; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.cql.Row; -import com.google.auto.value.AutoValue; -import java.util.LinkedHashMap; -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.CompletionStage; -import java.util.function.BiConsumer; -import java.util.function.Supplier; -import zipkin2.Call; -import zipkin2.internal.Nullable; -import zipkin2.storage.cassandra.CassandraSpanStore.TimestampRange; -import zipkin2.storage.cassandra.internal.call.AccumulateAllResults; -import zipkin2.storage.cassandra.internal.call.ResultSetFutureCall; - -import static zipkin2.storage.cassandra.Schema.TABLE_SPAN; - -/** - * Selects from the {@link Schema#TABLE_SPAN} using data in the partition key or SASI indexes. - * - *

Note: While queries here use "ALLOW FILTERING", they do so within a SASI clause, and only - * return (traceId, timestamp) tuples. This means the entire spans table is not scanned, unless the - * time range implies that. - * - *

The spans table is sorted descending by timestamp. When a query includes only a time range, - * the first N rows are already in the correct order. However, the cardinality of rows is a function - * of span count, not trace count. This implies an over-fetch function based on average span count - * per trace in order to achieve N distinct trace IDs. For example if there are 3 spans per trace, - * and over-fetch function of 3 * intended limit will work. See {@link - * CassandraStorage#indexFetchMultiplier} for an associated parameter. - */ -final class SelectTraceIdsFromSpan extends ResultSetFutureCall { - @AutoValue abstract static class Input { - @Nullable abstract String l_service(); - - @Nullable abstract String annotation_query(); - - abstract UUID start_ts(); - - abstract UUID end_ts(); - - abstract int limit_(); - } - - static final class Factory { - final CqlSession session; - final PreparedStatement withAnnotationQuery, withServiceAndAnnotationQuery; - - Factory(CqlSession session) { - this.session = session; - String querySuffix = "annotation_query LIKE ?" - + " AND ts_uuid>=?" - + " AND ts_uuid<=?" - + " LIMIT ?" - + " ALLOW FILTERING"; - this.withAnnotationQuery = session.prepare("SELECT trace_id,ts" - + " FROM " + TABLE_SPAN - + " WHERE " + querySuffix); - this.withServiceAndAnnotationQuery = session.prepare("SELECT trace_id,ts" - + " FROM " + TABLE_SPAN - + " WHERE l_service=:l_service" - + " AND " + querySuffix); - } - - Call> newCall( - @Nullable String serviceName, - String annotationKey, - TimestampRange timestampRange, - int limit) { - Input input = new AutoValue_SelectTraceIdsFromSpan_Input( - serviceName, - annotationKey, - timestampRange.startUUID, - timestampRange.endUUID, - limit); - PreparedStatement preparedStatement = - serviceName != null ? 
withServiceAndAnnotationQuery : withAnnotationQuery; - return new SelectTraceIdsFromSpan(this, preparedStatement, input) - .flatMap(AccumulateTraceIdTsLong.get()); - } - } - - final Factory factory; - final PreparedStatement preparedStatement; - final Input input; - - SelectTraceIdsFromSpan(Factory factory, PreparedStatement preparedStatement, Input input) { - this.factory = factory; - this.preparedStatement = preparedStatement; - this.input = input; - } - - @Override protected CompletionStage newCompletionStage() { - BoundStatementBuilder bound = preparedStatement.boundStatementBuilder(); - int i = 0; - if (input.l_service() != null) bound.setString(i++, input.l_service()); - if (input.annotation_query() != null) { - bound.setString(i++, input.annotation_query()); - } else { - throw new IllegalArgumentException(input.toString()); - } - bound - .setUuid(i++, input.start_ts()) - .setUuid(i++, input.end_ts()) - .setInt(i, input.limit_()) - .setPageSize(input.limit_()); - return factory.session.executeAsync(bound.build()); - } - - @Override public AsyncResultSet map(AsyncResultSet input) { - return input; - } - - @Override public SelectTraceIdsFromSpan clone() { - return new SelectTraceIdsFromSpan(factory, preparedStatement, input); - } - - @Override public String toString() { - return input.toString().replace("Input", "SelectTraceIdsFromSpan"); - } - - static final class AccumulateTraceIdTsLong extends AccumulateAllResults> { - static final AccumulateAllResults> INSTANCE = new AccumulateTraceIdTsLong(); - - static AccumulateAllResults> get() { - return INSTANCE; - } - - @Override protected Supplier> supplier() { - return LinkedHashMap::new; // because results are not distinct - } - - @Override protected BiConsumer> accumulator() { - return (row, result) -> { - if (row.isNull(1)) return; // no timestamp - result.put(row.getString(0), row.getLong(1)); - }; - } - - @Override public String toString() { - return "AccumulateTraceIdTsLong{}"; - } - } -} diff --git 
a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/internal/CassandraStorageBuilder.java b/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/internal/CassandraStorageBuilder.java deleted file mode 100644 index 7ca61937841..00000000000 --- a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/internal/CassandraStorageBuilder.java +++ /dev/null @@ -1,186 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.storage.cassandra.internal; - -import com.datastax.oss.driver.api.core.config.DefaultDriverOption; -import com.datastax.oss.driver.api.core.config.DriverOption; -import java.util.Collections; -import java.util.LinkedHashMap; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.TimeUnit; -import zipkin2.internal.Nullable; -import zipkin2.storage.QueryRequest; -import zipkin2.storage.StorageComponent; - -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.CONNECTION_MAX_REQUESTS; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE; - -public abstract class CassandraStorageBuilder> - extends StorageComponent.Builder { - protected boolean strictTraceId = true, searchEnabled = true; - protected Set autocompleteKeys = Collections.emptySet(); - protected int autocompleteTtl = (int) TimeUnit.HOURS.toMillis(1); - protected int autocompleteCardinality = 5 * 4000; // Ex. 5 site tags with cardinality 4000 each - - protected String contactPoints = "localhost"; - // Driver v4 requires this, so take a guess! When we are wrong, the user can override anyway - protected String localDc = "datacenter1"; - @Nullable protected String username, password; - protected boolean useSsl = false; - - protected String keyspace; - protected boolean ensureSchema = true; - - protected int maxTraceCols = 100_000; - protected int indexFetchMultiplier = 3; - - // Zipkin collectors can create out a lot of async requests in bursts, so we - // increase some properties beyond the norm. 
- /** @see DefaultDriverOption#CONNECTION_POOL_LOCAL_SIZE */ - // Ported from java-driver v3 PoolingOptions.setMaxConnectionsPerHost(HostDistance.LOCAL, 8) - int poolLocalSize = 8; - /** @see DefaultDriverOption#CONNECTION_MAX_REQUESTS */ - // Ported from java-driver v3 PoolingOptions.setMaxQueueSize(40960) - final int maxRequestsPerConnection = 40960 / poolLocalSize; - - protected Map poolingOptions() { - Map result = new LinkedHashMap<>(); - result.put(CONNECTION_POOL_LOCAL_SIZE, poolLocalSize); - result.put(CONNECTION_MAX_REQUESTS, maxRequestsPerConnection); - return result; - } - - protected CassandraStorageBuilder(String defaultKeyspace) { - keyspace = defaultKeyspace; - } - - @Override public B strictTraceId(boolean strictTraceId) { - this.strictTraceId = strictTraceId; - return (B) this; - } - - @Override public B searchEnabled(boolean searchEnabled) { - this.searchEnabled = searchEnabled; - return (B) this; - } - - @Override public B autocompleteKeys(List keys) { - if (keys == null) throw new NullPointerException("keys == null"); - this.autocompleteKeys = Collections.unmodifiableSet(new LinkedHashSet<>(keys)); - return (B) this; - } - - @Override public B autocompleteTtl(int autocompleteTtl) { - if (autocompleteTtl <= 0) throw new IllegalArgumentException("autocompleteTtl <= 0"); - this.autocompleteTtl = autocompleteTtl; - return (B) this; - } - - @Override public B autocompleteCardinality(int autocompleteCardinality) { - if (autocompleteCardinality <= 0) { - throw new IllegalArgumentException("autocompleteCardinality <= 0"); - } - this.autocompleteCardinality = autocompleteCardinality; - return (B) this; - } - - /** - * Comma separated list of host addresses part of Cassandra cluster. You can also specify a custom - * port with 'host:port'. 
Defaults to localhost on port 9042 * - */ - public B contactPoints(String contactPoints) { - if (contactPoints == null) throw new NullPointerException("contactPoints == null"); - this.contactPoints = contactPoints; - return (B) this; - } - - /** - * Name of the datacenter that will be considered "local" for latency load balancing. When unset, - * load-balancing is round-robin. - */ - public B localDc(String localDc) { - if (localDc == null) throw new NullPointerException("localDc == null"); - this.localDc = localDc; - return (B) this; - } - - /** Max pooled connections per datacenter-local host. Defaults to 8 */ - public B maxConnections(int maxConnections) { - if (maxConnections <= 0) throw new IllegalArgumentException("maxConnections <= 0"); - this.poolLocalSize = maxConnections; - return (B) this; - } - - /** Will throw an exception on startup if authentication fails. No default. */ - public B username(@Nullable String username) { - this.username = username; - return (B) this; - } - - /** Will throw an exception on startup if authentication fails. No default. */ - public B password(@Nullable String password) { - this.password = password; - return (B) this; - } - - /** Use ssl for connection. Defaults to false. */ - public B useSsl(boolean useSsl) { - this.useSsl = useSsl; - return (B) this; - } - - /** Keyspace to store span and index data. Defaults to "zipkin3" */ - public B keyspace(String keyspace) { - if (keyspace == null) throw new NullPointerException("keyspace == null"); - this.keyspace = keyspace; - return (B) this; - } - - public B ensureSchema(boolean ensureSchema) { - this.ensureSchema = ensureSchema; - return (B) this; - } - - /** - * Spans have multiple values for the same id. For example, a client and server contribute to the - * same span id. When searching for spans by id, the amount of results may be larger than the ids. - * This defines a threshold which accommodates this situation, without looking for an unbounded - * number of results. 
- */ - public B maxTraceCols(int maxTraceCols) { - if (maxTraceCols <= 0) throw new IllegalArgumentException("maxTraceCols <= 0"); - this.maxTraceCols = maxTraceCols; - return (B) this; - } - - /** - * How many more index rows to fetch than the user-supplied query limit. Defaults to 3. - * - *

Backend requests will request {@link QueryRequest#limit()} times this factor rows from - * Cassandra indexes in attempts to return {@link QueryRequest#limit()} traces. - * - *

Indexing in cassandra will usually have more rows than trace identifiers due to factors - * including table design and collection implementation. As there's no way to DISTINCT out - * duplicates server-side, this over-fetches client-side when {@code indexFetchMultiplier} > - * 1. - */ - public B indexFetchMultiplier(int indexFetchMultiplier) { - if (indexFetchMultiplier <= 0) throw new IllegalArgumentException("indexFetchMultiplier <= 0"); - this.indexFetchMultiplier = indexFetchMultiplier; - return (B) this; - } -} diff --git a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/internal/HostAndPort.java b/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/internal/HostAndPort.java deleted file mode 100644 index e0ad3d4ee83..00000000000 --- a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/internal/HostAndPort.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.storage.cassandra.internal; - -import zipkin2.Endpoint; - -// Similar to com.google.common.net.HostAndPort, but no guava dep -public final class HostAndPort { - final String host; - final int port; - - HostAndPort(String host, int port) { - this.host = host; - this.port = port; - } - - /** Returns the unvalidated hostname or IP literal */ - public String getHost() { - return host; - } - - /** Returns the port */ - public int getPort() { - return port; - } - - @Override public boolean equals(Object o) { - if (o == this) return true; - if (!(o instanceof HostAndPort)) return false; - HostAndPort that = (HostAndPort) o; - return host.equals(that.host) && port == that.port; - } - - @Override public int hashCode() { - int h = 1; - h *= 1000003; - h ^= (host == null) ? 0 : host.hashCode(); - h *= 1000003; - h ^= port; - return h; - } - - @Override public String toString() { - return "HostAndPort{host=" + host + ", port=" + port + "}"; - } - - /** - * Constructs a host-port pair from the given string, defaulting to the indicated port if absent - */ - public static HostAndPort fromString(String hostPort, int defaultPort) { - if (hostPort == null) throw new NullPointerException("hostPort == null"); - - String host = hostPort; - int endHostIndex = hostPort.length(); - if (hostPort.startsWith("[")) { // Bracketed IPv6 - endHostIndex = hostPort.lastIndexOf(']') + 1; - host = hostPort.substring(1, endHostIndex == 0 ? 
1 : endHostIndex - 1); - if (!Endpoint.newBuilder().parseIp(host)) { // reuse our IPv6 validator - throw new IllegalArgumentException(hostPort + " contains an invalid IPv6 literal"); - } - } else { - int colonIndex = hostPort.indexOf(':'), nextColonIndex = hostPort.lastIndexOf(':'); - if (colonIndex >= 0) { - if (colonIndex == nextColonIndex) { // only 1 colon - host = hostPort.substring(0, colonIndex); - endHostIndex = colonIndex; - } else if (!Endpoint.newBuilder().parseIp(hostPort)) { // reuse our IPv6 validator - throw new IllegalArgumentException(hostPort + " is an invalid IPv6 literal"); - } - } - } - if (host.isEmpty()) throw new IllegalArgumentException(hostPort + " has an empty host"); - if (endHostIndex + 1 < hostPort.length() && hostPort.charAt(endHostIndex) == ':') { - return new HostAndPort(host, validatePort(hostPort.substring(endHostIndex + 1), hostPort)); - } - return new HostAndPort(host, defaultPort); - } - - static int validatePort(String portString, String hostPort) { - for (int i = 0, length = portString.length(); i < length; i++) { - char c = portString.charAt(i); - if (c >= '0' && c <= '9') continue; // isDigit - throw new IllegalArgumentException(hostPort + " has an invalid port"); - } - int result = Integer.parseInt(portString); - if (result == 0 || result > 0xffff) { - throw new IllegalArgumentException(hostPort + " has an invalid port"); - } - return result; - } -} diff --git a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/internal/KeyspaceMetadataUtil.java b/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/internal/KeyspaceMetadataUtil.java deleted file mode 100644 index dda082609d1..00000000000 --- a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/internal/KeyspaceMetadataUtil.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance 
with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.storage.cassandra.internal; - -import com.datastax.oss.driver.api.core.CqlIdentifier; -import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; -import java.util.Optional; - -public final class KeyspaceMetadataUtil { - - public static int getDefaultTtl(KeyspaceMetadata keyspaceMetadata, String table) { - return (int) keyspaceMetadata.getTable(table) - .map(TableMetadata::getOptions) - .flatMap(o -> Optional.ofNullable(o.get(CqlIdentifier.fromCql("default_time_to_live")))) - .orElse(0); - } - - KeyspaceMetadataUtil() { - } -} diff --git a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/internal/Resources.java b/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/internal/Resources.java deleted file mode 100644 index c50081d36c6..00000000000 --- a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/internal/Resources.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. 
See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.storage.cassandra.internal; - -import java.io.IOException; -import java.io.InputStreamReader; -import java.io.Reader; -import java.io.UncheckedIOException; - -import static java.nio.charset.StandardCharsets.UTF_8; - -public final class Resources { - public static String resourceToString(String resource) { - try ( - Reader reader = new InputStreamReader(Resources.class.getResourceAsStream(resource), UTF_8)) { - char[] buf = new char[2048]; - StringBuilder builder = new StringBuilder(); - int read; - while ((read = reader.read(buf)) != -1) { - builder.append(buf, 0, read); - } - return builder.toString(); - } catch (IOException ex) { - throw new UncheckedIOException(ex); - } - } - - Resources() { - } -} diff --git a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/internal/SessionBuilder.java b/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/internal/SessionBuilder.java deleted file mode 100644 index ab31f195577..00000000000 --- a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/internal/SessionBuilder.java +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.storage.cassandra.internal; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.CqlSessionBuilder; -import com.datastax.oss.driver.api.core.auth.AuthProvider; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import com.datastax.oss.driver.api.core.config.DriverOption; -import com.datastax.oss.driver.api.core.config.ProgrammaticDriverConfigLoaderBuilder; -import com.datastax.oss.driver.internal.core.ssl.DefaultSslEngineFactory; -import com.datastax.oss.driver.internal.core.tracker.RequestLogger; -import java.net.InetSocketAddress; -import java.time.Duration; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import zipkin2.internal.Nullable; - -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.REQUEST_CONSISTENCY; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.REQUEST_DEFAULT_IDEMPOTENCE; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.REQUEST_LOGGER_SUCCESS_ENABLED; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.REQUEST_LOGGER_VALUES; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.REQUEST_TIMEOUT; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.REQUEST_TRACKER_CLASS; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.REQUEST_WARN_IF_SET_KEYSPACE; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.SSL_ENGINE_FACTORY_CLASS; - -public final class SessionBuilder { - /** Returns a connected session. Closes the cluster if any exception occurred. */ - public static CqlSession buildSession( - String contactPoints, - String localDc, - Map poolingOptions, - @Nullable AuthProvider authProvider, - boolean useSsl - ) { - // Some options aren't supported by builder methods. 
In these cases, we use driver config - // See https://groups.google.com/a/lists.datastax.com/forum/#!topic/java-driver-user/Z8HrCDX47Q0 - ProgrammaticDriverConfigLoaderBuilder config = - // We aren't reading any resources from the classpath, but this prevents errors running in the - // server, where Thread.currentThread().getContextClassLoader() returns null - DriverConfigLoader.programmaticBuilder(SessionBuilder.class.getClassLoader()); - - // Ported from java-driver v3 PoolingOptions.setPoolTimeoutMillis as request timeout includes that - config.withDuration(REQUEST_TIMEOUT, Duration.ofMinutes(1)); - - CqlSessionBuilder builder = CqlSession.builder(); - builder.addContactPoints(parseContactPoints(contactPoints)); - if (authProvider != null) builder.withAuthProvider(authProvider); - - // In java-driver v3, we used LatencyAwarePolicy(DCAwareRoundRobinPolicy|RoundRobinPolicy) - // where DCAwareRoundRobinPolicy was used if localDc != null - // - // In java-driver v4, the default policy is token-aware and localDc is required. Hence, we - // use the default load balancing policy - // * https://github.com/datastax/java-driver/blob/master/manual/core/load_balancing/README.md - builder.withLocalDatacenter(localDc); - config = config.withString(REQUEST_CONSISTENCY, "LOCAL_ONE"); - // Pooling options changed dramatically from v3->v4. This is a close match. 
- poolingOptions.forEach(config::withInt); - - // All Zipkin CQL writes are idempotent - config = config.withBoolean(REQUEST_DEFAULT_IDEMPOTENCE, true); - - if (useSsl) config = config.withClass(SSL_ENGINE_FACTORY_CLASS, DefaultSslEngineFactory.class); - - // Log categories can enable query logging - Logger requestLogger = LoggerFactory.getLogger(RequestLogger.class); - if (requestLogger.isDebugEnabled()) { - config = config.withClass(REQUEST_TRACKER_CLASS, RequestLogger.class); - config = config.withBoolean(REQUEST_LOGGER_SUCCESS_ENABLED, true); - // Only show bodies when TRACE is enabled - config = config.withBoolean(REQUEST_LOGGER_VALUES, requestLogger.isTraceEnabled()); - } - - // Don't warn: ensureSchema creates the keyspace. Hence, we need to "use" it later. - config = config.withBoolean(REQUEST_WARN_IF_SET_KEYSPACE, false); - - return builder.withConfigLoader(config.build()).build(); - } - - static List parseContactPoints(String contactPoints) { - List result = new ArrayList<>(); - for (String contactPoint : contactPoints.split(",", 100)) { - HostAndPort parsed = HostAndPort.fromString(contactPoint, 9042); - result.add(new InetSocketAddress(parsed.getHost(), parsed.getPort())); - } - return result; - } -} diff --git a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/internal/call/AccumulateAllResults.java b/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/internal/call/AccumulateAllResults.java deleted file mode 100644 index 925e4d09768..00000000000 --- a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/internal/call/AccumulateAllResults.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.storage.cassandra.internal.call; - -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import java.util.concurrent.CompletionStage; -import java.util.function.BiConsumer; -import java.util.function.Function; -import java.util.function.Supplier; -import zipkin2.Call; -import zipkin2.Call.FlatMapper; - -public abstract class AccumulateAllResults implements FlatMapper { - protected abstract Supplier supplier(); - - protected abstract BiConsumer accumulator(); - - /** Customizes the aggregated result. For example, summarizing or making immutable. 
*/ - protected Function finisher() { - return Function.identity(); - } - - @Override public Call map(AsyncResultSet rs) { - return new AccumulateNextResults<>( - supplier().get(), - accumulator(), - finisher() - ).map(rs); - } - - static final class FetchMoreResults extends ResultSetFutureCall { - final AsyncResultSet resultSet; - - FetchMoreResults(AsyncResultSet resultSet) { - this.resultSet = resultSet; - } - - @Override protected CompletionStage newCompletionStage() { - return resultSet.fetchNextPage().toCompletableFuture(); - } - - @Override public AsyncResultSet map(AsyncResultSet input) { - return input; - } - - @Override public Call clone() { - throw new UnsupportedOperationException(); - } - - @Override public String toString() { - return "FetchMoreResults{" + resultSet + "}"; - } - } - - static final class AccumulateNextResults implements FlatMapper { - final T pendingResults; - final BiConsumer accumulator; - final Function finisher; - - AccumulateNextResults( - T pendingResults, BiConsumer accumulator, Function finisher) { - this.pendingResults = pendingResults; - this.accumulator = accumulator; - this.finisher = finisher; - } - - /** Iterates through the rows in each page, flatmapping on more results until exhausted */ - @Override public Call map(AsyncResultSet rs) { - while (rs.remaining() > 0) { - accumulator.accept(rs.one(), pendingResults); - } - // Return collected results if there are no more pages - return rs.getExecutionInfo().getPagingState() == null && !rs.hasMorePages() - ? 
Call.create(finisher.apply(pendingResults)) - : new FetchMoreResults(rs).flatMap(this); - } - } -} diff --git a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/internal/call/AccumulateTraceIdTsUuid.java b/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/internal/call/AccumulateTraceIdTsUuid.java deleted file mode 100644 index 71ad64838b3..00000000000 --- a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/internal/call/AccumulateTraceIdTsUuid.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.storage.cassandra.internal.call; - -import com.datastax.oss.driver.api.core.cql.Row; -import com.datastax.oss.driver.api.core.uuid.Uuids; -import java.util.LinkedHashMap; -import java.util.Map; -import java.util.function.BiConsumer; -import java.util.function.Supplier; - -public final class AccumulateTraceIdTsUuid - extends AccumulateAllResults> { - static final AccumulateAllResults> INSTANCE = new AccumulateTraceIdTsUuid(); - - public static AccumulateAllResults> get() { - return INSTANCE; - } - - @Override protected Supplier> supplier() { - return LinkedHashMap::new; // because results are not distinct - } - - @Override protected BiConsumer> accumulator() { - return (row, result) -> - result.put(row.getString(0), Uuids.unixTimestamp(row.getUuid(1))); - } - - @Override public String toString() { - return "AccumulateTraceIdTsUuid{}"; - } -} diff --git a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/internal/call/AggregateIntoMap.java b/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/internal/call/AggregateIntoMap.java deleted file mode 100644 index 4218d626cf5..00000000000 --- a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/internal/call/AggregateIntoMap.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.storage.cassandra.internal.call; - -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import zipkin2.Call; -import zipkin2.internal.AggregateCall; - -public final class AggregateIntoMap extends AggregateCall, Map> { - public AggregateIntoMap(List>> calls) { - super(calls); - } - - @Override protected Map newOutput() { - return new LinkedHashMap<>(); - } - - @Override protected void append(Map input, Map output) { - output.putAll(input); - } - - @Override protected boolean isEmpty(Map output) { - return output.isEmpty(); - } - - @Override public AggregateIntoMap clone() { - return new AggregateIntoMap<>(cloneCalls()); - } -} diff --git a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/internal/call/DeduplicatingInsert.java b/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/internal/call/DeduplicatingInsert.java deleted file mode 100644 index 1c0588363d6..00000000000 --- a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/internal/call/DeduplicatingInsert.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.storage.cassandra.internal.call; - -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import java.util.List; -import java.util.concurrent.TimeUnit; -import zipkin2.Call; -import zipkin2.Callback; -import zipkin2.internal.DelayLimiter; - -public abstract class DeduplicatingInsert extends ResultSetFutureCall { - public static abstract class Factory { - protected final DelayLimiter delayLimiter; - - protected Factory(long ttl, int cardinality) { - delayLimiter = - DelayLimiter.newBuilder().ttl(ttl, TimeUnit.MILLISECONDS).cardinality(cardinality).build(); - } - - protected abstract Call newCall(I input); - - public final void maybeAdd(I input, List> calls) { - if (input == null) throw new NullPointerException("input == null"); - if (!delayLimiter.shouldInvoke(input)) return; - calls.add(newCall(input)); - } - - public void clear() { - delayLimiter.clear(); - } - } - - protected final DelayLimiter delayLimiter; - protected final I input; - - protected DeduplicatingInsert(DelayLimiter delayLimiter, I input) { - this.delayLimiter = delayLimiter; - this.input = input; - } - - @Override protected final Void doExecute() { - try { - return super.doExecute(); - } catch (RuntimeException | Error e) { - delayLimiter.invalidate(input); - throw e; - } - } - - @Override protected final void doEnqueue(Callback callback) { - super.doEnqueue(new Callback() { - @Override public void onSuccess(Void value) { - callback.onSuccess(value); - } - - @Override public void onError(Throwable t) { - delayLimiter.invalidate(input); - callback.onError(t); - } - }); - } - - @Override public final void doCancel() { - delayLimiter.invalidate(input); - super.doCancel(); - } - - @Override public final Void map(AsyncResultSet input) { - return null; - } -} diff --git a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/internal/call/DirectExecutor.java b/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/internal/call/DirectExecutor.java 
deleted file mode 100644 index 412a7094297..00000000000 --- a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/internal/call/DirectExecutor.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.storage.cassandra.internal.call; - -import java.util.concurrent.Executor; - -/** Same as {@code MoreExecutors.directExecutor()} except without a guava 18 dep */ -enum DirectExecutor implements Executor { - INSTANCE; - - @Override public void execute(Runnable command) { - command.run(); - } - - @Override public String toString() { - return "DirectExecutor"; - } -} diff --git a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/internal/call/DistinctSortedStrings.java b/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/internal/call/DistinctSortedStrings.java deleted file mode 100644 index 2ee40c031a1..00000000000 --- a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/internal/call/DistinctSortedStrings.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.storage.cassandra.internal.call; - -import com.datastax.oss.driver.api.core.cql.Row; -import java.util.ArrayList; -import java.util.Collections; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.function.BiConsumer; -import java.util.function.Function; -import java.util.function.Supplier; - -public final class DistinctSortedStrings extends AccumulateAllResults> { - static final AccumulateAllResults> INSTANCE = new DistinctSortedStrings(); - - public static AccumulateAllResults> get() { - return INSTANCE; - } - - @Override protected Supplier> supplier() { - return ArrayList::new; - } - - @Override protected Function, List> finisher() { - return SortDistinct.INSTANCE; - } - - enum SortDistinct implements Function, List> { - INSTANCE; - - @Override public List apply(List strings) { - Collections.sort(strings); - return new ArrayList<>(new LinkedHashSet<>(strings)); - } - } - - @Override protected BiConsumer> accumulator() { - return (row, list) -> { - String result = row.getString(0); - if (!result.isEmpty()) list.add(result); - }; - } - - @Override public String toString() { - return "DistinctSortedStrings{}"; - } - - DistinctSortedStrings() { - } -} diff --git a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/internal/call/InsertEntry.java b/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/internal/call/InsertEntry.java deleted file mode 100644 index c4d4d27ad54..00000000000 --- a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/internal/call/InsertEntry.java +++ 
/dev/null @@ -1,66 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.storage.cassandra.internal.call; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import java.util.Map; -import java.util.concurrent.CompletionStage; -import zipkin2.Call; - -public final class InsertEntry extends DeduplicatingInsert> { - public static final class Factory extends DeduplicatingInsert.Factory> { - final CqlSession session; - final PreparedStatement preparedStatement; - - public Factory(String statement, CqlSession session, long ttl, int cardinality) { - this(statement, session, ttl, cardinality, 0); - } - - /** Cassandra v1 has deprecated support for indexTtl. */ - public Factory(String statement, CqlSession session, long ttl, int cardinality, int indexTtl) { - super(ttl, cardinality); - this.session = session; - this.preparedStatement = - session.prepare(indexTtl > 0 ? 
statement + " USING TTL " + indexTtl : statement); - } - - @Override protected Call newCall(Map.Entry input) { - return new InsertEntry(this, input); - } - } - - final Factory factory; - - InsertEntry(Factory factory, Map.Entry input) { - super(factory.delayLimiter, input); - this.factory = factory; - } - - @Override protected CompletionStage newCompletionStage() { - return factory.session.executeAsync(factory.preparedStatement.boundStatementBuilder() - .setString(0, input.getKey()) - .setString(1, input.getValue()).build()); - } - - @Override public String toString() { - return factory.preparedStatement.getQuery() - .replace("(?,?)", "(" + input.getKey() + "," + input.getValue() + ")"); - } - - @Override public Call clone() { - return new InsertEntry(factory, input); - } -} diff --git a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/internal/call/IntersectKeySets.java b/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/internal/call/IntersectKeySets.java deleted file mode 100644 index 0d9ba1d12c6..00000000000 --- a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/internal/call/IntersectKeySets.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.storage.cassandra.internal.call; - -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import zipkin2.Call; -import zipkin2.internal.AggregateCall; - -public final class IntersectKeySets extends AggregateCall, Set> { - public IntersectKeySets(List>> calls) { - super(calls); - } - - @Override protected Set newOutput() { - return new LinkedHashSet<>(); - } - - boolean firstInput = true; - - @Override protected void append(Map input, Set output) { - if (firstInput) { - firstInput = false; - output.addAll(input.keySet()); - } else { - output.retainAll(input.keySet()); - } - } - - @Override protected boolean isEmpty(Set output) { - return output.isEmpty(); - } - - @Override public IntersectKeySets clone() { - return new IntersectKeySets(cloneCalls()); - } -} diff --git a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/internal/call/IntersectMaps.java b/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/internal/call/IntersectMaps.java deleted file mode 100644 index c7923fcdf75..00000000000 --- a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/internal/call/IntersectMaps.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.storage.cassandra.internal.call; - -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import zipkin2.Call; -import zipkin2.internal.AggregateCall; - -public final class IntersectMaps extends AggregateCall, Map> { - - public IntersectMaps(List>> calls) { - super(calls); - } - - @Override protected Map newOutput() { - return new LinkedHashMap<>(); - } - - boolean firstInput = true; - - @Override protected void append(Map input, Map output) { - if (firstInput) { - firstInput = false; - output.putAll(input); - } else { - output.keySet().retainAll(input.keySet()); - } - } - - @Override protected boolean isEmpty(Map output) { - return output.isEmpty(); - } - - @Override public IntersectMaps clone() { - return new IntersectMaps<>(cloneCalls()); - } -} diff --git a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/internal/call/ResultSetFutureCall.java b/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/internal/call/ResultSetFutureCall.java deleted file mode 100644 index ccbdb736249..00000000000 --- a/zipkin-storage/cassandra/src/main/java/zipkin2/storage/cassandra/internal/call/ResultSetFutureCall.java +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.storage.cassandra.internal.call; - -import com.datastax.oss.driver.api.core.DriverException; -import com.datastax.oss.driver.api.core.DriverExecutionException; -import com.datastax.oss.driver.api.core.RequestThrottlingException; -import com.datastax.oss.driver.api.core.connection.BusyConnectionException; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.servererrors.QueryConsistencyException; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.ExecutionException; -import java.util.function.BiFunction; -import java.util.function.Function; -import zipkin2.Call; -import zipkin2.Call.Mapper; -import zipkin2.Callback; -import zipkin2.internal.Nullable; - -// some copy/pasting is ok here as debugging is obscured when the type hierarchy gets deep. -public abstract class ResultSetFutureCall extends Call.Base - implements Mapper, Function { - /** Defers I/O until {@link #enqueue(Callback)} or {@link #execute()} are called. 
*/ - protected abstract CompletionStage newCompletionStage(); - - volatile CompletableFuture future; - - @Override protected V doExecute() { - return getUninterruptibly(newCompletionStage().thenApply(this)); - } - - @Override protected void doEnqueue(Callback callback) { - try { - future = newCompletionStage() - .thenApply(this) - .handleAsync(new CallbackFunction<>(callback)) - .toCompletableFuture(); - } catch (Throwable t) { - propagateIfFatal(t); - callback.onError(t); - } - } - - @Override public V apply(AsyncResultSet input) { - return map(input); // dispatched to Function so that toString is nicer vs a lambda - } - - @Override protected void doCancel() { - CompletableFuture maybeFuture = future; - if (maybeFuture != null) maybeFuture.cancel(true); - } - - @Override protected final boolean doIsCanceled() { - CompletableFuture maybeFuture = future; - return maybeFuture != null && maybeFuture.isCancelled(); - } - - static final class CallbackFunction implements BiFunction { - final Callback callback; - - CallbackFunction(Callback callback) { - this.callback = callback; - } - - @Override public V apply(V input, @Nullable Throwable error) { - if (error != null) { - callback.onError(error); - return input; - } - try { - callback.onSuccess(input); - } catch (Throwable t) { - propagateIfFatal(t); - callback.onError(t); - } - return input; - } - - @Override public String toString() { - return callback.toString(); - } - } - - // Avoid internal dependency on Datastax CompletableFutures and shaded Throwables - static T getUninterruptibly(CompletionStage stage) { - boolean interrupted = false; - try { - while (true) { - try { - return stage.toCompletableFuture().get(); - } catch (InterruptedException e) { - interrupted = true; - } catch (ExecutionException e) { - Throwable cause = e.getCause(); - if (cause instanceof DriverException) { - throw ((DriverException) cause).copy(); - } - if (cause instanceof RuntimeException) throw (RuntimeException) cause; - if (cause 
instanceof Error) throw (Error) cause; - throw new DriverExecutionException(cause); - } - } - } finally { - if (interrupted) { - Thread.currentThread().interrupt(); - } - } - } - - /** - * Sets {@link zipkin2.storage.StorageComponent#isOverCapacity(java.lang.Throwable)} - */ - public static boolean isOverCapacity(Throwable e) { - return e instanceof QueryConsistencyException || - e instanceof BusyConnectionException || - e instanceof RequestThrottlingException; - } -} diff --git a/zipkin-storage/cassandra/src/main/resources/zipkin2-schema-indexes.cql b/zipkin-storage/cassandra/src/main/resources/zipkin2-schema-indexes.cql deleted file mode 100644 index 3ed52759b8a..00000000000 --- a/zipkin-storage/cassandra/src/main/resources/zipkin2-schema-indexes.cql +++ /dev/null @@ -1,92 +0,0 @@ -ALTER TABLE zipkin2.span ADD l_service text; -DROP INDEX IF EXISTS zipkin2.span_l_service_idx; -CREATE CUSTOM INDEX IF NOT EXISTS ON zipkin2.span (l_service) USING 'org.apache.cassandra.index.sasi.SASIIndex' - WITH OPTIONS = {'mode': 'PREFIX'}; - -ALTER TABLE zipkin2.span ADD annotation_query text; //-- can't do SASI on set: ░-joined until CASSANDRA-11182 -CREATE CUSTOM INDEX IF NOT EXISTS ON zipkin2.span (annotation_query) USING 'org.apache.cassandra.index.sasi.SASIIndex' - WITH OPTIONS = { - 'mode': 'PREFIX', - 'analyzed': 'true', - 'analyzer_class':'org.apache.cassandra.index.sasi.analyzer.DelimiterAnalyzer', - 'delimiter': '░'}; - -CREATE TABLE IF NOT EXISTS zipkin2.trace_by_service_span ( - service text, //-- service name - span text, //-- span name, or blank for queries without span name - bucket int, //-- time bucket, calculated as ts/interval (in microseconds), for some pre-configured interval like 1 day. 
- ts timeuuid, //-- start timestamp of the span, truncated to millisecond precision - trace_id text, //-- trace ID - duration bigint, //-- span duration, in milliseconds - PRIMARY KEY ((service, span, bucket), ts) -) - WITH CLUSTERING ORDER BY (ts DESC) - AND compaction = {'class': 'org.apache.cassandra.db.compaction.TimeWindowCompactionStrategy'} - AND default_time_to_live = 259200 - AND gc_grace_seconds = 3600 - AND read_repair_chance = 0 - AND dclocal_read_repair_chance = 0 - AND speculative_retry = '95percentile' - AND comment = 'Secondary table for looking up a trace by a service, or service and span. span column may be blank (when only looking up by service). bucket column adds time bucketing to the partition key, values are microseconds rounded to a pre-configured interval (typically one day). ts column is start timestamp of the span as time-uuid, truncated to millisecond precision. duration column is span duration, rounded up to tens of milliseconds (or hundredths of seconds)'; - -CREATE CUSTOM INDEX IF NOT EXISTS ON zipkin2.trace_by_service_span (duration) USING 'org.apache.cassandra.index.sasi.SASIIndex' - WITH OPTIONS = {'mode': 'PREFIX'}; - -CREATE TABLE IF NOT EXISTS zipkin2.trace_by_service_remote_service ( - service text, //-- service name - remote_service text, //-- remote servie name - bucket int, //-- time bucket, calculated as ts/interval (in microseconds), for some pre-configured interval like 1 day. - ts timeuuid, //-- start timestamp of the span, truncated to millisecond precision - trace_id text, //-- trace ID - PRIMARY KEY ((service, remote_service, bucket), ts) -) - WITH CLUSTERING ORDER BY (ts DESC) - AND compaction = {'class': 'org.apache.cassandra.db.compaction.TimeWindowCompactionStrategy'} - AND default_time_to_live = 259200 - AND gc_grace_seconds = 3600 - AND read_repair_chance = 0 - AND dclocal_read_repair_chance = 0 - AND speculative_retry = '95percentile' - AND comment = 'Secondary table for looking up a trace by a remote service. 
bucket column adds time bucketing to the partition key, values are microseconds rounded to a pre-configured interval (typically one day). ts column is start timestamp of the span as time-uuid, truncated to millisecond precision.'; - -CREATE TABLE IF NOT EXISTS zipkin2.span_by_service ( - service text, - span text, - PRIMARY KEY (service, span) -) - WITH compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy', 'unchecked_tombstone_compaction': 'true', 'tombstone_threshold': '0.2'} - AND caching = {'rows_per_partition': 'ALL'} - AND default_time_to_live = 259200 - AND gc_grace_seconds = 3600 - AND read_repair_chance = 0 - AND dclocal_read_repair_chance = 0 - AND speculative_retry = '95percentile' - AND comment = 'Secondary table for looking up span names by a service name. To compensate for hot partitions, we deduplicate write client side, use LeveledCompactionStrategy with a low threshold and add row caching.'; - -CREATE TABLE IF NOT EXISTS zipkin2.remote_service_by_service ( - service text, - remote_service text, - PRIMARY KEY (service, remote_service) -) - WITH compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy', 'unchecked_tombstone_compaction': 'true', 'tombstone_threshold': '0.2'} - AND caching = {'rows_per_partition': 'ALL'} - AND default_time_to_live = 259200 - AND gc_grace_seconds = 3600 - AND read_repair_chance = 0 - AND dclocal_read_repair_chance = 0 - AND speculative_retry = '95percentile' - AND comment = 'Secondary table for looking up remote service names by a service name. 
To compensate for hot partitions, we deduplicate write client side, use LeveledCompactionStrategy with a low threshold and add row caching.'; - -CREATE TABLE IF NOT EXISTS zipkin2.autocomplete_tags ( - key text, - value text, - PRIMARY KEY (key, value) -) - WITH compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy', 'unchecked_tombstone_compaction': 'true', 'tombstone_threshold': '0.2'} - AND caching = {'rows_per_partition': 'ALL'} - AND default_time_to_live = 259200 - AND gc_grace_seconds = 3600 - AND read_repair_chance = 0 - AND dclocal_read_repair_chance = 0 - AND speculative_retry = '95percentile' - AND comment = 'Secondary table for looking up span tag values for auto-complete purposes. To compensate for hot partitions, we deduplicate write client side, use LeveledCompactionStrategy with a low threshold and add row caching.'; diff --git a/zipkin-storage/cassandra/src/main/resources/zipkin2-schema-upgrade-1.cql b/zipkin-storage/cassandra/src/main/resources/zipkin2-schema-upgrade-1.cql deleted file mode 100644 index 94ab1f6714d..00000000000 --- a/zipkin-storage/cassandra/src/main/resources/zipkin2-schema-upgrade-1.cql +++ /dev/null @@ -1,13 +0,0 @@ -CREATE TABLE IF NOT EXISTS zipkin2.autocomplete_tags ( - key text, - value text, - PRIMARY KEY (key, value) -) - WITH compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy', 'unchecked_tombstone_compaction': 'true', 'tombstone_threshold': '0.2'} - AND caching = {'rows_per_partition': 'ALL'} - AND default_time_to_live = 259200 - AND gc_grace_seconds = 3600 - AND read_repair_chance = 0 - AND dclocal_read_repair_chance = 0 - AND speculative_retry = '95percentile' - AND comment = 'Secondary table for looking up tag key and values for a service'; diff --git a/zipkin-storage/cassandra/src/main/resources/zipkin2-schema-upgrade-2.cql b/zipkin-storage/cassandra/src/main/resources/zipkin2-schema-upgrade-2.cql deleted file mode 100644 index 907e5fc6ed3..00000000000 
--- a/zipkin-storage/cassandra/src/main/resources/zipkin2-schema-upgrade-2.cql +++ /dev/null @@ -1,30 +0,0 @@ -CREATE TABLE IF NOT EXISTS zipkin2.remote_service_by_service ( - service text, - remote_service text, - PRIMARY KEY (service, remote_service) -) - WITH compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy', 'unchecked_tombstone_compaction': 'true', 'tombstone_threshold': '0.2'} - AND caching = {'rows_per_partition': 'ALL'} - AND default_time_to_live = 259200 - AND gc_grace_seconds = 3600 - AND read_repair_chance = 0 - AND dclocal_read_repair_chance = 0 - AND speculative_retry = '95percentile' - AND comment = 'Secondary table for looking up remote service names by a service name.'; - -CREATE TABLE IF NOT EXISTS zipkin2.trace_by_service_remote_service ( - service text, //-- service name - remote_service text, //-- remote servie name - bucket int, //-- time bucket, calculated as ts/interval (in microseconds), for some pre-configured interval like 1 day. - ts timeuuid, //-- start timestamp of the span, truncated to millisecond precision - trace_id text, //-- trace ID - PRIMARY KEY ((service, remote_service, bucket), ts) -) - WITH CLUSTERING ORDER BY (ts DESC) - AND compaction = {'class': 'org.apache.cassandra.db.compaction.TimeWindowCompactionStrategy'} - AND default_time_to_live = 259200 - AND gc_grace_seconds = 3600 - AND read_repair_chance = 0 - AND dclocal_read_repair_chance = 0 - AND speculative_retry = '95percentile' - AND comment = 'Secondary table for looking up a trace by a remote service. bucket column adds time bucketing to the partition key, values are microseconds rounded to a pre-configured interval (typically one day). 
ts column is start timestamp of the span as time-uuid, truncated to millisecond precision.'; diff --git a/zipkin-storage/cassandra/src/main/resources/zipkin2-schema.cql b/zipkin-storage/cassandra/src/main/resources/zipkin2-schema.cql deleted file mode 100644 index b6421f1a3c1..00000000000 --- a/zipkin-storage/cassandra/src/main/resources/zipkin2-schema.cql +++ /dev/null @@ -1,57 +0,0 @@ -CREATE KEYSPACE IF NOT EXISTS zipkin2 - WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'} - AND durable_writes = false; - -CREATE TYPE IF NOT EXISTS zipkin2.endpoint ( - service text, - ipv4 inet, - ipv6 inet, - port int, -); - -CREATE TYPE IF NOT EXISTS zipkin2.annotation ( - ts bigint, - v text, -); - -CREATE TABLE IF NOT EXISTS zipkin2.span ( - trace_id text, // when strictTraceId=false, only contains right-most 16 chars - ts_uuid timeuuid, - id text, - trace_id_high text, // when strictTraceId=false, contains left-most 16 chars if present - parent_id text, - kind text, - span text, // span.name - ts bigint, - duration bigint, - l_ep Endpoint, - r_ep Endpoint, - annotations list>, - tags map, - debug boolean, - shared boolean, - PRIMARY KEY (trace_id, ts_uuid, id) -) - WITH CLUSTERING ORDER BY (ts_uuid DESC) - AND compaction = {'class': 'org.apache.cassandra.db.compaction.TimeWindowCompactionStrategy'} - AND default_time_to_live = 604800 - AND gc_grace_seconds = 3600 - AND read_repair_chance = 0 - AND dclocal_read_repair_chance = 0 - AND speculative_retry = '95percentile' - AND comment = 'Primary table for holding trace data'; - -CREATE TABLE IF NOT EXISTS zipkin2.dependency ( - day date, - parent text, - child text, - errors bigint, - calls bigint, - PRIMARY KEY (day, parent, child) -) - WITH compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy', 'unchecked_tombstone_compaction': 'true', 'tombstone_threshold': '0.2'} - AND default_time_to_live = 259200 - AND gc_grace_seconds = 3600 - AND read_repair_chance = 0 - AND 
dclocal_read_repair_chance = 0 - AND comment = 'Holder for each days generation of zipkin2.DependencyLink'; diff --git a/zipkin-storage/cassandra/src/test/java/zipkin2/storage/cassandra/CassandraSpanConsumerTest.java b/zipkin-storage/cassandra/src/test/java/zipkin2/storage/cassandra/CassandraSpanConsumerTest.java deleted file mode 100644 index 491b90d828d..00000000000 --- a/zipkin-storage/cassandra/src/test/java/zipkin2/storage/cassandra/CassandraSpanConsumerTest.java +++ /dev/null @@ -1,224 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.storage.cassandra; - -import java.util.Collections; -import org.junit.Test; -import zipkin2.Call; -import zipkin2.Span; -import zipkin2.internal.AggregateCall; -import zipkin2.storage.cassandra.internal.call.InsertEntry; -import zipkin2.storage.cassandra.internal.call.ResultSetFutureCall; - -import static java.util.Collections.singletonList; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.entry; -import static org.assertj.core.api.Assertions.tuple; -import static zipkin2.TestObjects.BACKEND; -import static zipkin2.TestObjects.FRONTEND; -import static zipkin2.TestObjects.TODAY; -import static zipkin2.storage.cassandra.InternalForTests.mockSession; - -public class CassandraSpanConsumerTest { - CassandraSpanConsumer consumer = spanConsumer(CassandraStorage.newBuilder()); - - Span spanWithoutAnnotationsOrTags = - Span.newBuilder() - .traceId("a") - .id("1") - .name("get") - .localEndpoint(FRONTEND) - .timestamp(TODAY * 1000L) - .duration(207000L) - .build(); - - @Test public void emptyInput_emptyCall() { - Call call = consumer.accept(Collections.emptyList()); - assertThat(call).hasSameClassAs(Call.create(null)); - } - - @Test public void doesntSetTraceIdHigh_128() { - Span span = spanWithoutAnnotationsOrTags.toBuilder() - .traceId("77fcac3d4c5be8d2a037812820c65f28") - .build(); - - AggregateCall call = (AggregateCall) consumer.accept(singletonList(span)); - assertThat(call.delegate()) - .filteredOn(c -> c instanceof InsertSpan) - .extracting("input.trace_id_high", "input.trace_id") - .containsExactly(tuple(null, span.traceId())); - } - - @Test public void doesntSetTraceIdHigh_64() { - Span span = spanWithoutAnnotationsOrTags; - - AggregateCall call = (AggregateCall) consumer.accept(singletonList(span)); - assertThat(call.delegate()) - .filteredOn(c -> c instanceof InsertSpan) - .extracting("input.trace_id_high", "input.trace_id") - .containsExactly(tuple(null, span.traceId())); - } - - 
@Test public void strictTraceIdFalse_setsTraceIdHigh() { - consumer = spanConsumer(CassandraStorage.newBuilder().strictTraceId(false)); - - Span span = spanWithoutAnnotationsOrTags.toBuilder() - .traceId("77fcac3d4c5be8d2a037812820c65f28") - .build(); - - AggregateCall call = (AggregateCall) consumer.accept(singletonList(span)); - assertThat(call.delegate()) - .filteredOn(c -> c instanceof InsertSpan) - .extracting("input.trace_id_high", "input.trace_id") - .containsExactly(tuple("77fcac3d4c5be8d2", "a037812820c65f28")); - } - - @Test public void serviceSpanKeys() { - Span span = spanWithoutAnnotationsOrTags; - - AggregateCall call = (AggregateCall) consumer.accept(singletonList(span)); - assertThat(call.delegate()) - .filteredOn(c -> c instanceof InsertEntry) - .extracting("input") - .containsExactly(entry(FRONTEND.serviceName(), span.name())); - } - - @Test public void serviceRemoteServiceKeys_addsRemoteServiceName() { - Span span = spanWithoutAnnotationsOrTags.toBuilder().remoteEndpoint(BACKEND).build(); - - AggregateCall call = (AggregateCall) consumer.accept(singletonList(span)); - assertThat(call.delegate()) - .filteredOn(c -> c instanceof InsertEntry) - .extracting("input") - .containsExactly( - entry(FRONTEND.serviceName(), span.name()), - entry(FRONTEND.serviceName(), BACKEND.serviceName()) - ); - } - - @Test public void serviceRemoteServiceKeys_skipsRemoteServiceNameWhenNoLocalService() { - Span span = spanWithoutAnnotationsOrTags.toBuilder() - .localEndpoint(null) - .remoteEndpoint(BACKEND).build(); - - Call call = consumer.accept(singletonList(span)); - - assertThat(call).isInstanceOf(InsertSpan.class); - } - - @Test public void serviceSpanKeys_emptyWhenNoEndpoints() { - Span span = spanWithoutAnnotationsOrTags.toBuilder().localEndpoint(null).build(); - - assertThat(consumer.accept(singletonList(span))) - .isInstanceOf(ResultSetFutureCall.class); - } - - /** - * To allow lookups w/o a span name, we index "". 
"" is used instead of null to avoid creating - * tombstones. - */ - @Test public void traceByServiceSpan_indexesLocalServiceNameAndEmptySpanName() { - Span span = spanWithoutAnnotationsOrTags; - - AggregateCall call = (AggregateCall) consumer.accept(singletonList(span)); - assertThat(call.delegate()) - .filteredOn(c -> c instanceof InsertTraceByServiceSpan) - .extracting("input.service", "input.span") - .containsExactly( - tuple(FRONTEND.serviceName(), span.name()), tuple(FRONTEND.serviceName(), "")); - } - - @Test public void traceByServiceSpan_indexesDurationInMillis() { - Span span = spanWithoutAnnotationsOrTags; - - AggregateCall call = (AggregateCall) consumer.accept(singletonList(span)); - assertThat(call.delegate()) - .filteredOn(c -> c instanceof InsertTraceByServiceSpan) - .extracting("input.duration") - .containsOnly(span.durationAsLong() / 1000L); - } - - @Test public void traceByServiceSpan_indexesDurationMinimumZero() { - Span span = spanWithoutAnnotationsOrTags.toBuilder().duration(12L).build(); - - AggregateCall call = (AggregateCall) consumer.accept(singletonList(span)); - assertThat(call.delegate()) - .filteredOn(c -> c instanceof InsertTraceByServiceSpan) - .extracting("input.duration") - .containsOnly(0L); - } - - @Test public void traceByServiceSpan_skipsOnNoTimestamp() { - Span span = spanWithoutAnnotationsOrTags.toBuilder().timestamp(null).build(); - - AggregateCall call = (AggregateCall) consumer.accept(singletonList(span)); - assertThat(call.delegate()) - .filteredOn(c -> c instanceof InsertTraceByServiceSpan) - .extracting("input.service", "input.span") - .isEmpty(); - } - - @Test public void traceByServiceSpan_doesntIndexRemoteService() { - Span span = spanWithoutAnnotationsOrTags.toBuilder().remoteEndpoint(BACKEND).build(); - - AggregateCall call = (AggregateCall) consumer.accept(singletonList(span)); - assertThat(call.delegate()) - .filteredOn(c -> c instanceof InsertTraceByServiceSpan) - .hasSize(2) - .extracting("input.service") - 
.doesNotContain(BACKEND.serviceName()); - } - - @Test public void traceByServiceSpan_appendsEmptyWhenNoName() { - Span span = spanWithoutAnnotationsOrTags.toBuilder().name(null).build(); - - AggregateCall call = (AggregateCall) consumer.accept(singletonList(span)); - assertThat(call.delegate()) - .filteredOn(c -> c instanceof InsertTraceByServiceSpan) - .extracting("input.service", "input.span") - .containsExactly(tuple(FRONTEND.serviceName(), "")); - } - - @Test public void traceByServiceSpan_emptyWhenNoEndpoints() { - Span span = spanWithoutAnnotationsOrTags.toBuilder().localEndpoint(null).build(); - - assertThat(consumer.accept(singletonList(span))) - .isInstanceOf(ResultSetFutureCall.class); - } - - @Test public void searchDisabled_doesntIndex() { - consumer = spanConsumer(CassandraStorage.newBuilder().searchEnabled(false)); - - Span span = spanWithoutAnnotationsOrTags.toBuilder() - .addAnnotation(TODAY * 1000L, "annotation") - .putTag("foo", "bar") - .duration(10000L) - .build(); - - assertThat(consumer.accept(singletonList(span))) - .extracting("input.annotation_query") - .satisfies(q -> assertThat(q).isNull()); - } - - @Test public void doesntIndexWhenOnlyIncludesTimestamp() { - Span span = Span.newBuilder().traceId("a").id("1").timestamp(TODAY * 1000L).build(); - - assertThat(consumer.accept(singletonList(span))) - .isInstanceOf(ResultSetFutureCall.class); - } - - static CassandraSpanConsumer spanConsumer(CassandraStorage.Builder builder) { - return new CassandraSpanConsumer(builder.sessionFactory(storage -> mockSession()).build()); - } -} diff --git a/zipkin-storage/cassandra/src/test/java/zipkin2/storage/cassandra/CassandraSpanStoreTest.java b/zipkin-storage/cassandra/src/test/java/zipkin2/storage/cassandra/CassandraSpanStoreTest.java deleted file mode 100644 index a6c97427235..00000000000 --- a/zipkin-storage/cassandra/src/test/java/zipkin2/storage/cassandra/CassandraSpanStoreTest.java +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Copyright 2015-2020 The 
OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.storage.cassandra; - -import java.util.Collections; -import java.util.List; -import org.junit.Test; -import zipkin2.Call; -import zipkin2.Span; -import zipkin2.storage.QueryRequest; -import zipkin2.storage.cassandra.SelectTraceIdsFromServiceSpan.Factory.FlatMapServicesToInputs; - -import static org.assertj.core.api.Assertions.assertThat; -import static zipkin2.TestObjects.DAY; -import static zipkin2.TestObjects.TODAY; -import static zipkin2.storage.cassandra.InternalForTests.mockSession; - -// TODO: tests use toString because the call composition chain is complex (includes flat mapping) -// This could be made a little less complex if we scrub out map=>map to a list of transformations, -// or possibly special-casing common transformations. 
-public class CassandraSpanStoreTest { - CassandraSpanStore spanStore = spanStore(CassandraStorage.newBuilder()); - QueryRequest.Builder queryBuilder = QueryRequest.newBuilder().endTs(TODAY).lookback(DAY).limit(5); - - @Test public void getTraces_fansOutAgainstServices() { - Call>> call = spanStore.getTraces(queryBuilder.build()); - - assertThat(call.toString()).contains(FlatMapServicesToInputs.class.getSimpleName()); - } - - @Test public void getTraces_withSpanNameButNoServiceName() { - Call>> call = spanStore.getTraces(queryBuilder.spanName("get").build()); - - assertThat(call.toString()) - .contains(FlatMapServicesToInputs.class.getSimpleName()) - .contains("span=get"); // no need to look at two indexes - } - - @Test public void getTraces_withTagButNoServiceName() { - Call>> call = spanStore.getTraces( - queryBuilder.annotationQuery(Collections.singletonMap("environment", "production")).build()); - - assertThat(call.toString()) - .doesNotContain(FlatMapServicesToInputs.class.getSimpleName()) // works against the span table - .contains("l_service=null, annotation_query=environment=production"); - } - - @Test public void getTraces_withDurationButNoServiceName() { - Call>> call = spanStore.getTraces(queryBuilder.minDuration(1000L).build()); - - assertThat(call.toString()) - .contains(FlatMapServicesToInputs.class.getSimpleName()) - .contains("start_duration=1,"); - } - - @Test public void getTraces_withRemoteServiceNameButNoServiceName() { - Call>> call = - spanStore.getTraces(queryBuilder.remoteServiceName("backend").build()); - - assertThat(call.toString()) - .contains(FlatMapServicesToInputs.class.getSimpleName()) - .contains("remote_service=backend,") - .doesNotContain("span="); // no need to look at two indexes - } - - @Test public void getTraces() { - Call>> call = spanStore.getTraces(queryBuilder.serviceName("frontend").build()); - - assertThat(call.toString()).contains("service=frontend, span=,"); - } - - @Test public void getTraces_withSpanName() { - 
Call>> call = spanStore.getTraces( - queryBuilder.serviceName("frontend").spanName("get").build()); - - assertThat(call.toString()) - .contains("service=frontend, span=get,"); - } - - @Test public void getTraces_withRemoteServiceName() { - Call>> call = spanStore.getTraces( - queryBuilder.serviceName("frontend").remoteServiceName("backend").build()); - - assertThat(call.toString()) - .contains("service=frontend, remote_service=backend,") - .doesNotContain("service=frontend, span="); // no need to look at two indexes - } - - @Test public void getTraces_withSpanNameAndRemoteServiceName() { - Call>> call = spanStore.getTraces( - queryBuilder.serviceName("frontend").remoteServiceName("backend").spanName("get").build()); - - assertThat(call.toString()) // needs to look at two indexes - .contains("service=frontend, remote_service=backend,") - .contains("service=frontend, span=get,"); - } - - @Test public void searchDisabled_doesntMakeRemoteQueryRequests() { - CassandraSpanStore spanStore = spanStore(CassandraStorage.newBuilder().searchEnabled(false)); - - assertThat(spanStore.getTraces(queryBuilder.build())).hasToString("ConstantCall{value=[]}"); - assertThat(spanStore.getServiceNames()).hasToString("ConstantCall{value=[]}"); - assertThat(spanStore.getRemoteServiceNames("icecream")).hasToString("ConstantCall{value=[]}"); - assertThat(spanStore.getSpanNames("icecream")).hasToString("ConstantCall{value=[]}"); - } - - static CassandraSpanStore spanStore(CassandraStorage.Builder builder) { - return new CassandraSpanStore(builder.sessionFactory(storage -> mockSession()).build()); - } -} diff --git a/zipkin-storage/cassandra/src/test/java/zipkin2/storage/cassandra/CassandraStorageExtension.java b/zipkin-storage/cassandra/src/test/java/zipkin2/storage/cassandra/CassandraStorageExtension.java deleted file mode 100644 index ef066f92cb7..00000000000 --- a/zipkin-storage/cassandra/src/test/java/zipkin2/storage/cassandra/CassandraStorageExtension.java +++ /dev/null @@ -1,168 +0,0 @@ 
-/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.storage.cassandra; - -import com.codahale.metrics.Gauge; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metrics.DefaultNodeMetric; -import com.datastax.oss.driver.api.core.metrics.Metrics; -import com.datastax.oss.driver.api.core.servererrors.InvalidQueryException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.Optional; -import org.junit.jupiter.api.extension.AfterAllCallback; -import org.junit.jupiter.api.extension.BeforeAllCallback; -import org.junit.jupiter.api.extension.ExtensionContext; -import org.opentest4j.TestAbortedException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.testcontainers.containers.GenericContainer; -import org.testcontainers.containers.output.Slf4jLogConsumer; -import org.testcontainers.containers.wait.strategy.Wait; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.junit.jupiter.api.Assumptions.assumeTrue; -import static org.testcontainers.utility.DockerImageName.parse; -import static zipkin2.Call.propagateIfFatal; -import static zipkin2.storage.cassandra.ITCassandraStorage.SEARCH_TABLES; -import static zipkin2.storage.cassandra.Schema.TABLE_DEPENDENCY; -import static 
zipkin2.storage.cassandra.Schema.TABLE_SPAN; - -public class CassandraStorageExtension implements BeforeAllCallback, AfterAllCallback { - static final Logger LOGGER = LoggerFactory.getLogger(CassandraStorageExtension.class); - - final CassandraContainer container = new CassandraContainer(); - CqlSession globalSession; - - @Override public void beforeAll(ExtensionContext context) { - if (context.getRequiredTestClass().getEnclosingClass() != null) { - // Only run once in outermost scope. - return; - } - - container.start(); - LOGGER.info("Using contactPoint " + contactPoint()); - globalSession = tryToInitializeSession(contactPoint()); - } - - // Builds a session without trying to use a namespace or init UDTs - static CqlSession tryToInitializeSession(String contactPoint) { - CassandraStorage storage = newStorageBuilder(contactPoint).build(); - CqlSession session = null; - try { - session = DefaultSessionFactory.buildSession(storage); - session.execute("SELECT now() FROM system.local"); - } catch (Throwable e) { - propagateIfFatal(e); - if (session != null) session.close(); - assumeTrue(false, e.getMessage()); - } - return session; - } - - CassandraStorage.Builder newStorageBuilder() { - return newStorageBuilder(contactPoint()); - } - - static CassandraStorage.Builder newStorageBuilder(String contactPoint) { - return CassandraStorage.newBuilder().contactPoints(contactPoint).maxConnections(1); - } - - String contactPoint() { - return container.getHost() + ":" + container.getMappedPort(9042); - } - - void clear(CassandraStorage storage) { - // Clear any key cache - CassandraSpanConsumer spanConsumer = storage.spanConsumer; - if (spanConsumer != null) spanConsumer.clear(); - - CqlSession session = storage.session.session; - if (session == null) session = globalSession; - - List toTruncate = new ArrayList<>(SEARCH_TABLES); - toTruncate.add(TABLE_DEPENDENCY); - toTruncate.add(TABLE_SPAN); - - for (String table : toTruncate) { - try { - session.execute("TRUNCATE " + 
storage.keyspace + "." + table); - } catch (InvalidQueryException e) { - assertThat(e).hasMessage("unconfigured table " + table); - } - } - - blockWhileInFlight(storage); - } - - @Override public void afterAll(ExtensionContext context) { - if (context.getRequiredTestClass().getEnclosingClass() != null) { - // Only run once in outermost scope. - return; - } - if (globalSession != null) globalSession.close(); - } - - static void blockWhileInFlight(CassandraStorage storage) { - CqlSession session = storage.session.get(); - // Now, block until writes complete, notably so we can read them. - boolean wasInFlight = false; - while (true) { - if (!poolInFlight(session)) { - if (wasInFlight) sleep(100); // give a little more to avoid flakey tests - return; - } - wasInFlight = true; - sleep(100); - } - } - - static void sleep(long millis) { - try { - Thread.sleep(millis); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - throw new AssertionError(e); - } - } - - // Use metrics to wait for in-flight requests to settle per - // https://groups.google.com/a/lists.datastax.com/g/java-driver-user/c/5um_yGNynow/m/cInH5I5jBgAJ - static boolean poolInFlight(CqlSession session) { - Collection nodes = session.getMetadata().getNodes().values(); - Optional metrics = session.getMetrics(); - for (Node node : nodes) { - int inFlight = metrics.flatMap(m -> m.getNodeMetric(node, DefaultNodeMetric.IN_FLIGHT)) - .map(m -> ((Gauge) m).getValue()) - .orElse(0); - if (inFlight > 0) return true; - } - return false; - } - - // mostly waiting for https://github.com/testcontainers/testcontainers-java/issues/3537 - static final class CassandraContainer extends GenericContainer { - CassandraContainer() { - super(parse("ghcr.io/openzipkin/zipkin-cassandra:2.23.2")); - if ("true".equals(System.getProperty("docker.skip"))) { - throw new TestAbortedException("${docker.skip} == true"); - } - addExposedPort(9042); - waitStrategy = Wait.forHealthcheck(); - withLogConsumer(new 
Slf4jLogConsumer(LOGGER)); - } - } -} diff --git a/zipkin-storage/cassandra/src/test/java/zipkin2/storage/cassandra/CassandraStorageTest.java b/zipkin-storage/cassandra/src/test/java/zipkin2/storage/cassandra/CassandraStorageTest.java deleted file mode 100644 index 03e534f4e4c..00000000000 --- a/zipkin-storage/cassandra/src/test/java/zipkin2/storage/cassandra/CassandraStorageTest.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.storage.cassandra; - -import com.datastax.oss.driver.api.core.AllNodesFailedException; -import com.datastax.oss.driver.api.core.auth.Authenticator; -import com.datastax.oss.driver.api.core.metadata.EndPoint; -import com.datastax.oss.driver.internal.core.auth.ProgrammaticPlainTextAuthProvider; -import java.nio.ByteBuffer; -import org.junit.Test; -import zipkin2.CheckResult; -import zipkin2.Component; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; - -public class CassandraStorageTest { - - @Test public void authProvider_defaultsToNull() { - assertThat(CassandraStorage.newBuilder().build().authProvider) - .isNull(); - } - - @Test public void usernamePassword_impliesNullDelimitedUtf8Bytes() throws Exception { - ProgrammaticPlainTextAuthProvider authProvider = - (ProgrammaticPlainTextAuthProvider) CassandraStorage.newBuilder() - .username("bob") - .password("secret") - .build().authProvider; - - Authenticator authenticator = - authProvider.newAuthenticator(mock(EndPoint.class), "serverAuthenticator"); - - byte[] SASLhandshake = {0, 'b', 'o', 'b', 0, 's', 'e', 'c', 'r', 'e', 't'}; - assertThat(authenticator.initialResponse().toCompletableFuture().get()) - .extracting(ByteBuffer::array) - .isEqualTo(SASLhandshake); - } - - @Test public void check_failsInsteadOfThrowing() { - CheckResult result = CassandraStorage.newBuilder().contactPoints("1.1.1.1").build().check(); - - assertThat(result.ok()).isFalse(); - assertThat(result.error()).isInstanceOf(AllNodesFailedException.class); - } - - /** - * The {@code toString()} of {@link Component} implementations appear in health check endpoints. - * Since these are likely to be exposed in logs and other monitoring tools, care should be taken - * to ensure {@code toString()} output is a reasonable length and does not contain sensitive - * information. 
- */ - @Test public void toStringContainsOnlySummaryInformation() { - try (CassandraStorage cassandra = - CassandraStorage.newBuilder().contactPoints("1.1.1.1").build()) { - - assertThat(cassandra) - .hasToString("CassandraStorage{contactPoints=1.1.1.1, keyspace=zipkin2}"); - } - } -} diff --git a/zipkin-storage/cassandra/src/test/java/zipkin2/storage/cassandra/CassandraUtilTest.java b/zipkin-storage/cassandra/src/test/java/zipkin2/storage/cassandra/CassandraUtilTest.java deleted file mode 100644 index e3393771e43..00000000000 --- a/zipkin-storage/cassandra/src/test/java/zipkin2/storage/cassandra/CassandraUtilTest.java +++ /dev/null @@ -1,144 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.storage.cassandra; - -import java.time.ZoneOffset; -import java.util.LinkedHashMap; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.TimeUnit; -import org.junit.Test; -import zipkin2.Span; -import zipkin2.TestObjects; -import zipkin2.internal.DateUtil; -import zipkin2.storage.QueryRequest; - -import static java.util.concurrent.TimeUnit.DAYS; -import static org.assertj.core.api.Assertions.assertThat; -import static zipkin2.TestObjects.TODAY; - -public class CassandraUtilTest { - @Test public void annotationKeys_emptyRequest() { - QueryRequest request = QueryRequest.newBuilder() - .endTs(System.currentTimeMillis()) - .limit(10) - .serviceName("test") - .lookback(86400000L) - .build(); - - assertThat(CassandraUtil.annotationKeys(request)) - .isEmpty(); - } - - @Test public void annotationKeys() { - QueryRequest request = QueryRequest.newBuilder() - .endTs(System.currentTimeMillis()) - .limit(10) - .lookback(86400000L) - .serviceName("service") - .parseAnnotationQuery("error and http.method=GET") - .build(); - - assertThat(CassandraUtil.annotationKeys(request)) - .containsExactly("error", "http.method=GET"); - } - - @Test public void annotationKeys_dedupes() { - QueryRequest request = QueryRequest.newBuilder() - .endTs(System.currentTimeMillis()) - .limit(10) - .lookback(86400000L) - .serviceName("service") - .parseAnnotationQuery("error and error") - .build(); - - assertThat(CassandraUtil.annotationKeys(request)) - .containsExactly("error"); - } - - @Test public void annotationKeys_skipsTagsLongerThan256chars() { - // example long value - String arn = - "arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012"; - // example too long value - String url = - 
"http://webservices.amazon.com/onca/xml?AWSAccessKeyId=AKIAIOSFODNN7EXAMPLE&AssociateTag=mytag-20&ItemId=0679722769&Operation=ItemLookup&ResponseGroup=Images%2CItemAttributes%2COffers%2CReviews&Service=AWSECommerceService&Timestamp=2014-08-18T12%3A00%3A00Z&Version=2013-08-01&Signature=j7bZM0LXZ9eXeZruTqWm2DIvDYVUU3wxPPpp%2BiXxzQc%3D"; - - Span span = - TestObjects.CLIENT_SPAN.toBuilder().putTag("aws.arn", arn).putTag("http.url", url).build(); - - assertThat(CassandraUtil.annotationQuery(span)) - .contains("aws.arn", "aws.arn=" + arn) - .doesNotContain("http.url") - .doesNotContain("http.url=" + url); - } - - @Test public void annotationKeys_skipsAnnotationsLongerThan256chars() { - // example long value - String arn = - "arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012"; - // example too long value - String url = - "http://webservices.amazon.com/onca/xml?AWSAccessKeyId=AKIAIOSFODNN7EXAMPLE&AssociateTag=mytag-20&ItemId=0679722769&Operation=ItemLookup&ResponseGroup=Images%2CItemAttributes%2COffers%2CReviews&Service=AWSECommerceService&Timestamp=2014-08-18T12%3A00%3A00Z&Version=2013-08-01&Signature=j7bZM0LXZ9eXeZruTqWm2DIvDYVUU3wxPPpp%2BiXxzQc%3D"; - - Span span = - TestObjects.CLIENT_SPAN.toBuilder().addAnnotation(1L, arn).addAnnotation(1L, url).build(); - - assertThat(CassandraUtil.annotationQuery(span)).contains(arn).doesNotContain(url); - } - - @Test public void annotationKeys_skipsAllocationWhenNoValidInput() { - // example too long value - String url = - "http://webservices.amazon.com/onca/xml?AWSAccessKeyId=AKIAIOSFODNN7EXAMPLE&AssociateTag=mytag-20&ItemId=0679722769&Operation=ItemLookup&ResponseGroup=Images%2CItemAttributes%2COffers%2CReviews&Service=AWSECommerceService&Timestamp=2014-08-18T12%3A00%3A00Z&Version=2013-08-01&Signature=j7bZM0LXZ9eXeZruTqWm2DIvDYVUU3wxPPpp%2BiXxzQc%3D"; - - Span span = Span.newBuilder().traceId("1").id("1").build(); - - assertThat(CassandraUtil.annotationQuery(span)).isNull(); - - span = 
span.toBuilder().addAnnotation(1L, url).putTag("http.url", url).build(); - - assertThat(CassandraUtil.annotationQuery(span)).isNull(); - } - - /** Sanity checks our bucketing scheme for numeric overflow */ - @Test public void durationIndexBucket_notNegative() { - // today isn't negative - assertThat(CassandraUtil.durationIndexBucket(TODAY * 1000L)).isNotNegative(); - // neither is 10 years from now - assertThat(CassandraUtil.durationIndexBucket((TODAY + TimeUnit.DAYS.toMillis(3654)) * 1000L)) - .isNotNegative(); - } - - @Test public void traceIdsSortedByDescTimestamp_doesntCollideOnSameTimestamp() { - Map input = new LinkedHashMap<>(); - input.put("a", 1L); - input.put("b", 1L); - input.put("c", 2L); - - Set sortedTraceIds = CassandraUtil.traceIdsSortedByDescTimestamp().map(input); - - try { - assertThat(sortedTraceIds).containsExactly("c", "b", "a"); - } catch (AssertionError e) { - assertThat(sortedTraceIds).containsExactly("c", "a", "b"); - } - } - - @Test public void getDays_consistentWithDateUtil() { - assertThat(CassandraUtil.getDays(DAYS.toMillis(2), DAYS.toMillis(1))) - .extracting(d -> d.atStartOfDay().toEpochSecond(ZoneOffset.UTC) * 1000) - .containsExactlyElementsOf(DateUtil.epochDays(DAYS.toMillis(2), DAYS.toMillis(1))); - } -} diff --git a/zipkin-storage/cassandra/src/test/java/zipkin2/storage/cassandra/ITCassandraStorage.java b/zipkin-storage/cassandra/src/test/java/zipkin2/storage/cassandra/ITCassandraStorage.java deleted file mode 100644 index d37aa3bbd38..00000000000 --- a/zipkin-storage/cassandra/src/test/java/zipkin2/storage/cassandra/ITCassandraStorage.java +++ /dev/null @@ -1,209 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.storage.cassandra; - -import java.util.List; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Disabled; -import org.junit.jupiter.api.Nested; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.TestInfo; -import org.junit.jupiter.api.TestInstance; -import org.junit.jupiter.api.extension.RegisterExtension; -import zipkin2.Span; -import zipkin2.storage.StorageComponent.Builder; - -import static java.util.Arrays.asList; -import static zipkin2.storage.cassandra.InternalForTests.writeDependencyLinks; -import static zipkin2.storage.cassandra.Schema.TABLE_AUTOCOMPLETE_TAGS; -import static zipkin2.storage.cassandra.Schema.TABLE_SERVICE_REMOTE_SERVICES; -import static zipkin2.storage.cassandra.Schema.TABLE_SERVICE_SPANS; -import static zipkin2.storage.cassandra.Schema.TABLE_TRACE_BY_SERVICE_REMOTE_SERVICE; -import static zipkin2.storage.cassandra.Schema.TABLE_TRACE_BY_SERVICE_SPAN; - -@TestInstance(TestInstance.Lifecycle.PER_CLASS) -class ITCassandraStorage { - static final List SEARCH_TABLES = asList( - TABLE_AUTOCOMPLETE_TAGS, - TABLE_SERVICE_REMOTE_SERVICES, - TABLE_SERVICE_SPANS, - TABLE_TRACE_BY_SERVICE_REMOTE_SERVICE, - TABLE_TRACE_BY_SERVICE_SPAN - ); - - @RegisterExtension CassandraStorageExtension cassandra = new CassandraStorageExtension(); - - @Nested - class ITTraces extends zipkin2.storage.ITTraces { - @Override protected Builder newStorageBuilder(TestInfo testInfo) { - return cassandra.newStorageBuilder(); - } - - @Override @Test @Disabled("No 
consumer-side span deduplication") - public void getTrace_deduplicates(TestInfo testInfo) { - } - - @Override protected void blockWhileInFlight() { - CassandraStorageExtension.blockWhileInFlight(storage); - } - - @Override public void clear() { - cassandra.clear(storage); - } - } - - @Nested - class ITSpanStore extends zipkin2.storage.ITSpanStore { - @Override protected Builder newStorageBuilder(TestInfo testInfo) { - return cassandra.newStorageBuilder(); - } - - @Override protected void blockWhileInFlight() { - CassandraStorageExtension.blockWhileInFlight(storage); - } - - @Override public void clear() { - cassandra.clear(storage); - } - } - - @Nested - class ITSearchEnabledFalse extends zipkin2.storage.ITSearchEnabledFalse { - @Override protected Builder newStorageBuilder(TestInfo testInfo) { - return cassandra.newStorageBuilder(); - } - - @Override protected void blockWhileInFlight() { - CassandraStorageExtension.blockWhileInFlight(storage); - } - - @Override public void clear() { - cassandra.clear(storage); - } - } - - @Nested - class ITStrictTraceIdFalse extends zipkin2.storage.ITStrictTraceIdFalse { - CassandraStorage strictTraceId; - - @Override protected Builder newStorageBuilder(TestInfo testInfo) { - return cassandra.newStorageBuilder(); - } - - @BeforeEach void initializeStorageBeforeSwitch() { - strictTraceId = CassandraStorageExtension.newStorageBuilder(storage.contactPoints) - .keyspace(storage.keyspace) - .build(); - } - - @AfterEach void closeStorageBeforeSwitch() { - if (strictTraceId != null) { - strictTraceId.close(); - strictTraceId = null; - } - } - - /** Ensures we can still lookup fully 128-bit traces when strict trace ID id disabled */ - @Test public void getTraces_128BitTraceId(TestInfo testInfo) throws Exception { - getTraces_128BitTraceId(accept128BitTrace(strictTraceId, testInfo), testInfo); - } - - /** Ensures data written before strict trace ID was enabled can be read */ - @Test - public void 
getTrace_retrievesBy128BitTraceId_afterSwitch(TestInfo testInfo) throws Exception { - List trace = accept128BitTrace(strictTraceId, testInfo); - - assertGetTraceReturns(trace.get(0).traceId(), trace); - } - - @Override protected void blockWhileInFlight() { - CassandraStorageExtension.blockWhileInFlight(storage); - } - - @Override public void clear() { - cassandra.clear(storage); - } - } - - @Nested - class ITServiceAndSpanNames extends zipkin2.storage.ITServiceAndSpanNames { - @Override protected Builder newStorageBuilder(TestInfo testInfo) { - return cassandra.newStorageBuilder(); - } - - @Override protected void blockWhileInFlight() { - CassandraStorageExtension.blockWhileInFlight(storage); - } - - @Override public void clear() { - cassandra.clear(storage); - } - } - - @Nested - class ITAutocompleteTags extends zipkin2.storage.ITAutocompleteTags { - @Override protected Builder newStorageBuilder(TestInfo testInfo) { - return cassandra.newStorageBuilder(); - } - - @Override protected void blockWhileInFlight() { - CassandraStorageExtension.blockWhileInFlight(storage); - } - - @Override public void clear() { - cassandra.clear(storage); - } - } - - @Nested - class ITDependencies extends zipkin2.storage.ITDependencies { - @Override protected Builder newStorageBuilder(TestInfo testInfo) { - return cassandra.newStorageBuilder(); - } - - @Override protected void blockWhileInFlight() { - CassandraStorageExtension.blockWhileInFlight(storage); - } - - @Override public void clear() { - cassandra.clear(storage); - } - - /** - * The current implementation does not include dependency aggregation. 
It includes retrieval of - * pre-aggregated links, usually made via zipkin-dependencies - */ - @Override protected void processDependencies(List spans) { - aggregateLinks(spans).forEach( - (midnight, links) -> writeDependencyLinks(storage, links, midnight)); - blockWhileInFlight(); - } - } - - @Nested - class ITSpanConsumer extends zipkin2.storage.cassandra.ITSpanConsumer { - @Override protected Builder newStorageBuilder(TestInfo testInfo) { - return cassandra.newStorageBuilder(); - } - - @Override protected void blockWhileInFlight() { - CassandraStorageExtension.blockWhileInFlight(storage); - } - - @Override public void clear() { - cassandra.clear(storage); - } - } -} diff --git a/zipkin-storage/cassandra/src/test/java/zipkin2/storage/cassandra/ITCassandraStorageHeavy.java b/zipkin-storage/cassandra/src/test/java/zipkin2/storage/cassandra/ITCassandraStorageHeavy.java deleted file mode 100644 index e2c1d88bc59..00000000000 --- a/zipkin-storage/cassandra/src/test/java/zipkin2/storage/cassandra/ITCassandraStorageHeavy.java +++ /dev/null @@ -1,146 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.storage.cassandra; - -import com.datastax.oss.driver.api.core.CqlSession; -import java.util.ArrayList; -import java.util.List; -import org.junit.jupiter.api.Nested; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.TestInfo; -import org.junit.jupiter.api.TestInstance; -import org.junit.jupiter.api.extension.RegisterExtension; -import zipkin2.Span; -import zipkin2.storage.QueryRequest; - -import static org.assertj.core.api.Assertions.assertThat; -import static zipkin2.TestObjects.DAY; -import static zipkin2.TestObjects.FRONTEND; -import static zipkin2.TestObjects.appendSuffix; -import static zipkin2.TestObjects.newTrace; -import static zipkin2.storage.ITDependencies.aggregateLinks; -import static zipkin2.storage.cassandra.InternalForTests.writeDependencyLinks; - -/** - * Large amounts of writes can make other tests flake. This can happen for reasons such as - * overloading the test Cassandra container or knock-on effects of tombstones left from {@link - * CassandraStorageExtension#clear(CassandraStorage)}. - * - *

Tests here share a different Cassandra container and each method runs in an isolated - * keyspace. As schema installation takes ~10s, hesitate adding too many tests here. - */ -@TestInstance(TestInstance.Lifecycle.PER_CLASS) -class ITCassandraStorageHeavy { - - @RegisterExtension CassandraStorageExtension backend = new CassandraStorageExtension(); - - @Nested - class ITSpanStoreHeavy extends zipkin2.storage.ITSpanStoreHeavy { - @Override protected CassandraStorage.Builder newStorageBuilder(TestInfo testInfo) { - return backend.newStorageBuilder().keyspace(InternalForTests.keyspace(testInfo)); - } - - @Override protected void blockWhileInFlight() { - CassandraStorageExtension.blockWhileInFlight(storage); - } - - @Override public void clear() { - // Intentionally don't clean up as each method runs in an isolated keyspace. This prevents - // adding more load to the shared Cassandra instance used for all tests. - } - - @Test void overFetchesToCompensateForDuplicateIndexData(TestInfo testInfo) throws Exception { - String testSuffix = testSuffix(testInfo); - int traceCount = 2000; - - List spans = new ArrayList<>(); - for (int i = 0; i < traceCount; i++) { - final long delta = i * 1000; // all timestamps happen a millisecond later - for (Span s : newTrace(testSuffix)) { - Span.Builder builder = s.toBuilder() - .timestamp(s.timestampAsLong() + delta) - .clearAnnotations(); - s.annotations().forEach(a -> builder.addAnnotation(a.timestamp() + delta, a.value())); - spans.add(builder.build()); - } - } - - accept(spans.toArray(new Span[0])); - - // Index ends up containing more rows than services * trace count, and cannot be de-duped - // in a server-side query. 
- int localServiceCount = storage.serviceAndSpanNames().getServiceNames().execute().size(); - assertThat(storage - .session() - .execute("SELECT COUNT(*) from trace_by_service_span") - .one() - .getLong(0)) - .isGreaterThan(traceCount * localServiceCount); - - // Implementation over-fetches on the index to allow the user to receive unsurprising results. - QueryRequest request = requestBuilder() - // Ensure we use serviceName so that trace_by_service_span is used - .serviceName(appendSuffix(FRONTEND.serviceName(), testSuffix)) - .lookback(DAY).limit(traceCount).build(); - assertThat(store().getTraces(request).execute()) - .hasSize(traceCount); - } - } - - @Nested - class ITDependenciesHeavy extends zipkin2.storage.ITDependenciesHeavy { - @Override protected CassandraStorage.Builder newStorageBuilder(TestInfo testInfo) { - return backend.newStorageBuilder().keyspace(InternalForTests.keyspace(testInfo)); - } - - @Override protected void blockWhileInFlight() { - CassandraStorageExtension.blockWhileInFlight(storage); - } - - @Override public void clear() { - // Intentionally don't clean up as each method runs in an isolated keyspace. This prevents - // adding more load to the shared Cassandra instance used for all tests. - } - - /** - * The current implementation does not include dependency aggregation. 
It includes retrieval of - * pre-aggregated links, usually made via zipkin-dependencies - */ - @Override protected void processDependencies(List spans) { - aggregateLinks(spans).forEach( - (midnight, links) -> writeDependencyLinks(storage, links, midnight)); - blockWhileInFlight(); - } - } - - @Nested - class ITEnsureSchema extends zipkin2.storage.cassandra.ITEnsureSchema { - @Override protected CassandraStorage.Builder newStorageBuilder(TestInfo testInfo) { - return backend.newStorageBuilder().keyspace(InternalForTests.keyspace(testInfo)); - } - - @Override CqlSession session() { - return backend.globalSession; - } - - @Override protected void blockWhileInFlight() { - CassandraStorageExtension.blockWhileInFlight(storage); - } - - @Override public void clear() { - // Intentionally don't clean up as each method runs in an isolated keyspace. This prevents - // adding more load to the shared Cassandra instance used for all tests. - } - } -} diff --git a/zipkin-storage/cassandra/src/test/java/zipkin2/storage/cassandra/ITEnsureSchema.java b/zipkin-storage/cassandra/src/test/java/zipkin2/storage/cassandra/ITEnsureSchema.java deleted file mode 100644 index 5c77f212e10..00000000000 --- a/zipkin-storage/cassandra/src/test/java/zipkin2/storage/cassandra/ITEnsureSchema.java +++ /dev/null @@ -1,149 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.storage.cassandra; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; -import java.util.List; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.TestInfo; -import zipkin2.CheckResult; -import zipkin2.Span; -import zipkin2.storage.ITStorage; -import zipkin2.storage.QueryRequest; -import zipkin2.storage.StorageComponent; - -import static java.util.Arrays.asList; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static zipkin2.TestObjects.BACKEND; -import static zipkin2.TestObjects.appendSuffix; -import static zipkin2.TestObjects.newTrace; -import static zipkin2.storage.cassandra.ITCassandraStorage.SEARCH_TABLES; -import static zipkin2.storage.cassandra.Schema.TABLE_DEPENDENCY; -import static zipkin2.storage.cassandra.Schema.TABLE_SPAN; - -/** This test is very slow as installing the schema can take 10s per method. 
*/ -abstract class ITEnsureSchema extends ITStorage { - @Override protected abstract CassandraStorage.Builder newStorageBuilder(TestInfo testInfo); - - @Override protected void configureStorageForTest(StorageComponent.Builder storage) { - ((CassandraStorage.Builder) storage) - .ensureSchema(false).autocompleteKeys(asList("environment")); - } - - @Override protected boolean initializeStoragePerTest() { - return true; // We need a different keyspace per test - } - - @Override protected void checkStorage() { - // don't check as it requires the keyspace which these tests install - } - - abstract CqlSession session(); - - @Test void installsKeyspaceWhenMissing() { - Schema.ensureExists(storage.keyspace, false, session()); - - KeyspaceMetadata metadata = session().getMetadata().getKeyspace(storage.keyspace).get(); - assertThat(metadata).isNotNull(); - } - - @Test void installsTablesWhenMissing() { - session().execute("CREATE KEYSPACE " + storage.keyspace - + " WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'};"); - - Schema.ensureExists(storage.keyspace, false, session()); - - KeyspaceMetadata metadata = session().getMetadata().getKeyspace(storage.keyspace).get(); - assertThat(metadata.getTable(TABLE_SPAN)).isNotNull(); - assertThat(metadata.getTable(TABLE_DEPENDENCY)).isNotNull(); - - for (String searchTable : SEARCH_TABLES) { - assertThat(metadata.getTable(searchTable)) - .withFailMessage("Expected to not find " + searchTable).isEmpty(); - } - } - - @Test void installsSearchTablesWhenMissing() { - session().execute("CREATE KEYSPACE " + storage.keyspace - + " WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'};"); - - Schema.ensureExists(storage.keyspace, true, session()); - - KeyspaceMetadata metadata = session().getMetadata().getKeyspace(storage.keyspace).get(); - - for (String searchTable : SEARCH_TABLES) { - assertThat(metadata.getTable(searchTable)) - .withFailMessage("Expected to find " + searchTable).isPresent(); - } 
- } - - @Test void upgradesOldSchema_autocomplete() { - Schema.applyCqlFile(storage.keyspace, session(), "/zipkin2-schema.cql"); - Schema.applyCqlFile(storage.keyspace, session(), "/zipkin2-schema-indexes-original.cql"); - - Schema.ensureExists(storage.keyspace, true, session()); - - KeyspaceMetadata metadata = session().getMetadata().getKeyspace(storage.keyspace).get(); - assertThat(Schema.hasUpgrade1_autocompleteTags(metadata)).isTrue(); - } - - @Test void upgradesOldSchema_remoteService() { - Schema.applyCqlFile(storage.keyspace, session(), "/zipkin2-schema.cql"); - Schema.applyCqlFile(storage.keyspace, session(), "/zipkin2-schema-indexes-original.cql"); - Schema.applyCqlFile(storage.keyspace, session(), "/zipkin2-schema-upgrade-1.cql"); - - Schema.ensureExists(storage.keyspace, true, session()); - - KeyspaceMetadata metadata = session().getMetadata().getKeyspace(storage.keyspace).get(); - assertThat(Schema.hasUpgrade2_remoteService(metadata)).isTrue(); - } - - /** This tests we don't accidentally rely on new indexes such as autocomplete tags */ - @Test void worksWithOldSchema(TestInfo testInfo) throws Exception { - String testSuffix = testSuffix(testInfo); - Schema.applyCqlFile(storage.keyspace, session(), "/zipkin2-schema.cql"); - Schema.applyCqlFile(storage.keyspace, session(), "/zipkin2-schema-indexes-original.cql"); - - // Ensure the storage component is functional before proceeding - CheckResult check = storage.check(); - if (!check.ok()) { - throw new AssertionError("Could not connect to storage: " - + check.error().getMessage(), check.error()); - } - - List trace = newTrace(testSuffix); - - accept(trace); - - assertGetTraceReturns(trace.get(0).traceId(), trace); - - assertThat(storage.autocompleteTags().getValues("environment").execute()) - .isEmpty(); // instead of an exception - String serviceName = trace.get(0).localServiceName(); - assertThat(storage.serviceAndSpanNames().getRemoteServiceNames(serviceName).execute()) - .isEmpty(); // instead of an 
exception - - QueryRequest request = requestBuilder() - .serviceName(serviceName) - .remoteServiceName(appendSuffix(BACKEND.serviceName(), testSuffix)).build(); - - // Make sure there's an error if a query will return incorrectly vs returning invalid results - assertThatThrownBy(() -> storage.spanStore().getTraces(request)) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage("remoteService=" + trace.get(1).remoteServiceName() + - " unsupported due to missing table remote_service_by_service"); - } -} diff --git a/zipkin-storage/cassandra/src/test/java/zipkin2/storage/cassandra/ITSpanConsumer.java b/zipkin-storage/cassandra/src/test/java/zipkin2/storage/cassandra/ITSpanConsumer.java deleted file mode 100644 index 85a554b088f..00000000000 --- a/zipkin-storage/cassandra/src/test/java/zipkin2/storage/cassandra/ITSpanConsumer.java +++ /dev/null @@ -1,162 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.storage.cassandra; - -import java.util.List; -import java.util.stream.Collectors; -import java.util.stream.IntStream; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.TestInfo; -import zipkin2.Call; -import zipkin2.Span; -import zipkin2.internal.AggregateCall; -import zipkin2.storage.ITStorage; -import zipkin2.storage.StorageComponent; -import zipkin2.storage.cassandra.internal.call.InsertEntry; - -import static java.util.Arrays.asList; -import static org.assertj.core.api.Assertions.assertThat; -import static zipkin2.Span.Kind.SERVER; -import static zipkin2.TestObjects.CLIENT_SPAN; -import static zipkin2.TestObjects.newClientSpan; -import static zipkin2.TestObjects.spanBuilder; - -abstract class ITSpanConsumer extends ITStorage { - @Override protected void configureStorageForTest(StorageComponent.Builder storage) { - storage.autocompleteKeys(asList("environment")); - } - - /** - * {@link Span#timestamp()} == 0 is likely to be a mistake, and coerces to null. It is not helpful - * to index rows who have no timestamp. - */ - @Test public void doesntIndexSpansMissingTimestamp(TestInfo testInfo) throws Exception { - String testSuffix = testSuffix(testInfo); - accept(spanBuilder(testSuffix).timestamp(0L).duration(0L).build()); - - assertThat(rowCountForTraceByServiceSpan(storage)).isZero(); - } - - /** - * Simulates a trace with a step pattern, where each span starts a millisecond after the prior - * one. The consumer code optimizes index inserts to only represent the interval represented by - * the trace as opposed to each individual timestamp. 
- */ - @Test public void skipsRedundantIndexingInATrace(TestInfo testInfo) throws Exception { - String testSuffix = testSuffix(testInfo); - Span[] trace = new Span[101]; - trace[0] = newClientSpan(testSuffix).toBuilder().kind(SERVER).build(); - - IntStream.range(0, 100).forEach(i -> trace[i + 1] = Span.newBuilder() - .traceId(trace[0].traceId()) - .parentId(trace[0].id()) - .id(i + 1) - .name("get") - .kind(Span.Kind.CLIENT) - .localEndpoint(trace[0].localEndpoint()) - .timestamp(trace[0].timestampAsLong() + i * 1000) // all peer span timestamps happen 1ms later - .duration(10L) - .build()); - - accept(trace); - assertThat(rowCountForTraceByServiceSpan(storage)) - .isGreaterThanOrEqualTo(4L); - assertThat(rowCountForTraceByServiceSpan(storage)) - .isGreaterThanOrEqualTo(4L); - - CassandraSpanConsumer withoutStrictTraceId = new CassandraSpanConsumer( - storage.session(), storage.metadata(), - false /* strictTraceId */, storage.searchEnabled, - storage.autocompleteKeys, storage.autocompleteTtl, storage.autocompleteCardinality - ); - - // sanity check base case - withoutStrictTraceId.accept(asList(trace)).execute(); - blockWhileInFlight(); - - assertThat(rowCountForTraceByServiceSpan(storage)) - .isGreaterThanOrEqualTo(120L); // TODO: magic number - assertThat(rowCountForTraceByServiceSpan(storage)) - .isGreaterThanOrEqualTo(120L); - } - - @Test public void insertTags_SelectTags_CalculateCount(TestInfo testInfo) throws Exception { - String testSuffix = testSuffix(testInfo); - Span[] trace = new Span[101]; - trace[0] = newClientSpan(testSuffix).toBuilder().kind(SERVER).build(); - - IntStream.range(0, 100).forEach(i -> trace[i + 1] = Span.newBuilder() - .traceId(trace[0].traceId()) - .parentId(trace[0].id()) - .id(i + 1) - .name("get") - .kind(Span.Kind.CLIENT) - .localEndpoint(trace[0].localEndpoint()) - .putTag("environment", "dev") - .putTag("a", "b") - .timestamp(trace[0].timestampAsLong() + i * 1000) // all peer span timestamps happen 1ms later - .duration(10L) - 
.build()); - - accept(trace); - - assertThat(rowCountForTags(storage)) - .isEqualTo(1L); // Since tag {a,b} are not in the whitelist - - assertThat(getTagValue(storage, "environment")).isEqualTo("dev"); - } - - /** It is easier to use a real Cassandra connection than mock a prepared statement. */ - @Test public void insertEntry_niceToString() { - // This test can use fake data as it is never written to cassandra - Span clientSpan = CLIENT_SPAN; - - AggregateCall acceptCall = - (AggregateCall) storage.spanConsumer().accept(asList(clientSpan)); - - List> insertEntryCalls = acceptCall.delegate().stream() - .filter(c -> c instanceof InsertEntry) - .collect(Collectors.toList()); - - assertThat(insertEntryCalls.get(0)) - .hasToString("INSERT INTO span_by_service (service, span) VALUES (frontend,get)"); - assertThat(insertEntryCalls.get(1)) - .hasToString( - "INSERT INTO remote_service_by_service (service, remote_service) VALUES (frontend,backend)"); - } - - static long rowCountForTraceByServiceSpan(CassandraStorage storage) { - return storage - .session() - .execute("SELECT COUNT(*) from " + Schema.TABLE_TRACE_BY_SERVICE_SPAN) - .one() - .getLong(0); - } - - static long rowCountForTags(CassandraStorage storage) { - return storage - .session() - .execute("SELECT COUNT(*) from " + Schema.TABLE_AUTOCOMPLETE_TAGS) - .one() - .getLong(0); - } - - static String getTagValue(CassandraStorage storage, String key) { - return storage - .session() - .execute("SELECT value from " + Schema.TABLE_AUTOCOMPLETE_TAGS + " WHERE key='environment'") - .one() - .getString(0); - } -} diff --git a/zipkin-storage/cassandra/src/test/java/zipkin2/storage/cassandra/InternalForTests.java b/zipkin-storage/cassandra/src/test/java/zipkin2/storage/cassandra/InternalForTests.java deleted file mode 100644 index af11be71ad7..00000000000 --- a/zipkin-storage/cassandra/src/test/java/zipkin2/storage/cassandra/InternalForTests.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin 
Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.storage.cassandra; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.cql.PreparedStatement; -import com.datastax.oss.driver.api.core.metadata.Metadata; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; -import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata; -import java.time.Instant; -import java.time.LocalDate; -import java.time.ZoneOffset; -import java.util.Collections; -import java.util.List; -import java.util.Optional; -import java.util.UUID; -import org.junit.jupiter.api.TestInfo; -import zipkin2.DependencyLink; - -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; -import static zipkin2.storage.cassandra.Schema.TABLE_SERVICE_REMOTE_SERVICES; - -class InternalForTests { - static CqlSession mockSession() { - CqlSession session = mock(CqlSession.class); - Metadata metadata = mock(Metadata.class); - Node node = mock(Node.class); - - when(session.getMetadata()).thenReturn(metadata); - when(metadata.getNodes()).thenReturn(Collections.singletonMap( - UUID.fromString("11111111-1111-1111-1111-111111111111"), node - )); - when(node.getCassandraVersion()).thenReturn(Version.parse("3.11.9")); - - KeyspaceMetadata keyspaceMetadata = mock(KeyspaceMetadata.class); - 
when(session.getMetadata()).thenReturn(metadata); - when(metadata.getKeyspace("zipkin2")).thenReturn(Optional.of(keyspaceMetadata)); - - when(keyspaceMetadata.getTable(TABLE_SERVICE_REMOTE_SERVICES)) - .thenReturn(Optional.of(mock(TableMetadata.class))); - return session; - } - - static void writeDependencyLinks( - CassandraStorage storage, List links, long midnightUTC) { - CqlSession session = storage.session(); - PreparedStatement prepared = session.prepare("INSERT INTO " + Schema.TABLE_DEPENDENCY - + " (day,parent,child,calls,errors)" - + " VALUES (?,?,?,?,?)"); - LocalDate day = Instant.ofEpochMilli(midnightUTC).atZone(ZoneOffset.UTC).toLocalDate(); - for (DependencyLink link : links) { - int i = 0; - storage.session().execute(prepared.bind() - .setLocalDate(i++, day) - .setString(i++, link.parent()) - .setString(i++, link.child()) - .setLong(i++, link.callCount()) - .setLong(i, link.errorCount())); - } - } - - static String keyspace(TestInfo testInfo) { - String result; - if (testInfo.getTestMethod().isPresent()) { - result = testInfo.getTestMethod().get().getName(); - } else { - assert testInfo.getTestClass().isPresent(); - result = testInfo.getTestClass().get().getSimpleName(); - } - result = result.toLowerCase(); - return result.length() <= 48 ? result : result.substring(result.length() - 48); - } -} diff --git a/zipkin-storage/cassandra/src/test/java/zipkin2/storage/cassandra/SchemaTest.java b/zipkin-storage/cassandra/src/test/java/zipkin2/storage/cassandra/SchemaTest.java deleted file mode 100644 index c8866c1081e..00000000000 --- a/zipkin-storage/cassandra/src/test/java/zipkin2/storage/cassandra/SchemaTest.java +++ /dev/null @@ -1,157 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.storage.cassandra; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.Version; -import com.datastax.oss.driver.api.core.metadata.Metadata; -import com.datastax.oss.driver.api.core.metadata.Node; -import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata; -import java.util.Collections; -import java.util.LinkedHashMap; -import java.util.Map; -import java.util.Optional; -import java.util.UUID; -import org.junit.Test; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -public class SchemaTest { - @Test public void ensureKeyspaceMetadata_failsWhenVersionLessThan3_11_3() { - CqlSession session = mock(CqlSession.class); - Metadata metadata = mock(Metadata.class); - Node node = mock(Node.class); - - when(session.getMetadata()).thenReturn(metadata); - when(metadata.getNodes()).thenReturn(Collections.singletonMap( - UUID.fromString("11111111-1111-1111-1111-111111111111"), node - )); - when(node.getCassandraVersion()).thenReturn(Version.parse("3.11.2")); - - assertThatThrownBy(() -> Schema.ensureKeyspaceMetadata(session, "zipkin2")) - .isInstanceOf(RuntimeException.class) - .hasMessage( - "Node 11111111-1111-1111-1111-111111111111 is running Cassandra 3.11.2, but minimum version is 3.11.3"); - } - - @Test public void ensureKeyspaceMetadata_failsWhenOneVersionLessThan3_11_3() { - CqlSession session = mock(CqlSession.class); - 
Metadata metadata = mock(Metadata.class); - Node node1 = mock(Node.class); - Node node2 = mock(Node.class); - Map nodes = new LinkedHashMap<>(); - nodes.put(UUID.fromString("11111111-1111-1111-1111-111111111111"), node1); - nodes.put(UUID.fromString("22222222-2222-2222-2222-222222222222"), node2); - - when(session.getMetadata()).thenReturn(metadata); - when(metadata.getNodes()).thenReturn(nodes); - when(node1.getCassandraVersion()).thenReturn(Version.parse("3.11.3")); - when(node2.getCassandraVersion()).thenReturn(Version.parse("3.11.2")); - - assertThatThrownBy(() -> Schema.ensureKeyspaceMetadata(session, "zipkin2")) - .isInstanceOf(RuntimeException.class) - .hasMessage( - "Node 22222222-2222-2222-2222-222222222222 is running Cassandra 3.11.2, but minimum version is 3.11.3"); - } - - @Test public void ensureKeyspaceMetadata_passesWhenVersion3_11_3AndKeyspaceMetadataIsNotNull() { - CqlSession session = mock(CqlSession.class); - Metadata metadata = mock(Metadata.class); - Node node = mock(Node.class); - KeyspaceMetadata keyspaceMetadata = mock(KeyspaceMetadata.class); - - when(session.getMetadata()).thenReturn(metadata); - when(metadata.getNodes()).thenReturn(Collections.singletonMap( - UUID.fromString("11111111-1111-1111-1111-111111111111"), node - )); - when(node.getCassandraVersion()).thenReturn(Version.parse("3.11.3")); - when(metadata.getKeyspace("zipkin2")).thenReturn(Optional.of(keyspaceMetadata)); - - assertThat(Schema.ensureKeyspaceMetadata(session, "zipkin2")) - .isSameAs(keyspaceMetadata); - } - - @Test public void ensureKeyspaceMetadata_passesWhenVersion3_11_4AndKeyspaceMetadataIsNotNull() { - CqlSession session = mock(CqlSession.class); - Metadata metadata = mock(Metadata.class); - Node node = mock(Node.class); - KeyspaceMetadata keyspaceMetadata = mock(KeyspaceMetadata.class); - - when(session.getMetadata()).thenReturn(metadata); - when(metadata.getNodes()).thenReturn(Collections.singletonMap( - UUID.fromString("11111111-1111-1111-1111-111111111111"), 
node - )); - when(node.getCassandraVersion()).thenReturn(Version.parse("3.11.4")); - when(metadata.getKeyspace("zipkin2")).thenReturn(Optional.of(keyspaceMetadata)); - - assertThat(Schema.ensureKeyspaceMetadata(session, "zipkin2")) - .isSameAs(keyspaceMetadata); - } - - @Test public void ensureKeyspaceMetadata_failsWhenKeyspaceMetadataIsNotNull() { - CqlSession session = mock(CqlSession.class); - Metadata metadata = mock(Metadata.class); - Node node = mock(Node.class); - - when(session.getMetadata()).thenReturn(metadata); - when(metadata.getNodes()).thenReturn(Collections.singletonMap( - UUID.fromString("11111111-1111-1111-1111-111111111111"), node - )); - when(node.getCassandraVersion()).thenReturn(Version.parse("3.11.3")); - - assertThatThrownBy(() -> Schema.ensureKeyspaceMetadata(session, "zipkin2")) - .isInstanceOf(RuntimeException.class) - .hasMessageStartingWith("Cannot read keyspace metadata for keyspace"); - } - - String schemaWithReadRepair = "" - + "CREATE TABLE IF NOT EXISTS zipkin2.remote_service_by_service (\n" - + " service text,\n" - + " remote_service text,\n" - + " PRIMARY KEY (service, remote_service)\n" - + ")\n" - + " WITH compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy', 'unchecked_tombstone_compaction': 'true', 'tombstone_threshold': '0.2'}\n" - + " AND caching = {'rows_per_partition': 'ALL'}\n" - + " AND default_time_to_live = 259200\n" - + " AND gc_grace_seconds = 3600\n" - + " AND read_repair_chance = 0\n" - + " AND dclocal_read_repair_chance = 0\n" - + " AND speculative_retry = '95percentile'\n" - + " AND comment = 'Secondary table for looking up remote service names by a service name.';"; - - @Test public void reviseCql_leaves_read_repair_chance_on_v3() { - assertThat(Schema.reviseCQL(Version.parse("3.11.9"), schemaWithReadRepair)) - .isSameAs(schemaWithReadRepair); - } - - @Test public void reviseCql_removes_dclocal_read_repair_chance_on_v4() { - assertThat(Schema.reviseCQL(Version.V4_0_0, 
schemaWithReadRepair)) - // literal used to show newlines etc are in-tact - .isEqualTo("" - + "CREATE TABLE IF NOT EXISTS zipkin2.remote_service_by_service (\n" - + " service text,\n" - + " remote_service text,\n" - + " PRIMARY KEY (service, remote_service)\n" - + ")\n" - + " WITH compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy', 'unchecked_tombstone_compaction': 'true', 'tombstone_threshold': '0.2'}\n" - + " AND caching = {'rows_per_partition': 'ALL'}\n" - + " AND default_time_to_live = 259200\n" - + " AND gc_grace_seconds = 3600\n" - + " AND speculative_retry = '95percentile'\n" - + " AND comment = 'Secondary table for looking up remote service names by a service name.';"); - } -} diff --git a/zipkin-storage/cassandra/src/test/java/zipkin2/storage/cassandra/internal/CassandraStorageBuilderTest.java b/zipkin-storage/cassandra/src/test/java/zipkin2/storage/cassandra/internal/CassandraStorageBuilderTest.java deleted file mode 100644 index 319620c9b1e..00000000000 --- a/zipkin-storage/cassandra/src/test/java/zipkin2/storage/cassandra/internal/CassandraStorageBuilderTest.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.storage.cassandra.internal; - -import java.util.List; -import java.util.function.Function; -import org.junit.Test; -import zipkin2.storage.StorageComponent; - -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.CONNECTION_POOL_LOCAL_SIZE; -import static java.util.Arrays.asList; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; - -public class CassandraStorageBuilderTest { - CassandraStorageBuilder builder = new CassandraStorageBuilder("zipkin3") { - @Override public StorageComponent build() { - return null; - } - }; - - @Test public void maxConnections_setsMaxConnectionsPerDatacenterLocalHost() { - assertThat(builder.maxConnections(16).poolingOptions().get(CONNECTION_POOL_LOCAL_SIZE)) - .isEqualTo(16); - } - - @Test public void badArguments() { - List, CassandraStorageBuilder>> badArguments = asList( - b -> b.autocompleteTtl(0), - b -> b.autocompleteCardinality(0), - b -> b.maxTraceCols(0), - b -> b.indexFetchMultiplier(0) - ); - badArguments.forEach(customizer -> - assertThatThrownBy(() -> customizer.apply(builder)) - .isInstanceOf(IllegalArgumentException.class) - ); - } - - /** Ensure NPE happens early. 
*/ - @Test public void nullPointers() { - List, CassandraStorageBuilder>> nullPointers = asList( - b -> b.autocompleteKeys(null), - b -> b.contactPoints(null), - b -> b.localDc(null), - b -> b.keyspace(null) - ); - nullPointers.forEach(customizer -> - assertThatThrownBy(() -> customizer.apply(builder)) - .isInstanceOf(NullPointerException.class) - ); - } -} diff --git a/zipkin-storage/cassandra/src/test/java/zipkin2/storage/cassandra/internal/HostAndPortTest.java b/zipkin-storage/cassandra/src/test/java/zipkin2/storage/cassandra/internal/HostAndPortTest.java deleted file mode 100644 index 56ec947812f..00000000000 --- a/zipkin-storage/cassandra/src/test/java/zipkin2/storage/cassandra/internal/HostAndPortTest.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.storage.cassandra.internal; - -import java.util.stream.Stream; -import org.junit.Test; - -import static org.assertj.core.api.Assertions.assertThat; - -// Reuses inputs from com.google.common.net.HostAndPortTest -public class HostAndPortTest { - - @Test public void parsesHost() { - Stream.of( - "google.com", - "google.com", - "192.0.2.1", - "2001::3" - ).forEach(host -> { - assertThat(HostAndPort.fromString(host, 77)) - .isEqualTo(new HostAndPort(host, 77)); - }); - } - - @Test public void parsesHost_emptyPortOk() { - assertThat(HostAndPort.fromString("gmail.com:", 77)) - .isEqualTo(new HostAndPort("gmail.com", 77)); - - assertThat(HostAndPort.fromString("192.0.2.2:", 77)) - .isEqualTo(new HostAndPort("192.0.2.2", 77)); - - assertThat(HostAndPort.fromString("[2001::2]:", 77)) - .isEqualTo(new HostAndPort("2001::2", 77)); - } - - @Test public void parsesHostAndPort() { - assertThat(HostAndPort.fromString("gmail.com:77", 1)) - .isEqualTo(new HostAndPort("gmail.com", 77)); - - assertThat(HostAndPort.fromString("192.0.2.2:77", 1)) - .isEqualTo(new HostAndPort("192.0.2.2", 77)); - - assertThat(HostAndPort.fromString("[2001::2]:77", 1)) - .isEqualTo(new HostAndPort("2001::2", 77)); - } - - @Test public void throwsOnInvalidInput() { - Stream.of( - "google.com:65536", - "google.com:9999999999", - "google.com:port", - "google.com:-25", - "google.com:+25", - "google.com:25 ", - "google.com:25\t", - "google.com:0x25 ", - "[goo.gl]", - "[goo.gl]:80", - "[", - "[]:", - "[]:80", - "[]bad", - "[[:]]", - "x:y:z", - "", - ":", - ":123" - ).forEach(hostPort -> { - try { - HostAndPort.fromString(hostPort, 77); - throw new AssertionError(hostPort + " should have failed to parse"); - } catch (IllegalArgumentException e) { - } - }); - } -} diff --git a/zipkin-storage/cassandra/src/test/java/zipkin2/storage/cassandra/internal/SessionBuilderTest.java b/zipkin-storage/cassandra/src/test/java/zipkin2/storage/cassandra/internal/SessionBuilderTest.java deleted file mode 
100644 index a688d6b89e2..00000000000 --- a/zipkin-storage/cassandra/src/test/java/zipkin2/storage/cassandra/internal/SessionBuilderTest.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.storage.cassandra.internal; - -import java.net.InetSocketAddress; -import org.junit.Test; - -import static org.assertj.core.api.Assertions.assertThat; -import static zipkin2.storage.cassandra.internal.SessionBuilder.parseContactPoints; - -public class SessionBuilderTest { - @Test public void contactPoints_defaultsToLocalhost() { - assertThat(parseContactPoints("localhost")) - .containsExactly(new InetSocketAddress("127.0.0.1", 9042)); - } - - @Test public void contactPoints_defaultsToPort9042() { - assertThat(parseContactPoints("1.1.1.1")) - .containsExactly(new InetSocketAddress("1.1.1.1", 9042)); - } - - @Test public void contactPoints_defaultsToPort9042_multi() { - assertThat(parseContactPoints("1.1.1.1:9143,2.2.2.2")) - .containsExactly( - new InetSocketAddress("1.1.1.1", 9143), new InetSocketAddress("2.2.2.2", 9042)); - } - - @Test public void contactPoints_hostAndPort() { - assertThat(parseContactPoints("1.1.1.1:9142")) - .containsExactly(new InetSocketAddress("1.1.1.1", 9142)); - } -} diff --git a/zipkin-storage/cassandra/src/test/java/zipkin2/storage/cassandra/internal/call/DeduplicatingInsertTest.java 
b/zipkin-storage/cassandra/src/test/java/zipkin2/storage/cassandra/internal/call/DeduplicatingInsertTest.java deleted file mode 100644 index a26446f1523..00000000000 --- a/zipkin-storage/cassandra/src/test/java/zipkin2/storage/cassandra/internal/call/DeduplicatingInsertTest.java +++ /dev/null @@ -1,160 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.storage.cassandra.internal.call; - -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import java.util.concurrent.atomic.AtomicReference; -import org.junit.jupiter.api.Disabled; -import org.junit.jupiter.api.Test; -import zipkin2.Call; -import zipkin2.Callback; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.failBecauseExceptionWasNotThrown; -import static org.mockito.Mockito.mock; - -public class DeduplicatingInsertTest { - @Test void dedupesSameCalls() throws Exception { - TestFactory testFactory = new TestFactory(); - - List> calls = new ArrayList<>(); - testFactory.maybeAdd("foo", calls); - testFactory.maybeAdd("bar", calls); - testFactory.maybeAdd("foo", calls); - testFactory.maybeAdd("bar", calls); - testFactory.maybeAdd("bar", calls); - assertThat(calls).hasSize(2); - - for (Call call : calls) { - call.execute(); - } - 
assertThat(testFactory.values).containsExactly("foo", "bar"); - } - - Callback assertFailOnError = new Callback() { - @Override public void onSuccess(Void value) { - } - - @Override public void onError(Throwable t) { - throw (AssertionError) t; - } - }; - - @Test void enqueuesInOrder() { - TestFactory testFactory = new TestFactory(); - - List> calls = new ArrayList<>(); - testFactory.maybeAdd("foo", calls); - testFactory.maybeAdd("bar", calls); - - for (Call call : calls) { - call.enqueue(assertFailOnError); - } - assertThat(testFactory.values).containsExactly("foo", "bar"); - } - - @Disabled("Flakey: https://github.com/openzipkin/zipkin/issues/3255") - @Test void exceptionsInvalidate_enqueue() { - TestFactory testFactory = new TestFactory(); - - List> calls = new ArrayList<>(); - testFactory.maybeAdd("foo", calls); - testFactory.maybeAdd("bar", calls); - - testFactory.failValue.set("foo"); - - try { - calls.get(0).enqueue(assertFailOnError); - failBecauseExceptionWasNotThrown(AssertionError.class); - } catch (AssertionError e) { - } - - calls.get(1).enqueue(assertFailOnError); - assertThat(testFactory.values).containsExactly("bar"); - - calls.clear(); - testFactory.maybeAdd("foo", calls); - assertThat(calls).isNotEmpty(); // invalidates on exception - - calls.get(0).enqueue(assertFailOnError); - assertThat(testFactory.values).containsExactly("bar", "foo"); - } - - @Test void exceptionsInvalidate_execute() throws Exception { - TestFactory testFactory = new TestFactory(); - - List> calls = new ArrayList<>(); - testFactory.maybeAdd("foo", calls); - testFactory.maybeAdd("bar", calls); - - testFactory.failValue.set("foo"); - - try { - calls.get(0).execute(); - failBecauseExceptionWasNotThrown(AssertionError.class); - } catch (AssertionError e) { - } - - calls.get(1).execute(); - assertThat(testFactory.values).containsExactly("bar"); - - calls.clear(); - testFactory.maybeAdd("foo", calls); - assertThat(calls).isNotEmpty(); // invalidates on exception - - 
calls.get(0).execute(); - assertThat(testFactory.values).containsExactly("bar", "foo"); - } - - static final class TestFactory extends DeduplicatingInsert.Factory { - List values = new ArrayList<>(); - AtomicReference failValue = new AtomicReference<>(); - - TestFactory() { - super(1000, 1000); - } - - @Override protected Call newCall(String string) { - return new TestDeduplicatingInsert(this, string); - } - } - - static final class TestDeduplicatingInsert extends DeduplicatingInsert { - final TestFactory factory; - - TestDeduplicatingInsert(TestFactory factory, String input) { - super(factory.delayLimiter, input); - this.factory = factory; - } - - @Override protected CompletionStage newCompletionStage() { - if (input.equals(factory.failValue.get())) { - factory.failValue.set(null); - CompletableFuture result = new CompletableFuture<>(); - result.completeExceptionally(new AssertionError()); - return result; - } - factory.values.add(input); - return CompletableFuture.completedFuture(mock(AsyncResultSet.class)); - } - - @Override public Call clone() { - return new TestDeduplicatingInsert(factory, input); - } - } -} diff --git a/zipkin-storage/cassandra/src/test/java/zipkin2/storage/cassandra/internal/call/ResultSetFutureCallTest.java b/zipkin-storage/cassandra/src/test/java/zipkin2/storage/cassandra/internal/call/ResultSetFutureCallTest.java deleted file mode 100644 index 09c818abb97..00000000000 --- a/zipkin-storage/cassandra/src/test/java/zipkin2/storage/cassandra/internal/call/ResultSetFutureCallTest.java +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.storage.cassandra.internal.call; - -import com.datastax.oss.driver.api.core.RequestThrottlingException; -import com.datastax.oss.driver.api.core.connection.BusyConnectionException; -import com.datastax.oss.driver.api.core.cql.AsyncResultSet; -import com.datastax.oss.driver.api.core.servererrors.QueryConsistencyException; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionStage; -import org.junit.Test; -import zipkin2.Call; -import zipkin2.Callback; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.mockito.Mockito.mock; - -public class ResultSetFutureCallTest { - CompletableFuture future = new CompletableFuture<>(); - AsyncResultSet resultSet = mock(AsyncResultSet.class); - - ResultSetFutureCall call = new ResultSetFutureCall() { - @Override protected CompletionStage newCompletionStage() { - return ResultSetFutureCallTest.this.future; - } - - @Override public Call clone() { - return null; - } - - @Override public AsyncResultSet map(AsyncResultSet input) { - return input; - } - }; - - static final class CompletableCallback extends CompletableFuture implements Callback { - @Override public void onSuccess(T value) { - complete(value); - } - - @Override public void onError(Throwable t) { - completeExceptionally(t); - } - } - - CompletableCallback callback = new CompletableCallback<>(); - - @Test public void enqueue_cancel_beforeCreateFuture() { - call.cancel(); - - assertThat(call.isCanceled()).isTrue(); - } 
- - @Test public void enqueue_callsFutureGet() throws Exception { - call.enqueue(callback); - - future.complete(resultSet); - - assertThat(callback.get()).isEqualTo(resultSet); - } - - @Test public void enqueue_cancel_afterEnqueue() { - call.enqueue(callback); - call.cancel(); - - assertThat(call.isCanceled()).isTrue(); - // this.future will be wrapped, so can't check if that is canceled. - assertThat(call.future.isCancelled()).isTrue(); - } - - @Test public void enqueue_callbackError_onErrorCreatingFuture() { - IllegalArgumentException error = new IllegalArgumentException(); - call = new ResultSetFutureCall() { - @Override protected CompletionStage newCompletionStage() { - throw error; - } - - @Override public Call clone() { - return null; - } - - @Override public AsyncResultSet map(AsyncResultSet input) { - return input; - } - }; - - call.enqueue(callback); - - // ensure the callback received the exception - assertThat(callback.isCompletedExceptionally()).isTrue(); - assertThatThrownBy(callback::get).hasCause(error); - } - - // below are load related exceptions which should result in a backoff of storage requests - @Test public void isOverCapacity() { - assertThat(ResultSetFutureCall.isOverCapacity( - new RequestThrottlingException("The session is shutting down"))).isTrue(); - assertThat(ResultSetFutureCall.isOverCapacity(new BusyConnectionException(100))).isTrue(); - assertThat(ResultSetFutureCall.isOverCapacity(mock(QueryConsistencyException.class))).isTrue(); - - // not applicable - assertThat(ResultSetFutureCall.isOverCapacity( - new IllegalStateException("Rejected execution"))).isFalse(); - } -} diff --git a/zipkin-storage/cassandra/src/test/resources/autocomplete_tags-stress.yaml b/zipkin-storage/cassandra/src/test/resources/autocomplete_tags-stress.yaml deleted file mode 100644 index a0dc7dc3c19..00000000000 --- a/zipkin-storage/cassandra/src/test/resources/autocomplete_tags-stress.yaml +++ /dev/null @@ -1,49 +0,0 @@ -### -### Stress test for 
stress_zipkin2.autocomplete_tags table -### -### Stress testing is done using the `cassandra-stress` tool -### -### For example -### cqlsh -f zipkin2-test-schema.cql -### cassandra-stress user profile=autocomplete_tags-stress.yaml ops\(insert=1\) no-warmup duration=1m -rate threads=4 throttle=50/s -### -### after a benchmark has been run with only writes, a mixed read-write benchmark can be run with -### cassandra-stress user profile=autocomplete_tags-stress.yaml ops\(insert=1,select=1,select_values=1\) duration=1m -rate threads=4 throttle=50/s - -# Keyspace Name -keyspace: stress_zipkin2 - -# Table name -table: autocomplete_tags - - -### Column Distribution Specifications ### - -columnspec: - - name: key - size: uniform(5..20) - population: uniform(1..5) - - - name: value - size: uniform(5..20) - population: uniform(1..100) - - -### Batch Ratio Distribution Specifications ### - -insert: - partitions: fixed(1) # 1 partition key at a time inserts to model a message being generated - select: fixed(1)/1000 - batchtype: UNLOGGED # Unlogged batches - - -# -# A set of basic queries -# -queries: - select: - cql: SELECT DISTINCT key FROM autocomplete_tags - fields: samerow - select_values: - cql: SELECT value FROM autocomplete_tags WHERE key = ? 
LIMIT 10000 - fields: samerow diff --git a/zipkin-storage/cassandra/src/test/resources/remote_service_by_service-stress.yaml b/zipkin-storage/cassandra/src/test/resources/remote_service_by_service-stress.yaml deleted file mode 100644 index b5d67fb8342..00000000000 --- a/zipkin-storage/cassandra/src/test/resources/remote_service_by_service-stress.yaml +++ /dev/null @@ -1,49 +0,0 @@ -### -### Stress test for stress_zipkin2.service_remote_service_index table -### -### Stress testing is done using the `cassandra-stress` tool -### -### For example -### cqlsh -f zipkin2-test-schema.cql -### cassandra-stress user profile=remote_service_by_service-stress.yaml ops\(insert=1\) no-warmup duration=1m -rate threads=4 throttle=50/s -### -### after a benchmark has been run with only writes, a mixed read-write benchmark can be run with -### cassandra-stress user profile=remote_service_by_service-stress.yaml ops\(insert=1,select=1,select_remote_services=1\) duration=1m -rate threads=4 throttle=50/s - -# Keyspace Name -keyspace: stress_zipkin2 - -# Table name -table: remote_service_by_service - - -### Column Distribution Specifications ### - -columnspec: - - name: service - size: uniform(5..20) - population: uniform(1..100) - - - name: remote_service - size: uniform(5..20) - population: uniform(1..100) - - -### Batch Ratio Distribution Specifications ### - -insert: - partitions: fixed(1) # 1 partition key at a time inserts to model a message being generated - select: fixed(1)/1000 - batchtype: UNLOGGED # Unlogged batches - - -# -# A set of basic queries -# -queries: - select: - cql: SELECT DISTINCT service FROM remote_service_by_service - fields: samerow - select_remote_services: - cql: SELECT remote_service FROM remote_service_by_service WHERE service = ? 
LIMIT 1000 - fields: samerow diff --git a/zipkin-storage/cassandra/src/test/resources/simplelogger.properties b/zipkin-storage/cassandra/src/test/resources/simplelogger.properties deleted file mode 100644 index af93d53119e..00000000000 --- a/zipkin-storage/cassandra/src/test/resources/simplelogger.properties +++ /dev/null @@ -1,22 +0,0 @@ -# See https://www.slf4j.org/api/org/slf4j/impl/SimpleLogger.html for the full list of config options -org.slf4j.simpleLogger.logFile=System.out -org.slf4j.simpleLogger.defaultLogLevel=warn -org.slf4j.simpleLogger.showDateTime=true -org.slf4j.simpleLogger.dateTimeFormat=yyyy-MM-dd HH:mm:ss:SSS - -# Note: this will dump a large amount of data in the logs -#org.slf4j.simpleLogger.log.zipkin2.storage.cassandra=info -#org.slf4j.simpleLogger.log.com.datastax.oss.driver.internal.core.tracker.RequestLogger=trace -# our tests check for schema portability, so hush lack of schema logs -org.slf4j.simpleLogger.log.zipkin2.storage.cassandra.Schema=off -# don't spam about SASI -org.slf4j.simpleLogger.log.com.datastax.oss.driver.internal.core.cql.CqlRequestHandler=error -# ignore connection close errors when polling for cassandra to start -org.slf4j.simpleLogger.log.com.datastax.oss.driver.internal.core.control.ControlConnection=error -# ignore warnings about too many sessions -org.slf4j.simpleLogger.log.com.datastax.oss.driver.internal.core.session.DefaultSession=error -# stop huge spam -org.slf4j.simpleLogger.log.org.testcontainers.dockerclient=off - -# Schema install takes a while. 
Log basic information to prevent CI from thinking things are hung -org.slf4j.simpleLogger.log.zipkin2.storage.cassandra.CassandraStorageExtension=info diff --git a/zipkin-storage/cassandra/src/test/resources/span-stress.yaml b/zipkin-storage/cassandra/src/test/resources/span-stress.yaml deleted file mode 100644 index 9f49b0a7b68..00000000000 --- a/zipkin-storage/cassandra/src/test/resources/span-stress.yaml +++ /dev/null @@ -1,80 +0,0 @@ -### Stress test for stress_zipkin2.span table -### -### Stress testing is done using the `cassandra-stress` tool -### -### For example -### cqlsh -f zipkin2-test-schema.cql -### cassandra-stress user profile=span-stress.yaml ops\(insert=1\) no-warmup duration=1m -rate threads=4 throttle=50/s -### -### after a benchmark has been run with only writes, a mixed read-write benchmark can be run with -### cassandra-stress user profile=span-stress.yaml ops\(insert=1,by_trace=1,by_trace_ts_id=1,by_annotation=1\) duration=1m -rate threads=4 throttle=50/s - -# Keyspace Name -keyspace: stress_zipkin2 - -# Table name -table: span - - -### Column Distribution Specifications ### -# - -columnspec: - - name: trace_id - size: fixed(32) - population: uniform(1..10k) - - - name: ts_uuid - population: uniform(1..10k) - - - name: id - size: fixed(32) - population: uniform(1..10k) - - - name: ts - size: fixed(12) - population: uniform(1..10k) - - - name: span - size: uniform(5..20) - population: uniform(1..100) - - - name: parent_id - size: uniform(5..20) - population: uniform(1..100) - - - name: duration - size: fixed(12) - population: uniform(1..10k) - - - name: l_service - size: uniform(5..20) - population: uniform(1..100) - - - name: annotation_query - size: gaussian(50..500) - population: uniform(1..1k) - - - -### Batch Ratio Distribution Specifications ### - -insert: - partitions: fixed(1) # 1 partition key at a time inserts to model a message being generated - select: fixed(1)/1000 - batchtype: UNLOGGED # Unlogged batches - - -# -# A set of basic 
queries -# -queries: - by_trace: - cql: SELECT * FROM span WHERE trace_id = ? - fields: samerow - by_trace_ts_id: - cql: SELECT * FROM span WHERE trace_id = ? AND ts_uuid = ? AND id = ? - fields: samerow - by_annotation: - cql: SELECT trace_id, ts, id FROM span WHERE l_service = ? AND annotation_query LIKE ? ALLOW FILTERING - fields: samerow diff --git a/zipkin-storage/cassandra/src/test/resources/span_by_service-stress.yaml b/zipkin-storage/cassandra/src/test/resources/span_by_service-stress.yaml deleted file mode 100644 index 1c5304976b4..00000000000 --- a/zipkin-storage/cassandra/src/test/resources/span_by_service-stress.yaml +++ /dev/null @@ -1,49 +0,0 @@ -### -### Stress test for stress_zipkin2.span_by_service table -### -### Stress testing is done using the `cassandra-stress` tool -### -### For example -### cqlsh -f zipkin2-test-schema.cql -### cassandra-stress user profile=span_by_service-stress.yaml ops\(insert=1\) no-warmup duration=1m -rate threads=4 throttle=50/s -### -### after a benchmark has been run with only writes, a mixed read-write benchmark can be run with -### cassandra-stress user profile=span_by_service-stress.yaml ops\(insert=1,select=1,select_spans=1\) duration=1m -rate threads=4 throttle=50/s - -# Keyspace Name -keyspace: stress_zipkin2 - -# Table name -table: span_by_service - - -### Column Distribution Specifications ### - -columnspec: - - name: service - size: uniform(5..20) - population: uniform(1..100) - - - name: span - size: uniform(5..20) - population: uniform(1..100) - - -### Batch Ratio Distribution Specifications ### - -insert: - partitions: fixed(1) # 1 partition key at a time inserts to model a message being generated - select: fixed(1)/1000 - batchtype: UNLOGGED # Unlogged batches - - -# -# A set of basic queries -# -queries: - select: - cql: SELECT DISTINCT service FROM span_by_service - fields: samerow - select_spans: - cql: SELECT span FROM span_by_service WHERE service = ? 
LIMIT 10000 - fields: samerow diff --git a/zipkin-storage/cassandra/src/test/resources/trace_by_service_remote_service-stress.yaml b/zipkin-storage/cassandra/src/test/resources/trace_by_service_remote_service-stress.yaml deleted file mode 100644 index 5cf8b383a11..00000000000 --- a/zipkin-storage/cassandra/src/test/resources/trace_by_service_remote_service-stress.yaml +++ /dev/null @@ -1,61 +0,0 @@ -### -### Stress test for stress_zipkin2.trace_by_service_remote_service table -### -### Stress testing is done using the `cassandra-stress` tool -### -### For example -### cqlsh -f zipkin2-test-schema.cql -### cassandra-stress user profile=trace_by_service_remote_service-stress.yaml ops\(insert=1\) no-warmup duration=1m -rate threads=4 throttle=50/s -### -### after a benchmark has been run with only writes, a mixed read-write benchmark can be run with -### cassandra-stress user profile=trace_by_service_remote_service-stress.yaml ops\(insert=1,select=1\) duration=1m -rate threads=4 throttle=50/s - -# Keyspace Name -keyspace: stress_zipkin2 - -# Table name -table: trace_by_service_remote_service - - -### Column Distribution Specifications ### - -columnspec: - - name: service - size: uniform(5..20) - population: uniform(1..100) - - - name: remote_service - size: uniform(5..20) - population: uniform(1..100) - - - name: bucket - size: fixed(12) - population: fixed(123456789012) - - - name: ts - size: fixed(12) - population: uniform(1..10k) - - - name: trace_id - size: fixed(32) - population: uniform(1..10k) - - -### Batch Ratio Distribution Specifications ### - -insert: - partitions: fixed(1) # 1 partition key at a time inserts to model a message being generated - select: fixed(1)/1000 - batchtype: UNLOGGED # Unlogged batches - - -# -# A set of basic queries -# -queries: - select: - cql: SELECT * FROM trace_by_service_remote_service WHERE service = ? AND remote_service = ? AND bucket = ? 
LIMIT 1 - fields: samerow - -# TODO: adrian doesn't know how to make a timestamp range query from test data -# search by timestamp range diff --git a/zipkin-storage/cassandra/src/test/resources/trace_by_service_span-stress.yaml b/zipkin-storage/cassandra/src/test/resources/trace_by_service_span-stress.yaml deleted file mode 100644 index 381520619ee..00000000000 --- a/zipkin-storage/cassandra/src/test/resources/trace_by_service_span-stress.yaml +++ /dev/null @@ -1,70 +0,0 @@ -### -### Stress test for stress_zipkin2.trace_by_service_span table -### -### Stress testing is done using the `cassandra-stress` tool -### -### For example -### cqlsh -f zipkin2-test-schema.cql -### cassandra-stress user profile=trace_by_service_span-stress.yaml ops\(insert=1\) no-warmup duration=1m -rate threads=4 throttle=50/s -### -### after a benchmark has been run with only writes, a mixed read-write benchmark can be run with -### cassandra-stress user profile=trace_by_service_span-stress.yaml ops\(insert=1,select=1,by_duration=1\) duration=1m -rate threads=4 throttle=50/s - -# Keyspace Name -keyspace: stress_zipkin2 - -# Table name -table: trace_by_service_span - - -### Column Distribution Specifications ### - -columnspec: - - name: service - size: uniform(5..20) - population: uniform(1..100) - - - name: span - size: uniform(5..20) - population: uniform(1..100) - - - name: bucket - size: fixed(12) - population: fixed(123456789012) - - - name: ts - size: fixed(12) - population: uniform(1..10k) - - - name: trace_id - size: fixed(32) - population: uniform(1..10k) - - - name: duration - size: fixed(12) - population: uniform(1..10k) - - - -### Batch Ratio Distribution Specifications ### - -insert: - partitions: fixed(1) # 1 partition key at a time inserts to model a message being generated - select: fixed(1)/1000 - batchtype: UNLOGGED # Unlogged batches - - -# -# A set of basic queries -# -queries: - select: - cql: SELECT * FROM trace_by_service_span WHERE service = ? AND span = ? 
AND bucket = ? LIMIT 1 - fields: samerow - by_duration: - cql: SELECT * FROM trace_by_service_span WHERE service = ? AND span = ? AND bucket = ? AND duration < ? LIMIT 1 - fields: samerow - -# TODO: adrian doesn't know how to make a timestamp range query from test data -# search by timestamp range, -# search by timestamp range and duration diff --git a/zipkin-storage/cassandra/src/test/resources/zipkin2-schema-indexes-original.cql b/zipkin-storage/cassandra/src/test/resources/zipkin2-schema-indexes-original.cql deleted file mode 100644 index 1d31d56314e..00000000000 --- a/zipkin-storage/cassandra/src/test/resources/zipkin2-schema-indexes-original.cql +++ /dev/null @@ -1,49 +0,0 @@ -ALTER TABLE zipkin2.span ADD l_service text; -ALTER TABLE zipkin2.span ADD annotation_query text; //-- can't do SASI on set: ░-joined until CASSANDRA-11182 - -CREATE TABLE IF NOT EXISTS zipkin2.trace_by_service_span ( - service text, //-- service name - span text, //-- span name, or blank for queries without span name - bucket int, //-- time bucket, calculated as ts/interval (in microseconds), for some pre-configured interval like 1 day. - ts timeuuid, //-- start timestamp of the span, truncated to millisecond precision - trace_id text, //-- trace ID - duration bigint, //-- span duration, in milliseconds - PRIMARY KEY ((service, span, bucket), ts) -) - WITH CLUSTERING ORDER BY (ts DESC) - AND compaction = {'class': 'org.apache.cassandra.db.compaction.TimeWindowCompactionStrategy'} - AND default_time_to_live = 259200 - AND gc_grace_seconds = 3600 - AND read_repair_chance = 0 - AND dclocal_read_repair_chance = 0 - AND speculative_retry = '95percentile' - AND comment = 'Secondary table for looking up a trace by a service, or service and span. span column may be blank (when only looking up by service). bucket column adds time bucketing to the partition key, values are microseconds rounded to a pre-configured interval (typically one day). 
ts column is start timestamp of the span as time-uuid, truncated to millisecond precision. duration column is span duration, rounded up to tens of milliseconds (or hundredths of seconds)'; - -CREATE TABLE IF NOT EXISTS zipkin2.span_by_service ( - service text, - span text, - PRIMARY KEY (service, span) -) - WITH compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy', 'unchecked_tombstone_compaction': 'true', 'tombstone_threshold': '0.2'} - AND caching = {'rows_per_partition': 'ALL'} - AND default_time_to_live = 259200 - AND gc_grace_seconds = 3600 - AND read_repair_chance = 0 - AND dclocal_read_repair_chance = 0 - AND speculative_retry = '95percentile' - AND comment = 'Secondary table for looking up span names by a service name.'; - -DROP INDEX IF EXISTS zipkin2.span_annotation_query_idx; - -CREATE CUSTOM INDEX IF NOT EXISTS ON zipkin2.span (annotation_query) USING 'org.apache.cassandra.index.sasi.SASIIndex' - WITH OPTIONS = { - 'mode': 'PREFIX', - 'analyzed': 'true', - 'analyzer_class':'org.apache.cassandra.index.sasi.analyzer.DelimiterAnalyzer', - 'delimiter': '░'}; - -CREATE CUSTOM INDEX IF NOT EXISTS ON zipkin2.span (l_service) USING 'org.apache.cassandra.index.sasi.SASIIndex' - WITH OPTIONS = {'mode': 'PREFIX'}; - -CREATE CUSTOM INDEX IF NOT EXISTS ON zipkin2.trace_by_service_span (duration) USING 'org.apache.cassandra.index.sasi.SASIIndex' - WITH OPTIONS = {'mode': 'PREFIX'}; diff --git a/zipkin-storage/cassandra/src/test/resources/zipkin2-test-schema.cql b/zipkin-storage/cassandra/src/test/resources/zipkin2-test-schema.cql deleted file mode 100644 index 9450640b8dd..00000000000 --- a/zipkin-storage/cassandra/src/test/resources/zipkin2-test-schema.cql +++ /dev/null @@ -1,115 +0,0 @@ -CREATE KEYSPACE IF NOT EXISTS stress_zipkin2 WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'} AND durable_writes = false; - -//-- same schema but remove all UDTs and collections (as cassandra-stress doesn't support them) - 
-CREATE TABLE IF NOT EXISTS stress_zipkin2.span ( - trace_id text, // when strictTraceId=false, only contains right-most 16 chars - ts_uuid timeuuid, - id text, - trace_id_high text, // when strictTraceId=false, contains left-most 16 chars if present - parent_id text, - kind text, - span text, // span.name - ts bigint, - duration bigint, - shared boolean, - debug boolean, - l_service text, - annotation_query text, //-- can't do SASI on set: ░-joined until CASSANDRA-11182 - PRIMARY KEY (trace_id, ts_uuid, id) -) - WITH CLUSTERING ORDER BY (ts_uuid DESC) - AND compaction = {'class': 'org.apache.cassandra.db.compaction.TimeWindowCompactionStrategy'} - AND default_time_to_live = 604800 - AND gc_grace_seconds = 3600 - AND read_repair_chance = 0 - AND dclocal_read_repair_chance = 0.0 - AND speculative_retry = '95percentile' - AND comment = 'Primary table for holding trace data'; -CREATE CUSTOM INDEX IF NOT EXISTS ON stress_zipkin2.span (l_service) USING 'org.apache.cassandra.index.sasi.SASIIndex' - WITH OPTIONS = {'mode': 'PREFIX'}; -CREATE CUSTOM INDEX IF NOT EXISTS ON stress_zipkin2.span (annotation_query) USING 'org.apache.cassandra.index.sasi.SASIIndex' - WITH OPTIONS = { - 'mode': 'PREFIX', - 'analyzed': 'true', - 'analyzer_class':'org.apache.cassandra.index.sasi.analyzer.DelimiterAnalyzer', - 'delimiter': '░'}; - -CREATE TABLE IF NOT EXISTS stress_zipkin2.trace_by_service_span ( - service text, //-- service name - span text, //-- span name, or blank for queries without span name - bucket int, //-- time bucket, calculated as ts/interval (in microseconds), for some pre-configured interval like 1 day. 
- ts timeuuid, //-- start timestamp of the span, truncated to millisecond precision - trace_id text, //-- trace ID - duration bigint, //-- span duration, in milliseconds - PRIMARY KEY ((service, span, bucket), ts) -) - WITH CLUSTERING ORDER BY (ts DESC) - AND compaction = {'class': 'org.apache.cassandra.db.compaction.TimeWindowCompactionStrategy'} - AND default_time_to_live = 259200 - AND gc_grace_seconds = 3600 - AND read_repair_chance = 0 - AND dclocal_read_repair_chance = 0 - AND speculative_retry = '95percentile' - AND comment = 'Secondary table for looking up a trace by a service, or service and span. span column may be blank (when only looking up by service). bucket column adds time bucketing to the partition key, values are microseconds rounded to a pre-configured interval (typically one day). ts column is start timestamp of the span as time-uuid, truncated to millisecond precision. duration column is span duration, rounded up to tens of milliseconds (or hundredths of seconds)'; -CREATE CUSTOM INDEX IF NOT EXISTS ON stress_zipkin2.trace_by_service_span (duration) USING 'org.apache.cassandra.index.sasi.SASIIndex' - WITH OPTIONS = {'mode': 'PREFIX'}; - -CREATE TABLE IF NOT EXISTS stress_zipkin2.trace_by_service_remote_service ( - service text, //-- service name - remote_service text, //-- remote servie name - bucket int, //-- time bucket, calculated as ts/interval (in microseconds), for some pre-configured interval like 1 day. 
- ts timeuuid, //-- start timestamp of the span, truncated to millisecond precision - trace_id text, //-- trace ID - PRIMARY KEY ((service, remote_service, bucket), ts) -) - WITH CLUSTERING ORDER BY (ts DESC) - AND compaction = {'class': 'org.apache.cassandra.db.compaction.TimeWindowCompactionStrategy'} - AND default_time_to_live = 259200 - AND gc_grace_seconds = 3600 - AND read_repair_chance = 0 - AND dclocal_read_repair_chance = 0 - AND speculative_retry = '95percentile' - AND comment = 'Secondary table for looking up a trace by a remote service. bucket column adds time bucketing to the partition key, values are microseconds rounded to a pre-configured interval (typically one day). ts column is start timestamp of the span as time-uuid, truncated to millisecond precision.'; - -CREATE TABLE IF NOT EXISTS stress_zipkin2.span_by_service ( - service text, - span text, - PRIMARY KEY (service, span) -) - WITH compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy', 'unchecked_tombstone_compaction': 'true', 'tombstone_threshold': '0.2'} - AND caching = {'rows_per_partition': 'ALL'} - AND default_time_to_live = 259200 - AND gc_grace_seconds = 3600 - AND read_repair_chance = 0 - AND dclocal_read_repair_chance = 0 - AND speculative_retry = '95percentile' - AND comment = 'Secondary table for looking up span names by a service name. 
To compensate for hot partitions, we deduplicate write client side, use LeveledCompactionStrategy with a low threshold and add row caching.'; - -CREATE TABLE IF NOT EXISTS stress_zipkin2.remote_service_by_service ( - service text, - remote_service text, - PRIMARY KEY (service, remote_service) -) - WITH compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy', 'unchecked_tombstone_compaction': 'true', 'tombstone_threshold': '0.2'} - AND caching = {'rows_per_partition': 'ALL'} - AND default_time_to_live = 259200 - AND gc_grace_seconds = 3600 - AND read_repair_chance = 0 - AND dclocal_read_repair_chance = 0 - AND speculative_retry = '95percentile' - AND comment = 'Secondary table for looking up remote service names by a service name. To compensate for hot partitions, we deduplicate write client side, use LeveledCompactionStrategy with a low threshold and add row caching.'; - -CREATE TABLE IF NOT EXISTS stress_zipkin2.autocomplete_tags ( - key text, - value text, - PRIMARY KEY (key, value) -) - WITH compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy', 'unchecked_tombstone_compaction': 'true', 'tombstone_threshold': '0.2'} - AND caching = {'rows_per_partition': 'ALL'} - AND default_time_to_live = 259200 - AND gc_grace_seconds = 3600 - AND read_repair_chance = 0 - AND dclocal_read_repair_chance = 0 - AND speculative_retry = '95percentile' - AND comment = 'Secondary table for looking up span tag values for auto-complete purposes. 
To compensate for hot partitions, we deduplicate write client side, use LeveledCompactionStrategy with a low threshold and add row caching.'; diff --git a/zipkin-storage/elasticsearch/README.md b/zipkin-storage/elasticsearch/README.md deleted file mode 100644 index e2b539e13e8..00000000000 --- a/zipkin-storage/elasticsearch/README.md +++ /dev/null @@ -1,168 +0,0 @@ -# storage-elasticsearch - -This is is a plugin to the Elasticsearch storage component, which uses -HTTP by way of [Armeria](https://github.com/line/armeria) and -[Jackson](https://github.com/FasterXML/jackson). This uses Elasticsearch 5+ -features, but is tested against Elasticsearch 6-7.x. - -## Multiple hosts -Most users will supply a DNS name that's mapped to multiple A or AAAA -records. For example, `http://elasticsearch:9200` will use normal host -lookups to get the list of IP addresses, though you can alternatively supply -a list of http base urls. In either case, all of the resolved IP addresses -from all provided hosts will be iterated over round-robin, with requests made -only to healthy addresses. - -Here are some examples: - -* http://1.1.1.1:9200,http://2.2.2.2:19200 -* http://1.1.1.1:9200,http://[2001:db8::c001]:9200 -* http://elasticsearch:9200,http://1.2.3.4:9200 -* http://elasticsearch-1:9200,http://elasticsearch-2:9200 - -## Format -Spans are stored in version 2 format, which is the same as the [v2 POST endpoint](https://zipkin.io/zipkin-api/#/default/post_spans) -with one difference described below. We add a "timestamp_millis" field -to aid in integration with other tools. - -### Timestamps -Zipkin's timestamps are in epoch microseconds, which is not a supported date type in Elasticsearch. -In consideration of tools like like Kibana, this component adds "timestamp_millis" when writing -spans. This is mapped to the Elasticsearch date type, so can be used to any date-based queries. 
- -## Indexes -Spans are stored into daily indices, for example spans with a timestamp -falling on 2016/03/19 will be stored in the index named 'zipkin:span-2016-03-19' -or 'zipkin-span-2016-03-19' if using Elasticsearch version 7 or higher. -There is no support for TTL through this SpanStore. It is recommended -instead to use [Elastic Curator](https://www.elastic.co/guide/en/elasticsearch/client/curator/current/about.html) -to remove indices older than the point you are interested in. - -### Customizing daily index format -The daily index format can be adjusted in two ways. You can change the -index prefix from 'zipkin' to something else. You can also change -the date separator from '-' to something else. -`ElasticsearchStorage.Builder.index` and `ElasticsearchStorage.Builder.dateSeparator` -control the daily index format. - -For example, using Elasticsearch 7+, spans with a timestamp falling on -2016/03/19 end up in the index 'zipkin-span-2016-03-19'. When the date -separator is '.', the index would be 'zipkin-span-2016.03.19'. - -### String Mapping -The Zipkin api implies aggregation and exact match (keyword) on string -fields named `traceId` and `name` and `serviceName`. Indexing on these -fields is limited to 256 characters eventhough storage is currently -unbounded. - -### Query indexing -To support the zipkin query api, a special index field named `_q` is -added to documents, containing annotation values and tag entry pairs. -Ex: the tag `"error": "500"` results in `"_q":["error", "error=500"]`. -The values in `q` are limited to 256 characters and searched as keywords. - -You can check these manually like so: -```bash -$ curl -s 'localhost:9200/zipkin*span-2017-08-11/_search?q=_q:error=500' -``` - -The reason for special casing is around dotted name constraints. Tags -are stored as a dictionary. Some keys include inconsistent number of dots -(ex "error" and "error.message"). 
Elasticsearch cannot index these as it -inteprets them as fields, and dots in fields imply an object path. - -### Trace Identifiers -Unless `ElasticsearchStorage.Builder.strictTraceId` is set to false, -trace identifiers are unanalyzed keywords (exact string match). This -means that trace IDs should be written fixed length as either 16 or 32 -lowercase hex characters, corresponding to 64 or 128 bit length. If -writing a custom collector in a different language, make sure you trace -identifiers the same way. - -#### Migrating from 64 to 128-bit trace IDs -When [migrating from 64 to 128-bit trace IDs](../../zipkin-server/README.md#migrating-from-64-to-128-bit-trace-ids), -`ElasticsearchStorage.Builder.strictTraceId` will be false, and traceId -fields will be tokenized to support mixed lookup. This setting should -only be used temporarily, but is explained below. - -The index template tokenizes trace identifiers to match on either 64-bit -or 128-bit length. This allows span lookup by 64-bit trace ID to include -spans reported with 128-bit variants of the same id. This allows interop -with tools who only support 64-bit ids, and buys time for applications -to upgrade to 128-bit instrumentation. - -For example, application A starts a trace with a 128-bit `traceId` -"48485a3953bb61246b221d5bc9e6496c". The next hop, application B, doesn't -yet support 128-bit ids, B truncates `traceId` to "6b221d5bc9e6496c". -When `SpanStore.getTrace(toLong("6b221d5bc9e6496c"))` executes, it -is able to retrieve spans with the longer `traceId`, due to tokenization -setup in the index template. - -To see this in action, you can run a test command like so against one of -your indexes: - -```bash -# the output below shows which tokens will match on the trace id supplied. 
-$ curl -s 'localhost:9200/zipkin*span-2017-08-22/_analyze' -d '{ - "text": "48485a3953bb61246b221d5bc9e6496c", - "analyzer": "traceId_analyzer" - }'|jq '.tokens|.[]|.token' - "48485a3953bb61246b221d5bc9e6496c" - "6b221d5bc9e6496c" -``` - -### Disabling indexing -Indexing is a good default, but some sites who don't use Zipkin UI's -"Find a Trace" screen may want to disable indexing. This means templates -will opt-out of analyzing any data in `span`, except `traceId`. This -also means the special fields `_q` and `timestamp_millis` will neither -be written, nor analyzed. - -[Disabling search](../../README.md#disabling-search) disables indexing. - -### Composable Index Template -Elasticsearch 7.8 introduces [composable templates](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-templates.html) and -deprecates [legacy/v1 templates](https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates-v1.html) used in version prior. -Merging of multiple templates with matching index patterns is no longer allowed, and Elasticsearch will return error on PUT of the second template -with matching index pattern and priority. Templates with matching index patterns are required to have different priorities, and Elasticsearch will -only use the template with the highest priority. This also means that [secondary template](https://gist.github.com/adriancole/1af1259102e7a2da1b3c9103565165d7) -is no longer achievable. - -By default, Zipkin will use legacy template during initialization, but you can opt to use composable template by -providing `ES_TEMPLATE_PRIORITY` environment variable. - -## Customizing the ingest pipeline - -You can setup an [ingest pipeline](https://www.elastic.co/guide/en/elasticsearch/reference/master/pipeline.html) to perform custom processing. 
- -Here's an example, which you'd setup prior to configuring Zipkin to use -it via `ElasticsearchStorage.Builder.pipeline` - - -``` -PUT _ingest/pipeline/zipkin -{ - "description" : "add collector_timestamp_millis", - "processors" : [ - { - "set" : { - "field": "collector_timestamp_millis", - "value": "{{_ingest.timestamp}}" - } - } - ] -} -``` - -## Tuning - -### Autocomplete indexing -Redundant requests to store autocomplete values are ignored for an hour -to reduce load. This is implemented by -[DelayLimiter](../../zipkin/src/main/java/zipkin2/internal/DelayLimiter.java) - -## Data retention -Zipkin-server does not handle retention management of the trace data. Use the tools recommended by ElasticSearch to manage data retention, or your cluster -will grow indefinitely! - * [Curator](https://www.elastic.co/guide/en/elasticsearch/client/curator/current/index.html) - * [Index Lifecycle Management](https://www.elastic.co/guide/en/elasticsearch/reference/7.3/index-lifecycle-management.html) diff --git a/zipkin-storage/elasticsearch/pom.xml b/zipkin-storage/elasticsearch/pom.xml deleted file mode 100644 index 2e9e912a161..00000000000 --- a/zipkin-storage/elasticsearch/pom.xml +++ /dev/null @@ -1,75 +0,0 @@ - - - - 4.0.0 - - - io.zipkin.zipkin2 - zipkin-storage-parent - 2.24.4-SNAPSHOT - - - zipkin-storage-elasticsearch - Storage: Elasticsearch (V2) - - - ${project.basedir}/../.. 
- - - -XepDisableWarningsInGeneratedCode -Xep:AutoValueImmutableFields:OFF -Xep:ExtendsAutoValue:OFF -Xep:AutoValueSubclassLeaked:OFF - - - - - ${armeria.groupId} - armeria - ${armeria.version} - - - - com.google.auto.value - auto-value-annotations - ${auto-value.version} - - - com.google.auto.value - auto-value - ${auto-value.version} - provided - - - - org.awaitility - awaitility - ${awaitility.version} - test - - - - ${armeria.groupId} - armeria-junit5 - ${armeria.version} - test - - - ${armeria.groupId} - armeria-junit4 - ${armeria.version} - test - - - diff --git a/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/BodyConverters.java b/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/BodyConverters.java deleted file mode 100644 index 70f8c4c45a6..00000000000 --- a/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/BodyConverters.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.elasticsearch; - -import com.fasterxml.jackson.core.JsonParser; -import java.io.IOException; -import java.util.List; -import java.util.function.Supplier; -import zipkin2.DependencyLink; -import zipkin2.Span; -import zipkin2.elasticsearch.internal.JsonSerializers; -import zipkin2.elasticsearch.internal.client.HttpCall.BodyConverter; -import zipkin2.elasticsearch.internal.client.SearchResultConverter; -import zipkin2.internal.DependencyLinker; - -import static zipkin2.elasticsearch.internal.JsonReaders.collectValuesNamed; - -final class BodyConverters { - static final BodyConverter NULL = (parser, contentString) -> null; - static final BodyConverter> KEYS = - (parser, contentString) -> collectValuesNamed(parser, "key"); - static final BodyConverter> SPANS = - SearchResultConverter.create(JsonSerializers.SPAN_PARSER); - static final BodyConverter> DEPENDENCY_LINKS = - new SearchResultConverter(JsonSerializers.DEPENDENCY_LINK_PARSER) { - @Override - public List convert(JsonParser parser, Supplier contentString) - throws IOException { - List result = super.convert(parser, contentString); - return result.isEmpty() ? result : DependencyLinker.merge(result); - } - }; -} diff --git a/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/ElasticsearchAutocompleteTags.java b/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/ElasticsearchAutocompleteTags.java deleted file mode 100644 index ba3944499df..00000000000 --- a/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/ElasticsearchAutocompleteTags.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.elasticsearch; - -import java.util.List; -import zipkin2.Call; -import zipkin2.elasticsearch.internal.IndexNameFormatter; -import zipkin2.elasticsearch.internal.client.Aggregation; -import zipkin2.elasticsearch.internal.client.SearchCallFactory; -import zipkin2.elasticsearch.internal.client.SearchRequest; -import zipkin2.storage.AutocompleteTags; - -import static zipkin2.elasticsearch.VersionSpecificTemplates.TYPE_AUTOCOMPLETE; - -final class ElasticsearchAutocompleteTags implements AutocompleteTags { - - final boolean enabled; - final IndexNameFormatter indexNameFormatter; - final SearchCallFactory search; - final int namesLookback; - final Call> keysCall; - - ElasticsearchAutocompleteTags(ElasticsearchStorage es) { - this.search = new SearchCallFactory(es.http()); - this.indexNameFormatter = es.indexNameFormatter(); - this.enabled = es.searchEnabled() && !es.autocompleteKeys().isEmpty(); - this.namesLookback = es.namesLookback(); - this.keysCall = Call.create(es.autocompleteKeys()); - } - - @Override public Call> getKeys() { - if (!enabled) return Call.emptyList(); - return keysCall.clone(); - } - - @Override public Call> getValues(String key) { - if (key == null) throw new NullPointerException("key == null"); - if (key.isEmpty()) throw new IllegalArgumentException("key was empty"); - if (!enabled) return Call.emptyList(); - - long endMillis = System.currentTimeMillis(); - long beginMillis = endMillis - namesLookback; - List indices = - indexNameFormatter.formatTypeAndRange(TYPE_AUTOCOMPLETE, beginMillis, endMillis); - - if 
(indices.isEmpty()) return Call.emptyList(); - - SearchRequest.Filters filters = - new SearchRequest.Filters().addTerm("tagKey", key); - - SearchRequest request = SearchRequest.create(indices) - .filters(filters) - .addAggregation(Aggregation.terms("tagValue", Integer.MAX_VALUE)); - return search.newCall(request, BodyConverters.KEYS); - } -} diff --git a/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/ElasticsearchSpanConsumer.java b/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/ElasticsearchSpanConsumer.java deleted file mode 100644 index dcf9a978302..00000000000 --- a/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/ElasticsearchSpanConsumer.java +++ /dev/null @@ -1,161 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.elasticsearch; - -import java.util.ArrayList; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.TimeUnit; -import zipkin2.Call; -import zipkin2.Span; -import zipkin2.elasticsearch.internal.BulkCallBuilder; -import zipkin2.elasticsearch.internal.BulkIndexWriter; -import zipkin2.elasticsearch.internal.IndexNameFormatter; -import zipkin2.internal.DelayLimiter; -import zipkin2.storage.SpanConsumer; - -import static zipkin2.elasticsearch.VersionSpecificTemplates.TYPE_AUTOCOMPLETE; -import static zipkin2.elasticsearch.VersionSpecificTemplates.TYPE_SPAN; -import static zipkin2.internal.RecyclableBuffers.SHORT_STRING_LENGTH; - -class ElasticsearchSpanConsumer implements SpanConsumer { // not final for testing - final ElasticsearchStorage es; - final Set autocompleteKeys; - final IndexNameFormatter indexNameFormatter; - final char indexTypeDelimiter; - final boolean searchEnabled; - final DelayLimiter delayLimiter; - - ElasticsearchSpanConsumer(ElasticsearchStorage es) { - this.es = es; - this.autocompleteKeys = new LinkedHashSet<>(es.autocompleteKeys()); - this.indexNameFormatter = es.indexNameFormatter(); - this.indexTypeDelimiter = es.indexTypeDelimiter(); - this.searchEnabled = es.searchEnabled(); - this.delayLimiter = DelayLimiter.newBuilder() - .ttl(es.autocompleteTtl(), TimeUnit.MILLISECONDS) - .cardinality(es.autocompleteCardinality()).build(); - } - - String formatTypeAndTimestampForInsert(String type, long timestampMillis) { - return indexNameFormatter - .formatTypeAndTimestampForInsert(type, indexTypeDelimiter, timestampMillis); - } - - @Override public Call accept(List spans) { - if (spans.isEmpty()) return Call.create(null); - BulkSpanIndexer indexer = new BulkSpanIndexer(this); - indexSpans(indexer, spans); - return indexer.newCall(); - } - - void indexSpans(BulkSpanIndexer indexer, List spans) { - for (Span span : spans) { - final long indexTimestamp; // 
which index to store this span into - if (span.timestampAsLong() != 0L) { - indexTimestamp = span.timestampAsLong() / 1000L; - } else if (!span.annotations().isEmpty()) { - // guessTimestamp is made for determining the span's authoritative timestamp. When choosing - // the index bucket, any annotation is better than using current time. - indexTimestamp = span.annotations().get(0).timestamp() / 1000L; - } else { - indexTimestamp = System.currentTimeMillis(); - } - indexer.add(indexTimestamp, span); - if (searchEnabled && !span.tags().isEmpty()) { - indexer.addAutocompleteValues(indexTimestamp, span); - } - } - } - - /** Mutable type used for each call to store spans */ - static final class BulkSpanIndexer { - final BulkCallBuilder bulkCallBuilder; - final ElasticsearchSpanConsumer consumer; - final List pendingAutocompleteContexts = new ArrayList<>(); - final BulkIndexWriter spanWriter; - - BulkSpanIndexer(ElasticsearchSpanConsumer consumer) { - this.bulkCallBuilder = new BulkCallBuilder(consumer.es, consumer.es.version(), "index-span"); - this.consumer = consumer; - this.spanWriter = - consumer.searchEnabled ? 
BulkIndexWriter.SPAN : BulkIndexWriter.SPAN_SEARCH_DISABLED; - } - - void add(long indexTimestamp, Span span) { - String index = consumer.formatTypeAndTimestampForInsert(TYPE_SPAN, indexTimestamp); - bulkCallBuilder.index(index, TYPE_SPAN, span, spanWriter); - } - - void addAutocompleteValues(long indexTimestamp, Span span) { - String idx = consumer.formatTypeAndTimestampForInsert(TYPE_AUTOCOMPLETE, indexTimestamp); - for (Map.Entry tag : span.tags().entrySet()) { - int length = tag.getKey().length() + tag.getValue().length() + 1; - if (length > SHORT_STRING_LENGTH) continue; - - // If the autocomplete whitelist doesn't contain the key, skip storing its value - if (!consumer.autocompleteKeys.contains(tag.getKey())) continue; - - AutocompleteContext context = - new AutocompleteContext(indexTimestamp, tag.getKey(), tag.getValue()); - if (!consumer.delayLimiter.shouldInvoke(context)) continue; - pendingAutocompleteContexts.add(context); - - bulkCallBuilder.index(idx, TYPE_AUTOCOMPLETE, tag, BulkIndexWriter.AUTOCOMPLETE); - } - } - - Call newCall() { - Call storeCall = bulkCallBuilder.build(); - if (pendingAutocompleteContexts.isEmpty()) return storeCall; - return storeCall.handleError((error, callback) -> { - for (AutocompleteContext context : pendingAutocompleteContexts) { - consumer.delayLimiter.invalidate(context); - } - callback.onError(error); - }); - } - } - - static final class AutocompleteContext { - final long timestamp; - final String key, value; - - AutocompleteContext(long timestamp, String key, String value) { - this.timestamp = timestamp; - this.key = key; - this.value = value; - } - - @Override public boolean equals(Object o) { - if (o == this) return true; - if (!(o instanceof AutocompleteContext)) return false; - AutocompleteContext that = (AutocompleteContext) o; - return timestamp == that.timestamp && key.equals(that.key) && value.equals(that.value); - } - - @Override public int hashCode() { - int h$ = 1; - h$ *= 1000003; - h$ ^= (int) (h$ ^ 
((timestamp >>> 32) ^ timestamp)); - h$ *= 1000003; - h$ ^= key.hashCode(); - h$ *= 1000003; - h$ ^= value.hashCode(); - return h$; - } - } -} diff --git a/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/ElasticsearchSpanStore.java b/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/ElasticsearchSpanStore.java deleted file mode 100644 index e547ee5233b..00000000000 --- a/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/ElasticsearchSpanStore.java +++ /dev/null @@ -1,233 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.elasticsearch; - -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.Set; -import zipkin2.Call; -import zipkin2.DependencyLink; -import zipkin2.Span; -import zipkin2.elasticsearch.internal.IndexNameFormatter; -import zipkin2.elasticsearch.internal.client.Aggregation; -import zipkin2.elasticsearch.internal.client.HttpCall; -import zipkin2.elasticsearch.internal.client.SearchCallFactory; -import zipkin2.elasticsearch.internal.client.SearchRequest; -import zipkin2.storage.GroupByTraceId; -import zipkin2.storage.QueryRequest; -import zipkin2.storage.ServiceAndSpanNames; -import zipkin2.storage.SpanStore; -import zipkin2.storage.StrictTraceId; -import zipkin2.storage.Traces; - -import static java.util.Arrays.asList; -import static zipkin2.elasticsearch.VersionSpecificTemplates.TYPE_DEPENDENCY; -import static zipkin2.elasticsearch.VersionSpecificTemplates.TYPE_SPAN; - -final class ElasticsearchSpanStore implements SpanStore, Traces, ServiceAndSpanNames { - - /** To not produce unnecessarily long queries, we don't look back further than first ES support */ - static final long EARLIEST_MS = 1456790400000L; // March 2016 - - final SearchCallFactory search; - final Call.Mapper, List>> groupByTraceId; - final String[] allSpanIndices; - final IndexNameFormatter indexNameFormatter; - final boolean strictTraceId, searchEnabled; - final int namesLookback; - - ElasticsearchSpanStore(ElasticsearchStorage es) { - this.search = new SearchCallFactory(es.http()); - this.groupByTraceId = GroupByTraceId.create(es.strictTraceId()); - this.allSpanIndices = new String[] {es.indexNameFormatter().formatType(TYPE_SPAN)}; - this.indexNameFormatter = es.indexNameFormatter(); - this.strictTraceId = es.strictTraceId(); - this.searchEnabled = es.searchEnabled(); - this.namesLookback = es.namesLookback(); - } - - @Override - public Call>> getTraces(QueryRequest request) { - if (!searchEnabled) return 
Call.emptyList(); - - long endMillis = request.endTs(); - long beginMillis = Math.max(endMillis - request.lookback(), EARLIEST_MS); - - SearchRequest.Filters filters = new SearchRequest.Filters(); - filters.addRange("timestamp_millis", beginMillis, endMillis); - if (request.serviceName() != null) { - filters.addTerm("localEndpoint.serviceName", request.serviceName()); - } - - if (request.remoteServiceName() != null) { - filters.addTerm("remoteEndpoint.serviceName", request.remoteServiceName()); - } - - if (request.spanName() != null) { - filters.addTerm("name", request.spanName()); - } - - for (Map.Entry kv : request.annotationQuery().entrySet()) { - if (kv.getValue().isEmpty()) { - filters.addTerm("_q", kv.getKey()); - } else { - filters.addTerm("_q", kv.getKey() + "=" + kv.getValue()); - } - } - - if (request.minDuration() != null) { - filters.addRange("duration", request.minDuration(), request.maxDuration()); - } - - // We need to filter to traces that contain at least one span that matches the request, - // but the zipkin API is supposed to order traces by first span, regardless of if it was - // filtered or not. This is not possible without either multiple, heavyweight queries - // or complex multiple indexing, defeating much of the elegance of using elasticsearch for this. - // So we fudge and order on the first span among the filtered spans - in practice, there should - // be no significant difference in user experience since span start times are usually very - // close to each other in human time. 
- Aggregation traceIdTimestamp = - Aggregation.terms("traceId", request.limit()) - .addSubAggregation(Aggregation.min("timestamp_millis")) - .orderBy("timestamp_millis", "desc"); - - List indices = indexNameFormatter.formatTypeAndRange(TYPE_SPAN, beginMillis, endMillis); - if (indices.isEmpty()) return Call.emptyList(); - - SearchRequest esRequest = - SearchRequest.create(indices).filters(filters).addAggregation(traceIdTimestamp); - - HttpCall> traceIdsCall = search.newCall(esRequest, BodyConverters.KEYS); - - Call>> result = - traceIdsCall.flatMap(new GetSpansByTraceId(search, indices)).map(groupByTraceId); - // Elasticsearch lookup by trace ID is by the full 128-bit length, but there's still a chance of - // clash on lower-64 bit. When strict trace ID is enabled, we only filter client-side on clash. - return strictTraceId ? result.map(StrictTraceId.filterTraces(request)) : result; - } - - @Override - public Call> getTrace(String traceId) { - // make sure we have a 16 or 32 character trace ID - traceId = Span.normalizeTraceId(traceId); - - // Unless we are strict, truncate the trace ID to 64bit (encoded as 16 characters) - if (!strictTraceId && traceId.length() == 32) traceId = traceId.substring(16); - - SearchRequest request = SearchRequest.create(asList(allSpanIndices)).term("traceId", traceId); - return search.newCall(request, BodyConverters.SPANS); - } - - @Override public Call>> getTraces(Iterable traceIds) { - Set normalizedTraceIds = new LinkedHashSet<>(); - for (String traceId : traceIds) { - // make sure we have a 16 or 32 character trace ID - traceId = Span.normalizeTraceId(traceId); - - // Unless we are strict, truncate the trace ID to 64bit (encoded as 16 characters) - if (!strictTraceId && traceId.length() == 32) traceId = traceId.substring(16); - - normalizedTraceIds.add(traceId); - } - - if (normalizedTraceIds.isEmpty()) return Call.emptyList(); - SearchRequest request = - SearchRequest.create(asList(allSpanIndices)).terms("traceId", 
normalizedTraceIds); - return search.newCall(request, BodyConverters.SPANS).map(groupByTraceId); - } - - @Override public Call> getServiceNames() { - if (!searchEnabled) return Call.emptyList(); - - long endMillis = System.currentTimeMillis(); - long beginMillis = endMillis - namesLookback; - - List indices = indexNameFormatter.formatTypeAndRange(TYPE_SPAN, beginMillis, endMillis); - if (indices.isEmpty()) return Call.emptyList(); - - SearchRequest request = SearchRequest.create(indices) - .filters(new SearchRequest.Filters().addRange("timestamp_millis", beginMillis, endMillis)) - .addAggregation(Aggregation.terms("localEndpoint.serviceName", Integer.MAX_VALUE)); - return search.newCall(request, BodyConverters.KEYS); - } - - @Override public Call> getRemoteServiceNames(String serviceName) { - return aggregatedFieldByServiceName(serviceName, "remoteEndpoint.serviceName"); - } - - @Override public Call> getSpanNames(String serviceName) { - return aggregatedFieldByServiceName(serviceName, "name"); - } - - Call> aggregatedFieldByServiceName(String serviceName, String term) { - if (serviceName.isEmpty() || !searchEnabled) return Call.emptyList(); - - long endMillis = System.currentTimeMillis(); - long beginMillis = endMillis - namesLookback; - - List indices = indexNameFormatter.formatTypeAndRange(TYPE_SPAN, beginMillis, endMillis); - if (indices.isEmpty()) return Call.emptyList(); - - // A span name is only valid on a local endpoint, as a span name is defined locally - SearchRequest.Filters filters = new SearchRequest.Filters() - .addRange("timestamp_millis", beginMillis, endMillis) - .addTerm("localEndpoint.serviceName", serviceName.toLowerCase(Locale.ROOT)); - - SearchRequest request = SearchRequest.create(indices).filters(filters) - .addAggregation(Aggregation.terms(term, Integer.MAX_VALUE)); - - return search.newCall(request, BodyConverters.KEYS); - } - - @Override - public Call> getDependencies(long endTs, long lookback) { - if (endTs <= 0) throw new 
IllegalArgumentException("endTs <= 0"); - if (lookback <= 0) throw new IllegalArgumentException("lookback <= 0"); - - long beginMillis = Math.max(endTs - lookback, EARLIEST_MS); - - // We just return all dependencies in the days that fall within endTs and lookback as - // dependency links themselves don't have timestamps. - List indices = - indexNameFormatter.formatTypeAndRange(TYPE_DEPENDENCY, beginMillis, endTs); - if (indices.isEmpty()) return Call.emptyList(); - - return search.newCall(SearchRequest.create(indices), BodyConverters.DEPENDENCY_LINKS); - } - - static final class GetSpansByTraceId implements Call.FlatMapper, List> { - final SearchCallFactory search; - final List indices; - - GetSpansByTraceId(SearchCallFactory search, List indices) { - this.search = search; - this.indices = indices; - } - - @Override - public Call> map(List input) { - if (input.isEmpty()) return Call.emptyList(); - - SearchRequest getTraces = SearchRequest.create(indices).terms("traceId", input); - return search.newCall(getTraces, BodyConverters.SPANS); - } - - @Override - public String toString() { - return "GetSpansByTraceId{indices=" + indices + "}"; - } - } -} diff --git a/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/ElasticsearchStorage.java b/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/ElasticsearchStorage.java deleted file mode 100644 index a8ff6e5de6c..00000000000 --- a/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/ElasticsearchStorage.java +++ /dev/null @@ -1,420 +0,0 @@ -/* - * Copyright 2015-2021 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.elasticsearch; - -import com.fasterxml.jackson.core.JsonParser; -import com.google.auto.value.AutoValue; -import com.google.auto.value.extension.memoized.Memoized; -import com.linecorp.armeria.client.ResponseTimeoutException; -import com.linecorp.armeria.client.WebClient; -import com.linecorp.armeria.client.endpoint.EndpointGroup; -import com.linecorp.armeria.common.AggregatedHttpRequest; -import com.linecorp.armeria.common.HttpMethod; -import java.io.Closeable; -import java.io.IOException; -import java.io.UncheckedIOException; -import java.util.Collections; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Set; -import java.util.concurrent.RejectedExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.function.Supplier; -import zipkin2.Call; -import zipkin2.CheckResult; -import zipkin2.elasticsearch.internal.IndexNameFormatter; -import zipkin2.elasticsearch.internal.Internal; -import zipkin2.elasticsearch.internal.client.HttpCall; -import zipkin2.elasticsearch.internal.client.HttpCall.BodyConverter; -import zipkin2.internal.Nullable; -import zipkin2.storage.AutocompleteTags; -import zipkin2.storage.ServiceAndSpanNames; -import zipkin2.storage.SpanConsumer; -import zipkin2.storage.SpanStore; -import zipkin2.storage.StorageComponent; -import zipkin2.storage.Traces; - -import static com.linecorp.armeria.common.HttpMethod.GET; -import static zipkin2.elasticsearch.ElasticsearchVersion.V7_0; -import static zipkin2.elasticsearch.ElasticsearchVersion.V6_7; -import static 
zipkin2.elasticsearch.ElasticsearchVersion.V7_8; -import static zipkin2.elasticsearch.EnsureIndexTemplate.ensureIndexTemplate; -import static zipkin2.elasticsearch.VersionSpecificTemplates.TYPE_AUTOCOMPLETE; -import static zipkin2.elasticsearch.VersionSpecificTemplates.TYPE_DEPENDENCY; -import static zipkin2.elasticsearch.VersionSpecificTemplates.TYPE_SPAN; -import static zipkin2.elasticsearch.internal.JsonReaders.enterPath; - -@AutoValue -public abstract class ElasticsearchStorage extends zipkin2.storage.StorageComponent { - /** - * This defers creation of an {@link WebClient}. This is needed because routinely, I/O occurs in - * constructors and this can delay or cause startup to crash. For example, an underlying {@link - * EndpointGroup} could be delayed due to DNS, implicit api calls or health checks. - */ - public interface LazyHttpClient extends Supplier, Closeable { - /** - * Lazily creates an instance of the http client configured to the correct elasticsearch host or - * cluster. The same value should always be returned. - */ - @Override WebClient get(); - - @Override default void close() { - } - - /** This should return the initial endpoints in a single-string without resolving them. */ - @Override String toString(); - } - - /** The lazy http client supplier will be closed on {@link #close()} */ - public static Builder newBuilder(LazyHttpClient lazyHttpClient) { - return new $AutoValue_ElasticsearchStorage.Builder() - .lazyHttpClient(lazyHttpClient) - .strictTraceId(true) - .searchEnabled(true) - .index("zipkin") - .dateSeparator('-') - .indexShards(5) - .indexReplicas(1) - .ensureTemplates(true) - .namesLookback(86400000) - .flushOnWrites(false) - .autocompleteKeys(Collections.emptyList()) - .autocompleteTtl((int) TimeUnit.HOURS.toMillis(1)) - .autocompleteCardinality(5 * 4000); // Ex. 
5 site tags with cardinality 4000 each - } - - abstract Builder toBuilder(); - - @AutoValue.Builder - public abstract static class Builder extends StorageComponent.Builder { - - /** - * Only valid when the destination is Elasticsearch 5.x. Indicates the ingest pipeline used - * before spans are indexed. No default. - * - *

See https://www.elastic.co/guide/en/elasticsearch/reference/master/pipeline.html - */ - public abstract Builder pipeline(String pipeline); - - /** - * Only return span and service names where all {@link zipkin2.Span#timestamp()} are at or after - * (now - lookback) in milliseconds. Defaults to 1 day (86400000). - */ - public abstract Builder namesLookback(int namesLookback); - - /** - * Internal and visible only for testing. - * - *

See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-refresh.html - */ - public abstract Builder flushOnWrites(boolean flushOnWrites); - - /** The index prefix to use when generating daily index names. Defaults to zipkin. */ - public final Builder index(String index) { - indexNameFormatterBuilder().index(index); - return this; - } - - /** - * The date separator to use when generating daily index names. Defaults to '-'. - * - *

By default, spans with a timestamp falling on 2016/03/19 end up in the index - * 'zipkin-span-2016-03-19'. When the date separator is '.', the index would be - * 'zipkin-span-2016.03.19'. If the date separator is 0, there is no delimiter. Ex the index - * would be 'zipkin-span-20160319' - */ - public final Builder dateSeparator(char dateSeparator) { - indexNameFormatterBuilder().dateSeparator(dateSeparator); - return this; - } - - /** - * The number of shards to split the index into. Each shard and its replicas are assigned to a - * machine in the cluster. Increasing the number of shards and machines in the cluster will - * improve read and write performance. Number of shards cannot be changed for existing indices, - * but new daily indices will pick up changes to the setting. Defaults to 5. - * - *

Corresponds to index.number_of_shards - */ - public abstract Builder indexShards(int indexShards); - - /** - * The number of replica copies of each shard in the index. Each shard and its replicas are - * assigned to a machine in the cluster. Increasing the number of replicas and machines in the - * cluster will improve read performance, but not write performance. Number of replicas can be - * changed for existing indices. Defaults to 1. It is highly discouraged to set this to 0 as it - * would mean a machine failure results in data loss. - * - *

Corresponds to index.number_of_replicas - */ - public abstract Builder indexReplicas(int indexReplicas); - - /** False disables automatic index template installation. */ - public abstract Builder ensureTemplates(boolean ensureTemplates); - - /** - * Only valid when the destination is Elasticsearch >= 7.8. Indicates the index template - * priority in case of multiple matching templates. The template with highest priority is used. - * Default to 0. - * - *

See https://www.elastic.co/guide/en/elasticsearch/reference/7.8/_index_template_and_settings_priority.html - */ - public abstract Builder templatePriority(@Nullable Integer templatePriority); - - /** {@inheritDoc} */ - @Override public abstract Builder strictTraceId(boolean strictTraceId); - - /** {@inheritDoc} */ - @Override public abstract Builder searchEnabled(boolean searchEnabled); - - /** {@inheritDoc} */ - @Override public abstract Builder autocompleteKeys(List autocompleteKeys); - - /** {@inheritDoc} */ - @Override public abstract Builder autocompleteTtl(int autocompleteTtl); - - /** {@inheritDoc} */ - @Override public abstract Builder autocompleteCardinality(int autocompleteCardinality); - - @Override public abstract ElasticsearchStorage build(); - - abstract Builder lazyHttpClient(LazyHttpClient lazyHttpClient); - - abstract IndexNameFormatter.Builder indexNameFormatterBuilder(); - - Builder() { - } - } - - abstract LazyHttpClient lazyHttpClient(); - - @Nullable public abstract String pipeline(); - - public abstract boolean flushOnWrites(); - - public abstract boolean strictTraceId(); - - abstract boolean searchEnabled(); - - abstract List autocompleteKeys(); - - abstract int autocompleteTtl(); - - abstract int autocompleteCardinality(); - - abstract int indexShards(); - - abstract int indexReplicas(); - - public abstract IndexNameFormatter indexNameFormatter(); - - abstract boolean ensureTemplates(); - - public abstract int namesLookback(); - - @Nullable abstract Integer templatePriority(); - - @Override public SpanStore spanStore() { - ensureIndexTemplates(); - return new ElasticsearchSpanStore(this); - } - - @Override public Traces traces() { - return (Traces) spanStore(); - } - - @Override public ServiceAndSpanNames serviceAndSpanNames() { - return (ServiceAndSpanNames) spanStore(); - } - - @Override public AutocompleteTags autocompleteTags() { - ensureIndexTemplates(); - return new ElasticsearchAutocompleteTags(this); - } - - @Override public 
SpanConsumer spanConsumer() { - ensureIndexTemplates(); - return new ElasticsearchSpanConsumer(this); - } - - /** Returns the Elasticsearch version of the connected cluster. Internal use only */ - @Memoized public ElasticsearchVersion version() { - try { - return ElasticsearchVersion.get(http()); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - } - - char indexTypeDelimiter() { - return VersionSpecificTemplates.indexTypeDelimiter(version()); - } - - /** This is an internal blocking call, only used in tests. */ - public void clear() throws IOException { - Set toClear = new LinkedHashSet<>(); - toClear.add(indexNameFormatter().formatType(TYPE_SPAN)); - toClear.add(indexNameFormatter().formatType(TYPE_DEPENDENCY)); - for (String index : toClear) clear(index); - } - - void clear(String index) throws IOException { - String url = '/' + index; - AggregatedHttpRequest delete = AggregatedHttpRequest.of(HttpMethod.DELETE, url); - http().newCall(delete, BodyConverters.NULL, "delete-index").execute(); - } - - /** - * Internal code and api responses coerce to {@link RejectedExecutionException} when work is - * rejected. We also classify {@link ResponseTimeoutException} as a capacity related exception - * eventhough capacity is not the only reason (timeout could also result from a misconfiguration - * or a network problem). - */ - @Override public boolean isOverCapacity(Throwable e) { - return e instanceof RejectedExecutionException || e instanceof ResponseTimeoutException; - } - - /** This is blocking so that we can determine if the cluster is healthy or not */ - @Override public CheckResult check() { - return ensureIndexTemplatesAndClusterReady(indexNameFormatter().formatType(TYPE_SPAN)); - } - - /** - * This allows the health check to display problems, such as access, installing the index - * template. It also helps reduce traffic sent to nodes still initializing (when guarded on the - * check result). 
Finally, this reads the cluster health of the index as it can go down after the - * one-time initialization passes. - */ - CheckResult ensureIndexTemplatesAndClusterReady(String index) { - try { - version(); // ensure the version is available (even if we already cached it) - ensureIndexTemplates(); // called only once, so we have to double-check health - AggregatedHttpRequest request = AggregatedHttpRequest.of(GET, "/_cluster/health/" + index); - CheckResult result = http().newCall(request, READ_STATUS, "get-cluster-health").execute(); - if (result == null) throw new IllegalArgumentException("No content reading cluster health"); - return result; - } catch (Throwable e) { - Call.propagateIfFatal(e); - // Wrapping interferes with humans intended to read this message: - // - // Unwrap the marker exception as the health check is not relevant for the throttle component. - // Unwrap any IOException from the first call to ensureIndexTemplates() - if (e instanceof RejectedExecutionException || e instanceof UncheckedIOException) { - e = e.getCause(); - } - return CheckResult.failed(e); - } - } - - volatile boolean ensuredTemplates; - - // synchronized since we don't want overlapping calls to apply the index templates - void ensureIndexTemplates() { - if (ensuredTemplates) return; - if (!ensureTemplates()) ensuredTemplates = true; - synchronized (this) { - if (ensuredTemplates) return; - doEnsureIndexTemplates(); - ensuredTemplates = true; - } - } - - IndexTemplates doEnsureIndexTemplates() { - try { - HttpCall.Factory http = http(); - IndexTemplates templates = versionSpecificTemplates(version()); - ensureIndexTemplate(http, buildUrl(templates, TYPE_SPAN), templates.span()); - ensureIndexTemplate(http, buildUrl(templates, TYPE_DEPENDENCY), templates.dependency()); - ensureIndexTemplate(http, buildUrl(templates, TYPE_AUTOCOMPLETE), templates.autocomplete()); - return templates; - } catch (IOException e) { - throw new UncheckedIOException(e); - } - } - - IndexTemplates 
versionSpecificTemplates(ElasticsearchVersion version) { - return new VersionSpecificTemplates( - indexNameFormatter().index(), - indexReplicas(), - indexShards(), - searchEnabled(), - strictTraceId(), - templatePriority() - ).get(version); - } - - String buildUrl(IndexTemplates templates, String type) { - String indexPrefix = indexNameFormatter().index() + templates.indexTypeDelimiter(); - - if (version().compareTo(V7_8) >= 0 && templatePriority() != null) { - return "/_index_template/" + indexPrefix + type + "_template"; - } - if (version().compareTo(V6_7) >= 0 && version().compareTo(V7_0) < 0) { - // because deprecation warning on 6 to prepare for 7: - // - // [types removal] The parameter include_type_name should be explicitly specified in get - // template requests to prepare for 7.0. In 7.0 include_type_name will default to 'false', - // which means responses will omit the type name in mapping definitions. - // - // The parameter include_type_name was added in 6.7. Using this with ES older than - // 6.7 will result in unrecognized parameter: [include_type_name]. 
- return "/_template/" + indexPrefix + type + "_template?include_type_name=true"; - } - return "/_template/" + indexPrefix + type + "_template"; - } - - @Override public final String toString() { - return "ElasticsearchStorage{initialEndpoints=" + lazyHttpClient() - + ", index=" + indexNameFormatter().index() + "}"; - } - - static { - Internal.instance = new Internal() { - @Override public HttpCall.Factory http(ElasticsearchStorage storage) { - return storage.http(); - } - }; - } - - @Memoized HttpCall.Factory http() { - return new HttpCall.Factory(lazyHttpClient().get()); - } - - @Override public void close() { - lazyHttpClient().close(); - } - - ElasticsearchStorage() { - } - - static final BodyConverter READ_STATUS = new BodyConverter() { - @Override public CheckResult convert(JsonParser parser, Supplier contentString) - throws IOException { - JsonParser status = enterPath(parser, "status"); - if (status == null) { - throw new IllegalArgumentException("Health status couldn't be read " + contentString.get()); - } - if ("RED".equalsIgnoreCase(status.getText())) { - return CheckResult.failed(new IllegalStateException("Health status is RED")); - } - return CheckResult.OK; - } - - @Override public String toString() { - return "ReadStatus"; - } - }; -} diff --git a/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/ElasticsearchVersion.java b/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/ElasticsearchVersion.java deleted file mode 100644 index bdbcce0c163..00000000000 --- a/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/ElasticsearchVersion.java +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Copyright 2015-2021 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.elasticsearch; - -import com.fasterxml.jackson.core.JsonParser; -import com.linecorp.armeria.common.AggregatedHttpRequest; -import com.linecorp.armeria.common.HttpMethod; -import java.io.IOException; -import java.util.Objects; -import java.util.function.Supplier; -import java.util.regex.Matcher; -import java.util.regex.Pattern; -import zipkin2.elasticsearch.internal.client.HttpCall; - -import static zipkin2.elasticsearch.internal.JsonReaders.enterPath; - -/** Helps avoid problems comparing versions by number. Ex 7.10 should be > 7.9 */ -public final class ElasticsearchVersion implements Comparable { - public static final ElasticsearchVersion V5_0 = new ElasticsearchVersion(5, 0); - public static final ElasticsearchVersion V6_0 = new ElasticsearchVersion(6, 0); - public static final ElasticsearchVersion V6_7 = new ElasticsearchVersion(6, 7); - public static final ElasticsearchVersion V7_0 = new ElasticsearchVersion(7, 0); - public static final ElasticsearchVersion V7_8 = new ElasticsearchVersion(7, 8); - public static final ElasticsearchVersion V8_0 = new ElasticsearchVersion(8, 0); - - static ElasticsearchVersion get(HttpCall.Factory http) throws IOException { - return Parser.INSTANCE.get(http); - } - - final int major, minor; - - ElasticsearchVersion(int major, int minor) { - this.major = major; - this.minor = minor; - } - - @Override public int compareTo(ElasticsearchVersion other) { - if (major < other.major) return -1; - if (major > other.major) return 1; - return Integer.compare(minor, other.minor); - } - - @Override public 
boolean equals(Object o) { - if (o == this) return true; - if (!(o instanceof ElasticsearchVersion)) return false; - ElasticsearchVersion that = (ElasticsearchVersion) o; - return this.major == that.major && this.minor == that.minor; - } - - @Override public int hashCode() { - return Objects.hash(major, minor); - } - - @Override public String toString() { - return major + "." + minor; - } - - enum Parser implements HttpCall.BodyConverter { - INSTANCE; - - final Pattern REGEX = Pattern.compile("(\\d+)\\.(\\d+).*"); - - ElasticsearchVersion get(HttpCall.Factory callFactory) throws IOException { - AggregatedHttpRequest getNode = AggregatedHttpRequest.of(HttpMethod.GET, "/"); - ElasticsearchVersion version = callFactory.newCall(getNode, this, "get-node").execute(); - if (version == null) { - throw new IllegalArgumentException("No content reading Elasticsearch version"); - } - return version; - } - - @Override - public ElasticsearchVersion convert(JsonParser parser, Supplier contentString) { - String version = null; - try { - if (enterPath(parser, "version", "number") != null) version = parser.getText(); - } catch (RuntimeException | IOException possiblyParseException) { - // EmptyCatch ignored - } - if (version == null) { - throw new IllegalArgumentException( - ".version.number not found in response: " + contentString.get()); - } - - Matcher matcher = REGEX.matcher(version); - if (!matcher.matches()) { - throw new IllegalArgumentException("Invalid .version.number: " + version); - } - - try { - int major = Integer.parseInt(matcher.group(1)); - int minor = Integer.parseInt(matcher.group(2)); - return new ElasticsearchVersion(major, minor); - } catch (NumberFormatException e) { - throw new IllegalArgumentException("Invalid .version.number: " + version); - } - } - } -} diff --git a/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/EnsureIndexTemplate.java b/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/EnsureIndexTemplate.java deleted file 
mode 100644 index 26d78f228ad..00000000000 --- a/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/EnsureIndexTemplate.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.elasticsearch; - -import com.linecorp.armeria.common.AggregatedHttpRequest; -import com.linecorp.armeria.common.HttpData; -import com.linecorp.armeria.common.HttpHeaderNames; -import com.linecorp.armeria.common.HttpMethod; -import com.linecorp.armeria.common.MediaType; -import com.linecorp.armeria.common.RequestHeaders; -import java.io.FileNotFoundException; -import java.io.IOException; -import zipkin2.elasticsearch.internal.client.HttpCall; - -/** Ensures the index template exists and saves off the version */ -final class EnsureIndexTemplate { - - /** - * This is a blocking call, used inside a lazy. That's because no writes should occur until the - * template is available. 
- */ - static void ensureIndexTemplate(HttpCall.Factory callFactory, String templateUrl, - String indexTemplate) throws IOException { - AggregatedHttpRequest getTemplate = AggregatedHttpRequest.of(HttpMethod.GET, templateUrl); - try { - callFactory.newCall(getTemplate, BodyConverters.NULL, "get-template").execute(); - } catch (FileNotFoundException e) { // TODO: handle 404 slightly more nicely - AggregatedHttpRequest updateTemplate = AggregatedHttpRequest.of( - RequestHeaders.of( - HttpMethod.PUT, templateUrl, HttpHeaderNames.CONTENT_TYPE, MediaType.JSON_UTF_8), - HttpData.ofUtf8(indexTemplate)); - callFactory.newCall(updateTemplate, BodyConverters.NULL, "update-template").execute(); - } - } -} diff --git a/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/IndexTemplates.java b/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/IndexTemplates.java deleted file mode 100644 index 550889b3c85..00000000000 --- a/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/IndexTemplates.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.elasticsearch; - -import com.google.auto.value.AutoValue; - -@AutoValue -abstract class IndexTemplates { - static Builder newBuilder() { - return new AutoValue_IndexTemplates.Builder(); - } - - abstract ElasticsearchVersion version(); - - abstract char indexTypeDelimiter(); - - abstract String span(); - - abstract String dependency(); - - abstract String autocomplete(); - - @AutoValue.Builder - interface Builder { - Builder version(ElasticsearchVersion version); - - Builder indexTypeDelimiter(char indexTypeDelimiter); - - Builder span(String span); - - Builder dependency(String dependency); - - Builder autocomplete(String autocomplete); - - IndexTemplates build(); - } -} diff --git a/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/VersionSpecificTemplates.java b/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/VersionSpecificTemplates.java deleted file mode 100644 index 310e597b6cc..00000000000 --- a/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/VersionSpecificTemplates.java +++ /dev/null @@ -1,261 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.elasticsearch; - -import static zipkin2.elasticsearch.ElasticsearchVersion.V5_0; -import static zipkin2.elasticsearch.ElasticsearchVersion.V6_0; -import static zipkin2.elasticsearch.ElasticsearchVersion.V7_0; -import static zipkin2.elasticsearch.ElasticsearchVersion.V7_8; -import static zipkin2.elasticsearch.ElasticsearchVersion.V8_0; - -/** Returns version-specific index templates */ -// TODO: make a main class that spits out the index template using ENV variables for the server, -// a parameter for the version, and a parameter for the index type. Ex. -// java -cp zipkin-storage-elasticsearch.jar zipkin2.elasticsearch.VersionSpecificTemplates 6.7 span -final class VersionSpecificTemplates { - /** Maximum character length constraint of most names, IP literals and IDs. */ - static final int SHORT_STRING_LENGTH = 256; - static final String TYPE_AUTOCOMPLETE = "autocomplete"; - static final String TYPE_SPAN = "span"; - static final String TYPE_DEPENDENCY = "dependency"; - - /** - * In Zipkin search, we do exact match only (keyword). Norms is about scoring. We don't use that - * in our API, and disable it to reduce disk storage needed. - */ - static final String KEYWORD = "{ \"type\": \"keyword\", \"norms\": false }"; - - final String indexPrefix; - final int indexReplicas, indexShards; - final boolean searchEnabled, strictTraceId; - final Integer templatePriority; - - VersionSpecificTemplates(String indexPrefix, int indexReplicas, int indexShards, - boolean searchEnabled, boolean strictTraceId, Integer templatePriority) { - this.indexPrefix = indexPrefix; - this.indexReplicas = indexReplicas; - this.indexShards = indexShards; - this.searchEnabled = searchEnabled; - this.strictTraceId = strictTraceId; - this.templatePriority = templatePriority; - } - - String indexPattern(String type, ElasticsearchVersion version) { - return '"' - + (version.compareTo(V6_0) < 0 ? 
"template" : "index_patterns") - + "\": \"" - + indexPrefix - + indexTypeDelimiter(version) - + type - + "-*" - + "\""; - } - - String indexProperties(ElasticsearchVersion version) { - // 6.x _all disabled https://www.elastic.co/guide/en/elasticsearch/reference/6.7/breaking-changes-6.0.html#_the_literal__all_literal_meta_field_is_now_disabled_by_default - // 7.x _default disallowed https://www.elastic.co/guide/en/elasticsearch/reference/current/breaking-changes-7.0.html#_the_literal__default__literal_mapping_is_no_longer_allowed - String result = "" - + " \"index.number_of_shards\": " + indexShards + ",\n" - + " \"index.number_of_replicas\": " + indexReplicas + ",\n" - + " \"index.requests.cache.enable\": true"; - return result + "\n"; - } - - String indexTemplate(ElasticsearchVersion version) { - if (useComposableTemplate(version)) { - return "\"template\": {\n"; - } - - return ""; - } - - String indexTemplateClosing(ElasticsearchVersion version) { - if (useComposableTemplate(version)) { - return "},\n"; - } - - return ""; - } - - String templatePriority(ElasticsearchVersion version) { - if (useComposableTemplate(version)) { - return "\"priority\": " + templatePriority + "\n"; - } - - return ""; - } - - String beginTemplate(String type, ElasticsearchVersion version) { - return "{\n" - + " " + indexPattern(type, version) + ",\n" - + indexTemplate(version) - + " \"settings\": {\n" - + indexProperties(version); - } - - String endTemplate(ElasticsearchVersion version) { - return indexTemplateClosing(version) - + templatePriority(version) - + "}"; - } - - /** Templatized due to version differences. Only fields used in search are declared */ - String spanIndexTemplate(ElasticsearchVersion version) { - String result = beginTemplate(TYPE_SPAN, version); - - String traceIdMapping = KEYWORD; - if (!strictTraceId) { - // Supporting mixed trace ID length is expensive due to needing a special analyzer and - // "fielddata" which consumes a lot of heap. 
Sites should only turn off strict trace ID when - // in a transition, and keep trace ID length transitions as short time as possible. - traceIdMapping = - "{ \"type\": \"text\", \"fielddata\": \"true\", \"analyzer\": \"traceId_analyzer\" }"; - result += (",\n" - + " \"analysis\": {\n" - + " \"analyzer\": {\n" - + " \"traceId_analyzer\": {\n" - + " \"type\": \"custom\",\n" - + " \"tokenizer\": \"keyword\",\n" - + " \"filter\": \"traceId_filter\"\n" - + " }\n" - + " },\n" - + " \"filter\": {\n" - + " \"traceId_filter\": {\n" - + " \"type\": \"pattern_capture\",\n" - + " \"patterns\": [\"([0-9a-f]{1,16})$\"],\n" - + " \"preserve_original\": true\n" - + " }\n" - + " }\n" - + " }\n"); - } - - result += " },\n"; - - if (searchEnabled) { - return result - + (" \"mappings\": {\n" - + maybeWrap(TYPE_SPAN, version, "" - + " \"_source\": {\"excludes\": [\"_q\"] },\n" - + " \"dynamic_templates\": [\n" - + " {\n" - + " \"strings\": {\n" - + " \"mapping\": {\n" - + " \"type\": \"keyword\",\"norms\": false," - + " \"ignore_above\": " + SHORT_STRING_LENGTH + "\n" - + " },\n" - + " \"match_mapping_type\": \"string\",\n" - + " \"match\": \"*\"\n" - + " }\n" - + " }\n" - + " ],\n" - + " \"properties\": {\n" - + " \"traceId\": " + traceIdMapping + ",\n" - + " \"name\": " + KEYWORD + ",\n" - + " \"localEndpoint\": {\n" - + " \"type\": \"object\",\n" - + " \"dynamic\": false,\n" - + " \"properties\": { \"serviceName\": " + KEYWORD + " }\n" - + " },\n" - + " \"remoteEndpoint\": {\n" - + " \"type\": \"object\",\n" - + " \"dynamic\": false,\n" - + " \"properties\": { \"serviceName\": " + KEYWORD + " }\n" - + " },\n" - + " \"timestamp_millis\": {\n" - + " \"type\": \"date\",\n" - + " \"format\": \"epoch_millis\"\n" - + " },\n" - + " \"duration\": { \"type\": \"long\" },\n" - + " \"annotations\": { \"enabled\": false },\n" - + " \"tags\": { \"enabled\": false },\n" - + " \"_q\": " + KEYWORD + "\n" - + " }\n") - + " }\n" - + endTemplate(version)); - } - return result - + (" \"mappings\": {\n" 
- + maybeWrap(TYPE_SPAN, version, "" - + " \"properties\": {\n" - + " \"traceId\": " + traceIdMapping + ",\n" - + " \"annotations\": { \"enabled\": false },\n" - + " \"tags\": { \"enabled\": false }\n" - + " }\n") - + " }\n" - + endTemplate(version)); - } - - /** Templatized due to version differences. Only fields used in search are declared */ - String dependencyTemplate(ElasticsearchVersion version) { - return beginTemplate(TYPE_DEPENDENCY, version) - + " },\n" - + " \"mappings\": {\n" - + maybeWrap(TYPE_DEPENDENCY, version, " \"enabled\": false\n") - + " }\n" - + endTemplate(version); - } - - // The key filed of a autocompleteKeys is intentionally names as tagKey since it clashes with the - // BodyConverters KEY - String autocompleteTemplate(ElasticsearchVersion version) { - return beginTemplate(TYPE_AUTOCOMPLETE, version) - + " },\n" - + " \"mappings\": {\n" - + maybeWrap(TYPE_AUTOCOMPLETE, version, "" - + " \"enabled\": true,\n" - + " \"properties\": {\n" - + " \"tagKey\": " + KEYWORD + ",\n" - + " \"tagValue\": " + KEYWORD + "\n" - + " }\n") - + " }\n" - + endTemplate(version); - } - - IndexTemplates get(ElasticsearchVersion version) { - if (version.compareTo(V5_0) < 0 || version.compareTo(V8_0) >= 0) { - throw new IllegalArgumentException( - "Elasticsearch versions 5-7.x are supported, was: " + version); - } - return IndexTemplates.newBuilder() - .version(version) - .indexTypeDelimiter(indexTypeDelimiter(version)) - .span(spanIndexTemplate(version)) - .dependency(dependencyTemplate(version)) - .autocomplete(autocompleteTemplate(version)) - .build(); - } - - boolean useComposableTemplate(ElasticsearchVersion version) { - return (version.compareTo(V7_8) >= 0 && templatePriority != null); - } - - /** - * This returns a delimiter based on what's supported by the Elasticsearch version. - * - *

Starting in Elasticsearch 7.x, colons are no longer allowed in index names. This logic will - * make sure the pattern in our index template doesn't use them either. - * - *

See https://github.com/openzipkin/zipkin/issues/2219 - */ - static char indexTypeDelimiter(ElasticsearchVersion version) { - return version.compareTo(V7_0) < 0 ? ':' : '-'; - } - - static String maybeWrap(String type, ElasticsearchVersion version, String json) { - // ES 7.x defaults include_type_name to false https://www.elastic.co/guide/en/elasticsearch/reference/current/breaking-changes-7.0.html#_literal_include_type_name_literal_now_defaults_to_literal_false_literal - if (version.compareTo(V7_0) >= 0) return json; - return " \"" + type + "\": {\n " + json.replace("\n", "\n ") + " }\n"; - } -} - diff --git a/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/internal/BulkCallBuilder.java b/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/internal/BulkCallBuilder.java deleted file mode 100644 index efd74255239..00000000000 --- a/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/internal/BulkCallBuilder.java +++ /dev/null @@ -1,202 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.elasticsearch.internal; - -import com.fasterxml.jackson.core.JsonGenerator; -import com.fasterxml.jackson.core.JsonParser; -import com.fasterxml.jackson.databind.JsonNode; -import com.google.auto.value.AutoValue; -import com.linecorp.armeria.common.HttpData; -import com.linecorp.armeria.common.HttpHeaderNames; -import com.linecorp.armeria.common.HttpMethod; -import com.linecorp.armeria.common.MediaType; -import com.linecorp.armeria.common.RequestContext; -import com.linecorp.armeria.common.RequestHeaders; -import com.linecorp.armeria.common.util.Exceptions; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufAllocator; -import io.netty.buffer.ByteBufOutputStream; -import io.netty.buffer.PooledByteBufAllocator; -import io.netty.handler.codec.http.QueryStringEncoder; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.RejectedExecutionException; -import java.util.function.Supplier; -import zipkin2.elasticsearch.ElasticsearchStorage; -import zipkin2.elasticsearch.ElasticsearchVersion; -import zipkin2.elasticsearch.internal.client.HttpCall; -import zipkin2.elasticsearch.internal.client.HttpCall.BodyConverter; - -import static zipkin2.Call.propagateIfFatal; -import static zipkin2.elasticsearch.ElasticsearchVersion.V7_0; -import static zipkin2.elasticsearch.internal.JsonSerializers.OBJECT_MAPPER; - -// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html -// exposed to re-use for testing writes of dependency links -public final class BulkCallBuilder { - // This mapper is invoked under the assumption that bulk requests return errors even when the http - // status is success. 
The status codes expected to be returned were undocumented as of version 7.2 - // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html - static final BodyConverter CHECK_FOR_ERRORS = new BodyConverter() { - @Override public Void convert(JsonParser parser, Supplier contentString) { - RuntimeException toThrow = null; - try { - JsonNode root = OBJECT_MAPPER.readTree(parser); - // only throw when we know it is an error - if (!root.at("/errors").booleanValue() && !root.at("/error").isObject()) return null; - - String message = root.findPath("reason").textValue(); - if (message == null) message = contentString.get(); - Number status = root.findPath("status").numberValue(); - if (status != null && status.intValue() == 429) { - toThrow = new RejectedExecutionException(message); - } else { - toThrow = new RuntimeException(message); - } - } catch (RuntimeException | IOException possiblyParseException) { // All use of jackson throws - } - if (toThrow != null) throw toThrow; - return null; - } - - @Override public String toString() { - return "CheckForErrors"; - } - }; - - final String tag; - final boolean shouldAddType; - final HttpCall.Factory http; - final String pipeline; - final boolean waitForRefresh; - - // Mutated for each call to index - final List> entries = new ArrayList<>(); - - public BulkCallBuilder(ElasticsearchStorage es, ElasticsearchVersion version, String tag) { - this.tag = tag; - shouldAddType = version.compareTo(V7_0) < 0; - http = Internal.instance.http(es); - pipeline = es.pipeline(); - waitForRefresh = es.flushOnWrites(); - } - - static IndexEntry newIndexEntry(String index, String typeName, T input, - BulkIndexWriter writer) { - return new AutoValue_BulkCallBuilder_IndexEntry<>(index, typeName, input, writer); - } - - @AutoValue static abstract class IndexEntry { - abstract String index(); - - abstract String typeName(); - - abstract T input(); - - abstract BulkIndexWriter writer(); - } - - public void index(String index, 
String typeName, T input, BulkIndexWriter writer) { - entries.add(newIndexEntry(index, typeName, input, writer)); - } - - /** Creates a bulk request when there is more than one object to store */ - public HttpCall build() { - QueryStringEncoder urlBuilder = new QueryStringEncoder("/_bulk"); - if (pipeline != null) urlBuilder.addParam("pipeline", pipeline); - if (waitForRefresh) urlBuilder.addParam("refresh", "wait_for"); - - ByteBufAllocator alloc = RequestContext.mapCurrent( - RequestContext::alloc, () -> PooledByteBufAllocator.DEFAULT); - - HttpCall.RequestSupplier request = new BulkRequestSupplier( - entries, - shouldAddType, - RequestHeaders.of( - HttpMethod.POST, urlBuilder.toString(), - HttpHeaderNames.CONTENT_TYPE, MediaType.JSON_UTF_8), - alloc); - return http.newCall(request, CHECK_FOR_ERRORS, tag); - } - - static class BulkRequestSupplier implements HttpCall.RequestSupplier { - final List> entries; - final boolean shouldAddType; - final RequestHeaders headers; - final ByteBufAllocator alloc; - - BulkRequestSupplier(List> entries, boolean shouldAddType, - RequestHeaders headers, ByteBufAllocator alloc) { - this.entries = entries; - this.shouldAddType = shouldAddType; - this.headers = headers; - this.alloc = alloc; - } - - @Override public RequestHeaders headers() { - return headers; - } - - @Override public void writeBody(HttpCall.RequestStream requestStream) { - for (IndexEntry entry : entries) { - if (!requestStream.tryWrite(HttpData.wrap(serialize(alloc, entry, shouldAddType)))) { - // Stream aborted, no need to serialize anymore. - return; - } - } - } - } - - static ByteBuf serialize(ByteBufAllocator alloc, IndexEntry entry, - boolean shouldAddType) { - // Fuzzily assume a general small span is 600 bytes to reduce resizing while building up the - // JSON. Any extra bytes will be released back after serializing the document. 
- ByteBuf document = alloc.heapBuffer(600); - ByteBuf metadata = alloc.heapBuffer(200); - try { - String id = entry.writer().writeDocument(entry.input(), new ByteBufOutputStream(document)); - writeIndexMetadata(new ByteBufOutputStream(metadata), entry, id, shouldAddType); - - ByteBuf payload = alloc.ioBuffer(document.readableBytes() + metadata.readableBytes() + 2); - try { - payload.writeBytes(metadata).writeByte('\n').writeBytes(document).writeByte('\n'); - } catch (Throwable t) { - payload.release(); - propagateIfFatal(t); - Exceptions.throwUnsafely(t); - } - return payload; - } finally { - document.release(); - metadata.release(); - } - } - - static void writeIndexMetadata(ByteBufOutputStream sink, IndexEntry entry, String id, - boolean shouldAddType) { - try (JsonGenerator writer = JsonSerializers.jsonGenerator(sink)) { - writer.writeStartObject(); - writer.writeObjectFieldStart("index"); - writer.writeStringField("_index", entry.index()); - // the _type parameter is needed for Elasticsearch < 6.x - if (shouldAddType) writer.writeStringField("_type", entry.typeName()); - writer.writeStringField("_id", id); - writer.writeEndObject(); - writer.writeEndObject(); - } catch (IOException e) { - throw new AssertionError(e); // No I/O writing to a Buffer. - } - } -} diff --git a/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/internal/BulkIndexWriter.java b/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/internal/BulkIndexWriter.java deleted file mode 100644 index be505326b04..00000000000 --- a/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/internal/BulkIndexWriter.java +++ /dev/null @@ -1,192 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.elasticsearch.internal; - -import com.fasterxml.jackson.core.JsonGenerator; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufOutputStream; -import io.netty.buffer.ByteBufUtil; -import java.io.IOException; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; -import java.util.Iterator; -import java.util.Map; -import zipkin2.Annotation; -import zipkin2.Endpoint; -import zipkin2.Span; - -import static zipkin2.internal.RecyclableBuffers.SHORT_STRING_LENGTH; - -public abstract class BulkIndexWriter { - - /** - * Write a complete json document according to index strategy and returns the ID field. 
- */ - public abstract String writeDocument(T input, ByteBufOutputStream sink); - - public static final BulkIndexWriter SPAN = new BulkIndexWriter() { - @Override public String writeDocument(Span input, ByteBufOutputStream sink) { - return write(input, true, sink); - } - }; - public static final BulkIndexWriter - SPAN_SEARCH_DISABLED = new BulkIndexWriter() { - @Override public String writeDocument(Span input, ByteBufOutputStream sink) { - return write(input, false, sink); - } - }; - - public static final BulkIndexWriter> AUTOCOMPLETE = - new BulkIndexWriter>() { - @Override public String writeDocument(Map.Entry input, - ByteBufOutputStream sink) { - try (JsonGenerator writer = JsonSerializers.jsonGenerator(sink)) { - writeAutocompleteEntry(input.getKey(), input.getValue(), writer); - } catch (IOException e) { - throw new AssertionError("Couldn't close generator for a memory stream.", e); - } - // Id is used to dedupe server side as necessary. Arbitrarily same format as _q value. - return input.getKey() + '=' + input.getValue(); - } - }; - - static final Endpoint EMPTY_ENDPOINT = Endpoint.newBuilder().build(); - - /** - * In order to allow systems like Kibana to search by timestamp, we add a field "timestamp_millis" - * when storing. The cheapest way to do this without changing the codec is prefixing it to the - * json. For example. {"traceId":"... becomes {"timestamp_millis":12345,"traceId":"... - * - *

Tags are stored as a dictionary. Since some tag names will include inconsistent number of - * dots (ex "error" and perhaps "error.message"), we cannot index them naturally with - * elasticsearch. Instead, we add an index-only (non-source) field of {@code _q} which includes - * valid search queries. For example, the tag {@code error -> 500} results in {@code - * "_q":["error", "error=500"]}. This matches the input query syntax, and can be checked manually - * with curl. - * - *

Ex {@code curl -s localhost:9200/zipkin:span-2017-08-11/_search?q=_q:error=500} - * - * @param searchEnabled encodes timestamp_millis and _q when non-empty - */ - static String write(Span span, boolean searchEnabled, ByteBufOutputStream sink) { - int startIndex = sink.buffer().writerIndex(); - try (JsonGenerator writer = JsonSerializers.jsonGenerator(sink)) { - writer.writeStartObject(); - if (searchEnabled) addSearchFields(span, writer); - writer.writeStringField("traceId", span.traceId()); - if (span.parentId() != null) writer.writeStringField("parentId", span.parentId()); - writer.writeStringField("id", span.id()); - if (span.kind() != null) writer.writeStringField("kind", span.kind().toString()); - if (span.name() != null) writer.writeStringField("name", span.name()); - if (span.timestampAsLong() != 0L) { - writer.writeNumberField("timestamp", span.timestampAsLong()); - } - if (span.durationAsLong() != 0L) writer.writeNumberField("duration", span.durationAsLong()); - if (span.localEndpoint() != null && !EMPTY_ENDPOINT.equals(span.localEndpoint())) { - writer.writeFieldName("localEndpoint"); - write(span.localEndpoint(), writer); - } - if (span.remoteEndpoint() != null && !EMPTY_ENDPOINT.equals(span.remoteEndpoint())) { - writer.writeFieldName("remoteEndpoint"); - write(span.remoteEndpoint(), writer); - } - if (!span.annotations().isEmpty()) { - writer.writeArrayFieldStart("annotations"); - for (int i = 0, length = span.annotations().size(); i < length; ) { - write(span.annotations().get(i++), writer); - } - writer.writeEndArray(); - } - if (!span.tags().isEmpty()) { - writer.writeObjectFieldStart("tags"); - Iterator> tags = span.tags().entrySet().iterator(); - while (tags.hasNext()) write(tags.next(), writer); - writer.writeEndObject(); - } - if (Boolean.TRUE.equals(span.debug())) writer.writeBooleanField("debug", true); - if (Boolean.TRUE.equals(span.shared())) writer.writeBooleanField("shared", true); - writer.writeEndObject(); - } catch (IOException e) { - 
throw new AssertionError(e); // No I/O writing to a Buffer. - } - - // get a slice representing the document we just wrote so that we can make a content hash - ByteBuf slice = sink.buffer().slice(startIndex, sink.buffer().writerIndex() - startIndex); - - return span.traceId() + '-' + md5(slice); - } - - static void writeAutocompleteEntry(String key, String value, JsonGenerator writer) { - try { - writer.writeStartObject(); - writer.writeStringField("tagKey", key); - writer.writeStringField("tagValue", value); - writer.writeEndObject(); - } catch (IOException e) { - throw new AssertionError(e); // No I/O writing to a Buffer. - } - } - - static void write(Map.Entry tag, JsonGenerator writer) throws IOException { - writer.writeStringField(tag.getKey(), tag.getValue()); - } - - static void write(Annotation annotation, JsonGenerator writer) throws IOException { - writer.writeStartObject(); - writer.writeNumberField("timestamp", annotation.timestamp()); - writer.writeStringField("value", annotation.value()); - writer.writeEndObject(); - } - - static void write(Endpoint endpoint, JsonGenerator writer) throws IOException { - writer.writeStartObject(); - if (endpoint.serviceName() != null) { - writer.writeStringField("serviceName", endpoint.serviceName()); - } - if (endpoint.ipv4() != null) writer.writeStringField("ipv4", endpoint.ipv4()); - if (endpoint.ipv6() != null) writer.writeStringField("ipv6", endpoint.ipv6()); - if (endpoint.portAsInt() != 0) writer.writeNumberField("port", endpoint.portAsInt()); - writer.writeEndObject(); - } - - static void addSearchFields(Span span, JsonGenerator writer) throws IOException { - long timestampMillis = span.timestampAsLong() / 1000L; - if (timestampMillis != 0L) writer.writeNumberField("timestamp_millis", timestampMillis); - if (!span.tags().isEmpty() || !span.annotations().isEmpty()) { - writer.writeArrayFieldStart("_q"); - for (Annotation a : span.annotations()) { - if (a.value().length() > SHORT_STRING_LENGTH) continue; - 
writer.writeString(a.value()); - } - for (Map.Entry tag : span.tags().entrySet()) { - int length = tag.getKey().length() + tag.getValue().length() + 1; - if (length > SHORT_STRING_LENGTH) continue; - writer.writeString(tag.getKey()); // search is possible by key alone - writer.writeString(tag.getKey() + "=" + tag.getValue()); - } - writer.writeEndArray(); - } - } - - static String md5(ByteBuf buf) { - final MessageDigest messageDigest; - try { - messageDigest = MessageDigest.getInstance("MD5"); - } catch (NoSuchAlgorithmException e) { - throw new AssertionError(); - } - messageDigest.update(buf.nioBuffer()); - return ByteBufUtil.hexDump(messageDigest.digest()); - } -} diff --git a/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/internal/IndexNameFormatter.java b/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/internal/IndexNameFormatter.java deleted file mode 100644 index 8d5be7e8c3f..00000000000 --- a/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/internal/IndexNameFormatter.java +++ /dev/null @@ -1,206 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.elasticsearch.internal; - -import com.google.auto.value.AutoValue; -import java.time.Instant; -import java.time.ZoneOffset; -import java.time.format.DateTimeFormatter; -import java.util.ArrayList; -import java.util.Calendar; -import java.util.GregorianCalendar; -import java.util.List; -import java.util.TimeZone; -import zipkin2.internal.DateUtil; -import zipkin2.internal.Nullable; - -import static java.time.LocalDateTime.ofInstant; -import static java.util.Calendar.DAY_OF_MONTH; - -/** - *

Index-Prefix/type delimiter

- * When Elasticsearch dropped support for multiple type indexes, we introduced a delimited naming - * convention to distinguish between span, dependency and autocomplete documents. Originally, this - * was a colon prefix pattern. In version 7, Elasticsearch dropped support for colons in indexes. To - * keep existing writes consistent, we still use colon in versions prior to ES 7, eventhough - * starting at version 7, we change to hyphens. {@code zipkin2.elasticsearch.IndexTemplates} is - * responsible for this decision. - * - *

Creating indexes

- * Using the default index prefix of "zipkin", when indexes are created, they look like the - * following, based on the version. - * - *
    - *
  • ES up to v6: zipkin:span-2019-05-03 zipkin:dependency-2019-05-03 zipkin:autocomplete-2019-05-03
  • - *
  • ES v7: zipkin-span-2019-05-03 zipkin-dependency-2019-05-03 zipkin-autocomplete-2019-05-03
  • - *
- * - *

We can allow an index prefix of up to 231 UTF-8 encoded bytes, subject to the index naming - * constraints. This is the normal 255 limit minus the longest suffix (ex. -autocomplete-2019-05-03). - * - *

Reading indexes

- * While ES 7 cannot write new indexes with a colons, it can read them. Upon upgrade, some sites - * will have a mixed read state where some indexes delimit types with a colon and others a hyphen. - * Accordingly, we use * in read patterns in place of a type delimiter. We use * because there is no - * support for single character wildcards in ES. - * - *

Elasticsearch 7 naming constraints

- * According to a recent - * reference, the following index naming constraints apply to index names as of ES 7: - * - *
    - *
  • No more than 255 UTF-8 encoded bytes
  • - *
  • Cannot be . or ..
  • - *
  • Cannot contain : or #
  • - *
  • Cannot start with _ - or +
  • - *
- */ -@AutoValue -public abstract class IndexNameFormatter { - public static Builder newBuilder() { - return new AutoValue_IndexNameFormatter.Builder(); - } - - public abstract Builder toBuilder(); - - private static final TimeZone UTC = TimeZone.getTimeZone("UTC"); - - public abstract String index(); - - abstract char dateSeparator(); - - abstract DateTimeFormatter dateFormat(); - - @AutoValue.Builder - public abstract static class Builder { - public abstract Builder index(String index); - - public abstract Builder dateSeparator(char dateSeparator); - - abstract Builder dateFormat(DateTimeFormatter dateFormat); - - abstract char dateSeparator(); - - public final IndexNameFormatter build() { - char separator = dateSeparator(); - String format = separator == 0 ? "yyyyMMdd" : "yyyy-MM-dd".replace('-', separator); - return dateFormat(DateTimeFormatter.ofPattern(format).withZone(ZoneOffset.UTC)).autoBuild(); - } - - abstract IndexNameFormatter autoBuild(); - } - - /** - * Returns a set of index patterns that represent the range provided. Notably, this compresses - * months or years using wildcards (in order to send smaller API calls). - * - *

For example, if {@code beginMillis} is 2016-11-30 and {@code endMillis} is 2017-01-02, the - * result will be 2016-11-30, 2016-12-*, 2017-01-01 and 2017-01-02. - */ - public List formatTypeAndRange(@Nullable String type, long beginMillis, long endMillis) { - GregorianCalendar current = midnightUTC(beginMillis); - GregorianCalendar end = midnightUTC(endMillis); - - String prefix = prefix(type); - List indices = new ArrayList<>(); - while (current.compareTo(end) <= 0) { - if (current.get(Calendar.MONTH) == Calendar.JANUARY && current.get(DAY_OF_MONTH) == 1) { - // attempt to compress a year - current.set(Calendar.DAY_OF_YEAR, current.getActualMaximum(Calendar.DAY_OF_YEAR)); - if (current.compareTo(end) <= 0) { - indices.add( - String.format("%s-%s%c*", prefix, current.get(Calendar.YEAR), dateSeparator())); - current.add(DAY_OF_MONTH, 1); // rollover to next year - continue; - } else { - current.set(Calendar.DAY_OF_YEAR, 1); // rollback to first of the year - } - } else if (current.get(DAY_OF_MONTH) == 1) { - // attempt to compress a month - current.set(DAY_OF_MONTH, current.getActualMaximum(DAY_OF_MONTH)); - if (current.compareTo(end) <= 0) { - indices.add(formatIndexPattern("%s-%s%c%02d%c*", current, prefix)); - current.add(DAY_OF_MONTH, 1); // rollover to next month - continue; - } - current.set(DAY_OF_MONTH, 9); // try to compress days 0-9 - if (current.compareTo(end) <= 0) { - indices.add(formatIndexPattern("%s-%s%c%02d%c0*", current, prefix)); - current.add(DAY_OF_MONTH, 1); // rollover to day 10 - continue; - } - current.set(DAY_OF_MONTH, 1); // set back to day 1 - } else if (current.get(DAY_OF_MONTH) == 10) { - current.set(DAY_OF_MONTH, 19); // try to compress days 10-19 - if (current.compareTo(end) <= 0) { - indices.add(formatIndexPattern("%s-%s%c%02d%c1*", current, prefix)); - current.add(DAY_OF_MONTH, 1); // rollover to day 20 - continue; - } - current.set(DAY_OF_MONTH, 10); // set back to day 10 - } else if (current.get(DAY_OF_MONTH) == 20) { - 
current.set(DAY_OF_MONTH, 29); // try to compress days 20-29 - if (current.compareTo(end) <= 0) { - indices.add(formatIndexPattern("%s-%s%c%02d%c2*", current, prefix)); - current.add(DAY_OF_MONTH, 1); // rollover to day 30 - continue; - } - current.set(DAY_OF_MONTH, 20); // set back to day 20 - } - indices.add(formatTypeAndTimestamp(type, current.getTimeInMillis())); - current.add(DAY_OF_MONTH, 1); - } - return indices; - } - - String formatIndexPattern(String format, GregorianCalendar current, String prefix) { - return String.format( - format, - prefix, - current.get(Calendar.YEAR), - dateSeparator(), - current.get(Calendar.MONTH) + 1, - dateSeparator()); - } - - static GregorianCalendar midnightUTC(long epochMillis) { - GregorianCalendar result = new GregorianCalendar(UTC); - result.setTimeInMillis(DateUtil.midnightUTC(epochMillis)); - return result; - } - - /** On insert, require a version-specific index-type delimiter as ES 7+ dropped colons */ - public String formatTypeAndTimestampForInsert(String type, char indexTypeDelimiter, - long timestampMillis) { - return index() + indexTypeDelimiter + type + '-' + format(timestampMillis); - } - - public String formatTypeAndTimestamp(@Nullable String type, long timestampMillis) { - return prefix(type) + "-" + format(timestampMillis); - } - - private String prefix(@Nullable String type) { - // We use single-character wildcard here in order to read both : and - as starting in ES 7, : - // is no longer permitted. - return type != null ? 
index() + "*" + type : index(); - } - - public String formatType(@Nullable String type) { - return prefix(type) + "-*"; - } - - String format(long timestampMillis) { - return dateFormat().format(ofInstant(Instant.ofEpochMilli(timestampMillis), ZoneOffset.UTC)); - } -} diff --git a/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/internal/Internal.java b/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/internal/Internal.java deleted file mode 100644 index 91872c3fec6..00000000000 --- a/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/internal/Internal.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.elasticsearch.internal; - -import zipkin2.elasticsearch.ElasticsearchStorage; -import zipkin2.elasticsearch.internal.client.HttpCall; - -/** - * Escalate internal APIs so they can be used from outside packages. The only implementation is in - * {@link ElasticsearchStorage}. - * - *

Inspired by {@code okhttp3.internal.Internal}. - */ -public abstract class Internal { - public static Internal instance; - - public abstract HttpCall.Factory http(ElasticsearchStorage storage); -} diff --git a/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/internal/JsonReaders.java b/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/internal/JsonReaders.java deleted file mode 100644 index 643e9fdc028..00000000000 --- a/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/internal/JsonReaders.java +++ /dev/null @@ -1,125 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.elasticsearch.internal; - -import com.fasterxml.jackson.core.JsonParser; -import com.fasterxml.jackson.core.JsonToken; -import java.io.IOException; -import java.util.ArrayList; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Set; -import zipkin2.internal.Nullable; - -/** - * Utilities used here aim to reduce allocation overhead for common requests. It does so by skipping - * unrelated fields. This is used for responses which could be large. - */ -public final class JsonReaders { - /** - * Navigates to a field of a JSON-serialized object. For example, - * - *

{@code
-   * JsonParser status = enterPath(JsonAdapters.jsonParser(stream), "message", "status");
-   * if (status != null) throw new IllegalStateException(status.nextString());
-   * }
- */ - @Nullable public static JsonParser enterPath(JsonParser parser, String path1, String path2) - throws IOException { - return enterPath(parser, path1) != null ? enterPath(parser, path2) : null; - } - - @Nullable public static JsonParser enterPath(JsonParser parser, String path) throws IOException { - if (!checkStartObject(parser, false)) return null; - - JsonToken value; - while ((value = parser.nextValue()) != JsonToken.END_OBJECT) { - if (value == null) { - // End of input so ignore. - return null; - } - if (parser.getCurrentName().equalsIgnoreCase(path) && value != JsonToken.VALUE_NULL) { - return parser; - } else { - parser.skipChildren(); - } - } - return null; - } - - public static List collectValuesNamed(JsonParser parser, String name) throws IOException { - checkStartObject(parser, true); - Set result = new LinkedHashSet<>(); - visitObject(parser, name, result); - return new ArrayList<>(result); - } - - static void visitObject(JsonParser parser, String name, Set result) throws IOException { - checkStartObject(parser, true); - JsonToken value; - while ((value = parser.nextValue()) != JsonToken.END_OBJECT) { - if (value == null) { - // End of input so ignore. - return; - } - if (parser.getCurrentName().equals(name)) { - result.add(parser.getText()); - } else { - visitNextOrSkip(parser, name, result); - } - } - } - - static void visitNextOrSkip(JsonParser parser, String name, Set result) - throws IOException { - switch (parser.currentToken()) { - case START_ARRAY: - JsonToken token; - while ((token = parser.nextToken()) != JsonToken.END_ARRAY) { - if (token == null) { - // End of input so ignore. - return; - } - visitObject(parser, name, result); - } - break; - case START_OBJECT: - visitObject(parser, name, result); - break; - default: - // Skip current value. 
- } - } - - static boolean checkStartObject(JsonParser parser, boolean shouldThrow) throws IOException { - try { - JsonToken currentToken = parser.currentToken(); - // The parser may not be at a token, yet. If that's the case advance. - if (currentToken == null) currentToken = parser.nextToken(); - - // If we are still not at the expected token, we could be an another or an empty body. - if (currentToken == JsonToken.START_OBJECT) return true; - if (shouldThrow) { - throw new IllegalArgumentException("Expected start object, was " + currentToken); - } - return false; - } catch (Throwable e) { // likely not json - if (shouldThrow) throw e; - return false; - } - } - - JsonReaders() { - } -} diff --git a/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/internal/JsonSerializers.java b/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/internal/JsonSerializers.java deleted file mode 100644 index c88b8232664..00000000000 --- a/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/internal/JsonSerializers.java +++ /dev/null @@ -1,228 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.elasticsearch.internal; - -import com.fasterxml.jackson.annotation.JsonInclude; -import com.fasterxml.jackson.core.JsonFactory; -import com.fasterxml.jackson.core.JsonGenerator; -import com.fasterxml.jackson.core.JsonParser; -import com.fasterxml.jackson.core.JsonToken; -import com.fasterxml.jackson.databind.ObjectMapper; -import java.io.IOException; -import java.io.OutputStream; -import zipkin2.Annotation; -import zipkin2.DependencyLink; -import zipkin2.Endpoint; -import zipkin2.Span; - -/** - * JSON serialization utilities and parsing code. - */ -public final class JsonSerializers { - public static final ObjectMapper OBJECT_MAPPER = new ObjectMapper() - .setSerializationInclusion(JsonInclude.Include.NON_NULL); - public static final JsonFactory JSON_FACTORY = new JsonFactory(); - - public static JsonGenerator jsonGenerator(OutputStream stream) { - try { - return JSON_FACTORY.createGenerator(stream); - } catch (IOException e) { - throw new AssertionError("Could not create JSON generator for a memory stream.", e); - } - } - - public interface ObjectParser { - T parse(JsonParser jsonParser) throws IOException; - } - - public static final ObjectParser SPAN_PARSER = JsonSerializers::parseSpan; - - static Span parseSpan(JsonParser parser) throws IOException { - if (!parser.isExpectedStartObjectToken()) { - throw new IllegalArgumentException("Not a valid JSON object, start token: " + - parser.currentToken()); - } - - Span.Builder result = Span.newBuilder(); - - JsonToken value; - while ((value = parser.nextValue()) != JsonToken.END_OBJECT) { - if (value == null) { - throw new IOException("End of input while parsing object."); - } - if (value == JsonToken.VALUE_NULL) { - continue; - } - switch (parser.currentName()) { - case "traceId": - result.traceId(parser.getText()); - break; - case "parentId": - result.parentId(parser.getText()); - break; - case "id": - result.id(parser.getText()); - break; - case "kind": - 
result.kind(Span.Kind.valueOf(parser.getText())); - break; - case "name": - result.name(parser.getText()); - break; - case "timestamp": - result.timestamp(parser.getLongValue()); - break; - case "duration": - result.duration(parser.getLongValue()); - break; - case "localEndpoint": - result.localEndpoint(parseEndpoint(parser)); - break; - case "remoteEndpoint": - result.remoteEndpoint(parseEndpoint(parser)); - break; - case "annotations": - if (value != JsonToken.START_ARRAY) { - throw new IOException("Invalid span, expecting annotations array start, got: " + - value); - } - while (parser.nextToken() != JsonToken.END_ARRAY) { - Annotation a = parseAnnotation(parser); - result.addAnnotation(a.timestamp(), a.value()); - } - break; - case "tags": - if (value != JsonToken.START_OBJECT) { - throw new IOException("Invalid span, expecting tags object, got: " + value); - } - while (parser.nextValue() != JsonToken.END_OBJECT) { - result.putTag(parser.currentName(), parser.getValueAsString()); - } - break; - case "debug": - result.debug(parser.getBooleanValue()); - break; - case "shared": - result.shared(parser.getBooleanValue()); - break; - default: - // Skip - } - } - - return result.build(); - } - - static Endpoint parseEndpoint(JsonParser parser) throws IOException { - if (!parser.isExpectedStartObjectToken()) { - throw new IllegalArgumentException("Not a valid JSON object, start token: " + - parser.currentToken()); - } - - String serviceName = null, ipv4 = null, ipv6 = null; - int port = 0; - - while (parser.nextToken() != JsonToken.END_OBJECT) { - JsonToken value = parser.nextValue(); - if (value == JsonToken.VALUE_NULL) { - continue; - } - - switch (parser.currentName()) { - case "serviceName": - serviceName = parser.getText(); - break; - case "ipv4": - ipv4 = parser.getText(); - break; - case "ipv6": - ipv6 = parser.getText(); - break; - case "port": - port = parser.getIntValue(); - break; - default: - // Skip - } - } - - if (serviceName == null && ipv4 == null && 
ipv6 == null && port == 0) return null; - return Endpoint.newBuilder() - .serviceName(serviceName) - .ip(ipv4) - .ip(ipv6) - .port(port) - .build(); - } - - static Annotation parseAnnotation(JsonParser parser) throws IOException { - if (!parser.isExpectedStartObjectToken()) { - throw new IllegalArgumentException("Not a valid JSON object, start token: " + - parser.currentToken()); - } - - long timestamp = 0; - String value = null; - - while (parser.nextValue() != JsonToken.END_OBJECT) { - switch (parser.currentName()) { - case "timestamp": - timestamp = parser.getLongValue(); - break; - case "value": - value = parser.getValueAsString(); - break; - default: - // Skip - } - } - - if (timestamp == 0 || value == null) { - throw new IllegalArgumentException("Incomplete annotation at " + parser.currentToken()); - } - return Annotation.create(timestamp, value); - } - - public static final ObjectParser DEPENDENCY_LINK_PARSER = parser -> { - if (!parser.isExpectedStartObjectToken()) { - throw new IllegalArgumentException("Expected start of dependency link object but was " - + parser.currentToken()); - } - - DependencyLink.Builder result = DependencyLink.newBuilder(); - JsonToken value; - while ((value = parser.nextValue()) != JsonToken.END_OBJECT) { - if (value == null) { - throw new IOException("End of input while parsing object."); - } - switch (parser.currentName()) { - case "parent": - result.parent(parser.getText()); - break; - case "child": - result.child(parser.getText()); - break; - case "callCount": - result.callCount(parser.getLongValue()); - break; - case "errorCount": - result.errorCount(parser.getLongValue()); - break; - default: - // Skip - } - } - return result.build(); - }; -} diff --git a/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/internal/client/Aggregation.java b/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/internal/client/Aggregation.java deleted file mode 100644 index 6f39d7c7d05..00000000000 --- 
a/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/internal/client/Aggregation.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.elasticsearch.internal.client; - -import java.util.Collections; -import java.util.LinkedHashMap; -import java.util.Map; - -public class Aggregation { - transient final String field; - - AggTerms terms; - Map min; - Map aggs; - - Aggregation(String field) { - this.field = field; - } - - public static Aggregation terms(String field, int size) { - Aggregation result = new Aggregation(field); - result.terms = new AggTerms(field, size); - return result; - } - - public Aggregation orderBy(String subAgg, String direction) { - terms.order(subAgg, direction); - return this; - } - - public static Aggregation min(String field) { - Aggregation result = new Aggregation(field); - result.min = Collections.singletonMap("field", field); - return result; - } - - public AggTerms getTerms() { - return terms; - } - - public Map getMin() { - return min; - } - - public Map getAggs() { - return aggs; - } - - static class AggTerms { - AggTerms(String field, int size) { - this.field = field; - this.size = size; - } - - final String field; - final int size; - Map order; - - void order(String agg, String direction) { - order = Collections.singletonMap(agg, direction); - } - - public String getField() { - return field; - } - - public int 
getSize() { - return size; - } - - public Map getOrder() { - return order; - } - } - - public Aggregation addSubAggregation(Aggregation agg) { - if (aggs == null) aggs = new LinkedHashMap<>(); - aggs.put(agg.field, agg); - return this; - } -} diff --git a/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/internal/client/HttpCall.java b/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/internal/client/HttpCall.java deleted file mode 100644 index b1a0593de8d..00000000000 --- a/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/internal/client/HttpCall.java +++ /dev/null @@ -1,278 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.elasticsearch.internal.client; - -import com.fasterxml.jackson.core.JsonParser; -import com.fasterxml.jackson.databind.JsonNode; -import com.linecorp.armeria.client.Clients; -import com.linecorp.armeria.client.UnprocessedRequestException; -import com.linecorp.armeria.client.WebClient; -import com.linecorp.armeria.common.AggregatedHttpRequest; -import com.linecorp.armeria.common.AggregatedHttpResponse; -import com.linecorp.armeria.common.HttpData; -import com.linecorp.armeria.common.HttpHeaders; -import com.linecorp.armeria.common.HttpRequest; -import com.linecorp.armeria.common.HttpRequestWriter; -import com.linecorp.armeria.common.HttpResponse; -import com.linecorp.armeria.common.HttpStatus; -import com.linecorp.armeria.common.HttpStatusClass; -import com.linecorp.armeria.common.RequestContext; -import com.linecorp.armeria.common.RequestHeaders; -import com.linecorp.armeria.common.util.Exceptions; -import com.linecorp.armeria.common.util.SafeCloseable; -import io.netty.util.concurrent.EventExecutor; -import java.io.FileNotFoundException; -import java.io.IOException; -import java.io.InputStream; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionException; -import java.util.concurrent.RejectedExecutionException; -import java.util.function.Supplier; -import zipkin2.Call; -import zipkin2.Callback; - -import static zipkin2.elasticsearch.internal.JsonSerializers.JSON_FACTORY; -import static zipkin2.elasticsearch.internal.JsonSerializers.OBJECT_MAPPER; - -public final class HttpCall extends Call.Base { - - public interface BodyConverter { - /** - * Prefer using the {@code parser} for request-scoped conversions. Typically, {@code - * contentString} is only for an unexpected failure. - */ - V convert(JsonParser parser, Supplier contentString) throws IOException; - } - - /** - * A request stream which can have {@link HttpData} of the request body written to it. 
- */ - public interface RequestStream { - /** - * Writes the {@link HttpData} to the stream. Returns {@code false} if the stream has been - * aborted (e.g., the request timed out while writing), or {@code true} otherwise. - */ - boolean tryWrite(HttpData obj); - } - - /** - * A supplier of {@linkplain HttpHeaders headers} and {@linkplain HttpData body} of a request to - * Elasticsearch. - */ - public interface RequestSupplier { - /** - * Returns the {@linkplain HttpHeaders headers} for this request. - */ - RequestHeaders headers(); - - /** - * Writes the body of this request into the {@link RequestStream}. {@link - * RequestStream#tryWrite(HttpData)} can be called any number of times to publish any number of - * payload objects. It can be useful to split up a large payload into smaller chunks instead of - * buffering everything as one payload. - */ - void writeBody(RequestStream requestStream); - } - - static class AggregatedRequestSupplier implements RequestSupplier { - - final AggregatedHttpRequest request; - - AggregatedRequestSupplier(AggregatedHttpRequest request) { - try (HttpData content = request.content()) { - if (!content.isPooled()) { - this.request = request; - } else { - // Unfortunately it's not possible to use pooled objects in requests and support clone() - // after sending the request. 
- this.request = AggregatedHttpRequest.of( - request.headers(), HttpData.wrap(content.array()), request.trailers()); - } - } - } - - @Override public RequestHeaders headers() { - return request.headers(); - } - - @Override public void writeBody(RequestStream requestStream) { - requestStream.tryWrite(request.content()); - } - } - - public static class Factory { - final WebClient httpClient; - - public Factory(WebClient httpClient) { - this.httpClient = httpClient; - } - - public HttpCall newCall( - AggregatedHttpRequest request, BodyConverter bodyConverter, String name) { - return new HttpCall<>( - httpClient, new AggregatedRequestSupplier(request), bodyConverter, name); - } - - public HttpCall newCall( - RequestSupplier request, BodyConverter bodyConverter, String name) { - return new HttpCall<>(httpClient, request, bodyConverter, name); - } - } - - // Visible for benchmarks - public final RequestSupplier request; - final BodyConverter bodyConverter; - final String name; - - final WebClient httpClient; - - volatile CompletableFuture responseFuture; - - HttpCall(WebClient httpClient, RequestSupplier request, BodyConverter bodyConverter, - String name) { - this.httpClient = httpClient; - this.name = name; - this.request = request; - this.bodyConverter = bodyConverter; - } - - @Override protected V doExecute() throws IOException { - // TODO: testme - for (EventExecutor eventLoop : httpClient.options().factory().eventLoopGroup()) { - if (eventLoop.inEventLoop()) { - throw new RuntimeException("Attempting to make a blocking request from an event loop. 
" - + "Either use doEnqueue() or run this in a separate thread."); - } - } - final AggregatedHttpResponse response; - try { - response = sendRequest().join(); - } catch (CompletionException e) { - propagateIfFatal(e); - Exceptions.throwUnsafely(e.getCause()); - return null; // Unreachable - } - return parseResponse(response, bodyConverter); - } - - @SuppressWarnings("FutureReturnValueIgnored") - // TODO: errorprone wants us to check this future before returning, but what would be a sensible - // check? Say it is somehow canceled, would we take action? Would callback.onError() be redundant? - @Override protected void doEnqueue(Callback callback) { - sendRequest().handle((response, t) -> { - if (t != null) { - callback.onError(t); - } else { - try { - V value = parseResponse(response, bodyConverter); - callback.onSuccess(value); - } catch (Throwable t1) { - propagateIfFatal(t1); - callback.onError(t1); - } - } - return null; - }); - } - - @Override protected void doCancel() { - CompletableFuture responseFuture = this.responseFuture; - if (responseFuture != null) { - responseFuture.cancel(false); - } - } - - @Override public HttpCall clone() { - return new HttpCall<>(httpClient, request, bodyConverter, name); - } - - @Override public String toString() { - return "HttpCall(" + request + ")"; - } - - CompletableFuture sendRequest() { - final HttpResponse response; - try (SafeCloseable ignored = - Clients.withContextCustomizer(ctx -> ctx.logBuilder().name(name))) { - HttpRequestWriter httpRequest = HttpRequest.streaming(request.headers()); - response = httpClient.execute(httpRequest); - request.writeBody(httpRequest::tryWrite); - httpRequest.close(); - } - CompletableFuture responseFuture = - RequestContext.mapCurrent( - ctx -> response.aggregateWithPooledObjects(ctx.eventLoop(), ctx.alloc()), - // This should never be used in practice since the module runs in an Armeria server. 
- response::aggregate); - responseFuture = responseFuture.exceptionally(t -> { - if (t instanceof UnprocessedRequestException) { - Throwable cause = t.getCause(); - // Go ahead and reduce the output in logs since this is usually a configuration or - // infrastructure issue and the Armeria stack trace won't help debugging that. - Exceptions.clearTrace(cause); - - String message = cause.getMessage(); - if (message == null) message = cause.getClass().getSimpleName(); - throw new RejectedExecutionException(message, cause); - } else { - Exceptions.throwUnsafely(t); - } - return null; - }); - this.responseFuture = responseFuture; - return responseFuture; - } - - V parseResponse(AggregatedHttpResponse response, BodyConverter bodyConverter) - throws IOException { - // Handle the case where there is no content, as that means we have no resources to release. - HttpStatus status = response.status(); - if (response.content().isEmpty()) { - if (status.codeClass().equals(HttpStatusClass.SUCCESS)) { - return null; - } else if (status.code() == 404) { - throw new FileNotFoundException(request.headers().path()); - } else { - throw new RuntimeException( - "response for " + request.headers().path() + " failed: " + response.status()); - } - } - - // If this is a client or server error, we look for a json message. - if ((status.codeClass().equals(HttpStatusClass.CLIENT_ERROR) - || status.codeClass().equals(HttpStatusClass.SERVER_ERROR))) { - bodyConverter = (parser, contentString) -> { - String message = null; - try { - JsonNode root = OBJECT_MAPPER.readTree(parser); - message = root.findPath("reason").textValue(); - if (message == null) message = root.at("/Message").textValue(); - } catch (RuntimeException | IOException possiblyParseException) { - // EmptyCatch ignored - } - throw new RuntimeException(message != null ? 
message - : "response for " + request.headers().path() + " failed: " + contentString.get()); - }; - } - - try (HttpData content = response.content(); - InputStream stream = content.toInputStream(); - JsonParser parser = JSON_FACTORY.createParser(stream)) { - - if (status.code() == 404) throw new FileNotFoundException(request.headers().path()); - - return bodyConverter.convert(parser, content::toStringUtf8); - } - } -} diff --git a/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/internal/client/SearchCallFactory.java b/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/internal/client/SearchCallFactory.java deleted file mode 100644 index 564b1d64813..00000000000 --- a/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/internal/client/SearchCallFactory.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.elasticsearch.internal.client; - -import com.fasterxml.jackson.core.JsonProcessingException; -import com.linecorp.armeria.common.AggregatedHttpRequest; -import com.linecorp.armeria.common.HttpData; -import com.linecorp.armeria.common.HttpHeaderNames; -import com.linecorp.armeria.common.HttpMethod; -import com.linecorp.armeria.common.MediaType; -import com.linecorp.armeria.common.RequestHeaders; -import java.util.List; -import zipkin2.internal.Nullable; - -import static zipkin2.elasticsearch.internal.JsonSerializers.OBJECT_MAPPER; - -public class SearchCallFactory { - final HttpCall.Factory http; - - public SearchCallFactory(HttpCall.Factory http) { - this.http = http; - } - - public HttpCall newCall(SearchRequest request, HttpCall.BodyConverter bodyConverter) { - final AggregatedHttpRequest httpRequest; - try { - httpRequest = AggregatedHttpRequest.of( - RequestHeaders.of( - HttpMethod.POST, lenientSearch(request.indices, request.type), - HttpHeaderNames.CONTENT_TYPE, MediaType.JSON_UTF_8), - HttpData.wrap(OBJECT_MAPPER.writeValueAsBytes(request))); - } catch (JsonProcessingException e) { - throw new AssertionError("Could not serialize SearchRequest to bytes.", e); - } - return http.newCall(httpRequest, bodyConverter, request.tag()); - } - - /** Matches the behavior of {@code IndicesOptions#lenientExpandOpen()} */ - String lenientSearch(List indices, @Nullable String type) { - return '/' + String.join(",", indices) + - "/_search?allow_no_indices=true&expand_wildcards=open&ignore_unavailable=true"; - } -} diff --git a/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/internal/client/SearchRequest.java b/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/internal/client/SearchRequest.java deleted file mode 100644 index b731d04b1be..00000000000 --- a/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/internal/client/SearchRequest.java +++ /dev/null @@ -1,191 +0,0 @@ -/* - * Copyright 2015-2019 The 
OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.elasticsearch.internal.client; - -import com.fasterxml.jackson.annotation.JsonProperty; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import zipkin2.internal.Nullable; - -public final class SearchRequest { - - public static SearchRequest create(List indices) { - return new SearchRequest(indices, null); - } - - public static SearchRequest create(List indices, String type) { - return new SearchRequest(indices, type); - } - - /** - * The maximum results returned in a query. This only affects non-aggregation requests. - * - *

Not configurable as it implies adjustments to the index template (index.max_result_window) - * - *

See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-from-size.html - */ - static final int MAX_RESULT_WINDOW = 10000; // the default elasticsearch allowed limit - - transient final List indices; - @Nullable transient final String type; - - Integer size = MAX_RESULT_WINDOW; - Boolean _source; - Object query; - Map aggs; - - SearchRequest(List indices, @Nullable String type) { - this.indices = indices; - this.type = type; - } - - public static class Filters extends ArrayList { - public Filters addRange(String field, long from, Long to) { - add(new Range(field, from, to)); - return this; - } - - public Filters addTerm(String field, String value) { - add(new Term(field, value)); - return this; - } - } - - public SearchRequest filters(Filters filters) { - return query(new BoolQuery("must", filters)); - } - - public SearchRequest term(String field, String value) { - return query(new Term(field, value)); - } - - public SearchRequest terms(String field, Collection values) { - return query(new Terms(field, values)); - } - - public SearchRequest addAggregation(Aggregation agg) { - size = null; // we return aggs, not source data - _source = false; - if (aggs == null) aggs = new LinkedHashMap<>(); - aggs.put(agg.field, agg); - return this; - } - - public Integer getSize() { - return size; - } - - public Boolean get_source() { - return _source; - } - - public Object getQuery() { - return query; - } - - public Map getAggs() { - return aggs; - } - - String tag() { - return aggs != null ? 
"aggregation" : "search"; - } - - SearchRequest query(Object filter) { - query = Collections.singletonMap("bool", Collections.singletonMap("filter", filter)); - return this; - } - - static class Term { - - final Map term; - - Term(String field, String value) { - term = Collections.singletonMap(field, value); - } - public Map getTerm() { - return term; - } - } - - static class Terms { - final Map> terms; - - Terms(String field, Collection values) { - this.terms = Collections.singletonMap(field, values); - } - - public Map> getTerms() { - return terms; - } - } - - static class Range { - final Map range; - - Range(String field, long from, Long to) { - range = Collections.singletonMap(field, new Bounds(from, to)); - } - - public Map getRange() { - return range; - } - - static class Bounds { - final long from; - final Long to; - final boolean include_lower = true; - final boolean include_upper = true; - - Bounds(long from, Long to) { - this.from = from; - this.to = to; - } - - public long getFrom() { - return from; - } - - public Long getTo() { - return to; - } - - @JsonProperty("include_lower") - public boolean isIncludeLower() { - return include_lower; - } - - @JsonProperty("include_upper") - public boolean isIncludeUpper() { - return include_upper; - } - } - } - - static class BoolQuery { - final Map bool; - - BoolQuery(String op, Object clause) { - bool = Collections.singletonMap(op, clause); - } - - public Map getBool() { - return bool; - } - } -} diff --git a/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/internal/client/SearchResultConverter.java b/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/internal/client/SearchResultConverter.java deleted file mode 100644 index 1aa0f5f690e..00000000000 --- a/zipkin-storage/elasticsearch/src/main/java/zipkin2/elasticsearch/internal/client/SearchResultConverter.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, 
Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.elasticsearch.internal.client; - -import com.fasterxml.jackson.core.JsonParser; -import com.fasterxml.jackson.core.JsonToken; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.function.Supplier; -import zipkin2.elasticsearch.internal.JsonSerializers.ObjectParser; - -import static zipkin2.elasticsearch.internal.JsonReaders.enterPath; - -public class SearchResultConverter implements HttpCall.BodyConverter> { - final ObjectParser adapter; - - public static SearchResultConverter create(ObjectParser adapter) { - return new SearchResultConverter<>(adapter); - } - - protected SearchResultConverter(ObjectParser adapter) { - this.adapter = adapter; - } - - @Override - public List convert(JsonParser parser, Supplier contentString) throws IOException { - JsonParser hits = enterPath(parser, "hits", "hits"); - if (hits == null || !hits.isExpectedStartArrayToken()) return Collections.emptyList(); - - List result = new ArrayList<>(); - while (hits.nextToken() != JsonToken.END_ARRAY) { - JsonParser source = enterPath(hits, "_source"); - if (source != null) result.add(adapter.parse(source)); - } - return result.isEmpty() ? 
Collections.emptyList() : result; - } -} diff --git a/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/ElasticsearchAutocompleteTagsTest.java b/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/ElasticsearchAutocompleteTagsTest.java deleted file mode 100644 index e87bb269ca5..00000000000 --- a/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/ElasticsearchAutocompleteTagsTest.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.elasticsearch; - -import com.linecorp.armeria.client.WebClient; -import com.linecorp.armeria.common.AggregatedHttpResponse; -import com.linecorp.armeria.common.HttpData; -import com.linecorp.armeria.common.HttpStatus; -import com.linecorp.armeria.common.MediaType; -import com.linecorp.armeria.testing.junit5.server.mock.MockWebServerExtension; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.RegisterExtension; - -import static java.util.Arrays.asList; -import static org.assertj.core.api.Assertions.assertThat; - -class ElasticsearchAutocompleteTagsTest { - - static final AggregatedHttpResponse SUCCESS_RESPONSE = - AggregatedHttpResponse.of(HttpStatus.OK, MediaType.JSON_UTF_8, - HttpData.ofUtf8(TestResponses.AUTOCOMPLETE_VALUES)); - - @RegisterExtension static MockWebServerExtension server = new MockWebServerExtension(); - - ElasticsearchStorage storage; - ElasticsearchAutocompleteTags tagStore; - - @BeforeEach void setUp() { - storage = ElasticsearchStorage.newBuilder(() -> WebClient.of(server.httpUri())) - .autocompleteKeys(asList("http#host", "http-url", "http.method")).build(); - tagStore = new ElasticsearchAutocompleteTags(storage); - } - - @AfterEach void tearDown() { - storage.close(); - } - - @Test void get_list_of_autocomplete_keys() throws Exception { - // note: we don't enqueue a request! 
- assertThat(tagStore.getKeys().execute()) - .contains("http#host", "http-url", "http.method"); - } - - @Test void getValues_requestIncludesKeyName() throws Exception { - server.enqueue(SUCCESS_RESPONSE); - tagStore.getValues("http.method").execute(); - assertThat(server.takeRequest().request().contentUtf8()).contains("\"tagKey\":\"http.method\""); - } - - @Test void getValues() throws Exception { - server.enqueue(SUCCESS_RESPONSE); - - assertThat(tagStore.getValues("http.method").execute()).containsOnly("get", "post"); - } -} diff --git a/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/ElasticsearchSpanConsumerTest.java b/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/ElasticsearchSpanConsumerTest.java deleted file mode 100644 index d58589bf05e..00000000000 --- a/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/ElasticsearchSpanConsumerTest.java +++ /dev/null @@ -1,288 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.elasticsearch; - -import com.linecorp.armeria.client.WebClient; -import com.linecorp.armeria.common.AggregatedHttpRequest; -import com.linecorp.armeria.common.AggregatedHttpResponse; -import com.linecorp.armeria.common.HttpData; -import com.linecorp.armeria.common.HttpStatus; -import com.linecorp.armeria.common.MediaType; -import com.linecorp.armeria.common.ResponseHeaders; -import com.linecorp.armeria.testing.junit5.server.mock.MockWebServerExtension; -import java.io.IOException; -import java.util.concurrent.TimeUnit; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.RegisterExtension; -import zipkin2.Endpoint; -import zipkin2.Span; -import zipkin2.Span.Kind; -import zipkin2.codec.SpanBytesEncoder; -import zipkin2.storage.SpanConsumer; - -import static java.util.Arrays.asList; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.failBecauseExceptionWasNotThrown; -import static zipkin2.TestObjects.TODAY; -import static zipkin2.TestObjects.UTF_8; - -class ElasticsearchSpanConsumerTest { - static final Endpoint WEB_ENDPOINT = Endpoint.newBuilder().serviceName("web").build(); - static final Endpoint APP_ENDPOINT = Endpoint.newBuilder().serviceName("app").build(); - - final AggregatedHttpResponse SUCCESS_RESPONSE = - AggregatedHttpResponse.of(ResponseHeaders.of(HttpStatus.OK), HttpData.empty()); - - @RegisterExtension static MockWebServerExtension server = new MockWebServerExtension(); - - ElasticsearchStorage storage; - SpanConsumer spanConsumer; - - @BeforeEach void setUp() throws Exception { - storage = ElasticsearchStorage.newBuilder(() -> WebClient.of(server.httpUri())) - .autocompleteKeys(asList("environment")) - .build(); - - ensureIndexTemplate(); - } - - @AfterEach void tearDown() throws IOException { - storage.close(); - } - - void ensureIndexTemplate() throws Exception { - // 
gets the index template so that each test doesn't have to - ensureIndexTemplates(storage); - spanConsumer = storage.spanConsumer(); - } - - private void ensureIndexTemplates(ElasticsearchStorage storage) throws InterruptedException { - server.enqueue(AggregatedHttpResponse.of(HttpStatus.OK, MediaType.JSON_UTF_8, - "{\"version\":{\"number\":\"6.0.0\"}}")); - server.enqueue(SUCCESS_RESPONSE); // get span template - server.enqueue(SUCCESS_RESPONSE); // get dependency template - server.enqueue(SUCCESS_RESPONSE); // get tags template - storage.ensureIndexTemplates(); - server.takeRequest(); // get version - server.takeRequest(); // get span template - server.takeRequest(); // get dependency template - server.takeRequest(); // get tags template - } - - @Test void addsTimestamp_millisIntoJson() throws Exception { - server.enqueue(SUCCESS_RESPONSE); - - Span span = - Span.newBuilder().traceId("20").id("20").name("get").timestamp(TODAY * 1000).build(); - - accept(span); - - assertThat(server.takeRequest().request().contentUtf8()) - .contains("\n{\"timestamp_millis\":" + TODAY + ",\"traceId\":"); - } - - @Test void writesSpanNaturallyWhenNoTimestamp() throws Exception { - server.enqueue(SUCCESS_RESPONSE); - - Span span = Span.newBuilder().traceId("1").id("1").name("foo").build(); - accept(Span.newBuilder().traceId("1").id("1").name("foo").build()); - - assertThat(server.takeRequest().request().contentUtf8()) - .contains("\n" + new String(SpanBytesEncoder.JSON_V2.encode(span), UTF_8) + "\n"); - } - - @Test void traceIsSearchableByServerServiceName() throws Exception { - server.enqueue(SUCCESS_RESPONSE); - - Span clientSpan = - Span.newBuilder() - .traceId("20") - .id("22") - .name("") - .parentId("21") - .timestamp(1000L) - .kind(Kind.CLIENT) - .localEndpoint(WEB_ENDPOINT) - .build(); - - Span serverSpan = - Span.newBuilder() - .traceId("20") - .id("22") - .name("get") - .parentId("21") - .timestamp(2000L) - .kind(Kind.SERVER) - .localEndpoint(APP_ENDPOINT) - .build(); - - 
accept(serverSpan, clientSpan); - - // make sure that both timestamps are in the index - assertThat(server.takeRequest().request().contentUtf8()) - .contains("{\"timestamp_millis\":2") - .contains("{\"timestamp_millis\":1"); - } - - @Test void addsPipelineId() throws Exception { - storage.close(); - storage = ElasticsearchStorage.newBuilder(() -> WebClient.of(server.httpUri())) - .pipeline("zipkin") - .build(); - ensureIndexTemplate(); - - server.enqueue(SUCCESS_RESPONSE); - - accept(Span.newBuilder().traceId("1").id("1").name("foo").build()); - - AggregatedHttpRequest request = server.takeRequest().request(); - assertThat(request.path()).isEqualTo("/_bulk?pipeline=zipkin"); - } - - @Test void choosesTypeSpecificIndex() throws Exception { - server.enqueue(SUCCESS_RESPONSE); - - Span span = - Span.newBuilder() - .traceId("1") - .id("2") - .parentId("1") - .name("s") - .localEndpoint(APP_ENDPOINT) - .addAnnotation(TimeUnit.DAYS.toMicros(365) /* 1971-01-01 */, "foo") - .build(); - - // sanity check data - assertThat(span.timestamp()).isNull(); - - accept(span); - - // index timestamp is the server timestamp, not current time! 
- assertThat(server.takeRequest().request().contentUtf8()) - .startsWith("{\"index\":{\"_index\":\"zipkin:span-1971-01-01\",\"_type\":\"span\""); - } - - /** Much simpler template which doesn't write the timestamp_millis field */ - @Test void searchDisabled_simplerIndexTemplate() throws Exception { - storage.close(); - storage = ElasticsearchStorage.newBuilder(() -> WebClient.of(server.httpUri())) - .searchEnabled(false) - .build(); - - server.enqueue(AggregatedHttpResponse.of(HttpStatus.OK, MediaType.JSON_UTF_8, - "{\"version\":{\"number\":\"6.0.0\"}}")); - server.enqueue(AggregatedHttpResponse.of(HttpStatus.NOT_FOUND)); // get span template - server.enqueue(SUCCESS_RESPONSE); // put span template - server.enqueue(SUCCESS_RESPONSE); // get dependency template - server.enqueue(SUCCESS_RESPONSE); // get tags template - storage.ensureIndexTemplates(); - server.takeRequest(); // get version - server.takeRequest(); // get span template - - assertThat(server.takeRequest().request().contentUtf8()) // put span template - .contains( - "" - + " \"mappings\": {\n" - + " \"span\": {\n" - + " \"properties\": {\n" - + " \"traceId\": { \"type\": \"keyword\", \"norms\": false },\n" - + " \"annotations\": { \"enabled\": false },\n" - + " \"tags\": { \"enabled\": false }\n" - + " }\n" - + " }\n" - + " }\n"); - } - - /** Less overhead as a span json isn't rewritten to include a millis timestamp */ - @Test - void searchDisabled_doesntAddTimestampMillis() throws Exception { - storage.close(); - storage = ElasticsearchStorage.newBuilder(() -> WebClient.of(server.httpUri())) - .searchEnabled(false) - .build(); - ensureIndexTemplates(storage); - server.enqueue(SUCCESS_RESPONSE); // for the bulk request - - Span span = - Span.newBuilder().traceId("20").id("20").name("get").timestamp(TODAY * 1000).build(); - - storage.spanConsumer().accept(asList(span)).execute(); - - assertThat(server.takeRequest().request().contentUtf8()).doesNotContain("timestamp_millis"); - } - - @Test void 
addsAutocompleteValue() throws Exception { - server.enqueue(SUCCESS_RESPONSE); - - accept(Span.newBuilder().traceId("1").id("1").timestamp(1).putTag("environment", "A").build()); - - assertThat(server.takeRequest().request().contentUtf8()) - .endsWith("" - + "{\"index\":{\"_index\":\"zipkin:autocomplete-1970-01-01\",\"_type\":\"autocomplete\",\"_id\":\"environment=A\"}}\n" - + "{\"tagKey\":\"environment\",\"tagValue\":\"A\"}\n"); - } - - @Test void addsAutocompleteValue_suppressesWhenSameDay() throws Exception { - server.enqueue(SUCCESS_RESPONSE); - server.enqueue(SUCCESS_RESPONSE); - - Span s = Span.newBuilder().traceId("1").id("1").timestamp(1).putTag("environment", "A").build(); - accept(s); - accept(s.toBuilder().id(2).build()); - - server.takeRequest(); // skip first - // the tag is in the same date range as the other, so it should not write the tag again - assertThat(server.takeRequest().request().contentUtf8()) - .doesNotContain("autocomplete"); - } - - @Test void addsAutocompleteValue_differentDays() throws Exception { - server.enqueue(SUCCESS_RESPONSE); - server.enqueue(SUCCESS_RESPONSE); - - Span s = Span.newBuilder().traceId("1").id("1").timestamp(1).putTag("environment", "A").build(); - accept(s); - accept(s.toBuilder().id(2).timestamp(1 + TimeUnit.DAYS.toMicros(1)).build()); - - server.takeRequest(); // skip first - // different day == different context - assertThat(server.takeRequest().request().contentUtf8()) - .endsWith("" - + "{\"index\":{\"_index\":\"zipkin:autocomplete-1970-01-02\",\"_type\":\"autocomplete\",\"_id\":\"environment=A\"}}\n" - + "{\"tagKey\":\"environment\",\"tagValue\":\"A\"}\n"); - } - - @Test void addsAutocompleteValue_revertsSuppressionOnFailure() throws Exception { - server.enqueue(AggregatedHttpResponse.of(HttpStatus.INTERNAL_SERVER_ERROR)); - server.enqueue(SUCCESS_RESPONSE); - - Span s = Span.newBuilder().traceId("1").id("1").timestamp(1).putTag("environment", "A").build(); - try { - accept(s); - 
failBecauseExceptionWasNotThrown(RuntimeException.class); - } catch (RuntimeException expected) { - } - accept(s); - - // We only cache when there was no error.. the second request should be same as the first - assertThat(server.takeRequest().request().contentUtf8()) - .isEqualTo(server.takeRequest().request().contentUtf8()); - } - - void accept(Span... spans) throws Exception { - spanConsumer.accept(asList(spans)).execute(); - } -} diff --git a/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/ElasticsearchSpanStoreTest.java b/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/ElasticsearchSpanStoreTest.java deleted file mode 100644 index 02a31edb475..00000000000 --- a/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/ElasticsearchSpanStoreTest.java +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.elasticsearch; - -import com.linecorp.armeria.client.WebClient; -import com.linecorp.armeria.common.AggregatedHttpRequest; -import com.linecorp.armeria.common.AggregatedHttpResponse; -import com.linecorp.armeria.common.HttpData; -import com.linecorp.armeria.common.HttpStatus; -import com.linecorp.armeria.common.MediaType; -import com.linecorp.armeria.common.ResponseHeaders; -import com.linecorp.armeria.testing.junit5.server.mock.MockWebServerExtension; -import java.io.IOException; -import java.util.concurrent.TimeUnit; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.RegisterExtension; -import zipkin2.TestObjects; -import zipkin2.storage.QueryRequest; - -import static org.assertj.core.api.Assertions.assertThat; -import static zipkin2.TestObjects.DAY; -import static zipkin2.TestObjects.TODAY; -import static zipkin2.elasticsearch.VersionSpecificTemplates.TYPE_SPAN; - -class ElasticsearchSpanStoreTest { - static final AggregatedHttpResponse EMPTY_RESPONSE = - AggregatedHttpResponse.of(ResponseHeaders.of(HttpStatus.OK), HttpData.empty()); - - @RegisterExtension static MockWebServerExtension server = new MockWebServerExtension(); - - @BeforeEach void setUp() { - storage = ElasticsearchStorage.newBuilder(() -> WebClient.of(server.httpUri())).build(); - spanStore = new ElasticsearchSpanStore(storage); - } - - @AfterEach void tearDown() throws IOException { - storage.close(); - } - - ElasticsearchStorage storage; - ElasticsearchSpanStore spanStore; - - @Test void doesntTruncateTraceIdByDefault() throws Exception { - server.enqueue(EMPTY_RESPONSE); - spanStore.getTrace("48fec942f3e78b893041d36dc43227fd").execute(); - - assertThat(server.takeRequest().request().contentUtf8()) - .contains("\"traceId\":\"48fec942f3e78b893041d36dc43227fd\""); - } - - @Test void truncatesTraceIdTo16CharsWhenNotStrict() throws Exception { - storage.close(); - storage 
= storage.toBuilder().strictTraceId(false).build(); - spanStore = new ElasticsearchSpanStore(storage); - - server.enqueue(EMPTY_RESPONSE); - spanStore.getTrace("48fec942f3e78b893041d36dc43227fd").execute(); - - assertThat(server.takeRequest().request().contentUtf8()) - .contains("\"traceId\":\"3041d36dc43227fd\""); - } - - @Test void serviceNames_defaultsTo24HrsAgo_6x() throws Exception { - server.enqueue(AggregatedHttpResponse.of( - HttpStatus.OK, MediaType.JSON_UTF_8, TestResponses.SERVICE_NAMES)); - spanStore.getServiceNames().execute(); - - requestLimitedTo2DaysOfIndices_singleTypeIndex(); - } - - @Test void spanNames_defaultsTo24HrsAgo_6x() throws Exception { - server.enqueue(AggregatedHttpResponse.of( - HttpStatus.OK, MediaType.JSON_UTF_8, TestResponses.SPAN_NAMES)); - spanStore.getSpanNames("foo").execute(); - - requestLimitedTo2DaysOfIndices_singleTypeIndex(); - } - - @Test void searchDisabled_doesntMakeRemoteQueryRequests() throws Exception { - storage.close(); - storage = ElasticsearchStorage.newBuilder(() -> WebClient.of(server.httpUri())) - .searchEnabled(false) - .build(); - - // skip template check - ElasticsearchSpanStore spanStore = new ElasticsearchSpanStore(storage); - - QueryRequest request = QueryRequest.newBuilder().endTs(TODAY).lookback(DAY).limit(10).build(); - assertThat(spanStore.getTraces(request).execute()).isEmpty(); - assertThat(spanStore.getServiceNames().execute()).isEmpty(); - assertThat(spanStore.getSpanNames("icecream").execute()).isEmpty(); - - assertThat(server.takeRequest(100, TimeUnit.MILLISECONDS)).isNull(); - } - - void requestLimitedTo2DaysOfIndices_singleTypeIndex() { - long today = TestObjects.midnightUTC(System.currentTimeMillis()); - long yesterday = today - TimeUnit.DAYS.toMillis(1); - - // 24 hrs ago always will fall into 2 days (ex. 
if it is 4:00pm, 24hrs ago is a different day) - String indexesToSearch = "" - + storage.indexNameFormatter().formatTypeAndTimestamp(TYPE_SPAN, yesterday) - + "," - + storage.indexNameFormatter().formatTypeAndTimestamp(TYPE_SPAN, today); - - AggregatedHttpRequest request = server.takeRequest().request(); - assertThat(request.path()).startsWith("/" + indexesToSearch + "/_search"); - } -} diff --git a/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/ElasticsearchStorageTest.java b/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/ElasticsearchStorageTest.java deleted file mode 100644 index 90aa9321f40..00000000000 --- a/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/ElasticsearchStorageTest.java +++ /dev/null @@ -1,374 +0,0 @@ -/* - * Copyright 2015-2021 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.elasticsearch; - -import com.linecorp.armeria.client.ResponseTimeoutException; -import com.linecorp.armeria.client.UnprocessedRequestException; -import com.linecorp.armeria.client.WebClient; -import com.linecorp.armeria.client.endpoint.EndpointGroupException; -import com.linecorp.armeria.common.AggregatedHttpResponse; -import com.linecorp.armeria.common.HttpData; -import com.linecorp.armeria.common.HttpStatus; -import com.linecorp.armeria.common.MediaType; -import com.linecorp.armeria.common.ResponseHeaders; -import com.linecorp.armeria.testing.junit5.server.mock.MockWebServerExtension; -import java.time.Instant; -import java.util.concurrent.RejectedExecutionException; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.RegisterExtension; -import zipkin2.CheckResult; -import zipkin2.Component; -import zipkin2.elasticsearch.ElasticsearchStorage.LazyHttpClient; - -import static org.assertj.core.api.Assertions.assertThat; -import static zipkin2.TestObjects.DAY; - -class ElasticsearchStorageTest { - static final AggregatedHttpResponse SUCCESS_RESPONSE = - AggregatedHttpResponse.of(ResponseHeaders.of(HttpStatus.OK), HttpData.empty()); - - @RegisterExtension static MockWebServerExtension server = new MockWebServerExtension(); - - ElasticsearchStorage storage; - - @BeforeEach void setUp() { - storage = newBuilder().build(); - } - - @AfterEach void tearDown() { - storage.close(); - } - - @Test void ensureIndexTemplates_false() throws Exception { - storage.close(); - storage = newBuilder().ensureTemplates(false).build(); - - server.enqueue(SUCCESS_RESPONSE); // dependencies request - - long endTs = Instant.parse("2016-10-02T00:00:00Z").toEpochMilli(); - storage.spanStore().getDependencies(endTs, DAY).execute(); - - assertThat(server.takeRequest().request().path()) - .startsWith("/zipkin*dependency-2016-10-01,zipkin*dependency-2016-10-02/_search"); 
- } - - @Test void memoizesIndexTemplate() throws Exception { - server.enqueue(AggregatedHttpResponse.of( - HttpStatus.OK, MediaType.JSON_UTF_8, "{\"version\":{\"number\":\"6.7.0\"}}")); - server.enqueue(SUCCESS_RESPONSE); // get span template - server.enqueue(SUCCESS_RESPONSE); // get dependency template - server.enqueue(SUCCESS_RESPONSE); // get tags template - server.enqueue(SUCCESS_RESPONSE); // dependencies request - server.enqueue(SUCCESS_RESPONSE); // dependencies request - - long endTs = Instant.parse("2016-10-02T00:00:00Z").toEpochMilli(); - storage.spanStore().getDependencies(endTs, DAY).execute(); - storage.spanStore().getDependencies(endTs, DAY).execute(); - - server.takeRequest(); // get version - server.takeRequest(); // get span template - server.takeRequest(); // get dependency template - server.takeRequest(); // get tags template - - assertThat(server.takeRequest().request().path()) - .startsWith("/zipkin*dependency-2016-10-01,zipkin*dependency-2016-10-02/_search"); - assertThat(server.takeRequest().request().path()) - .startsWith("/zipkin*dependency-2016-10-01,zipkin*dependency-2016-10-02/_search"); - } - - static final AggregatedHttpResponse HEALTH_RESPONSE = AggregatedHttpResponse.of( - HttpStatus.OK, - MediaType.JSON_UTF_8, - "{\n" - + " \"cluster_name\": \"elasticsearch_zipkin\",\n" - + " \"status\": \"yellow\",\n" - + " \"timed_out\": false,\n" - + " \"number_of_nodes\": 1,\n" - + " \"number_of_data_nodes\": 1,\n" - + " \"active_primary_shards\": 5,\n" - + " \"active_shards\": 5,\n" - + " \"relocating_shards\": 0,\n" - + " \"initializing_shards\": 0,\n" - + " \"unassigned_shards\": 5,\n" - + " \"delayed_unassigned_shards\": 0,\n" - + " \"number_of_pending_tasks\": 0,\n" - + " \"number_of_in_flight_fetch\": 0,\n" - + " \"task_max_waiting_in_queue_millis\": 0,\n" - + " \"active_shards_percent_as_number\": 50\n" - + "}"); - - static final AggregatedHttpResponse RESPONSE_UNAUTHORIZED = AggregatedHttpResponse.of( - HttpStatus.UNAUTHORIZED, - 
MediaType.JSON_UTF_8, // below is actual message from Amazon - "{\"Message\":\"User: anonymous is not authorized to perform: es:ESHttpGet\"}}"); - - static final AggregatedHttpResponse RESPONSE_VERSION_6 = AggregatedHttpResponse.of( - HttpStatus.OK, MediaType.JSON_UTF_8, "{\"version\":{\"number\":\"6.7.0\"}}"); - - @Test void check_ensuresIndexTemplates_memozied() { - server.enqueue(RESPONSE_VERSION_6); - server.enqueue(SUCCESS_RESPONSE); // get span template - server.enqueue(SUCCESS_RESPONSE); // get dependency template - server.enqueue(SUCCESS_RESPONSE); // get tags template - - server.enqueue(HEALTH_RESPONSE); - - assertThat(storage.check()).isEqualTo(CheckResult.OK); - - // Later checks do not redo index template requests - server.enqueue(HEALTH_RESPONSE); - - assertThat(storage.check()).isEqualTo(CheckResult.OK); - } - - // makes sure we don't NPE - @Test void check_ensuresIndexTemplates_fail_onNoContent() { - server.enqueue(SUCCESS_RESPONSE); // empty instead of version json - - CheckResult result = storage.check(); - assertThat(result.ok()).isFalse(); - assertThat(result.error().getMessage()) - .isEqualTo("No content reading Elasticsearch version"); - } - - // makes sure we don't NPE - @Test void check_fail_onNoContent() { - storage.ensuredTemplates = true; // assume index templates called before - - server.enqueue(SUCCESS_RESPONSE); // empty instead of success response - - CheckResult result = storage.check(); - assertThat(result.ok()).isFalse(); - assertThat(result.error().getMessage()) - .isEqualTo("No content reading Elasticsearch version"); - } - - // TODO: when Armeria's mock server supports it, add a test for IOException - - @Test void check_unauthorized() { - server.enqueue(RESPONSE_UNAUTHORIZED); - - CheckResult result = storage.check(); - assertThat(result.ok()).isFalse(); - assertThat(result.error().getMessage()) - .isEqualTo("User: anonymous is not authorized to perform: es:ESHttpGet"); - } - - /** - * See {@link 
HttpCallTest#unprocessedRequest()} which shows {@link UnprocessedRequestException} - * are re-wrapped as {@link RejectedExecutionException}. - */ - @Test void isOverCapacity() { - // timeout - assertThat(storage.isOverCapacity(ResponseTimeoutException.get())).isTrue(); - - // top-level - assertThat(storage.isOverCapacity(new RejectedExecutionException( - "{\"status\":429,\"error\":{\"type\":\"es_rejected_execution_exception\"}}"))).isTrue(); - - // re-wrapped - assertThat(storage.isOverCapacity( - new RejectedExecutionException("Rejected execution: No endpoints.", - new EndpointGroupException("No endpoints")))).isTrue(); - - // not applicable - assertThat(storage.isOverCapacity(new IllegalStateException("Rejected execution"))).isFalse(); - } - - /** - * The {@code toString()} of {@link Component} implementations appear in health check endpoints. - * Since these are likely to be exposed in logs and other monitoring tools, care should be taken - * to ensure {@code toString()} output is a reasonable length and does not contain sensitive - * information. 
- */ - @Test void toStringContainsOnlySummaryInformation() { - assertThat(storage).hasToString( - String.format("ElasticsearchStorage{initialEndpoints=%s, index=zipkin}", server.httpUri())); - } - - /** Ensure that Zipkin doesn't include "include_type_name" parameter with unsupported versions */ - @Test void check_create_indexTemplate_resourcePath__version66() { - server.enqueue(AggregatedHttpResponse.of( - HttpStatus.OK, MediaType.JSON_UTF_8, "{\"version\":{\"number\":\"6.6.6\"}}")); - server.enqueue(SUCCESS_RESPONSE); // get span template - server.enqueue(SUCCESS_RESPONSE); // get dependency template - server.enqueue(SUCCESS_RESPONSE); // get autocomplete template - server.enqueue(SUCCESS_RESPONSE); // cluster health - - storage.check(); - - server.takeRequest(); // get version - - assertThat(server.takeRequest().request().path()) // get span template - .startsWith("/_template/zipkin:span_template"); - assertThat(server.takeRequest().request().path()) // // get dependency template - .startsWith("/_template/zipkin:dependency_template"); - assertThat(server.takeRequest().request().path()) // get autocomplete template - .startsWith("/_template/zipkin:autocomplete_template"); - } - - /** Ensure that Zipkin includes "include_type_name" parameter with 6.7 */ - @Test void check_create_indexTemplate_resourcePath_version67() { - server.enqueue(AggregatedHttpResponse.of( - HttpStatus.OK, MediaType.JSON_UTF_8, "{\"version\":{\"number\":\"6.7.0\"}}")); - server.enqueue(SUCCESS_RESPONSE); // get span template - server.enqueue(SUCCESS_RESPONSE); // get dependency template - server.enqueue(SUCCESS_RESPONSE); // get autocomplete template - server.enqueue(SUCCESS_RESPONSE); // cluster health - - storage.check(); - - server.takeRequest(); // get version - - assertThat(server.takeRequest().request().path()) // get span template - .startsWith("/_template/zipkin:span_template?include_type_name=true"); - assertThat(server.takeRequest().request().path()) // // get dependency template - 
.startsWith("/_template/zipkin:dependency_template?include_type_name=true"); - assertThat(server.takeRequest().request().path()) // get autocomplete template - .startsWith("/_template/zipkin:autocomplete_template?include_type_name=true"); - } - - /** Ensure that Zipkin doesn't include "include_type_name" parameter with version >7.0 */ - @Test void check_create_indexTemplate_resourcePath_version71() { - server.enqueue(AggregatedHttpResponse.of( - HttpStatus.OK, MediaType.JSON_UTF_8, "{\"version\":{\"number\":\"7.0.0\"}}")); - server.enqueue(SUCCESS_RESPONSE); // get span template - server.enqueue(SUCCESS_RESPONSE); // get dependency template - server.enqueue(SUCCESS_RESPONSE); // get autocomplete template - server.enqueue(SUCCESS_RESPONSE); // cluster health - - storage.check(); - - server.takeRequest(); // get version - - assertThat(server.takeRequest().request().path()) // get span template - .startsWith("/_template/zipkin-span_template"); - assertThat(server.takeRequest().request().path()) // // get dependency template - .startsWith("/_template/zipkin-dependency_template"); - assertThat(server.takeRequest().request().path()) // get autocomplete template - .startsWith("/_template/zipkin-autocomplete_template"); - } - - /** Ensure that Zipkin uses the legacy resource path when priority is not set. 
*/ - @Test void check_create_legacy_indexTemplate_resourcePath_version78() { - server.enqueue(AggregatedHttpResponse.of( - HttpStatus.OK, MediaType.JSON_UTF_8, "{\"version\":{\"number\":\"7.8.0\"}}")); - server.enqueue(SUCCESS_RESPONSE); // get span template - server.enqueue(SUCCESS_RESPONSE); // get dependency template - server.enqueue(SUCCESS_RESPONSE); // get autocomplete template - server.enqueue(SUCCESS_RESPONSE); // cluster health - - storage.check(); - - server.takeRequest(); // get version - - assertThat(server.takeRequest().request().path()) // get span template - .startsWith("/_template/zipkin-span_template"); - assertThat(server.takeRequest().request().path()) // // get dependency template - .startsWith("/_template/zipkin-dependency_template"); - assertThat(server.takeRequest().request().path()) // get autocomplete template - .startsWith("/_template/zipkin-autocomplete_template"); - } - - /** - * Ensure that Zipkin uses the correct resource path of /_index_template when creating index - * template for ES 7.8 when priority is set, as opposed to ES < 7.8 that uses /_template/ - */ - @Test void check_create_composable_indexTemplate_resourcePath_version78() { - // Set up a new storage with priority - storage.close(); - storage = newBuilder().templatePriority(0).build(); - - server.enqueue(AggregatedHttpResponse.of( - HttpStatus.OK, MediaType.JSON_UTF_8, "{\"version\":{\"number\":\"7.8.0\"}}")); - server.enqueue(SUCCESS_RESPONSE); // get span template - server.enqueue(SUCCESS_RESPONSE); // get dependency template - server.enqueue(SUCCESS_RESPONSE); // get autocomplete template - server.enqueue(SUCCESS_RESPONSE); // cluster health - - storage.check(); - - server.takeRequest(); // get version - - assertThat(server.takeRequest().request().path()) // get span template - .startsWith("/_index_template/zipkin-span_template"); - assertThat(server.takeRequest().request().path()) // // get dependency template - 
.startsWith("/_index_template/zipkin-dependency_template"); - assertThat(server.takeRequest().request().path()) // get autocomplete template - .startsWith("/_index_template/zipkin-autocomplete_template"); - } - - /** Ensure that Zipkin uses the legacy resource path when priority is not set. */ - @Test void check_create_legacy_indexTemplate_resourcePath_version79() { - server.enqueue(AggregatedHttpResponse.of( - HttpStatus.OK, MediaType.JSON_UTF_8, "{\"version\":{\"number\":\"7.9.0\"}}")); - server.enqueue(SUCCESS_RESPONSE); // get span template - server.enqueue(SUCCESS_RESPONSE); // get dependency template - server.enqueue(SUCCESS_RESPONSE); // get autocomplete template - server.enqueue(SUCCESS_RESPONSE); // cluster health - - storage.check(); - - server.takeRequest(); // get version - - assertThat(server.takeRequest().request().path()) // get span template - .startsWith("/_template/zipkin-span_template"); - assertThat(server.takeRequest().request().path()) // // get dependency template - .startsWith("/_template/zipkin-dependency_template"); - assertThat(server.takeRequest().request().path()) // get autocomplete template - .startsWith("/_template/zipkin-autocomplete_template"); - } - - /** - * Ensure that Zipkin uses the correct resource path of /_index_template when creating index - * template for ES 7.9 when priority is set, as opposed to ES < 7.8 that uses /_template/ - */ - @Test void check_create_composable_indexTemplate_resourcePath_version79() throws Exception { - // Set up a new storage with priority - storage.close(); - storage = newBuilder().templatePriority(0).build(); - - server.enqueue(AggregatedHttpResponse.of( - HttpStatus.OK, MediaType.JSON_UTF_8, "{\"version\":{\"number\":\"7.9.0\"}}")); - server.enqueue(SUCCESS_RESPONSE); // get span template - server.enqueue(SUCCESS_RESPONSE); // get dependency template - server.enqueue(SUCCESS_RESPONSE); // get autocomplete template - server.enqueue(SUCCESS_RESPONSE); // cluster health - - storage.check(); - - 
server.takeRequest(); // get version - - assertThat(server.takeRequest().request().path()) // get span template - .startsWith("/_index_template/zipkin-span_template"); - assertThat(server.takeRequest().request().path()) // // get dependency template - .startsWith("/_index_template/zipkin-dependency_template"); - assertThat(server.takeRequest().request().path()) // get autocomplete template - .startsWith("/_index_template/zipkin-autocomplete_template"); - } - - ElasticsearchStorage.Builder newBuilder() { - return ElasticsearchStorage.newBuilder(new LazyHttpClient() { - @Override public WebClient get() { - return WebClient.of(server.httpUri()); - } - - @Override public String toString() { - return server.httpUri().toString(); - } - }); - } -} diff --git a/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/ElasticsearchVersionTest.java b/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/ElasticsearchVersionTest.java deleted file mode 100644 index 4114b36ba3d..00000000000 --- a/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/ElasticsearchVersionTest.java +++ /dev/null @@ -1,169 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.elasticsearch; - -import com.linecorp.armeria.client.WebClient; -import com.linecorp.armeria.common.AggregatedHttpResponse; -import com.linecorp.armeria.common.HttpData; -import com.linecorp.armeria.common.HttpStatus; -import com.linecorp.armeria.common.MediaType; -import com.linecorp.armeria.common.ResponseHeaders; -import com.linecorp.armeria.testing.junit5.server.mock.MockWebServerExtension; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.RegisterExtension; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static zipkin2.elasticsearch.ElasticsearchStorageTest.RESPONSE_UNAUTHORIZED; -import static zipkin2.elasticsearch.ElasticsearchVersion.V5_0; -import static zipkin2.elasticsearch.ElasticsearchVersion.V7_0; - -class ElasticsearchVersionTest { - static final ElasticsearchVersion V2_4 = new ElasticsearchVersion(2, 4); - static final ElasticsearchVersion V6_7 = new ElasticsearchVersion(6, 7); - - static final AggregatedHttpResponse VERSION_RESPONSE_7 = AggregatedHttpResponse.of( - HttpStatus.OK, MediaType.JSON_UTF_8, "" - + "{\n" - + " \"name\" : \"zipkin-elasticsearch\",\n" - + " \"cluster_name\" : \"docker-cluster\",\n" - + " \"cluster_uuid\" : \"wByRPgSgTryYl0TZXW4MsA\",\n" - + " \"version\" : {\n" - + " \"number\" : \"7.0.1\",\n" - + " \"build_flavor\" : \"default\",\n" - + " \"build_type\" : \"tar\",\n" - + " \"build_hash\" : \"e4efcb5\",\n" - + " \"build_date\" : \"2019-04-29T12:56:03.145736Z\",\n" - + " \"build_snapshot\" : false,\n" - + " \"lucene_version\" : \"8.0.0\",\n" - + " \"minimum_wire_compatibility_version\" : \"6.7.0\",\n" - + " \"minimum_index_compatibility_version\" : \"6.0.0-beta1\"\n" - + " },\n" - + " \"tagline\" : \"You Know, for Search\"\n" - + "}"); - static final AggregatedHttpResponse VERSION_RESPONSE_6 = 
AggregatedHttpResponse.of( - HttpStatus.OK, MediaType.JSON_UTF_8, "" - + "{\n" - + " \"name\" : \"PV-NhJd\",\n" - + " \"cluster_name\" : \"CollectorDBCluster\",\n" - + " \"cluster_uuid\" : \"UjZaM0fQRC6tkHINCg9y8w\",\n" - + " \"version\" : {\n" - + " \"number\" : \"6.7.0\",\n" - + " \"build_flavor\" : \"oss\",\n" - + " \"build_type\" : \"tar\",\n" - + " \"build_hash\" : \"8453f77\",\n" - + " \"build_date\" : \"2019-03-21T15:32:29.844721Z\",\n" - + " \"build_snapshot\" : false,\n" - + " \"lucene_version\" : \"7.7.0\",\n" - + " \"minimum_wire_compatibility_version\" : \"5.6.0\",\n" - + " \"minimum_index_compatibility_version\" : \"5.0.0\"\n" - + " },\n" - + " \"tagline\" : \"You Know, for Search\"\n" - + "}"); - static final AggregatedHttpResponse VERSION_RESPONSE_5 = AggregatedHttpResponse.of( - HttpStatus.OK, MediaType.JSON_UTF_8, "" - + "{\n" - + " \"name\" : \"vU0g1--\",\n" - + " \"cluster_name\" : \"elasticsearch\",\n" - + " \"cluster_uuid\" : \"Fnm277ITSNyzsy0UCVFN7g\",\n" - + " \"version\" : {\n" - + " \"number\" : \"5.0.0\",\n" - + " \"build_hash\" : \"253032b\",\n" - + " \"build_date\" : \"2016-10-26T04:37:51.531Z\",\n" - + " \"build_snapshot\" : false,\n" - + " \"lucene_version\" : \"6.2.0\"\n" - + " },\n" - + " \"tagline\" : \"You Know, for Search\"\n" - + "}"); - static final AggregatedHttpResponse VERSION_RESPONSE_2 = AggregatedHttpResponse.of( - HttpStatus.OK, MediaType.JSON_UTF_8, "" - + "{\n" - + " \"name\" : \"Kamal\",\n" - + " \"cluster_name\" : \"elasticsearch\",\n" - + " \"version\" : {\n" - + " \"number\" : \"2.4.0\",\n" - + " \"build_hash\" : \"ce9f0c7394dee074091dd1bc4e9469251181fc55\",\n" - + " \"build_timestamp\" : \"2016-08-29T09:14:17Z\",\n" - + " \"build_snapshot\" : false,\n" - + " \"lucene_version\" : \"5.5.2\"\n" - + " },\n" - + " \"tagline\" : \"You Know, for Search\"\n" - + "}"); - - @RegisterExtension static MockWebServerExtension server = new MockWebServerExtension(); - - @BeforeEach void setUp() { - storage = 
ElasticsearchStorage.newBuilder(() -> WebClient.of(server.httpUri())).build(); - } - - @AfterEach void tearDown() { - storage.close(); - } - - ElasticsearchStorage storage; - - @Test void wrongContent() { - server.enqueue(AggregatedHttpResponse.of( - ResponseHeaders.of(HttpStatus.OK), - HttpData.ofUtf8("you got mail"))); - - assertThatThrownBy(() -> ElasticsearchVersion.get(storage.http())) - .hasMessage(".version.number not found in response: you got mail"); - } - - @Test void unauthorized() { - server.enqueue(RESPONSE_UNAUTHORIZED); - - assertThatThrownBy(() -> ElasticsearchVersion.get(storage.http())) - .hasMessage("User: anonymous is not authorized to perform: es:ESHttpGet"); - } - - /** Unsupported, but we should test that parsing works */ - @Test void version2() throws Exception { - server.enqueue(VERSION_RESPONSE_2); - - assertThat(ElasticsearchVersion.get(storage.http())) - .isEqualTo(V2_4); - } - - @Test void version5() throws Exception { - server.enqueue(VERSION_RESPONSE_5); - - assertThat(ElasticsearchVersion.get(storage.http())) - .isEqualTo(V5_0); - } - - @Test void version6() throws Exception { - server.enqueue(VERSION_RESPONSE_6); - - assertThat(ElasticsearchVersion.get(storage.http())) - .isEqualTo(V6_7); - } - - @Test void version7() throws Exception { - server.enqueue(VERSION_RESPONSE_7); - - assertThat(ElasticsearchVersion.get(storage.http())) - .isEqualTo(V7_0); - } - - /** Prove we compare better than a float. A float of 7.10 is the same as 7.1! 
*/ - @Test void version7_10IsGreaterThan_V7_2() { - assertThat(new ElasticsearchVersion(7, 10)) - .hasToString("7.10") - .isGreaterThan(new ElasticsearchVersion(7, 2)); - } -} diff --git a/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/InternalForTests.java b/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/InternalForTests.java deleted file mode 100644 index ee1b5dc7f94..00000000000 --- a/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/InternalForTests.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.elasticsearch; - -import com.fasterxml.jackson.core.JsonGenerator; -import io.netty.buffer.ByteBufOutputStream; -import java.io.IOException; -import java.io.UncheckedIOException; -import java.util.List; -import zipkin2.DependencyLink; -import zipkin2.elasticsearch.internal.BulkCallBuilder; -import zipkin2.elasticsearch.internal.BulkIndexWriter; -import zipkin2.elasticsearch.internal.JsonSerializers; - -/** Package accessor for integration tests */ -public class InternalForTests { - public static void writeDependencyLinks(ElasticsearchStorage es, List links, - long midnightUTC) { - String index = ((ElasticsearchSpanConsumer) es.spanConsumer()) - .formatTypeAndTimestampForInsert("dependency", midnightUTC); - BulkCallBuilder indexer = new BulkCallBuilder(es, es.version(), "indexlinks"); - for (DependencyLink link : links) - indexer.index(index, "dependency", link, DEPENDENCY_LINK_WRITER); - try { - indexer.build().execute(); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - } - - static final BulkIndexWriter DEPENDENCY_LINK_WRITER = - new BulkIndexWriter() { - @Override public String writeDocument(DependencyLink link, ByteBufOutputStream sink) { - try (JsonGenerator writer = JsonSerializers.jsonGenerator(sink)) { - writer.writeStartObject(); - writer.writeStringField("parent", link.parent()); - writer.writeStringField("child", link.child()); - writer.writeNumberField("callCount", link.callCount()); - if (link.errorCount() > 0) writer.writeNumberField("errorCount", link.errorCount()); - writer.writeEndObject(); - } catch (IOException e) { - throw new AssertionError(e); // No I/O writing to a Buffer. 
- } - return link.parent() + "|" + link.child(); - } - }; -} diff --git a/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/JsonReadersTest.java b/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/JsonReadersTest.java deleted file mode 100644 index bf3a73d9211..00000000000 --- a/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/JsonReadersTest.java +++ /dev/null @@ -1,129 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.elasticsearch; - -import java.io.IOException; -import java.util.List; -import org.junit.Test; -import zipkin2.elasticsearch.internal.JsonReaders; - -import static org.assertj.core.api.Assertions.assertThat; -import static zipkin2.elasticsearch.internal.JsonReaders.collectValuesNamed; -import static zipkin2.elasticsearch.internal.JsonSerializers.JSON_FACTORY; - -public class JsonReadersTest { - @Test public void enterPath_nested() throws IOException { - String content = "{\n" - + " \"name\" : \"Kamal\",\n" - + " \"cluster_name\" : \"elasticsearch\",\n" - + " \"version\" : {\n" - + " \"number\" : \"2.4.0\",\n" - + " \"build_hash\" : \"ce9f0c7394dee074091dd1bc4e9469251181fc55\",\n" - + " \"build_timestamp\" : \"2016-08-29T09:14:17Z\",\n" - + " \"build_snapshot\" : false,\n" - + " \"lucene_version\" : \"5.5.2\"\n" - + " },\n" - + " \"tagline\" : \"You Know, for Search\"\n" - + "}"; - - assertThat( - JsonReaders.enterPath(JSON_FACTORY.createParser(content), "version", "number").getText()) - .isEqualTo("2.4.0"); - } - - @Test public void enterPath_nullOnNoInput() throws IOException { - assertThat(JsonReaders.enterPath(JSON_FACTORY.createParser(""), "message")) - .isNull(); - } - - @Test public void enterPath_nullOnWrongInput() throws IOException { - assertThat(JsonReaders.enterPath(JSON_FACTORY.createParser("[]"), "message")) - .isNull(); - } - - @Test public void collectValuesNamed_emptyWhenNotFound() throws IOException { - String content = "{\n" - + " \"took\": 1,\n" - + " \"timed_out\": false,\n" - + " \"_shards\": {\n" - + " \"total\": 0,\n" - + " \"successful\": 0,\n" - + " \"failed\": 0\n" - + " },\n" - + " \"hits\": {\n" - + " \"total\": 0,\n" - + " \"max_score\": 0,\n" - + " \"hits\": []\n" - + " }\n" - + "}"; - - assertThat(collectValuesNamed(JSON_FACTORY.createParser(content), "key")).isEmpty(); - } - - // All elasticsearch results start with an object, not an array. 
- @Test(expected = IllegalArgumentException.class) - public void collectValuesNamed_exceptionOnWrongData() throws IOException { - assertThat(collectValuesNamed(JSON_FACTORY.createParser("[]"), "key")).isEmpty(); - } - - @Test public void collectValuesNamed_mergesArrays() throws IOException { - List result = - collectValuesNamed(JSON_FACTORY.createParser(TestResponses.SPAN_NAMES), "key"); - - assertThat(result).containsExactly("methodcall", "yak"); - } - - @Test public void collectValuesNamed_mergesChildren() throws IOException { - List result = - collectValuesNamed(JSON_FACTORY.createParser(TestResponses.SERVICE_NAMES), "key"); - - assertThat(result).containsExactly("yak", "service"); - } - - @Test public void collectValuesNamed_nested() throws IOException { - String content = "{\n" - + " \"took\": 49,\n" - + " \"timed_out\": false,\n" - + " \"_shards\": {\n" - + " \"total\": 5,\n" - + " \"successful\": 5,\n" - + " \"failed\": 0\n" - + " },\n" - + " \"hits\": {\n" - + " \"total\": 1,\n" - + " \"max_score\": 0,\n" - + " \"hits\": []\n" - + " },\n" - + " \"aggregations\": {\n" - + " \"traceId_agg\": {\n" - + " \"doc_count_error_upper_bound\": 0,\n" - + " \"sum_other_doc_count\": 0,\n" - + " \"buckets\": [\n" - + " {\n" - + " \"key\": \"000000000000007b\",\n" - + " \"doc_count\": 1,\n" - + " \"timestamps_agg\": {\n" - + " \"value\": 1474761600001,\n" - + " \"value_as_string\": \"1474761600001\"\n" - + " }\n" - + " }\n" - + " ]\n" - + " }\n" - + " }\n" - + "}"; - - assertThat(collectValuesNamed(JSON_FACTORY.createParser(content), "key")) - .containsExactly("000000000000007b"); - } -} diff --git a/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/JsonSerializersTest.java b/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/JsonSerializersTest.java deleted file mode 100644 index 462c0d47923..00000000000 --- a/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/JsonSerializersTest.java +++ /dev/null @@ -1,271 +0,0 @@ -/* - * 
Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.elasticsearch; - -import com.fasterxml.jackson.core.JsonParser; -import java.io.IOException; -import java.io.UncheckedIOException; -import org.junit.Test; -import zipkin2.DependencyLink; -import zipkin2.Endpoint; -import zipkin2.Span; -import zipkin2.codec.DependencyLinkBytesEncoder; -import zipkin2.codec.SpanBytesEncoder; -import zipkin2.elasticsearch.internal.JsonSerializers; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.entry; -import static zipkin2.TestObjects.CLIENT_SPAN; -import static zipkin2.TestObjects.UTF_8; -import static zipkin2.elasticsearch.internal.JsonSerializers.SPAN_PARSER; - -public class JsonSerializersTest { - @Test - public void span_ignoreNull_parentId() { - String json = - "{\n" - + " \"traceId\": \"6b221d5bc9e6496c\",\n" - + " \"name\": \"get-traces\",\n" - + " \"id\": \"6b221d5bc9e6496c\",\n" - + " \"parentId\": null\n" - + "}"; - - parse(SPAN_PARSER, json); - } - - @Test - public void span_ignoreNull_timestamp() { - String json = - "{\n" - + " \"traceId\": \"6b221d5bc9e6496c\",\n" - + " \"name\": \"get-traces\",\n" - + " \"id\": \"6b221d5bc9e6496c\",\n" - + " \"timestamp\": null\n" - + "}"; - - parse(SPAN_PARSER, json); - } - - @Test - public void span_ignoreNull_duration() { - String json = - "{\n" - + " \"traceId\": \"6b221d5bc9e6496c\",\n" - + " \"name\": 
\"get-traces\",\n" - + " \"id\": \"6b221d5bc9e6496c\",\n" - + " \"duration\": null\n" - + "}"; - - parse(SPAN_PARSER, json); - } - - @Test - public void span_ignoreNull_debug() { - String json = - "{\n" - + " \"traceId\": \"6b221d5bc9e6496c\",\n" - + " \"name\": \"get-traces\",\n" - + " \"id\": \"6b221d5bc9e6496c\",\n" - + " \"debug\": null\n" - + "}"; - - parse(SPAN_PARSER, json); - } - - @Test - public void span_ignoreNull_annotation_endpoint() { - String json = - "{\n" - + " \"traceId\": \"6b221d5bc9e6496c\",\n" - + " \"name\": \"get-traces\",\n" - + " \"id\": \"6b221d5bc9e6496c\",\n" - + " \"annotations\": [\n" - + " {\n" - + " \"timestamp\": 1461750491274000,\n" - + " \"value\": \"cs\",\n" - + " \"endpoint\": null\n" - + " }\n" - + " ]\n" - + "}"; - - parse(SPAN_PARSER, json); - } - - @Test - public void span_tag_long_read() { - String json = - "{\n" - + " \"traceId\": \"6b221d5bc9e6496c\",\n" - + " \"name\": \"get-traces\",\n" - + " \"id\": \"6b221d5bc9e6496c\",\n" - + " \"tags\": {" - + " \"num\": 9223372036854775807" - + " }" - + "}"; - - Span span = parse(SPAN_PARSER, json); - assertThat(span.tags()).containsExactly(entry("num", "9223372036854775807")); - } - - @Test - public void span_tag_double_read() { - String json = - "{\n" - + " \"traceId\": \"6b221d5bc9e6496c\",\n" - + " \"name\": \"get-traces\",\n" - + " \"id\": \"6b221d5bc9e6496c\",\n" - + " \"tags\": {" - + " \"num\": 1.23456789" - + " }" - + "}"; - - Span span = parse(SPAN_PARSER, json); - assertThat(span.tags()).containsExactly(entry("num", "1.23456789")); - } - - @Test - public void span_roundTrip() { - assertThat(parse(SPAN_PARSER, new String(SpanBytesEncoder.JSON_V2.encode(CLIENT_SPAN), UTF_8))) - .isEqualTo(CLIENT_SPAN); - } - - /** - * This isn't a test of what we "should" accept as a span, rather that characters that trip-up - * json don't fail in SPAN_PARSER. 
- */ - @Test - public void span_specialCharsInJson() { - // service name is surrounded by control characters - Endpoint e = Endpoint.newBuilder().serviceName(new String(new char[] {0, 'a', 1})).build(); - Span worstSpanInTheWorld = - Span.newBuilder() - .traceId("1") - .id("1") - // name is terrible - .name(new String(new char[] {'"', '\\', '\t', '\b', '\n', '\r', '\f'})) - .localEndpoint(e) - // annotation value includes some json newline characters - .addAnnotation(1L, "\u2028 and \u2029") - // binary annotation key includes a quote and value newlines - .putTag( - "\"foo", - "Database error: ORA-00942:\u2028 and \u2029 table or view does not exist\n") - .build(); - - assertThat( - parse(SPAN_PARSER, new String(SpanBytesEncoder.JSON_V2.encode(worstSpanInTheWorld), UTF_8))) - .isEqualTo(worstSpanInTheWorld); - } - - @Test - public void span_endpointHighPort() { - String json = - "{\n" - + " \"traceId\": \"6b221d5bc9e6496c\",\n" - + " \"name\": \"get-traces\",\n" - + " \"id\": \"6b221d5bc9e6496c\",\n" - + " \"localEndpoint\": {\n" - + " \"serviceName\": \"service\",\n" - + " \"port\": 65535\n" - + " }\n" - + "}"; - - assertThat(parse(SPAN_PARSER, json).localEndpoint()) - .isEqualTo(Endpoint.newBuilder().serviceName("service").port(65535).build()); - } - - @Test - public void span_noServiceName() { - String json = - "{\n" - + " \"traceId\": \"6b221d5bc9e6496c\",\n" - + " \"name\": \"get-traces\",\n" - + " \"id\": \"6b221d5bc9e6496c\",\n" - + " \"localEndpoint\": {\n" - + " \"port\": 65535\n" - + " }\n" - + "}"; - - assertThat(parse(SPAN_PARSER, json).localEndpoint()) - .isEqualTo(Endpoint.newBuilder().serviceName("").port(65535).build()); - } - - @Test - public void span_nullServiceName() { - String json = - "{\n" - + " \"traceId\": \"6b221d5bc9e6496c\",\n" - + " \"name\": \"get-traces\",\n" - + " \"id\": \"6b221d5bc9e6496c\",\n" - + " \"localEndpoint\": {\n" - + " \"serviceName\": null,\n" - + " \"port\": 65535\n" - + " }\n" - + "}"; - - 
assertThat(parse(SPAN_PARSER, json).localEndpoint()) - .isEqualTo(Endpoint.newBuilder().serviceName("").port(65535).build()); - } - - @Test - public void span_readsTraceIdHighFromTraceIdField() throws IOException { - String with128BitTraceId = - ("{\n" - + " \"traceId\": \"48485a3953bb61246b221d5bc9e6496c\",\n" - + " \"name\": \"get-traces\",\n" - + " \"id\": \"6b221d5bc9e6496c\"\n" - + "}"); - String withLower64bitsTraceId = - ("{\n" - + " \"traceId\": \"6b221d5bc9e6496c\",\n" - + " \"name\": \"get-traces\",\n" - + " \"id\": \"6b221d5bc9e6496c\"\n" - + "}"); - - assertThat(parse(SPAN_PARSER, with128BitTraceId)) - .isEqualTo( - parse(JsonSerializers.SPAN_PARSER, withLower64bitsTraceId) - .toBuilder() - .traceId("48485a3953bb61246b221d5bc9e6496c") - .build()); - } - - @Test - public void dependencyLinkRoundTrip() { - DependencyLink link = - DependencyLink.newBuilder().parent("foo").child("bar").callCount(2).build(); - - assertThat(parse(JsonSerializers.DEPENDENCY_LINK_PARSER, - new String(DependencyLinkBytesEncoder.JSON_V1.encode(link), UTF_8))).isEqualTo(link); - } - - @Test - public void dependencyLinkRoundTrip_withError() { - DependencyLink link = - DependencyLink.newBuilder().parent("foo").child("bar").callCount(2).errorCount(1).build(); - - assertThat(parse(JsonSerializers.DEPENDENCY_LINK_PARSER, - new String(DependencyLinkBytesEncoder.JSON_V1.encode(link), UTF_8))).isEqualTo(link); - } - - static T parse(JsonSerializers.ObjectParser parser, String json) { - try { - JsonParser jsonParser = JsonSerializers.JSON_FACTORY.createParser(json); - jsonParser.nextToken(); - return parser.parse(jsonParser); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - } -} diff --git a/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/SearchResultConverterTest.java b/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/SearchResultConverterTest.java deleted file mode 100644 index ff526a262ff..00000000000 --- 
a/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/SearchResultConverterTest.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.elasticsearch; // to access package private stuff - -import java.io.IOException; -import java.util.List; -import java.util.stream.Collectors; -import org.junit.Test; -import org.junit.jupiter.api.Assertions; -import zipkin2.Annotation; -import zipkin2.Span; -import zipkin2.TestObjects; -import zipkin2.elasticsearch.internal.JsonSerializers; -import zipkin2.elasticsearch.internal.client.SearchResultConverter; - -import static org.assertj.core.api.Assertions.assertThat; -import static zipkin2.TestObjects.TODAY; -import static zipkin2.elasticsearch.TestResponses.SPANS; -import static zipkin2.elasticsearch.internal.JsonSerializers.JSON_FACTORY; - -public class SearchResultConverterTest { - SearchResultConverter converter = SearchResultConverter.create(JsonSerializers.SPAN_PARSER); - - @Test public void convert() throws IOException { - // Our normal test data has recent timestamps to make testing the server and dependency linker - // work as there are values related to recency used in search defaults. - // This test needs stable timestamps because items like MD5 need to match. - long stableMicros = (TODAY - 1) * 1000L; // can't result in a zero value, so minimum ts of 1. 
- List stableTrace = TestObjects.TRACE.stream() - .map(s -> { - Span.Builder builder = s.toBuilder().timestamp(s.timestampAsLong() - stableMicros) - .clearAnnotations(); - for (Annotation a : s.annotations()) { - builder.addAnnotation(a.timestamp() - stableMicros, a.value()); - } - return builder.build(); - }).collect(Collectors.toList()); - assertThat(converter.convert(JSON_FACTORY.createParser(SPANS), Assertions::fail)) - .containsExactlyElementsOf(stableTrace); - } - - @Test public void convert_noHits() throws IOException { - assertThat(converter.convert(JSON_FACTORY.createParser("{}"), Assertions::fail)) - .isEmpty(); - } - - @Test public void convert_onlyOneLevelHits() throws IOException { - assertThat(converter.convert(JSON_FACTORY.createParser("{\"hits\":{}}"), Assertions::fail)) - .isEmpty(); - } - - @Test public void convert_hitsHitsButEmpty() throws IOException { - assertThat( - converter.convert(JSON_FACTORY.createParser("{\"hits\":{\"hits\":[]}}"), Assertions::fail)) - .isEmpty(); - } - - @Test public void convert_hitsHitsButNoSource() throws IOException { - assertThat( - converter.convert(JSON_FACTORY.createParser("{\"hits\":{\"hits\":[{}]}}"), Assertions::fail)) - .isEmpty(); - } -} diff --git a/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/TestResponses.java b/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/TestResponses.java deleted file mode 100644 index c1381d9f8af..00000000000 --- a/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/TestResponses.java +++ /dev/null @@ -1,264 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.elasticsearch; - -final class TestResponses { - static final String SPANS = "{\n" - + " \"took\": 4,\n" - + " \"timed_out\": false,\n" - + " \"_shards\": {\n" - + " \"total\": 5,\n" - + " \"successful\": 5,\n" - + " \"skipped\": 0,\n" - + " \"failed\": 0\n" - + " },\n" - + " \"hits\": {\n" - + " \"total\": 4,\n" - + " \"max_score\": 0,\n" - + " \"hits\": [\n" - + " {\n" - + " \"_index\": \"zipkin:span-2019-07-20\",\n" - + " \"_type\": \"span\",\n" - + " \"_id\": \"7180c278b62e8f6a216a2aea45d08fc9-2a40476ca7a22f2c85ac18b9c1f3a99c\",\n" - + " \"_score\": 0,\n" - + " \"_source\": {\n" - + " \"traceId\": \"7180c278b62e8f6a216a2aea45d08fc9\",\n" - + " \"duration\": 350000,\n" - + " \"localEndpoint\": {\n" - + " \"serviceName\": \"frontend\",\n" - + " \"ipv4\": \"127.0.0.1\"\n" - + " },\n" - + " \"timestamp_millis\": 1,\n" - + " \"kind\": \"SERVER\",\n" - + " \"name\": \"get\",\n" - + " \"id\": \"0000000000000001\",\n" - + " \"timestamp\": 1000\n" - + " }\n" - + " },\n" - + " {\n" - + " \"_index\": \"zipkin:span-2019-07-20\",\n" - + " \"_type\": \"span\",\n" - + " \"_id\": \"7180c278b62e8f6a216a2aea45d08fc9-466fed1eb1d5cef4a76a227e83a7a7a8\",\n" - + " \"_score\": 0,\n" - + " \"_source\": {\n" - + " \"traceId\": \"7180c278b62e8f6a216a2aea45d08fc9\",\n" - + " \"duration\": 200000,\n" - + " \"remoteEndpoint\": {\n" - + " \"serviceName\": \"backend\",\n" - + " \"ipv4\": \"192.168.99.101\",\n" - + " \"port\": 9000\n" - + " },\n" - + " \"localEndpoint\": {\n" - + " \"serviceName\": \"frontend\",\n" - + " \"ipv4\": \"127.0.0.1\"\n" - + " },\n" - + 
" \"timestamp_millis\": 51,\n" - + " \"kind\": \"CLIENT\",\n" - + " \"name\": \"get\",\n" - + " \"annotations\": [\n" - + " {\n" - + " \"timestamp\": 101000,\n" - + " \"value\": \"foo\"\n" - + " }\n" - + " ],\n" - + " \"id\": \"0000000000000002\",\n" - + " \"parentId\": \"0000000000000001\",\n" - + " \"timestamp\": 51000,\n" - + " \"tags\": {\n" - + " \"clnt/finagle.version\": \"6.45.0\",\n" - + " \"http.path\": \"/api\"\n" - + " }\n" - + " }\n" - + " },\n" - + " {\n" - + " \"_index\": \"zipkin:span-2019-07-20\",\n" - + " \"_type\": \"span\",\n" - + " \"_id\": \"7180c278b62e8f6a216a2aea45d08fc9-74d915e86c8f53d59ef5850b4e966199\",\n" - + " \"_score\": 0,\n" - + " \"_source\": {\n" - + " \"traceId\": \"7180c278b62e8f6a216a2aea45d08fc9\",\n" - + " \"duration\": 150000,\n" - + " \"shared\": true,\n" - + " \"localEndpoint\": {\n" - + " \"serviceName\": \"backend\",\n" - + " \"ipv4\": \"192.168.99.101\",\n" - + " \"port\": 9000\n" - + " },\n" - + " \"timestamp_millis\": 101,\n" - + " \"kind\": \"SERVER\",\n" - + " \"name\": \"get\",\n" - + " \"id\": \"0000000000000002\",\n" - + " \"parentId\": \"0000000000000001\",\n" - + " \"timestamp\": 101000\n" - + " }\n" - + " },\n" - + " {\n" - + " \"_index\": \"zipkin:span-2019-07-20\",\n" - + " \"_type\": \"span\",\n" - + " \"_id\": \"7180c278b62e8f6a216a2aea45d08fc9-989c12147ff4ca03ce10d8488d93b89d\",\n" - + " \"_score\": 0,\n" - + " \"_source\": {\n" - + " \"traceId\": \"7180c278b62e8f6a216a2aea45d08fc9\",\n" - + " \"duration\": 50000,\n" - + " \"remoteEndpoint\": {\n" - + " \"serviceName\": \"db\",\n" - + " \"ipv6\": \"2001:db8::c001\",\n" - + " \"port\": 3036\n" - + " },\n" - + " \"localEndpoint\": {\n" - + " \"serviceName\": \"backend\",\n" - + " \"ipv4\": \"192.168.99.101\",\n" - + " \"port\": 9000\n" - + " },\n" - + " \"timestamp_millis\": 151,\n" - + " \"kind\": \"CLIENT\",\n" - + " \"name\": \"query\",\n" - + " \"annotations\": [\n" - + " {\n" - + " \"timestamp\": 191000,\n" - + " \"value\": \"⻩\"\n" - + " }\n" - + " 
],\n" - + " \"id\": \"0000000000000003\",\n" - + " \"parentId\": \"0000000000000002\",\n" - + " \"timestamp\": 151000,\n" - + " \"tags\": {\n" - + " \"error\": \"\uD83D\uDCA9\"\n" - + " }\n" - + " }\n" - + " }\n" - + " ]\n" - + " }\n" - + "}"; - static final String SERVICE_NAMES = - "{\n" - + " \"took\": 4,\n" - + " \"timed_out\": false,\n" - + " \"_shards\": {\n" - + " \"total\": 5,\n" - + " \"successful\": 5,\n" - + " \"failed\": 0\n" - + " },\n" - + " \"hits\": {\n" - + " \"total\": 1,\n" - + " \"max_score\": 0,\n" - + " \"hits\": []\n" - + " },\n" - + " \"aggregations\": {\n" - + " \"binaryAnnotations_agg\": {\n" - + " \"doc_count\": 1,\n" - + " \"binaryAnnotationsServiceName_agg\": {\n" - + " \"doc_count_error_upper_bound\": 0,\n" - + " \"sum_other_doc_count\": 0,\n" - + " \"buckets\": [\n" - + " {\n" - + " \"key\": \"yak\",\n" - + " \"doc_count\": 1\n" - + " }\n" - + " ]\n" - + " }\n" - + " },\n" - + " \"annotations_agg\": {\n" - + " \"doc_count\": 2,\n" - + " \"annotationsServiceName_agg\": {\n" - + " \"doc_count_error_upper_bound\": 0,\n" - + " \"sum_other_doc_count\": 0,\n" - + " \"buckets\": [\n" - + " {\n" - + " \"key\": \"service\",\n" - + " \"doc_count\": 2\n" - + " }\n" - + " ]\n" - + " }\n" - + " }\n" - + " }\n" - + "}"; - - static final String SPAN_NAMES = - "{\n" - + " \"took\": 1,\n" - + " \"timed_out\": false,\n" - + " \"_shards\": {\n" - + " \"total\": 5,\n" - + " \"successful\": 5,\n" - + " \"failed\": 0\n" - + " },\n" - + " \"hits\": {\n" - + " \"total\": 2,\n" - + " \"max_score\": 0,\n" - + " \"hits\": []\n" - + " },\n" - + " \"aggregations\": {\n" - + " \"name_agg\": {\n" - + " \"doc_count_error_upper_bound\": 0,\n" - + " \"sum_other_doc_count\": 0,\n" - + " \"buckets\": [\n" - + " {\n" - + " \"key\": \"methodcall\",\n" - + " \"doc_count\": 1\n" - + " },\n" - + " {\n" - + " \"key\": \"yak\",\n" - + " \"doc_count\": 1\n" - + " }\n" - + " ]\n" - + " }\n" - + " }\n" - + "}"; - - static final String AUTOCOMPLETE_VALUES = "{\n" - + " \"took\": 
12,\n" - + " \"timed_out\": false,\n" - + " \"_shards\": {\n" - + " \"total\": 5,\n" - + " \"successful\": 5,\n" - + " \"skipped\": 0,\n" - + " \"failed\": 0\n" - + " },\n" - + " \"hits\": {\n" - + " \"total\": 2,\n" - + " \"max_score\": 0,\n" - + " \"hits\": [\n" - + " {\n" - + " \"_index\": \"zipkin:autocomplete-2018-12-08\",\n" - + " \"_type\": \"autocomplete\",\n" - + " \"_id\": \"http.method|POST\",\n" - + " \"_score\": 0\n" - + " },\n" - + " {\n" - + " \"_index\": \"zipkin:autocomplete-2018-12-08\",\n" - + " \"_type\": \"autocomplete\",\n" - + " \"_id\": \"http.method|GET\",\n" - + " \"_score\": 0\n" - + " }\n" - + " ]\n" - + " },\n" - + " \"aggregations\": {\n" - + " \"tagValue\": {\n" - + " \"doc_count_error_upper_bound\": 0,\n" - + " \"sum_other_doc_count\": 0,\n" - + " \"buckets\": [\n" - + " {\n" - + " \"key\": \"get\",\n" - + " \"doc_count\": 1\n" - + " },\n" - + " {\n" - + " \"key\": \"post\",\n" - + " \"doc_count\": 1\n" - + " }\n" - + " ]\n" - + " }\n" - + " }\n" - + "}"; -} diff --git a/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/VersionSpecificTemplatesTest.java b/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/VersionSpecificTemplatesTest.java deleted file mode 100644 index 712abcd4fef..00000000000 --- a/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/VersionSpecificTemplatesTest.java +++ /dev/null @@ -1,249 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. 
See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.elasticsearch; - -import com.linecorp.armeria.client.WebClient; -import org.junit.jupiter.api.Test; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.mockito.Mockito.mock; -import static zipkin2.elasticsearch.ElasticsearchVersion.V5_0; -import static zipkin2.elasticsearch.ElasticsearchVersion.V7_0; -import static zipkin2.elasticsearch.ElasticsearchVersion.V7_8; - -class VersionSpecificTemplatesTest { - static final ElasticsearchVersion V2_4 = new ElasticsearchVersion(2, 4); - static final ElasticsearchVersion V6_7 = new ElasticsearchVersion(6, 7); - static final ElasticsearchVersion V7_9 = new ElasticsearchVersion(7, 9); - - ElasticsearchStorage storage = - ElasticsearchStorage.newBuilder(() -> mock(WebClient.class)).build(); - - /** Unsupported, but we should test that parsing works */ - @Test void version2_unsupported() { - assertThatThrownBy(() -> storage.versionSpecificTemplates(V2_4)) - .hasMessage("Elasticsearch versions 5-7.x are supported, was: 2.4"); - } - - @Test void version5() { - IndexTemplates template = storage.versionSpecificTemplates(V5_0); - - assertThat(template.version()).isEqualTo(V5_0); - assertThat(template.autocomplete()) - .withFailMessage("In v5.x, the index_patterns field was named template") - .contains("\"template\":"); - assertThat(template.autocomplete()) - .withFailMessage("Until v7.x, we delimited index and type with a colon") - .contains("\"template\": \"zipkin:autocomplete-*\""); - } - - @Test void version6() { - IndexTemplates template = storage.versionSpecificTemplates(V6_7); - - assertThat(template.version()).isEqualTo(V6_7); - assertThat(template.autocomplete()) - .withFailMessage("Until v7.x, we delimited index and type with a colon") - .contains("\"index_patterns\": \"zipkin:autocomplete-*\""); - } - - 
@Test void version6_wrapsPropertiesWithType() { - IndexTemplates template = storage.versionSpecificTemplates(V6_7); - - assertThat(template.dependency()).contains("" - + " \"mappings\": {\n" - + " \"dependency\": {\n" - + " \"enabled\": false\n" - + " }\n" - + " }"); - - assertThat(template.autocomplete()).contains("" - + " \"mappings\": {\n" - + " \"autocomplete\": {\n" - + " \"enabled\": true,\n" - + " \"properties\": {\n" - + " \"tagKey\": { \"type\": \"keyword\", \"norms\": false },\n" - + " \"tagValue\": { \"type\": \"keyword\", \"norms\": false }\n" - + " }\n" - + " }\n" - + " }"); - } - - @Test void version7() { - IndexTemplates template = storage.versionSpecificTemplates(V7_0); - - assertThat(template.version()).isEqualTo(V7_0); - assertThat(template.autocomplete()) - .withFailMessage("Starting at v7.x, we delimit index and type with hyphen") - .contains("\"index_patterns\": \"zipkin-autocomplete-*\""); - assertThat(template.autocomplete()) - .withFailMessage("7.x does not support the key index.mapper.dynamic") - .doesNotContain("\"index.mapper.dynamic\": false"); - } - - @Test void version7_doesntWrapPropertiesWithType() { - IndexTemplates template = storage.versionSpecificTemplates(V7_0); - - assertThat(template.dependency()).contains("" - + " \"mappings\": {\n" - + " \"enabled\": false\n" - + " }"); - - assertThat(template.autocomplete()).contains("" - + " \"mappings\": {\n" - + " \"enabled\": true,\n" - + " \"properties\": {\n" - + " \"tagKey\": { \"type\": \"keyword\", \"norms\": false },\n" - + " \"tagValue\": { \"type\": \"keyword\", \"norms\": false }\n" - + " }\n" - + " }"); - } - - @Test void version78_legacy() { - IndexTemplates template = storage.versionSpecificTemplates(V7_8); - - assertThat(template.version()).isEqualTo(V7_8); - assertThat(template.autocomplete()) - .withFailMessage("Starting at v7.x, we delimit index and type with hyphen") - .contains("\"index_patterns\": \"zipkin-autocomplete-*\""); - assertThat(template.span()) - 
.doesNotContain("\"template\": {\n") - .doesNotContain("\"priority\": 0\n"); - assertThat(template.autocomplete()) - .doesNotContain("\"template\": {\n") - .doesNotContain("\"priority\": 0\n"); - assertThat(template.dependency()) - .doesNotContain("\"template\": {\n") - .doesNotContain("\"priority\": 0\n"); - } - - @Test void version78_composable() { - // Set up a new storage with priority - storage.close(); - storage = - ElasticsearchStorage.newBuilder(() -> mock(WebClient.class)).templatePriority(0).build(); - IndexTemplates template = storage.versionSpecificTemplates(V7_8); - - assertThat(template.version()).isEqualTo(V7_8); - assertThat(template.autocomplete()) - .withFailMessage("Starting at v7.x, we delimit index and type with hyphen") - .contains("\"index_patterns\": \"zipkin-autocomplete-*\""); - assertThat(template.span()) - .contains("\"template\": {\n") - .contains("\"priority\": 0\n"); - assertThat(template.autocomplete()) - .contains("\"template\": {\n") - .contains("\"priority\": 0\n"); - assertThat(template.dependency()) - .contains("\"template\": {\n") - .contains("\"priority\": 0\n"); - } - - @Test void version79_legacy() { - IndexTemplates template = storage.versionSpecificTemplates(V7_9); - - assertThat(template.version()).isEqualTo(V7_9); - assertThat(template.autocomplete()) - .withFailMessage("Starting at v7.x, we delimit index and type with hyphen") - .contains("\"index_patterns\": \"zipkin-autocomplete-*\""); - assertThat(template.span()) - .doesNotContain("\"template\": {\n") - .doesNotContain("\"priority\": 0\n"); - assertThat(template.autocomplete()) - .doesNotContain("\"template\": {\n") - .doesNotContain("\"priority\": 0\n"); - assertThat(template.dependency()) - .doesNotContain("\"template\": {\n") - .doesNotContain("\"priority\": 0\n"); - } - - @Test void version79_composable() { - // Set up a new storage with priority - storage.close(); - storage = - ElasticsearchStorage.newBuilder(() -> 
mock(WebClient.class)).templatePriority(0).build(); - IndexTemplates template = storage.versionSpecificTemplates(V7_9); - - assertThat(template.version()).isEqualTo(V7_9); - assertThat(template.autocomplete()) - .withFailMessage("Starting at v7.x, we delimit index and type with hyphen") - .contains("\"index_patterns\": \"zipkin-autocomplete-*\""); - assertThat(template.span()) - .contains("\"template\": {\n") - .contains("\"priority\": 0\n"); - assertThat(template.autocomplete()) - .contains("\"template\": {\n") - .contains("\"priority\": 0\n"); - assertThat(template.dependency()) - .contains("\"template\": {\n") - .contains("\"priority\": 0\n"); - } - - @Test void searchEnabled_minimalSpanIndexing_6x() { - storage.close(); - storage = ElasticsearchStorage.newBuilder(() -> mock(WebClient.class)) - .searchEnabled(false) - .build(); - - IndexTemplates template = storage.versionSpecificTemplates(V6_7); - - assertThat(template.span()) - .contains("" - + " \"mappings\": {\n" - + " \"span\": {\n" - + " \"properties\": {\n" - + " \"traceId\": { \"type\": \"keyword\", \"norms\": false },\n" - + " \"annotations\": { \"enabled\": false },\n" - + " \"tags\": { \"enabled\": false }\n" - + " }\n" - + " }\n" - + " }"); - } - - @Test void searchEnabled_minimalSpanIndexing_7x() { - storage = ElasticsearchStorage.newBuilder(() -> mock(WebClient.class)) - .searchEnabled(false) - .build(); - - IndexTemplates template = storage.versionSpecificTemplates(V7_0); - - // doesn't wrap in a type name - assertThat(template.span()) - .contains("" - + " \"mappings\": {\n" - + " \"properties\": {\n" - + " \"traceId\": { \"type\": \"keyword\", \"norms\": false },\n" - + " \"annotations\": { \"enabled\": false },\n" - + " \"tags\": { \"enabled\": false }\n" - + " }\n" - + " }"); - } - - @Test void strictTraceId_doesNotIncludeAnalysisSection() { - IndexTemplates template = storage.versionSpecificTemplates(V6_7); - - assertThat(template.span()).doesNotContain("analysis"); - } - - @Test void 
strictTraceId_false_includesAnalysisForMixedLengthTraceId() { - storage.close(); - storage = ElasticsearchStorage.newBuilder(() -> mock(WebClient.class)) - .strictTraceId(false) - .build(); - - IndexTemplates template = storage.versionSpecificTemplates(V6_7); - - assertThat(template.span()).contains("analysis"); - } -} diff --git a/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/integration/ElasticsearchExtension.java b/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/integration/ElasticsearchExtension.java deleted file mode 100644 index d697d3c83bb..00000000000 --- a/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/integration/ElasticsearchExtension.java +++ /dev/null @@ -1,151 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.elasticsearch.integration; - -import com.linecorp.armeria.client.ClientFactory; -import com.linecorp.armeria.client.ClientOptions; -import com.linecorp.armeria.client.ClientOptionsBuilder; -import com.linecorp.armeria.client.WebClient; -import com.linecorp.armeria.client.WebClientBuilder; -import com.linecorp.armeria.client.logging.ContentPreviewingClient; -import com.linecorp.armeria.client.logging.LoggingClient; -import com.linecorp.armeria.client.logging.LoggingClientBuilder; -import com.linecorp.armeria.common.HttpResponse; -import com.linecorp.armeria.common.logging.LogLevel; -import org.junit.jupiter.api.TestInfo; -import org.junit.jupiter.api.extension.AfterAllCallback; -import org.junit.jupiter.api.extension.BeforeAllCallback; -import org.junit.jupiter.api.extension.ExtensionContext; -import org.opentest4j.TestAbortedException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.testcontainers.containers.GenericContainer; -import org.testcontainers.containers.output.Slf4jLogConsumer; -import org.testcontainers.containers.wait.strategy.Wait; -import zipkin2.elasticsearch.ElasticsearchStorage; -import zipkin2.elasticsearch.ElasticsearchStorage.Builder; - -import static org.testcontainers.utility.DockerImageName.parse; -import static zipkin2.elasticsearch.integration.IgnoredDeprecationWarnings.IGNORE_THESE_WARNINGS; - -class ElasticsearchExtension implements BeforeAllCallback, AfterAllCallback { - static final Logger LOGGER = LoggerFactory.getLogger(ElasticsearchExtension.class); - - final ElasticsearchContainer container; - - ElasticsearchExtension(int majorVersion) { - container = new ElasticsearchContainer(majorVersion); - } - - @Override public void beforeAll(ExtensionContext context) { - if (context.getRequiredTestClass().getEnclosingClass() != null) { - // Only run once in outermost scope. 
- return; - } - - container.start(); - LOGGER.info("Using baseUrl " + baseUrl()); - } - - @Override public void afterAll(ExtensionContext context) { - if (context.getRequiredTestClass().getEnclosingClass() != null) { - // Only run once in outermost scope. - return; - } - - container.stop(); - } - - Builder computeStorageBuilder() { - WebClientBuilder builder = WebClient.builder(baseUrl()) - // Elasticsearch 7 never returns a response when receiving an HTTP/2 preface instead of the - // more valid behavior of returning a bad request response, so we can't use the preface. - // - // TODO: find or raise a bug with Elastic - .factory(ClientFactory.builder().useHttp2Preface(false).build()); - builder.decorator((delegate, ctx, req) -> { - final HttpResponse response = delegate.execute(ctx, req); - return HttpResponse.from(response.aggregate().thenApply(r -> { - // ES will return a 'warning' response header when using deprecated api, detect this and - // fail early so we can do something about it. - // Example usage: https://github.com/elastic/elasticsearch/blob/3049e55f093487bb582a7e49ad624961415ba31c/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/IndexPrivilegeIntegTests.java#L559 - final String warningHeader = r.headers().get("warning"); - if (warningHeader != null) { - if (IGNORE_THESE_WARNINGS.stream().noneMatch(p -> p.matcher(warningHeader).find())) { - throw new IllegalArgumentException("Detected usage of deprecated API for request " - + req.toString() + ":\n" + warningHeader); - } - } - // Convert AggregatedHttpResponse back to HttpResponse. 
- return r.toHttpResponse(); - })); - }); - - // When ES_DEBUG=true log full headers, request and response body to the category - // com.linecorp.armeria.client.logging - if (Boolean.parseBoolean(System.getenv("ES_DEBUG"))) { - ClientOptionsBuilder options = ClientOptions.builder(); - LoggingClientBuilder loggingBuilder = LoggingClient.builder() - .requestLogLevel(LogLevel.INFO) - .successfulResponseLogLevel(LogLevel.INFO); - options.decorator(loggingBuilder.newDecorator()); - options.decorator(ContentPreviewingClient.newDecorator(Integer.MAX_VALUE)); - builder.options(options.build()); - } - - WebClient client = builder.build(); - return ElasticsearchStorage.newBuilder(new ElasticsearchStorage.LazyHttpClient() { - @Override public WebClient get() { - return client; - } - - @Override public void close() { - client.endpointGroup().close(); - } - - @Override public String toString() { - return client.uri().toString(); - } - }).index("zipkin-test").flushOnWrites(true); - } - - String baseUrl() { - return "http://" + container.getHost() + ":" + container.getMappedPort(9200); - } - - // mostly waiting for https://github.com/testcontainers/testcontainers-java/issues/3537 - static final class ElasticsearchContainer extends GenericContainer { - ElasticsearchContainer(int majorVersion) { - super(parse("ghcr.io/openzipkin/zipkin-elasticsearch" + majorVersion + ":2.23.2")); - if ("true".equals(System.getProperty("docker.skip"))) { - throw new TestAbortedException("${docker.skip} == true"); - } - addExposedPort(9200); - waitStrategy = Wait.forHealthcheck(); - withLogConsumer(new Slf4jLogConsumer(LOGGER)); - } - } - - static String index(TestInfo testInfo) { - String result; - if (testInfo.getTestMethod().isPresent()) { - result = testInfo.getTestMethod().get().getName(); - } else { - assert testInfo.getTestClass().isPresent(); - result = testInfo.getTestClass().get().getSimpleName(); - } - result = result.toLowerCase(); - return result.length() <= 48 ? 
result : result.substring(result.length() - 48); - } -} diff --git a/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/integration/ITElasticsearchStorage.java b/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/integration/ITElasticsearchStorage.java deleted file mode 100644 index 2d7fb2770b1..00000000000 --- a/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/integration/ITElasticsearchStorage.java +++ /dev/null @@ -1,176 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.elasticsearch.integration; - -import com.linecorp.armeria.client.ClientFactory; -import com.linecorp.armeria.client.WebClient; -import com.linecorp.armeria.common.AggregatedHttpResponse; -import com.linecorp.armeria.common.HttpMethod; -import com.linecorp.armeria.common.HttpRequest; -import com.linecorp.armeria.common.RequestHeaders; -import java.io.IOException; -import java.util.List; -import org.junit.jupiter.api.Nested; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.TestInfo; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import zipkin2.Span; -import zipkin2.elasticsearch.ElasticsearchStorage; -import zipkin2.elasticsearch.InternalForTests; -import zipkin2.storage.StorageComponent; - -import static zipkin2.elasticsearch.integration.ElasticsearchExtension.index; -import static zipkin2.storage.ITDependencies.aggregateLinks; - -abstract class ITElasticsearchStorage { - - static final Logger LOGGER = LoggerFactory.getLogger(ITElasticsearchStorage.class); - - abstract ElasticsearchExtension elasticsearch(); - - @Nested - class ITTraces extends zipkin2.storage.ITTraces { - @Override protected StorageComponent.Builder newStorageBuilder(TestInfo testInfo) { - return elasticsearch().computeStorageBuilder().index(index(testInfo)); - } - - @Override public void clear() throws IOException { - storage.clear(); - } - } - - @Nested - class ITSpanStore extends zipkin2.storage.ITSpanStore { - @Override protected StorageComponent.Builder newStorageBuilder(TestInfo testInfo) { - return elasticsearch().computeStorageBuilder().index(index(testInfo)); - } - - @Override public void clear() throws IOException { - storage.clear(); - } - } - - @Nested - class ITSpanStoreHeavy extends zipkin2.storage.ITSpanStoreHeavy { - @Override protected StorageComponent.Builder newStorageBuilder(TestInfo testInfo) { - return elasticsearch().computeStorageBuilder().index(index(testInfo)); - } - - @Override public void clear() throws IOException { - 
storage.clear(); - } - } - - @Nested - class ITSearchEnabledFalse extends zipkin2.storage.ITSearchEnabledFalse { - @Override protected StorageComponent.Builder newStorageBuilder(TestInfo testInfo) { - return elasticsearch().computeStorageBuilder().index(index(testInfo)); - } - - @Override public void clear() throws IOException { - storage.clear(); - } - } - - @Nested - class ITServiceAndSpanNames extends zipkin2.storage.ITServiceAndSpanNames { - @Override protected StorageComponent.Builder newStorageBuilder(TestInfo testInfo) { - return elasticsearch().computeStorageBuilder().index(index(testInfo)); - } - - @Override public void clear() throws IOException { - storage.clear(); - } - } - - @Nested - class ITAutocompleteTags extends zipkin2.storage.ITAutocompleteTags { - @Override protected StorageComponent.Builder newStorageBuilder(TestInfo testInfo) { - return elasticsearch().computeStorageBuilder().index(index(testInfo)); - } - - @Override public void clear() throws IOException { - storage.clear(); - } - } - - @Nested - class ITStrictTraceIdFalse extends zipkin2.storage.ITStrictTraceIdFalse { - @Override protected StorageComponent.Builder newStorageBuilder(TestInfo testInfo) { - return elasticsearch().computeStorageBuilder().index(index(testInfo)); - } - - @Override public void clear() throws IOException { - storage.clear(); - } - } - - @Nested - class ITDependencies extends zipkin2.storage.ITDependencies { - @Override protected StorageComponent.Builder newStorageBuilder(TestInfo testInfo) { - return elasticsearch().computeStorageBuilder().index(index(testInfo)); - } - - @Override protected void processDependencies(List spans) { - aggregateDependencies(storage, spans); - } - - @Override public void clear() throws IOException { - storage.clear(); - } - } - - @Nested - class ITDependenciesHeavy extends zipkin2.storage.ITDependenciesHeavy { - @Override protected StorageComponent.Builder newStorageBuilder(TestInfo testInfo) { - return 
elasticsearch().computeStorageBuilder().index(index(testInfo)); - } - - @Override protected void processDependencies(List spans) { - aggregateDependencies(storage, spans); - } - - @Override public void clear() throws IOException { - storage.clear(); - } - } - - /** - * The current implementation does not include dependency aggregation. It includes retrieval of - * pre-aggregated links, usually made via zipkin-dependencies - */ - static void aggregateDependencies(ElasticsearchStorage storage, List spans) { - aggregateLinks(spans).forEach( - (midnight, links) -> InternalForTests.writeDependencyLinks( - storage, links, midnight)); - } - - @Test void testUsageOfDeprecatedFeatures() { - WebClient webClient = WebClient.builder(elasticsearch().baseUrl()).factory(ClientFactory.builder() - .useHttp2Preface(false).build()).build(); - final AggregatedHttpResponse response = - webClient.execute(HttpRequest.of(RequestHeaders.of(HttpMethod.GET, - "/_migration/deprecations"))).aggregate().join(); - if (!response.contentAscii().isEmpty()) { - LOGGER.warn("The ElasticSearch instance used during IT's is using deprecated features or " - + "configuration. This is likely nothing to be really worried about (for example 'xpack.monitoring.enabled' " - + "setting), but nevertheless it should be looked at to see if our docker image used during " - + "integration tests needs updating for the next version of ElasticSearch. " - + "See https://www.elastic.co/guide/en/elasticsearch/reference/current/migration-api-deprecation.html" - + "for more information. 
This is the deprecation warning we received:\n\n" - + response.contentAscii()); - } - } -} diff --git a/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/integration/ITElasticsearchStorageV6.java b/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/integration/ITElasticsearchStorageV6.java deleted file mode 100644 index ec63b371168..00000000000 --- a/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/integration/ITElasticsearchStorageV6.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.elasticsearch.integration; - -import org.junit.jupiter.api.TestInstance; -import org.junit.jupiter.api.extension.RegisterExtension; - -@TestInstance(TestInstance.Lifecycle.PER_CLASS) -class ITElasticsearchStorageV6 extends ITElasticsearchStorage { - - @RegisterExtension ElasticsearchExtension elasticsearch = new ElasticsearchExtension(6); - - @Override ElasticsearchExtension elasticsearch() { - return elasticsearch; - } -} diff --git a/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/integration/ITElasticsearchStorageV7.java b/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/integration/ITElasticsearchStorageV7.java deleted file mode 100644 index 343d30e0e89..00000000000 --- a/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/integration/ITElasticsearchStorageV7.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.elasticsearch.integration; - -import org.junit.jupiter.api.Nested; -import org.junit.jupiter.api.TestInfo; -import org.junit.jupiter.api.TestInstance; -import org.junit.jupiter.api.extension.RegisterExtension; -import zipkin2.elasticsearch.ElasticsearchStorage; - -import static zipkin2.elasticsearch.integration.ElasticsearchExtension.index; - -@TestInstance(TestInstance.Lifecycle.PER_CLASS) -class ITElasticsearchStorageV7 extends ITElasticsearchStorage { - - @RegisterExtension ElasticsearchExtension elasticsearch = new ElasticsearchExtension(7); - - @Override ElasticsearchExtension elasticsearch() { - return elasticsearch; - } - - @Nested - class ITEnsureIndexTemplate extends zipkin2.elasticsearch.integration.ITEnsureIndexTemplate { - @Override protected ElasticsearchStorage.Builder newStorageBuilder(TestInfo testInfo) { - return elasticsearch().computeStorageBuilder().index(index(testInfo)); - } - } -} diff --git a/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/integration/ITEnsureIndexTemplate.java b/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/integration/ITEnsureIndexTemplate.java deleted file mode 100644 index 32cfb87c9ef..00000000000 --- a/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/integration/ITEnsureIndexTemplate.java +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.elasticsearch.integration; - -import com.linecorp.armeria.common.AggregatedHttpRequest; -import com.linecorp.armeria.common.HttpData; -import com.linecorp.armeria.common.HttpMethod; -import com.linecorp.armeria.common.RequestHeaders; -import java.io.IOException; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.TestInfo; -import org.junit.jupiter.api.TestInstance; -import zipkin2.Span; -import zipkin2.elasticsearch.ElasticsearchStorage; -import zipkin2.elasticsearch.internal.Internal; -import zipkin2.storage.ITStorage; -import zipkin2.storage.StorageComponent; - -import static com.linecorp.armeria.common.HttpHeaderNames.CONTENT_TYPE; -import static com.linecorp.armeria.common.HttpMethod.DELETE; -import static com.linecorp.armeria.common.HttpMethod.GET; -import static com.linecorp.armeria.common.HttpMethod.PUT; -import static com.linecorp.armeria.common.MediaType.JSON_UTF_8; -import static java.util.Arrays.asList; -import static zipkin2.TestObjects.spanBuilder; - -@TestInstance(TestInstance.Lifecycle.PER_CLASS) -abstract class ITEnsureIndexTemplate extends ITStorage { - @Override protected abstract ElasticsearchStorage.Builder newStorageBuilder(TestInfo testInfo); - - @Override protected void configureStorageForTest(StorageComponent.Builder storage) { - } - - @Override protected boolean initializeStoragePerTest() { - return true; // We need a different index pattern per test - } - - @Override protected void clear() throws Exception { - storage.clear(); - } - - @Test - void createZipkinIndexTemplate_getTraces_returnsSuccess(TestInfo testInfo) throws Exception { - String testSuffix = testSuffix(testInfo); - storage = newStorageBuilder(testInfo).templatePriority(10).build(); - try { - // Delete all templates in order to create the "catch-all" index template, because - // ES does not allow multiple index templates of the same index_patterns and priority - http(DELETE, "/_template/*"); - setUpCatchAllTemplate(); - - // Implicitly 
creates an index template - checkStorage(); - - // Get all templates. We don't assert on this at the moment. This is for logging on ES_DEBUG. - http(GET, "/_template"); - - // Now, add a span, which should be indexed differently than default. - Span span = spanBuilder(testSuffix).putTag("queryTest", "ok").build(); - accept(asList(span)); - - // Assert that Zipkin's templates work and source is returned - assertGetTracesReturns( - requestBuilder() - .parseAnnotationQuery("queryTest=" + span.tags().get("queryTest")) - .build(), - asList(span)); - } finally { - // Delete "catch-all" index template so it does not interfere with any other test - http(DELETE, catchAllIndexPath()); - } - } - - /** - * Create a "catch-all" index template with the lowest priority prior to running tests to ensure - * that the index templates created during tests with higher priority function as designed. Only - * applicable for ES >= 7.8 - */ - void setUpCatchAllTemplate() throws IOException { - AggregatedHttpRequest updateTemplate = AggregatedHttpRequest.of( - RequestHeaders.of(PUT, catchAllIndexPath(), CONTENT_TYPE, JSON_UTF_8), - HttpData.ofUtf8(catchAllTemplate())); - Internal.instance.http(storage).newCall(updateTemplate, (parser, contentString) -> null, - "update-template").execute(); - } - - String catchAllIndexPath() { - return "/_index_template/catch-all"; - } - - /** Catch-all template doesn't store source */ - String catchAllTemplate() { - return "{\n" - + " \"index_patterns\" : [\"*\"],\n" - + " \"priority\" : 0,\n" - + " \"template\": {\n" - + " \"settings\" : {\n" - + " \"number_of_shards\" : 1\n" - + " },\n" - + " \"mappings\" : {\n" - + " \"_source\": {\"enabled\": false }\n" - + " }\n" - + " }\n" - + "}"; - } - - void http(HttpMethod method, String path) throws IOException { - AggregatedHttpRequest delete = AggregatedHttpRequest.of(method, path); - Internal.instance.http(storage) - .newCall(delete, (parser, contentString) -> null, method + "-" + path).execute(); - } -} diff 
--git a/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/integration/IgnoredDeprecationWarnings.java b/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/integration/IgnoredDeprecationWarnings.java deleted file mode 100644 index 0e00195a69b..00000000000 --- a/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/integration/IgnoredDeprecationWarnings.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.elasticsearch.integration; - -import java.util.List; -import java.util.regex.Pattern; - -import static java.util.Arrays.asList; -import static java.util.regex.Pattern.compile; - -/** - * When ES emits a deprecation warning header in response to a method being called, the integration - * test will fail. We cannot always fix our code however to take into account all deprecation - * warnings, as we have to support multiple versions of ES. For these cases, add the warning message - * to {@link #IGNORE_THESE_WARNINGS} array so it will not raise an exception anymore. 
- */ -abstract class IgnoredDeprecationWarnings { - - // These will be matched using header.contains(ignored[i]), so find a unique substring of the - // warning header for it to be ignored - static List IGNORE_THESE_WARNINGS = asList( - compile("Elasticsearch 7\\.x will read, but not allow creation of new indices containing ':'"), - compile("has index patterns \\[.*] matching patterns from existing older templates"), - compile("has index patterns \\[.*] matching patterns from existing composable templates") - ); -} diff --git a/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/internal/BulkCallBuilderTest.java b/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/internal/BulkCallBuilderTest.java deleted file mode 100644 index b524c53b9b7..00000000000 --- a/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/internal/BulkCallBuilderTest.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.elasticsearch.internal; - -import java.util.concurrent.RejectedExecutionException; -import org.junit.Test; - -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static zipkin2.elasticsearch.internal.BulkCallBuilder.CHECK_FOR_ERRORS; -import static zipkin2.elasticsearch.internal.JsonSerializers.JSON_FACTORY; - -public class BulkCallBuilderTest { - @Test public void throwsRejectedExecutionExceptionWhenOverCapacity() { - String response = - "{\"took\":0,\"errors\":true,\"items\":[{\"index\":{\"_index\":\"dev-zipkin:span-2019.04.18\",\"_type\":\"span\",\"_id\":\"2511\",\"status\":429,\"error\":{\"type\":\"es_rejected_execution_exception\",\"reason\":\"rejected execution of org.elasticsearch.transport.TransportService$7@7ec1ea93 on EsThreadPoolExecutor[bulk, queue capacity = 200, org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor@621571ba[Running, pool size = 4, active threads = 4, queued tasks = 200, completed tasks = 3838534]]\"}}}]}"; - - assertThatThrownBy( - () -> CHECK_FOR_ERRORS.convert(JSON_FACTORY.createParser(response), () -> response)) - .isInstanceOf(RejectedExecutionException.class) - .hasMessage( - "rejected execution of org.elasticsearch.transport.TransportService$7@7ec1ea93 on EsThreadPoolExecutor[bulk, queue capacity = 200, org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor@621571ba[Running, pool size = 4, active threads = 4, queued tasks = 200, completed tasks = 3838534]]"); - } - - @Test public void throwsRuntimeExceptionAsReasonWhenPresent() { - String response = - "{\"error\":{\"root_cause\":[{\"type\":\"illegal_argument_exception\",\"reason\":\"Fielddata is disabled on text fields by default. Set fielddata=true on [spanName] in order to load fielddata in memory by uninverting the inverted index. Note that this can however use significant memory. 
Alternatively use a keyword field instead.\"}],\"type\":\"search_phase_execution_exception\",\"reason\":\"all shards failed\",\"phase\":\"query\",\"grouped\":true,\"failed_shards\":[{\"shard\":0,\"index\":\"zipkin-2017-05-14\",\"node\":\"IqceAwZnSvyv0V0xALkEnQ\",\"reason\":{\"type\":\"illegal_argument_exception\",\"reason\":\"Fielddata is disabled on text fields by default. Set fielddata=true on [spanName] in order to load fielddata in memory by uninverting the inverted index. Note that this can however use significant memory. Alternatively use a keyword field instead.\"}}]},\"status\":400}"; - - assertThatThrownBy( - () -> CHECK_FOR_ERRORS.convert(JSON_FACTORY.createParser(response), () -> response)) - .isInstanceOf(RuntimeException.class) - .hasMessage("Fielddata is disabled on text fields by default. Set fielddata=true on [spanName] in order to load fielddata in memory by uninverting the inverted index. Note that this can however use significant memory. Alternatively use a keyword field instead."); - } -} diff --git a/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/internal/BulkIndexWriterTest.java b/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/internal/BulkIndexWriterTest.java deleted file mode 100644 index b017df4dc22..00000000000 --- a/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/internal/BulkIndexWriterTest.java +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. 
See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.elasticsearch.internal; - -import io.netty.buffer.ByteBufOutputStream; -import io.netty.buffer.ByteBufUtil; -import io.netty.buffer.Unpooled; -import java.nio.charset.StandardCharsets; -import org.junit.Before; -import org.junit.Test; -import zipkin2.Span; -import zipkin2.Span.Kind; -import zipkin2.codec.SpanBytesDecoder; - -import static org.assertj.core.api.Assertions.assertThat; -import static zipkin2.TestObjects.CLIENT_SPAN; -import static zipkin2.TestObjects.FRONTEND; -import static zipkin2.TestObjects.TODAY; - -public class BulkIndexWriterTest { - - // Our usual test span depends on currentTime for testing span stores with TTL, but we'd prefer - // to have a fixed span here to avoid depending on business logic in test assertions. - static final Span STABLE_SPAN = CLIENT_SPAN.toBuilder() - .timestamp(100) - .clearAnnotations() - .build(); - - ByteBufOutputStream buffer; - - @Before public void setUp() { - buffer = new ByteBufOutputStream(Unpooled.buffer()); - } - - @Test public void span_addsDocumentId() throws Exception { - String id = BulkIndexWriter.SPAN.writeDocument(STABLE_SPAN, buffer); - - assertThat(id) - .isEqualTo("7180c278b62e8f6a216a2aea45d08fc9-198140c2a26bfa58fed4a572dfe3d63b"); - } - - @Test public void spanSearchDisabled_addsDocumentId() throws Exception { - String id = BulkIndexWriter.SPAN_SEARCH_DISABLED.writeDocument(STABLE_SPAN, buffer); - - assertThat(id) - .isEqualTo("7180c278b62e8f6a216a2aea45d08fc9-bfe7a3c0d9ee83b1d218bd0f383f006a"); - } - - @Test public void spanSearchFields_skipsWhenNoData() { - Span span = Span.newBuilder() - .traceId("20") - .id("22") - .parentId("21") - .timestamp(0L) - .localEndpoint(FRONTEND) - .kind(Kind.CLIENT) - .build(); - - BulkIndexWriter.SPAN.writeDocument(span, buffer); - - assertThat(buffer.buffer().toString(StandardCharsets.UTF_8)).startsWith("{\"traceId\":\""); - } - - @Test 
public void spanSearchFields_addsTimestampFieldWhenNoTags() { - Span span = - Span.newBuilder() - .traceId("20") - .id("22") - .name("") - .parentId("21") - .timestamp(1000L) - .localEndpoint(FRONTEND) - .kind(Kind.CLIENT) - .build(); - - BulkIndexWriter.SPAN.writeDocument(span, buffer); - - assertThat(buffer.buffer().toString(StandardCharsets.UTF_8)) - .startsWith("{\"timestamp_millis\":1,\"traceId\":"); - } - - @Test public void spanSearchFields_addsQueryFieldForAnnotations() { - Span span = Span.newBuilder() - .traceId("20") - .id("22") - .name("") - .parentId("21") - .localEndpoint(FRONTEND) - .addAnnotation(1L, "\"foo") - .build(); - - BulkIndexWriter.SPAN.writeDocument(span, buffer); - - assertThat(buffer.buffer().toString(StandardCharsets.UTF_8)) - .startsWith("{\"_q\":[\"\\\"foo\"],\"traceId"); - } - - @Test public void spanSearchFields_addsQueryFieldForTags() { - Span span = Span.newBuilder() - .traceId("20") - .id("22") - .parentId("21") - .localEndpoint(FRONTEND) - .putTag("\"foo", "\"bar") - .build(); - - BulkIndexWriter.SPAN.writeDocument(span, buffer); - - assertThat(buffer.buffer().toString(StandardCharsets.UTF_8)) - .startsWith("{\"_q\":[\"\\\"foo\",\"\\\"foo=\\\"bar\"],\"traceId"); - } - - @Test public void spanSearchFields_readableByNormalJsonCodec() { - Span span = - Span.newBuilder().traceId("20").id("20").name("get").timestamp(TODAY * 1000).build(); - - BulkIndexWriter.SPAN.writeDocument(span, buffer); - - assertThat(SpanBytesDecoder.JSON_V2.decodeOne(ByteBufUtil.getBytes(buffer.buffer()))) - .isEqualTo(span); // ignores timestamp_millis field - } - - @Test public void spanSearchDisabled_doesntAddQueryFields() { - BulkIndexWriter.SPAN_SEARCH_DISABLED.writeDocument(CLIENT_SPAN, buffer); - - assertThat(buffer.buffer().toString(StandardCharsets.UTF_8)) - .startsWith("{\"traceId\":\""); - } -} diff --git a/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/internal/IndexNameFormatterTest.java 
b/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/internal/IndexNameFormatterTest.java deleted file mode 100644 index 3c0966aa077..00000000000 --- a/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/internal/IndexNameFormatterTest.java +++ /dev/null @@ -1,284 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.elasticsearch.internal; - -import java.text.DateFormat; -import java.text.ParseException; -import java.text.SimpleDateFormat; -import java.util.TimeZone; -import org.junit.Test; - -import static org.assertj.core.api.Assertions.assertThat; - -public class IndexNameFormatterTest { - IndexNameFormatter formatter = - IndexNameFormatter.newBuilder().index("zipkin").dateSeparator('-').build(); - DateFormat iso8601 = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssX"); - - public IndexNameFormatterTest() { - iso8601.setTimeZone(TimeZone.getTimeZone("UTC")); - } - - @Test - public void indexNameForTimestampRange_sameTime() throws ParseException { - long start = iso8601.parse("2016-11-01T01:01:01Z").getTime(); - - assertThat(formatter.formatTypeAndRange("span", start, start)) - .containsExactly("zipkin*span-2016-11-01"); - } - - @Test - public void indexNameForTimestampRange_sameDay() throws ParseException { - long start = iso8601.parse("2016-11-01T01:01:01Z").getTime(); - long end = iso8601.parse("2016-11-01T23:59:59Z").getTime(); - - 
assertThat(formatter.formatTypeAndRange("span", start, end)) - .containsExactly("zipkin*span-2016-11-01"); - } - - @Test - public void indexNameForTimestampRange_sameMonth() throws ParseException { - long start = iso8601.parse("2016-11-15T01:01:01Z").getTime(); - long end = iso8601.parse("2016-11-16T23:59:59Z").getTime(); - - assertThat(formatter.formatTypeAndRange("span", start, end)) - .containsExactly("zipkin*span-2016-11-15", "zipkin*span-2016-11-16"); - } - - @Test - public void indexNameForTimestampRange_sameMonth_startingAtOne() throws ParseException { - long start = iso8601.parse("2016-11-1T01:01:01Z").getTime(); - long end = iso8601.parse("2016-11-3T23:59:59Z").getTime(); - - assertThat(formatter.formatTypeAndRange("span", start, end)) - .containsExactly( - "zipkin*span-2016-11-01", "zipkin*span-2016-11-02", "zipkin*span-2016-11-03"); - } - - @Test - public void indexNameForTimestampRange_nextMonth() throws ParseException { - long start = iso8601.parse("2016-10-31T01:01:01Z").getTime(); - long end = iso8601.parse("2016-11-01T23:59:59Z").getTime(); - - assertThat(formatter.formatTypeAndRange("span", start, end)) - .containsExactly("zipkin*span-2016-10-31", "zipkin*span-2016-11-01"); - } - - @Test - public void indexNameForTimestampRange_compressesMonth() throws ParseException { - long start = iso8601.parse("2016-10-01T01:01:01Z").getTime(); - long end = iso8601.parse("2016-10-31T23:59:59Z").getTime(); - - assertThat(formatter.formatTypeAndRange("span", start, end)) - .containsExactly("zipkin*span-2016-10-*"); - } - - @Test - public void indexNameForTimestampRange_skipMonths() throws ParseException { - long start = iso8601.parse("2016-10-31T01:01:01Z").getTime(); - long end = iso8601.parse("2016-12-01T23:59:59Z").getTime(); - - assertThat(formatter.formatTypeAndRange("span", start, end)) - .containsExactly( - "zipkin*span-2016-10-31", "zipkin*span-2016-11-*", "zipkin*span-2016-12-01"); - } - - @Test - public void 
indexNameForTimestampRange_skipMonths_leapYear() throws ParseException { - long start = iso8601.parse("2016-02-28T01:01:01Z").getTime(); - long end = iso8601.parse("2016-04-01T23:59:59Z").getTime(); - - assertThat(formatter.formatTypeAndRange("span", start, end)) - .containsExactly( - "zipkin*span-2016-02-28", - "zipkin*span-2016-02-29", - "zipkin*span-2016-03-*", - "zipkin*span-2016-04-01"); - } - - @Test - public void indexNameForTimestampRange_compressesYear() throws ParseException { - long start = iso8601.parse("2016-01-01T01:01:01Z").getTime(); - long end = iso8601.parse("2016-12-31T23:59:59Z").getTime(); - - assertThat(formatter.formatTypeAndRange("span", start, end)) - .containsExactly("zipkin*span-2016-*"); - } - - @Test - public void indexNameForTimestampRange_skipYears() throws ParseException { - long start = iso8601.parse("2016-10-31T01:01:01Z").getTime(); - long end = iso8601.parse("2018-01-01T23:59:59Z").getTime(); - - assertThat(formatter.formatTypeAndRange("span", start, end)) - .containsExactly( - "zipkin*span-2016-10-31", - "zipkin*span-2016-11-*", - "zipkin*span-2016-12-*", - "zipkin*span-2017-*", - "zipkin*span-2018-01-01"); - } - - @Test - public void indexNameForTimestampRange_other_sameDay() throws ParseException { - formatter = formatter.toBuilder().dateSeparator('.').build(); - long start = iso8601.parse("2016-11-01T01:01:01Z").getTime(); - long end = iso8601.parse("2016-11-01T23:59:59Z").getTime(); - - assertThat(formatter.formatTypeAndRange("span", start, end)) - .containsExactly("zipkin*span-2016.11.01"); - } - - @Test - public void indexNameForTimestampRange_other_sameMonth() throws ParseException { - formatter = formatter.toBuilder().dateSeparator('.').build(); - long start = iso8601.parse("2016-11-15T01:01:01Z").getTime(); - long end = iso8601.parse("2016-11-16T23:59:59Z").getTime(); - - assertThat(formatter.formatTypeAndRange("span", start, end)) - .containsExactly("zipkin*span-2016.11.15", "zipkin*span-2016.11.16"); - } - - @Test - 
public void indexNameForTimestampRange_sameMonth_other_startingAtOne() throws ParseException { - formatter = formatter.toBuilder().dateSeparator('.').build(); - long start = iso8601.parse("2016-11-1T01:01:01Z").getTime(); - long end = iso8601.parse("2016-11-3T23:59:59Z").getTime(); - - assertThat(formatter.formatTypeAndRange("span", start, end)) - .containsExactly( - "zipkin*span-2016.11.01", "zipkin*span-2016.11.02", "zipkin*span-2016.11.03"); - } - - @Test - public void indexNameForTimestampRange_other_nextMonth() throws ParseException { - formatter = formatter.toBuilder().dateSeparator('.').build(); - long start = iso8601.parse("2016-10-31T01:01:01Z").getTime(); - long end = iso8601.parse("2016-11-01T23:59:59Z").getTime(); - - assertThat(formatter.formatTypeAndRange("span", start, end)) - .containsExactly("zipkin*span-2016.10.31", "zipkin*span-2016.11.01"); - } - - @Test - public void indexNameForTimestampRange_other_compressesMonth() throws ParseException { - formatter = formatter.toBuilder().dateSeparator('.').build(); - long start = iso8601.parse("2016-10-01T01:01:01Z").getTime(); - long end = iso8601.parse("2016-10-31T23:59:59Z").getTime(); - - assertThat(formatter.formatTypeAndRange("span", start, end)) - .containsExactly("zipkin*span-2016.10.*"); - } - - @Test - public void indexNameForTimestampRange_other_skipMonths() throws ParseException { - formatter = formatter.toBuilder().dateSeparator('.').build(); - long start = iso8601.parse("2016-10-31T01:01:01Z").getTime(); - long end = iso8601.parse("2016-12-01T23:59:59Z").getTime(); - - assertThat(formatter.formatTypeAndRange("span", start, end)) - .containsExactly( - "zipkin*span-2016.10.31", "zipkin*span-2016.11.*", "zipkin*span-2016.12.01"); - } - - @Test - public void indexNameForTimestampRange_skipMonths_other_leapYear() throws ParseException { - formatter = formatter.toBuilder().dateSeparator('.').build(); - long start = iso8601.parse("2016-02-28T01:01:01Z").getTime(); - long end = 
iso8601.parse("2016-04-01T23:59:59Z").getTime(); - - assertThat(formatter.formatTypeAndRange("span", start, end)) - .containsExactly( - "zipkin*span-2016.02.28", - "zipkin*span-2016.02.29", - "zipkin*span-2016.03.*", - "zipkin*span-2016.04.01"); - } - - @Test - public void indexNameForTimestampRange_other_compressesYear() throws ParseException { - formatter = formatter.toBuilder().dateSeparator('.').build(); - long start = iso8601.parse("2016-01-01T01:01:01Z").getTime(); - long end = iso8601.parse("2016-12-31T23:59:59Z").getTime(); - - assertThat(formatter.formatTypeAndRange("span", start, end)) - .containsExactly("zipkin*span-2016.*"); - } - - @Test - public void indexNameForTimestampRange_other_skipYears() throws ParseException { - formatter = formatter.toBuilder().dateSeparator('.').build(); - long start = iso8601.parse("2016-10-31T01:01:01Z").getTime(); - long end = iso8601.parse("2018-01-01T23:59:59Z").getTime(); - - assertThat(formatter.formatTypeAndRange("span", start, end)) - .containsExactly( - "zipkin*span-2016.10.31", - "zipkin*span-2016.11.*", - "zipkin*span-2016.12.*", - "zipkin*span-2017.*", - "zipkin*span-2018.01.01"); - } - - @Test - public void indexNameForTimestampRange_compressesTens() throws ParseException { - formatter = formatter.toBuilder().dateSeparator('.').build(); - long start = iso8601.parse("2016-10-01T01:01:01Z").getTime(); - long end = iso8601.parse("2016-10-30T01:01:01Z").getTime(); - - assertThat(formatter.formatTypeAndRange("span", start, end)) - .containsExactly( - "zipkin*span-2016.10.0*", - "zipkin*span-2016.10.1*", - "zipkin*span-2016.10.2*", - "zipkin*span-2016.10.30"); - } - - @Test - public void indexNameForTimestampRange_compressesTens_startingAtNine() throws ParseException { - formatter = formatter.toBuilder().dateSeparator('.').build(); - long start = iso8601.parse("2016-10-09T01:01:01Z").getTime(); - long end = iso8601.parse("2016-10-30T01:01:01Z").getTime(); - - assertThat(formatter.formatTypeAndRange("span", start, 
end)) - .containsExactly( - "zipkin*span-2016.10.09", - "zipkin*span-2016.10.1*", - "zipkin*span-2016.10.2*", - "zipkin*span-2016.10.30"); - } - - @Test - public void indexNameForTimestampRange_compressesTens_startingAtNineteen() throws ParseException { - formatter = formatter.toBuilder().dateSeparator('.').build(); - long start = iso8601.parse("2016-10-19T01:01:01Z").getTime(); - long end = iso8601.parse("2016-10-30T01:01:01Z").getTime(); - - assertThat(formatter.formatTypeAndRange("span", start, end)) - .containsExactly( - "zipkin*span-2016.10.19", - "zipkin*span-2016.10.2*", - "zipkin*span-2016.10.30"); - } - - @Test - public void indexNameForTimestampRange_compressesTens_not30DayMonth() throws ParseException { - formatter = formatter.toBuilder().dateSeparator('.').build(); - long start = iso8601.parse("2016-06-01T01:01:01Z").getTime(); - long end = iso8601.parse("2016-06-30T01:01:01Z").getTime(); - - assertThat(formatter.formatTypeAndRange("span", start, end)) - .containsExactly("zipkin*span-2016.06.*"); - } -} diff --git a/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/internal/client/HttpCallTest.java b/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/internal/client/HttpCallTest.java deleted file mode 100644 index 824d3037912..00000000000 --- a/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/internal/client/HttpCallTest.java +++ /dev/null @@ -1,285 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. 
See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.elasticsearch.internal.client; // to access package-private stuff - -import com.linecorp.armeria.client.UnprocessedRequestException; -import com.linecorp.armeria.client.WebClient; -import com.linecorp.armeria.client.endpoint.EndpointGroupException; -import com.linecorp.armeria.common.AggregatedHttpRequest; -import com.linecorp.armeria.common.AggregatedHttpResponse; -import com.linecorp.armeria.common.HttpData; -import com.linecorp.armeria.common.HttpMethod; -import com.linecorp.armeria.common.HttpStatus; -import com.linecorp.armeria.common.RequestHeaders; -import com.linecorp.armeria.common.ResponseHeaders; -import com.linecorp.armeria.common.logging.RequestLog; -import com.linecorp.armeria.testing.junit5.server.mock.MockWebServerExtension; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.PooledByteBufAllocator; -import java.io.FileNotFoundException; -import java.io.IOException; -import java.util.LinkedHashMap; -import java.util.Map; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.RejectedExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicReference; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.RegisterExtension; -import zipkin2.Call; -import zipkin2.Callback; -import zipkin2.internal.Nullable; - -import static com.linecorp.armeria.common.MediaType.PLAIN_TEXT_UTF_8; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.awaitility.Awaitility.await; -import static org.junit.jupiter.api.Assertions.fail; -import static zipkin2.TestObjects.UTF_8; - -class HttpCallTest { - static final HttpCall.BodyConverter 
NULL = (parser, contentString) -> null; - - private static final AggregatedHttpResponse SUCCESS_RESPONSE = - AggregatedHttpResponse.of(HttpStatus.OK); - - @RegisterExtension static MockWebServerExtension server = new MockWebServerExtension(); - - static final AggregatedHttpRequest REQUEST = AggregatedHttpRequest.of(HttpMethod.GET, "/"); - - HttpCall.Factory http; - - @BeforeEach void setUp() { - http = new HttpCall.Factory(WebClient.of(server.httpUri())); - } - - @Test void emptyContent() throws IOException { - server.enqueue(AggregatedHttpResponse.of(HttpStatus.OK, PLAIN_TEXT_UTF_8, "")); - - HttpCall call = http.newCall(REQUEST, (parser, contentString) -> fail(), "test"); - assertThat(call.execute()).isNull(); - - server.enqueue(AggregatedHttpResponse.of(HttpStatus.OK, PLAIN_TEXT_UTF_8, "")); - CompletableCallback future = new CompletableCallback<>(); - http.newCall(REQUEST, (parser, contentString) -> "hello", "test").enqueue(future); - assertThat(future.join()).isNull(); - } - - @Test void propagatesOnDispatcherThreadWhenFatal() throws Exception { - server.enqueue(SUCCESS_RESPONSE); - - final LinkedBlockingQueue q = new LinkedBlockingQueue<>(); - CountDownLatch latch = new CountDownLatch(1); - http.newCall(REQUEST, (parser, contentString) -> { - latch.countDown(); - throw new LinkageError(); - }, "test").enqueue(new Callback() { - @Override public void onSuccess(@Nullable Object value) { - q.add(value); - } - - @Override public void onError(Throwable t) { - q.add(t); - } - }); - - // It can take some time for the HTTP response to process. 
Wait until we reach the parser - latch.await(); - - // Wait a little longer for a callback to fire (it should never do this) - assertThat(q.poll(100, TimeUnit.MILLISECONDS)) - .as("expected callbacks to never signal") - .isNull(); - } - - @Test void executionException_conversionException() { - server.enqueue(SUCCESS_RESPONSE); - - Call call = http.newCall(REQUEST, (parser, contentString) -> { - throw new IllegalArgumentException("eeek"); - }, "test"); - - assertThatThrownBy(call::execute).isInstanceOf(IllegalArgumentException.class); - } - - @Test void cloned() throws Exception { - server.enqueue(SUCCESS_RESPONSE); - - Call call = http.newCall(REQUEST, (parser, contentString) -> null, "test"); - call.execute(); - - assertThatThrownBy(call::execute).isInstanceOf(IllegalStateException.class); - - server.enqueue(SUCCESS_RESPONSE); - - call.clone().execute(); - } - - @Test void executionException_5xx() { - server.enqueue(AggregatedHttpResponse.of(HttpStatus.INTERNAL_SERVER_ERROR)); - - Call call = http.newCall(REQUEST, NULL, "test"); - - assertThatThrownBy(call::execute) - .isInstanceOf(RuntimeException.class) - .hasMessage("response for / failed: 500 Internal Server Error"); - } - - @Test void executionException_404() { - server.enqueue(AggregatedHttpResponse.of(HttpStatus.NOT_FOUND)); - - Call call = http.newCall(REQUEST, NULL, "test"); - - assertThatThrownBy(call::execute) - .isInstanceOf(FileNotFoundException.class) - .hasMessage("/"); - } - - @Test void releasesAllReferencesToByteBuf() { - // Force this to be a ref-counted response - byte[] message = "{\"Message\":\"error\"}".getBytes(UTF_8); - ByteBuf encodedBuf = PooledByteBufAllocator.DEFAULT.buffer(message.length); - encodedBuf.writeBytes(message); - AggregatedHttpResponse response = AggregatedHttpResponse.of( - ResponseHeaders.of(HttpStatus.FORBIDDEN), - HttpData.wrap(encodedBuf) - ); - - HttpCall call = http.newCall(REQUEST, NULL, "test"); - - // Invoke the parser directly because using the fake server will 
not result in ref-counted - assertThatThrownBy(() -> call.parseResponse(response, NULL)).hasMessage("error"); - assertThat(encodedBuf.refCnt()).isEqualTo(0); - } - - // For simplicity, we also parse messages from AWS Elasticsearch, as it prevents copy/paste. - @Test void executionException_message() { - Map responseToMessage = new LinkedHashMap<>(); - responseToMessage.put(AggregatedHttpResponse.of( - ResponseHeaders.of(HttpStatus.FORBIDDEN), - HttpData.ofUtf8( - "{\"Message\":\"User: anonymous is not authorized to perform: es:ESHttpGet\"}") - ), "User: anonymous is not authorized to perform: es:ESHttpGet"); - responseToMessage.put(AggregatedHttpResponse.of( - ResponseHeaders.of(HttpStatus.FORBIDDEN) - ), "response for / failed: 403 Forbidden"); - responseToMessage.put(AggregatedHttpResponse.of( - ResponseHeaders.of(HttpStatus.BAD_GATEWAY), - HttpData.ofUtf8("Message: sleet") // note: not json - ), "response for / failed: Message: sleet"); // In this case, we give request context - - Call call = http.newCall(REQUEST, NULL, "test"); - - for (Map.Entry entry : responseToMessage.entrySet()) { - server.enqueue(entry.getKey()); - - call = call.clone(); - assertThatThrownBy(call::execute) - .isInstanceOf(RuntimeException.class) - .hasMessage(entry.getValue()); - } - } - - @Test void setsCustomName() throws Exception { - server.enqueue(SUCCESS_RESPONSE); - - AtomicReference log = new AtomicReference<>(); - http = new HttpCall.Factory(WebClient.builder(server.httpUri()) - .decorator((client, ctx, req) -> { - ctx.log().whenComplete().thenAccept(log::set); - return client.execute(ctx, req); - }) - .build()); - - http.newCall(REQUEST, NULL, "custom-name").execute(); - - await().untilAsserted(() -> assertThat(log).doesNotHaveValue(null)); - assertThat(log.get().name()).isEqualTo("custom-name"); - } - - @Test void wrongScheme() { - server.enqueue(SUCCESS_RESPONSE); - - http = new HttpCall.Factory(WebClient.builder("https://localhost:" + server.httpPort()).build()); - - 
assertThatThrownBy(() -> http.newCall(REQUEST, NULL, "test").execute()) - .isInstanceOf(RejectedExecutionException.class) - // depending on JDK this is "OPENSSL_internal" or "not an SSL/TLS record" - .hasMessageContaining("SSL"); - } - - @Test void unprocessedRequest() { - server.enqueue(SUCCESS_RESPONSE); - - http = new HttpCall.Factory(WebClient.builder(server.httpUri()) - .decorator((client, ctx, req) -> { - throw UnprocessedRequestException.of(new EndpointGroupException("No endpoints")); - }) - .build()); - - assertThatThrownBy(() -> http.newCall(REQUEST, NULL, "test").execute()) - .isInstanceOf(RejectedExecutionException.class) - .hasMessage("No endpoints"); - } - - @Test void throwsRuntimeExceptionAsReasonWhenPresent() { - String body = - "{\"error\":{\"root_cause\":[{\"type\":\"illegal_argument_exception\",\"reason\":\"Fielddata is disabled on text fields by default. Set fielddata=true on [spanName] in order to load fielddata in memory by uninverting the inverted index. Note that this can however use significant memory. Alternatively use a keyword field instead.\"}],\"type\":\"search_phase_execution_exception\",\"reason\":\"all shards failed\",\"phase\":\"query\",\"grouped\":true,\"failed_shards\":[{\"shard\":0,\"index\":\"zipkin-2017-05-14\",\"node\":\"IqceAwZnSvyv0V0xALkEnQ\",\"reason\":{\"type\":\"illegal_argument_exception\",\"reason\":\"Fielddata is disabled on text fields by default. Set fielddata=true on [spanName] in order to load fielddata in memory by uninverting the inverted index. Note that this can however use significant memory. Alternatively use a keyword field instead.\"}}]},\"status\":400}"; - server.enqueue( - AggregatedHttpResponse.of(ResponseHeaders.of(HttpStatus.BAD_REQUEST), HttpData.ofUtf8(body)) - ); - - assertThatThrownBy(() -> http.newCall(REQUEST, NULL, "test").execute()) - .isInstanceOf(RuntimeException.class) - .hasMessage( - "Fielddata is disabled on text fields by default. 
Set fielddata=true on [spanName] in order to load fielddata in memory by uninverting the inverted index. Note that this can however use significant memory. Alternatively use a keyword field instead."); - } - - @Test void streamingContent() throws Exception { - server.enqueue(SUCCESS_RESPONSE); - - HttpCall.RequestSupplier supplier = new HttpCall.RequestSupplier() { - @Override public RequestHeaders headers() { - return RequestHeaders.of(HttpMethod.POST, "/"); - } - - @Override public void writeBody(HttpCall.RequestStream requestStream) { - requestStream.tryWrite(HttpData.ofUtf8("hello")); - requestStream.tryWrite(HttpData.ofUtf8(" world")); - } - }; - - http.newCall(supplier, NULL, "test").execute(); - - AggregatedHttpRequest request = server.takeRequest().request(); - assertThat(request.method()).isEqualTo(HttpMethod.POST); - assertThat(request.path()).isEqualTo("/"); - assertThat(request.contentUtf8()).isEqualTo("hello world"); - } - - // TODO(adriancole): Find a home for this generic conversion between Call and Java 8. - static final class CompletableCallback extends CompletableFuture implements Callback { - - @Override public void onSuccess(T value) { - complete(value); - } - - @Override public void onError(Throwable t) { - completeExceptionally(t); - } - } -} diff --git a/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/internal/client/SearchCallFactoryTest.java b/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/internal/client/SearchCallFactoryTest.java deleted file mode 100644 index 6077cbe9d70..00000000000 --- a/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/internal/client/SearchCallFactoryTest.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.elasticsearch.internal.client; - -import com.linecorp.armeria.client.WebClient; -import org.junit.Test; - -import static java.util.Arrays.asList; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; - -public class SearchCallFactoryTest { - WebClient httpClient = mock(WebClient.class); - - SearchCallFactory client = new SearchCallFactory(new HttpCall.Factory(httpClient)); - - /** Declaring queries alphabetically helps simplify amazon signature logic */ - @Test public void lenientSearchOrdersQueryAlphabetically() { - assertThat(client.lenientSearch(asList("zipkin:span-2016-10-01"), null)) - .endsWith("/_search?allow_no_indices=true&expand_wildcards=open&ignore_unavailable=true"); - } -} diff --git a/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/internal/client/SearchRequestTest.java b/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/internal/client/SearchRequestTest.java deleted file mode 100644 index c956ea44e9e..00000000000 --- a/zipkin-storage/elasticsearch/src/test/java/zipkin2/elasticsearch/internal/client/SearchRequestTest.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.elasticsearch.internal.client; - -import org.junit.Test; - -import static java.util.Arrays.asList; -import static org.assertj.core.api.Assertions.assertThat; -import static zipkin2.elasticsearch.internal.JsonSerializers.OBJECT_MAPPER; - -public class SearchRequestTest { - - SearchRequest request = SearchRequest.create(asList("zipkin-2016.11.31")); - - @Test public void defaultSizeIsMaxResultWindow() { - assertThat(request.size) - .isEqualTo(10000); - } - - /** Indices and type affect the request URI, not the json body */ - @Test public void doesntSerializeIndicesOrType() throws Exception { - assertThat(OBJECT_MAPPER.writeValueAsString(request)) - .isEqualTo("{\"size\":10000}"); - } -} diff --git a/zipkin-storage/elasticsearch/src/test/resources/simplelogger.properties b/zipkin-storage/elasticsearch/src/test/resources/simplelogger.properties deleted file mode 100644 index 2d5a1e94cfb..00000000000 --- a/zipkin-storage/elasticsearch/src/test/resources/simplelogger.properties +++ /dev/null @@ -1,12 +0,0 @@ -# See https://www.slf4j.org/api/org/slf4j/impl/SimpleLogger.html for the full list of config options - -org.slf4j.simpleLogger.logFile=System.out -org.slf4j.simpleLogger.defaultLogLevel=warn -org.slf4j.simpleLogger.showDateTime=true -org.slf4j.simpleLogger.dateTimeFormat=yyyy-MM-dd HH:mm:ss:SSS - -# stop huge spam -org.slf4j.simpleLogger.log.org.testcontainers.dockerclient=off - -# Ensure when ES_DEBUG=true tests dump trace output -org.slf4j.simpleLogger.log.com.linecorp.armeria.client.logging=info diff --git 
a/zipkin-storage/mysql-v1/README.md b/zipkin-storage/mysql-v1/README.md deleted file mode 100644 index 67999d98a8a..00000000000 --- a/zipkin-storage/mysql-v1/README.md +++ /dev/null @@ -1,92 +0,0 @@ -# storage-mysql-v1 -This MySQL (Legacy) storage component includes a blocking `SpanStore` and span consumer function. -`SpanStore.getDependencies()` aggregates dependency links on-demand. - -The implementation uses JOOQ to generate MySQL SQL commands. MySQL 5.6+ -features are used, but tests run against MariaDB 10.3. - -See the [schema DDL](src/main/resources/mysql.sql). - -`zipkin2.storage.mysql.v1.MySQLStorage.Builder` includes defaults that will -operate against a given Datasource. - -## Testing this component -This module conditionally runs integration tests against a Docker managed MySQL container. - -Ex. -``` -$ ./mvnw clean verify -pl :zipkin-storage-mysql-v1 -``` - -If you run tests via Maven or otherwise without Docker, you'll notice tests are silently skipped. -``` -Results : - -Tests run: 49, Failures: 0, Errors: 0, Skipped: 48 -``` - -This behaviour is intentional: We don't want to burden developers with -installing and running all storage options to test unrelated change. -That said, all integration tests run on pull request. - -## Exploring Zipkin Data - -When troubleshooting, it is important to note that zipkin ids are encoded as hex. -If you want to view data in mysql, you'll need to use the hex function accordingly. - -For example, all the below query the same trace using different tools: -* zipkin-ui: `http://1.2.3.4:9411/traces/27960dafb1ea7454` -* zipkin-api: `http://1.2.3.4:9411/api/v1/trace/27960dafb1ea7454?raw` -* mysql: `select * from zipkin_spans where trace_id = x'27960dafb1ea7454';` - -If you are trying to debug from data in the database, it is helpful to -format IDs as hex, and timestamps as dates. The following is an example -query which will return one line for each update to a span in the last -5 minutes. 
- -```sql -SELECT lower(concat(CASE trace_id_high - WHEN '0' THEN '' - ELSE hex(trace_id_high) - END,hex(trace_id))) AS trace_id, - lower(hex(parent_id)) as parent_id, - lower(hex(id)) as span_id, - name, - from_unixtime(start_ts/1000000) as timestamp -FROM zipkin_spans -where (start_ts/1000000) > UNIX_TIMESTAMP(now()) - 5 * 60; -``` - -For example, the output below shows two traces recently reported. One of -which is using 128-bit trace IDs. You could copy and paste the `trace_id` -into zipkin's UI to troubleshoot further. -``` -+----------------------------------+------------------+------------------+------+--------------------------+ -| trace_id | parent_id | span_id | name | timestamp | -+----------------------------------+------------------+------------------+------+--------------------------+ -| abbd9f5da49e5848aa4b729ff2bc90a3 | NULL | aa4b729ff2bc90a3 | get | 2017-04-19 12:43:00.9830 | -| abbd9f5da49e5848aa4b729ff2bc90a3 | aa4b729ff2bc90a3 | 7888a4aef81f074d | get | 2017-04-19 12:43:01.1960 | -| 11b98d7107dac980 | 11b98d7107dac980 | bc33c2d5ad25bf89 | get | 2017-04-19 12:42:45.4240 | -| 11b98d7107dac980 | NULL | 11b98d7107dac980 | get | 2017-04-19 12:42:45.0680 | -+----------------------------------+------------------+------------------+------+--------------------------+ -``` - -## Applying the schema - -```bash -# Barracuda supports compression (In AWS RDS, this must be assigned in a parameter group) -$ mysql -uroot -e "SET GLOBAL innodb_file_format=Barracuda" -# This command should work even in RDS, and return "Barracuda" -$ mysql -uroot -e "show global variables like 'innodb_file_format'" - -# install the schema and indexes -$ mysql -uroot -e "create database if not exists zipkin" -$ mysql -uroot -Dzipkin < zipkin-storage/mysql-v1/src/main/resources/mysql.sql -``` - -## Generating the schema types - -```bash -$ rm -rf rm -rf zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/internal/generated/ -$ ./mvnw -pl :zipkin-storage-mysql-v1 clean 
org.jooq:jooq-codegen-maven:generate com.mycila:license-maven-plugin:format -``` diff --git a/zipkin-storage/mysql-v1/pom.xml b/zipkin-storage/mysql-v1/pom.xml deleted file mode 100644 index 68c7bdcc9fc..00000000000 --- a/zipkin-storage/mysql-v1/pom.xml +++ /dev/null @@ -1,126 +0,0 @@ - - - - 4.0.0 - - - io.zipkin.zipkin2 - zipkin-storage-parent - 2.24.4-SNAPSHOT - - - zipkin-storage-mysql-v1 - Storage: MySQL (v1) - - - ${project.basedir}/../.. - - 3.14.4 - - - -Xep:InconsistentCapitalization:OFF - - - - - org.jooq - jooq - ${jooq.version} - - - - - javax.annotation - javax.annotation-api - ${javax-annotation-api.version} - provided - - - - org.mariadb.jdbc - mariadb-java-client - ${mariadb-java-client.version} - test - - - - org.testcontainers - mysql - ${testcontainers.version} - test - - - - - - - - org.jooq - jooq-codegen-maven - ${jooq.version} - - - - generate - - - - - - org.jooq - jooq - ${jooq.version} - - - org.mariadb.jdbc - mariadb-java-client - ${mariadb-java-client.version} - - - - - org.mariadb.jdbc.Driver - jdbc:mysql://localhost:3306/zipkin - root - - - - - false - false - false - false - - JAVAX_ANNOTATION_GENERATED - - - org.jooq.meta.mysql.MySQLDatabase - .* - - zipkin - - - zipkin2.storage.mysql.v1.internal.generated - src/main/java - - - - - - - - diff --git a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/AggregateDependencies.java b/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/AggregateDependencies.java deleted file mode 100644 index aeb7fbade04..00000000000 --- a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/AggregateDependencies.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.storage.mysql.v1; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; -import java.util.function.Function; -import org.jooq.Cursor; -import org.jooq.DSLContext; -import org.jooq.Record; -import org.jooq.Record1; -import org.jooq.SelectConditionStep; -import zipkin2.DependencyLink; -import zipkin2.Span; -import zipkin2.internal.DependencyLinker; - -import static zipkin2.storage.mysql.v1.internal.generated.tables.ZipkinAnnotations.ZIPKIN_ANNOTATIONS; -import static zipkin2.storage.mysql.v1.internal.generated.tables.ZipkinSpans.ZIPKIN_SPANS; - -final class AggregateDependencies implements Function> { - final Schema schema; - final long startTsBegin, startTsEnd; - - AggregateDependencies(Schema schema, long startTsBegin, long startTsEnd) { - this.schema = schema; - this.startTsBegin = startTsBegin; - this.startTsEnd = startTsEnd; - } - - @Override - public List apply(DSLContext context) { - // Subquery on trace IDs to prevent only matching the part of the trace that exists within - // the interval: we want all of the trace. - SelectConditionStep> traceIDs = context.selectDistinct(ZIPKIN_SPANS.TRACE_ID) - .from(ZIPKIN_SPANS) - .where(startTsBegin == startTsEnd - ? ZIPKIN_SPANS.START_TS.lessOrEqual(startTsEnd) - : ZIPKIN_SPANS.START_TS.between(startTsBegin, startTsEnd)); - // Lazy fetching the cursor prevents us from buffering the whole dataset in memory. 
- Cursor cursor = context.selectDistinct(schema.dependencyLinkerFields) - // left joining allows us to keep a mapping of all span ids, not just ones that have - // special annotations. We need all span ids to reconstruct the trace tree. We need - // the whole trace tree so that we can accurately skip local spans. - .from(ZIPKIN_SPANS.leftJoin(ZIPKIN_ANNOTATIONS) - // NOTE: we are intentionally grouping only on the low-bits of trace id. This - // buys time for applications to upgrade to 128-bit instrumentation. - .on(ZIPKIN_SPANS.TRACE_ID.eq(ZIPKIN_ANNOTATIONS.TRACE_ID) - .and(ZIPKIN_SPANS.ID.eq(ZIPKIN_ANNOTATIONS.SPAN_ID))) - .and(ZIPKIN_ANNOTATIONS.A_KEY.in("lc", "cs", "ca", "sr", "sa", "ma", "mr", "ms", "error"))) - .where(ZIPKIN_SPANS.TRACE_ID.in(traceIDs)) - // Grouping so that later code knows when a span or trace is finished. - .groupBy(schema.dependencyLinkerGroupByFields) - .fetchLazy(); - - Iterator> traces = - new DependencyLinkV2SpanIterator.ByTraceId(cursor.iterator(), schema.hasTraceIdHigh); - - if (!traces.hasNext()) return Collections.emptyList(); - - DependencyLinker linker = new DependencyLinker(); - - List nextTrace = new ArrayList<>(); - while (traces.hasNext()) { - Iterator i = traces.next(); - while (i.hasNext()) nextTrace.add(i.next()); - linker.putTrace(nextTrace); - nextTrace.clear(); - } - - return linker.link(); - } - - @Override - public String toString() { - return "AggregateDependencies{" - + "startTsBegin=" - + startTsBegin - + ", startTsEnd=" - + startTsEnd - + '}'; - } -} diff --git a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/DSLContexts.java b/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/DSLContexts.java deleted file mode 100644 index 0d21859cb8e..00000000000 --- a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/DSLContexts.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); 
you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.storage.mysql.v1; - -import java.sql.Connection; -import org.jooq.DSLContext; -import org.jooq.ExecuteListenerProvider; -import org.jooq.SQLDialect; -import org.jooq.conf.Settings; -import org.jooq.impl.DSL; -import org.jooq.impl.DefaultConfiguration; -import zipkin2.internal.Nullable; - -final class DSLContexts { - private final Settings settings; - private final ExecuteListenerProvider listenerProvider; - - DSLContexts(Settings settings, @Nullable ExecuteListenerProvider listenerProvider) { - this.settings = settings; - this.listenerProvider = listenerProvider; - } - - DSLContext get(Connection conn) { - return DSL.using( - new DefaultConfiguration() - .set(conn) - .set(SQLDialect.MYSQL) - .set(settings) - .set(listenerProvider)); - } -} diff --git a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/DataSourceCall.java b/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/DataSourceCall.java deleted file mode 100644 index fc0cb5f048a..00000000000 --- a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/DataSourceCall.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.storage.mysql.v1; - -import java.io.IOException; -import java.sql.Connection; -import java.sql.SQLException; -import java.util.concurrent.Executor; -import java.util.function.Function; -import javax.sql.DataSource; -import org.jooq.DSLContext; -import zipkin2.Call; -import zipkin2.Callback; - -/** Uncancelable call built with an executor */ -final class DataSourceCall extends Call.Base { - - static final class Factory { - final DataSource datasource; - final DSLContexts context; - final Executor executor; - - Factory(DataSource datasource, DSLContexts context, Executor executor) { - this.datasource = datasource; - this.context = context; - this.executor = executor; - } - - DataSourceCall create(Function queryFunction) { - return new DataSourceCall<>(this, queryFunction); - } - } - - final Factory factory; - final Function queryFunction; - - DataSourceCall(Factory factory, Function queryFunction) { - this.factory = factory; - this.queryFunction = queryFunction; - } - - @Override - protected final V doExecute() throws IOException { - try (Connection conn = factory.datasource.getConnection()) { - DSLContext context = factory.context.get(conn); - return queryFunction.apply(context); - } catch (SQLException e) { - throw new IOException(e); - } - } - - @Override - protected void doEnqueue(Callback callback) { - class CallbackRunnable implements Runnable { - @Override - public void run() { - try { - callback.onSuccess(doExecute()); - } catch (IOException e) { - // unwrap the exception - if (e.getCause() instanceof SQLException) { - 
callback.onError(e.getCause()); - } else { - callback.onError(e); - } - } catch (Throwable t) { - propagateIfFatal(t); - callback.onError(t); - } - } - } - factory.executor.execute(new CallbackRunnable()); - } - - @Override - public String toString() { - return queryFunction.toString(); - } - - @Override - public Call clone() { - return new DataSourceCall<>(factory, queryFunction); - } -} diff --git a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/DependencyLinkV2SpanIterator.java b/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/DependencyLinkV2SpanIterator.java deleted file mode 100644 index e4455ceb893..00000000000 --- a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/DependencyLinkV2SpanIterator.java +++ /dev/null @@ -1,201 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.storage.mysql.v1; - -import java.util.Iterator; -import java.util.NoSuchElementException; -import org.jooq.Record; -import org.jooq.TableField; -import zipkin2.Endpoint; -import zipkin2.Span; -import zipkin2.internal.Nullable; -import zipkin2.storage.mysql.v1.internal.generated.tables.ZipkinSpans; -import zipkin2.v1.V1BinaryAnnotation; - -import static zipkin2.storage.mysql.v1.Schema.maybeGet; -import static zipkin2.storage.mysql.v1.internal.generated.tables.ZipkinAnnotations.ZIPKIN_ANNOTATIONS; - -/** - * Lazy converts rows into {@linkplain Span} objects suitable for dependency links. This takes - * short-cuts to require less data. For example, it folds shared RPC spans into one, and doesn't - * include tags, non-core annotations or time units. - * - *

Out-of-date schemas may be missing the trace_id_high field. When present, the {@link - * Span#traceId()} could be 32 characters in logging statements. - */ -final class DependencyLinkV2SpanIterator implements Iterator { - - /** Assumes the input records are sorted by trace id, span id */ - static final class ByTraceId implements Iterator> { - final PeekingIterator delegate; - final boolean hasTraceIdHigh; - - long currentTraceIdHi, currentTraceIdLo; - - ByTraceId(Iterator delegate, boolean hasTraceIdHigh) { - this.delegate = new PeekingIterator<>(delegate); - this.hasTraceIdHigh = hasTraceIdHigh; - } - - @Override - public boolean hasNext() { - return delegate.hasNext(); - } - - @Override - public Iterator next() { - if (!hasNext()) throw new NoSuchElementException(); - currentTraceIdHi = hasTraceIdHigh ? traceIdHigh(delegate) : 0L; - currentTraceIdLo = delegate.peek().getValue(ZipkinSpans.ZIPKIN_SPANS.TRACE_ID); - return new DependencyLinkV2SpanIterator(delegate, currentTraceIdHi, currentTraceIdLo); - } - - @Override - public void remove() { - throw new UnsupportedOperationException(); - } - } - - final PeekingIterator delegate; - final long traceIdHi, traceIdLo; - - DependencyLinkV2SpanIterator(PeekingIterator delegate, long traceIdHi, long traceIdLo) { - this.delegate = delegate; - this.traceIdHi = traceIdHi; - this.traceIdLo = traceIdLo; - } - - @Override - public boolean hasNext() { - return delegate.hasNext() - // We don't have a query parameter for strictTraceId when fetching dependency links, so we - // ignore traceIdHigh. Otherwise, a single trace can appear as two, doubling callCount. 
- && delegate.peek().getValue(ZipkinSpans.ZIPKIN_SPANS.TRACE_ID) == traceIdLo; - } - - @Override - public Span next() { - if (!hasNext()) throw new NoSuchElementException(); - Record row = delegate.peek(); - - long spanId = row.getValue(ZipkinSpans.ZIPKIN_SPANS.ID); - boolean error = false; - String lcService = null, srService = null, csService = null, caService = null, saService = null, - maService = null, mrService = null, msService = null; - while (hasNext()) { // there are more values for this trace - if (spanId != delegate.peek().getValue(ZipkinSpans.ZIPKIN_SPANS.ID)) { - break; // if we are in a new span - } - Record next = delegate.next(); // row for the same span - - String key = emptyToNull(next, ZIPKIN_ANNOTATIONS.A_KEY); - String value = emptyToNull(next, ZIPKIN_ANNOTATIONS.ENDPOINT_SERVICE_NAME); - if (key == null || value == null) continue; // neither client nor server - switch (key) { - case "lc": - lcService = value; - break; - case "ca": - caService = value; - break; - case "cs": - csService = value; - break; - case "ma": - maService = value; - break; - case "mr": - mrService = value; - break; - case "ms": - msService = value; - break; - case "sa": - saService = value; - break; - case "sr": - srService = value; - break; - case "error": - // a span is in error if it has a tag, not an annotation, of name "error" - error = V1BinaryAnnotation.TYPE_STRING == next.get(ZIPKIN_ANNOTATIONS.A_TYPE); - } - } - - // The client address is more authoritative than the client send owner. - if (caService == null) caService = csService; - - // Finagle labels two sides of the same socket ("ca", "sa") with the same name. 
- // Skip the client side, so it isn't mistaken for a loopback request - if (saService != null && saService.equals(caService)) caService = null; - - long parentId = maybeGet(row, ZipkinSpans.ZIPKIN_SPANS.PARENT_ID, 0L); - Span.Builder result = - Span.newBuilder().traceId(traceIdHi, traceIdLo).parentId(parentId).id(spanId); - - if (error) { - result.putTag("error", "" /* actual value doesn't matter */); - } - - if (srService != null) { - return result - .kind(Span.Kind.SERVER) - .localEndpoint(ep(srService)) - .remoteEndpoint(ep(caService)) - .build(); - } else if (saService != null) { - Endpoint localEndpoint = ep(caService); - // When span.kind is missing, the local endpoint is "lc" and the remote endpoint is "sa" - if (localEndpoint == null) localEndpoint = ep(lcService); - return result - .kind(csService != null ? Span.Kind.CLIENT : null) - .localEndpoint(localEndpoint) - .remoteEndpoint(ep(saService)) - .build(); - } else if (csService != null) { - return result.kind(Span.Kind.SERVER).localEndpoint(ep(caService)).build(); - } else if (mrService != null) { - return result - .kind(Span.Kind.CONSUMER) - .localEndpoint(ep(mrService)) - .remoteEndpoint(ep(maService)) - .build(); - } else if (msService != null) { - return result - .kind(Span.Kind.PRODUCER) - .localEndpoint(ep(msService)) - .remoteEndpoint(ep(maService)) - .build(); - } - return result.build(); - } - - @Override - public void remove() { - throw new UnsupportedOperationException(); - } - - static long traceIdHigh(PeekingIterator delegate) { - return delegate.peek().getValue(ZipkinSpans.ZIPKIN_SPANS.TRACE_ID_HIGH); - } - - static @Nullable String emptyToNull(Record next, TableField field) { - String result = next.getValue(field); - return result != null && !"".equals(result) ? result : null; - } - - static Endpoint ep(@Nullable String serviceName) { - return serviceName != null ? 
Endpoint.newBuilder().serviceName(serviceName).build() : null; - } -} diff --git a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/HasErrorCount.java b/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/HasErrorCount.java deleted file mode 100644 index 6c64c2b0e18..00000000000 --- a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/HasErrorCount.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.storage.mysql.v1; - -import java.sql.Connection; -import java.sql.SQLException; -import java.util.logging.Level; -import java.util.logging.Logger; -import javax.sql.DataSource; -import org.jooq.DSLContext; -import org.jooq.exception.DataAccessException; - -import static zipkin2.storage.mysql.v1.internal.generated.tables.ZipkinDependencies.ZIPKIN_DEPENDENCIES; - -final class HasErrorCount { - private static final Logger LOG = Logger.getLogger(HasErrorCount.class.getName()); - - static boolean test(DataSource datasource, DSLContexts context) { - try (Connection conn = datasource.getConnection()) { - DSLContext dsl = context.get(conn); - dsl.select(ZIPKIN_DEPENDENCIES.ERROR_COUNT).from(ZIPKIN_DEPENDENCIES).limit(1).fetchAny(); - return true; - } catch (DataAccessException e) { - if (e.sqlState().equals("42S22")) { - LOG.warning( - "zipkin_dependencies.error_count doesn't exist, so DependencyLink.errorCount is not supported. 
" - + "Execute: alter table zipkin_dependencies add `error_count` BIGINT"); - return false; - } - problemReading(e); - } catch (SQLException | RuntimeException e) { - problemReading(e); - } - return false; - } - - static void problemReading(Exception e) { - LOG.log(Level.WARNING, "problem reading zipkin_dependencies.error_count", e); - } -} diff --git a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/HasIpv6.java b/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/HasIpv6.java deleted file mode 100644 index 46009fc4ab9..00000000000 --- a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/HasIpv6.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.storage.mysql.v1; - -import java.sql.Connection; -import java.sql.SQLException; -import java.util.logging.Level; -import java.util.logging.Logger; -import javax.sql.DataSource; -import org.jooq.DSLContext; -import org.jooq.exception.DataAccessException; - -import static zipkin2.storage.mysql.v1.internal.generated.tables.ZipkinAnnotations.ZIPKIN_ANNOTATIONS; - -final class HasIpv6 { - private static final Logger LOG = Logger.getLogger(HasIpv6.class.getName()); - - static boolean test(DataSource datasource, DSLContexts context) { - try (Connection conn = datasource.getConnection()) { - DSLContext dsl = context.get(conn); - dsl.select(ZIPKIN_ANNOTATIONS.ENDPOINT_IPV6).from(ZIPKIN_ANNOTATIONS).limit(1).fetchAny(); - return true; - } catch (DataAccessException e) { - if (e.sqlState().equals("42S22")) { - LOG.warning( - "zipkin_annotations.ipv6 doesn't exist, so Endpoint.ipv6 is not supported. " - + "Execute: alter table zipkin_annotations add `endpoint_ipv6` BINARY(16)"); - return false; - } - problemReading(e); - } catch (SQLException | RuntimeException e) { - problemReading(e); - } - return false; - } - - static void problemReading(Exception e) { - LOG.log(Level.WARNING, "problem reading zipkin_annotations.ipv6", e); - } -} diff --git a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/HasPreAggregatedDependencies.java b/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/HasPreAggregatedDependencies.java deleted file mode 100644 index 12b16c6c98e..00000000000 --- a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/HasPreAggregatedDependencies.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.storage.mysql.v1; - -import java.sql.Connection; -import java.sql.SQLException; -import java.util.logging.Level; -import java.util.logging.Logger; -import javax.sql.DataSource; -import org.jooq.DSLContext; -import org.jooq.exception.DataAccessException; - -import static org.jooq.impl.DSL.count; -import static zipkin2.storage.mysql.v1.internal.generated.tables.ZipkinDependencies.ZIPKIN_DEPENDENCIES; - -/** - * Returns true when the zipkin_dependencies table exists and has data in it, implying the spark job - * has been run. - */ -final class HasPreAggregatedDependencies { - private static final Logger LOG = Logger.getLogger(HasPreAggregatedDependencies.class.getName()); - - static boolean test(DataSource datasource, DSLContexts context) { - try (Connection conn = datasource.getConnection()) { - DSLContext dsl = context.get(conn); - return dsl.select(count()).from(ZIPKIN_DEPENDENCIES).fetchAny().value1() > 0; - } catch (DataAccessException e) { - if (e.sqlState().equals("42S02")) { - LOG.warning( - "zipkin_dependencies doesn't exist, so pre-aggregated dependencies are not " - + "supported. 
Execute mysql.sql located in this jar to add the table"); - return false; - } - problemReading(e); - } catch (SQLException | RuntimeException e) { - problemReading(e); - } - return false; - } - - static void problemReading(Exception e) { - LOG.log(Level.WARNING, "problem reading zipkin_dependencies", e); - } -} diff --git a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/HasRemoteServiceName.java b/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/HasRemoteServiceName.java deleted file mode 100644 index aa7aede5d43..00000000000 --- a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/HasRemoteServiceName.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.storage.mysql.v1; - -import java.sql.Connection; -import java.sql.SQLException; -import java.util.logging.Level; -import java.util.logging.Logger; -import javax.sql.DataSource; -import org.jooq.DSLContext; -import org.jooq.exception.DataAccessException; - -import static zipkin2.storage.mysql.v1.internal.generated.tables.ZipkinSpans.ZIPKIN_SPANS; - -final class HasRemoteServiceName { - static final Logger LOG = Logger.getLogger(HasRemoteServiceName.class.getName()); - static final String MESSAGE = - "zipkin_spans.remote_service_name doesn't exist, so queries for remote service names will return empty.\n" - + "Execute: ALTER TABLE zipkin_spans ADD `remote_service_name` VARCHAR(255);\n" - + "ALTER TABLE zipkin_spans ADD INDEX `remote_service_name`;"; - - static boolean test(DataSource datasource, DSLContexts context) { - try (Connection conn = datasource.getConnection()) { - DSLContext dsl = context.get(conn); - dsl.select(ZIPKIN_SPANS.REMOTE_SERVICE_NAME).from(ZIPKIN_SPANS).limit(1).fetchAny(); - return true; - } catch (DataAccessException e) { - if (e.sqlState().equals("42S22")) { - LOG.warning(MESSAGE); - return false; - } - problemReading(e); - } catch (SQLException | RuntimeException e) { - problemReading(e); - } - return false; - } - - static void problemReading(Exception e) { - LOG.log(Level.WARNING, "problem reading zipkin_spans.remote_service_name", e); - } -} diff --git a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/HasTraceIdHigh.java b/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/HasTraceIdHigh.java deleted file mode 100644 index 8531ded8128..00000000000 --- a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/HasTraceIdHigh.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.storage.mysql.v1; - -import java.sql.Connection; -import java.sql.SQLException; -import java.util.logging.Level; -import java.util.logging.Logger; -import javax.sql.DataSource; -import org.jooq.DSLContext; -import org.jooq.exception.DataAccessException; - -import static zipkin2.storage.mysql.v1.internal.generated.tables.ZipkinSpans.ZIPKIN_SPANS; - -final class HasTraceIdHigh { - static final Logger LOG = Logger.getLogger(HasTraceIdHigh.class.getName()); - static final String MESSAGE = - "zipkin_spans.trace_id_high doesn't exist, so 128-bit trace ids are not supported. 
" - + "Execute: ALTER TABLE zipkin_spans ADD `trace_id_high` BIGINT NOT NULL DEFAULT 0;\n" - + "ALTER TABLE zipkin_annotations ADD `trace_id_high` BIGINT NOT NULL DEFAULT 0;\n" - + "ALTER TABLE zipkin_spans" - + " DROP INDEX trace_id,\n" - + " ADD UNIQUE KEY(`trace_id_high`, `trace_id`, `id`);\n" - + "ALTER TABLE zipkin_annotations\n" - + " DROP INDEX trace_id,\n" - + " ADD UNIQUE KEY(`trace_id_high`, `trace_id`, `span_id`, `a_key`, `a_timestamp`);"; - - static boolean test(DataSource datasource, DSLContexts context) { - try (Connection conn = datasource.getConnection()) { - DSLContext dsl = context.get(conn); - dsl.select(ZIPKIN_SPANS.TRACE_ID_HIGH).from(ZIPKIN_SPANS).limit(1).fetchAny(); - return true; - } catch (DataAccessException e) { - if (e.sqlState().equals("42S22")) { - LOG.warning(MESSAGE); - return false; - } - problemReading(e); - } catch (SQLException | RuntimeException e) { - problemReading(e); - } - return false; - } - - static void problemReading(Exception e) { - LOG.log(Level.WARNING, "problem reading zipkin_spans.trace_id_high", e); - } -} diff --git a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/MySQLAutocompleteTags.java b/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/MySQLAutocompleteTags.java deleted file mode 100644 index 10a13c6e5c6..00000000000 --- a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/MySQLAutocompleteTags.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. 
See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.storage.mysql.v1; - -import java.util.LinkedHashSet; -import java.util.List; -import zipkin2.Call; -import zipkin2.storage.AutocompleteTags; - -final class MySQLAutocompleteTags implements AutocompleteTags { - final DataSourceCall.Factory dataSourceCallFactory; - final Schema schema; - final boolean enabled; - final LinkedHashSet autocompleteKeys; - final Call> keysCall; - - MySQLAutocompleteTags(MySQLStorage storage, Schema schema) { - this.dataSourceCallFactory = storage.dataSourceCallFactory; - this.schema = schema; - enabled = storage.searchEnabled && !storage.autocompleteKeys.isEmpty(); - autocompleteKeys = new LinkedHashSet<>(storage.autocompleteKeys); - keysCall = Call.create(storage.autocompleteKeys); - } - - @Override public Call> getKeys() { - if (!enabled) return Call.emptyList(); - return keysCall.clone(); - } - - @Override public Call> getValues(String key) { - if (key == null) throw new NullPointerException("key == null"); - if (key.isEmpty()) throw new IllegalArgumentException("key was empty"); - if (!enabled || !autocompleteKeys.contains(key)) return Call.emptyList(); - return dataSourceCallFactory.create(new SelectAutocompleteValues(schema, key)); - } -} diff --git a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/MySQLSpanConsumer.java b/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/MySQLSpanConsumer.java deleted file mode 100644 index 927d1fa7e92..00000000000 --- a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/MySQLSpanConsumer.java +++ /dev/null @@ -1,202 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.storage.mysql.v1; - -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.function.Function; -import org.jooq.DSLContext; -import org.jooq.InsertSetMoreStep; -import org.jooq.Query; -import org.jooq.Record; -import org.jooq.TableField; -import zipkin2.Call; -import zipkin2.Endpoint; -import zipkin2.Span; -import zipkin2.internal.Nullable; -import zipkin2.storage.SpanConsumer; -import zipkin2.v1.V1Annotation; -import zipkin2.v1.V1BinaryAnnotation; -import zipkin2.v1.V1Span; -import zipkin2.v1.V2SpanConverter; - -import static java.nio.charset.StandardCharsets.UTF_8; -import static zipkin2.storage.mysql.v1.internal.generated.tables.ZipkinAnnotations.ZIPKIN_ANNOTATIONS; -import static zipkin2.storage.mysql.v1.internal.generated.tables.ZipkinSpans.ZIPKIN_SPANS; - -final class MySQLSpanConsumer implements SpanConsumer { - static final byte[] ONE = {1}; - - final DataSourceCall.Factory dataSourceCallFactory; - final Schema schema; - - MySQLSpanConsumer(DataSourceCall.Factory dataSourceCallFactory, Schema schema) { - this.dataSourceCallFactory = dataSourceCallFactory; - this.schema = schema; - } - - @Override - public Call accept(List spans) { - if (spans.isEmpty()) return Call.create(null); - return dataSourceCallFactory.create(new BatchInsertSpans(spans, schema)); - } - - static final class BatchInsertSpans implements Function { - final List spans; - final Schema schema; - - BatchInsertSpans(List spans, Schema schema) { - this.spans = 
spans; - this.schema = schema; - } - - @Override - public Void apply(DSLContext create) { - List inserts = new ArrayList<>(); - V2SpanConverter v2SpanConverter = V2SpanConverter.create(); - - for (Span v2 : spans) { - Endpoint ep = v2.localEndpoint(); - long timestamp = v2.timestampAsLong(); - - V1Span v1Span = v2SpanConverter.convert(v2); - - long traceId, spanId; - InsertSetMoreStep insertSpan = - create - .insertInto(ZIPKIN_SPANS) - .set(ZIPKIN_SPANS.TRACE_ID, traceId = v1Span.traceId()) - .set(ZIPKIN_SPANS.ID, spanId = v1Span.id()) - .set(ZIPKIN_SPANS.DEBUG, v1Span.debug()); - - Map, Object> updateFields = new LinkedHashMap<>(); - if (timestamp != 0L) { - // tentatively we can use even a shared timestamp - insertSpan.set(ZIPKIN_SPANS.START_TS, timestamp); - // replace any tentative timestamp with the authoritative one. - if (!Boolean.TRUE.equals(v2.shared())) updateFields.put(ZIPKIN_SPANS.START_TS, timestamp); - } - - updateName(v1Span.name(), ZIPKIN_SPANS.NAME, insertSpan, updateFields); - if (schema.hasRemoteServiceName) { - updateName(v2.remoteServiceName(), ZIPKIN_SPANS.REMOTE_SERVICE_NAME, insertSpan, updateFields); - } - - long duration = v1Span.duration(); - if (duration != 0L) { - insertSpan.set(ZIPKIN_SPANS.DURATION, duration); - updateFields.put(ZIPKIN_SPANS.DURATION, duration); - } - - if (v1Span.parentId() != 0) { - insertSpan.set(ZIPKIN_SPANS.PARENT_ID, v1Span.parentId()); - updateFields.put(ZIPKIN_SPANS.PARENT_ID, v1Span.parentId()); - } - - long traceIdHigh = schema.hasTraceIdHigh ? v1Span.traceIdHigh() : 0L; - if (traceIdHigh != 0L) { - insertSpan.set(ZIPKIN_SPANS.TRACE_ID_HIGH, traceIdHigh); - } - - inserts.add( - updateFields.isEmpty() - ? insertSpan.onDuplicateKeyIgnore() - : insertSpan.onDuplicateKeyUpdate().set(updateFields)); - - int ipv4 = - ep != null && ep.ipv4Bytes() != null ? 
ByteBuffer.wrap(ep.ipv4Bytes()).getInt() : 0; - for (V1Annotation a : v1Span.annotations()) { - InsertSetMoreStep insert = - create - .insertInto(ZIPKIN_ANNOTATIONS) - .set(ZIPKIN_ANNOTATIONS.TRACE_ID, traceId) - .set(ZIPKIN_ANNOTATIONS.SPAN_ID, spanId) - .set(ZIPKIN_ANNOTATIONS.A_KEY, a.value()) - .set(ZIPKIN_ANNOTATIONS.A_TYPE, -1) - .set(ZIPKIN_ANNOTATIONS.A_TIMESTAMP, a.timestamp()); - if (traceIdHigh != 0L) { - insert.set(ZIPKIN_ANNOTATIONS.TRACE_ID_HIGH, traceIdHigh); - } - addEndpoint(insert, ep, ipv4); - inserts.add(insert.onDuplicateKeyIgnore()); - } - - for (V1BinaryAnnotation ba : v1Span.binaryAnnotations()) { - InsertSetMoreStep insert = - create - .insertInto(ZIPKIN_ANNOTATIONS) - .set(ZIPKIN_ANNOTATIONS.TRACE_ID, traceId) - .set(ZIPKIN_ANNOTATIONS.SPAN_ID, spanId) - .set(ZIPKIN_ANNOTATIONS.A_KEY, ba.key()) - .set(ZIPKIN_ANNOTATIONS.A_TYPE, ba.type()) - .set(ZIPKIN_ANNOTATIONS.A_TIMESTAMP, timestamp); - if (traceIdHigh != 0) { - insert.set(ZIPKIN_ANNOTATIONS.TRACE_ID_HIGH, traceIdHigh); - } - if (ba.stringValue() != null) { - insert.set(ZIPKIN_ANNOTATIONS.A_VALUE, ba.stringValue().getBytes(UTF_8)); - addEndpoint(insert, ep, ipv4); - } else { // add the address annotation - insert.set(ZIPKIN_ANNOTATIONS.A_VALUE, ONE); - Endpoint nextEp = ba.endpoint(); - addEndpoint( - insert, - nextEp, - nextEp.ipv4Bytes() != null ? ByteBuffer.wrap(nextEp.ipv4Bytes()).getInt() : 0); - } - inserts.add(insert.onDuplicateKeyIgnore()); - } - } - // TODO: See if DSLContext.batchMerge() can be used to avoid some of the complexity - // https://github.com/jOOQ/jOOQ/issues/3172 - create.batch(inserts).execute(); - return null; - } - - void addEndpoint(InsertSetMoreStep insert, Endpoint ep, int ipv4) { - if (ep == null) return; - // old code wrote empty service names - String serviceName = ep.serviceName() != null ? 
ep.serviceName() : ""; - insert.set(ZIPKIN_ANNOTATIONS.ENDPOINT_SERVICE_NAME, serviceName); - if (ipv4 != 0) { - insert.set(ZIPKIN_ANNOTATIONS.ENDPOINT_IPV4, ipv4); - } - if (ep.ipv6Bytes() != null && schema.hasIpv6) { - insert.set(ZIPKIN_ANNOTATIONS.ENDPOINT_IPV6, ep.ipv6Bytes()); - } - if (ep.portAsInt() != 0) { - insert.set(ZIPKIN_ANNOTATIONS.ENDPOINT_PORT, (short) ep.portAsInt()); - } - } - - @Override - public String toString() { - return "BatchInsertSpansAndAnnotations{spans=" + spans + "}"; - } - } - - static void updateName(@Nullable String name, TableField column, - InsertSetMoreStep insertSpan, Map, Object> updateFields) { - if (name != null && !name.equals("unknown")) { - insertSpan.set(column, name); - updateFields.put(column, name); - } else { - // old code wrote empty span name - insertSpan.set(column, ""); - } - } -} diff --git a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/MySQLSpanStore.java b/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/MySQLSpanStore.java deleted file mode 100644 index 9000412cd64..00000000000 --- a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/MySQLSpanStore.java +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.storage.mysql.v1; - -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Set; -import zipkin2.Call; -import zipkin2.DependencyLink; -import zipkin2.Span; -import zipkin2.storage.GroupByTraceId; -import zipkin2.storage.QueryRequest; -import zipkin2.storage.ServiceAndSpanNames; -import zipkin2.storage.SpanStore; -import zipkin2.storage.StrictTraceId; -import zipkin2.storage.Traces; - -import static zipkin2.internal.DateUtil.epochDays; -import static zipkin2.internal.HexCodec.lowerHexToUnsignedLong; - -final class MySQLSpanStore implements SpanStore, Traces, ServiceAndSpanNames { - - final DataSourceCall.Factory dataSourceCallFactory; - final Schema schema; - final boolean strictTraceId, searchEnabled; - final SelectSpansAndAnnotations.Factory selectFromSpansAndAnnotationsFactory; - final Call.Mapper, List>> groupByTraceId; - final DataSourceCall> getServiceNamesCall; - - MySQLSpanStore(MySQLStorage storage, Schema schema) { - this.dataSourceCallFactory = storage.dataSourceCallFactory; - this.schema = schema; - this.strictTraceId = storage.strictTraceId; - this.searchEnabled = storage.searchEnabled; - this.selectFromSpansAndAnnotationsFactory = - new SelectSpansAndAnnotations.Factory(schema, strictTraceId); - this.groupByTraceId = GroupByTraceId.create(strictTraceId); - this.getServiceNamesCall = dataSourceCallFactory.create(new SelectAnnotationServiceNames()); - } - - @Override public Call>> getTraces(QueryRequest request) { - if (!searchEnabled) return Call.emptyList(); - - Call>> result = - dataSourceCallFactory - .create(selectFromSpansAndAnnotationsFactory.create(request)) - .map(groupByTraceId); - - return strictTraceId ? result.map(StrictTraceId.filterTraces(request)) : result; - } - - @Override public Call> getTrace(String hexTraceId) { - // make sure we have a 16 or 32 character trace ID - hexTraceId = Span.normalizeTraceId(hexTraceId); - long traceIdHigh = hexTraceId.length() == 32 ? 
lowerHexToUnsignedLong(hexTraceId, 0) : 0L; - long traceId = lowerHexToUnsignedLong(hexTraceId); - - DataSourceCall> result = - dataSourceCallFactory.create( - selectFromSpansAndAnnotationsFactory.create(traceIdHigh, traceId)); - return strictTraceId ? result.map(StrictTraceId.filterSpans(hexTraceId)) : result; - } - - @Override public Call>> getTraces(Iterable traceIds) { - Set normalizedTraceIds = new LinkedHashSet<>(); - Set traceIdPairs = new LinkedHashSet<>(); - for (String traceId : traceIds) { - // make sure we have a 16 or 32 character trace ID - String hexTraceId = Span.normalizeTraceId(traceId); - normalizedTraceIds.add(hexTraceId); - traceIdPairs.add(new Pair( - hexTraceId.length() == 32 ? lowerHexToUnsignedLong(hexTraceId, 0) : 0L, - lowerHexToUnsignedLong(hexTraceId) - ) - ); - } - - if (traceIdPairs.isEmpty()) return Call.emptyList(); - Call>> result = dataSourceCallFactory - .create(selectFromSpansAndAnnotationsFactory.create(traceIdPairs)) - .map(groupByTraceId); - - return strictTraceId ? 
result.map(StrictTraceId.filterTraces(normalizedTraceIds)) : result; - } - - @Override public Call> getServiceNames() { - if (!searchEnabled) return Call.emptyList(); - return getServiceNamesCall.clone(); - } - - @Override public Call> getRemoteServiceNames(String serviceName) { - if (serviceName.isEmpty() || !searchEnabled || !schema.hasRemoteServiceName) { - return Call.emptyList(); - } - return dataSourceCallFactory.create(new SelectRemoteServiceNames(schema, serviceName)); - } - - @Override public Call> getSpanNames(String serviceName) { - if (serviceName.isEmpty() || !searchEnabled) return Call.emptyList(); - return dataSourceCallFactory.create(new SelectSpanNames(schema, serviceName)); - } - - @Override public Call> getDependencies(long endTs, long lookback) { - if (endTs <= 0) throw new IllegalArgumentException("endTs <= 0"); - if (lookback <= 0) throw new IllegalArgumentException("lookback <= 0"); - - if (schema.hasPreAggregatedDependencies) { - return dataSourceCallFactory.create(new SelectDependencies(schema, epochDays(endTs, lookback))); - } - return dataSourceCallFactory.create( - new AggregateDependencies(schema, endTs * 1000 - lookback * 1000, endTs * 1000)); - } -} diff --git a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/MySQLStorage.java b/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/MySQLStorage.java deleted file mode 100644 index bf4f7adc264..00000000000 --- a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/MySQLStorage.java +++ /dev/null @@ -1,186 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.storage.mysql.v1; - -import java.sql.Connection; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.Executor; -import javax.sql.DataSource; -import org.jooq.ExecuteListenerProvider; -import org.jooq.conf.Settings; -import zipkin2.Call; -import zipkin2.CheckResult; -import zipkin2.internal.Nullable; -import zipkin2.storage.AutocompleteTags; -import zipkin2.storage.ServiceAndSpanNames; -import zipkin2.storage.SpanConsumer; -import zipkin2.storage.SpanStore; -import zipkin2.storage.StorageComponent; -import zipkin2.storage.Traces; - -import static zipkin2.storage.mysql.v1.internal.generated.tables.ZipkinAnnotations.ZIPKIN_ANNOTATIONS; -import static zipkin2.storage.mysql.v1.internal.generated.tables.ZipkinDependencies.ZIPKIN_DEPENDENCIES; -import static zipkin2.storage.mysql.v1.internal.generated.tables.ZipkinSpans.ZIPKIN_SPANS; - -public final class MySQLStorage extends StorageComponent { - public static Builder newBuilder() { - return new Builder(); - } - - public static final class Builder extends StorageComponent.Builder { - boolean strictTraceId = true, searchEnabled = true; - private DataSource datasource; - private Settings settings = new Settings().withRenderSchema(false); - private ExecuteListenerProvider listenerProvider; - private Executor executor; - List autocompleteKeys = new ArrayList<>(); - - @Override public Builder strictTraceId(boolean strictTraceId) { - this.strictTraceId = strictTraceId; - return this; - } - - @Override public Builder searchEnabled(boolean 
searchEnabled) { - this.searchEnabled = searchEnabled; - return this; - } - - @Override public Builder autocompleteKeys(List keys) { - if (keys == null) throw new NullPointerException("keys == null"); - this.autocompleteKeys = keys; - return this; - } - - public Builder datasource(DataSource datasource) { - if (datasource == null) throw new NullPointerException("datasource == null"); - this.datasource = datasource; - return this; - } - - public Builder settings(Settings settings) { - if (settings == null) throw new NullPointerException("settings == null"); - this.settings = settings; - return this; - } - - public Builder listenerProvider(@Nullable ExecuteListenerProvider listenerProvider) { - this.listenerProvider = listenerProvider; - return this; - } - - public Builder executor(Executor executor) { - if (executor == null) throw new NullPointerException("executor == null"); - this.executor = executor; - return this; - } - - @Override public MySQLStorage build() { - return new MySQLStorage(this); - } - - Builder() { - } - } - - static { - System.setProperty("org.jooq.no-logo", "true"); - } - - final DataSource datasource; - final DataSourceCall.Factory dataSourceCallFactory; - final DSLContexts context; - final boolean strictTraceId, searchEnabled; - final List autocompleteKeys; - volatile Schema schema; - - MySQLStorage(MySQLStorage.Builder builder) { - datasource = builder.datasource; - if (datasource == null) throw new NullPointerException("datasource == null"); - Executor executor = builder.executor; - if (executor == null) throw new NullPointerException("executor == null"); - context = new DSLContexts(builder.settings, builder.listenerProvider); - dataSourceCallFactory = new DataSourceCall.Factory(datasource, context, executor); - strictTraceId = builder.strictTraceId; - searchEnabled = builder.searchEnabled; - autocompleteKeys = builder.autocompleteKeys; - } - - /** Returns the session in use by this storage component. 
*/ - public DataSource datasource() { - return datasource; - } - - /** Lazy to avoid eager I/O */ - Schema schema() { - if (schema == null) { - synchronized (this) { - if (schema == null) { - schema = new Schema(datasource, context, strictTraceId); - } - } - } - return schema; - } - - @Override public SpanStore spanStore() { - return new MySQLSpanStore(this, schema()); - } - - @Override public Traces traces() { - return (Traces) spanStore(); - } - - @Override public ServiceAndSpanNames serviceAndSpanNames() { - return (ServiceAndSpanNames) spanStore(); - } - - @Override public AutocompleteTags autocompleteTags() { - return new MySQLAutocompleteTags(this, schema()); - } - - @Override public SpanConsumer spanConsumer() { - return new MySQLSpanConsumer(dataSourceCallFactory, schema()); - } - - @Override public CheckResult check() { - try (Connection conn = datasource.getConnection()) { - context.get(conn).select(ZIPKIN_SPANS.TRACE_ID).from(ZIPKIN_SPANS).limit(1).execute(); - } catch (Throwable e) { - Call.propagateIfFatal(e); - return CheckResult.failed(e); - } - return CheckResult.OK; - } - - @Override public final String toString() { - return "MySQLStorage{datasource=" + datasource + "}"; - } - - @Override public void close() { - // didn't open the DataSource or executor - } - - /** Visible for testing */ - void clear() { - try (Connection conn = datasource.getConnection()) { - context.get(conn).truncate(ZIPKIN_SPANS).execute(); - context.get(conn).truncate(ZIPKIN_ANNOTATIONS).execute(); - context.get(conn).truncate(ZIPKIN_DEPENDENCIES).execute(); - } catch (SQLException | RuntimeException e) { - throw new AssertionError(e); - } - } -} diff --git a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/Pair.java b/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/Pair.java deleted file mode 100644 index e932bf45a84..00000000000 --- a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/Pair.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * 
Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.storage.mysql.v1; - -final class Pair { - final long left, right; - - Pair(long left, long right) { - this.left = left; - this.right = right; - } - - @Override - public boolean equals(Object o) { - if (o == this) return true; - if (!(o instanceof Pair)) return false; - Pair that = (Pair) o; - return left == that.left && right == that.right; - } - - @Override - public int hashCode() { - int h$ = 1; - h$ *= 1000003; - h$ ^= (int) (h$ ^ ((left >>> 32) ^ left)); - h$ *= 1000003; - h$ ^= (int) (h$ ^ ((right >>> 32) ^ right)); - return h$; - } -} diff --git a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/PeekingIterator.java b/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/PeekingIterator.java deleted file mode 100644 index e689f165944..00000000000 --- a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/PeekingIterator.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.storage.mysql.v1; - -import java.util.Iterator; -import java.util.NoSuchElementException; - -/** adapted from guava's {@code com.google.common.collect.AbstractIterator}. */ -class PeekingIterator implements Iterator { - - private final Iterator delegate; - private PeekingIterator.State state = State.NOT_READY; - private T next; - - PeekingIterator(Iterator delegate) { - if (delegate == null) throw new NullPointerException("delegate == null"); - this.delegate = delegate; - } - - protected T computeNext() { - if (delegate.hasNext()) { - return delegate.next(); - } - return endOfData(); - } - - protected final T endOfData() { - state = State.DONE; - return null; - } - - @Override - public final boolean hasNext() { - switch (state) { - case DONE: - return false; - case READY: - return true; - default: - } - return tryToComputeNext(); - } - - private boolean tryToComputeNext() { - next = computeNext(); - if (state != State.DONE) { - state = State.READY; - return true; - } - return false; - } - - @Override - public final T next() { - if (!hasNext()) { - throw new NoSuchElementException(); - } - state = State.NOT_READY; - return next; - } - - public T peek() { - if (!hasNext()) { - throw new NoSuchElementException(); - } - return next; - } - - @Override - public void remove() { - throw new UnsupportedOperationException(); - } - - private enum State { - /** We have computed the next element and haven't returned it yet. */ - READY, - - /** We haven't yet computed or have already returned the element. 
*/ - NOT_READY, - - /** We have reached the end of the data and are finished. */ - DONE, - } -} diff --git a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/Schema.java b/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/Schema.java deleted file mode 100644 index 49139f3c660..00000000000 --- a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/Schema.java +++ /dev/null @@ -1,173 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.storage.mysql.v1; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.Set; -import javax.sql.DataSource; -import org.jooq.Condition; -import org.jooq.Field; -import org.jooq.Record; -import org.jooq.Result; -import org.jooq.Row2; -import org.jooq.SelectOffsetStep; -import org.jooq.TableField; -import zipkin2.storage.mysql.v1.internal.generated.tables.ZipkinAnnotations; - -import static org.jooq.impl.DSL.row; -import static zipkin2.storage.mysql.v1.internal.generated.tables.ZipkinAnnotations.ZIPKIN_ANNOTATIONS; -import static zipkin2.storage.mysql.v1.internal.generated.tables.ZipkinDependencies.ZIPKIN_DEPENDENCIES; -import static zipkin2.storage.mysql.v1.internal.generated.tables.ZipkinSpans.ZIPKIN_SPANS; - -final class Schema { - final List> spanIdFields; - final List> spanFields; - final List> annotationFields; - final List> dependencyLinkerFields; - final List> dependencyLinkerGroupByFields; - final List> dependencyLinkFields; - final boolean hasTraceIdHigh; - final boolean hasPreAggregatedDependencies; - final boolean hasIpv6; - final boolean hasErrorCount; - final boolean hasRemoteServiceName; - final boolean strictTraceId; - - Schema(DataSource datasource, DSLContexts context, boolean strictTraceId) { - hasTraceIdHigh = HasTraceIdHigh.test(datasource, context); - hasPreAggregatedDependencies = HasPreAggregatedDependencies.test(datasource, context); - hasIpv6 = HasIpv6.test(datasource, context); - hasErrorCount = HasErrorCount.test(datasource, context); - hasRemoteServiceName = HasRemoteServiceName.test(datasource, context); - this.strictTraceId = strictTraceId; - - spanIdFields = list(ZIPKIN_SPANS.TRACE_ID_HIGH, ZIPKIN_SPANS.TRACE_ID); - spanFields = list(ZIPKIN_SPANS.fields()); - spanIdFields.remove(ZIPKIN_SPANS.REMOTE_SERVICE_NAME); // not used to recreate the span - annotationFields = list(ZIPKIN_ANNOTATIONS.fields()); - dependencyLinkFields = list(ZIPKIN_DEPENDENCIES.fields()); - 
dependencyLinkerFields = - list( - ZIPKIN_SPANS.TRACE_ID_HIGH, - ZIPKIN_SPANS.TRACE_ID, - ZIPKIN_SPANS.PARENT_ID, - ZIPKIN_SPANS.ID, - ZIPKIN_ANNOTATIONS.A_KEY, - ZIPKIN_ANNOTATIONS.A_TYPE, - ZIPKIN_ANNOTATIONS.ENDPOINT_SERVICE_NAME); - dependencyLinkerGroupByFields = new ArrayList<>(dependencyLinkerFields); - dependencyLinkerGroupByFields.remove(ZIPKIN_SPANS.PARENT_ID); - if (!hasTraceIdHigh) { - spanIdFields.remove(ZIPKIN_SPANS.TRACE_ID_HIGH); - spanFields.remove(ZIPKIN_SPANS.TRACE_ID_HIGH); - annotationFields.remove(ZIPKIN_ANNOTATIONS.TRACE_ID_HIGH); - dependencyLinkerFields.remove(ZIPKIN_SPANS.TRACE_ID_HIGH); - dependencyLinkerGroupByFields.remove(ZIPKIN_SPANS.TRACE_ID_HIGH); - } - if (!hasIpv6) { - annotationFields.remove(ZIPKIN_ANNOTATIONS.ENDPOINT_IPV6); - } - if (!hasErrorCount) { - dependencyLinkFields.remove(ZIPKIN_DEPENDENCIES.ERROR_COUNT); - } - } - - Condition joinCondition(ZipkinAnnotations annotationTable) { - if (hasTraceIdHigh) { - return ZIPKIN_SPANS - .TRACE_ID_HIGH - .eq(annotationTable.TRACE_ID_HIGH) - .and(ZIPKIN_SPANS.TRACE_ID.eq(annotationTable.TRACE_ID)) - .and(ZIPKIN_SPANS.ID.eq(annotationTable.SPAN_ID)); - } else { - return ZIPKIN_SPANS - .TRACE_ID - .eq(annotationTable.TRACE_ID) - .and(ZIPKIN_SPANS.ID.eq(annotationTable.SPAN_ID)); - } - } - - /** Returns a mutable list */ - static List list(T... 
elements) { - return new ArrayList<>(Arrays.asList(elements)); - } - - Condition spanTraceIdCondition(SelectOffsetStep traceIdQuery) { - if (hasTraceIdHigh && strictTraceId) { - Result result = traceIdQuery.fetch(); - List> traceIds = new ArrayList<>(result.size()); - for (Record r : result) { - traceIds.add(row(r.get(ZIPKIN_SPANS.TRACE_ID_HIGH), r.get(ZIPKIN_SPANS.TRACE_ID))); - } - return row(ZIPKIN_SPANS.TRACE_ID_HIGH, ZIPKIN_SPANS.TRACE_ID).in(traceIds); - } else { - List traceIds = traceIdQuery.fetch(ZIPKIN_SPANS.TRACE_ID); - return ZIPKIN_SPANS.TRACE_ID.in(traceIds); - } - } - - Condition spanTraceIdCondition(long traceIdHigh, long traceIdLow) { - return traceIdHigh != 0L && hasTraceIdHigh - ? row(ZIPKIN_SPANS.TRACE_ID_HIGH, ZIPKIN_SPANS.TRACE_ID).eq(traceIdHigh, traceIdLow) - : ZIPKIN_SPANS.TRACE_ID.eq(traceIdLow); - } - - Condition spanTraceIdCondition(Set traceIds) { - return traceIdCondition(ZIPKIN_SPANS.TRACE_ID_HIGH, ZIPKIN_SPANS.TRACE_ID, traceIds); - } - - Condition annotationsTraceIdCondition(Set traceIds) { - return traceIdCondition(ZIPKIN_ANNOTATIONS.TRACE_ID_HIGH, ZIPKIN_ANNOTATIONS.TRACE_ID, traceIds); - } - - Condition traceIdCondition( - TableField TRACE_ID_HIGH, - TableField TRACE_ID, Set traceIds - ) { - boolean hasTraceIdHigh = false; - for (Pair traceId : traceIds) { - if (traceId.left != 0) { - hasTraceIdHigh = true; - break; - } - } - if (hasTraceIdHigh && strictTraceId) { - Row2[] result = new Row2[traceIds.size()]; - int i = 0; - for (Pair traceId128 : traceIds) { - result[i++] = row(traceId128.left, traceId128.right); - } - return row(TRACE_ID_HIGH, TRACE_ID).in(result); - } else { - Long[] result = new Long[traceIds.size()]; - int i = 0; - for (Pair traceId128 : traceIds) { - result[i++] = traceId128.right; - } - return TRACE_ID.in(result); - } - } - - /** returns the default value if the column doesn't exist or the result was null */ - static T maybeGet(Record record, TableField field, T defaultValue) { - if 
(record.fieldsRow().indexOf(field) < 0) { - return defaultValue; - } else { - T result = record.get(field); - return result != null ? result : defaultValue; - } - } -} diff --git a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/SelectAnnotationServiceNames.java b/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/SelectAnnotationServiceNames.java deleted file mode 100644 index f6c8348f23f..00000000000 --- a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/SelectAnnotationServiceNames.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.storage.mysql.v1; - -import java.util.List; -import java.util.function.Function; -import org.jooq.Condition; -import org.jooq.DSLContext; -import zipkin2.v1.V1BinaryAnnotation; - -import static zipkin2.storage.mysql.v1.internal.generated.tables.ZipkinAnnotations.ZIPKIN_ANNOTATIONS; - -final class SelectAnnotationServiceNames implements Function> { - @Override public List apply(DSLContext context) { - return context - .selectDistinct(ZIPKIN_ANNOTATIONS.ENDPOINT_SERVICE_NAME) - .from(ZIPKIN_ANNOTATIONS) - .where(localServiceNameCondition()) - .orderBy(ZIPKIN_ANNOTATIONS.ENDPOINT_SERVICE_NAME) - .fetch(ZIPKIN_ANNOTATIONS.ENDPOINT_SERVICE_NAME); - } - - static Condition localServiceNameCondition() { - return ZIPKIN_ANNOTATIONS.ENDPOINT_SERVICE_NAME.isNotNull() - .and(ZIPKIN_ANNOTATIONS.ENDPOINT_SERVICE_NAME.ne("")) // exclude address annotations - .and(ZIPKIN_ANNOTATIONS.A_TYPE.ne(V1BinaryAnnotation.TYPE_BOOLEAN)); - } - - @Override public String toString() { - return "SelectAnnotationServiceNames{}"; - } -} diff --git a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/SelectAutocompleteValues.java b/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/SelectAutocompleteValues.java deleted file mode 100644 index 4b76ba1974b..00000000000 --- a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/SelectAutocompleteValues.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. 
See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.storage.mysql.v1; - -import java.util.List; -import java.util.function.Function; -import org.jooq.Converter; -import org.jooq.DSLContext; -import zipkin2.v1.V1BinaryAnnotation; - -import static java.nio.charset.StandardCharsets.UTF_8; -import static zipkin2.storage.mysql.v1.internal.generated.tables.ZipkinAnnotations.ZIPKIN_ANNOTATIONS; - -final class SelectAutocompleteValues implements Function> { - final Schema schema; - final String autocompleteKey; - - SelectAutocompleteValues(Schema schema, String autocompleteKey) { - this.schema = schema; - this.autocompleteKey = autocompleteKey; - } - - @Override public List apply(DSLContext context) { - return context.selectDistinct(ZIPKIN_ANNOTATIONS.A_VALUE) - .from(ZIPKIN_ANNOTATIONS) - .where(ZIPKIN_ANNOTATIONS.A_TYPE.eq(V1BinaryAnnotation.TYPE_STRING) - .and(ZIPKIN_ANNOTATIONS.A_KEY.eq(autocompleteKey))) - .fetch(ZIPKIN_ANNOTATIONS.A_VALUE, STRING_CONVERTER); - } - - static final Converter STRING_CONVERTER = new Converter() { - @Override public String from(byte[] bytes) { - return new String(bytes, UTF_8); - } - - @Override public byte[] to(String input) { - return input.getBytes(UTF_8); - } - - @Override public Class fromType() { - return byte[].class; - } - - @Override public Class toType() { - return String.class; - } - }; -} diff --git a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/SelectDependencies.java b/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/SelectDependencies.java deleted file mode 100644 index 579d04e2578..00000000000 --- a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/SelectDependencies.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.storage.mysql.v1; - -import java.util.List; -import java.util.function.Function; -import org.jooq.DSLContext; -import org.jooq.Record; -import zipkin2.DependencyLink; -import zipkin2.internal.DependencyLinker; - -import static zipkin2.storage.mysql.v1.Schema.maybeGet; -import static zipkin2.storage.mysql.v1.internal.generated.tables.ZipkinDependencies.ZIPKIN_DEPENDENCIES; - -final class SelectDependencies implements Function> { - final Schema schema; - final List epochDays; - - SelectDependencies(Schema schema, List epochDays) { - this.schema = schema; - this.epochDays = epochDays; - } - - @Override - public List apply(DSLContext context) { - List unmerged = - context - .select(schema.dependencyLinkFields) - .from(ZIPKIN_DEPENDENCIES) - .where(ZIPKIN_DEPENDENCIES.DAY.in(epochDays)) - .fetch( - (Record l) -> - DependencyLink.newBuilder() - .parent(l.get(ZIPKIN_DEPENDENCIES.PARENT)) - .child(l.get(ZIPKIN_DEPENDENCIES.CHILD)) - .callCount(l.get(ZIPKIN_DEPENDENCIES.CALL_COUNT)) - .errorCount(maybeGet(l, ZIPKIN_DEPENDENCIES.ERROR_COUNT, 0L)) - .build()); - return DependencyLinker.merge(unmerged); - } - - @Override - public String toString() { - return "SelectDependencies{epochDays=" + epochDays + "}"; - } -} diff --git a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/SelectRemoteServiceNames.java b/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/SelectRemoteServiceNames.java deleted file mode 100644 index f413ced608e..00000000000 --- 
a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/SelectRemoteServiceNames.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.storage.mysql.v1; - -import java.util.List; -import java.util.function.Function; -import org.jooq.DSLContext; - -import static zipkin2.storage.mysql.v1.SelectAnnotationServiceNames.localServiceNameCondition; -import static zipkin2.storage.mysql.v1.internal.generated.tables.ZipkinAnnotations.ZIPKIN_ANNOTATIONS; -import static zipkin2.storage.mysql.v1.internal.generated.tables.ZipkinSpans.ZIPKIN_SPANS; - -final class SelectRemoteServiceNames implements Function> { - final Schema schema; - final String serviceName; - - SelectRemoteServiceNames(Schema schema, String serviceName) { - this.schema = schema; - this.serviceName = serviceName; - } - - @Override - public List apply(DSLContext context) { - return context - .selectDistinct(ZIPKIN_SPANS.REMOTE_SERVICE_NAME) - .from(ZIPKIN_SPANS) - .join(ZIPKIN_ANNOTATIONS) - .on(schema.joinCondition(ZIPKIN_ANNOTATIONS)) - .where( - localServiceNameCondition().and(ZIPKIN_ANNOTATIONS.ENDPOINT_SERVICE_NAME.eq(serviceName))) - .and(ZIPKIN_SPANS.REMOTE_SERVICE_NAME.notEqual("")) - .orderBy(ZIPKIN_SPANS.REMOTE_SERVICE_NAME) - .fetch(ZIPKIN_SPANS.REMOTE_SERVICE_NAME); - } - - @Override - public String toString() { - return "SelectRemoteServiceNames{serviceName=" + serviceName + "}"; - } -} 
diff --git a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/SelectSpanNames.java b/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/SelectSpanNames.java deleted file mode 100644 index 00827b141b7..00000000000 --- a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/SelectSpanNames.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.storage.mysql.v1; - -import java.util.List; -import java.util.function.Function; -import org.jooq.DSLContext; - -import static zipkin2.storage.mysql.v1.SelectAnnotationServiceNames.localServiceNameCondition; -import static zipkin2.storage.mysql.v1.internal.generated.tables.ZipkinAnnotations.ZIPKIN_ANNOTATIONS; -import static zipkin2.storage.mysql.v1.internal.generated.tables.ZipkinSpans.ZIPKIN_SPANS; - -final class SelectSpanNames implements Function> { - final Schema schema; - final String serviceName; - - SelectSpanNames(Schema schema, String serviceName) { - this.schema = schema; - this.serviceName = serviceName; - } - - @Override - public List apply(DSLContext context) { - return context - .selectDistinct(ZIPKIN_SPANS.NAME) - .from(ZIPKIN_SPANS) - .join(ZIPKIN_ANNOTATIONS) - .on(schema.joinCondition(ZIPKIN_ANNOTATIONS)) - .where( - localServiceNameCondition().and(ZIPKIN_ANNOTATIONS.ENDPOINT_SERVICE_NAME.eq(serviceName))) - .and(ZIPKIN_SPANS.NAME.notEqual("")) - .orderBy(ZIPKIN_SPANS.NAME) 
- .fetch(ZIPKIN_SPANS.NAME); - } - - @Override - public String toString() { - return "SelectSpanNames{serviceName=" + serviceName + "}"; - } -} diff --git a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/SelectSpansAndAnnotations.java b/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/SelectSpansAndAnnotations.java deleted file mode 100644 index f31941fc838..00000000000 --- a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/SelectSpansAndAnnotations.java +++ /dev/null @@ -1,282 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.storage.mysql.v1; - -import java.util.ArrayList; -import java.util.LinkedHashMap; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.function.Function; -import java.util.stream.Collectors; -import org.jooq.Condition; -import org.jooq.DSLContext; -import org.jooq.Record; -import org.jooq.Row3; -import org.jooq.SelectConditionStep; -import org.jooq.SelectField; -import org.jooq.SelectOffsetStep; -import org.jooq.TableOnConditionStep; -import zipkin2.Endpoint; -import zipkin2.Span; -import zipkin2.internal.Nullable; -import zipkin2.storage.QueryRequest; -import zipkin2.storage.mysql.v1.internal.generated.tables.ZipkinAnnotations; -import zipkin2.v1.V1BinaryAnnotation; -import zipkin2.v1.V1Span; -import zipkin2.v1.V1SpanConverter; - -import static java.nio.charset.StandardCharsets.UTF_8; -import static java.util.stream.Collectors.groupingBy; -import static org.jooq.impl.DSL.max; -import static org.jooq.impl.DSL.row; -import static zipkin2.internal.HexCodec.lowerHexToUnsignedLong; -import static zipkin2.storage.mysql.v1.Schema.maybeGet; -import static zipkin2.storage.mysql.v1.SelectAnnotationServiceNames.localServiceNameCondition; -import static zipkin2.storage.mysql.v1.internal.generated.tables.ZipkinAnnotations.ZIPKIN_ANNOTATIONS; -import static zipkin2.storage.mysql.v1.internal.generated.tables.ZipkinSpans.ZIPKIN_SPANS; - -abstract class SelectSpansAndAnnotations implements Function> { - static final class Factory { - final Schema schema; - final boolean strictTraceId; - - Factory(Schema schema, boolean strictTraceId) { - this.schema = schema; - this.strictTraceId = strictTraceId; - } - - SelectSpansAndAnnotations create(long traceIdHigh, long traceIdLow) { - if (traceIdHigh != 0L && !strictTraceId) traceIdHigh = 0L; - long finalTraceIdHigh = traceIdHigh; - return new SelectSpansAndAnnotations(schema) { - @Override - Condition traceIdCondition(DSLContext context) { - return 
schema.spanTraceIdCondition(finalTraceIdHigh, traceIdLow); - } - }; - } - - SelectSpansAndAnnotations create(Set traceIdPairs) { - return new SelectSpansAndAnnotations(schema) { - @Override Condition traceIdCondition(DSLContext context) { - return schema.spanTraceIdCondition(traceIdPairs); - } - }; - } - - SelectSpansAndAnnotations create(QueryRequest request) { - if (request.remoteServiceName() != null && !schema.hasRemoteServiceName) { - throw new IllegalArgumentException("remoteService=" + request.remoteServiceName() - + " unsupported due to missing column zipkin_spans.remote_service_name"); - } - return new SelectSpansAndAnnotations(schema) { - @Override - Condition traceIdCondition(DSLContext context) { - return schema.spanTraceIdCondition(toTraceIdQuery(context, request)); - } - }; - } - } - - final Schema schema; - - SelectSpansAndAnnotations(Schema schema) { - this.schema = schema; - } - - abstract Condition traceIdCondition(DSLContext context); - - @Override - public List apply(DSLContext context) { - final Map> spansWithoutAnnotations; - final Map, List> dbAnnotations; - - spansWithoutAnnotations = - context - .select(schema.spanFields) - .from(ZIPKIN_SPANS) - .where(traceIdCondition(context)) - .stream() - .map( - r -> - V1Span.newBuilder() - .traceIdHigh(maybeGet(r, ZIPKIN_SPANS.TRACE_ID_HIGH, 0L)) - .traceId(r.getValue(ZIPKIN_SPANS.TRACE_ID)) - .name(r.getValue(ZIPKIN_SPANS.NAME)) - .id(r.getValue(ZIPKIN_SPANS.ID)) - .parentId(maybeGet(r, ZIPKIN_SPANS.PARENT_ID, 0L)) - .timestamp(maybeGet(r, ZIPKIN_SPANS.START_TS, 0L)) - .duration(maybeGet(r, ZIPKIN_SPANS.DURATION, 0L)) - .debug(r.getValue(ZIPKIN_SPANS.DEBUG))) - .collect( - groupingBy( - s -> new Pair(s.traceIdHigh(), s.traceId()), - LinkedHashMap::new, - Collectors.toList())); - - dbAnnotations = - context - .select(schema.annotationFields) - .from(ZIPKIN_ANNOTATIONS) - .where(schema.annotationsTraceIdCondition(spansWithoutAnnotations.keySet())) - .orderBy(ZIPKIN_ANNOTATIONS.A_TIMESTAMP.asc(), 
ZIPKIN_ANNOTATIONS.A_KEY.asc()) - .stream() - .collect( - groupingBy( - (Record a) -> - row( - maybeGet(a, ZIPKIN_ANNOTATIONS.TRACE_ID_HIGH, 0L), - a.getValue(ZIPKIN_ANNOTATIONS.TRACE_ID), - a.getValue(ZIPKIN_ANNOTATIONS.SPAN_ID)), - LinkedHashMap::new, - Collectors.toList())); // LinkedHashMap preserves order while grouping - - V1SpanConverter converter = V1SpanConverter.create(); - List allSpans = new ArrayList<>(spansWithoutAnnotations.size()); - for (List spans : spansWithoutAnnotations.values()) { - for (V1Span.Builder span : spans) { - Row3 key = row(span.traceIdHigh(), span.traceId(), span.id()); - if (dbAnnotations.containsKey(key)) { - for (Record a : dbAnnotations.get(key)) { - Endpoint endpoint = endpoint(a); - processAnnotationRecord(a, span, endpoint); - } - } - converter.convert(span.build(), allSpans); - } - } - return allSpans; - } - - static void processAnnotationRecord(Record a, V1Span.Builder span, @Nullable Endpoint endpoint) { - Integer type = a.getValue(ZIPKIN_ANNOTATIONS.A_TYPE); - if (type == null) return; - if (type == -1) { - span.addAnnotation( - a.getValue(ZIPKIN_ANNOTATIONS.A_TIMESTAMP), - a.getValue(ZIPKIN_ANNOTATIONS.A_KEY), - endpoint); - } else { - switch (type) { - case V1BinaryAnnotation.TYPE_STRING: - span.addBinaryAnnotation( - a.getValue(ZIPKIN_ANNOTATIONS.A_KEY), - new String(a.getValue(ZIPKIN_ANNOTATIONS.A_VALUE), UTF_8), - endpoint); - break; - case V1BinaryAnnotation.TYPE_BOOLEAN: - // address annotations require an endpoint - if (endpoint == null) break; - String aKey = a.getValue(ZIPKIN_ANNOTATIONS.A_KEY); - // ensure we are only processing address annotations - if (!aKey.equals("sa") && !aKey.equals("ca") && !aKey.equals("ma")) break; - byte[] value = a.getValue(ZIPKIN_ANNOTATIONS.A_VALUE); - // address annotations are a single byte of 1 - if (value == null || value.length != 1 || value[0] != 1) break; - span.addBinaryAnnotation(a.getValue(ZIPKIN_ANNOTATIONS.A_KEY), endpoint); - break; - default: - // other values 
unsupported - } - } - } - - SelectOffsetStep toTraceIdQuery(DSLContext context, QueryRequest request) { - long endTs = request.endTs() * 1000; - - TableOnConditionStep table = - ZIPKIN_SPANS.join(ZIPKIN_ANNOTATIONS).on(schema.joinCondition(ZIPKIN_ANNOTATIONS)); - - int i = 0; - for (Map.Entry kv : request.annotationQuery().entrySet()) { - ZipkinAnnotations aTable = ZIPKIN_ANNOTATIONS.as("a" + i++); - if (kv.getValue().isEmpty()) { - table = - maybeOnService( - table - .join(aTable) - .on(schema.joinCondition(aTable)) - .and(aTable.A_KEY.eq(kv.getKey())), - aTable, - request.serviceName()); - } else { - table = - maybeOnService( - table - .join(aTable) - .on(schema.joinCondition(aTable)) - .and(aTable.A_TYPE.eq(V1BinaryAnnotation.TYPE_STRING)) - .and(aTable.A_KEY.eq(kv.getKey())) - .and(aTable.A_VALUE.eq(kv.getValue().getBytes(UTF_8))), - aTable, - request.serviceName()); - } - } - - List> distinctFields = new ArrayList<>(schema.spanIdFields); - distinctFields.add(max(ZIPKIN_SPANS.START_TS)); - SelectConditionStep dsl = context.selectDistinct(distinctFields) - .from(table) - .where(ZIPKIN_SPANS.START_TS.between(endTs - request.lookback() * 1000, endTs)); - - if (request.serviceName() != null) { - dsl.and(localServiceNameCondition() - .and(ZIPKIN_ANNOTATIONS.ENDPOINT_SERVICE_NAME.eq(request.serviceName()))); - } - - if (request.remoteServiceName() != null) { - dsl.and(ZIPKIN_SPANS.REMOTE_SERVICE_NAME.eq(request.remoteServiceName())); - } - - if (request.spanName() != null) { - dsl.and(ZIPKIN_SPANS.NAME.eq(request.spanName())); - } - - if (request.minDuration() != null && request.maxDuration() != null) { - dsl.and(ZIPKIN_SPANS.DURATION.between(request.minDuration(), request.maxDuration())); - } else if (request.minDuration() != null) { - dsl.and(ZIPKIN_SPANS.DURATION.greaterOrEqual(request.minDuration())); - } - return dsl.groupBy(schema.spanIdFields) - .orderBy(max(ZIPKIN_SPANS.START_TS).desc()) - .limit(request.limit()); - } - - static TableOnConditionStep 
maybeOnService( - TableOnConditionStep table, ZipkinAnnotations aTable, String serviceName) { - if (serviceName == null) return table; - return table.and(aTable.ENDPOINT_SERVICE_NAME.eq(serviceName)); - } - - static Endpoint endpoint(Record a) { - Endpoint.Builder result = - Endpoint.newBuilder() - .serviceName(a.getValue(ZIPKIN_ANNOTATIONS.ENDPOINT_SERVICE_NAME)) - .port(Schema.maybeGet(a, ZIPKIN_ANNOTATIONS.ENDPOINT_PORT, (short) 0)); - int ipv4 = maybeGet(a, ZIPKIN_ANNOTATIONS.ENDPOINT_IPV4, 0); - if (ipv4 != 0) { - result.parseIp( // allocation is ok here as Endpoint.ipv4Bytes would anyway - new byte[] { - (byte) (ipv4 >> 24 & 0xff), - (byte) (ipv4 >> 16 & 0xff), - (byte) (ipv4 >> 8 & 0xff), - (byte) (ipv4 & 0xff) - }); - } - result.parseIp(Schema.maybeGet(a, ZIPKIN_ANNOTATIONS.ENDPOINT_IPV6, null)); - return result.build(); - } -} diff --git a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/internal/generated/DefaultCatalog.java b/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/internal/generated/DefaultCatalog.java deleted file mode 100644 index 508a252b309..00000000000 --- a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/internal/generated/DefaultCatalog.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * This file is generated by jOOQ. - */ -package zipkin2.storage.mysql.v1.internal.generated; - - -import java.util.Arrays; -import java.util.List; - -import org.jooq.Schema; -import org.jooq.impl.CatalogImpl; - - -/** - * This class is generated by jOOQ. - */ -@SuppressWarnings({ "all", "unchecked", "rawtypes" }) -public class DefaultCatalog extends CatalogImpl { - - private static final long serialVersionUID = 1L; - - /** - * The reference instance of DEFAULT_CATALOG - */ - public static final DefaultCatalog DEFAULT_CATALOG = new DefaultCatalog(); - - /** - * The schema zipkin. 
- */ - public final Zipkin ZIPKIN = Zipkin.ZIPKIN; - - /** - * No further instances allowed - */ - private DefaultCatalog() { - super(""); - } - - @Override - public final List getSchemas() { - return Arrays.asList( - Zipkin.ZIPKIN); - } -} diff --git a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/internal/generated/Indexes.java b/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/internal/generated/Indexes.java deleted file mode 100644 index cfb40992165..00000000000 --- a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/internal/generated/Indexes.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * This file is generated by jOOQ. - */ -package zipkin2.storage.mysql.v1.internal.generated; - - -import org.jooq.Index; -import org.jooq.OrderField; -import org.jooq.impl.DSL; -import org.jooq.impl.Internal; - -import zipkin2.storage.mysql.v1.internal.generated.tables.ZipkinAnnotations; -import zipkin2.storage.mysql.v1.internal.generated.tables.ZipkinSpans; - - -/** - * A class modelling indexes of tables in zipkin. 
- */ -@SuppressWarnings({ "all", "unchecked", "rawtypes" }) -public class Indexes { - - // ------------------------------------------------------------------------- - // INDEX definitions - // ------------------------------------------------------------------------- - - public static final Index ZIPKIN_ANNOTATIONS_A_KEY = Internal.createIndex(DSL.name("a_key"), ZipkinAnnotations.ZIPKIN_ANNOTATIONS, new OrderField[] { ZipkinAnnotations.ZIPKIN_ANNOTATIONS.A_KEY }, false); - public static final Index ZIPKIN_ANNOTATIONS_A_TYPE = Internal.createIndex(DSL.name("a_type"), ZipkinAnnotations.ZIPKIN_ANNOTATIONS, new OrderField[] { ZipkinAnnotations.ZIPKIN_ANNOTATIONS.A_TYPE }, false); - public static final Index ZIPKIN_ANNOTATIONS_ENDPOINT_SERVICE_NAME = Internal.createIndex(DSL.name("endpoint_service_name"), ZipkinAnnotations.ZIPKIN_ANNOTATIONS, new OrderField[] { ZipkinAnnotations.ZIPKIN_ANNOTATIONS.ENDPOINT_SERVICE_NAME }, false); - public static final Index ZIPKIN_SPANS_NAME = Internal.createIndex(DSL.name("name"), ZipkinSpans.ZIPKIN_SPANS, new OrderField[] { ZipkinSpans.ZIPKIN_SPANS.NAME }, false); - public static final Index ZIPKIN_SPANS_REMOTE_SERVICE_NAME = Internal.createIndex(DSL.name("remote_service_name"), ZipkinSpans.ZIPKIN_SPANS, new OrderField[] { ZipkinSpans.ZIPKIN_SPANS.REMOTE_SERVICE_NAME }, false); - public static final Index ZIPKIN_SPANS_START_TS = Internal.createIndex(DSL.name("start_ts"), ZipkinSpans.ZIPKIN_SPANS, new OrderField[] { ZipkinSpans.ZIPKIN_SPANS.START_TS }, false); - public static final Index ZIPKIN_ANNOTATIONS_TRACE_ID = Internal.createIndex(DSL.name("trace_id"), ZipkinAnnotations.ZIPKIN_ANNOTATIONS, new OrderField[] { ZipkinAnnotations.ZIPKIN_ANNOTATIONS.TRACE_ID, ZipkinAnnotations.ZIPKIN_ANNOTATIONS.SPAN_ID, ZipkinAnnotations.ZIPKIN_ANNOTATIONS.A_KEY }, false); - public static final Index ZIPKIN_ANNOTATIONS_TRACE_ID_HIGH = Internal.createIndex(DSL.name("trace_id_high"), ZipkinAnnotations.ZIPKIN_ANNOTATIONS, new OrderField[] { 
ZipkinAnnotations.ZIPKIN_ANNOTATIONS.TRACE_ID_HIGH, ZipkinAnnotations.ZIPKIN_ANNOTATIONS.TRACE_ID, ZipkinAnnotations.ZIPKIN_ANNOTATIONS.SPAN_ID, ZipkinAnnotations.ZIPKIN_ANNOTATIONS.A_KEY, ZipkinAnnotations.ZIPKIN_ANNOTATIONS.A_TIMESTAMP }, true); - public static final Index ZIPKIN_SPANS_TRACE_ID_HIGH = Internal.createIndex(DSL.name("trace_id_high"), ZipkinSpans.ZIPKIN_SPANS, new OrderField[] { ZipkinSpans.ZIPKIN_SPANS.TRACE_ID_HIGH, ZipkinSpans.ZIPKIN_SPANS.TRACE_ID }, false); - public static final Index ZIPKIN_ANNOTATIONS_TRACE_ID_HIGH_2 = Internal.createIndex(DSL.name("trace_id_high_2"), ZipkinAnnotations.ZIPKIN_ANNOTATIONS, new OrderField[] { ZipkinAnnotations.ZIPKIN_ANNOTATIONS.TRACE_ID_HIGH, ZipkinAnnotations.ZIPKIN_ANNOTATIONS.TRACE_ID, ZipkinAnnotations.ZIPKIN_ANNOTATIONS.SPAN_ID }, false); - public static final Index ZIPKIN_ANNOTATIONS_TRACE_ID_HIGH_3 = Internal.createIndex(DSL.name("trace_id_high_3"), ZipkinAnnotations.ZIPKIN_ANNOTATIONS, new OrderField[] { ZipkinAnnotations.ZIPKIN_ANNOTATIONS.TRACE_ID_HIGH, ZipkinAnnotations.ZIPKIN_ANNOTATIONS.TRACE_ID }, false); -} diff --git a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/internal/generated/Keys.java b/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/internal/generated/Keys.java deleted file mode 100644 index 7986fb0cd62..00000000000 --- a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/internal/generated/Keys.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * This file is generated by jOOQ. 
- */ -package zipkin2.storage.mysql.v1.internal.generated; - - -import org.jooq.Record; -import org.jooq.TableField; -import org.jooq.UniqueKey; -import org.jooq.impl.DSL; -import org.jooq.impl.Internal; - -import zipkin2.storage.mysql.v1.internal.generated.tables.ZipkinAnnotations; -import zipkin2.storage.mysql.v1.internal.generated.tables.ZipkinDependencies; -import zipkin2.storage.mysql.v1.internal.generated.tables.ZipkinSpans; - - -/** - * A class modelling foreign key relationships and constraints of tables in - * zipkin. - */ -@SuppressWarnings({ "all", "unchecked", "rawtypes" }) -public class Keys { - - // ------------------------------------------------------------------------- - // UNIQUE and PRIMARY KEY definitions - // ------------------------------------------------------------------------- - - public static final UniqueKey KEY_ZIPKIN_ANNOTATIONS_TRACE_ID_HIGH = Internal.createUniqueKey(ZipkinAnnotations.ZIPKIN_ANNOTATIONS, DSL.name("KEY_zipkin_annotations_trace_id_high"), new TableField[] { ZipkinAnnotations.ZIPKIN_ANNOTATIONS.TRACE_ID_HIGH, ZipkinAnnotations.ZIPKIN_ANNOTATIONS.TRACE_ID, ZipkinAnnotations.ZIPKIN_ANNOTATIONS.SPAN_ID, ZipkinAnnotations.ZIPKIN_ANNOTATIONS.A_KEY, ZipkinAnnotations.ZIPKIN_ANNOTATIONS.A_TIMESTAMP }, true); - public static final UniqueKey KEY_ZIPKIN_DEPENDENCIES_PRIMARY = Internal.createUniqueKey(ZipkinDependencies.ZIPKIN_DEPENDENCIES, DSL.name("KEY_zipkin_dependencies_PRIMARY"), new TableField[] { ZipkinDependencies.ZIPKIN_DEPENDENCIES.DAY, ZipkinDependencies.ZIPKIN_DEPENDENCIES.PARENT, ZipkinDependencies.ZIPKIN_DEPENDENCIES.CHILD }, true); - public static final UniqueKey KEY_ZIPKIN_SPANS_PRIMARY = Internal.createUniqueKey(ZipkinSpans.ZIPKIN_SPANS, DSL.name("KEY_zipkin_spans_PRIMARY"), new TableField[] { ZipkinSpans.ZIPKIN_SPANS.TRACE_ID_HIGH, ZipkinSpans.ZIPKIN_SPANS.TRACE_ID, ZipkinSpans.ZIPKIN_SPANS.ID }, true); -} diff --git a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/internal/generated/Tables.java 
b/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/internal/generated/Tables.java deleted file mode 100644 index 6d89b9f5a2a..00000000000 --- a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/internal/generated/Tables.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * This file is generated by jOOQ. - */ -package zipkin2.storage.mysql.v1.internal.generated; - - -import zipkin2.storage.mysql.v1.internal.generated.tables.ZipkinAnnotations; -import zipkin2.storage.mysql.v1.internal.generated.tables.ZipkinDependencies; -import zipkin2.storage.mysql.v1.internal.generated.tables.ZipkinSpans; - - -/** - * Convenience access to all tables in zipkin. - */ -@SuppressWarnings({ "all", "unchecked", "rawtypes" }) -public class Tables { - - /** - * The table zipkin.zipkin_annotations. - */ - public static final ZipkinAnnotations ZIPKIN_ANNOTATIONS = ZipkinAnnotations.ZIPKIN_ANNOTATIONS; - - /** - * The table zipkin.zipkin_dependencies. - */ - public static final ZipkinDependencies ZIPKIN_DEPENDENCIES = ZipkinDependencies.ZIPKIN_DEPENDENCIES; - - /** - * The table zipkin.zipkin_spans. - */ - public static final ZipkinSpans ZIPKIN_SPANS = ZipkinSpans.ZIPKIN_SPANS; -} diff --git a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/internal/generated/Zipkin.java b/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/internal/generated/Zipkin.java deleted file mode 100644 index 10d159c1fe8..00000000000 --- a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/internal/generated/Zipkin.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * This file is generated by jOOQ. 
- */ -package zipkin2.storage.mysql.v1.internal.generated; - - -import java.util.Arrays; -import java.util.List; - -import org.jooq.Catalog; -import org.jooq.Table; -import org.jooq.impl.SchemaImpl; - -import zipkin2.storage.mysql.v1.internal.generated.tables.ZipkinAnnotations; -import zipkin2.storage.mysql.v1.internal.generated.tables.ZipkinDependencies; -import zipkin2.storage.mysql.v1.internal.generated.tables.ZipkinSpans; - - -/** - * This class is generated by jOOQ. - */ -@SuppressWarnings({ "all", "unchecked", "rawtypes" }) -public class Zipkin extends SchemaImpl { - - private static final long serialVersionUID = 1L; - - /** - * The reference instance of zipkin - */ - public static final Zipkin ZIPKIN = new Zipkin(); - - /** - * The table zipkin.zipkin_annotations. - */ - public final ZipkinAnnotations ZIPKIN_ANNOTATIONS = ZipkinAnnotations.ZIPKIN_ANNOTATIONS; - - /** - * The table zipkin.zipkin_dependencies. - */ - public final ZipkinDependencies ZIPKIN_DEPENDENCIES = ZipkinDependencies.ZIPKIN_DEPENDENCIES; - - /** - * The table zipkin.zipkin_spans. 
- */ - public final ZipkinSpans ZIPKIN_SPANS = ZipkinSpans.ZIPKIN_SPANS; - - /** - * No further instances allowed - */ - private Zipkin() { - super("zipkin", null); - } - - - @Override - public Catalog getCatalog() { - return DefaultCatalog.DEFAULT_CATALOG; - } - - @Override - public final List> getTables() { - return Arrays.>asList( - ZipkinAnnotations.ZIPKIN_ANNOTATIONS, - ZipkinDependencies.ZIPKIN_DEPENDENCIES, - ZipkinSpans.ZIPKIN_SPANS); - } -} diff --git a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/internal/generated/tables/ZipkinAnnotations.java b/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/internal/generated/tables/ZipkinAnnotations.java deleted file mode 100644 index 52da7a5a427..00000000000 --- a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/internal/generated/tables/ZipkinAnnotations.java +++ /dev/null @@ -1,178 +0,0 @@ -/* - * This file is generated by jOOQ. - */ -package zipkin2.storage.mysql.v1.internal.generated.tables; - - -import java.util.Arrays; -import java.util.List; - -import org.jooq.Field; -import org.jooq.ForeignKey; -import org.jooq.Index; -import org.jooq.Name; -import org.jooq.Record; -import org.jooq.Schema; -import org.jooq.Table; -import org.jooq.TableField; -import org.jooq.TableOptions; -import org.jooq.UniqueKey; -import org.jooq.impl.DSL; -import org.jooq.impl.SQLDataType; -import org.jooq.impl.TableImpl; - -import zipkin2.storage.mysql.v1.internal.generated.Indexes; -import zipkin2.storage.mysql.v1.internal.generated.Keys; -import zipkin2.storage.mysql.v1.internal.generated.Zipkin; - - -/** - * This class is generated by jOOQ. 
- */ -@SuppressWarnings({ "all", "unchecked", "rawtypes" }) -public class ZipkinAnnotations extends TableImpl { - - private static final long serialVersionUID = 1L; - - /** - * The reference instance of zipkin.zipkin_annotations - */ - public static final ZipkinAnnotations ZIPKIN_ANNOTATIONS = new ZipkinAnnotations(); - - /** - * The class holding records for this type - */ - @Override - public Class getRecordType() { - return Record.class; - } - - /** - * The column zipkin.zipkin_annotations.trace_id_high. If non zero, this means the trace uses 128 bit traceIds instead of 64 bit - */ - public final TableField TRACE_ID_HIGH = createField(DSL.name("trace_id_high"), SQLDataType.BIGINT.nullable(false).defaultValue(DSL.inline("0", SQLDataType.BIGINT)), this, "If non zero, this means the trace uses 128 bit traceIds instead of 64 bit"); - - /** - * The column zipkin.zipkin_annotations.trace_id. coincides with zipkin_spans.trace_id - */ - public final TableField TRACE_ID = createField(DSL.name("trace_id"), SQLDataType.BIGINT.nullable(false), this, "coincides with zipkin_spans.trace_id"); - - /** - * The column zipkin.zipkin_annotations.span_id. coincides with zipkin_spans.id - */ - public final TableField SPAN_ID = createField(DSL.name("span_id"), SQLDataType.BIGINT.nullable(false), this, "coincides with zipkin_spans.id"); - - /** - * The column zipkin.zipkin_annotations.a_key. BinaryAnnotation.key or Annotation.value if type == -1 - */ - public final TableField A_KEY = createField(DSL.name("a_key"), SQLDataType.VARCHAR(255).nullable(false), this, "BinaryAnnotation.key or Annotation.value if type == -1"); - - /** - * The column zipkin.zipkin_annotations.a_value. BinaryAnnotation.value(), which must be smaller than 64KB - */ - public final TableField A_VALUE = createField(DSL.name("a_value"), SQLDataType.BLOB, this, "BinaryAnnotation.value(), which must be smaller than 64KB"); - - /** - * The column zipkin.zipkin_annotations.a_type. 
BinaryAnnotation.type() or -1 if Annotation - */ - public final TableField A_TYPE = createField(DSL.name("a_type"), SQLDataType.INTEGER.nullable(false), this, "BinaryAnnotation.type() or -1 if Annotation"); - - /** - * The column zipkin.zipkin_annotations.a_timestamp. Used to implement TTL; Annotation.timestamp or zipkin_spans.timestamp - */ - public final TableField A_TIMESTAMP = createField(DSL.name("a_timestamp"), SQLDataType.BIGINT, this, "Used to implement TTL; Annotation.timestamp or zipkin_spans.timestamp"); - - /** - * The column zipkin.zipkin_annotations.endpoint_ipv4. Null when Binary/Annotation.endpoint is null - */ - public final TableField ENDPOINT_IPV4 = createField(DSL.name("endpoint_ipv4"), SQLDataType.INTEGER, this, "Null when Binary/Annotation.endpoint is null"); - - /** - * The column zipkin.zipkin_annotations.endpoint_ipv6. Null when Binary/Annotation.endpoint is null, or no IPv6 address - */ - public final TableField ENDPOINT_IPV6 = createField(DSL.name("endpoint_ipv6"), SQLDataType.BINARY(16), this, "Null when Binary/Annotation.endpoint is null, or no IPv6 address"); - - /** - * The column zipkin.zipkin_annotations.endpoint_port. Null when Binary/Annotation.endpoint is null - */ - public final TableField ENDPOINT_PORT = createField(DSL.name("endpoint_port"), SQLDataType.SMALLINT, this, "Null when Binary/Annotation.endpoint is null"); - - /** - * The column zipkin.zipkin_annotations.endpoint_service_name. 
Null when Binary/Annotation.endpoint is null - */ - public final TableField ENDPOINT_SERVICE_NAME = createField(DSL.name("endpoint_service_name"), SQLDataType.VARCHAR(255), this, "Null when Binary/Annotation.endpoint is null"); - - private ZipkinAnnotations(Name alias, Table aliased) { - this(alias, aliased, null); - } - - private ZipkinAnnotations(Name alias, Table aliased, Field[] parameters) { - super(alias, null, aliased, parameters, DSL.comment(""), TableOptions.table()); - } - - /** - * Create an aliased zipkin.zipkin_annotations table reference - */ - public ZipkinAnnotations(String alias) { - this(DSL.name(alias), ZIPKIN_ANNOTATIONS); - } - - /** - * Create an aliased zipkin.zipkin_annotations table reference - */ - public ZipkinAnnotations(Name alias) { - this(alias, ZIPKIN_ANNOTATIONS); - } - - /** - * Create a zipkin.zipkin_annotations table reference - */ - public ZipkinAnnotations() { - this(DSL.name("zipkin_annotations"), null); - } - - public ZipkinAnnotations(Table child, ForeignKey key) { - super(child, key, ZIPKIN_ANNOTATIONS); - } - - @Override - public Schema getSchema() { - return Zipkin.ZIPKIN; - } - - @Override - public List getIndexes() { - return Arrays.asList(Indexes.ZIPKIN_ANNOTATIONS_A_KEY, Indexes.ZIPKIN_ANNOTATIONS_A_TYPE, Indexes.ZIPKIN_ANNOTATIONS_ENDPOINT_SERVICE_NAME, Indexes.ZIPKIN_ANNOTATIONS_TRACE_ID, Indexes.ZIPKIN_ANNOTATIONS_TRACE_ID_HIGH, Indexes.ZIPKIN_ANNOTATIONS_TRACE_ID_HIGH_2, Indexes.ZIPKIN_ANNOTATIONS_TRACE_ID_HIGH_3); - } - - @Override - public List> getKeys() { - return Arrays.>asList(Keys.KEY_ZIPKIN_ANNOTATIONS_TRACE_ID_HIGH); - } - - @Override - public ZipkinAnnotations as(String alias) { - return new ZipkinAnnotations(DSL.name(alias), this); - } - - @Override - public ZipkinAnnotations as(Name alias) { - return new ZipkinAnnotations(alias, this); - } - - /** - * Rename this table - */ - @Override - public ZipkinAnnotations rename(String name) { - return new ZipkinAnnotations(DSL.name(name), null); - } - - /** - * 
Rename this table - */ - @Override - public ZipkinAnnotations rename(Name name) { - return new ZipkinAnnotations(name, null); - } -} diff --git a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/internal/generated/tables/ZipkinDependencies.java b/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/internal/generated/tables/ZipkinDependencies.java deleted file mode 100644 index 185abce139b..00000000000 --- a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/internal/generated/tables/ZipkinDependencies.java +++ /dev/null @@ -1,147 +0,0 @@ -/* - * This file is generated by jOOQ. - */ -package zipkin2.storage.mysql.v1.internal.generated.tables; - - -import java.time.LocalDate; -import java.util.Arrays; -import java.util.List; - -import org.jooq.Field; -import org.jooq.ForeignKey; -import org.jooq.Name; -import org.jooq.Record; -import org.jooq.Schema; -import org.jooq.Table; -import org.jooq.TableField; -import org.jooq.TableOptions; -import org.jooq.UniqueKey; -import org.jooq.impl.DSL; -import org.jooq.impl.SQLDataType; -import org.jooq.impl.TableImpl; - -import zipkin2.storage.mysql.v1.internal.generated.Keys; -import zipkin2.storage.mysql.v1.internal.generated.Zipkin; - - -/** - * This class is generated by jOOQ. - */ -@SuppressWarnings({ "all", "unchecked", "rawtypes" }) -public class ZipkinDependencies extends TableImpl { - - private static final long serialVersionUID = 1L; - - /** - * The reference instance of zipkin.zipkin_dependencies - */ - public static final ZipkinDependencies ZIPKIN_DEPENDENCIES = new ZipkinDependencies(); - - /** - * The class holding records for this type - */ - @Override - public Class getRecordType() { - return Record.class; - } - - /** - * The column zipkin.zipkin_dependencies.day. - */ - public final TableField DAY = createField(DSL.name("day"), SQLDataType.LOCALDATE.nullable(false), this, ""); - - /** - * The column zipkin.zipkin_dependencies.parent. 
- */ - public final TableField PARENT = createField(DSL.name("parent"), SQLDataType.VARCHAR(255).nullable(false), this, ""); - - /** - * The column zipkin.zipkin_dependencies.child. - */ - public final TableField CHILD = createField(DSL.name("child"), SQLDataType.VARCHAR(255).nullable(false), this, ""); - - /** - * The column zipkin.zipkin_dependencies.call_count. - */ - public final TableField CALL_COUNT = createField(DSL.name("call_count"), SQLDataType.BIGINT, this, ""); - - /** - * The column zipkin.zipkin_dependencies.error_count. - */ - public final TableField ERROR_COUNT = createField(DSL.name("error_count"), SQLDataType.BIGINT, this, ""); - - private ZipkinDependencies(Name alias, Table aliased) { - this(alias, aliased, null); - } - - private ZipkinDependencies(Name alias, Table aliased, Field[] parameters) { - super(alias, null, aliased, parameters, DSL.comment(""), TableOptions.table()); - } - - /** - * Create an aliased zipkin.zipkin_dependencies table reference - */ - public ZipkinDependencies(String alias) { - this(DSL.name(alias), ZIPKIN_DEPENDENCIES); - } - - /** - * Create an aliased zipkin.zipkin_dependencies table reference - */ - public ZipkinDependencies(Name alias) { - this(alias, ZIPKIN_DEPENDENCIES); - } - - /** - * Create a zipkin.zipkin_dependencies table reference - */ - public ZipkinDependencies() { - this(DSL.name("zipkin_dependencies"), null); - } - - public ZipkinDependencies(Table child, ForeignKey key) { - super(child, key, ZIPKIN_DEPENDENCIES); - } - - @Override - public Schema getSchema() { - return Zipkin.ZIPKIN; - } - - @Override - public UniqueKey getPrimaryKey() { - return Keys.KEY_ZIPKIN_DEPENDENCIES_PRIMARY; - } - - @Override - public List> getKeys() { - return Arrays.>asList(Keys.KEY_ZIPKIN_DEPENDENCIES_PRIMARY); - } - - @Override - public ZipkinDependencies as(String alias) { - return new ZipkinDependencies(DSL.name(alias), this); - } - - @Override - public ZipkinDependencies as(Name alias) { - return new 
ZipkinDependencies(alias, this); - } - - /** - * Rename this table - */ - @Override - public ZipkinDependencies rename(String name) { - return new ZipkinDependencies(DSL.name(name), null); - } - - /** - * Rename this table - */ - @Override - public ZipkinDependencies rename(Name name) { - return new ZipkinDependencies(name, null); - } -} diff --git a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/internal/generated/tables/ZipkinSpans.java b/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/internal/generated/tables/ZipkinSpans.java deleted file mode 100644 index 49ad4e06efd..00000000000 --- a/zipkin-storage/mysql-v1/src/main/java/zipkin2/storage/mysql/v1/internal/generated/tables/ZipkinSpans.java +++ /dev/null @@ -1,173 +0,0 @@ -/* - * This file is generated by jOOQ. - */ -package zipkin2.storage.mysql.v1.internal.generated.tables; - - -import java.util.Arrays; -import java.util.List; - -import org.jooq.Field; -import org.jooq.ForeignKey; -import org.jooq.Index; -import org.jooq.Name; -import org.jooq.Record; -import org.jooq.Schema; -import org.jooq.Table; -import org.jooq.TableField; -import org.jooq.TableOptions; -import org.jooq.UniqueKey; -import org.jooq.impl.DSL; -import org.jooq.impl.SQLDataType; -import org.jooq.impl.TableImpl; - -import zipkin2.storage.mysql.v1.internal.generated.Indexes; -import zipkin2.storage.mysql.v1.internal.generated.Keys; -import zipkin2.storage.mysql.v1.internal.generated.Zipkin; - - -/** - * This class is generated by jOOQ. - */ -@SuppressWarnings({ "all", "unchecked", "rawtypes" }) -public class ZipkinSpans extends TableImpl { - - private static final long serialVersionUID = 1L; - - /** - * The reference instance of zipkin.zipkin_spans - */ - public static final ZipkinSpans ZIPKIN_SPANS = new ZipkinSpans(); - - /** - * The class holding records for this type - */ - @Override - public Class getRecordType() { - return Record.class; - } - - /** - * The column zipkin.zipkin_spans.trace_id_high. 
If non zero, this means the trace uses 128 bit traceIds instead of 64 bit - */ - public final TableField TRACE_ID_HIGH = createField(DSL.name("trace_id_high"), SQLDataType.BIGINT.nullable(false).defaultValue(DSL.inline("0", SQLDataType.BIGINT)), this, "If non zero, this means the trace uses 128 bit traceIds instead of 64 bit"); - - /** - * The column zipkin.zipkin_spans.trace_id. - */ - public final TableField TRACE_ID = createField(DSL.name("trace_id"), SQLDataType.BIGINT.nullable(false), this, ""); - - /** - * The column zipkin.zipkin_spans.id. - */ - public final TableField ID = createField(DSL.name("id"), SQLDataType.BIGINT.nullable(false), this, ""); - - /** - * The column zipkin.zipkin_spans.name. - */ - public final TableField NAME = createField(DSL.name("name"), SQLDataType.VARCHAR(255).nullable(false), this, ""); - - /** - * The column zipkin.zipkin_spans.remote_service_name. - */ - public final TableField REMOTE_SERVICE_NAME = createField(DSL.name("remote_service_name"), SQLDataType.VARCHAR(255), this, ""); - - /** - * The column zipkin.zipkin_spans.parent_id. - */ - public final TableField PARENT_ID = createField(DSL.name("parent_id"), SQLDataType.BIGINT, this, ""); - - /** - * The column zipkin.zipkin_spans.debug. - */ - public final TableField DEBUG = createField(DSL.name("debug"), SQLDataType.BIT, this, ""); - - /** - * The column zipkin.zipkin_spans.start_ts. Span.timestamp(): epoch micros used for endTs query and to implement TTL - */ - public final TableField START_TS = createField(DSL.name("start_ts"), SQLDataType.BIGINT, this, "Span.timestamp(): epoch micros used for endTs query and to implement TTL"); - - /** - * The column zipkin.zipkin_spans.duration. 
Span.duration(): micros used for minDuration and maxDuration query - */ - public final TableField DURATION = createField(DSL.name("duration"), SQLDataType.BIGINT, this, "Span.duration(): micros used for minDuration and maxDuration query"); - - private ZipkinSpans(Name alias, Table aliased) { - this(alias, aliased, null); - } - - private ZipkinSpans(Name alias, Table aliased, Field[] parameters) { - super(alias, null, aliased, parameters, DSL.comment(""), TableOptions.table()); - } - - /** - * Create an aliased zipkin.zipkin_spans table reference - */ - public ZipkinSpans(String alias) { - this(DSL.name(alias), ZIPKIN_SPANS); - } - - /** - * Create an aliased zipkin.zipkin_spans table reference - */ - public ZipkinSpans(Name alias) { - this(alias, ZIPKIN_SPANS); - } - - /** - * Create a zipkin.zipkin_spans table reference - */ - public ZipkinSpans() { - this(DSL.name("zipkin_spans"), null); - } - - public ZipkinSpans(Table child, ForeignKey key) { - super(child, key, ZIPKIN_SPANS); - } - - @Override - public Schema getSchema() { - return Zipkin.ZIPKIN; - } - - @Override - public List getIndexes() { - return Arrays.asList(Indexes.ZIPKIN_SPANS_NAME, Indexes.ZIPKIN_SPANS_REMOTE_SERVICE_NAME, Indexes.ZIPKIN_SPANS_START_TS, Indexes.ZIPKIN_SPANS_TRACE_ID_HIGH); - } - - @Override - public UniqueKey getPrimaryKey() { - return Keys.KEY_ZIPKIN_SPANS_PRIMARY; - } - - @Override - public List> getKeys() { - return Arrays.>asList(Keys.KEY_ZIPKIN_SPANS_PRIMARY); - } - - @Override - public ZipkinSpans as(String alias) { - return new ZipkinSpans(DSL.name(alias), this); - } - - @Override - public ZipkinSpans as(Name alias) { - return new ZipkinSpans(alias, this); - } - - /** - * Rename this table - */ - @Override - public ZipkinSpans rename(String name) { - return new ZipkinSpans(DSL.name(name), null); - } - - /** - * Rename this table - */ - @Override - public ZipkinSpans rename(Name name) { - return new ZipkinSpans(name, null); - } -} diff --git 
a/zipkin-storage/mysql-v1/src/main/resources/mysql.sql b/zipkin-storage/mysql-v1/src/main/resources/mysql.sql deleted file mode 100644 index d8aa4b706c3..00000000000 --- a/zipkin-storage/mysql-v1/src/main/resources/mysql.sql +++ /dev/null @@ -1,62 +0,0 @@ --- --- Copyright 2015-2019 The OpenZipkin Authors --- --- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except --- in compliance with the License. You may obtain a copy of the License at --- --- http://www.apache.org/licenses/LICENSE-2.0 --- --- Unless required by applicable law or agreed to in writing, software distributed under the License --- is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express --- or implied. See the License for the specific language governing permissions and limitations under --- the License. --- - -CREATE TABLE IF NOT EXISTS zipkin_spans ( - `trace_id_high` BIGINT NOT NULL DEFAULT 0 COMMENT 'If non zero, this means the trace uses 128 bit traceIds instead of 64 bit', - `trace_id` BIGINT NOT NULL, - `id` BIGINT NOT NULL, - `name` VARCHAR(255) NOT NULL, - `remote_service_name` VARCHAR(255), - `parent_id` BIGINT, - `debug` BIT(1), - `start_ts` BIGINT COMMENT 'Span.timestamp(): epoch micros used for endTs query and to implement TTL', - `duration` BIGINT COMMENT 'Span.duration(): micros used for minDuration and maxDuration query', - PRIMARY KEY (`trace_id_high`, `trace_id`, `id`) -) ENGINE=InnoDB ROW_FORMAT=COMPRESSED CHARACTER SET=utf8 COLLATE utf8_general_ci; - -ALTER TABLE zipkin_spans ADD INDEX(`trace_id_high`, `trace_id`) COMMENT 'for getTracesByIds'; -ALTER TABLE zipkin_spans ADD INDEX(`name`) COMMENT 'for getTraces and getSpanNames'; -ALTER TABLE zipkin_spans ADD INDEX(`remote_service_name`) COMMENT 'for getTraces and getRemoteServiceNames'; -ALTER TABLE zipkin_spans ADD INDEX(`start_ts`) COMMENT 'for getTraces ordering and range'; - -CREATE TABLE IF NOT EXISTS zipkin_annotations ( - `trace_id_high` 
BIGINT NOT NULL DEFAULT 0 COMMENT 'If non zero, this means the trace uses 128 bit traceIds instead of 64 bit', - `trace_id` BIGINT NOT NULL COMMENT 'coincides with zipkin_spans.trace_id', - `span_id` BIGINT NOT NULL COMMENT 'coincides with zipkin_spans.id', - `a_key` VARCHAR(255) NOT NULL COMMENT 'BinaryAnnotation.key or Annotation.value if type == -1', - `a_value` BLOB COMMENT 'BinaryAnnotation.value(), which must be smaller than 64KB', - `a_type` INT NOT NULL COMMENT 'BinaryAnnotation.type() or -1 if Annotation', - `a_timestamp` BIGINT COMMENT 'Used to implement TTL; Annotation.timestamp or zipkin_spans.timestamp', - `endpoint_ipv4` INT COMMENT 'Null when Binary/Annotation.endpoint is null', - `endpoint_ipv6` BINARY(16) COMMENT 'Null when Binary/Annotation.endpoint is null, or no IPv6 address', - `endpoint_port` SMALLINT COMMENT 'Null when Binary/Annotation.endpoint is null', - `endpoint_service_name` VARCHAR(255) COMMENT 'Null when Binary/Annotation.endpoint is null' -) ENGINE=InnoDB ROW_FORMAT=COMPRESSED CHARACTER SET=utf8 COLLATE utf8_general_ci; - -ALTER TABLE zipkin_annotations ADD UNIQUE KEY(`trace_id_high`, `trace_id`, `span_id`, `a_key`, `a_timestamp`) COMMENT 'Ignore insert on duplicate'; -ALTER TABLE zipkin_annotations ADD INDEX(`trace_id_high`, `trace_id`, `span_id`) COMMENT 'for joining with zipkin_spans'; -ALTER TABLE zipkin_annotations ADD INDEX(`trace_id_high`, `trace_id`) COMMENT 'for getTraces/ByIds'; -ALTER TABLE zipkin_annotations ADD INDEX(`endpoint_service_name`) COMMENT 'for getTraces and getServiceNames'; -ALTER TABLE zipkin_annotations ADD INDEX(`a_type`) COMMENT 'for getTraces and autocomplete values'; -ALTER TABLE zipkin_annotations ADD INDEX(`a_key`) COMMENT 'for getTraces and autocomplete values'; -ALTER TABLE zipkin_annotations ADD INDEX(`trace_id`, `span_id`, `a_key`) COMMENT 'for dependencies job'; - -CREATE TABLE IF NOT EXISTS zipkin_dependencies ( - `day` DATE NOT NULL, - `parent` VARCHAR(255) NOT NULL, - `child` VARCHAR(255) NOT 
NULL, - `call_count` BIGINT, - `error_count` BIGINT, - PRIMARY KEY (`day`, `parent`, `child`) -) ENGINE=InnoDB ROW_FORMAT=COMPRESSED CHARACTER SET=utf8 COLLATE utf8_general_ci; diff --git a/zipkin-storage/mysql-v1/src/test/java/zipkin2/storage/mysql/v1/DependencyLinkV2SpanIteratorTest.java b/zipkin-storage/mysql-v1/src/test/java/zipkin2/storage/mysql/v1/DependencyLinkV2SpanIteratorTest.java deleted file mode 100644 index 36b7c1c438f..00000000000 --- a/zipkin-storage/mysql-v1/src/test/java/zipkin2/storage/mysql/v1/DependencyLinkV2SpanIteratorTest.java +++ /dev/null @@ -1,242 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.storage.mysql.v1; - -import org.jooq.Record; -import org.jooq.Record7; -import org.jooq.SQLDialect; -import org.jooq.impl.DSL; -import org.junit.Test; -import zipkin2.Span; -import zipkin2.storage.mysql.v1.internal.generated.tables.ZipkinAnnotations; -import zipkin2.storage.mysql.v1.internal.generated.tables.ZipkinSpans; - -import static java.util.Arrays.asList; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.entry; -import static zipkin2.storage.mysql.v1.Schema.maybeGet; -import static zipkin2.v1.V1BinaryAnnotation.TYPE_BOOLEAN; -import static zipkin2.v1.V1BinaryAnnotation.TYPE_STRING; - -public class DependencyLinkV2SpanIteratorTest { - Long traceIdHigh = null; - long traceId = 1L; - Long parentId = null; - long spanId = 1L; - - /** You cannot make a dependency link unless you know the the local or peer endpoint. */ - @Test - public void whenNoServiceLabelsExist_kindIsUnknown() { - DependencyLinkV2SpanIterator iterator = - iterator(newRecord().values(traceIdHigh, traceId, parentId, spanId, "cs", -1, null)); - - Span span = iterator.next(); - assertThat(span.kind()).isNull(); - assertThat(span.localEndpoint()).isNull(); - assertThat(span.remoteEndpoint()).isNull(); - } - - @Test - public void whenOnlyAddressLabelsExist_kindIsNull() { - DependencyLinkV2SpanIterator iterator = - iterator( - newRecord().values(traceIdHigh, traceId, parentId, spanId, "ca", TYPE_BOOLEAN, "s1"), - newRecord().values(traceIdHigh, traceId, parentId, spanId, "sa", TYPE_BOOLEAN, "s2")); - Span span = iterator.next(); - - assertThat(span.kind()).isNull(); - assertThat(span.localServiceName()).isEqualTo("s1"); - assertThat(span.remoteServiceName()).isEqualTo("s2"); - } - - /** - * The linker is biased towards server spans, or client spans that know the peer localEndpoint(). 
- */ - @Test - public void whenServerLabelsAreMissing_kindIsUnknownAndLabelsAreCleared() { - DependencyLinkV2SpanIterator iterator = - iterator( - newRecord().values(traceIdHigh, traceId, parentId, spanId, "ca", TYPE_BOOLEAN, "s1")); - Span span = iterator.next(); - - assertThat(span.kind()).isNull(); - assertThat(span.localEndpoint()).isNull(); - assertThat(span.remoteEndpoint()).isNull(); - } - - /** "sr" is only applied when the local span is acting as a server */ - @Test - public void whenSrServiceExists_kindIsServer() { - DependencyLinkV2SpanIterator iterator = - iterator(newRecord().values(traceIdHigh, traceId, parentId, spanId, "sr", -1, "service")); - Span span = iterator.next(); - - assertThat(span.kind()).isEqualTo(Span.Kind.SERVER); - assertThat(span.localServiceName()).isEqualTo("service"); - assertThat(span.remoteEndpoint()).isNull(); - } - - @Test - public void errorAnnotationIgnored() { - DependencyLinkV2SpanIterator iterator = - iterator( - newRecord().values(traceIdHigh, traceId, parentId, spanId, "error", -1, "service")); - Span span = iterator.next(); - - assertThat(span.tags()).isEmpty(); - assertThat(span.annotations()).isEmpty(); - } - - @Test - public void errorTagAdded() { - DependencyLinkV2SpanIterator iterator = - iterator( - newRecord() - .values(traceIdHigh, traceId, parentId, spanId, "error", TYPE_STRING, "foo")); - Span span = iterator.next(); - - assertThat(span.tags()).containsOnly(entry("error", "")); - } - - /** "ca" indicates the peer, which is a client in the case of a server span */ - @Test - public void whenSrAndCaServiceExists_caIsThePeer() { - DependencyLinkV2SpanIterator iterator = - iterator( - newRecord().values(traceIdHigh, traceId, parentId, spanId, "ca", TYPE_BOOLEAN, "s1"), - newRecord().values(traceIdHigh, traceId, parentId, spanId, "sr", -1, "s2")); - Span span = iterator.next(); - - assertThat(span.kind()).isEqualTo(Span.Kind.SERVER); - assertThat(span.localServiceName()).isEqualTo("s2"); - 
assertThat(span.remoteServiceName()).isEqualTo("s1"); - } - - /** "cs" indicates the peer, which is a client in the case of a server span */ - @Test - public void whenSrAndCsServiceExists_caIsThePeer() { - DependencyLinkV2SpanIterator iterator = - iterator( - newRecord().values(traceIdHigh, traceId, parentId, spanId, "cs", -1, "s1"), - newRecord().values(traceIdHigh, traceId, parentId, spanId, "sr", -1, "s2")); - Span span = iterator.next(); - - assertThat(span.kind()).isEqualTo(Span.Kind.SERVER); - assertThat(span.localServiceName()).isEqualTo("s2"); - assertThat(span.remoteServiceName()).isEqualTo("s1"); - } - - /** "ca" is more authoritative than "cs" */ - @Test - public void whenCrAndCaServiceExists_caIsThePeer() { - DependencyLinkV2SpanIterator iterator = - iterator( - newRecord().values(traceIdHigh, traceId, parentId, spanId, "cs", -1, "foo"), - newRecord().values(traceIdHigh, traceId, parentId, spanId, "ca", TYPE_BOOLEAN, "s1"), - newRecord().values(traceIdHigh, traceId, parentId, spanId, "sr", -1, "s2")); - Span span = iterator.next(); - - assertThat(span.kind()).isEqualTo(Span.Kind.SERVER); - assertThat(span.localServiceName()).isEqualTo("s2"); - assertThat(span.remoteServiceName()).isEqualTo("s1"); - } - - /** - * Finagle labels two sides of the same socket "ca", V1BinaryAnnotation.TYPE_BOOLEAN, "sa" with - * the local endpoint name - */ - @Test - public void specialCasesFinagleLocalSocketLabeling_client() { - DependencyLinkV2SpanIterator iterator = - iterator( - newRecord().values(traceIdHigh, traceId, parentId, spanId, "cs", -1, "service"), - newRecord() - .values(traceIdHigh, traceId, parentId, spanId, "ca", TYPE_BOOLEAN, "service"), - newRecord() - .values(traceIdHigh, traceId, parentId, spanId, "sa", TYPE_BOOLEAN, "service")); - Span span = iterator.next(); - - // When there's no "sr" annotation, we assume it is a client. 
- assertThat(span.kind()).isEqualTo(Span.Kind.CLIENT); - assertThat(span.localEndpoint()).isNull(); - assertThat(span.remoteServiceName()).isEqualTo("service"); - } - - @Test - public void specialCasesFinagleLocalSocketLabeling_server() { - DependencyLinkV2SpanIterator iterator = - iterator( - newRecord() - .values(traceIdHigh, traceId, parentId, spanId, "ca", TYPE_BOOLEAN, "service"), - newRecord() - .values(traceIdHigh, traceId, parentId, spanId, "sa", TYPE_BOOLEAN, "service"), - newRecord().values(traceIdHigh, traceId, parentId, spanId, "sr", -1, "service")); - Span span = iterator.next(); - - // When there is an "sr" annotation, we know it is a server - assertThat(span.kind()).isEqualTo(Span.Kind.SERVER); - assertThat(span.localServiceName()).isEqualTo("service"); - assertThat(span.remoteEndpoint()).isNull(); - } - - /** - * Dependency linker works backwards: it is easier to treat a "cs" as a server span lacking its - * caller, than a client span lacking its receiver. - */ - @Test - public void csWithoutSaIsServer() { - DependencyLinkV2SpanIterator iterator = - iterator(newRecord().values(traceIdHigh, traceId, parentId, spanId, "cs", -1, "s1")); - Span span = iterator.next(); - - assertThat(span.kind()).isEqualTo(Span.Kind.SERVER); - assertThat(span.localServiceName()).isEqualTo("s1"); - assertThat(span.remoteEndpoint()).isNull(); - } - - /** Service links to empty string are confusing and offer no value. 
*/ - @Test - public void emptyToNull() { - DependencyLinkV2SpanIterator iterator = - iterator( - newRecord().values(traceIdHigh, traceId, parentId, spanId, "ca", TYPE_BOOLEAN, ""), - newRecord().values(traceIdHigh, traceId, parentId, spanId, "cs", -1, ""), - newRecord().values(traceIdHigh, traceId, parentId, spanId, "sa", TYPE_BOOLEAN, ""), - newRecord().values(traceIdHigh, traceId, parentId, spanId, "sr", -1, "")); - Span span = iterator.next(); - - assertThat(span.kind()).isNull(); - assertThat(span.localEndpoint()).isNull(); - assertThat(span.remoteEndpoint()).isNull(); - } - - static DependencyLinkV2SpanIterator iterator(Record... records) { - return new DependencyLinkV2SpanIterator( - new PeekingIterator<>(asList(records).iterator()), - maybeGet(records[0], ZipkinSpans.ZIPKIN_SPANS.TRACE_ID_HIGH, 0L), - records[0].get(ZipkinSpans.ZIPKIN_SPANS.TRACE_ID)); - } - - static Record7 newRecord() { - return DSL.using(SQLDialect.MYSQL) - .newRecord( - ZipkinSpans.ZIPKIN_SPANS.TRACE_ID_HIGH, - ZipkinSpans.ZIPKIN_SPANS.TRACE_ID, - ZipkinSpans.ZIPKIN_SPANS.PARENT_ID, - ZipkinSpans.ZIPKIN_SPANS.ID, - ZipkinAnnotations.ZIPKIN_ANNOTATIONS.A_KEY, - ZipkinAnnotations.ZIPKIN_ANNOTATIONS.A_TYPE, - ZipkinAnnotations.ZIPKIN_ANNOTATIONS.ENDPOINT_SERVICE_NAME); - } -} diff --git a/zipkin-storage/mysql-v1/src/test/java/zipkin2/storage/mysql/v1/ITMySQLStorage.java b/zipkin-storage/mysql-v1/src/test/java/zipkin2/storage/mysql/v1/ITMySQLStorage.java deleted file mode 100644 index 877ca264c18..00000000000 --- a/zipkin-storage/mysql-v1/src/test/java/zipkin2/storage/mysql/v1/ITMySQLStorage.java +++ /dev/null @@ -1,265 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.storage.mysql.v1; - -import java.sql.Connection; -import java.sql.SQLException; -import java.time.Instant; -import java.time.LocalDate; -import java.time.ZoneId; -import java.util.ArrayList; -import java.util.List; -import org.jooq.DSLContext; -import org.jooq.Query; -import org.junit.jupiter.api.Disabled; -import org.junit.jupiter.api.Nested; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.TestInfo; -import org.junit.jupiter.api.TestInstance; -import org.junit.jupiter.api.extension.RegisterExtension; -import zipkin2.DependencyLink; -import zipkin2.storage.StorageComponent; - -import static zipkin2.storage.ITDependencies.aggregateLinks; -import static zipkin2.storage.mysql.v1.internal.generated.tables.ZipkinDependencies.ZIPKIN_DEPENDENCIES; - -@TestInstance(TestInstance.Lifecycle.PER_CLASS) -class ITMySQLStorage { - - @RegisterExtension MySQLExtension mysql = new MySQLExtension(); - - @Nested - class ITTraces extends zipkin2.storage.ITTraces { - @Override protected StorageComponent.Builder newStorageBuilder(TestInfo testInfo) { - return mysql.computeStorageBuilder(); - } - - @Override @Test @Disabled("v1 format is lossy in conversion when rows as upsert") - protected void getTrace_differentiatesDebugFromShared(TestInfo testInfo) { - } - - @Override @Test @Disabled("v1 format is lossy in conversion when rows as upsert") - protected void getTraces_differentiatesDebugFromShared(TestInfo testInfo) { - } - - @Override protected boolean returnsRawSpans() { - return false; - } - - @Override @Test @Disabled("No consumer-side 
span deduplication") - public void getTrace_deduplicates(TestInfo testInfo) { - } - - @Override public void clear() { - storage.clear(); - } - } - - @Nested - class ITSpanStore extends zipkin2.storage.ITSpanStore { - @Override protected StorageComponent.Builder newStorageBuilder(TestInfo testInfo) { - return mysql.computeStorageBuilder(); - } - - @Override @Test @Disabled("v1 format is lossy in conversion when rows as upsert") - protected void getTraces_differentiatesDebugFromShared(TestInfo testInfo) { - } - - @Override protected boolean returnsRawSpans() { - return false; - } - - @Override public void clear() { - storage.clear(); - } - } - - @Nested - class ITSpanStoreHeavy extends zipkin2.storage.ITSpanStoreHeavy { - @Override protected StorageComponent.Builder newStorageBuilder(TestInfo testInfo) { - return mysql.computeStorageBuilder(); - } - - @Override protected boolean returnsRawSpans() { - return false; - } - - @Override public void clear() { - storage.clear(); - } - } - - @Nested - class ITStrictTraceIdFalse extends zipkin2.storage.ITStrictTraceIdFalse { - @Override protected StorageComponent.Builder newStorageBuilder(TestInfo testInfo) { - return mysql.computeStorageBuilder(); - } - - @Override protected boolean returnsRawSpans() { - return false; - } - - @Override public void clear() { - storage.clear(); - } - } - - @Nested - class ITSearchEnabledFalse extends zipkin2.storage.ITSearchEnabledFalse { - @Override protected StorageComponent.Builder newStorageBuilder(TestInfo testInfo) { - return mysql.computeStorageBuilder(); - } - - @Override protected boolean returnsRawSpans() { - return false; - } - - @Override public void clear() { - storage.clear(); - } - } - - @Nested - class ITServiceAndSpanNames extends zipkin2.storage.ITServiceAndSpanNames { - @Override protected StorageComponent.Builder newStorageBuilder(TestInfo testInfo) { - return mysql.computeStorageBuilder(); - } - - @Override protected boolean returnsRawSpans() { - return false; - } - - 
@Override public void clear() { - storage.clear(); - } - } - - @Nested - class ITAutocompleteTags extends zipkin2.storage.ITAutocompleteTags { - @Override protected StorageComponent.Builder newStorageBuilder(TestInfo testInfo) { - return mysql.computeStorageBuilder(); - } - - @Override protected boolean returnsRawSpans() { - return false; - } - - @Override public void clear() { - storage.clear(); - } - } - - @Nested - class ITDependenciesOnDemand extends zipkin2.storage.ITDependencies { - @Override protected StorageComponent.Builder newStorageBuilder(TestInfo testInfo) { - return mysql.computeStorageBuilder(); - } - - @Override protected boolean returnsRawSpans() { - return false; - } - - @Override public void clear() { - storage.clear(); - } - } - - @Nested - class ITDependenciesHeavyOnDemand extends zipkin2.storage.ITDependenciesHeavy { - @Override protected StorageComponent.Builder newStorageBuilder(TestInfo testInfo) { - return mysql.computeStorageBuilder(); - } - - @Override protected boolean returnsRawSpans() { - return false; - } - - @Override public void clear() { - storage.clear(); - } - } - - @Nested - class ITDependenciesPreAggregated extends zipkin2.storage.ITDependencies { - @Override protected StorageComponent.Builder newStorageBuilder(TestInfo testInfo) { - return mysql.computeStorageBuilder(); - } - - @Override protected boolean returnsRawSpans() { - return false; - } - - @Override public void clear() { - storage.clear(); - } - - /** - * The current implementation does not include dependency aggregation. 
It includes retrieval of - * pre-aggregated links, usually made via zipkin-dependencies - */ - @Override protected void processDependencies(List spans) throws Exception { - aggregateDependencies(storage, spans); - } - } - - @Nested - class ITDependenciesHeavyPreAggregated extends zipkin2.storage.ITDependenciesHeavy { - @Override protected StorageComponent.Builder newStorageBuilder(TestInfo testInfo) { - return mysql.computeStorageBuilder(); - } - - @Override protected boolean returnsRawSpans() { - return false; - } - - @Override public void clear() { - storage.clear(); - } - - /** - * The current implementation does not include dependency aggregation. It includes retrieval of - * pre-aggregated links, usually made via zipkin-dependencies - */ - @Override protected void processDependencies(List spans) throws Exception { - aggregateDependencies(storage, spans); - } - } - - static void aggregateDependencies(MySQLStorage storage, List spans) - throws SQLException { - try (Connection conn = storage.datasource.getConnection()) { - DSLContext context = storage.context.get(conn); - - // batch insert the rows at timestamp midnight - List inserts = new ArrayList<>(); - aggregateLinks(spans).forEach((midnight, links) -> { - - LocalDate day = Instant.ofEpochMilli(midnight) - .atZone(ZoneId.of("UTC")) - .toLocalDate(); - - for (DependencyLink link : links) { - inserts.add(context.insertInto(ZIPKIN_DEPENDENCIES) - .set(ZIPKIN_DEPENDENCIES.DAY, day) - .set(ZIPKIN_DEPENDENCIES.PARENT, link.parent()) - .set(ZIPKIN_DEPENDENCIES.CHILD, link.child()) - .set(ZIPKIN_DEPENDENCIES.CALL_COUNT, link.callCount()) - .set(ZIPKIN_DEPENDENCIES.ERROR_COUNT, link.errorCount()) - .onDuplicateKeyIgnore()); - } - }); - context.batch(inserts).execute(); - } - } -} diff --git a/zipkin-storage/mysql-v1/src/test/java/zipkin2/storage/mysql/v1/MySQLExtension.java b/zipkin-storage/mysql-v1/src/test/java/zipkin2/storage/mysql/v1/MySQLExtension.java deleted file mode 100644 index 263ab6af689..00000000000 --- 
a/zipkin-storage/mysql-v1/src/test/java/zipkin2/storage/mysql/v1/MySQLExtension.java +++ /dev/null @@ -1,125 +0,0 @@ -/* - * Copyright 2015-2020 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.storage.mysql.v1; - -import java.sql.Connection; -import java.sql.SQLException; -import javax.sql.DataSource; -import org.junit.jupiter.api.extension.AfterAllCallback; -import org.junit.jupiter.api.extension.BeforeAllCallback; -import org.junit.jupiter.api.extension.ExtensionContext; -import org.mariadb.jdbc.MariaDbDataSource; -import org.opentest4j.TestAbortedException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.testcontainers.containers.GenericContainer; -import org.testcontainers.containers.output.Slf4jLogConsumer; -import org.testcontainers.containers.wait.strategy.Wait; -import org.testcontainers.jdbc.ContainerLessJdbcDelegate; -import zipkin2.CheckResult; - -import static org.junit.jupiter.api.Assumptions.assumeTrue; -import static org.testcontainers.ext.ScriptUtils.runInitScript; -import static org.testcontainers.utility.DockerImageName.parse; - -class MySQLExtension implements BeforeAllCallback, AfterAllCallback { - static final Logger LOGGER = LoggerFactory.getLogger(MySQLExtension.class); - - final MySQLContainer container = new MySQLContainer(); - - @Override public void beforeAll(ExtensionContext context) throws Exception { - if (context.getRequiredTestClass().getEnclosingClass() != null) { - 
// Only run once in outermost scope. - return; - } - - container.start(); - LOGGER.info("Using hostPort " + host() + ":" + port()); - - try (MySQLStorage result = computeStorageBuilder().build()) { - CheckResult check = result.check(); - assumeTrue(check.ok(), () -> "Could not connect to storage, skipping test: " - + check.error().getMessage()); - - dropAndRecreateSchema(result.datasource); - } - } - - /** - * MySQL doesn't auto-install schema. However, we may have changed it since the last time this - * image was published. So, we drop and re-create the schema before running any tests. - */ - static void dropAndRecreateSchema(DataSource datasource) throws SQLException { - String[] scripts = { - // Drop all previously created tables in zipkin.* - "drop_zipkin_tables.sql", - - // Populate the schema - "mysql.sql" - }; - - try (Connection connection = datasource.getConnection()) { - for (String scriptPath : scripts) { - runInitScript(new ContainerLessJdbcDelegate(connection), scriptPath); - } - } - } - - @Override public void afterAll(ExtensionContext context) { - if (context.getRequiredTestClass().getEnclosingClass() != null) { - // Only run once in outermost scope. 
- return; - } - - container.stop(); - } - - MySQLStorage.Builder computeStorageBuilder() { - final MariaDbDataSource dataSource; - - try { - dataSource = new MariaDbDataSource(host(), port(), "zipkin"); - dataSource.setUser("zipkin"); - dataSource.setPassword("zipkin"); - dataSource.setProperties("autoReconnect=true&useUnicode=yes&characterEncoding=UTF-8"); - } catch (SQLException e) { - throw new AssertionError(e); - } - - return new MySQLStorage.Builder() - .datasource(dataSource) - .executor(Runnable::run); - } - - String host() { - return container.getHost(); - } - - int port() { - return container.getMappedPort(3306); - } - - // mostly waiting for https://github.com/testcontainers/testcontainers-java/issues/3537 - static final class MySQLContainer extends GenericContainer { - MySQLContainer() { - super(parse("ghcr.io/openzipkin/zipkin-mysql:2.23.2")); - if ("true".equals(System.getProperty("docker.skip"))) { - throw new TestAbortedException("${docker.skip} == true"); - } - addExposedPort(3306); - waitStrategy = Wait.forHealthcheck(); - withLogConsumer(new Slf4jLogConsumer(LOGGER)); - } - } -} diff --git a/zipkin-storage/mysql-v1/src/test/java/zipkin2/storage/mysql/v1/MySQLStorageTest.java b/zipkin-storage/mysql-v1/src/test/java/zipkin2/storage/mysql/v1/MySQLStorageTest.java deleted file mode 100644 index 91c0325d5b3..00000000000 --- a/zipkin-storage/mysql-v1/src/test/java/zipkin2/storage/mysql/v1/MySQLStorageTest.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. 
See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.storage.mysql.v1; - -import java.sql.SQLException; -import javax.sql.DataSource; -import org.junit.Test; -import zipkin2.CheckResult; -import zipkin2.Component; - -import static java.util.Arrays.asList; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -public class MySQLStorageTest { - - @Test public void check_failsInsteadOfThrowing() throws SQLException { - DataSource dataSource = mock(DataSource.class); - when(dataSource.getConnection()).thenThrow(new SQLException("foo")); - - CheckResult result = storage(dataSource).check(); - - assertThat(result.ok()).isFalse(); - assertThat(result.error()) - .isInstanceOf(SQLException.class); - } - - @Test public void returns_whitelisted_autocompletekey() throws Exception { - DataSource dataSource = mock(DataSource.class); - assertThat(storage(dataSource).autocompleteTags().getKeys().execute()) - .containsOnlyOnce("http.method"); - } - - static MySQLStorage storage(DataSource dataSource) { - return MySQLStorage.newBuilder() - .strictTraceId(false) - .executor(Runnable::run) - .datasource(dataSource) - .autocompleteKeys(asList("http.method")) - .build(); - } - - /** - * The {@code toString()} of {@link Component} implementations appear in health check endpoints. - * Since these are likely to be exposed in logs and other monitoring tools, care should be taken - * to ensure {@code toString()} output is a reasonable length and does not contain sensitive - * information. 
- */ - @Test public void toStringContainsOnlySummaryInformation() { - DataSource datasource = mock(DataSource.class); - when(datasource.toString()).thenReturn("Blamo"); - - assertThat(storage(datasource)).hasToString("MySQLStorage{datasource=Blamo}"); - } -} diff --git a/zipkin-storage/mysql-v1/src/test/java/zipkin2/storage/mysql/v1/SchemaTest.java b/zipkin-storage/mysql-v1/src/test/java/zipkin2/storage/mysql/v1/SchemaTest.java deleted file mode 100644 index f7cb7c7c505..00000000000 --- a/zipkin-storage/mysql-v1/src/test/java/zipkin2/storage/mysql/v1/SchemaTest.java +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package zipkin2.storage.mysql.v1; - -import java.sql.SQLException; -import java.sql.SQLSyntaxErrorException; -import javax.sql.DataSource; -import org.jooq.conf.Settings; -import org.jooq.exception.DataAccessException; -import org.junit.Test; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -public class SchemaTest { - DataSource dataSource = mock(DataSource.class); - Schema schema = new Schema( - dataSource, - new DSLContexts(new Settings().withRenderSchema(false), null), - true - ); - - @Test - public void hasIpv6_falseWhenKnownSQLState() throws SQLException { - SQLSyntaxErrorException sqlException = new SQLSyntaxErrorException( - "Unknown column 'zipkin_annotations.endpoint_ipv6' in 'field list'", - "42S22", 1054); - - // cheats to lower mock count: this exception is really thrown during execution of the query - when(dataSource.getConnection()).thenThrow( - new DataAccessException(sqlException.getMessage(), sqlException)); - - assertThat(schema.hasIpv6).isFalse(); - } - - /** - * This returns false instead of failing when the SQLState code doesn't imply the column is - * missing. This is to prevent zipkin from crashing due to scenarios we haven't thought up, yet. - * The root error goes into the log in this case. 
- */ - @Test - public void hasIpv6_falseWhenUnknownSQLState() throws SQLException { - SQLSyntaxErrorException sqlException = new SQLSyntaxErrorException( - "java.sql.SQLSyntaxErrorException: Table 'zipkin.zipkin_annotations' doesn't exist", - "42S02", 1146); - DataSource dataSource = mock(DataSource.class); - - // cheats to lower mock count: this exception is really thrown during execution of the query - when(dataSource.getConnection()).thenThrow( - new DataAccessException(sqlException.getMessage(), sqlException)); - - assertThat(schema.hasIpv6).isFalse(); - } - - @Test - public void hasErrorCount_falseWhenKnownSQLState() throws SQLException { - SQLSyntaxErrorException sqlException = new SQLSyntaxErrorException( - "Unknown column 'zipkin_dependencies.error_count' in 'field list'", - "42S22", 1054); - - // cheats to lower mock count: this exception is really thrown during execution of the query - when(dataSource.getConnection()).thenThrow( - new DataAccessException(sqlException.getMessage(), sqlException)); - - assertThat(schema.hasErrorCount).isFalse(); - } - - /** - * This returns false instead of failing when the SQLState code doesn't imply the column is - * missing. This is to prevent zipkin from crashing due to scenarios we haven't thought up, yet. - * The root error goes into the log in this case. 
- */ - @Test - public void hasErrorCount_falseWhenUnknownSQLState() throws SQLException { - SQLSyntaxErrorException sqlException = new SQLSyntaxErrorException( - "java.sql.SQLSyntaxErrorException: Table 'zipkin.zipkin_dependencies' doesn't exist", - "42S02", 1146); - DataSource dataSource = mock(DataSource.class); - - // cheats to lower mock count: this exception is really thrown during execution of the query - when(dataSource.getConnection()).thenThrow( - new DataAccessException(sqlException.getMessage(), sqlException)); - - assertThat(schema.hasErrorCount).isFalse(); - } - - @Test - public void hasDependencies_missing() throws SQLException { - SQLSyntaxErrorException sqlException = new SQLSyntaxErrorException( - "SQL [select count(*) from `zipkin_dependencies`]; Table 'zipkin.zipkin_dependencies' doesn't exist\n" - + " Query is : select count(*) from `zipkin_dependencies`", - "42S02", 1146); - DataSource dataSource = mock(DataSource.class); - - // cheats to lower mock count: this exception is really thrown during execution of the query - when(dataSource.getConnection()).thenThrow( - new DataAccessException(sqlException.getMessage(), sqlException)); - - assertThat(schema.hasPreAggregatedDependencies).isFalse(); - } - - @Test - public void hasRemoteServiceName_falseWhenKnownSQLState() throws SQLException { - SQLSyntaxErrorException sqlException = new SQLSyntaxErrorException( - "Unknown column 'zipkin_spans.remote_serviceName' in 'field list'", - "42S22", 1054); - - // cheats to lower mock count: this exception is really thrown during execution of the query - when(dataSource.getConnection()).thenThrow( - new DataAccessException(sqlException.getMessage(), sqlException)); - - assertThat(schema.hasRemoteServiceName).isFalse(); - } -} diff --git a/zipkin-storage/mysql-v1/src/test/java/zipkin2/storage/mysql/v1/SelectSpansAndAnnotationsTest.java b/zipkin-storage/mysql-v1/src/test/java/zipkin2/storage/mysql/v1/SelectSpansAndAnnotationsTest.java deleted file mode 100644 
index 9a26bc21b15..00000000000 --- a/zipkin-storage/mysql-v1/src/test/java/zipkin2/storage/mysql/v1/SelectSpansAndAnnotationsTest.java +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Copyright 2015-2019 The OpenZipkin Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package zipkin2.storage.mysql.v1; - -import org.jooq.Record4; -import org.jooq.SQLDialect; -import org.jooq.impl.DSL; -import org.junit.Test; -import zipkin2.Endpoint; -import zipkin2.v1.V1Annotation; -import zipkin2.v1.V1BinaryAnnotation; -import zipkin2.v1.V1Span; - -import static org.assertj.core.api.Assertions.assertThat; -import static zipkin2.storage.mysql.v1.internal.generated.tables.ZipkinAnnotations.ZIPKIN_ANNOTATIONS; - -public class SelectSpansAndAnnotationsTest { - @Test - public void processAnnotationRecord_nulls() { - Record4 annotationRecord = - annotationRecord(null, null, null, null); - - V1Span.Builder builder = V1Span.newBuilder().traceId(1).id(1); - SelectSpansAndAnnotations.processAnnotationRecord(annotationRecord, builder, null); - - assertThat(builder) - .usingRecursiveComparison().isEqualTo(V1Span.newBuilder().traceId(1).id(1)); - } - - @Test - public void processAnnotationRecord_annotation() { - Record4 annotationRecord = annotationRecord(-1, 0L, "foo", null); - - V1Span.Builder builder = V1Span.newBuilder().traceId(1).id(1); - SelectSpansAndAnnotations.processAnnotationRecord(annotationRecord, builder, null); - - assertThat(builder.build().annotations().get(0)) - 
.isEqualTo(V1Annotation.create(0L, "foo", null)); - } - - @Test - public void processAnnotationRecord_tag() { - Record4 annotationRecord = - annotationRecord(6, null, "foo", new byte[0]); - - V1Span.Builder builder = V1Span.newBuilder().traceId(1).id(1); - SelectSpansAndAnnotations.processAnnotationRecord(annotationRecord, builder, null); - - assertThat(builder.build().binaryAnnotations().get(0)) - .isEqualTo(V1BinaryAnnotation.createString("foo", "", null)); - } - - @Test - public void processAnnotationRecord_address() { - Record4 annotationRecord = - annotationRecord(0, null, "ca", new byte[] {1}); - Endpoint ep = Endpoint.newBuilder().serviceName("foo").build(); - - V1Span.Builder builder = V1Span.newBuilder().traceId(1).id(1); - SelectSpansAndAnnotations.processAnnotationRecord(annotationRecord, builder, ep); - - assertThat(builder.build().binaryAnnotations().get(0)) - .isEqualTo(V1BinaryAnnotation.createAddress("ca", ep)); - } - - @Test - public void processAnnotationRecord_address_skipMissingEndpoint() { - Record4 annotationRecord = - annotationRecord(0, null, "ca", new byte[] {1}); - - V1Span.Builder builder = V1Span.newBuilder().traceId(1).id(1); - SelectSpansAndAnnotations.processAnnotationRecord(annotationRecord, builder, null); - - assertThat(builder.build().binaryAnnotations()).isEmpty(); - } - - @Test - public void processAnnotationRecord_address_skipWrongKey() { - Record4 annotationRecord = - annotationRecord(0, null, "sr", new byte[] {1}); - Endpoint ep = Endpoint.newBuilder().serviceName("foo").build(); - - V1Span.Builder builder = V1Span.newBuilder().traceId(1).id(1); - SelectSpansAndAnnotations.processAnnotationRecord(annotationRecord, builder, ep); - - assertThat(builder.build().binaryAnnotations()).isEmpty(); - } - - static Record4 annotationRecord( - Integer type, Long timestamp, String key, byte[] value) { - return DSL.using(SQLDialect.MYSQL) - .newRecord( - ZIPKIN_ANNOTATIONS.A_TYPE, - ZIPKIN_ANNOTATIONS.A_TIMESTAMP, - 
ZIPKIN_ANNOTATIONS.A_KEY, - ZIPKIN_ANNOTATIONS.A_VALUE) - .value1(type) - .value2(timestamp) - .value3(key) - .value4(value); - } - - @Test - public void endpoint_justIpv4() { - Record4 endpointRecord = - endpointRecord("", 127 << 24 | 1, (short) 0, new byte[0]); - - assertThat(SelectSpansAndAnnotations.endpoint(endpointRecord)) - .isEqualTo(Endpoint.newBuilder().ip("127.0.0.1").build()); - } - - static Record4 endpointRecord( - String serviceName, Integer ipv4, Short port, byte[] ipv6) { - return DSL.using(SQLDialect.MYSQL) - .newRecord( - ZIPKIN_ANNOTATIONS.ENDPOINT_SERVICE_NAME, - ZIPKIN_ANNOTATIONS.ENDPOINT_IPV4, - ZIPKIN_ANNOTATIONS.ENDPOINT_PORT, - ZIPKIN_ANNOTATIONS.ENDPOINT_IPV6) - .value1(serviceName) - .value2(ipv4) - .value3(port) - .value4(ipv6); - } -} diff --git a/zipkin-storage/mysql-v1/src/test/resources/drop_zipkin_tables.sql b/zipkin-storage/mysql-v1/src/test/resources/drop_zipkin_tables.sql deleted file mode 100644 index f68242de30c..00000000000 --- a/zipkin-storage/mysql-v1/src/test/resources/drop_zipkin_tables.sql +++ /dev/null @@ -1,8 +0,0 @@ -SET FOREIGN_KEY_CHECKS = 0; -SET @tables = NULL; -SELECT GROUP_CONCAT(table_schema, '.', table_name) INTO @tables FROM information_schema.tables WHERE table_schema = 'zipkin'; -SET @tables = CONCAT('DROP TABLE ', @tables); -PREPARE stmt FROM @tables; -EXECUTE stmt; -DEALLOCATE PREPARE stmt; -SET FOREIGN_KEY_CHECKS = 1; diff --git a/zipkin-storage/mysql-v1/src/test/resources/simplelogger.properties b/zipkin-storage/mysql-v1/src/test/resources/simplelogger.properties deleted file mode 100644 index 722851b466c..00000000000 --- a/zipkin-storage/mysql-v1/src/test/resources/simplelogger.properties +++ /dev/null @@ -1,9 +0,0 @@ -# See https://www.slf4j.org/api/org/slf4j/impl/SimpleLogger.html for the full list of config options - -org.slf4j.simpleLogger.logFile=System.out -org.slf4j.simpleLogger.defaultLogLevel=warn -org.slf4j.simpleLogger.showDateTime=true -org.slf4j.simpleLogger.dateTimeFormat=yyyy-MM-dd 
HH:mm:ss:SSS - -# stop huge spam -org.slf4j.simpleLogger.log.org.testcontainers.dockerclient=off diff --git a/zipkin-storage/pom.xml b/zipkin-storage/pom.xml deleted file mode 100644 index 06ecf4f491e..00000000000 --- a/zipkin-storage/pom.xml +++ /dev/null @@ -1,76 +0,0 @@ - - - - 4.0.0 - - - io.zipkin - zipkin-parent - 2.24.4-SNAPSHOT - - - io.zipkin.zipkin2 - zipkin-storage-parent - Storage - pom - - - ${project.basedir}/.. - 1.8 - java18 - - - - cassandra - mysql-v1 - elasticsearch - - - - - ${project.groupId} - zipkin - ${project.version} - - - - - com.google.code.gson - gson - ${gson.version} - test - - - ${project.groupId} - zipkin-tests - ${project.version} - test - - - org.slf4j - slf4j-simple - ${slf4j.version} - test - - - org.testcontainers - testcontainers - ${testcontainers.version} - test - - -