From c924b19f01e6ad706aa5ced8e7696746bfb30072 Mon Sep 17 00:00:00 2001
From: Emmanuel T Odeke
Date: Thu, 17 Oct 2024 01:42:55 -0700
Subject: [PATCH 1/3] feat(observability): spin up micro-benchmarking
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This requires adding SPANNER_RUN_BENCHMARKS=true to your environment;
the benchmarks can then be run with `npm run observability-test`, whose
results look like:

```shell
Benchmarking
Total Runs: 10000
databaseRunSelect1AsyncAwait
    RAM Untraced(206.453kB) vs Traced (240.953kB): increase by (34.500kB) or 16.71%
    RAM Untraced(206.453kB) vs Traced+OTEL(243.391kB): increase by (36.938kB) or 17.89%
    Time Untraced(911.057µs) vs Traced (1.014ms): increase by (102.749µs) or 11.28%
    Time Untraced(911.057µs) vs Traced+OTEL(1.017ms): increase by (106.116µs) or 11.65%
databaseRunSelect1Callback
    RAM Untraced(210.219kB) vs Traced (238.797kB): increase by (28.578kB) or 13.59%
    RAM Untraced(210.219kB) vs Traced+OTEL(242.914kB): increase by (32.695kB) or 15.55%
    Time Untraced(890.877µs) vs Traced (997.541µs): increase by (106.664µs) or 11.97%
    Time Untraced(890.877µs) vs Traced+OTEL(1.019ms): increase by (127.997µs) or 14.37%
databaseRunTransactionAsyncTxRunUpdate
    RAM Untraced(308.898kB) vs Traced (325.313kB): increase by (16.414kB) or 5.31%
    RAM Untraced(308.898kB) vs Traced+OTEL(339.836kB): increase by (30.938kB) or 10.02%
    Time Untraced(1.510ms) vs Traced (1.652ms): increase by (141.330µs) or 9.36%
    Time Untraced(1.510ms) vs Traced+OTEL(1.668ms): increase by (157.466µs) or 10.43%
databaseRunTransactionAsyncTxRun
    RAM Untraced(298.977kB) vs Traced (326.227kB): increase by (27.250kB) or 9.11%
    RAM Untraced(298.977kB) vs Traced+OTEL(315.164kB): increase by (16.188kB) or 5.41%
    Time Untraced(1.450ms) vs Traced (1.581ms): increase by (130.870µs) or 9.03%
    Time Untraced(1.450ms) vs Traced+OTEL(1.615ms): increase by (165.426µs) or 11.41%
```
---
 observability-test/benchmark.ts   | 143 +++++++++++++
 observability-test/comparisons.ts | 327 ++++++++++++++++++++++++++++++
 observability-test/spanner.ts     |   2 +
 package.json                      |   3 +-
 4 files changed, 474 insertions(+), 1 deletion(-)
 create mode 100644 observability-test/benchmark.ts
 create mode 100644 observability-test/comparisons.ts

diff --git a/observability-test/benchmark.ts b/observability-test/benchmark.ts
new file mode 100644
index 000000000..828838d7a
--- /dev/null
+++ b/observability-test/benchmark.ts
@@ -0,0 +1,143 @@
+/*!
+ * Copyright 2024 Google LLC. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const lessComparator = (a, b) => {
+  if (a < b) return -1;
+  if (a > b) return 1;
+  return 0;
+};
+
+/*
+ * runBenchmarks runs each of the functions in runners ${nRuns} times,
+ * collecting RAM usage and time spent on every run, and then produces
+ * a map of functionNames to the percentiles of RAM usage and time spent.
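+ *
+ * For illustration only (the exact shape is defined by the code below),
+ * the object handed to `done` looks roughly like:
+ *
+ *   {
+ *     _totalRuns: 10000,
+ *     databaseRunSelect1AsyncAwait: {
+ *       ram:       {p50, p75, p90, p95, p99, p50_s, ..., p99_s}, // bytes, plus humanized strings
+ *       timeSpent: {p50, p75, p90, p95, p99, p50_s, ..., p99_s}, // nanoseconds (bigint), plus humanized strings
+ *     },
+ *   }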
+ */
+export async function runBenchmarks(runners: Function[], done: Function) {
+  const nRuns = 10000;
+  const benchmarkValues = {_totalRuns: nRuns};
+
+  let k = 0;
+  for (k = 0; k < runners.length; k++) {
+    const fn = runners[k];
+    const functionName = fn.name;
+    const timeSpentL: bigint[] = [];
+    const ramL: number[] = [];
+    let i = 0;
+
+    // Warm up runs to ensure stable behavior.
+    for (i = 0; i < 8; i++) {
+      const value = await fn();
+    }
+
+    for (i = 0; i < nRuns; i++) {
+      const startTime: bigint = process.hrtime.bigint();
+      const startHeapUsedBytes: number = process.memoryUsage().heapUsed;
+      const value = await fn();
+      timeSpentL.push(process.hrtime.bigint() - startTime);
+      ramL.push(process.memoryUsage().heapUsed - startHeapUsedBytes);
+    }
+
+    timeSpentL.sort(lessComparator);
+    ramL.sort(lessComparator);
+
+    benchmarkValues[functionName] = {
+      ram: percentiles(functionName, ramL, 'bytes'),
+      timeSpent: percentiles(functionName, timeSpentL, 'time'),
+    };
+  }
+
+  done(benchmarkValues);
+}
+
+function percentiles(method, sortedValues, kind) {
+  const n = sortedValues.length;
+  const p50 = sortedValues[Math.floor(n * 0.5)];
+  const p75 = sortedValues[Math.floor(n * 0.75)];
+  const p90 = sortedValues[Math.floor(n * 0.9)];
+  const p95 = sortedValues[Math.floor(n * 0.95)];
+  const p99 = sortedValues[Math.floor(n * 0.99)];
+
+  return {
+    p50: p50,
+    p75: p75,
+    p90: p90,
+    p95: p95,
+    p99: p99,
+    p50_s: humanize(p50, kind),
+    p75_s: humanize(p75, kind),
+    p90_s: humanize(p90, kind),
+    p95_s: humanize(p95, kind),
+    p99_s: humanize(p99, kind),
+  };
+}
+
+function humanize(values, kind) {
+  let converterFn = humanizeTime;
+  if (kind === 'bytes') {
+    converterFn = humanizeBytes;
+  }
+  return converterFn(values);
+}
+
+const secondUnits = ['ns', 'µs', 'ms', 's'];
+interface unitDivisor {
+  unit: string;
+  divisor: number;
+}
+const pastSecondUnits: unitDivisor[] = [
+  {unit: 'min', divisor: 60},
+  {unit: 'hr', divisor: 60},
+  {unit: 'day', divisor: 24},
+  {unit: 'week', divisor: 7},
+  {unit: 'month', divisor: 30},
+];
+function humanizeTime(ns) {
+  const sign: number = ns < 0 ? -1 : +1;
+  let value = Math.abs(Number(ns));
+  for (const unit of secondUnits) {
+    if (value < 1000) {
+      return `${(sign * value).toFixed(3)}${unit}`;
+    }
+    value /= 1000;
+  }
+
+  let i = 0;
+  for (i = 0; i < pastSecondUnits.length; i++) {
+    const unitPlusValue = pastSecondUnits[i];
+    const unitName = unitPlusValue.unit;
+    const divisor = unitPlusValue.divisor;
+    if (value < divisor) {
+      return `${(sign * value).toFixed(3)}${unitName}`;
+    }
+    value = value / divisor;
+  }
+  return `${(sign * value).toFixed(3)}${pastSecondUnits[pastSecondUnits.length - 1].unit}`;
+}
+
+const bytesUnits = ['B', 'kB', 'MB', 'GB', 'TB', 'PB', 'ExB'];
+function humanizeBytes(b) {
+  const sign: number = b < 0 ? -1 : +1;
+  let value = Math.abs(b);
+  for (const unit of bytesUnits) {
+    if (value < 1024) {
+      return `${(sign * value).toFixed(3)}${unit}`;
+    }
+    value = value / 1024;
+  }
+
+  return `${(sign * value).toFixed(3)}${bytesUnits[bytesUnits.length - 1]}`;
+}
+export {humanizeTime, humanizeBytes};
diff --git a/observability-test/comparisons.ts b/observability-test/comparisons.ts
new file mode 100644
index 000000000..7bc8145fb
--- /dev/null
+++ b/observability-test/comparisons.ts
@@ -0,0 +1,327 @@
+/*!
+ * Copyright 2024 Google LLC. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import * as assert from 'assert'; +import {grpc} from 'google-gax'; +import {google} from '../protos/protos'; +import {Database, Instance, Spanner} from '../src'; +import { + Database as DatabaseUntraced, + Instance as InstanceUntraced, + Spanner as SpannerUntraced, +} from '@google-cloud/spanner'; +import protobuf = google.spanner.v1; +import * as mock from '../test/mockserver/mockspanner'; +import * as mockInstanceAdmin from '../test/mockserver/mockinstanceadmin'; +import * as mockDatabaseAdmin from '../test/mockserver/mockdatabaseadmin'; +const { + AlwaysOnSampler, + NodeTracerProvider, + InMemorySpanExporter, +} = require('@opentelemetry/sdk-trace-node'); +const {SimpleSpanProcessor} = require('@opentelemetry/sdk-trace-base'); +import {humanizeBytes, humanizeTime, runBenchmarks} from './benchmark'; + +const {ObservabilityOptions} = require('../src/instrument'); + +const selectSql = 'SELECT 1'; +const updateSql = 'UPDATE FOO SET BAR=1 WHERE BAZ=2'; + +/** A simple result set for SELECT 1. */ +function createSelect1ResultSet(): protobuf.ResultSet { + const fields = [ + protobuf.StructType.Field.create({ + name: 'NUM', + type: protobuf.Type.create({code: protobuf.TypeCode.INT64}), + }), + ]; + const metadata = new protobuf.ResultSetMetadata({ + rowType: new protobuf.StructType({ + fields, + }), + }); + return protobuf.ResultSet.create({ + metadata, + rows: [{values: [{stringValue: '1'}]}], + }); +} + +interface setupResults { + server: grpc.Server; + spanner: Spanner | SpannerUntraced; + spannerMock: mock.MockSpanner; +} + +async function setup( + traced: boolean, + observabilityOptions?: typeof ObservabilityOptions +): Promise { + const server = new grpc.Server(); + + const spannerMock = mock.createMockSpanner(server); + mockInstanceAdmin.createMockInstanceAdmin(server); + mockDatabaseAdmin.createMockDatabaseAdmin(server); + + const port: number = await new Promise((resolve, reject) => { + server.bindAsync( + '0.0.0.0:0', + grpc.ServerCredentials.createInsecure(), + (err, assignedPort) => { + if (err) { + reject(err); + } else { + resolve(assignedPort); + } + } + ); + }); + + spannerMock.putStatementResult( + selectSql, + mock.StatementResult.resultSet(createSelect1ResultSet()) + ); + spannerMock.putStatementResult( + updateSql, + mock.StatementResult.updateCount(1) + ); + + let spanner: Spanner | SpannerUntraced; + + if (traced) { + spanner = new Spanner({ + projectId: 'observability-project-id', + servicePath: 'localhost', + port, + sslCreds: grpc.credentials.createInsecure(), + observabilityOptions: observabilityOptions, + }); + } else { + spanner = new SpannerUntraced({ + projectId: 'observability-project-id', + servicePath: 'localhost', + port, + sslCreds: grpc.credentials.createInsecure(), + }); + } + + return Promise.resolve({ + spanner: spanner, + server: server, + spannerMock: spannerMock, + }); +} + +describe('Benchmarking', () => { + if (!process.env.SPANNER_RUN_BENCHMARKS) { + console.log( + 'Skipping micro-benchmarking because SPANNER_RUN_BENCHMARKS is not set' + ); + return; + } + + async function setItUp( + traced: boolean, + withExporters?: 
boolean + ): Promise> { + // Setup firstly. + let observabilityOptions: typeof ObservabilityOptions = null; + if (withExporters) { + const traceExporter = new InMemorySpanExporter(); + const sampler = new AlwaysOnSampler(); + const provider = new NodeTracerProvider({ + sampler: sampler, + exporter: traceExporter, + }); + provider.addSpanProcessor(new SimpleSpanProcessor(traceExporter)); + + observabilityOptions = { + tracerProvider: provider, + enableExtendedTracing: true, + } as typeof ObservabilityOptions; + } + + const setupResult = await setup(traced, observabilityOptions); + const spanner = setupResult.spanner; + const instance = spanner.instance('instance'); + const database = instance.database('database'); + const server = setupResult.server; + + after(async () => { + if (observabilityOptions) { + await observabilityOptions.tracerProvider!.shutdown(); + } + database.close(); + spanner.close(); + server.tryShutdown(() => {}); + }); + + const runners: Function[] = [ + async function databaseRunSelect1AsyncAwait() { + if (traced) { + const [rows] = await (database as Database).run('SELECT 1'); + for (const row of rows) { + var _ = row; + } + return rows; + } else { + const [rows] = await (database as DatabaseUntraced).run('SELECT 1'); + for (const row of rows) { + var _ = row; + } + return rows; + } + }, + + async function databaseRunSelect1Callback() { + return new Promise((resolve, reject) => { + const callback = (err, rows) => { + if (err) { + reject(err); + return; + } + + for (const row of rows) { + const _ = row; + } + resolve(rows); + }; + + if (traced) { + (database as Database).run('SELECT 1', callback); + } else { + (database as DatabaseUntraced).run('SELECT 1', callback); + } + }); + }, + + /* + async function databaseGetTransactionAsync() { + const tx = await database.getTransction(); + + try { + await tx!.begin(); + return await tx!.runUpdate(updateSql); + } catch (e) { + console.log(e); + return null; + } finally { + console.log('tx.end'); + tx!.end(); + console.log('exiting'); + } + }, + */ + async function databaseRunTransactionAsyncTxRunUpdate() { + const withTx = async tx => { + await tx!.begin(); + const result = await tx!.runUpdate(updateSql); + tx!.end(); + return result; + }; + + try { + if (traced) { + return await (database as Database).runTransactionAsync(withTx); + } else { + return await (database as DatabaseUntraced).runTransactionAsync( + withTx + ); + } + } catch (e) { + return null; + } + }, + + async function databaseRunTransactionAsyncTxRun() { + const withTx = async tx => { + await tx!.begin(); + const result = await tx!.runUpdate('SELECT 1'); + tx!.end(); + return result; + }; + + try { + if (traced) { + return await (database as Database).runTransactionAsync(withTx); + } else { + return await (database as DatabaseUntraced).runTransactionAsync( + withTx + ); + } + } catch (e) { + return null; + } + }, + ]; + + return new Promise(resolve => { + runBenchmarks(runners, results => { + resolve(results); + }); + }); + } + + interface percentiles { + p50: number; + p50_s: string; + } + + interface description { + ram: percentiles; + timeSpent: percentiles; + } + + it('Database runs compared', async () => { + const traced = await setItUp(true); + const untraced = await setItUp(false); + const tracedWithOTELOn = await setItUp(true, true); + + console.log(`Total Runs: ${traced['_totalRuns']}`); + for (const tracedM in traced) { + const tracedV = traced[tracedM]; + if (typeof tracedV !== 'object') { + continue; + } + const tracedMethod = tracedM as string; + 
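+      // Compare the median (p50) RAM and latency of each benchmarked method:
+      // untraced baseline vs traced, and untraced vs traced with an OTEL
+      // exporter attached, reporting the absolute and percentage increase.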
const tracedValue = tracedV as description; + const untracedValue = untraced[tracedMethod] as description; + const tracedWithOTELValue = tracedWithOTELOn[tracedMethod] as description; + const tracedRAM = tracedValue!.ram; + const tracedWithOTELRAM = tracedWithOTELValue!.ram; + const untracedRAM = untracedValue!.ram; + console.log(`${tracedMethod}`); + console.log( + `\tRAM Untraced(${untracedRAM.p50_s}) vs Traced (${tracedRAM.p50_s}): increase by (${humanizeBytes(tracedRAM.p50 - untracedRAM.p50)}) or ${percentDiff(untracedRAM.p50, tracedRAM.p50).toFixed(2)}%` + ); + console.log( + `\tRAM Untraced(${untracedRAM.p50_s}) vs Traced+OTEL(${tracedWithOTELRAM.p50_s}): increase by (${humanizeBytes(tracedWithOTELRAM.p50 - untracedRAM.p50)}) or ${percentDiff(untracedRAM.p50, tracedWithOTELRAM.p50).toFixed(2)}%` + ); + const tracedTime = tracedValue.timeSpent; + const tracedWithOTELTime = tracedWithOTELValue.timeSpent; + const untracedTime = untracedValue.timeSpent; + console.log( + `\tTime Untraced(${untracedTime.p50_s}) vs Traced (${tracedTime.p50_s}): increase by (${humanizeTime(tracedTime.p50 - untracedTime.p50)}) or ${percentDiff(untracedTime.p50, tracedTime.p50).toFixed(2)}%` + ); + console.log( + `\tTime Untraced(${untracedTime.p50_s}) vs Traced+OTEL(${tracedWithOTELTime.p50_s}): increase by (${humanizeTime(tracedWithOTELTime.p50 - untracedTime.p50)}) or ${percentDiff(untracedTime.p50, tracedWithOTELTime.p50).toFixed(2)}%\n` + ); + } + }); +}); + +function percentDiff(orig, fresh) { + return ((Number(fresh) - Number(orig)) * 100.0) / Number(orig); +} diff --git a/observability-test/spanner.ts b/observability-test/spanner.ts index 2e90b3044..78cab0e3b 100644 --- a/observability-test/spanner.ts +++ b/observability-test/spanner.ts @@ -44,6 +44,8 @@ const { const { AsyncHooksContextManager, } = require('@opentelemetry/context-async-hooks'); +import {promisify} from '@google-cloud/promisify'; +import {runBenchmarks} from './benchmark'; const {ObservabilityOptions} = require('../src/instrument'); import {SessionPool} from '../src/session-pool'; diff --git a/package.json b/package.json index 27ed2f743..9afa6b556 100644 --- a/package.json +++ b/package.json @@ -33,7 +33,7 @@ "samples-test-with-archived": "cd samples/ && npm link ../ && npm test-with-archived && cd ../", "samples-test": "cd samples/ && npm link ../ && npm test && cd ../", "system-test": "mocha build/system-test --timeout 1600000", - "observability-test": "mocha build/observability-test --timeout 1600000", + "observability-test": "mocha build/observability-test --timeout 1600000 -g 'Benchmarking'", "cleanup": "mocha scripts/cleanup.js --timeout 30000", "test": "mocha build/test build/test/common build/observability-test", "ycsb": "node ./benchmark/ycsb.js run -P ./benchmark/workloada -p table=usertable -p cloudspanner.instance=ycsb-instance -p operationcount=100 -p cloudspanner.database=ycsb", @@ -58,6 +58,7 @@ "@google-cloud/precise-date": "^4.0.0", "@google-cloud/projectify": "^4.0.0", "@google-cloud/promisify": "^4.0.0", + "@google-cloud/spanner": "^7.14.0", "@grpc/proto-loader": "^0.7.0", "@opentelemetry/api": "^1.9.0", "@opentelemetry/context-async-hooks": "^1.26.0", From fbc460bd54e956beab93decc3aab848498e8cb58 Mon Sep 17 00:00:00 2001 From: Emmanuel T Odeke Date: Mon, 28 Oct 2024 02:40:15 -0700 Subject: [PATCH 2/3] Apply warm up runs plus hoist Attributes to inline --- observability-test/benchmark.ts | 5 ++-- observability-test/comparisons.ts | 6 ++-- package.json | 2 +- src/instrument.ts | 46 +++++++++++++++---------------- 4 
files changed, 30 insertions(+), 29 deletions(-) diff --git a/observability-test/benchmark.ts b/observability-test/benchmark.ts index 828838d7a..e8f49d42d 100644 --- a/observability-test/benchmark.ts +++ b/observability-test/benchmark.ts @@ -27,7 +27,8 @@ const lessComparator = (a, b) => { */ export async function runBenchmarks(runners: Function[], done: Function) { const nRuns = 10000; - const benchmarkValues = {_totalRuns: nRuns}; + const nWarmups = Math.round(nRuns / 8); + const benchmarkValues = {_totalRuns: nRuns, _warmRuns: nWarmups}; let k = 0; for (k = 0; k < runners.length; k++) { @@ -38,7 +39,7 @@ export async function runBenchmarks(runners: Function[], done: Function) { let i = 0; // Warm up runs to ensure stable behavior. - for (i = 0; i < 8; i++) { + for (i = 0; i < nWarmups; i++) { const value = await fn(); } diff --git a/observability-test/comparisons.ts b/observability-test/comparisons.ts index 7bc8145fb..45ec32c90 100644 --- a/observability-test/comparisons.ts +++ b/observability-test/comparisons.ts @@ -124,7 +124,7 @@ async function setup( }); } -describe('Benchmarking', () => { +describe('Benchmarking Database', () => { if (!process.env.SPANNER_RUN_BENCHMARKS) { console.log( 'Skipping micro-benchmarking because SPANNER_RUN_BENCHMARKS is not set' @@ -289,7 +289,9 @@ describe('Benchmarking', () => { const untraced = await setItUp(false); const tracedWithOTELOn = await setItUp(true, true); - console.log(`Total Runs: ${traced['_totalRuns']}`); + console.log( + `Total Runs: ${traced['_totalRuns']}\nWarm up runs: ${traced['_warmRuns']}` + ); for (const tracedM in traced) { const tracedV = traced[tracedM]; if (typeof tracedV !== 'object') { diff --git a/package.json b/package.json index 9afa6b556..e60bba0de 100644 --- a/package.json +++ b/package.json @@ -33,7 +33,7 @@ "samples-test-with-archived": "cd samples/ && npm link ../ && npm test-with-archived && cd ../", "samples-test": "cd samples/ && npm link ../ && npm test && cd ../", "system-test": "mocha build/system-test --timeout 1600000", - "observability-test": "mocha build/observability-test --timeout 1600000 -g 'Benchmarking'", + "observability-test": "mocha build/observability-test --timeout 1600000", "cleanup": "mocha scripts/cleanup.js --timeout 30000", "test": "mocha build/test build/test/common build/observability-test", "ycsb": "node ./benchmark/ycsb.js run -P ./benchmark/workloada -p table=usertable -p cloudspanner.instance=ycsb-instance -p operationcount=100 -p cloudspanner.database=ycsb", diff --git a/src/instrument.ts b/src/instrument.ts index 15fe7b8e3..5121ee099 100644 --- a/src/instrument.ts +++ b/src/instrument.ts @@ -24,6 +24,7 @@ import { } from '@opentelemetry/semantic-conventions'; import { + Attributes, Span, SpanStatusCode, context, @@ -136,33 +137,30 @@ export function startTrace( config = {} as traceConfig; } + const spanAttributes: Attributes = { + SEMATTRS_DB_SYSTEM: 'spanner', + ATTR_OTEL_SCOPE_NAME: TRACER_NAME, + ATTR_OTEL_SCOPE_VERSION: TRACER_VERSION, + SEMATTRS_DB_SQL_TABLE: config.tableName, + SEMATTRS_DB_NAME: config.dbName, + kind: SpanKind.CLIENT, + }; + + const allowExtendedTracing = optedInPII || config.opts?.enableExtendedTracing; + if (config.sql && allowExtendedTracing) { + const sql = config.sql; + if (typeof sql === 'string') { + spanAttributes[SEMATTRS_DB_STATEMENT] = sql as string; + } else { + const stmt = sql as SQLStatement; + spanAttributes[SEMATTRS_DB_STATEMENT] = stmt.sql; + } + } + return getTracer(config.opts?.tracerProvider).startActiveSpan( SPAN_NAMESPACE_PREFIX + '.' 
+ spanNameSuffix, - {kind: SpanKind.CLIENT}, + spanAttributes, span => { - span.setAttribute(SEMATTRS_DB_SYSTEM, 'spanner'); - span.setAttribute(ATTR_OTEL_SCOPE_NAME, TRACER_NAME); - span.setAttribute(ATTR_OTEL_SCOPE_VERSION, TRACER_VERSION); - - if (config.tableName) { - span.setAttribute(SEMATTRS_DB_SQL_TABLE, config.tableName); - } - if (config.dbName) { - span.setAttribute(SEMATTRS_DB_NAME, config.dbName); - } - - const allowExtendedTracing = - optedInPII || config.opts?.enableExtendedTracing; - if (config.sql && allowExtendedTracing) { - const sql = config.sql; - if (typeof sql === 'string') { - span.setAttribute(SEMATTRS_DB_STATEMENT, sql as string); - } else { - const stmt = sql as SQLStatement; - span.setAttribute(SEMATTRS_DB_STATEMENT, stmt.sql); - } - } - // If at all the invoked function throws an exception, // record the exception and then end this span. try { From 4d5c8978994ca9bd5c98bf39e654816518dbe205 Mon Sep 17 00:00:00 2001 From: Emmanuel T Odeke Date: Mon, 28 Oct 2024 04:06:10 -0700 Subject: [PATCH 3/3] Isolate benchmarking code because npm run is erroneous and sadly runs other code too MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We now have more isolated and more accurate values ```shell Benchmarking Database Total Runs: 20000 Warm up runs: 2500 databaseRunSelect1AsyncAwait RAM Untraced(209.906kB) vs Traced (224.453kB): increase by (14.547kB) or 6.93% RAM Untraced(209.906kB) vs Traced+OTEL(243.211kB): increase by (33.305kB) or 15.87% Time Untraced(901.139µs) vs Traced (918.845µs): increase by (17.706µs) or 1.96% Time Untraced(901.139µs) vs Traced+OTEL(1.019ms): increase by (118.223µs) or 13.12% databaseRunSelect1Callback RAM Untraced(209.789kB) vs Traced (224.641kB): increase by (14.852kB) or 7.08% RAM Untraced(209.789kB) vs Traced+OTEL(242.500kB): increase by (32.711kB) or 15.59% Time Untraced(902.862µs) vs Traced (920.289µs): increase by (17.427µs) or 1.93% Time Untraced(902.862µs) vs Traced+OTEL(1.027ms): increase by (124.280µs) or 13.77% databaseRunTransactionAsyncTxRunUpdate RAM Untraced(309.352kB) vs Traced (323.734kB): increase by (14.383kB) or 4.65% RAM Untraced(309.352kB) vs Traced+OTEL(324.258kB): increase by (14.906kB) or 4.82% Time Untraced(1.508ms) vs Traced (1.516ms): increase by (7.340µs) or 0.49% Time Untraced(1.508ms) vs Traced+OTEL(1.679ms): increase by (170.989µs) or 11.34% databaseRunTransactionAsyncTxRun RAM Untraced(299.117kB) vs Traced (311.320kB): increase by (12.203kB) or 4.08% RAM Untraced(299.117kB) vs Traced+OTEL(313.602kB): increase by (14.484kB) or 4.84% Time Untraced(1.460ms) vs Traced (1.469ms): increase by (8.468µs) or 0.58% Time Untraced(1.460ms) vs Traced+OTEL(1.679ms): increase by (219.068µs) or 15.00% ✔ Database runs compared (403854ms) Benchmark getTracer tracerProvider Total Runs: 20000 Warm up runs: 2500 withNullTracerProviderUnsetGlobalTracerProvider RAM (2.273kB) TimeSpent (12.171µs) withTracerDefinedTracerProvider RAM (2.695kB) TimeSpent (14.074µs) withTracerRegisteredGlobally RAM (2.727kB) TimeSpent (14.413µs) ``` --- .../benchmark.ts | 2 +- .../comparisons.ts | 194 +++++++++++++++--- observability-test/spanner.ts | 1 - package.json | 2 + tsconfig.json | 1 + 5 files changed, 170 insertions(+), 30 deletions(-) rename {observability-test => observability-benchmark}/benchmark.ts (99%) rename {observability-test => observability-benchmark}/comparisons.ts (68%) diff --git a/observability-test/benchmark.ts b/observability-benchmark/benchmark.ts similarity index 99% rename from 
observability-test/benchmark.ts rename to observability-benchmark/benchmark.ts index e8f49d42d..2327c3812 100644 --- a/observability-test/benchmark.ts +++ b/observability-benchmark/benchmark.ts @@ -26,7 +26,7 @@ const lessComparator = (a, b) => { * a map of functionNames to the percentiles of RAM usage and time spent. */ export async function runBenchmarks(runners: Function[], done: Function) { - const nRuns = 10000; + const nRuns = 20000; const nWarmups = Math.round(nRuns / 8); const benchmarkValues = {_totalRuns: nRuns, _warmRuns: nWarmups}; diff --git a/observability-test/comparisons.ts b/observability-benchmark/comparisons.ts similarity index 68% rename from observability-test/comparisons.ts rename to observability-benchmark/comparisons.ts index 45ec32c90..80f27b95f 100644 --- a/observability-test/comparisons.ts +++ b/observability-benchmark/comparisons.ts @@ -34,8 +34,15 @@ const { } = require('@opentelemetry/sdk-trace-node'); const {SimpleSpanProcessor} = require('@opentelemetry/sdk-trace-base'); import {humanizeBytes, humanizeTime, runBenchmarks} from './benchmark'; +const {diag, DiagConsoleLogger, DiagLogLevel} = require('@opentelemetry/api'); +// diag.setLogger(new DiagConsoleLogger(), DiagLogLevel.DEBUG); -const {ObservabilityOptions} = require('../src/instrument'); +const { + getTracer, + startTrace, + traceConfig, + ObservabilityOptions, +} = require('../src/instrument'); const selectSql = 'SELECT 1'; const updateSql = 'UPDATE FOO SET BAR=1 WHERE BAZ=2'; @@ -124,6 +131,16 @@ async function setup( }); } +interface percentiles { + p50: number; + p50_s: string; +} + +interface description { + ram: percentiles; + timeSpent: percentiles; +} + describe('Benchmarking Database', () => { if (!process.env.SPANNER_RUN_BENCHMARKS) { console.log( @@ -207,23 +224,6 @@ describe('Benchmarking Database', () => { }); }, - /* - async function databaseGetTransactionAsync() { - const tx = await database.getTransction(); - - try { - await tx!.begin(); - return await tx!.runUpdate(updateSql); - } catch (e) { - console.log(e); - return null; - } finally { - console.log('tx.end'); - tx!.end(); - console.log('exiting'); - } - }, - */ async function databaseRunTransactionAsyncTxRunUpdate() { const withTx = async tx => { await tx!.begin(); @@ -274,16 +274,6 @@ describe('Benchmarking Database', () => { }); } - interface percentiles { - p50: number; - p50_s: string; - } - - interface description { - ram: percentiles; - timeSpent: percentiles; - } - it('Database runs compared', async () => { const traced = await setItUp(true); const untraced = await setItUp(false); @@ -327,3 +317,151 @@ describe('Benchmarking Database', () => { function percentDiff(orig, fresh) { return ((Number(fresh) - Number(orig)) * 100.0) / Number(orig); } + +describe('Benchmark getTracer', () => { + it('No tracerProvider/global tracerProvider', async () => { + console.log('tracerProvider'); + const results = await benchmarkStartTrace(); + + console.log( + `Total Runs: ${results['_totalRuns']}\nWarm up runs: ${results['_warmRuns']}` + ); + + for (const method in results) { + const values = results[method]; + if (typeof values !== 'object') { + continue; + } + const desc = values as description; + const ram = desc.ram; + const timeSpent = desc!.timeSpent; + console.log(`${method}`); + console.log(`\tRAM (${ram.p50_s})`); + console.log(`\tTimeSpent (${timeSpent.p50_s})`); + } + }); +}); + +function benchmarkGetTracer(): Promise> { + const customTracerProvider = new NodeTracerProvider(); + let trapDoorCalled = false; + + const runners: 
Function[] = [ + function getTracerNullTracerProviderUnsetGlobalTracerProvider() { + return getTracer(null); + }, + + function getTracerDefinedTracerProvider() { + return getTracer(customTracerProvider); + }, + + function getTracerRegisteredGlobally() { + if (!trapDoorCalled) { + customTracerProvider.register(); + trapDoorCalled = true; + } + return getTracer(null); + }, + ]; + + return new Promise(resolve => { + runBenchmarks(runners, results => { + resolve(results); + }); + }); +} + +function benchmarkStartSpan(): Promise> { + const customTracerProvider = new NodeTracerProvider(); + let trapDoorCalled = false; + + const runners: Function[] = [ + function withNullTracerProviderUnsetGlobalTracerProvider() { + return new Promise(resolve => { + getTracer(null).startActiveSpan('aSpan', {}, span => { + resolve(span); + }); + }); + }, + + function withTracerDefinedTracerProvider() { + return new Promise(resolve => { + getTracer(customTracerProvider).startActiveSpan('aSpan', {}, span => { + resolve(span); + }); + }); + }, + + function getTracerRegisteredGlobally() { + if (!trapDoorCalled) { + customTracerProvider.register(); + trapDoorCalled = true; + } + return new Promise(resolve => { + getTracer(null).startActiveSpan('aSpan', {}, span => { + resolve(span); + }); + }); + }, + ]; + + return new Promise(resolve => { + runBenchmarks(runners, results => { + resolve(results); + }); + }); +} + +function benchmarkStartTrace(): Promise> { + const customTracerProvider = new NodeTracerProvider(); + let trapDoorCalled = false; + + const runners: Function[] = [ + async function withNullTracerProviderUnsetGlobalTracerProvider() { + const promise = new Promise((resolve, reject) => { + const opts: typeof traceConfig = { + opts: {tracerProvider: null}, + }; + startTrace('aspan', null, span => { + span.end(); + resolve(span); + }); + }); + return promise; + }, + + function withTracerDefinedTracerProvider() { + return new Promise(resolve => { + const opts: typeof traceConfig = { + opts: {tracerProvider: customTracerProvider}, + }; + startTrace('aspan', opts, span => { + span.end(); + resolve(span); + }); + }); + }, + + function withTracerRegisteredGlobally() { + if (!trapDoorCalled) { + customTracerProvider.register(); + trapDoorCalled = true; + } + return new Promise(resolve => { + const opts: typeof traceConfig = { + opts: {tracerProvider: null}, + }; + startTrace('aspan', opts, span => { + span.end(); + resolve(span); + }); + }); + }, + ]; + + return new Promise(resolve => { + runBenchmarks(runners, results => { + resolve(results); + }); + }); +} diff --git a/observability-test/spanner.ts b/observability-test/spanner.ts index 78cab0e3b..0c0c108ac 100644 --- a/observability-test/spanner.ts +++ b/observability-test/spanner.ts @@ -45,7 +45,6 @@ const { AsyncHooksContextManager, } = require('@opentelemetry/context-async-hooks'); import {promisify} from '@google-cloud/promisify'; -import {runBenchmarks} from './benchmark'; const {ObservabilityOptions} = require('../src/instrument'); import {SessionPool} from '../src/session-pool'; diff --git a/package.json b/package.json index e60bba0de..d93a73173 100644 --- a/package.json +++ b/package.json @@ -33,6 +33,7 @@ "samples-test-with-archived": "cd samples/ && npm link ../ && npm test-with-archived && cd ../", "samples-test": "cd samples/ && npm link ../ && npm test && cd ../", "system-test": "mocha build/system-test --timeout 1600000", + "observability-benchmark": "mocha build/observability-benchmark --timeout 1600000", "observability-test": "mocha 
build/observability-test --timeout 1600000", "cleanup": "mocha scripts/cleanup.js --timeout 30000", "test": "mocha build/test build/test/common build/observability-test", @@ -43,6 +44,7 @@ "prepare": "npm run compile-protos && npm run compile", "pretest": "npm run compile", "presystem-test": "npm run compile", + "preobservability-benchmark": "npm run compile", "preobservability-test": "npm run compile", "proto": "compileProtos src", "docs-test": "linkinator docs", diff --git a/tsconfig.json b/tsconfig.json index 9b165b9e7..1598feaeb 100644 --- a/tsconfig.json +++ b/tsconfig.json @@ -20,6 +20,7 @@ "test/**/*.ts", "system-test/*.ts", "benchmark/*.ts", + "observability-benchmark/*.ts", "observability-test/*.ts" ] }
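For readers who want to reuse the harness outside the mocha suites above, here is a minimal, hypothetical sketch of driving the `runBenchmarks()` helper added in `observability-benchmark/benchmark.ts` directly. The file name, relative import path, and the two toy runners are assumptions for illustration; only `runBenchmarks()` and the shape of its results come from this patch. The mocha-based comparisons themselves stay gated behind `SPANNER_RUN_BENCHMARKS=true` and are executed with `npm run observability-benchmark`.

```ts
// benchmark-driver-sketch.ts (hypothetical file, not part of this patch)
import {runBenchmarks} from './observability-benchmark/benchmark';

// Two toy runners; fn.name is used as the key in the results map.
async function concatWithPlus(): Promise<string> {
  let s = '';
  for (let i = 0; i < 100; i++) {
    s += i;
  }
  return s;
}

async function concatWithArrayJoin(): Promise<string> {
  const parts: string[] = [];
  for (let i = 0; i < 100; i++) {
    parts.push(String(i));
  }
  return parts.join('');
}

// runBenchmarks() warms each runner up, executes it nRuns times while sampling
// process.hrtime.bigint() and process.memoryUsage().heapUsed, and finally calls
// the callback with p50/p75/p90/p95/p99 percentiles per runner.
runBenchmarks([concatWithPlus, concatWithArrayJoin], results => {
  // timeSpent percentiles are bigints, so stringify them explicitly.
  console.log(
    JSON.stringify(
      results,
      (_k, v) => (typeof v === 'bigint' ? v.toString() : v),
      2
    )
  );
}).catch(console.error);
```

The same callback-with-results pattern is what `comparisons.ts` relies on before printing the Untraced vs Traced vs Traced+OTEL deltas via `percentDiff()`.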