From 2cda3d57fd012c303f23bffbdef6429f29065869 Mon Sep 17 00:00:00 2001 From: James Sumners Date: Fri, 17 Nov 2023 14:18:24 -0500 Subject: [PATCH 1/4] feat: Ensure API errors are tracked This PR is to resolve #1845. --- lib/instrumentation/openai.js | 32 +++++++++---- lib/llm-events/openai/llm-error.js | 38 +++++++++++++++ test/lib/openai-responses.js | 20 ++++++++ test/versioned/openai/openai.tap.js | 72 +++++++++++++++++++++++++++++ 4 files changed, 154 insertions(+), 8 deletions(-) create mode 100644 lib/llm-events/openai/llm-error.js diff --git a/lib/instrumentation/openai.js b/lib/instrumentation/openai.js index d1d6ac47e3..cc097b3803 100644 --- a/lib/instrumentation/openai.js +++ b/lib/instrumentation/openai.js @@ -11,6 +11,7 @@ const { LlmEmbedding } = require('../../lib/llm-events/openai') const LlmTrackedIds = require('../../lib/llm-events/tracked-ids') +const OpenAiLlmError = require('../llm-events/openai/llm-error') const MIN_VERSION = '4.0.0' const { AI } = require('../../lib/metrics/names') @@ -55,8 +56,14 @@ module.exports = function initialize(agent, openai, moduleName, shim) { if (segment) { segment[openAiApiKey] = apiKey - segment[openAiHeaders] = - result?.response?.headers && Object.fromEntries(result.response.headers) + + // If the result is an error, which is an OpenAI client error, then + // the headers are provided via a proxy attached to `result.headers`. + // Otherwise, result is a typical response-like object. + const headers = result?.response?.headers + ? Object.fromEntries(result.response.headers) + : { ...result?.headers } + segment[openAiHeaders] = headers } } @@ -127,6 +134,7 @@ module.exports = function initialize(agent, openai, moduleName, shim) { * @param {TraceSegment} params.segment active segment from chat completion * @param {object} params.request chat completion params * @param {object} params.response chat completion response + * @returns {LlmChatCompletionSummary} A summary object. */ function recordChatCompletionMessages({ segment, request, response }) { const tx = segment.transaction @@ -155,6 +163,8 @@ module.exports = function initialize(agent, openai, moduleName, shim) { }) recordEvent('LlmChatCompletionSummary', completionSummary) + + return completionSummary } /** @@ -173,19 +183,25 @@ module.exports = function initialize(agent, openai, moduleName, shim) { promise: true, // eslint-disable-next-line max-params after(_shim, _fn, _name, err, response, segment) { + if (!response) { + // If we get an error, it is possible that `response = null`. + // In that case, we define it to be an empty object. + response = {} + } response.headers = segment[openAiHeaders] response.api_key = segment[openAiApiKey] - // TODO: add LlmErrorMessage on failure - // and exit - // See: https://github.com/newrelic/node-newrelic/issues/1845 - // if (err) {} - - recordChatCompletionMessages({ + const summary = recordChatCompletionMessages({ segment, request, response }) + + if (err) { + const llmError = new OpenAiLlmError({ cause: err, summary, response }) + shim.agent.errors.add(segment.transaction, llmError) + } + // cleanup keys on response before returning to user code delete response.api_key delete response.headers diff --git a/lib/llm-events/openai/llm-error.js b/lib/llm-events/openai/llm-error.js new file mode 100644 index 0000000000..8a8933c2c6 --- /dev/null +++ b/lib/llm-events/openai/llm-error.js @@ -0,0 +1,38 @@ +/* + * Copyright 2023 New Relic Corporation. All rights reserved. 
+ * SPDX-License-Identifier: Apache-2.0 + */ + +'use strict' + +/** + * Represents an error object, to be tracked via `agent.errors`, that is the + * result of some error returned from the OpenAI API. + */ +class OpenAiLlmError extends Error { + /** + * @param {object} params + * @param {Error} params.cause The OpenAI error object. It has some extra + * properties, but is a direct descendent of Error. + * @param {LlmChatCompletionSummary} [params.summary] Details about the + * conversation. + * @param {LlmEmbedding} [params.embedding] + */ + constructor({ cause, summary, embedding }) { + super(cause.message, { cause: cause.error }) + this.http = { + statusCode: cause.status + } + this.error = { + code: cause.error.code, + param: cause.error.param + } + // this.statusCode = cause.status + // this.code = cause.error.code + // this.param = cause.error.param + this.completion_id = summary?.id + this.embedding_id = embedding?.id + } +} + +module.exports = OpenAiLlmError diff --git a/test/lib/openai-responses.js b/test/lib/openai-responses.js index 6ab3024722..51b5a67138 100644 --- a/test/lib/openai-responses.js +++ b/test/lib/openai-responses.js @@ -158,6 +158,26 @@ responses.set('You are a mathematician.', { } }) +responses.set('Invalid role.', { + headers: { + 'content-type': 'application/json', + 'x-request-id': '5db943f509e9031e73de8f4a5e46de32', + 'openai-organization': 'new-relic-nkmd8b', + 'openai-processing-ms': '4', + 'openai-version': '2020-10-01' + }, + code: 400, + body: { + error: { + message: + "'bad-role' is not one of ['system', 'assistant', 'user', 'function'] - 'messages.0.role'", + type: 'invalid_request_error', + param: null, + code: null + } + } +}) + responses.set('Streamed response', { headers: { 'Content-Type': 'text/event-stream', diff --git a/test/versioned/openai/openai.tap.js b/test/versioned/openai/openai.tap.js index 8454cbd9de..1683be681c 100644 --- a/test/versioned/openai/openai.tap.js +++ b/test/versioned/openai/openai.tap.js @@ -356,4 +356,76 @@ tap.test('OpenAI instrumentation', (t) => { }) } ) + + t.test('chat completion auth errors should be tracked', (test) => { + const { client, agent } = t.context + helper.runInTransaction(agent, async (tx) => { + try { + await client.chat.completions.create({ + messages: [{ role: 'user', content: 'Invalid API key.' }] + }) + } catch {} + + t.equal(tx.exceptions.length, 1) + t.match(tx.exceptions[0], { + error: { + http: { + statusCode: 401 + }, + error: { + code: 'invalid_api_key', + param: 'null' + }, + completion_id: /[\w\d]{32}/, + cause: { + message: /Incorrect API key provided:/, + type: 'invalid_request_error' + } + }, + customAttributes: {}, + agentAttributes: { + spanId: /[\w\d]+/ + } + }) + + tx.end() + test.end() + }) + }) + + t.test('chat completion invalid payload errors should be tracked', (test) => { + const { client, agent } = t.context + helper.runInTransaction(agent, async (tx) => { + try { + await client.chat.completions.create({ + messages: [{ role: 'bad-role', content: 'Invalid role.' 
}] + }) + } catch {} + + t.equal(tx.exceptions.length, 1) + t.match(tx.exceptions[0], { + error: { + http: { + statusCode: 400 + }, + error: { + code: null, + param: null + }, + completion_id: /[\w\d]{32}/, + cause: { + message: /'bad-role' is not one of/, + type: 'invalid_request_error' + } + }, + customAttributes: {}, + agentAttributes: { + spanId: /[\w\d]+/ + } + }) + + tx.end() + test.end() + }) + }) }) From 756a8057821655711dc1bccf7e4c0c4cfe810cb6 Mon Sep 17 00:00:00 2001 From: James Sumners Date: Fri, 17 Nov 2023 15:01:05 -0500 Subject: [PATCH 2/4] add embedding tracking --- lib/instrumentation/openai.js | 12 +++++++++ lib/llm-events/openai/llm-error.js | 3 --- test/lib/openai-responses.js | 16 +++++++++++ test/versioned/openai/openai.tap.js | 42 +++++++++++++++++++++++++++-- 4 files changed, 68 insertions(+), 5 deletions(-) diff --git a/lib/instrumentation/openai.js b/lib/instrumentation/openai.js index cc097b3803..47097ebd34 100644 --- a/lib/instrumentation/openai.js +++ b/lib/instrumentation/openai.js @@ -36,6 +36,7 @@ function shouldSkipInstrumentation(config, shim) { return semver.lt(pkgVersion, MIN_VERSION) } +// eslint-disable-next-line sonarjs/cognitive-complexity module.exports = function initialize(agent, openai, moduleName, shim) { if (shouldSkipInstrumentation(agent.config, shim)) { shim.logger.debug( @@ -224,6 +225,11 @@ module.exports = function initialize(agent, openai, moduleName, shim) { promise: true, // eslint-disable-next-line max-params after(_shim, _fn, _name, err, response, segment) { + if (!response) { + // If we get an error, it is possible that `response = null`. + // In that case, we define it to be an empty object. + response = {} + } response.headers = segment[openAiHeaders] response.api_key = segment[openAiApiKey] const embedding = new LlmEmbedding({ @@ -234,6 +240,12 @@ module.exports = function initialize(agent, openai, moduleName, shim) { }) recordEvent('LlmEmbedding', embedding) + + if (err) { + const llmError = new OpenAiLlmError({ cause: err, embedding, response }) + shim.agent.errors.add(segment.transaction, llmError) + } + // cleanup keys on response before returning to user code delete response.api_key delete response.headers diff --git a/lib/llm-events/openai/llm-error.js b/lib/llm-events/openai/llm-error.js index 8a8933c2c6..c49d598a0c 100644 --- a/lib/llm-events/openai/llm-error.js +++ b/lib/llm-events/openai/llm-error.js @@ -27,9 +27,6 @@ class OpenAiLlmError extends Error { code: cause.error.code, param: cause.error.param } - // this.statusCode = cause.status - // this.code = cause.error.code - // this.param = cause.error.param this.completion_id = summary?.id this.embedding_id = embedding?.id } diff --git a/test/lib/openai-responses.js b/test/lib/openai-responses.js index 51b5a67138..7c839c9833 100644 --- a/test/lib/openai-responses.js +++ b/test/lib/openai-responses.js @@ -178,6 +178,22 @@ responses.set('Invalid role.', { } }) +responses.set('Embedding not allowed.', { + headers: { + 'content-type': 'application/json; charset=utf-8', + 'x-request-id': '25e22d5a7af9e40b7d5b1828593b52aa' + }, + code: 403, + body: { + error: { + message: 'You are not allowed to generate embeddings from this model', + type: 'invalid_request_error', + param: null, + code: null + } + } +}) + responses.set('Streamed response', { headers: { 'Content-Type': 'text/event-stream', diff --git a/test/versioned/openai/openai.tap.js b/test/versioned/openai/openai.tap.js index 1683be681c..25ffead07c 100644 --- a/test/versioned/openai/openai.tap.js +++ 
b/test/versioned/openai/openai.tap.js @@ -412,7 +412,7 @@ tap.test('OpenAI instrumentation', (t) => { code: null, param: null }, - completion_id: /[\w\d]{32}/, + completion_id: /\w{32}/, cause: { message: /'bad-role' is not one of/, type: 'invalid_request_error' @@ -420,7 +420,45 @@ tap.test('OpenAI instrumentation', (t) => { }, customAttributes: {}, agentAttributes: { - spanId: /[\w\d]+/ + spanId: /\w+/ + } + }) + + tx.end() + test.end() + }) + }) + + t.test('embedding invalid payload errors should be tracked', (test) => { + const { client, agent } = t.context + helper.runInTransaction(agent, async (tx) => { + try { + await client.embeddings.create({ + model: 'gpt-4', + input: 'Embedding not allowed.' + }) + } catch {} + + t.equal(tx.exceptions.length, 1) + t.match(tx.exceptions[0], { + error: { + http: { + statusCode: 403 + }, + error: { + code: null, + param: null + }, + completion_id: undefined, + embedding_id: /\w{32}/, + cause: { + message: 'You are not allowed to generate embeddings from this model', + type: 'invalid_request_error' + } + }, + customAttributes: {}, + agentAttributes: { + spanId: /\w+/ } }) From 406f831cced514cc97d805a5ad711327d12789c8 Mon Sep 17 00:00:00 2001 From: James Sumners Date: Mon, 20 Nov 2023 12:09:59 -0500 Subject: [PATCH 3/4] address feedback --- lib/instrumentation/openai.js | 8 ++-- lib/llm-events/openai/error-message.js | 26 +++++++++-- lib/llm-events/openai/llm-error.js | 35 --------------- test/unit/llm-events/openai/error.test.js | 7 ++- test/versioned/openai/openai.tap.js | 53 +++++++---------------- 5 files changed, 47 insertions(+), 82 deletions(-) delete mode 100644 lib/llm-events/openai/llm-error.js diff --git a/lib/instrumentation/openai.js b/lib/instrumentation/openai.js index 47097ebd34..ac6b67a531 100644 --- a/lib/instrumentation/openai.js +++ b/lib/instrumentation/openai.js @@ -8,10 +8,10 @@ const { openAiHeaders, openAiApiKey } = require('../../lib/symbols') const { LlmChatCompletionMessage, LlmChatCompletionSummary, - LlmEmbedding + LlmEmbedding, + LlmErrorMessage } = require('../../lib/llm-events/openai') const LlmTrackedIds = require('../../lib/llm-events/tracked-ids') -const OpenAiLlmError = require('../llm-events/openai/llm-error') const MIN_VERSION = '4.0.0' const { AI } = require('../../lib/metrics/names') @@ -199,7 +199,7 @@ module.exports = function initialize(agent, openai, moduleName, shim) { }) if (err) { - const llmError = new OpenAiLlmError({ cause: err, summary, response }) + const llmError = new LlmErrorMessage({ cause: err, summary, request, response }) shim.agent.errors.add(segment.transaction, llmError) } @@ -242,7 +242,7 @@ module.exports = function initialize(agent, openai, moduleName, shim) { recordEvent('LlmEmbedding', embedding) if (err) { - const llmError = new OpenAiLlmError({ cause: err, embedding, response }) + const llmError = new LlmErrorMessage({ cause: err, embedding, request, response }) shim.agent.errors.add(segment.transaction, llmError) } diff --git a/lib/llm-events/openai/error-message.js b/lib/llm-events/openai/error-message.js index 7316fa3969..8982e6a974 100644 --- a/lib/llm-events/openai/error-message.js +++ b/lib/llm-events/openai/error-message.js @@ -4,8 +4,23 @@ */ 'use strict' + +/** + * Represents an error object, to be tracked via `agent.errors`, that is the + * result of some error returned from the OpenAI API. 
+ */ module.exports = class LlmErrorMessage { - constructor(request, response) { + /** + * @param {object} params Constructor parameters + * @param {object} params.request Instance of an outgoing message. + * @param {object} [params.response] Instance of an incoming message. + * @param {object} [params.cause] An instance of the OpenAI error object. + * @param {LlmChatCompletionSummary} [params.summary] Details about the + * conversation if it was a chat completion conversation. + * @param {LlmEmbedding} [params.embedding] Details about the conversation + * if it was an embedding conversation. + */ + constructor({ request, response, cause, summary, embedding } = {}) { this.api_key_last_four_digits = response?.api_key && `sk-${response.api_key.slice(-4)}` this['request.model'] = request.model || request.engine this['request.temperature'] = request.temperature @@ -13,9 +28,12 @@ module.exports = class LlmErrorMessage { this.vendor = 'openAI' this.ingest_source = 'Node' this['response.number_of_messages'] = request?.messages?.length - this['http.statusCode'] = response.status + this['http.statusCode'] = response?.status ?? cause?.status this['response.organization'] = response.headers['openai-organization'] - this['error.code'] = response.code - this['error.param'] = response.param + this['error.message'] = cause?.message + this['error.code'] = response?.code ?? cause?.error?.code + this['error.param'] = response?.param ?? cause?.error?.param + this.completion_id = summary?.id + this.embedding_id = embedding?.id } } diff --git a/lib/llm-events/openai/llm-error.js b/lib/llm-events/openai/llm-error.js deleted file mode 100644 index c49d598a0c..0000000000 --- a/lib/llm-events/openai/llm-error.js +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright 2023 New Relic Corporation. All rights reserved. - * SPDX-License-Identifier: Apache-2.0 - */ - -'use strict' - -/** - * Represents an error object, to be tracked via `agent.errors`, that is the - * result of some error returned from the OpenAI API. - */ -class OpenAiLlmError extends Error { - /** - * @param {object} params - * @param {Error} params.cause The OpenAI error object. It has some extra - * properties, but is a direct descendent of Error. - * @param {LlmChatCompletionSummary} [params.summary] Details about the - * conversation. 
- * @param {LlmEmbedding} [params.embedding] - */ - constructor({ cause, summary, embedding }) { - super(cause.message, { cause: cause.error }) - this.http = { - statusCode: cause.status - } - this.error = { - code: cause.error.code, - param: cause.error.param - } - this.completion_id = summary?.id - this.embedding_id = embedding?.id - } -} - -module.exports = OpenAiLlmError diff --git a/test/unit/llm-events/openai/error.test.js b/test/unit/llm-events/openai/error.test.js index e370cee4cb..1fb24982ce 100644 --- a/test/unit/llm-events/openai/error.test.js +++ b/test/unit/llm-events/openai/error.test.js @@ -11,7 +11,7 @@ const { req, chatRes } = require('./common') tap.test('LlmErrorMessage', (t) => { const res = { ...chatRes, code: 'insufficient_quota', param: 'test-param', status: 429 } - const errorMsg = new LlmErrorMessage(req, res) + const errorMsg = new LlmErrorMessage({ request: req, response: res }) const expected = { 'api_key_last_four_digits': 'sk-7890', 'request.model': 'gpt-3.5-turbo-0613', @@ -22,8 +22,11 @@ tap.test('LlmErrorMessage', (t) => { 'response.number_of_messages': 2, 'http.statusCode': 429, 'response.organization': 'new-relic', + 'error.message': undefined, 'error.code': 'insufficient_quota', - 'error.param': 'test-param' + 'error.param': 'test-param', + 'completion_id': undefined, + 'embedding_id': undefined } t.same(errorMsg, expected) t.end() diff --git a/test/versioned/openai/openai.tap.js b/test/versioned/openai/openai.tap.js index 25ffead07c..257d846fdd 100644 --- a/test/versioned/openai/openai.tap.js +++ b/test/versioned/openai/openai.tap.js @@ -369,18 +369,11 @@ tap.test('OpenAI instrumentation', (t) => { t.equal(tx.exceptions.length, 1) t.match(tx.exceptions[0], { error: { - http: { - statusCode: 401 - }, - error: { - code: 'invalid_api_key', - param: 'null' - }, - completion_id: /[\w\d]{32}/, - cause: { - message: /Incorrect API key provided:/, - type: 'invalid_request_error' - } + 'http.statusCode': 401, + 'error.message': /Incorrect API key provided:/, + 'error.code': 'invalid_api_key', + 'error.param': 'null', + 'completion_id': /[\w\d]{32}/ }, customAttributes: {}, agentAttributes: { @@ -405,18 +398,11 @@ tap.test('OpenAI instrumentation', (t) => { t.equal(tx.exceptions.length, 1) t.match(tx.exceptions[0], { error: { - http: { - statusCode: 400 - }, - error: { - code: null, - param: null - }, - completion_id: /\w{32}/, - cause: { - message: /'bad-role' is not one of/, - type: 'invalid_request_error' - } + 'http.statusCode': 400, + 'error.message': /'bad-role' is not one of/, + 'error.code': null, + 'error.param': null, + 'completion_id': /\w{32}/ }, customAttributes: {}, agentAttributes: { @@ -442,19 +428,12 @@ tap.test('OpenAI instrumentation', (t) => { t.equal(tx.exceptions.length, 1) t.match(tx.exceptions[0], { error: { - http: { - statusCode: 403 - }, - error: { - code: null, - param: null - }, - completion_id: undefined, - embedding_id: /\w{32}/, - cause: { - message: 'You are not allowed to generate embeddings from this model', - type: 'invalid_request_error' - } + 'http.statusCode': 403, + 'error.message': 'You are not allowed to generate embeddings from this model', + 'error.code': null, + 'error.param': null, + 'completion_id': undefined, + 'embedding_id': /\w{32}/ }, customAttributes: {}, agentAttributes: { From 9c155c7cfc013397e87210c231c817a22faffc17 Mon Sep 17 00:00:00 2001 From: James Sumners Date: Mon, 20 Nov 2023 13:02:50 -0500 Subject: [PATCH 4/4] address feedback --- lib/instrumentation/openai.js | 21 +++++++----- 
.../openai/chat-completion-summary.js | 3 +- lib/llm-events/openai/embedding.js | 3 +- lib/llm-events/openai/error-message.js | 11 +------ .../openai/chat-completion-summary.test.js | 17 ++++++++++ test/unit/llm-events/openai/common.js | 4 ++- test/unit/llm-events/openai/embedding.test.js | 23 +++++++++++++ test/unit/llm-events/openai/error.test.js | 8 ----- test/versioned/openai/openai.tap.js | 33 ++++++++++++++++--- 9 files changed, 89 insertions(+), 34 deletions(-) diff --git a/lib/instrumentation/openai.js b/lib/instrumentation/openai.js index ac6b67a531..26d4de32ac 100644 --- a/lib/instrumentation/openai.js +++ b/lib/instrumentation/openai.js @@ -135,15 +135,18 @@ module.exports = function initialize(agent, openai, moduleName, shim) { * @param {TraceSegment} params.segment active segment from chat completion * @param {object} params.request chat completion params * @param {object} params.response chat completion response + * @param {boolean} [params.hasError=false] indicates that the completion + * resulted in some sort of error case * @returns {LlmChatCompletionSummary} A summary object. */ - function recordChatCompletionMessages({ segment, request, response }) { + function recordChatCompletionMessages({ segment, request, response, hasError = false }) { const tx = segment.transaction const completionSummary = new LlmChatCompletionSummary({ agent, segment, request, - response + response, + withError: hasError }) // Only take the first response message and append to input messages @@ -195,12 +198,13 @@ module.exports = function initialize(agent, openai, moduleName, shim) { const summary = recordChatCompletionMessages({ segment, request, - response + response, + hasError: err != null }) if (err) { - const llmError = new LlmErrorMessage({ cause: err, summary, request, response }) - shim.agent.errors.add(segment.transaction, llmError) + const llmError = new LlmErrorMessage({ cause: err, summary, response }) + shim.agent.errors.add(segment.transaction, err, llmError) } // cleanup keys on response before returning to user code @@ -236,14 +240,15 @@ module.exports = function initialize(agent, openai, moduleName, shim) { agent, segment, request, - response + response, + withError: err != null }) recordEvent('LlmEmbedding', embedding) if (err) { - const llmError = new LlmErrorMessage({ cause: err, embedding, request, response }) - shim.agent.errors.add(segment.transaction, llmError) + const llmError = new LlmErrorMessage({ cause: err, embedding, response }) + shim.agent.errors.add(segment.transaction, err, llmError) } // cleanup keys on response before returning to user code diff --git a/lib/llm-events/openai/chat-completion-summary.js b/lib/llm-events/openai/chat-completion-summary.js index 975992b7a6..0a8a713311 100644 --- a/lib/llm-events/openai/chat-completion-summary.js +++ b/lib/llm-events/openai/chat-completion-summary.js @@ -7,8 +7,9 @@ const LlmEvent = require('./event') module.exports = class LlmChatCompletionSummary extends LlmEvent { - constructor({ agent, segment, request = {}, response = {} }) { + constructor({ agent, segment, request = {}, response = {}, withError = false }) { super({ agent, segment, request, response, responseAttrs: true }) + this.error = withError this.conversation_id = this.conversationId(agent) this['request.max_tokens'] = request.max_tokens this['request.temperature'] = request.temperature diff --git a/lib/llm-events/openai/embedding.js b/lib/llm-events/openai/embedding.js index c9e763edea..7d9ca4ab40 100644 --- a/lib/llm-events/openai/embedding.js +++ 
b/lib/llm-events/openai/embedding.js @@ -7,8 +7,9 @@ const LlmEvent = require('./event') module.exports = class LlmEmbedding extends LlmEvent { - constructor({ agent, segment, request = {}, response = {} }) { + constructor({ agent, segment, request = {}, response = {}, withError = false }) { super({ agent, segment, request, response, responseAttrs: true }) + this.error = withError this.input = request.input?.toString() } } diff --git a/lib/llm-events/openai/error-message.js b/lib/llm-events/openai/error-message.js index 8982e6a974..65d798f593 100644 --- a/lib/llm-events/openai/error-message.js +++ b/lib/llm-events/openai/error-message.js @@ -12,7 +12,6 @@ module.exports = class LlmErrorMessage { /** * @param {object} params Constructor parameters - * @param {object} params.request Instance of an outgoing message. * @param {object} [params.response] Instance of an incoming message. * @param {object} [params.cause] An instance of the OpenAI error object. * @param {LlmChatCompletionSummary} [params.summary] Details about the @@ -20,16 +19,8 @@ module.exports = class LlmErrorMessage { * @param {LlmEmbedding} [params.embedding] Details about the conversation * if it was an embedding conversation. */ - constructor({ request, response, cause, summary, embedding } = {}) { - this.api_key_last_four_digits = response?.api_key && `sk-${response.api_key.slice(-4)}` - this['request.model'] = request.model || request.engine - this['request.temperature'] = request.temperature - this['request.max_tokens'] = request.max_tokens - this.vendor = 'openAI' - this.ingest_source = 'Node' - this['response.number_of_messages'] = request?.messages?.length + constructor({ response, cause, summary, embedding } = {}) { this['http.statusCode'] = response?.status ?? cause?.status - this['response.organization'] = response.headers['openai-organization'] this['error.message'] = cause?.message this['error.code'] = response?.code ?? cause?.error?.code this['error.param'] = response?.param ?? 
cause?.error?.param diff --git a/test/unit/llm-events/openai/chat-completion-summary.test.js b/test/unit/llm-events/openai/chat-completion-summary.test.js index 017a1b270d..c1daf8dc1b 100644 --- a/test/unit/llm-events/openai/chat-completion-summary.test.js +++ b/test/unit/llm-events/openai/chat-completion-summary.test.js @@ -55,4 +55,21 @@ tap.test('LlmChatCompletionSummary', (t) => { t.end() }) }) + + t.test('should set error to true', (t) => { + const api = helper.getAgentApi() + const conversationId = 'convo-id' + helper.runInTransaction(agent, () => { + api.addCustomAttribute('conversation_id', conversationId) + const chatSummaryEvent = new LlmChatCompletionSummary({ + agent, + segment: null, + request: {}, + response: {}, + withError: true + }) + t.equal(true, chatSummaryEvent.error) + t.end() + }) + }) }) diff --git a/test/unit/llm-events/openai/common.js b/test/unit/llm-events/openai/common.js index 93a254d372..70f98f3b34 100644 --- a/test/unit/llm-events/openai/common.js +++ b/test/unit/llm-events/openai/common.js @@ -76,6 +76,7 @@ function getExpectedResult(tx, event, type, completionId) { case 'embedding': expected = { ...expected, ...resKeys } expected.input = 'This is my test input' + expected.error = false break case 'summary': expected = { @@ -86,7 +87,8 @@ function getExpectedResult(tx, event, type, completionId) { ['request.temperature']: 'medium-rare', ['response.number_of_messages']: 3, ['response.usage.completion_tokens']: 10, - ['response.choices.finish_reason']: 'stop' + ['response.choices.finish_reason']: 'stop', + error: false } break case 'message': diff --git a/test/unit/llm-events/openai/embedding.test.js b/test/unit/llm-events/openai/embedding.test.js index 34b399d06f..34d814a0fd 100644 --- a/test/unit/llm-events/openai/embedding.test.js +++ b/test/unit/llm-events/openai/embedding.test.js @@ -68,4 +68,27 @@ tap.test('LlmEmbedding', (t) => { t.end() }) }) + + t.test('should set error to true', (t) => { + const req = { + input: 'This is my test input', + model: 'gpt-3.5-turbo-0613' + } + + const api = helper.getAgentApi() + helper.runInTransaction(agent, () => { + api.startSegment('fakeSegment', false, () => { + const segment = api.shim.getActiveSegment() + const embeddingEvent = new LlmEmbedding({ + agent, + segment, + request: req, + response: res, + withError: true + }) + t.equal(true, embeddingEvent.error) + t.end() + }) + }) + }) }) diff --git a/test/unit/llm-events/openai/error.test.js b/test/unit/llm-events/openai/error.test.js index 1fb24982ce..57cf2f1a8f 100644 --- a/test/unit/llm-events/openai/error.test.js +++ b/test/unit/llm-events/openai/error.test.js @@ -13,15 +13,7 @@ tap.test('LlmErrorMessage', (t) => { const res = { ...chatRes, code: 'insufficient_quota', param: 'test-param', status: 429 } const errorMsg = new LlmErrorMessage({ request: req, response: res }) const expected = { - 'api_key_last_four_digits': 'sk-7890', - 'request.model': 'gpt-3.5-turbo-0613', - 'request.temperature': 'medium-rare', - 'request.max_tokens': '1000000', - 'vendor': 'openAI', - 'ingest_source': 'Node', - 'response.number_of_messages': 2, 'http.statusCode': 429, - 'response.organization': 'new-relic', 'error.message': undefined, 'error.code': 'insufficient_quota', 'error.param': 'test-param', diff --git a/test/versioned/openai/openai.tap.js b/test/versioned/openai/openai.tap.js index 257d846fdd..bc0021fa18 100644 --- a/test/versioned/openai/openai.tap.js +++ b/test/versioned/openai/openai.tap.js @@ -142,7 +142,8 @@ tap.test('OpenAI instrumentation', (t) => { 
'response.headers.ratelimitRemainingRequests': '199', 'response.number_of_messages': 3, 'response.usage.completion_tokens': 11, - 'response.choices.finish_reason': 'stop' + 'response.choices.finish_reason': 'stop', + 'error': false } test.match(chatSummary[1], expectedChatSummary, 'should match chat summary message') tx.end() @@ -317,7 +318,8 @@ tap.test('OpenAI instrumentation', (t) => { 'response.headers.ratelimitResetTokens': '2ms', 'response.headers.ratelimitRemainingTokens': '149994', 'response.headers.ratelimitRemainingRequests': '197', - 'input': 'This is an embedding test.' + 'input': 'This is an embedding test.', + 'error': false } test.equal(embedding[0].type, 'LlmEmbedding') @@ -369,18 +371,28 @@ tap.test('OpenAI instrumentation', (t) => { t.equal(tx.exceptions.length, 1) t.match(tx.exceptions[0], { error: { + status: 401, + code: 'invalid_api_key', + param: 'null' + }, + customAttributes: { 'http.statusCode': 401, 'error.message': /Incorrect API key provided:/, 'error.code': 'invalid_api_key', 'error.param': 'null', 'completion_id': /[\w\d]{32}/ }, - customAttributes: {}, agentAttributes: { spanId: /[\w\d]+/ } }) + const summary = agent.customEventAggregator.events.toArray().find((e) => { + return e[0].type === 'LlmChatCompletionSummary' + }) + t.ok(summary) + t.equal(summary[1].error, true) + tx.end() test.end() }) @@ -398,13 +410,17 @@ tap.test('OpenAI instrumentation', (t) => { t.equal(tx.exceptions.length, 1) t.match(tx.exceptions[0], { error: { + status: 400, + code: null, + param: null + }, + customAttributes: { 'http.statusCode': 400, 'error.message': /'bad-role' is not one of/, 'error.code': null, 'error.param': null, 'completion_id': /\w{32}/ }, - customAttributes: {}, agentAttributes: { spanId: /\w+/ } @@ -428,6 +444,11 @@ tap.test('OpenAI instrumentation', (t) => { t.equal(tx.exceptions.length, 1) t.match(tx.exceptions[0], { error: { + status: 403, + code: null, + param: null + }, + customAttributes: { 'http.statusCode': 403, 'error.message': 'You are not allowed to generate embeddings from this model', 'error.code': null, @@ -435,12 +456,14 @@ tap.test('OpenAI instrumentation', (t) => { 'completion_id': undefined, 'embedding_id': /\w{32}/ }, - customAttributes: {}, agentAttributes: { spanId: /\w+/ } }) + const embedding = agent.customEventAggregator.events.toArray().slice(0, 1)[0][1] + t.equal(embedding.error, true) + tx.end() test.end() })
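
Note: for reference, below is a minimal sketch (not part of the patch series) of the
user-facing flow the versioned tests above exercise. It assumes an application with
the agent loaded and the OpenAI instrumentation active, plus an `openai` v4 client;
the transaction name, model, and message values are placeholders:

    'use strict'
    const newrelic = require('newrelic')
    const OpenAI = require('openai')

    const client = new OpenAI({ apiKey: process.env.OPENAI_API_KEY })

    newrelic.startBackgroundTransaction('chat-completion', async () => {
      try {
        // An invalid role yields a 400 from the API, so the client throws.
        await client.chat.completions.create({
          model: 'gpt-4',
          messages: [{ role: 'bad-role', content: 'Hello' }]
        })
      } catch {
        // By the time user code sees the rejection, the instrumentation has
        // already added the error to the active transaction, carrying the
        // http.statusCode, error.message, error.code, error.param, and
        // completion_id attributes, and the corresponding
        // LlmChatCompletionSummary event has been recorded with error: true.
        // Embedding failures behave the same way, except the error carries
        // embedding_id and the LlmEmbedding event is flagged instead.
      }
    })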