diff --git a/lib/instrumentation/openai.js b/lib/instrumentation/openai.js
index d1d6ac47e3..26d4de32ac 100644
--- a/lib/instrumentation/openai.js
+++ b/lib/instrumentation/openai.js
@@ -8,7 +8,8 @@
 const { openAiHeaders, openAiApiKey } = require('../../lib/symbols')
 const {
   LlmChatCompletionMessage,
   LlmChatCompletionSummary,
-  LlmEmbedding
+  LlmEmbedding,
+  LlmErrorMessage
 } = require('../../lib/llm-events/openai')
 const LlmTrackedIds = require('../../lib/llm-events/tracked-ids')

@@ -35,6 +36,7 @@ function shouldSkipInstrumentation(config, shim) {
   return semver.lt(pkgVersion, MIN_VERSION)
 }

+// eslint-disable-next-line sonarjs/cognitive-complexity
 module.exports = function initialize(agent, openai, moduleName, shim) {
   if (shouldSkipInstrumentation(agent.config, shim)) {
     shim.logger.debug(
@@ -55,8 +57,14 @@ module.exports = function initialize(agent, openai, moduleName, shim) {
     if (segment) {
       segment[openAiApiKey] = apiKey
-      segment[openAiHeaders] =
-        result?.response?.headers && Object.fromEntries(result.response.headers)
+
+      // If the result is an error, i.e. an OpenAI client error, the headers
+      // are provided via a proxy attached to `result.headers`. Otherwise,
+      // `result` is a typical response-like object.
+      const headers = result?.response?.headers
+        ? Object.fromEntries(result.response.headers)
+        : { ...result?.headers }
+      segment[openAiHeaders] = headers
     }
   }

@@ -127,14 +135,18 @@ module.exports = function initialize(agent, openai, moduleName, shim) {
    * @param {TraceSegment} params.segment active segment from chat completion
    * @param {object} params.request chat completion params
    * @param {object} params.response chat completion response
+   * @param {boolean} [params.hasError=false] indicates that the completion
+   * resulted in an error
+   * @returns {LlmChatCompletionSummary} The recorded summary event.
    */
-  function recordChatCompletionMessages({ segment, request, response }) {
+  function recordChatCompletionMessages({ segment, request, response, hasError = false }) {
     const tx = segment.transaction
     const completionSummary = new LlmChatCompletionSummary({
       agent,
       segment,
       request,
-      response
+      response,
+      withError: hasError
     })

     // Only take the first response message and append to input messages
@@ -155,6 +167,8 @@ module.exports = function initialize(agent, openai, moduleName, shim) {
     })

     recordEvent('LlmChatCompletionSummary', completionSummary)
+
+    return completionSummary
   }

   /**
@@ -173,19 +187,26 @@ module.exports = function initialize(agent, openai, moduleName, shim) {
       promise: true,
       // eslint-disable-next-line max-params
       after(_shim, _fn, _name, err, response, segment) {
+        if (!response) {
+          // If we get an error, it is possible that `response` is null.
+          // In that case, we default it to an empty object.
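+          // (The `err` argument still carries the OpenAI client error; the
+          // new `LlmErrorMessage` below reads its `status`, `message`, and
+          // `error.code` / `error.param` fields.)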
+          response = {}
+        }
         response.headers = segment[openAiHeaders]
         response.api_key = segment[openAiApiKey]

-        // TODO: add LlmErrorMessage on failure
-        // and exit
-        // See: https://github.com/newrelic/node-newrelic/issues/1845
-        // if (err) {}
-
-        recordChatCompletionMessages({
+        const summary = recordChatCompletionMessages({
           segment,
           request,
-          response
+          response,
+          hasError: err != null
         })
+
+        if (err) {
+          const llmError = new LlmErrorMessage({ cause: err, summary, response })
+          shim.agent.errors.add(segment.transaction, err, llmError)
+        }
+
         // cleanup keys on response before returning to user code
         delete response.api_key
         delete response.headers
@@ -208,16 +229,28 @@ module.exports = function initialize(agent, openai, moduleName, shim) {
       promise: true,
       // eslint-disable-next-line max-params
       after(_shim, _fn, _name, err, response, segment) {
+        if (!response) {
+          // If we get an error, it is possible that `response` is null.
+          // In that case, we default it to an empty object.
+          response = {}
+        }
         response.headers = segment[openAiHeaders]
         response.api_key = segment[openAiApiKey]
         const embedding = new LlmEmbedding({
           agent,
           segment,
           request,
-          response
+          response,
+          withError: err != null
         })

         recordEvent('LlmEmbedding', embedding)
+
+        if (err) {
+          const llmError = new LlmErrorMessage({ cause: err, embedding, response })
+          shim.agent.errors.add(segment.transaction, err, llmError)
+        }
+
         // cleanup keys on response before returning to user code
         delete response.api_key
         delete response.headers
diff --git a/lib/llm-events/openai/chat-completion-summary.js b/lib/llm-events/openai/chat-completion-summary.js
index 975992b7a6..0a8a713311 100644
--- a/lib/llm-events/openai/chat-completion-summary.js
+++ b/lib/llm-events/openai/chat-completion-summary.js
@@ -7,8 +7,9 @@
 const LlmEvent = require('./event')

 module.exports = class LlmChatCompletionSummary extends LlmEvent {
-  constructor({ agent, segment, request = {}, response = {} }) {
+  constructor({ agent, segment, request = {}, response = {}, withError = false }) {
     super({ agent, segment, request, response, responseAttrs: true })
+    this.error = withError
     this.conversation_id = this.conversationId(agent)
     this['request.max_tokens'] = request.max_tokens
     this['request.temperature'] = request.temperature
diff --git a/lib/llm-events/openai/embedding.js b/lib/llm-events/openai/embedding.js
index c9e763edea..7d9ca4ab40 100644
--- a/lib/llm-events/openai/embedding.js
+++ b/lib/llm-events/openai/embedding.js
@@ -7,8 +7,9 @@
 const LlmEvent = require('./event')

 module.exports = class LlmEmbedding extends LlmEvent {
-  constructor({ agent, segment, request = {}, response = {} }) {
+  constructor({ agent, segment, request = {}, response = {}, withError = false }) {
     super({ agent, segment, request, response, responseAttrs: true })
+    this.error = withError
     this.input = request.input?.toString()
   }
 }
diff --git a/lib/llm-events/openai/error-message.js b/lib/llm-events/openai/error-message.js
index 7316fa3969..65d798f593 100644
--- a/lib/llm-events/openai/error-message.js
+++ b/lib/llm-events/openai/error-message.js
@@ -4,18 +4,27 @@
  */

 'use strict'
+
+/**
+ * Represents an error object, to be tracked via `agent.errors`, that
+ * resulted from an error returned by the OpenAI API.
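+ *
+ * A sketch of the intended usage, mirroring how this patch wires it up in
+ * `lib/instrumentation/openai.js`:
+ *
+ * @example
+ * const llmError = new LlmErrorMessage({ cause: err, summary, response })
+ * shim.agent.errors.add(segment.transaction, err, llmError)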
+ */ module.exports = class LlmErrorMessage { - constructor(request, response) { - this.api_key_last_four_digits = response?.api_key && `sk-${response.api_key.slice(-4)}` - this['request.model'] = request.model || request.engine - this['request.temperature'] = request.temperature - this['request.max_tokens'] = request.max_tokens - this.vendor = 'openAI' - this.ingest_source = 'Node' - this['response.number_of_messages'] = request?.messages?.length - this['http.statusCode'] = response.status - this['response.organization'] = response.headers['openai-organization'] - this['error.code'] = response.code - this['error.param'] = response.param + /** + * @param {object} params Constructor parameters + * @param {object} [params.response] Instance of an incoming message. + * @param {object} [params.cause] An instance of the OpenAI error object. + * @param {LlmChatCompletionSummary} [params.summary] Details about the + * conversation if it was a chat completion conversation. + * @param {LlmEmbedding} [params.embedding] Details about the conversation + * if it was an embedding conversation. + */ + constructor({ response, cause, summary, embedding } = {}) { + this['http.statusCode'] = response?.status ?? cause?.status + this['error.message'] = cause?.message + this['error.code'] = response?.code ?? cause?.error?.code + this['error.param'] = response?.param ?? cause?.error?.param + this.completion_id = summary?.id + this.embedding_id = embedding?.id } } diff --git a/test/lib/openai-responses.js b/test/lib/openai-responses.js index 6ab3024722..7c839c9833 100644 --- a/test/lib/openai-responses.js +++ b/test/lib/openai-responses.js @@ -158,6 +158,42 @@ responses.set('You are a mathematician.', { } }) +responses.set('Invalid role.', { + headers: { + 'content-type': 'application/json', + 'x-request-id': '5db943f509e9031e73de8f4a5e46de32', + 'openai-organization': 'new-relic-nkmd8b', + 'openai-processing-ms': '4', + 'openai-version': '2020-10-01' + }, + code: 400, + body: { + error: { + message: + "'bad-role' is not one of ['system', 'assistant', 'user', 'function'] - 'messages.0.role'", + type: 'invalid_request_error', + param: null, + code: null + } + } +}) + +responses.set('Embedding not allowed.', { + headers: { + 'content-type': 'application/json; charset=utf-8', + 'x-request-id': '25e22d5a7af9e40b7d5b1828593b52aa' + }, + code: 403, + body: { + error: { + message: 'You are not allowed to generate embeddings from this model', + type: 'invalid_request_error', + param: null, + code: null + } + } +}) + responses.set('Streamed response', { headers: { 'Content-Type': 'text/event-stream', diff --git a/test/unit/llm-events/openai/chat-completion-summary.test.js b/test/unit/llm-events/openai/chat-completion-summary.test.js index 017a1b270d..c1daf8dc1b 100644 --- a/test/unit/llm-events/openai/chat-completion-summary.test.js +++ b/test/unit/llm-events/openai/chat-completion-summary.test.js @@ -55,4 +55,21 @@ tap.test('LlmChatCompletionSummary', (t) => { t.end() }) }) + + t.test('should set error to true', (t) => { + const api = helper.getAgentApi() + const conversationId = 'convo-id' + helper.runInTransaction(agent, () => { + api.addCustomAttribute('conversation_id', conversationId) + const chatSummaryEvent = new LlmChatCompletionSummary({ + agent, + segment: null, + request: {}, + response: {}, + withError: true + }) + t.equal(true, chatSummaryEvent.error) + t.end() + }) + }) }) diff --git a/test/unit/llm-events/openai/common.js b/test/unit/llm-events/openai/common.js index 93a254d372..70f98f3b34 100644 --- 
+++ b/test/unit/llm-events/openai/common.js
@@ -76,6 +76,7 @@ function getExpectedResult(tx, event, type, completionId) {
     case 'embedding':
       expected = { ...expected, ...resKeys }
       expected.input = 'This is my test input'
+      expected.error = false
       break
     case 'summary':
       expected = {
@@ -86,7 +87,8 @@ function getExpectedResult(tx, event, type, completionId) {
         ['request.temperature']: 'medium-rare',
         ['response.number_of_messages']: 3,
         ['response.usage.completion_tokens']: 10,
-        ['response.choices.finish_reason']: 'stop'
+        ['response.choices.finish_reason']: 'stop',
+        error: false
       }
       break
     case 'message':
diff --git a/test/unit/llm-events/openai/embedding.test.js b/test/unit/llm-events/openai/embedding.test.js
index 34b399d06f..34d814a0fd 100644
--- a/test/unit/llm-events/openai/embedding.test.js
+++ b/test/unit/llm-events/openai/embedding.test.js
@@ -68,4 +68,27 @@ tap.test('LlmEmbedding', (t) => {
       t.end()
     })
   })
+
+  t.test('should set error to true', (t) => {
+    const req = {
+      input: 'This is my test input',
+      model: 'gpt-3.5-turbo-0613'
+    }
+
+    const api = helper.getAgentApi()
+    helper.runInTransaction(agent, () => {
+      api.startSegment('fakeSegment', false, () => {
+        const segment = api.shim.getActiveSegment()
+        const embeddingEvent = new LlmEmbedding({
+          agent,
+          segment,
+          request: req,
+          response: res,
+          withError: true
+        })
+        t.equal(embeddingEvent.error, true)
+        t.end()
+      })
+    })
+  })
 })
diff --git a/test/unit/llm-events/openai/error.test.js b/test/unit/llm-events/openai/error.test.js
index e370cee4cb..57cf2f1a8f 100644
--- a/test/unit/llm-events/openai/error.test.js
+++ b/test/unit/llm-events/openai/error.test.js
@@ -11,19 +11,14 @@ const { req, chatRes } = require('./common')

 tap.test('LlmErrorMessage', (t) => {
   const res = { ...chatRes, code: 'insufficient_quota', param: 'test-param', status: 429 }
-  const errorMsg = new LlmErrorMessage(req, res)
+  const errorMsg = new LlmErrorMessage({ request: req, response: res })
   const expected = {
-    'api_key_last_four_digits': 'sk-7890',
-    'request.model': 'gpt-3.5-turbo-0613',
-    'request.temperature': 'medium-rare',
-    'request.max_tokens': '1000000',
-    'vendor': 'openAI',
-    'ingest_source': 'Node',
-    'response.number_of_messages': 2,
     'http.statusCode': 429,
-    'response.organization': 'new-relic',
+    'error.message': undefined,
     'error.code': 'insufficient_quota',
-    'error.param': 'test-param'
+    'error.param': 'test-param',
+    'completion_id': undefined,
+    'embedding_id': undefined
   }
   t.same(errorMsg, expected)
   t.end()
diff --git a/test/versioned/openai/openai.tap.js b/test/versioned/openai/openai.tap.js
index 8454cbd9de..bc0021fa18 100644
--- a/test/versioned/openai/openai.tap.js
+++ b/test/versioned/openai/openai.tap.js
@@ -142,7 +142,8 @@ tap.test('OpenAI instrumentation', (t) => {
           'response.headers.ratelimitRemainingRequests': '199',
           'response.number_of_messages': 3,
           'response.usage.completion_tokens': 11,
-          'response.choices.finish_reason': 'stop'
+          'response.choices.finish_reason': 'stop',
+          'error': false
         }
         test.match(chatSummary[1], expectedChatSummary, 'should match chat summary message')
         tx.end()
@@ -317,7 +318,8 @@ tap.test('OpenAI instrumentation', (t) => {
           'response.headers.ratelimitResetTokens': '2ms',
           'response.headers.ratelimitRemainingTokens': '149994',
           'response.headers.ratelimitRemainingRequests': '197',
-          'input': 'This is an embedding test.'
+          'input': 'This is an embedding test.',
+          'error': false
         }

         test.equal(embedding[0].type, 'LlmEmbedding')
@@ -356,4 +358,114 @@ tap.test('OpenAI instrumentation', (t) => {
       })
     }
   )
+
+  t.test('chat completion auth errors should be tracked', (test) => {
+    const { client, agent } = t.context
+    helper.runInTransaction(agent, async (tx) => {
+      try {
+        await client.chat.completions.create({
+          messages: [{ role: 'user', content: 'Invalid API key.' }]
+        })
+      } catch {}
+
+      test.equal(tx.exceptions.length, 1)
+      test.match(tx.exceptions[0], {
+        error: {
+          status: 401,
+          code: 'invalid_api_key',
+          param: 'null'
+        },
+        customAttributes: {
+          'http.statusCode': 401,
+          'error.message': /Incorrect API key provided:/,
+          'error.code': 'invalid_api_key',
+          'error.param': 'null',
+          'completion_id': /\w{32}/
+        },
+        agentAttributes: {
+          spanId: /\w+/
+        }
+      })
+
+      const summary = agent.customEventAggregator.events.toArray().find((e) => {
+        return e[0].type === 'LlmChatCompletionSummary'
+      })
+      test.ok(summary)
+      test.equal(summary[1].error, true)
+
+      tx.end()
+      test.end()
+    })
+  })
+
+  t.test('chat completion invalid payload errors should be tracked', (test) => {
+    const { client, agent } = t.context
+    helper.runInTransaction(agent, async (tx) => {
+      try {
+        await client.chat.completions.create({
+          messages: [{ role: 'bad-role', content: 'Invalid role.' }]
+        })
+      } catch {}
+
+      test.equal(tx.exceptions.length, 1)
+      test.match(tx.exceptions[0], {
+        error: {
+          status: 400,
+          code: null,
+          param: null
+        },
+        customAttributes: {
+          'http.statusCode': 400,
+          'error.message': /'bad-role' is not one of/,
+          'error.code': null,
+          'error.param': null,
+          'completion_id': /\w{32}/
+        },
+        agentAttributes: {
+          spanId: /\w+/
+        }
+      })
+
+      tx.end()
+      test.end()
+    })
+  })
+
+  t.test('embedding invalid payload errors should be tracked', (test) => {
+    const { client, agent } = t.context
+    helper.runInTransaction(agent, async (tx) => {
+      try {
+        await client.embeddings.create({
+          model: 'gpt-4',
+          input: 'Embedding not allowed.'
+        })
+      } catch {}
+
+      test.equal(tx.exceptions.length, 1)
+      test.match(tx.exceptions[0], {
+        error: {
+          status: 403,
+          code: null,
+          param: null
+        },
+        customAttributes: {
+          'http.statusCode': 403,
+          'error.message': 'You are not allowed to generate embeddings from this model',
+          'error.code': null,
+          'error.param': null,
+          'completion_id': undefined,
+          'embedding_id': /\w{32}/
+        },
+        agentAttributes: {
+          spanId: /\w+/
+        }
+      })
+
+      const embedding = agent.customEventAggregator.events.toArray()[0][1]
+      test.equal(embedding.error, true)
+
+      tx.end()
+      test.end()
+    })
+  })
 })