From 200ad00133d9d93bc3c0e111d6c09ebde39be861 Mon Sep 17 00:00:00 2001
From: James Sumners
Date: Mon, 20 Nov 2023 13:02:50 -0500
Subject: [PATCH] address feedback

---
 lib/instrumentation/openai.js                 | 21 ++++++++-----
 .../openai/chat-completion-summary.js         |  3 +-
 lib/llm-events/openai/embedding.js            |  3 +-
 lib/llm-events/openai/error-message.js        | 11 +------
 .../openai/chat-completion-summary.test.js    | 17 +++++++++++
 test/unit/llm-events/openai/common.js         |  3 +-
 test/unit/llm-events/openai/embedding.test.js | 23 ++++++++++++++
 test/versioned/openai/openai.tap.js           | 30 +++++++++++++++----
 8 files changed, 85 insertions(+), 26 deletions(-)

diff --git a/lib/instrumentation/openai.js b/lib/instrumentation/openai.js
index ac6b67a531..26d4de32ac 100644
--- a/lib/instrumentation/openai.js
+++ b/lib/instrumentation/openai.js
@@ -135,15 +135,18 @@ module.exports = function initialize(agent, openai, moduleName, shim) {
    * @param {TraceSegment} params.segment active segment from chat completion
    * @param {object} params.request chat completion params
    * @param {object} params.response chat completion response
+   * @param {boolean} [params.hasError=false] indicates that the completion
+   * resulted in some sort of error case
    * @returns {LlmChatCompletionSummary} A summary object.
    */
-  function recordChatCompletionMessages({ segment, request, response }) {
+  function recordChatCompletionMessages({ segment, request, response, hasError = false }) {
     const tx = segment.transaction
     const completionSummary = new LlmChatCompletionSummary({
       agent,
       segment,
       request,
-      response
+      response,
+      withError: hasError
     })
 
     // Only take the first response message and append to input messages
@@ -195,12 +198,13 @@ module.exports = function initialize(agent, openai, moduleName, shim) {
           const summary = recordChatCompletionMessages({
             segment,
             request,
-            response
+            response,
+            hasError: err != null
           })
 
           if (err) {
-            const llmError = new LlmErrorMessage({ cause: err, summary, request, response })
-            shim.agent.errors.add(segment.transaction, llmError)
+            const llmError = new LlmErrorMessage({ cause: err, summary, response })
+            shim.agent.errors.add(segment.transaction, err, llmError)
           }
 
           // cleanup keys on response before returning to user code
@@ -236,14 +240,15 @@ module.exports = function initialize(agent, openai, moduleName, shim) {
             agent,
             segment,
             request,
-            response
+            response,
+            withError: err != null
           })
 
           recordEvent('LlmEmbedding', embedding)
 
           if (err) {
-            const llmError = new LlmErrorMessage({ cause: err, embedding, request, response })
-            shim.agent.errors.add(segment.transaction, llmError)
+            const llmError = new LlmErrorMessage({ cause: err, embedding, response })
+            shim.agent.errors.add(segment.transaction, err, llmError)
           }
 
           // cleanup keys on response before returning to user code
diff --git a/lib/llm-events/openai/chat-completion-summary.js b/lib/llm-events/openai/chat-completion-summary.js
index 975992b7a6..0a8a713311 100644
--- a/lib/llm-events/openai/chat-completion-summary.js
+++ b/lib/llm-events/openai/chat-completion-summary.js
@@ -7,8 +7,9 @@
 const LlmEvent = require('./event')
 
 module.exports = class LlmChatCompletionSummary extends LlmEvent {
-  constructor({ agent, segment, request = {}, response = {} }) {
+  constructor({ agent, segment, request = {}, response = {}, withError = false }) {
     super({ agent, segment, request, response, responseAttrs: true })
+    this.error = withError
     this.conversation_id = this.conversationId(agent)
     this['request.max_tokens'] = request.max_tokens
     this['request.temperature'] = request.temperature
diff --git a/lib/llm-events/openai/embedding.js b/lib/llm-events/openai/embedding.js
index c9e763edea..7d9ca4ab40 100644
--- a/lib/llm-events/openai/embedding.js
+++ b/lib/llm-events/openai/embedding.js
@@ -7,8 +7,9 @@
 const LlmEvent = require('./event')
 
 module.exports = class LlmEmbedding extends LlmEvent {
-  constructor({ agent, segment, request = {}, response = {} }) {
+  constructor({ agent, segment, request = {}, response = {}, withError = false }) {
     super({ agent, segment, request, response, responseAttrs: true })
+    this.error = withError
     this.input = request.input?.toString()
   }
 }
diff --git a/lib/llm-events/openai/error-message.js b/lib/llm-events/openai/error-message.js
index 8982e6a974..65d798f593 100644
--- a/lib/llm-events/openai/error-message.js
+++ b/lib/llm-events/openai/error-message.js
@@ -12,7 +12,6 @@
 module.exports = class LlmErrorMessage {
   /**
    * @param {object} params Constructor parameters
-   * @param {object} params.request Instance of an outgoing message.
    * @param {object} [params.response] Instance of an incoming message.
    * @param {object} [params.cause] An instance of the OpenAI error object.
    * @param {LlmChatCompletionSummary} [params.summary] Details about the
@@ -20,16 +19,8 @@ module.exports = class LlmErrorMessage {
    * @param {LlmEmbedding} [params.embedding] Details about the conversation
    * if it was an embedding conversation.
    */
-  constructor({ request, response, cause, summary, embedding } = {}) {
-    this.api_key_last_four_digits = response?.api_key && `sk-${response.api_key.slice(-4)}`
-    this['request.model'] = request.model || request.engine
-    this['request.temperature'] = request.temperature
-    this['request.max_tokens'] = request.max_tokens
-    this.vendor = 'openAI'
-    this.ingest_source = 'Node'
-    this['response.number_of_messages'] = request?.messages?.length
+  constructor({ response, cause, summary, embedding } = {}) {
     this['http.statusCode'] = response?.status ?? cause?.status
-    this['response.organization'] = response.headers['openai-organization']
     this['error.message'] = cause?.message
     this['error.code'] = response?.code ?? cause?.error?.code
     this['error.param'] = response?.param ?? cause?.error?.param
diff --git a/test/unit/llm-events/openai/chat-completion-summary.test.js b/test/unit/llm-events/openai/chat-completion-summary.test.js
index 017a1b270d..c1daf8dc1b 100644
--- a/test/unit/llm-events/openai/chat-completion-summary.test.js
+++ b/test/unit/llm-events/openai/chat-completion-summary.test.js
@@ -55,4 +55,21 @@ tap.test('LlmChatCompletionSummary', (t) => {
       t.end()
     })
   })
+
+  t.test('should set error to true', (t) => {
+    const api = helper.getAgentApi()
+    const conversationId = 'convo-id'
+    helper.runInTransaction(agent, () => {
+      api.addCustomAttribute('conversation_id', conversationId)
+      const chatSummaryEvent = new LlmChatCompletionSummary({
+        agent,
+        segment: null,
+        request: {},
+        response: {},
+        withError: true
+      })
+      t.equal(true, chatSummaryEvent.error)
+      t.end()
+    })
+  })
 })
diff --git a/test/unit/llm-events/openai/common.js b/test/unit/llm-events/openai/common.js
index 93a254d372..3907329a28 100644
--- a/test/unit/llm-events/openai/common.js
+++ b/test/unit/llm-events/openai/common.js
@@ -55,7 +55,8 @@ function getExpectedResult(tx, event, type, completionId) {
     'transaction_id': tx.id,
     'response.model': 'gpt-3.5-turbo-0613',
     'vendor': 'openAI',
-    'ingest_source': 'Node'
+    'ingest_source': 'Node',
+    'error': false
   }
   const resKeys = {
     'duration': trace.children[0].getExclusiveDurationInMillis(),
diff --git a/test/unit/llm-events/openai/embedding.test.js b/test/unit/llm-events/openai/embedding.test.js
index 34b399d06f..34d814a0fd 100644
--- a/test/unit/llm-events/openai/embedding.test.js
+++ b/test/unit/llm-events/openai/embedding.test.js
@@ -68,4 +68,27 @@ tap.test('LlmEmbedding', (t) => {
       t.end()
     })
   })
+
+  t.test('should set error to true', (t) => {
+    const req = {
+      input: 'This is my test input',
+      model: 'gpt-3.5-turbo-0613'
+    }
+
+    const api = helper.getAgentApi()
+    helper.runInTransaction(agent, () => {
+      api.startSegment('fakeSegment', false, () => {
+        const segment = api.shim.getActiveSegment()
+        const embeddingEvent = new LlmEmbedding({
+          agent,
+          segment,
+          request: req,
+          response: res,
+          withError: true
+        })
+        t.equal(true, embeddingEvent.error)
+        t.end()
+      })
+    })
+  })
 })
diff --git a/test/versioned/openai/openai.tap.js b/test/versioned/openai/openai.tap.js
index 257d846fdd..339027d940 100644
--- a/test/versioned/openai/openai.tap.js
+++ b/test/versioned/openai/openai.tap.js
@@ -142,7 +142,8 @@ tap.test('OpenAI instrumentation', (t) => {
         'response.headers.ratelimitRemainingRequests': '199',
         'response.number_of_messages': 3,
         'response.usage.completion_tokens': 11,
-        'response.choices.finish_reason': 'stop'
+        'response.choices.finish_reason': 'stop',
+        'error': false
       }
       test.match(chatSummary[1], expectedChatSummary, 'should match chat summary message')
       tx.end()
@@ -317,7 +318,8 @@ tap.test('OpenAI instrumentation', (t) => {
         'response.headers.ratelimitResetTokens': '2ms',
         'response.headers.ratelimitRemainingTokens': '149994',
         'response.headers.ratelimitRemainingRequests': '197',
-        'input': 'This is an embedding test.'
+        'input': 'This is an embedding test.',
+        'error': false
       }
 
       test.equal(embedding[0].type, 'LlmEmbedding')
@@ -369,18 +371,25 @@ tap.test('OpenAI instrumentation', (t) => {
         t.equal(tx.exceptions.length, 1)
         t.match(tx.exceptions[0], {
           error: {
+            status: 401,
+            code: 'invalid_api_key',
+            param: 'null'
+          },
+          customAttributes: {
             'http.statusCode': 401,
             'error.message': /Incorrect API key provided:/,
             'error.code': 'invalid_api_key',
             'error.param': 'null',
             'completion_id': /[\w\d]{32}/
           },
-          customAttributes: {},
           agentAttributes: {
             spanId: /[\w\d]+/
           }
         })
 
+        const summary = agent.customEventAggregator.events.toArray().slice(0, 1)[0][1]
+        t.equal(summary.error, true)
+
         tx.end()
         test.end()
       })
@@ -398,13 +407,17 @@ tap.test('OpenAI instrumentation', (t) => {
         t.equal(tx.exceptions.length, 1)
         t.match(tx.exceptions[0], {
           error: {
+            status: 400,
+            code: null,
+            param: null
+          },
+          customAttributes: {
             'http.statusCode': 400,
             'error.message': /'bad-role' is not one of/,
             'error.code': null,
             'error.param': null,
             'completion_id': /\w{32}/
           },
-          customAttributes: {},
           agentAttributes: {
             spanId: /\w+/
           }
@@ -428,6 +441,11 @@ tap.test('OpenAI instrumentation', (t) => {
         t.equal(tx.exceptions.length, 1)
         t.match(tx.exceptions[0], {
           error: {
+            status: 403,
+            code: null,
+            param: null
+          },
+          customAttributes: {
             'http.statusCode': 403,
             'error.message': 'You are not allowed to generate embeddings from this model',
             'error.code': null,
@@ -434,13 +452,15 @@ tap.test('OpenAI instrumentation', (t) => {
             'error.param': null,
             'completion_id': undefined,
             'embedding_id': /\w{32}/
           },
-          customAttributes: {},
           agentAttributes: {
             spanId: /\w+/
           }
         })
 
+        const embedding = agent.customEventAggregator.events.toArray().slice(0, 1)[0][1]
+        t.equal(embedding.error, true)
+
         tx.end()
         test.end()
       })