-
Notifications
You must be signed in to change notification settings - Fork 404
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.
Already on GitHub? Sign in to your account
feat: Ensure API errors are tracked #1880
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -8,7 +8,8 @@ const { openAiHeaders, openAiApiKey } = require('../../lib/symbols') | |
const { | ||
LlmChatCompletionMessage, | ||
LlmChatCompletionSummary, | ||
LlmEmbedding | ||
LlmEmbedding, | ||
LlmErrorMessage | ||
} = require('../../lib/llm-events/openai') | ||
const LlmTrackedIds = require('../../lib/llm-events/tracked-ids') | ||
|
||
|
@@ -35,6 +36,7 @@ function shouldSkipInstrumentation(config, shim) { | |
return semver.lt(pkgVersion, MIN_VERSION) | ||
} | ||
|
||
// eslint-disable-next-line sonarjs/cognitive-complexity | ||
module.exports = function initialize(agent, openai, moduleName, shim) { | ||
if (shouldSkipInstrumentation(agent.config, shim)) { | ||
shim.logger.debug( | ||
|
@@ -55,8 +57,14 @@ module.exports = function initialize(agent, openai, moduleName, shim) { | |
|
||
if (segment) { | ||
segment[openAiApiKey] = apiKey | ||
segment[openAiHeaders] = | ||
result?.response?.headers && Object.fromEntries(result.response.headers) | ||
|
||
// If the result is an error, which is an OpenAI client error, then | ||
// the headers are provided via a proxy attached to `result.headers`. | ||
// Otherwise, result is a typical response-like object. | ||
const headers = result?.response?.headers | ||
? Object.fromEntries(result.response.headers) | ||
: { ...result?.headers } | ||
segment[openAiHeaders] = headers | ||
} | ||
} | ||
|
||
|
@@ -127,14 +135,18 @@ module.exports = function initialize(agent, openai, moduleName, shim) { | |
* @param {TraceSegment} params.segment active segment from chat completion | ||
* @param {object} params.request chat completion params | ||
* @param {object} params.response chat completion response | ||
* @param {boolean} [params.hasError=false] indicates that the completion | ||
* resulted in some sort of error case | ||
* @returns {LlmChatCompletionSummary} A summary object. | ||
*/ | ||
function recordChatCompletionMessages({ segment, request, response }) { | ||
function recordChatCompletionMessages({ segment, request, response, hasError = false }) { | ||
[Review thread on `recordChatCompletionMessages`]
Reviewer: nit: `hasError` and `withError`
Author: What is your suggestion?
Reviewer: Just calling out that the argument coming in is named differently than it is when going into the event creation.
Author: In my mind there's a difference between the two, but I'm totally open to changing it one way or the other. Which would you prefer?
Reviewer: Ah, I guess so. Just keep it. |
||
const tx = segment.transaction | ||
const completionSummary = new LlmChatCompletionSummary({ | ||
agent, | ||
segment, | ||
request, | ||
response | ||
response, | ||
withError: hasError | ||
}) | ||
|
||
// Only take the first response message and append to input messages | ||
|
@@ -155,6 +167,8 @@ module.exports = function initialize(agent, openai, moduleName, shim) { | |
}) | ||
|
||
recordEvent('LlmChatCompletionSummary', completionSummary) | ||
|
||
return completionSummary | ||
} | ||
|
||
/** | ||
|
@@ -173,19 +187,26 @@ module.exports = function initialize(agent, openai, moduleName, shim) { | |
promise: true, | ||
// eslint-disable-next-line max-params | ||
after(_shim, _fn, _name, err, response, segment) { | ||
if (!response) { | ||
// If we get an error, it is possible that `response = null`. | ||
// In that case, we define it to be an empty object. | ||
response = {} | ||
} | ||
response.headers = segment[openAiHeaders] | ||
response.api_key = segment[openAiApiKey] | ||
|
||
// TODO: add LlmErrorMessage on failure | ||
// and exit | ||
// See: https://github.com/newrelic/node-newrelic/issues/1845 | ||
// if (err) {} | ||
|
||
recordChatCompletionMessages({ | ||
const summary = recordChatCompletionMessages({ | ||
segment, | ||
request, | ||
response | ||
response, | ||
hasError: err != null | ||
}) | ||
|
||
if (err) { | ||
bizob2828 marked this conversation as resolved.
Show resolved
Hide resolved
|
||
const llmError = new LlmErrorMessage({ cause: err, summary, response }) | ||
shim.agent.errors.add(segment.transaction, err, llmError) | ||
} | ||
|
||
// cleanup keys on response before returning to user code | ||
delete response.api_key | ||
delete response.headers | ||
|
@@ -208,16 +229,28 @@ module.exports = function initialize(agent, openai, moduleName, shim) { | |
promise: true, | ||
// eslint-disable-next-line max-params | ||
after(_shim, _fn, _name, err, response, segment) { | ||
if (!response) { | ||
// If we get an error, it is possible that `response = null`. | ||
// In that case, we define it to be an empty object. | ||
response = {} | ||
} | ||
response.headers = segment[openAiHeaders] | ||
response.api_key = segment[openAiApiKey] | ||
const embedding = new LlmEmbedding({ | ||
agent, | ||
segment, | ||
request, | ||
response | ||
response, | ||
withError: err != null | ||
}) | ||
|
||
recordEvent('LlmEmbedding', embedding) | ||
|
||
if (err) { | ||
const llmError = new LlmErrorMessage({ cause: err, embedding, response }) | ||
shim.agent.errors.add(segment.transaction, err, llmError) | ||
} | ||
|
||
// cleanup keys on response before returning to user code | ||
delete response.api_key | ||
delete response.headers | ||
|
[Review comment]
Reviewer: I noticed this too. I think it's because we're defining all the functions within the `module.exports`. We try not to add to the cognitive complexity, but we can address it later.