Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

test: Add streaming responses to mock OpenAI server #1881

Merged
merged 2 commits into from
Nov 20, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
41 changes: 39 additions & 2 deletions test/lib/openai-mock-server.js
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
module.exports = openaiMockServer

const http = require('node:http')
const { Readable } = require('node:stream')
const RESPONSES = require('./openai-responses')

/**
Expand Down Expand Up @@ -69,8 +70,44 @@ function handler(req, res) {
res.setHeader(key, value)
}
res.statusCode = code
res.write(JSON.stringify(body))
res.end()

if (payload.stream === true) {
// OpenAI streamed responses are double newline delimited lines that
// are prefixed with the string `data: `. The end of the stream is
// terminated with a `data: [DONE]` line.
const parts = body.split(' ')
jsumners-nr marked this conversation as resolved.
Show resolved Hide resolved
let i = 0
const outStream = new Readable({
read() {
if (i < parts.length) {
const content = parts.length - 1 === i ? parts[i] : `${parts[i]} `
const chunk = JSON.stringify({
id: 'chatcmpl-8MzOfSMbLxEy70lYAolSwdCzfguQZ',
object: 'chat.completion.chunk',
// 2023-11-20T09:00:00-05:00
created: 1700488800,
model: 'gpt-4',
choices: [
{
index: 0,
finish_reason: null,
delta: { role: 'assistant', content }
}
]
})
this.push(`data: ${chunk}\n\n`)
i += 1
} else {
this.push('data: [DONE]\n\n')
this.push(null)
}
}
})
outStream.pipe(res)
} else {
res.write(JSON.stringify(body))
res.end()
}
})
}

Expand Down
19 changes: 19 additions & 0 deletions test/lib/openai-responses.js
Original file line number Diff line number Diff line change
Expand Up @@ -157,3 +157,22 @@ responses.set('You are a mathematician.', {
usage: { completion_tokens: 11, prompt_tokens: 53, total_tokens: 64 }
}
})

// Fixture for a streamed (server-sent events) chat completion. The mock
// server splits `body` on spaces and emits each word as its own
// `chat.completion.chunk` `data:` event when the request sets `stream: true`.
const streamedResponseHeaders = {
  'Content-Type': 'text/event-stream',
  'openai-model': 'gpt-3.5-turbo-0613',
  'openai-organization': 'new-relic-nkmd8b',
  'openai-processing-ms': '1469',
  'openai-version': '2020-10-01',
  'x-ratelimit-limit-requests': '200',
  'x-ratelimit-limit-tokens': '40000',
  'x-ratelimit-remaining-requests': '199',
  'x-ratelimit-remaining-tokens': '39940',
  'x-ratelimit-reset-requests': '7m12s',
  'x-ratelimit-reset-tokens': '90ms',
  'x-request-id': '49dbbffbd3c3f4612aa48def69059aad'
}

responses.set('Streamed response', {
  code: 200,
  headers: streamedResponseHeaders,
  body: "A streamed response is a way of transmitting data from a server to a client (e.g. from a website to a user's computer or mobile device) in a continuous flow or stream, rather than all at one time. This means the client can start to process the data before all of it has been received, which can improve performance for large amounts of data or slow connections. Streaming is often used for real-time or near-real-time applications like video or audio playback."
})
Loading