|
| 1 | +import { Message as VercelChatMessage } from 'ai'; |
| 2 | +import { RunnableSequence } from '@langchain/core/runnables'; |
| 3 | +import { PromptTemplate } from '@langchain/core/prompts'; |
| 4 | +import { ChatOpenAI } from '@langchain/openai'; |
| 5 | + |
| 6 | +import { HttpResponseOutputParser } from 'langchain/output_parsers'; |
| 7 | +import { JSONLoader } from 'langchain/document_loaders/fs/json'; |
| 8 | +import { formatDocumentsAsString } from 'langchain/util/document'; |
| 9 | + |
| 10 | +const loader = new JSONLoader('data/states.json', [ |
| 11 | + '/state', |
| 12 | + '/code', |
| 13 | + '/nickname', |
| 14 | + '/website', |
| 15 | + '/admission_date', |
| 16 | + '/admission_number', |
| 17 | + '/capital_city', |
| 18 | + '/capital_url', |
| 19 | + '/population', |
| 20 | + '/population_rank', |
| 21 | + '/constitution_url', |
| 22 | + '/twitter_url', |
| 23 | +]); |
| 24 | +const formatMessage = (message: VercelChatMessage) => { |
| 25 | + return `${message.role}: ${message.content}`; |
| 26 | +}; |
| 27 | + |
// Prompt sent to the model on every request. {context} is filled with the
// serialized state documents, {chat_history} with the formatted prior turns,
// and {question} with the latest user message. Instructs the model to answer
// only from the supplied context and to decline otherwise.
const TEMPLATE = `
Answer the user's questions based only on the following context.
If the answer is not in the context, reply politely that you do not have that information available.:
==============================
Context: {context}
==============================
Current conversation: {chat_history}

user: {question}
assistant:`;
| 38 | + |
| 39 | +export async function POST(req: Request) { |
| 40 | + try { |
| 41 | + const { messages } = await req.json(); |
| 42 | + |
| 43 | + const formattedPreviousMessages = messages.slice(0, -1).map(formatMessage); |
| 44 | + |
| 45 | + const currentMessageContent = messages[messages.length - 1].content; |
| 46 | + |
| 47 | + const docs = await loader.load(); |
| 48 | + |
| 49 | + const prompt = PromptTemplate.fromTemplate(TEMPLATE); |
| 50 | + |
| 51 | + const model = new ChatOpenAI({ |
| 52 | + apiKey: process.env.OPENAI_API_KEY!, |
| 53 | + model: 'gpt-3.5-turbo', |
| 54 | + temperature: 0, |
| 55 | + streaming: true, |
| 56 | + }); |
| 57 | + |
| 58 | + const parser = new HttpResponseOutputParser(); |
| 59 | + |
| 60 | + const chain = RunnableSequence.from([ |
| 61 | + { |
| 62 | + question: (input) => input.question, |
| 63 | + chat_history: (input) => input.chat_history, |
| 64 | + context: () => formatDocumentsAsString(docs), |
| 65 | + }, |
| 66 | + prompt, |
| 67 | + model, |
| 68 | + parser, |
| 69 | + ]); |
| 70 | + |
| 71 | + const stream = await chain.stream({ |
| 72 | + chat_history: formattedPreviousMessages.join('\n'), |
| 73 | + question: currentMessageContent, |
| 74 | + }); |
| 75 | + |
| 76 | + return new Response(stream, { |
| 77 | + status: 200, |
| 78 | + headers: { 'Content-Type': 'text/plain; charset=utf-8' }, |
| 79 | + }); |
| 80 | + } catch (error) { |
| 81 | + console.error('라우팅 오류:', error); |
| 82 | + return new Response(JSON.stringify({ error: '처리 실패' }), { |
| 83 | + status: 500, |
| 84 | + headers: { 'Content-Type': 'application/json' }, |
| 85 | + }); |
| 86 | + } |
| 87 | +} |
0 commit comments