
Commit

feat: 3 GPT models
dev-9hee committed Jan 18, 2025
1 parent 2d14e02 commit 602e68b
Showing 3 changed files with 99 additions and 32 deletions.
48 changes: 36 additions & 12 deletions apps/chat/endpoints.py
@@ -1,35 +1,59 @@
# endpoints.py
from ninja import Router, Schema
from django.http import StreamingHttpResponse
from openai import OpenAI
import json
import os
from dotenv import load_dotenv
from itertools import zip_longest

router = Router(tags=["Chat"])
load_dotenv()
client = OpenAI(api_key=os.getenv('GPT_OPENAI_API_KEY'))

# Define the request schema
class MessageSchema(Schema):
message: str
models: list[str] = ['gpt-4o']

@router.post("/chat/stream")
def chat_stream(request, message_data: MessageSchema):
def event_stream():
try:
response = client.chat.completions.create(
model="gpt-4o-mini",
# Start responses from all models at the same time
responses = {
model: client.chat.completions.create(
model=model,
messages=[{"role": "user", "content": message_data.message}],
stream=True
)

for chunk in response:
if chunk.choices[0].delta.content:
data = {"content": chunk.choices[0].delta.content}
yield f"data: {json.dumps(data, ensure_ascii=False)}\n\n"
) for model in message_data.models
}

# Notify the client that each model has started
for model in message_data.models:
yield f"data: {json.dumps({'type': 'start', 'model': model}, ensure_ascii=False)}\n\n"

# Process chunks from all responses in turn
active_responses = {model: True for model in message_data.models}

while any(active_responses.values()):
for model in message_data.models:
if not active_responses[model]:
continue

except Exception as e:
yield f"data: {json.dumps({'error': str(e)}, ensure_ascii=False)}\n\n"
try:
chunk = next(responses[model])
if chunk.choices[0].delta.content:
data = {
"type": "content",
"model": model,
"content": chunk.choices[0].delta.content
}
yield f"data: {json.dumps(data, ensure_ascii=False)}\n\n"
except StopIteration:
active_responses[model] = False
yield f"data: {json.dumps({'type': 'end', 'model': model}, ensure_ascii=False)}\n\n"
except Exception as e:
active_responses[model] = False
yield f"data: {json.dumps({'type': 'error', 'model': model, 'error': str(e)}, ensure_ascii=False)}\n\n"

return StreamingHttpResponse(
event_stream(),
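Taken together, the new event_stream() opens one streaming completion per requested model, then round-robins over the streams with next(), emitting Server-Sent Events tagged start, content, end, or error along with the model name. (The hunk also imports itertools.zip_longest, though the code shown drives the streams with next() rather than zip_longest.) Below is a minimal, self-contained sketch of that interleaving pattern; the fake generators stand in for the OpenAI streaming responses, and the event shapes mirror the ones in the diff.

import json

def fake_stream(text: str):
    # Stand-in for client.chat.completions.create(..., stream=True):
    # yields one "token" at a time instead of OpenAI chunk objects.
    for token in text.split():
        yield token + " "

def interleave(streams: dict):
    # Announce each model, then round-robin over the streams until all are
    # exhausted, emitting SSE-style events shaped like the endpoint's.
    active = {name: True for name in streams}
    for name in streams:
        yield f"data: {json.dumps({'type': 'start', 'model': name})}\n\n"
    while any(active.values()):
        for name, stream in streams.items():
            if not active[name]:
                continue
            try:
                token = next(stream)
                event = {"type": "content", "model": name, "content": token}
                yield f"data: {json.dumps(event)}\n\n"
            except StopIteration:
                active[name] = False
                yield f"data: {json.dumps({'type': 'end', 'model': name})}\n\n"

if __name__ == "__main__":
    streams = {
        "gpt-4o": fake_stream("hello from the larger model"),
        "gpt-4o-mini": fake_stream("hi there"),
    }
    for sse_line in interleave(streams):
        print(sse_line, end="")

Because the real endpoint calls next() on blocking HTTP streams, each pass of the loop can stall on whichever model is slowest at that moment; the outputs are interleaved rather than fetched concurrently.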
82 changes: 62 additions & 20 deletions apps/chat/templates/chat/chat.html
@@ -3,41 +3,71 @@
<head>
<title>Chat Stream Test</title>
<style>
#response {
white-space: pre-wrap;
border: 1px solid #ccc;
padding: 10px;
margin-top: 10px;
min-height: 100px;
.container {
max-width: 1200px;
margin: 20px auto;
padding: 20px;
}
.input-container {
margin-bottom: 20px;
}
#message {
width: 80%;
padding: 5px;
padding: 10px;
margin-right: 10px;
}
.container {
max-width: 800px;
margin: 20px auto;
padding: 20px;
.responses-container {
display: flex;
gap: 20px;
}
.model-response {
flex: 1;
border: 1px solid #ccc;
padding: 15px;
border-radius: 5px;
}
.model-title {
font-weight: bold;
margin-bottom: 10px;
}
.response-content {
white-space: pre-wrap;
min-height: 200px;
}
</style>
</head>
<body>
<div class="container">
<h2>Chat Stream Test</h2>
<div>
<div class="input-container">
<input type="text" id="message" placeholder="메시지를 입력하세요" />
<button onclick="sendMessage()">전송</button>
</div>
<div id="response"></div>
<div class="responses-container">
<div class="model-response">
<div class="model-title">GPT-4o</div>
<div id="gpt-4o-response" class="response-content"></div>
</div>
<div class="model-response">
<div class="model-title">GPT-4o mini</div>
<div id="gpt-4o-mini-response" class="response-content"></div>
</div>
<div class="model-response">
<div class="model-title">GPT-3.5 Terbo</div>
<div id="gpt-3.5-turbo-response" class="response-content"></div>
</div>
</div>
</div>

<script>
function sendMessage() {
const messageInput = document.getElementById('message');
const responseDiv = document.getElementById('response');
const message = messageInput.value;

responseDiv.textContent = '';
// Reset the response areas
document.getElementById('gpt-4o-response').textContent = '';
document.getElementById('gpt-4o-mini-response').textContent = '';
document.getElementById('gpt-3.5-turbo-response').textContent = '';

fetch('/api/v1/chat/stream', {
method: 'POST',
@@ -46,6 +76,7 @@ <h2>Chat Stream Test</h2>
},
body: JSON.stringify({
message: message,
models: ['gpt-4o', 'gpt-4o-mini', 'gpt-3.5-turbo'],
}),
})
.then((response) => {
@@ -54,9 +85,7 @@ <h2>Chat Stream Test</h2>

function readStream() {
reader.read().then(({ done, value }) => {
if (done) {
return;
}
if (done) return;

const text = decoder.decode(value);
const lines = text.split('\n');
@@ -65,7 +94,21 @@ <h2>Chat Stream Test</h2>
if (line.startsWith('data: ')) {
try {
const data = JSON.parse(line.slice(6));
responseDiv.textContent += data.content;
if (data.type === 'content') {
const responseDiv = document.getElementById(
`${data.model}-response`
);
if (responseDiv) {
responseDiv.textContent += data.content;
}
} else if (data.type === 'error') {
const responseDiv = document.getElementById(
`${data.model}-response`
);
if (responseDiv) {
responseDiv.textContent = `Error: ${data.error}`;
}
}
} catch (e) {
console.error('Error parsing JSON:', e);
}
@@ -80,7 +123,6 @@ <h2>Chat Stream Test</h2>
})
.catch((error) => {
console.error('Error:', error);
responseDiv.textContent = '에러가 발생했습니다: ' + error;
});

messageInput.value = '';
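On the template side, sendMessage() now posts the chosen model list, reads the response body with a ReadableStream reader, and appends each parsed 'content' event to the <div> whose id is `${model}-response` (gpt-4o-response, gpt-4o-mini-response, gpt-3.5-turbo-response). As a rough Python sketch of that parse-and-route logic (the event fields and element ids come from the diff; like the JavaScript, it assumes each chunk ends on a line boundary, so an event split across chunks would simply be dropped):

import json

def route_sse(chunks, sinks):
    # Keep only "data: " lines, parse the JSON payload, and append or overwrite
    # the per-model output -- mirroring readStream() in chat.html.
    for chunk in chunks:
        for line in chunk.split("\n"):
            if not line.startswith("data: "):
                continue
            try:
                event = json.loads(line[len("data: "):])
            except json.JSONDecodeError:
                continue
            if event.get("type") == "content":
                sinks[event["model"]] = sinks.get(event["model"], "") + event["content"]
            elif event.get("type") == "error":
                sinks[event["model"]] = f"Error: {event['error']}"
    return sinks

if __name__ == "__main__":
    sample = [
        'data: {"type": "start", "model": "gpt-4o"}\n\n',
        'data: {"type": "content", "model": "gpt-4o", "content": "Hi"}\n\n',
        'data: {"type": "end", "model": "gpt-4o"}\n\n',
    ]
    print(route_sse(sample, {}))  # {'gpt-4o': 'Hi'}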
1 change: 1 addition & 0 deletions requirements.txt
@@ -1,4 +1,5 @@
annotated-types==0.7.0
anthropic==0.43.1
anyio==4.8.0
asgiref==3.8.1
certifi==2024.12.14
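The only visible change here adds anthropic==0.43.1 to requirements.txt, presumably groundwork for non-OpenAI providers; the endpoint changes in this commit still call only the OpenAI client. Putting the three files together, an end-to-end call against the new endpoint could look like the sketch below (host, port, and the requests dependency are assumptions for illustration; the path and body shape come from the template's fetch() call and MessageSchema):

import json
import requests  # assumed to be installed; not part of this commit's requirements

resp = requests.post(
    "http://localhost:8000/api/v1/chat/stream",  # host and port assumed
    json={"message": "Hello!", "models": ["gpt-4o", "gpt-4o-mini", "gpt-3.5-turbo"]},
    stream=True,
)
for line in resp.iter_lines(decode_unicode=True):
    if line and line.startswith("data: "):
        event = json.loads(line[len("data: "):])
        print(event["model"], event.get("content", event["type"]))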
