18 changes: 18 additions & 0 deletions bun.lock

Some generated files are not rendered by default.

5 changes: 3 additions & 2 deletions package.json
@@ -36,6 +36,7 @@
      "@ai-sdk/google": "^2.0.7",
      "@ai-sdk/groq": "^2.0.6",
      "@ai-sdk/openai": "^2.0.11",
+     "@ai-sdk/vercel": "^1.0.11",
      "@babel/runtime": "^7.28.2",
      "@clerk/backend": "^2.7.1",
      "@clerk/clerk-react": "^5.41.0",
@@ -122,14 +123,14 @@
      "react-router-dom": "^7.8.0",
      "recharts": "^3.1.2",
      "rollup": "^4.46.2",
+     "sanitize-html": "^2.17.0",
      "sonner": "^2.0.7",
      "tailwind-merge": "^3.3.1",
      "tailwindcss-animate": "^1.0.7",
      "undici": "^7.13.0",
      "vaul": "^1.1.2",
      "vercel": "^44.7.3",
-     "zod": "^4.0.17",
-     "sanitize-html": "^2.17.0"
+     "zod": "^4.0.17"
    },
    "devDependencies": {
      "@eslint/js": "^9.33.0",
32 changes: 24 additions & 8 deletions src/components/EnhancedChatInterface.tsx
@@ -147,8 +147,24 @@ const EnhancedChatInterface: React.FC = () => {
        // Handle the streaming response
        if (typeof aiResponse === 'string') {
          responseContent = aiResponse;
-       } else if (aiResponse && typeof aiResponse === 'object') {
-         // Handle streaming response - convert to string
+       } else if (aiResponse && typeof aiResponse === 'object' && 'textStream' in aiResponse) {
+         // Properly consume the streaming response
+         const streamResult = aiResponse as { textStream: AsyncIterable<string> };
+         const chunks: string[] = [];
+         let totalLength = 0;
+
+         for await (const delta of streamResult.textStream) {
+           const piece = String(delta);
+           chunks.push(piece);
+           totalLength += piece.length;
+           if (totalLength > 50000) {
+             break;
+           }
+         }
+
+         responseContent = chunks.join('').slice(0, 50000);
        } else {
          // Fallback if response format is unexpected
          responseContent = 'AI response generated successfully';
        }
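The capped manual drain of `textStream` in this hunk could be factored into a small helper. A minimal sketch, assuming only the response shape the component already checks for (`textStream: AsyncIterable<string>`); the helper name `collectTextStream` is hypothetical, not part of the codebase:

```ts
// Hypothetical helper: drain an AsyncIterable<string> into a capped string.
// The 50_000-character cap mirrors the limit hard-coded in the hunk above.
async function collectTextStream(
  stream: AsyncIterable<string>,
  maxChars = 50_000,
): Promise<string> {
  const chunks: string[] = [];
  let total = 0;
  for await (const delta of stream) {
    const piece = String(delta);
    chunks.push(piece);
    total += piece.length;
    if (total >= maxChars) break; // stop pulling once the cap is hit
  }
  return chunks.join('').slice(0, maxChars);
}

// Usage against the same guard the component applies:
// if (aiResponse && typeof aiResponse === 'object' && 'textStream' in aiResponse) {
//   responseContent = await collectTextStream(
//     (aiResponse as { textStream: AsyncIterable<string> }).textStream,
//   );
// }
```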

@@ -159,13 +175,13 @@
          role: 'assistant',
          metadata: {
            model: 'ai-assistant',
-           tokens: undefined,
-           cost: undefined
+           tokens: Math.floor(responseContent.length / 4), // Rough estimate
+           cost: 0.01 // Default cost
          }
        });

        // Auto-generate chat title if first message
-       if (messages && 'messages' in messages && messages.messages.length === 0) {
+       if (messages && typeof messages === 'object' && 'messages' in messages && Array.isArray(messages.messages) && messages.messages.length === 0) {
          await generateChatTitleFromMessages([
            { content: userInput, role: 'user' },
            { content: responseContent, role: 'assistant' }
@@ -219,8 +235,8 @@

    // Memoized message list to prevent unnecessary re-renders
    const memoizedMessages = useMemo(() => {
-     if (messages && 'messages' in messages) {
-       return messages.messages || [];
+     if (messages && typeof messages === 'object' && 'messages' in messages && Array.isArray(messages.messages)) {
+       return messages.messages;
      }
      return [];
    }, [messages]);
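This `typeof`/`in`/`Array.isArray` combination now appears three times in the component (the title-generation branch, this memo, and the `chats` prop in the next hunk). A reusable type guard could collapse the repetition — a sketch under that assumption; `hasArrayProp` is a hypothetical name, not an existing project helper:

```ts
// Hypothetical type guard: "non-null object carrying an array under `key`".
function hasArrayProp<K extends string>(
  value: unknown,
  key: K,
): value is Record<K, unknown[]> {
  return (
    typeof value === 'object' &&
    value !== null &&
    key in value &&
    Array.isArray((value as Record<K, unknown>)[key])
  );
}

// The memo above would then read:
// const memoizedMessages = useMemo(
//   () => (hasArrayProp(messages, 'messages') ? messages.messages : []),
//   [messages],
// );
```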
@@ -269,7 +285,7 @@
          <ChatSidebar
            sidebarExpanded={sidebarExpanded}
            setSidebarExpanded={setSidebarExpanded}
-           chats={chats && 'chats' in chats ? chats.chats : []}
+           chats={chats && typeof chats === 'object' && 'chats' in chats && Array.isArray(chats.chats) ? chats.chats : []}
            selectedChatId={selectedChatId}
            startNewChat={startNewChat}
            selectChat={selectChat}
22 changes: 12 additions & 10 deletions src/hooks/useUsageTracking.ts
@@ -155,24 +155,26 @@ export const useUsageTracking = () => {
    syncPendingEvents,
    getSubscription: async () => {
      try {
-       const base = import.meta.env.VITE_CONVEX_URL as string | undefined;
-       const url = base
-         ? `${base.replace(/\/$/, '')}/trpc/billing.getUserSubscription`
-         : '/trpc/billing.getUserSubscription';
+       // Use the correct tRPC endpoint based on vercel.json routing
+       const url = '/hono/trpc/billing.getUserSubscription';
        const token = authTokenManager.getToken();
        const res = await fetch(url, {
-         method: 'GET',
+         method: 'POST', // tRPC queries use POST
          headers: {
+           'Content-Type': 'application/json',
            ...(token ? { authorization: `Bearer ${token}` } : {}),
-           accept: 'application/json',
          },
-         // Include cookies if same-origin; omit if cross-origin bearer-token flow
-         credentials: base ? 'omit' : 'include',
+         body: JSON.stringify({}), // Empty body for query
+         credentials: 'include',
        });
-       if (!res.ok) return null;
+       if (!res.ok) {
+         console.error('tRPC subscription fetch failed:', res.status, res.statusText);
+         return null;
+       }
        const json = await res.json();
        return json?.result?.data ?? null;
-     } catch {
+     } catch (error) {
+       console.error('Error fetching subscription:', error);
        return null;
      }
    },
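Two hedged observations on this hunk. First, stock tRPC HTTP handlers serve queries over GET and reserve POST for mutations, so the `POST` here presumably relies on the project's Hono adapter being configured to accept it. Second, the fetch-plus-envelope pattern could be generalized; a sketch, where the helper name and the `{ result: { data } }` unwrapping simply mirror the code above rather than any confirmed project API:

```ts
// Hypothetical wrapper around the tRPC-over-HTTP call used in getSubscription.
// The /hono/trpc prefix and response envelope mirror the diff above.
async function callTrpcProcedure<T>(
  procedure: string,
  token?: string | null,
): Promise<T | null> {
  try {
    const res = await fetch(`/hono/trpc/${procedure}`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        ...(token ? { authorization: `Bearer ${token}` } : {}),
      },
      body: JSON.stringify({}),
      credentials: 'include',
    });
    if (!res.ok) {
      console.error(`tRPC call failed: ${procedure}`, res.status, res.statusText);
      return null;
    }
    const json = await res.json();
    return (json?.result?.data ?? null) as T | null;
  } catch (error) {
    console.error(`Error calling ${procedure}:`, error);
    return null;
  }
}

// e.g. const sub = await callTrpcProcedure('billing.getUserSubscription', authTokenManager.getToken());
```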
Expand Down
46 changes: 23 additions & 23 deletions src/lib/ai.ts
@@ -26,10 +26,10 @@ const responseCache = new AIResponseCache();

  // Cost tracking and limits
  const MODEL_PRICING = {
-   'openai/gpt-oss-120b': {
-     // Pricing based on Groq docs: $0.15 / 1M input tokens, $0.75 / 1M output tokens
-     input: 0.15 / 1_000_000,
-     output: 0.75 / 1_000_000,
+   'moonshotai/kimi-k2-instruct': {
+     // Pricing based on Groq docs: $1.00 / 1M input tokens, $3.00 / 333,333 output tokens
+     input: 1.00 / 1_000_000,
+     output: 3.00 / 333_333,
    }
  };
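For reference, the `calculateCost` helper invoked throughout the hunks below presumably just multiplies these per-token rates; a minimal sketch under that assumption (the real implementation is outside this diff):

```ts
// Sketch of the cost arithmetic implied by MODEL_PRICING; the actual
// calculateCost in src/lib/ai.ts is not shown in this diff.
type Pricing = { input: number; output: number };

const PRICING: Record<string, Pricing> = {
  'moonshotai/kimi-k2-instruct': {
    input: 1.0 / 1_000_000, // $ per input token
    output: 3.0 / 333_333, // $ per output token, as written in the hunk above
  },
};

function calculateCost(model: string, inputTokens: number, outputTokens: number): number {
  const p = PRICING[model];
  if (!p) return 0; // unknown model: treat as free rather than throwing
  return inputTokens * p.input + outputTokens * p.output;
}

// e.g. calculateCost('moonshotai/kimi-k2-instruct', 1_000, 8_000) ≈ 0.001 + 0.072
```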

@@ -157,13 +157,13 @@ const openrouter = createOpenRouter({
  // Get current model instance
  async function getCurrentModel() {
    const groq = createGroqInstance();
-   return (await groq)('openai/gpt-oss-120b');
+   return (await groq)('moonshotai/kimi-k2-instruct');
  }

  // Gemma model (for concise title generation)
  async function getGemmaModel() {
    const groq = await createGroqInstance();
-   return groq('openai/gpt-oss-120b');
+   return groq('moonshotai/kimi-k2-instruct');
  }

  // OpenRouter failsafe model
@@ -181,7 +181,7 @@ export async function generateAIResponse(prompt: string, options?: { skipCache?:
      logger.info('Using cached AI response');
      aiMonitoring.recordOperation({
        operation: 'generateText',
-       model: 'openai/gpt-oss-120b',
+       model: 'moonshotai/kimi-k2-instruct',
        duration: 0,
        success: true,
        inputTokens: 0,
@@ -216,7 +216,7 @@

    const estimatedInputTokens = Math.ceil(prompt.length / 4);
    const estimatedOutputTokens = 8000;
-   const estimatedCost = calculateCost('openai/gpt-oss-120b', estimatedInputTokens, estimatedOutputTokens);
+   const estimatedCost = calculateCost('moonshotai/kimi-k2-instruct', estimatedInputTokens, estimatedOutputTokens);

    checkCostLimit(estimatedCost);

@@ -227,29 +227,29 @@
    });

    const currentModel = await getCurrentModel();
-   span.setAttribute("model", "openai/gpt-oss-120b");
+   span.setAttribute("model", "moonshotai/kimi-k2-instruct");

    const { text, usage } = await circuitBreaker.execute(
      () => withRetry(
        () => monitorAIOperation(
          () => withTimeout(generateText({
            model: currentModel,
            prompt,
-           temperature: 0.7,
+           temperature: 0.6,
          }), 60_000),
          'generateText',
-         { model: 'openai/gpt-oss-120b', promptLength: prompt.length }
+         { model: 'moonshotai/kimi-k2-instruct', promptLength: prompt.length }
        ),
        'AI Text Generation'
      ),
      'generateAIResponse'
    )

-   const actualCost = usage ? calculateCost('openai/gpt-oss-120b', usage.inputTokens || 0, usage.outputTokens || 0) : estimatedCost;
+   const actualCost = usage ? calculateCost('moonshotai/kimi-k2-instruct', usage.inputTokens || 0, usage.outputTokens || 0) : estimatedCost;
    addTodayCost(actualCost);

    await recordAIConversation({
-     model: 'openai/gpt-oss-120b',
+     model: 'moonshotai/kimi-k2-instruct',
      inputTokens: usage?.inputTokens || 0,
      outputTokens: usage?.outputTokens || 0,
      cost: actualCost,
@@ -262,7 +262,7 @@

    logger.info("AI text generation completed", {
      responseLength: text.length,
-     model: "openai/gpt-oss-120b",
+     model: "moonshotai/kimi-k2-instruct",
      actualCost: actualCost.toFixed(6),
      inputTokens: usage?.inputTokens || 0,
      outputTokens: usage?.outputTokens || 0,
@@ -271,7 +271,7 @@

    aiMonitoring.recordOperation({
      operation: 'generateText',
-     model: 'openai/gpt-oss-120b',
+     model: 'moonshotai/kimi-k2-instruct',
      duration: Date.now() - startTime,
      success: true,
      inputTokens: usage?.inputTokens || 0,
@@ -297,7 +297,7 @@

    aiMonitoring.recordOperation({
      operation: 'generateText',
-     model: 'openai/gpt-oss-120b',
+     model: 'moonshotai/kimi-k2-instruct',
      duration: Date.now() - startTime,
      success: false,
      error: aiError.message,
@@ -366,7 +366,7 @@ export async function streamAIResponse(prompt: string) {
    const fullPrompt = systemPrompt + "\n\n" + prompt;
    const estimatedInputTokens = Math.ceil(fullPrompt.length / 4);
    const estimatedOutputTokens = 8000;
-   const estimatedCost = calculateCost('openai/gpt-oss-120b', estimatedInputTokens, estimatedOutputTokens);
+   const estimatedCost = calculateCost('moonshotai/kimi-k2-instruct', estimatedInputTokens, estimatedOutputTokens);

    checkCostLimit(estimatedCost);

@@ -377,7 +377,7 @@
    });

    const model = await getCurrentModel();
-   span.setAttribute("model", "openai/gpt-oss-120b");
+   span.setAttribute("model", "moonshotai/kimi-k2-instruct");

    const result = await circuitBreaker.execute(
      () => withRetry(
@@ -388,10 +388,10 @@
            { role: 'system', content: systemPrompt },
            { role: 'user', content: prompt }
          ],
-         temperature: 0.7,
+         temperature: 0.6,
        }),
        'streamText',
-       { model: 'openai/gpt-oss-120b', promptLength: prompt.length }
+       { model: 'moonshotai/kimi-k2-instruct', promptLength: prompt.length }
      ),
      'AI Stream Generation'
    ),
@@ -401,21 +401,21 @@
    addTodayCost(estimatedCost);

    await recordAIConversation({
-     model: 'openai/gpt-oss-120b',
+     model: 'moonshotai/kimi-k2-instruct',
      inputTokens: estimatedInputTokens,
      outputTokens: estimatedOutputTokens,
      cost: estimatedCost,
    });

    logger.info("AI streaming started successfully", {
-     model: "openai/gpt-oss-120b",
+     model: "moonshotai/kimi-k2-instruct",
      estimatedCost: estimatedCost.toFixed(6),
      dailyCost: getTodayCost().toFixed(4)
    });

    aiMonitoring.recordOperation({
      operation: 'streamText',
-     model: 'openai/gpt-oss-120b',
+     model: 'moonshotai/kimi-k2-instruct',
      duration: Date.now() - startTime,
      success: true,
      inputTokens: estimatedInputTokens,
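A closing observation: this streaming path books the fixed 8 000-token estimate rather than actual usage, since token counts are unknown when the stream starts. Recent AI SDK releases expose an `onFinish` callback on `streamText` that reports final usage, which could reconcile the books — a hedged sketch reusing this file's helpers (`calculateCost`, `addTodayCost`) and the surrounding variables of `streamAIResponse`; treat the field names as version-dependent:

```ts
import { streamText } from 'ai';

// Sketch: settle actual cost once the stream completes. Assumes the
// surrounding scope of streamAIResponse (model, systemPrompt, prompt,
// estimatedCost) plus this file's calculateCost/addTodayCost helpers.
const result = streamText({
  model,
  messages: [
    { role: 'system', content: systemPrompt },
    { role: 'user', content: prompt },
  ],
  temperature: 0.6,
  onFinish: ({ usage }) => {
    const actualCost = calculateCost(
      'moonshotai/kimi-k2-instruct',
      usage.inputTokens ?? 0,
      usage.outputTokens ?? 0,
    );
    // Book only the delta against the estimate already recorded.
    addTodayCost(actualCost - estimatedCost);
  },
});
```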