diff --git a/app/components/chat/BaseChat.tsx b/app/components/chat/BaseChat.tsx
index 1777d734b5..7e82b3581f 100644
--- a/app/components/chat/BaseChat.tsx
+++ b/app/components/chat/BaseChat.tsx
@@ -3,7 +3,7 @@
  * Preventing TS checks with files presented in the video for a better presentation.
  */
 import type { Message } from 'ai';
-import React, { type RefCallback, useEffect, useState } from 'react';
+import React, { type RefCallback, useCallback, useEffect, useState } from 'react';
 import { ClientOnly } from 'remix-utils/client-only';
 import { Menu } from '~/components/sidebar/Menu.client';
 import { IconButton } from '~/components/ui/IconButton';
@@ -31,6 +31,7 @@ import { toast } from 'react-toastify';
 import StarterTemplates from './StarterTemplates';
 import type { ActionAlert } from '~/types/actions';
 import ChatAlert from './ChatAlert';
+import { LLMManager } from '~/lib/modules/llm/manager';
 
 const TEXTAREA_MIN_HEIGHT = 76;
 
@@ -100,26 +101,36 @@ export const BaseChat = React.forwardRef<HTMLTextAreaElement, BaseChatProps>(
     ref,
   ) => {
     const TEXTAREA_MAX_HEIGHT = chatStarted ? 400 : 200;
-    const [apiKeys, setApiKeys] = useState<Record<string, string>>(() => {
-      const savedKeys = Cookies.get('apiKeys');
-
-      if (savedKeys) {
-        try {
-          return JSON.parse(savedKeys);
-        } catch (error) {
-          console.error('Failed to parse API keys from cookies:', error);
-          return {};
-        }
-      }
-
-      return {};
-    });
+    const [apiKeys, setApiKeys] = useState<Record<string, string>>(getApiKeysFromCookies());
     const [modelList, setModelList] = useState(MODEL_LIST);
     const [isModelSettingsCollapsed, setIsModelSettingsCollapsed] = useState(false);
     const [isListening, setIsListening] = useState(false);
     const [recognition, setRecognition] = useState<SpeechRecognition | null>(null);
     const [transcript, setTranscript] = useState('');
+    const [isModelLoading, setIsModelLoading] = useState<string | undefined>('all');
+
+    const getProviderSettings = useCallback(() => {
+      let providerSettings: Record<string, IProviderSetting> | undefined = undefined;
+
+      try {
+        const savedProviderSettings = Cookies.get('providers');
+
+        if (savedProviderSettings) {
+          const parsedProviderSettings = JSON.parse(savedProviderSettings);
+
+          if (typeof parsedProviderSettings === 'object' && parsedProviderSettings !== null) {
+            providerSettings = parsedProviderSettings;
+          }
+        }
+      } catch (error) {
+        console.error('Error loading Provider Settings from cookies:', error);
+
+        // Clear invalid cookie data
+        Cookies.remove('providers');
+      }
+
+      return providerSettings;
+    }, []);
 
     useEffect(() => {
       console.log(transcript);
     }, [transcript]);
@@ -157,42 +168,63 @@
     }, []);
 
     useEffect(() => {
-      let providerSettings: Record<string, IProviderSetting> | undefined = undefined;
-
-      try {
-        const savedProviderSettings = Cookies.get('providers');
+      if (typeof window !== 'undefined') {
+        const providerSettings = getProviderSettings();
+        let parsedApiKeys: Record<string, string> | undefined = {};
 
-        if (savedProviderSettings) {
-          const parsedProviderSettings = JSON.parse(savedProviderSettings);
+        try {
+          parsedApiKeys = getApiKeysFromCookies();
+          setApiKeys(parsedApiKeys);
+        } catch (error) {
+          console.error('Error loading API keys from cookies:', error);
 
-          if (typeof parsedProviderSettings === 'object' && parsedProviderSettings !== null) {
-            providerSettings = parsedProviderSettings;
-          }
+          // Clear invalid cookie data
+          Cookies.remove('apiKeys');
         }
-      } catch (error) {
-        console.error('Error loading Provider Settings from cookies:', error);
-
-        // Clear invalid cookie data
-        Cookies.remove('providers');
+        setIsModelLoading('all');
+        initializeModelList({ apiKeys: parsedApiKeys, providerSettings })
+          .then((modelList) => {
+            // console.log('Model List: ', modelList);
+            setModelList(modelList);
+          })
+          .catch((error) => {
+            console.error('Error initializing model list:', error);
+          })
+          .finally(() => {
+            setIsModelLoading(undefined);
+          });
+      }
+    }, [providerList]);
 
-      let parsedApiKeys: Record<string, string> | undefined = {};
+    const onApiKeysChange = async (providerName: string, apiKey: string) => {
+      const newApiKeys = { ...apiKeys, [providerName]: apiKey };
+      setApiKeys(newApiKeys);
+      Cookies.set('apiKeys', JSON.stringify(newApiKeys));
 
-      try {
-        parsedApiKeys = getApiKeysFromCookies();
-        setApiKeys(parsedApiKeys);
-      } catch (error) {
-        console.error('Error loading API keys from cookies:', error);
+      const provider = LLMManager.getInstance(import.meta.env || process.env || {}).getProvider(providerName);
 
-        // Clear invalid cookie data
-        Cookies.remove('apiKeys');
-      }
+      if (provider && provider.getDynamicModels) {
+        setIsModelLoading(providerName);
 
-      initializeModelList({ apiKeys: parsedApiKeys, providerSettings }).then((modelList) => {
-        console.log('Model List: ', modelList);
-        setModelList(modelList);
-      });
-    }, [apiKeys]);
+        try {
+          const providerSettings = getProviderSettings();
+          const staticModels = provider.staticModels;
+          const dynamicModels = await provider.getDynamicModels(
+            newApiKeys,
+            providerSettings,
+            import.meta.env || process.env || {},
+          );
+
+          setModelList((preModels) => {
+            const filteredOutPreModels = preModels.filter((x) => x.provider !== providerName);
+            return [...filteredOutPreModels, ...staticModels, ...dynamicModels];
+          });
+        } catch (error) {
+          console.error('Error loading dynamic models:', error);
+        }
+        setIsModelLoading(undefined);
+      }
+    };
 
     const startListening = () => {
       if (recognition) {
@@ -371,29 +403,32 @@ export const BaseChat = React.forwardRef<HTMLTextAreaElement, BaseChatProps>(
-                  <ModelSelector
-                    key={provider?.name + ':' + modelList.length}
-                    model={model}
-                    setModel={setModel}
-                    modelList={modelList}
-                    provider={provider}
-                    setProvider={setProvider}
-                    providerList={providerList || (PROVIDER_LIST as ProviderInfo[])}
-                    apiKeys={apiKeys}
-                  />
-                  {(providerList || []).length > 0 && provider && (
-                    <APIKeyManager
-                      provider={provider}
-                      apiKey={apiKeys[provider.name] || ''}
-                      setApiKey={(key) => {
-                        const newApiKeys = { ...apiKeys, [provider.name]: key };
-                        setApiKeys(newApiKeys);
-                        Cookies.set('apiKeys', JSON.stringify(newApiKeys));
-                      }}
-                    />
-                  )}
+                  <ClientOnly>
+                    {() => (
+                      <div className={isModelSettingsCollapsed ? 'hidden' : ''}>
+                        <ModelSelector
+                          key={provider?.name + ':' + modelList.length}
+                          model={model}
+                          setModel={setModel}
+                          modelList={modelList}
+                          provider={provider}
+                          setProvider={setProvider}
+                          providerList={providerList || (PROVIDER_LIST as ProviderInfo[])}
+                          apiKeys={apiKeys}
+                          modelLoading={isModelLoading}
+                        />
+                        {(providerList || []).length > 0 && provider && (
+                          <APIKeyManager
+                            provider={provider}
+                            apiKey={apiKeys[provider.name] || ''}
+                            setApiKey={(key) => {
+                              onApiKeysChange(provider.name, key);
+                            }}
+                          />
+                        )}
+                      </div>
+                    )}
+                  </ClientOnly>
diff --git a/app/components/chat/Chat.client.tsx b/app/components/chat/Chat.client.tsx
--- a/app/components/chat/Chat.client.tsx
+++ b/app/components/chat/Chat.client.tsx
@@ ... @@ export const ChatImpl = memo(
     useEffect(() => {
       const prompt = searchParams.get('prompt');
-      console.log(prompt, searchParams, model, provider);
+
+      // console.log(prompt, searchParams, model, provider);
 
       if (prompt) {
         setSearchParams({});
@@ -289,14 +290,14 @@ export const ChatImpl = memo(
 
       // reload();
 
-      const template = await selectStarterTemplate({
+      const { template, title } = await selectStarterTemplate({
         message: messageInput,
         model,
         provider,
       });
 
       if (template !== 'blank') {
-        const temResp = await getTemplates(template);
+        const temResp = await getTemplates(template, title);
 
         if (temResp) {
           const { assistantMessage, userMessage } = temResp;
diff --git a/app/components/chat/ModelSelector.tsx b/app/components/chat/ModelSelector.tsx
index ec4da63ff8..521ccac304 100644
--- a/app/components/chat/ModelSelector.tsx
+++ b/app/components/chat/ModelSelector.tsx
@@ -10,6 +10,7 @@ interface ModelSelectorProps {
   modelList: ModelInfo[];
   providerList: ProviderInfo[];
   apiKeys: Record<string, string>;
+  modelLoading?: string;
 }
 
 export const ModelSelector = ({
@@ -19,6 +20,7 @@ export const ModelSelector = ({
   setProvider,
   modelList,
   providerList,
+  modelLoading,
 }: ModelSelectorProps) => {
   // Load enabled providers from cookies
@@ -83,14 +85,21 @@ export const ModelSelector = ({
         value={model}
         onChange={(e) => setModel?.(e.target.value)}
         className="flex-1 p-2 rounded-lg border border-bolt-elements-borderColor bg-bolt-elements-prompt-background text-bolt-elements-textPrimary focus:outline-none focus:ring-2 focus:ring-bolt-elements-focus transition-all lg:max-w-[70%]"
+        disabled={modelLoading === 'all' || modelLoading === provider?.name}
       >
-        {[...modelList]
-          .filter((e) => e.provider == provider?.name && e.name)
-          .map((modelOption, index) => (
-            <option key={index} value={modelOption.name}>
-              {modelOption.label}
-            </option>
-          ))}
+        {modelLoading == 'all' || modelLoading == provider?.name ? (
+          <option key={0} value="">
+            Loading...
+          </option>
+        ) : (
+          [...modelList]
+            .filter((e) => e.provider == provider?.name && e.name)
+            .map((modelOption, index) => (
+              <option key={index} value={modelOption.name}>
+                {modelOption.label}
+              </option>
+            ))
+        )}
       </select>
     );
diff --git a/app/components/settings/providers/ProvidersTab.tsx b/app/components/settings/providers/ProvidersTab.tsx
index e03731f436..2f790bc8ab 100644
--- a/app/components/settings/providers/ProvidersTab.tsx
+++ b/app/components/settings/providers/ProvidersTab.tsx
@@ -6,9 +6,10 @@ import type { IProviderConfig } from '~/types/model';
 import { logStore } from '~/lib/stores/logs';
 
 // Import a default fallback icon
-import DefaultIcon from '/icons/Default.svg'; // Adjust the path as necessary
 import { providerBaseUrlEnvKeys } from '~/utils/constants';
 
+const DefaultIcon = '/icons/Default.svg'; // Adjust the path as necessary
+
 export default function ProvidersTab() {
   const { providers, updateProviderSettings, isLocalModel } = useSettings();
   const [filteredProviders, setFilteredProviders] = useState<IProviderConfig[]>([]);
diff --git a/app/entry.server.tsx b/app/entry.server.tsx
index 5e92d21ecd..642918b22d 100644
--- a/app/entry.server.tsx
+++ b/app/entry.server.tsx
@@ -5,7 +5,6 @@ import { renderToReadableStream } from 'react-dom/server';
 import { renderHeadToString } from 'remix-island';
 import { Head } from './root';
 import { themeStore } from '~/lib/stores/theme';
-import { initializeModelList } from '~/utils/constants';
 
 export default async function handleRequest(
   request: Request,
@@ -14,7 +13,7 @@ export default async function handleRequest(
   remixContext: EntryContext,
   _loadContext: AppLoadContext,
 ) {
-  await initializeModelList({});
+  // await initializeModelList({});
 
   const readable = await renderToReadableStream(<RemixServer context={remixContext} url={request.url} />, {
     signal: request.signal,
diff --git a/app/lib/.server/llm/stream-text.ts b/app/lib/.server/llm/stream-text.ts
index fee99e21e5..d8f41b591d 100644
--- a/app/lib/.server/llm/stream-text.ts
+++ b/app/lib/.server/llm/stream-text.ts
@@ -4,7 +4,6 @@ import { getSystemPrompt } from '~/lib/common/prompts/prompts';
 import {
   DEFAULT_MODEL,
   DEFAULT_PROVIDER,
-  getModelList,
   MODEL_REGEX,
   MODIFICATIONS_TAG_NAME,
   PROVIDER_LIST,
@@ -15,6 +14,8 @@ import ignore from 'ignore';
 import type { IProviderSetting } from '~/types/model';
 import { PromptLibrary } from '~/lib/common/prompt-library';
 import { allowedHTMLElements } from '~/utils/markdown';
+import { LLMManager } from '~/lib/modules/llm/manager';
+import { createScopedLogger } from '~/utils/logger';
 
 interface ToolResult {
   toolCallId: string;
@@ -142,6 +143,8 @@ function extractPropertiesFromMessage(message: Message): { model: string; provider: string; content: string } {
   return { model, provider, content: cleanedContent };
 }
 
+const logger = createScopedLogger('stream-text');
+
 export async function streamText(props: {
   messages: Messages;
   env: Env;
@@ -158,15 +161,10 @@ export async function streamText(props: {
   let currentModel = DEFAULT_MODEL;
   let currentProvider = DEFAULT_PROVIDER.name;
-  const MODEL_LIST = await getModelList({ apiKeys, providerSettings, serverEnv: serverEnv as any });
   const processedMessages = messages.map((message) => {
     if (message.role === 'user') {
       const { model, provider, content } = extractPropertiesFromMessage(message);
-
-      if (MODEL_LIST.find((m) => m.name === model)) {
-        currentModel = model;
-      }
-
+      currentModel = model;
       currentProvider = provider;
 
       return { ...message, content };
@@ -183,11 +181,36 @@
     return message;
   });
 
-  const modelDetails = MODEL_LIST.find((m) => m.name === currentModel);
+  const provider = PROVIDER_LIST.find((p) => p.name === currentProvider) || DEFAULT_PROVIDER;
+  const staticModels = LLMManager.getInstance().getStaticModelListFromProvider(provider);
+  let modelDetails = staticModels.find((m) => m.name === currentModel);
+
+  if (!modelDetails) {
+    const modelsList = [
+      ...(provider.staticModels || []),
+      ...(await LLMManager.getInstance().getModelListFromProvider(provider, {
+        apiKeys,
+        providerSettings,
+        serverEnv: serverEnv as any,
+      })),
+    ];
+
+    if (!modelsList.length) {
+      throw new Error(`No models found for provider ${provider.name}`);
+    }
 
-  const dynamicMaxTokens = modelDetails && modelDetails.maxTokenAllowed ? modelDetails.maxTokenAllowed : MAX_TOKENS;
+    modelDetails = modelsList.find((m) => m.name === currentModel);
 
-  const provider = PROVIDER_LIST.find((p) => p.name === currentProvider) || DEFAULT_PROVIDER;
+    if (!modelDetails) {
+      // Fallback to first model
+      logger.warn(
+        `MODEL [${currentModel}] not found in provider [${provider.name}]. Falling back to first model. ${modelsList[0].name}`,
+      );
+      modelDetails = modelsList[0];
+    }
+  }
+
+  const dynamicMaxTokens = modelDetails && modelDetails.maxTokenAllowed ? modelDetails.maxTokenAllowed : MAX_TOKENS;
 
   let systemPrompt = PromptLibrary.getPropmtFromLibrary(promptId || 'default', {
@@ -201,6 +224,8 @@
     systemPrompt = `${systemPrompt}\n\n ${codeContext}`;
   }
 
+  logger.info(`Sending llm call to ${provider.name} with model ${modelDetails.name}`);
+
   return _streamText({
     model: provider.getModelInstance({
       model: currentModel,
diff --git a/app/lib/modules/llm/base-provider.ts b/app/lib/modules/llm/base-provider.ts
index 5a1ff1b209..ebe104998f 100644
--- a/app/lib/modules/llm/base-provider.ts
+++ b/app/lib/modules/llm/base-provider.ts
@@ -8,6 +8,10 @@ export abstract class BaseProvider implements ProviderInfo {
   abstract name: string;
   abstract staticModels: ModelInfo[];
   abstract config: ProviderConfig;
+  cachedDynamicModels?: {
+    cacheId: string;
+    models: ModelInfo[];
+  };
 
   getApiKeyLink?: string;
   labelForGetApiKey?: string;
@@ -49,6 +53,54 @@ export abstract class BaseProvider implements ProviderInfo {
       apiKey,
     };
   }
+  getModelsFromCache(options: {
+    apiKeys?: Record<string, string>;
+    providerSettings?: Record<string, IProviderSetting>;
+    serverEnv?: Record<string, string>;
+  }): ModelInfo[] | null {
+    if (!this.cachedDynamicModels) {
+      // console.log('no dynamic models', this.name);
+      return null;
+    }
+
+    const cacheKey = this.cachedDynamicModels.cacheId;
+    const generatedCacheKey = this.getDynamicModelsCacheKey(options);
+
+    if (cacheKey !== generatedCacheKey) {
+      // console.log('cache key mismatch', this.name, cacheKey, generatedCacheKey);
+      this.cachedDynamicModels = undefined;
+      return null;
+    }
+
+    return this.cachedDynamicModels.models;
+  }
+  getDynamicModelsCacheKey(options: {
+    apiKeys?: Record<string, string>;
+    providerSettings?: Record<string, IProviderSetting>;
+    serverEnv?: Record<string, string>;
+  }) {
+    return JSON.stringify({
+      apiKeys: options.apiKeys?.[this.name],
+      providerSettings: options.providerSettings?.[this.name],
+      serverEnv: options.serverEnv,
+    });
+  }
+  storeDynamicModels(
+    options: {
+      apiKeys?: Record<string, string>;
+      providerSettings?: Record<string, IProviderSetting>;
+      serverEnv?: Record<string, string>;
+    },
+    models: ModelInfo[],
+  ) {
+    const cacheId = this.getDynamicModelsCacheKey(options);
+
+    // console.log('caching dynamic models', this.name, cacheId);
+    this.cachedDynamicModels = {
+      cacheId,
+      models,
+    };
+  }
 
   // Declare the optional getDynamicModels method
   getDynamicModels?(
diff --git a/app/lib/modules/llm/manager.ts b/app/lib/modules/llm/manager.ts
index 38dc8254d9..5b134218d0 100644
--- a/app/lib/modules/llm/manager.ts
+++ b/app/lib/modules/llm/manager.ts
@@ -2,7 +2,9 @@ import type { IProviderSetting } from '~/types/model';
 import { BaseProvider } from './base-provider';
 import type { ModelInfo, ProviderInfo } from './types';
 import * as providers from './registry';
+import { createScopedLogger } from '~/utils/logger';
 
+const logger = createScopedLogger('LLMManager');
 export class LLMManager {
   private static _instance: LLMManager;
   private _providers: Map<string, BaseProvider> = new Map();
@@ -40,22 +42,22 @@ export class LLMManager {
           try {
             this.registerProvider(provider);
           } catch (error: any) {
-            console.log('Failed To Register Provider: ', provider.name, 'error:', error.message);
+            logger.warn('Failed To Register Provider: ', provider.name, 'error:', error.message);
           }
         }
       }
     } catch (error) {
-      console.error('Error registering providers:', error);
+      logger.error('Error registering providers:', error);
     }
   }
 
   registerProvider(provider: BaseProvider) {
     if (this._providers.has(provider.name)) {
-      console.warn(`Provider ${provider.name} is already registered. Skipping.`);
+      logger.warn(`Provider ${provider.name} is already registered. Skipping.`);
       return;
     }
-    console.log('Registering Provider: ', provider.name);
+    logger.info('Registering Provider: ', provider.name);
     this._providers.set(provider.name, provider);
     this._modelList = [...this._modelList, ...provider.staticModels];
   }
@@ -79,19 +81,42 @@ export class LLMManager {
   }): Promise<ModelInfo[]> {
     const { apiKeys, providerSettings, serverEnv } = options;
 
+    let enabledProviders = Array.from(this._providers.values()).map((p) => p.name);
+
+    if (providerSettings) {
+      enabledProviders = enabledProviders.filter((p) => providerSettings[p].enabled);
+    }
+
     // Get dynamic models from all providers that support them
     const dynamicModels = await Promise.all(
       Array.from(this._providers.values())
+        .filter((provider) => enabledProviders.includes(provider.name))
         .filter(
           (provider): provider is BaseProvider & Required<Pick<BaseProvider, 'getDynamicModels'>> =>
             !!provider.getDynamicModels,
         )
-        .map((provider) =>
-          provider.getDynamicModels(apiKeys, providerSettings?.[provider.name], serverEnv).catch((err) => {
-            console.error(`Error getting dynamic models ${provider.name} :`, err);
-            return [];
-          }),
-        ),
+        .map(async (provider) => {
+          const cachedModels = provider.getModelsFromCache(options);
+
+          if (cachedModels) {
+            return cachedModels;
+          }
+
+          const dynamicModels = await provider
+            .getDynamicModels(apiKeys, providerSettings?.[provider.name], serverEnv)
+            .then((models) => {
+              logger.info(`Caching ${models.length} dynamic models for ${provider.name}`);
+              provider.storeDynamicModels(options, models);
+
+              return models;
+            })
+            .catch((err) => {
+              logger.error(`Error getting dynamic models ${provider.name} :`, err);
+              return [];
+            });
+
+          return dynamicModels;
+        }),
     );
 
     // Combine static and dynamic models
@@ -103,6 +128,68 @@
     return modelList;
   }
+  getStaticModelList() {
+    return [...this._providers.values()].flatMap((p) => p.staticModels || []);
+  }
+  async getModelListFromProvider(
+    providerArg: BaseProvider,
+    options: {
+      apiKeys?: Record<string, string>;
+      providerSettings?: Record<string, IProviderSetting>;
+      serverEnv?: Record<string, string>;
+    },
+  ): Promise<ModelInfo[]> {
+    const provider = this._providers.get(providerArg.name);
+
+    if (!provider) {
+      throw new Error(`Provider ${providerArg.name} not found`);
+    }
+
+    const staticModels = provider.staticModels || [];
+
+    if (!provider.getDynamicModels) {
+      return staticModels;
+    }
+
+    const { apiKeys, providerSettings, serverEnv } = options;
+
+    const cachedModels = provider.getModelsFromCache({
+      apiKeys,
+      providerSettings,
+      serverEnv,
+    });
+
+    if (cachedModels) {
+      logger.info(`Found ${cachedModels.length} cached models for ${provider.name}`);
+      return [...cachedModels, ...staticModels];
+    }
+
+    logger.info(`Getting dynamic models for ${provider.name}`);
+
+    const dynamicModels = await provider
+      .getDynamicModels?.(apiKeys, providerSettings?.[provider.name], serverEnv)
+      .then((models) => {
+        logger.info(`Got ${models.length} dynamic models for ${provider.name}`);
+        provider.storeDynamicModels(options, models);
+
+        return models;
+      })
+      .catch((err) => {
+        logger.error(`Error getting dynamic models ${provider.name} :`, err);
+        return [];
+      });
+
+    return [...dynamicModels, ...staticModels];
+  }
+  getStaticModelListFromProvider(providerArg: BaseProvider) {
+    const provider = this._providers.get(providerArg.name);
+
+    if (!provider) {
+      throw new Error(`Provider ${providerArg.name} not found`);
+    }
+
+    return [...(provider.staticModels || [])];
+  }
   getDefaultProvider(): BaseProvider {
     const firstProvider = this._providers.values().next().value;
diff --git a/app/lib/modules/llm/providers/huggingface.ts b/app/lib/modules/llm/providers/huggingface.ts
index 5ae1d1b46b..996dbfced1 100644
--- a/app/lib/modules/llm/providers/huggingface.ts
+++ b/app/lib/modules/llm/providers/huggingface.ts
@@ -25,6 +25,30 @@ export default class HuggingFaceProvider extends BaseProvider {
       provider: 'HuggingFace',
       maxTokenAllowed: 8000,
     },
+    {
+      name: 'codellama/CodeLlama-34b-Instruct-hf',
+      label: 'CodeLlama-34b-Instruct (HuggingFace)',
+      provider: 'HuggingFace',
+      maxTokenAllowed: 8000,
+    },
+    {
+      name: 'NousResearch/Hermes-3-Llama-3.1-8B',
+      label: 'Hermes-3-Llama-3.1-8B (HuggingFace)',
+      provider: 'HuggingFace',
+      maxTokenAllowed: 8000,
+    },
+    {
+      name: 'Qwen/Qwen2.5-Coder-32B-Instruct',
+      label: 'Qwen2.5-Coder-32B-Instruct (HuggingFace)',
+      provider: 'HuggingFace',
+      maxTokenAllowed: 8000,
+    },
+    {
+      name: 'Qwen/Qwen2.5-72B-Instruct',
+      label: 'Qwen2.5-72B-Instruct (HuggingFace)',
+      provider: 'HuggingFace',
+      maxTokenAllowed: 8000,
+    },
     {
       name: 'meta-llama/Llama-3.1-70B-Instruct',
       label: 'Llama-3.1-70B-Instruct (HuggingFace)',
@@ -37,6 +61,24 @@
       provider: 'HuggingFace',
       maxTokenAllowed: 8000,
     },
+    {
+      name: '01-ai/Yi-1.5-34B-Chat',
+      label: 'Yi-1.5-34B-Chat (HuggingFace)',
+      provider: 'HuggingFace',
+      maxTokenAllowed: 8000,
+    },
+    {
+      name: 'codellama/CodeLlama-34b-Instruct-hf',
+      label: 'CodeLlama-34b-Instruct (HuggingFace)',
+      provider: 'HuggingFace',
+      maxTokenAllowed: 8000,
+    },
+    {
+      name: 'NousResearch/Hermes-3-Llama-3.1-8B',
+      label: 'Hermes-3-Llama-3.1-8B (HuggingFace)',
+      provider: 'HuggingFace',
+      maxTokenAllowed: 8000,
+    },
   ];
 
   getModelInstance(options: {
diff --git a/app/lib/modules/llm/providers/hyperbolic.ts b/app/lib/modules/llm/providers/hyperbolic.ts
index 88c943ca61..2f75215613 100644
--- a/app/lib/modules/llm/providers/hyperbolic.ts
+++ b/app/lib/modules/llm/providers/hyperbolic.ts
@@ -50,40 +50,35 @@ export default class HyperbolicProvider extends BaseProvider {
     settings?: IProviderSetting,
     serverEnv: Record<string, string> = {},
   ): Promise<ModelInfo[]> {
-    try {
-      const { baseUrl: fetchBaseUrl, apiKey } = this.getProviderBaseUrlAndKey({
-        apiKeys,
-        providerSettings: settings,
-        serverEnv,
-        defaultBaseUrlKey: '',
-        defaultApiTokenKey: 'HYPERBOLIC_API_KEY',
-      });
-      const baseUrl = fetchBaseUrl || 'https://api.hyperbolic.xyz/v1';
+    const { baseUrl: fetchBaseUrl, apiKey } = this.getProviderBaseUrlAndKey({
+      apiKeys,
+      providerSettings: settings,
+      serverEnv,
+      defaultBaseUrlKey: '',
+      defaultApiTokenKey: 'HYPERBOLIC_API_KEY',
+    });
+    const baseUrl = fetchBaseUrl || 'https://api.hyperbolic.xyz/v1';
 
-      if (!baseUrl || !apiKey) {
-        return [];
-      }
+    if (!apiKey) {
+      throw `Missing Api Key configuration for ${this.name} provider`;
+    }
 
-      const response = await fetch(`${baseUrl}/models`, {
-        headers: {
-          Authorization: `Bearer ${apiKey}`,
-        },
-      });
+    const response = await fetch(`${baseUrl}/models`, {
+      headers: {
+        Authorization: `Bearer ${apiKey}`,
+      },
+    });
 
-      const res = (await response.json()) as any;
+    const res = (await response.json()) as any;
 
-      const data = res.data.filter((model: any) => model.object === 'model' && model.supports_chat);
+    const data = res.data.filter((model: any) => model.object === 'model' && model.supports_chat);
 
-      return data.map((m: any) => ({
-        name: m.id,
-        label: `${m.id} - context ${m.context_length ? Math.floor(m.context_length / 1000) + 'k' : 'N/A'}`,
-        provider: this.name,
-        maxTokenAllowed: m.context_length || 8000,
-      }));
-    } catch (error: any) {
-      console.error('Error getting Hyperbolic models:', error.message);
-      return [];
-    }
+    return data.map((m: any) => ({
+      name: m.id,
+      label: `${m.id} - context ${m.context_length ? Math.floor(m.context_length / 1000) + 'k' : 'N/A'}`,
+      provider: this.name,
+      maxTokenAllowed: m.context_length || 8000,
+    }));
   }
 
   getModelInstance(options: {
@@ -103,8 +98,7 @@
     });
 
     if (!apiKey) {
-      console.log(`Missing configuration for ${this.name} provider`);
-      throw new Error(`Missing configuration for ${this.name} provider`);
+      throw `Missing Api Key configuration for ${this.name} provider`;
     }
 
     const openai = createOpenAI({
diff --git a/app/lib/modules/llm/providers/lmstudio.ts b/app/lib/modules/llm/providers/lmstudio.ts
index 3162972247..4309df0dbc 100644
--- a/app/lib/modules/llm/providers/lmstudio.ts
+++ b/app/lib/modules/llm/providers/lmstudio.ts
@@ -22,33 +22,27 @@ export default class LMStudioProvider extends BaseProvider {
     settings?: IProviderSetting,
     serverEnv: Record<string, string> = {},
   ): Promise<ModelInfo[]> {
-    try {
-      const { baseUrl } = this.getProviderBaseUrlAndKey({
-        apiKeys,
-        providerSettings: settings,
-        serverEnv,
-        defaultBaseUrlKey: 'LMSTUDIO_API_BASE_URL',
-        defaultApiTokenKey: '',
-      });
-
-      if (!baseUrl) {
-        return [];
-      }
-
-      const response = await fetch(`${baseUrl}/v1/models`);
-      const data = (await response.json()) as { data: Array<{ id: string }> };
-
-      return data.data.map((model) => ({
-        name: model.id,
-        label: model.id,
-        provider: this.name,
-        maxTokenAllowed: 8000,
-      }));
-    } catch (error: any) {
-      console.log('Error getting LMStudio models:', error.message);
+    const { baseUrl } = this.getProviderBaseUrlAndKey({
+      apiKeys,
+      providerSettings: settings,
+      serverEnv,
+      defaultBaseUrlKey: 'LMSTUDIO_API_BASE_URL',
+      defaultApiTokenKey: '',
+    });
 
+    if (!baseUrl) {
       return [];
     }
+
+    const response = await fetch(`${baseUrl}/v1/models`);
+    const data = (await response.json()) as { data: Array<{ id: string }> };
+
+    return data.data.map((model) => ({
+      name: model.id,
+      label: model.id,
+      provider: this.name,
+      maxTokenAllowed: 8000,
+    }));
   }
 
   getModelInstance: (options: {
     model: string;
diff --git a/app/lib/modules/llm/providers/ollama.ts b/app/lib/modules/llm/providers/ollama.ts
index 2c99be3cf6..8f0ddf2238 100644
--- a/app/lib/modules/llm/providers/ollama.ts
+++ b/app/lib/modules/llm/providers/ollama.ts
@@ -45,34 +45,29 @@ export default class OllamaProvider extends BaseProvider {
     settings?: IProviderSetting,
     serverEnv: Record<string, string> = {},
   ): Promise<ModelInfo[]> {
-    try {
-      const { baseUrl } = this.getProviderBaseUrlAndKey({
-        apiKeys,
-        providerSettings: settings,
-        serverEnv,
-        defaultBaseUrlKey: 'OLLAMA_API_BASE_URL',
-        defaultApiTokenKey: '',
-      });
-
-      if (!baseUrl) {
-        return [];
-      }
-
-      const response = await fetch(`${baseUrl}/api/tags`);
-      const data = (await response.json()) as OllamaApiResponse;
-
-      // console.log({ ollamamodels: data.models });
-
-      return data.models.map((model: OllamaModel) => ({
-        name: model.name,
-        label: `${model.name} (${model.details.parameter_size})`,
-        provider: this.name,
-        maxTokenAllowed: 8000,
-      }));
-    } catch (e) {
-      console.error('Failed to get Ollama models:', e);
+    const { baseUrl } = this.getProviderBaseUrlAndKey({
+      apiKeys,
+      providerSettings: settings,
+      serverEnv,
+      defaultBaseUrlKey: 'OLLAMA_API_BASE_URL',
+      defaultApiTokenKey: '',
+    });
+
+    if (!baseUrl) {
       return [];
     }
+
+    const response = await fetch(`${baseUrl}/api/tags`);
+    const data = (await response.json()) as OllamaApiResponse;
+
+    // console.log({ ollamamodels: data.models });
+
+    return data.models.map((model: OllamaModel) => ({
+      name: model.name,
+      label: `${model.name} (${model.details.parameter_size})`,
+      provider: this.name,
+      maxTokenAllowed: 8000,
+    }));
   }
 
   getModelInstance: (options: {
     model: string;
diff --git a/app/lib/modules/llm/providers/open-router.ts b/app/lib/modules/llm/providers/open-router.ts
index 45defb2a81..242f8f7d79 100644
--- a/app/lib/modules/llm/providers/open-router.ts
+++ b/app/lib/modules/llm/providers/open-router.ts
@@ -27,7 +27,6 @@ export default class OpenRouterProvider extends BaseProvider {
   };
 
   staticModels: ModelInfo[] = [
-    { name: 'gpt-4o', label: 'GPT-4o', provider: 'OpenAI', maxTokenAllowed: 8000 },
     {
       name: 'anthropic/claude-3.5-sonnet',
       label: 'Anthropic: Claude 3.5 Sonnet (OpenRouter)',
diff --git a/app/lib/modules/llm/providers/openai-like.ts b/app/lib/modules/llm/providers/openai-like.ts
index 44fb1ab9cf..f33f21ede0 100644
--- a/app/lib/modules/llm/providers/openai-like.ts
+++ b/app/lib/modules/llm/providers/openai-like.ts
@@ -19,37 +19,32 @@ export default class OpenAILikeProvider extends BaseProvider {
     settings?: IProviderSetting,
     serverEnv: Record<string, string> = {},
   ): Promise<ModelInfo[]> {
-    try {
-      const { baseUrl, apiKey } = this.getProviderBaseUrlAndKey({
-        apiKeys,
-        providerSettings: settings,
-        serverEnv,
-        defaultBaseUrlKey: 'OPENAI_LIKE_API_BASE_URL',
-        defaultApiTokenKey: 'OPENAI_LIKE_API_KEY',
-      });
+    const { baseUrl, apiKey } = this.getProviderBaseUrlAndKey({
+      apiKeys,
+      providerSettings: settings,
+      serverEnv,
+      defaultBaseUrlKey: 'OPENAI_LIKE_API_BASE_URL',
+      defaultApiTokenKey: 'OPENAI_LIKE_API_KEY',
+    });
 
-      if (!baseUrl || !apiKey) {
-        return [];
-      }
+    if (!baseUrl || !apiKey) {
+      return [];
+    }
 
-      const response = await fetch(`${baseUrl}/models`, {
-        headers: {
-          Authorization: `Bearer ${apiKey}`,
-        },
-      });
+    const response = await fetch(`${baseUrl}/models`, {
+      headers: {
+        Authorization: `Bearer ${apiKey}`,
+      },
+    });
 
-      const res = (await response.json()) as any;
+    const res = (await response.json()) as any;
 
-      return res.data.map((model: any) => ({
-        name: model.id,
-        label: model.id,
-        provider: this.name,
-        maxTokenAllowed: 8000,
-      }));
-    } catch (error) {
-      console.error('Error getting OpenAILike models:', error);
-      return [];
-    }
+    return res.data.map((model: any) => ({
+      name: model.id,
+      label: model.id,
+      provider: this.name,
+      maxTokenAllowed: 8000,
+    }));
   }
 
   getModelInstance(options: {
diff --git a/app/lib/modules/llm/providers/openai.ts b/app/lib/modules/llm/providers/openai.ts
index 9a54118938..8a19ff21c7 100644
--- a/app/lib/modules/llm/providers/openai.ts
+++ b/app/lib/modules/llm/providers/openai.ts
@@ -13,6 +13,7 @@ export default class OpenAIProvider extends BaseProvider {
   };
 
   staticModels: ModelInfo[] = [
+    { name: 'gpt-4o', label: 'GPT-4o', provider: 'OpenAI', maxTokenAllowed: 8000 },
     { name: 'gpt-4o-mini', label: 'GPT-4o Mini', provider: 'OpenAI', maxTokenAllowed: 8000 },
     { name: 'gpt-4-turbo', label: 'GPT-4 Turbo', provider: 'OpenAI', maxTokenAllowed: 8000 },
     { name: 'gpt-4', label: 'GPT-4', provider: 'OpenAI', maxTokenAllowed: 8000 },
diff --git a/app/lib/modules/llm/providers/together.ts b/app/lib/modules/llm/providers/together.ts
index 1a908b849b..2e11f64b9c 100644
--- a/app/lib/modules/llm/providers/together.ts
+++ b/app/lib/modules/llm/providers/together.ts
@@ -38,41 +38,36 @@ export default class TogetherProvider extends BaseProvider {
     settings?: IProviderSetting,
     serverEnv: Record<string, string> = {},
   ): Promise<ModelInfo[]> {
-    try {
-      const { baseUrl: fetchBaseUrl, apiKey } = this.getProviderBaseUrlAndKey({
-        apiKeys,
-        providerSettings: settings,
-        serverEnv,
-        defaultBaseUrlKey: 'TOGETHER_API_BASE_URL',
-        defaultApiTokenKey: 'TOGETHER_API_KEY',
-      });
-      const baseUrl = fetchBaseUrl || 'https://api.together.xyz/v1';
+    const { baseUrl: fetchBaseUrl, apiKey } = this.getProviderBaseUrlAndKey({
+      apiKeys,
+      providerSettings: settings,
+      serverEnv,
+      defaultBaseUrlKey: 'TOGETHER_API_BASE_URL',
+      defaultApiTokenKey: 'TOGETHER_API_KEY',
+    });
+    const baseUrl = fetchBaseUrl || 'https://api.together.xyz/v1';
 
-      if (!baseUrl || !apiKey) {
-        return [];
-      }
+    if (!baseUrl || !apiKey) {
+      return [];
+    }
 
-      // console.log({ baseUrl, apiKey });
+    // console.log({ baseUrl, apiKey });
 
-      const response = await fetch(`${baseUrl}/models`, {
-        headers: {
-          Authorization: `Bearer ${apiKey}`,
-        },
-      });
+    const response = await fetch(`${baseUrl}/models`, {
+      headers: {
+        Authorization: `Bearer ${apiKey}`,
+      },
+    });
 
-      const res = (await response.json()) as any;
-      const data = (res || []).filter((model: any) => model.type === 'chat');
+    const res = (await response.json()) as any;
+    const data = (res || []).filter((model: any) => model.type === 'chat');
 
-      return data.map((m: any) => ({
-        name: m.id,
-        label: `${m.display_name} - in:$${m.pricing.input.toFixed(2)} out:$${m.pricing.output.toFixed(2)} - context ${Math.floor(m.context_length / 1000)}k`,
-        provider: this.name,
-        maxTokenAllowed: 8000,
-      }));
-    } catch (error: any) {
-      console.error('Error getting Together models:', error.message);
-      return [];
-    }
+    return data.map((m: any) => ({
+      name: m.id,
+      label: `${m.display_name} - in:$${m.pricing.input.toFixed(2)} out:$${m.pricing.output.toFixed(2)} - context ${Math.floor(m.context_length / 1000)}k`,
+      provider: this.name,
+      maxTokenAllowed: 8000,
+    }));
   }
 
   getModelInstance(options: {
diff --git a/app/lib/runtime/message-parser.ts b/app/lib/runtime/message-parser.ts
index 8f1ccd5266..275717941b 100644
--- a/app/lib/runtime/message-parser.ts
+++ b/app/lib/runtime/message-parser.ts
@@ -55,7 +55,8 @@ interface MessageState {
 function cleanoutMarkdownSyntax(content: string) {
   const codeBlockRegex = /^\s*```\w*\n([\s\S]*?)\n\s*```\s*$/;
   const match = content.match(codeBlockRegex);
-  console.log('matching', !!match, content);
+
+  // console.log('matching', !!match, content);
 
   if (match) {
     return match[1]; // Remove common leading 4-space indent
diff --git a/app/lib/stores/settings.ts b/app/lib/stores/settings.ts
index 72b89331a3..90d2bf66fd 100644
--- a/app/lib/stores/settings.ts
+++ b/app/lib/stores/settings.ts
@@ -54,5 +54,5 @@ export const promptStore = atom('default');
 
 export const latestBranchStore = atom(false);
 
-export const autoSelectStarterTemplate = atom(true);
+export const autoSelectStarterTemplate = atom(false);
 
 export const enableContextOptimizationStore = atom(false);
diff --git a/app/routes/api.chat.ts b/app/routes/api.chat.ts
index b20331f50d..05f8746cf5 100644
--- a/app/routes/api.chat.ts
+++ b/app/routes/api.chat.ts
@@ -5,11 +5,14 @@ import { CONTINUE_PROMPT } from '~/lib/common/prompts/prompts';
 import { streamText, type Messages, type StreamingOptions } from '~/lib/.server/llm/stream-text';
 import SwitchableStream from '~/lib/.server/llm/switchable-stream';
 import type { IProviderSetting } from '~/types/model';
+import { createScopedLogger } from '~/utils/logger';
 
 export async function action(args: ActionFunctionArgs) {
   return chatAction(args);
 }
+const logger = createScopedLogger('api.chat');
+
 function parseCookies(cookieHeader: string): Record<string, string> {
   const cookies: Record<string, string> = {};
@@ -54,7 +57,7 @@ async function chatAction({ context, request }: ActionFunctionArgs) {
   const options: StreamingOptions = {
     toolChoice: 'none',
     onFinish: async ({ text: content, finishReason, usage }) => {
-      console.log('usage', usage);
+      logger.debug('usage', JSON.stringify(usage));
 
       if (usage) {
         cumulativeUsage.completionTokens += usage.completionTokens || 0;
@@ -63,23 +66,33 @@
       }
 
       if (finishReason !== 'length') {
-        return stream
-          .switchSource(
-            createDataStream({
-              async execute(dataStream) {
-                dataStream.writeMessageAnnotation({
-                  type: 'usage',
-                  value: {
-                    completionTokens: cumulativeUsage.completionTokens,
-                    promptTokens: cumulativeUsage.promptTokens,
-                    totalTokens: cumulativeUsage.totalTokens,
-                  },
-                });
-              },
-              onError: (error: any) => `Custom error: ${error.message}`,
-            }),
-          )
-          .then(() => stream.close());
+        const encoder = new TextEncoder();
+        const usageStream = createDataStream({
+          async execute(dataStream) {
+            dataStream.writeMessageAnnotation({
+              type: 'usage',
+              value: {
+                completionTokens: cumulativeUsage.completionTokens,
+                promptTokens: cumulativeUsage.promptTokens,
+                totalTokens: cumulativeUsage.totalTokens,
+              },
+            });
+          },
+          onError: (error: any) => `Custom error: ${error.message}`,
+        }).pipeThrough(
+          new TransformStream({
+            transform: (chunk, controller) => {
+              // Convert the string stream to a byte stream
+              const str = typeof chunk === 'string' ? chunk : JSON.stringify(chunk);
+              controller.enqueue(encoder.encode(str));
+            },
+          }),
+        );
+        await stream.switchSource(usageStream);
+        await new Promise((resolve) => setTimeout(resolve, 0));
+        stream.close();
+
+        return;
       }
 
       if (stream.switches >= MAX_RESPONSE_SEGMENTS) {
@@ -88,7 +101,7 @@
 
       const switchesLeft = MAX_RESPONSE_SEGMENTS - stream.switches;
 
-      console.log(`Reached max token limit (${MAX_TOKENS}): Continuing message (${switchesLeft} switches left)`);
+      logger.info(`Reached max token limit (${MAX_TOKENS}): Continuing message (${switchesLeft} switches left)`);
 
       messages.push({ role: 'assistant', content });
       messages.push({ role: 'user', content: CONTINUE_PROMPT });
@@ -104,7 +117,9 @@
         contextOptimization,
       });
 
-      return stream.switchSource(result.toDataStream());
+      stream.switchSource(result.toDataStream());
+
+      return;
     },
   };
 
@@ -128,7 +143,7 @@
       },
     });
   } catch (error: any) {
-    console.error(error);
+    logger.error(error);
 
     if (error.message?.includes('API key')) {
       throw new Response('Invalid or missing API key', {
diff --git a/app/utils/constants.ts b/app/utils/constants.ts
index be83083f57..31e72b77c8 100644
--- a/app/utils/constants.ts
+++ b/app/utils/constants.ts
@@ -19,312 +19,6 @@ export const DEFAULT_PROVIDER = llmManager.getDefaultProvider();
 
 let MODEL_LIST = llmManager.getModelList();
 
-/* - *const PROVIDER_LIST_OLD: ProviderInfo[] = [ - * { - * name: 'Anthropic', - * staticModels: [ - * { - * name: 'claude-3-5-sonnet-latest', - * label: 'Claude 3.5 Sonnet (new)', - * provider: 'Anthropic', - * maxTokenAllowed: 8000, - * }, - * { - * name: 'claude-3-5-sonnet-20240620', - * label: 'Claude 3.5 Sonnet (old)', - * provider: 'Anthropic', - * maxTokenAllowed: 8000, - * }, - * { - * name: 
'claude-3-5-haiku-latest', - * label: 'Claude 3.5 Haiku (new)', - * provider: 'Anthropic', - * maxTokenAllowed: 8000, - * }, - * { name: 'claude-3-opus-latest', label: 'Claude 3 Opus', provider: 'Anthropic', maxTokenAllowed: 8000 }, - * { name: 'claude-3-sonnet-20240229', label: 'Claude 3 Sonnet', provider: 'Anthropic', maxTokenAllowed: 8000 }, - * { name: 'claude-3-haiku-20240307', label: 'Claude 3 Haiku', provider: 'Anthropic', maxTokenAllowed: 8000 }, - * ], - * getApiKeyLink: 'https://console.anthropic.com/settings/keys', - * }, - * { - * name: 'Ollama', - * staticModels: [], - * getDynamicModels: getOllamaModels, - * getApiKeyLink: 'https://ollama.com/download', - * labelForGetApiKey: 'Download Ollama', - * icon: 'i-ph:cloud-arrow-down', - * }, - * { - * name: 'OpenAILike', - * staticModels: [], - * getDynamicModels: getOpenAILikeModels, - * }, - * { - * name: 'Cohere', - * staticModels: [ - * { name: 'command-r-plus-08-2024', label: 'Command R plus Latest', provider: 'Cohere', maxTokenAllowed: 4096 }, - * { name: 'command-r-08-2024', label: 'Command R Latest', provider: 'Cohere', maxTokenAllowed: 4096 }, - * { name: 'command-r-plus', label: 'Command R plus', provider: 'Cohere', maxTokenAllowed: 4096 }, - * { name: 'command-r', label: 'Command R', provider: 'Cohere', maxTokenAllowed: 4096 }, - * { name: 'command', label: 'Command', provider: 'Cohere', maxTokenAllowed: 4096 }, - * { name: 'command-nightly', label: 'Command Nightly', provider: 'Cohere', maxTokenAllowed: 4096 }, - * { name: 'command-light', label: 'Command Light', provider: 'Cohere', maxTokenAllowed: 4096 }, - * { name: 'command-light-nightly', label: 'Command Light Nightly', provider: 'Cohere', maxTokenAllowed: 4096 }, - * { name: 'c4ai-aya-expanse-8b', label: 'c4AI Aya Expanse 8b', provider: 'Cohere', maxTokenAllowed: 4096 }, - * { name: 'c4ai-aya-expanse-32b', label: 'c4AI Aya Expanse 32b', provider: 'Cohere', maxTokenAllowed: 4096 }, - * ], - * getApiKeyLink: 'https://dashboard.cohere.com/api-keys', - * }, - * { - * name: 'OpenRouter', - * staticModels: [ - * { name: 'gpt-4o', label: 'GPT-4o', provider: 'OpenAI', maxTokenAllowed: 8000 }, - * { - * name: 'anthropic/claude-3.5-sonnet', - * label: 'Anthropic: Claude 3.5 Sonnet (OpenRouter)', - * provider: 'OpenRouter', - * maxTokenAllowed: 8000, - * }, - * { - * name: 'anthropic/claude-3-haiku', - * label: 'Anthropic: Claude 3 Haiku (OpenRouter)', - * provider: 'OpenRouter', - * maxTokenAllowed: 8000, - * }, - * { - * name: 'deepseek/deepseek-coder', - * label: 'Deepseek-Coder V2 236B (OpenRouter)', - * provider: 'OpenRouter', - * maxTokenAllowed: 8000, - * }, - * { - * name: 'google/gemini-flash-1.5', - * label: 'Google Gemini Flash 1.5 (OpenRouter)', - * provider: 'OpenRouter', - * maxTokenAllowed: 8000, - * }, - * { - * name: 'google/gemini-pro-1.5', - * label: 'Google Gemini Pro 1.5 (OpenRouter)', - * provider: 'OpenRouter', - * maxTokenAllowed: 8000, - * }, - * { name: 'x-ai/grok-beta', label: 'xAI Grok Beta (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 8000 }, - * { - * name: 'mistralai/mistral-nemo', - * label: 'OpenRouter Mistral Nemo (OpenRouter)', - * provider: 'OpenRouter', - * maxTokenAllowed: 8000, - * }, - * { - * name: 'qwen/qwen-110b-chat', - * label: 'OpenRouter Qwen 110b Chat (OpenRouter)', - * provider: 'OpenRouter', - * maxTokenAllowed: 8000, - * }, - * { name: 'cohere/command', label: 'Cohere Command (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 4096 }, - * ], - * getDynamicModels: getOpenRouterModels, - * getApiKeyLink: 
'https://openrouter.ai/settings/keys', - * }, - * { - * name: 'Google', - * staticModels: [ - * { name: 'gemini-1.5-flash-latest', label: 'Gemini 1.5 Flash', provider: 'Google', maxTokenAllowed: 8192 }, - * { name: 'gemini-2.0-flash-exp', label: 'Gemini 2.0 Flash', provider: 'Google', maxTokenAllowed: 8192 }, - * { name: 'gemini-1.5-flash-002', label: 'Gemini 1.5 Flash-002', provider: 'Google', maxTokenAllowed: 8192 }, - * { name: 'gemini-1.5-flash-8b', label: 'Gemini 1.5 Flash-8b', provider: 'Google', maxTokenAllowed: 8192 }, - * { name: 'gemini-1.5-pro-latest', label: 'Gemini 1.5 Pro', provider: 'Google', maxTokenAllowed: 8192 }, - * { name: 'gemini-1.5-pro-002', label: 'Gemini 1.5 Pro-002', provider: 'Google', maxTokenAllowed: 8192 }, - * { name: 'gemini-exp-1206', label: 'Gemini exp-1206', provider: 'Google', maxTokenAllowed: 8192 }, - * ], - * getApiKeyLink: 'https://aistudio.google.com/app/apikey', - * }, - * { - * name: 'Groq', - * staticModels: [ - * { name: 'llama-3.1-8b-instant', label: 'Llama 3.1 8b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 }, - * { name: 'llama-3.2-11b-vision-preview', label: 'Llama 3.2 11b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 }, - * { name: 'llama-3.2-90b-vision-preview', label: 'Llama 3.2 90b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 }, - * { name: 'llama-3.2-3b-preview', label: 'Llama 3.2 3b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 }, - * { name: 'llama-3.2-1b-preview', label: 'Llama 3.2 1b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 }, - * { name: 'llama-3.3-70b-versatile', label: 'Llama 3.3 70b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 }, - * ], - * getApiKeyLink: 'https://console.groq.com/keys', - * }, - * { - * name: 'HuggingFace', - * staticModels: [ - * { - * name: 'Qwen/Qwen2.5-Coder-32B-Instruct', - * label: 'Qwen2.5-Coder-32B-Instruct (HuggingFace)', - * provider: 'HuggingFace', - * maxTokenAllowed: 8000, - * }, - * { - * name: '01-ai/Yi-1.5-34B-Chat', - * label: 'Yi-1.5-34B-Chat (HuggingFace)', - * provider: 'HuggingFace', - * maxTokenAllowed: 8000, - * }, - * { - * name: 'codellama/CodeLlama-34b-Instruct-hf', - * label: 'CodeLlama-34b-Instruct (HuggingFace)', - * provider: 'HuggingFace', - * maxTokenAllowed: 8000, - * }, - * { - * name: 'NousResearch/Hermes-3-Llama-3.1-8B', - * label: 'Hermes-3-Llama-3.1-8B (HuggingFace)', - * provider: 'HuggingFace', - * maxTokenAllowed: 8000, - * }, - * { - * name: 'Qwen/Qwen2.5-Coder-32B-Instruct', - * label: 'Qwen2.5-Coder-32B-Instruct (HuggingFace)', - * provider: 'HuggingFace', - * maxTokenAllowed: 8000, - * }, - * { - * name: 'Qwen/Qwen2.5-72B-Instruct', - * label: 'Qwen2.5-72B-Instruct (HuggingFace)', - * provider: 'HuggingFace', - * maxTokenAllowed: 8000, - * }, - * { - * name: 'meta-llama/Llama-3.1-70B-Instruct', - * label: 'Llama-3.1-70B-Instruct (HuggingFace)', - * provider: 'HuggingFace', - * maxTokenAllowed: 8000, - * }, - * { - * name: 'meta-llama/Llama-3.1-405B', - * label: 'Llama-3.1-405B (HuggingFace)', - * provider: 'HuggingFace', - * maxTokenAllowed: 8000, - * }, - * { - * name: '01-ai/Yi-1.5-34B-Chat', - * label: 'Yi-1.5-34B-Chat (HuggingFace)', - * provider: 'HuggingFace', - * maxTokenAllowed: 8000, - * }, - * { - * name: 'codellama/CodeLlama-34b-Instruct-hf', - * label: 'CodeLlama-34b-Instruct (HuggingFace)', - * provider: 'HuggingFace', - * maxTokenAllowed: 8000, - * }, - * { - * name: 'NousResearch/Hermes-3-Llama-3.1-8B', - * label: 'Hermes-3-Llama-3.1-8B (HuggingFace)', - * provider: 'HuggingFace', - * maxTokenAllowed: 8000, - * }, - * ], - * 
getApiKeyLink: 'https://huggingface.co/settings/tokens', - * }, - * { - * name: 'OpenAI', - * staticModels: [ - * { name: 'gpt-4o-mini', label: 'GPT-4o Mini', provider: 'OpenAI', maxTokenAllowed: 8000 }, - * { name: 'gpt-4-turbo', label: 'GPT-4 Turbo', provider: 'OpenAI', maxTokenAllowed: 8000 }, - * { name: 'gpt-4', label: 'GPT-4', provider: 'OpenAI', maxTokenAllowed: 8000 }, - * { name: 'gpt-3.5-turbo', label: 'GPT-3.5 Turbo', provider: 'OpenAI', maxTokenAllowed: 8000 }, - * ], - * getApiKeyLink: 'https://platform.openai.com/api-keys', - * }, - * { - * name: 'xAI', - * staticModels: [{ name: 'grok-beta', label: 'xAI Grok Beta', provider: 'xAI', maxTokenAllowed: 8000 }], - * getApiKeyLink: 'https://docs.x.ai/docs/quickstart#creating-an-api-key', - * }, - * { - * name: 'Deepseek', - * staticModels: [ - * { name: 'deepseek-coder', label: 'Deepseek-Coder', provider: 'Deepseek', maxTokenAllowed: 8000 }, - * { name: 'deepseek-chat', label: 'Deepseek-Chat', provider: 'Deepseek', maxTokenAllowed: 8000 }, - * ], - * getApiKeyLink: 'https://platform.deepseek.com/apiKeys', - * }, - * { - * name: 'Mistral', - * staticModels: [ - * { name: 'open-mistral-7b', label: 'Mistral 7B', provider: 'Mistral', maxTokenAllowed: 8000 }, - * { name: 'open-mixtral-8x7b', label: 'Mistral 8x7B', provider: 'Mistral', maxTokenAllowed: 8000 }, - * { name: 'open-mixtral-8x22b', label: 'Mistral 8x22B', provider: 'Mistral', maxTokenAllowed: 8000 }, - * { name: 'open-codestral-mamba', label: 'Codestral Mamba', provider: 'Mistral', maxTokenAllowed: 8000 }, - * { name: 'open-mistral-nemo', label: 'Mistral Nemo', provider: 'Mistral', maxTokenAllowed: 8000 }, - * { name: 'ministral-8b-latest', label: 'Mistral 8B', provider: 'Mistral', maxTokenAllowed: 8000 }, - * { name: 'mistral-small-latest', label: 'Mistral Small', provider: 'Mistral', maxTokenAllowed: 8000 }, - * { name: 'codestral-latest', label: 'Codestral', provider: 'Mistral', maxTokenAllowed: 8000 }, - * { name: 'mistral-large-latest', label: 'Mistral Large Latest', provider: 'Mistral', maxTokenAllowed: 8000 }, - * ], - * getApiKeyLink: 'https://console.mistral.ai/api-keys/', - * }, - * { - * name: 'LMStudio', - * staticModels: [], - * getDynamicModels: getLMStudioModels, - * getApiKeyLink: 'https://lmstudio.ai/', - * labelForGetApiKey: 'Get LMStudio', - * icon: 'i-ph:cloud-arrow-down', - * }, - * { - * name: 'Together', - * getDynamicModels: getTogetherModels, - * staticModels: [ - * { - * name: 'Qwen/Qwen2.5-Coder-32B-Instruct', - * label: 'Qwen/Qwen2.5-Coder-32B-Instruct', - * provider: 'Together', - * maxTokenAllowed: 8000, - * }, - * { - * name: 'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo', - * label: 'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo', - * provider: 'Together', - * maxTokenAllowed: 8000, - * }, - * - * { - * name: 'mistralai/Mixtral-8x7B-Instruct-v0.1', - * label: 'Mixtral 8x7B Instruct', - * provider: 'Together', - * maxTokenAllowed: 8192, - * }, - * ], - * getApiKeyLink: 'https://api.together.xyz/settings/api-keys', - * }, - * { - * name: 'Perplexity', - * staticModels: [ - * { - * name: 'llama-3.1-sonar-small-128k-online', - * label: 'Sonar Small Online', - * provider: 'Perplexity', - * maxTokenAllowed: 8192, - * }, - * { - * name: 'llama-3.1-sonar-large-128k-online', - * label: 'Sonar Large Online', - * provider: 'Perplexity', - * maxTokenAllowed: 8192, - * }, - * { - * name: 'llama-3.1-sonar-huge-128k-online', - * label: 'Sonar Huge Online', - * provider: 'Perplexity', - * maxTokenAllowed: 8192, - * }, - * ], - * getApiKeyLink: 
'https://www.perplexity.ai/settings/api', - * }, - *]; - */ -
 const providerBaseUrlEnvKeys: Record<string, { baseUrlKey?: string; apiTokenKey?: string }> = {};
 PROVIDER_LIST.forEach((provider) => {
   providerBaseUrlEnvKeys[provider.name] = {
diff --git a/app/utils/logger.ts b/app/utils/logger.ts
index 9b2c31c95e..26f9e37de5 100644
--- a/app/utils/logger.ts
+++ b/app/utils/logger.ts
@@ -1,4 +1,7 @@
 export type DebugLevel = 'trace' | 'debug' | 'info' | 'warn' | 'error';
+import { Chalk } from 'chalk';
+
+const chalk = new Chalk({ level: 3 });
 
 type LoggerFunction = (...messages: any[]) => void;
 
@@ -13,9 +16,6 @@ interface Logger {
 
 let currentLevel: DebugLevel = (import.meta.env.VITE_LOG_LEVEL ?? import.meta.env.DEV) ? 'debug' : 'info';
 
-const isWorker = 'HTMLRewriter' in globalThis;
-const supportsColor = !isWorker;
-
 export const logger: Logger = {
   trace: (...messages: any[]) => log('trace', undefined, messages),
   debug: (...messages: any[]) => log('debug', undefined, messages),
@@ -63,14 +63,8 @@ function log(level: DebugLevel, scope: string | undefined, messages: any[]) {
     return `${acc} ${current}`;
   }, '');
 
-  if (!supportsColor) {
-    console.log(`[${level.toUpperCase()}]`, allMessages);
-
-    return;
-  }
-
   const labelBackgroundColor = getColorForLevel(level);
-  const labelTextColor = level === 'warn' ? 'black' : 'white';
+  const labelTextColor = level === 'warn' ? '#000000' : '#FFFFFF';
 
   const labelStyles = getLabelStyles(labelBackgroundColor, labelTextColor);
   const scopeStyles = getLabelStyles('#77828D', 'white');
@@ -81,7 +75,21 @@
     styles.push('', scopeStyles);
   }
 
-  console.log(`%c${level.toUpperCase()}${scope ? `%c %c${scope}` : ''}`, ...styles, allMessages);
+  let labelText = formatText(` ${level.toUpperCase()} `, labelTextColor, labelBackgroundColor);
+
+  if (scope) {
+    labelText = `${labelText} ${formatText(` ${scope} `, '#FFFFFF', '77828D')}`;
+  }
+
+  if (typeof window !== 'undefined') {
+    console.log(`%c${level.toUpperCase()}${scope ? `%c %c${scope}` : ''}`, ...styles, allMessages);
+  } else {
+    console.log(`${labelText}`, allMessages);
+  }
+}
+
+function formatText(text: string, color: string, bg: string) {
+  return chalk.bgHex(bg)(chalk.hex(color)(text));
 }
 
 function getLabelStyles(color: string, textColor: string) {
@@ -104,7 +112,7 @@
       return '#EE4744';
     }
     default: {
-      return 'black';
+      return '#000000';
    }
  }
 }
diff --git a/app/utils/selectStarterTemplate.ts b/app/utils/selectStarterTemplate.ts
index 6f0cb12d7a..0e779e1c99 100644
--- a/app/utils/selectStarterTemplate.ts
+++ b/app/utils/selectStarterTemplate.ts
@@ -27,7 +27,7 @@ ${templates
 Response Format:
 <selection>
   <templateName>{selected template name}</templateName>
-  <title>{brief explanation for the choice}</title>
+  <title>{a proper title for the project}</title>
 </selection>
 
 Examples:
@@ -37,7 +37,7 @@
 User: I need to build a todo app
 Response:
 <selection>
   <templateName>react-basic-starter</templateName>
-  <title>Simple React setup perfect for building a todo application</title>
+  <title>Simple React todo application</title>
 </selection>
 
@@ -46,7 +46,7 @@
 User: Write a script to generate numbers from 1 to 100
 Response:
 <selection>
   <templateName>blank</templateName>
-  <title>This is a simple script that doesn't require any template setup</title>
+  <title>script to generate numbers from 1 to 100</title>
 </selection>
 
@@ -62,16 +62,17 @@
 Important: Provide only the selection tags in your response, no additional text.
 const templates: Template[] = STARTER_TEMPLATES.filter((t) => !t.name.includes('shadcn'));
 
-const parseSelectedTemplate = (llmOutput: string): string | null => {
+const parseSelectedTemplate = (llmOutput: string): { template: string; title: string } | null => {
   try {
     // Extract content between <templateName> tags
     const templateNameMatch = llmOutput.match(/<templateName>(.*?)<\/templateName>/);
+    const titleMatch = llmOutput.match(/<title>(.*?)<\/title>/);
 
     if (!templateNameMatch) {
       return null;
     }
 
-    return templateNameMatch[1].trim();
+    return { template: templateNameMatch[1].trim(), title: titleMatch?.[1].trim() || 'Untitled Project' };
   } catch (error) {
     console.error('Error parsing template selection:', error);
     return null;
   }
@@ -101,7 +102,10 @@ export const selectStarterTemplate = async (options: { message: string; model: string; provider: ProviderInfo }) => {
   } else {
     console.log('No template selected, using blank template');
 
-    return 'blank';
+    return {
+      template: 'blank',
+      title: '',
+    };
   }
 };
 
@@ -181,7 +185,7 @@ const getGitHubRepoContent = async (
   }
 };
 
-export async function getTemplates(templateName: string) {
+export async function getTemplates(templateName: string, title?: string) {
   const template = STARTER_TEMPLATES.find((t) => t.name == templateName);
 
   if (!template) {
@@ -211,7 +215,7 @@
 
   const filesToImport = {
     files: filteredFiles,
-    ignoreFile: filteredFiles,
+    ignoreFile: [] as typeof filteredFiles,
   };
 
   if (templateIgnoreFile) {
@@ -227,7 +231,7 @@
   }
 
   const assistantMessage = `
-<boltArtifact id="imported-files" title="Importing Starter Files" type="bundled">
+<boltArtifact id="imported-files" title="${title || 'Importing Starter Files'}" type="bundled">
 ${filesToImport.files
   .map(
     (file) =>
@@ -278,10 +282,16 @@
 Any attempt to modify these protected files will result in immediate termination of the session.
 
 If you need to make changes to functionality, create new files instead of modifying the protected ones listed above.
 ---
 `;
-    userMessage += `
+  }
+
+  userMessage += `
+---
+template import is done, and you can now use the imported files,
+edit only the files that need to be changed, and you can create new files as needed.
+DO NOT EDIT/WRITE ANY FILES THAT ALREADY EXIST IN THE PROJECT AND DO NOT NEED TO BE MODIFIED
+---
 Now that the Template is imported please continue with my original request
 `;
-  }
 
   return {
     assistantMessage,
diff --git a/changelog.md b/changelog.md
index 9c4d50f1b6..8de95f5c52 100644
--- a/changelog.md
+++ b/changelog.md
@@ -1,31 +1,68 @@
-# Release v0.0.3
+# 🚀 Release v0.0.4
 
-### 🔄 Changes since v0.0.2
+## What's Changed 🌟
 
-#### 🐛 Bug Fixes
+### 🔄 Changes since v0.0.3
 
-- Prompt Enhance
+### ✨ Features
+
+* add xAI grok-2-1212 model ([#800](https://github.com/stackblitz-labs/bolt.diy/pull/800))
+* providers list is now 2 columns (75ec49b) by Dustin Loring
+* enhanced Terminal Error Handling and Alert System ([#797](https://github.com/stackblitz-labs/bolt.diy/pull/797))
+* add Starter template menu in homepage ([#884](https://github.com/stackblitz-labs/bolt.diy/pull/884))
+* catch errors from web container preview and show in actionable alert so user can send them to AI for fixing ([#856](https://github.com/stackblitz-labs/bolt.diy/pull/856))
+* redact file contents from chat and put latest files into system prompt ([#904](https://github.com/stackblitz-labs/bolt.diy/pull/904))
+* added Automatic Code Template Detection And Import ([#867](https://github.com/stackblitz-labs/bolt.diy/pull/867))
+* added hyperbolic llm models ([#943](https://github.com/stackblitz-labs/bolt.diy/pull/943))
 
-#### 📚 Documentation
+### 🐛 Bug Fixes
 
-- miniflare error knowledge
+* chat title character restriction (e064803) by Dustin Loring
+* fixed model not loading/working, even after baseUrl set in .env file ([#816](https://github.com/stackblitz-labs/bolt.diy/pull/816))
+* added wait till terminal prompt for bolt shell execution ([#789](https://github.com/stackblitz-labs/bolt.diy/pull/789))
+* fixed console error for SettingsWIndow & Removed ts-nocheck ([#714](https://github.com/stackblitz-labs/bolt.diy/pull/714))
+* add Message Processing Throttling to Prevent Browser Crashes ([#848](https://github.com/stackblitz-labs/bolt.diy/pull/848))
+* provider menu dropdown fix (ghost providers) ([#862](https://github.com/stackblitz-labs/bolt.diy/pull/862))
+* ollama provider module base url hotfix for docker ([#863](https://github.com/stackblitz-labs/bolt.diy/pull/863))
+* check for updates does not look for commit.json now ([#861](https://github.com/stackblitz-labs/bolt.diy/pull/861))
+* detect and remove markdown block syntax that llms sometimes hallucinate for file actions ([#886](https://github.com/stackblitz-labs/bolt.diy/pull/886))
+* add defaults for LMStudio to work out of the box ([#928](https://github.com/stackblitz-labs/bolt.diy/pull/928))
+* import folder filtering ([#939](https://github.com/stackblitz-labs/bolt.diy/pull/939))
+* refresh model list after api key changes ([#944](https://github.com/stackblitz-labs/bolt.diy/pull/944))
+* better model loading ui feedback and model list update ([#954](https://github.com/stackblitz-labs/bolt.diy/pull/954))
+* updated logger and model caching minor bugfix #release ([#895](https://github.com/stackblitz-labs/bolt.diy/pull/895))
 
-#### 🔧 Chores
+### 📚 Documentation
 
-- adding back semantic pull pr check for better changelog system
-- update commit hash to 1e72d52278730f7d22448be9d5cf2daf12559486
-- update commit hash to 282beb96e2ee92ba8b1174aaaf9f270e03a288e8
+* simplified setup ([#817](https://github.com/stackblitz-labs/bolt.diy/pull/817))
+* toc for readme (de64007) by Dustin Loring
+* faq style change, toc added to index (636f87f) by Dustin Loring
+* setup updated 
(ab5cde3) by Dustin Loring +* updated Docs ([#845](https://github.com/stackblitz-labs/bolt.diy/pull/845)) +* updated download link ([#850](https://github.com/stackblitz-labs/bolt.diy/pull/850)) +* updated env.example of OLLAMA & LMSTUDIO base url ([#877](https://github.com/stackblitz-labs/bolt.diy/pull/877)) -#### 🔍 Other Changes -- Merge remote-tracking branch 'upstream/main' -- Merge pull request #781 from thecodacus/semantic-pull-pr -- miniflare and wrangler error -- simplified the fix -- Merge branch 'main' into fix/prompt-enhance +### ♻️ Code Refactoring +* updated vite config to inject add version metadata into the app on build ([#841](https://github.com/stackblitz-labs/bolt.diy/pull/841)) +* refactored LLM Providers: Adapting Modular Approach ([#832](https://github.com/stackblitz-labs/bolt.diy/pull/832)) -**Full Changelog**: [`v0.0.2..v0.0.3`](https://github.com/stackblitz-labs/bolt.diy/compare/v0.0.2...v0.0.3) + +### ⚙️ CI + +* updated the docs ci to only trigger if any files changed in the docs folder ([#849](https://github.com/stackblitz-labs/bolt.diy/pull/849)) +* improved change-log generation script and cleaner release ci action ([#896](https://github.com/stackblitz-labs/bolt.diy/pull/896)) + + +### 🔍 Other Changes + +* fix hotfix for version metadata issue ([#853](https://github.com/stackblitz-labs/bolt.diy/pull/853)) +* feat; data tab added to the settings (1f938fc) by Dustin Loring + + +## 📈 Stats + +**Full Changelog**: [`v0.0.3..v0.0.4`](https://github.com/stackblitz-labs/bolt.diy/compare/v0.0.3...v0.0.4) diff --git a/package.json b/package.json index 19c83531f2..bd381f9de2 100644 --- a/package.json +++ b/package.json @@ -5,7 +5,7 @@ "license": "MIT", "sideEffects": false, "type": "module", - "version": "0.0.3", + "version": "0.0.4", "scripts": { "deploy": "npm run build && wrangler pages deploy", "build": "remix vite:build", @@ -74,6 +74,7 @@ "@xterm/addon-web-links": "^0.11.0", "@xterm/xterm": "^5.5.0", "ai": "^4.0.13", + "chalk": "^5.4.1", "date-fns": "^3.6.0", "diff": "^5.2.0", "dotenv": "^16.4.7", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index a78cfef324..128560a560 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -143,6 +143,9 @@ importers: ai: specifier: ^4.0.13 version: 4.0.18(react@18.3.1)(zod@3.23.8) + chalk: + specifier: ^5.4.1 + version: 5.4.1 date-fns: specifier: ^3.6.0 version: 3.6.0 @@ -2604,8 +2607,8 @@ packages: resolution: {integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==} engines: {node: '>=10'} - chalk@5.3.0: - resolution: {integrity: sha512-dLitG79d+GV1Nb/VYcCDFivJeK1hiukt9QjRNVOsUtTy1rR1YJsmpGGTZ3qJos+uw7WmWF4wUwBd9jxjocFC2w==} + chalk@5.4.1: + resolution: {integrity: sha512-zgVZuo2WcZgfUEmsn6eO3kINexW8RAE4maiQ8QNs8CtpPCSyMiYsULR3HQYkm3w8FIA3SberyMJMSldGsW+U3w==} engines: {node: ^12.17.0 || ^14.13 || >=16.0.0} character-entities-html4@2.1.0: @@ -8207,7 +8210,7 @@ snapshots: ansi-styles: 4.3.0 supports-color: 7.2.0 - chalk@5.3.0: {} + chalk@5.4.1: {} character-entities-html4@2.1.0: {} @@ -9415,7 +9418,7 @@ snapshots: jsondiffpatch@0.6.0: dependencies: '@types/diff-match-patch': 1.0.36 - chalk: 5.3.0 + chalk: 5.4.1 diff-match-patch: 1.0.5 jsonfile@6.1.0: diff --git a/public/icons/Hyperbolic.svg b/public/icons/Hyperbolic.svg new file mode 100644 index 0000000000..392ed08c3c --- /dev/null +++ b/public/icons/Hyperbolic.svg @@ -0,0 +1,3 @@ +<svg width="50" height="50" viewBox="0 0 50 50" fill="none" xmlns="http://www.w3.org/2000/svg"> +<path d="M1.17374 40.438C0.920158 41.0455 0.788964 41.6972 
0.787668 42.3556C0.787668 45.0847 3.09359 47.5748 6.90525 49.482C7.25319 49.6512 7.63267 49.7456 8.01927 49.7593C8.40589 49.7729 8.79108 49.7054 9.15007 49.5612C9.50907 49.417 9.8339 49.1992 10.1037 48.9218C10.3736 48.6444 10.5824 48.3136 10.7169 47.9507L14.9639 37.73C15.7076 35.9512 16.2948 34.1109 16.7187 32.2298C8.8497 33.5505 2.91813 36.5675 1.27554 40.2588L1.20883 40.4169L1.17374 40.438ZM16.7052 17.533C16.2814 15.652 15.6941 13.8116 14.9503 12.0328L10.7034 1.80865C10.5682 1.44582 10.3587 1.11534 10.0884 0.838345C9.81807 0.561352 9.49284 0.343933 9.13362 0.200047C8.77438 0.05616 8.38911 -0.0108795 8.00244 0.00294113C7.61577 0.0169029 7.23626 0.111584 6.88827 0.280903C3.08013 2.18447 0.77417 4.67824 0.77417 7.40712C0.775742 8.06561 0.906925 8.71717 1.16024 9.32493V9.34594L1.22695 9.50406C2.90464 13.1954 8.83621 16.2089 16.7052 17.533ZM44.4138 0.280903C48.2255 2.18447 50.5314 4.67824 50.5314 7.40712C50.5214 8.06685 50.3831 8.71814 50.1242 9.32493L50.0681 9.45486C48.4396 13.1708 42.4939 16.2018 34.6004 17.533C35.0242 15.652 35.6113 13.8116 36.3552 12.0328L40.6021 1.80865C40.7364 1.44569 40.9453 1.11478 41.2152 0.837513C41.4851 0.560246 41.8101 0.342552 42.1692 0.198662C42.5284 0.0547752 42.9136 -0.0122643 43.3003 0.00183487C43.6869 0.015934 44.0661 0.111031 44.4138 0.280903ZM34.6246 32.2298C35.0405 34.1093 35.6206 35.9487 36.3584 37.7265L40.6054 47.9507C40.7397 48.3136 40.9487 48.6444 41.2185 48.9218C41.4883 49.1992 41.8131 49.417 42.1722 49.5612C42.5312 49.7054 42.9164 49.7729 43.303 49.7593C43.6896 49.7456 44.069 49.6512 44.4169 49.482C48.2286 47.5748 50.5346 45.0847 50.5346 42.3556C50.5315 41.6974 50.4005 41.046 50.1485 40.438L50.0924 40.308C48.4708 36.5921 42.5041 33.5574 34.6246 32.2298ZM34.3566 19.7084C39.9443 18.848 44.5876 17.1727 47.7921 14.9845L46.8831 17.1656C44.8472 22.1033 44.8472 27.6467 46.8831 32.5844L47.7851 34.7584C44.5771 32.5703 39.9336 30.9126 34.3531 30.0415L34.2056 30.0204C31.3759 29.5919 28.5177 29.3794 25.6557 29.3847C22.8008 29.3804 19.9497 29.593 17.1269 30.0204L16.9795 30.0415C11.3954 30.8985 6.75195 32.5739 3.54398 34.762L4.44949 32.5844C6.48536 27.6467 6.48536 22.1033 4.44949 17.1656L3.54398 14.9845C6.7379 17.1832 11.3814 18.848 16.9654 19.7084L17.1129 19.7296C22.7805 20.5725 28.5415 20.5725 34.2092 19.7296L34.3566 19.7084Z" fill="#000000" style="translate: none; rotate: none; scale: none; transform-origin: 0px 0px;" data-svg-origin="0.7741699814796448 -0.000005409121513366699" transform="matrix(1,0,0,1,0,0)"></path> +</svg> \ No newline at end of file
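---

A note for readers of the base-provider.ts and manager.ts hunks above: the dynamic-model cache is keyed on exactly the inputs that can change one provider's model list — that provider's own API key, that provider's own settings, and the server environment — serialized with `JSON.stringify`. The following is a minimal, self-contained sketch of that scheme, not the actual class from the PR; `DynamicModelCache` is an illustrative name, and `ModelInfo` is trimmed to the fields used here:

```ts
interface ModelInfo {
  name: string;
  label: string;
  provider: string;
  maxTokenAllowed: number;
}

interface CacheOptions {
  apiKeys?: Record<string, string>;
  providerSettings?: Record<string, unknown>;
  serverEnv?: Record<string, string>;
}

class DynamicModelCache {
  private cached?: { cacheId: string; models: ModelInfo[] };

  constructor(private providerName: string) {}

  // The key only covers inputs scoped to this provider, so changing one
  // provider's API key invalidates only that provider's cache entry.
  key(options: CacheOptions): string {
    return JSON.stringify({
      apiKeys: options.apiKeys?.[this.providerName],
      providerSettings: options.providerSettings?.[this.providerName],
      serverEnv: options.serverEnv,
    });
  }

  get(options: CacheOptions): ModelInfo[] | null {
    if (!this.cached || this.cached.cacheId !== this.key(options)) {
      this.cached = undefined; // stale: one of the key inputs changed
      return null;
    }
    return this.cached.models;
  }

  store(options: CacheOptions, models: ModelInfo[]): void {
    this.cached = { cacheId: this.key(options), models };
  }
}
```

This per-provider scoping is what lets `onApiKeysChange` in BaseChat.tsx refresh a single provider's models without refetching every provider's list.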
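The api.chat.ts hunk works around a chunk-type mismatch: `createDataStream` emits string chunks, while the switchable response stream consumes bytes, so the usage-annotation stream is piped through a `TransformStream` that encodes each chunk with `TextEncoder`. A runnable sketch of just that bridge, where `makeStringStream` is a stand-in for the SDK's `createDataStream` (not a real API):

```ts
// Produce a ReadableStream<string> from a fixed list of parts.
function makeStringStream(parts: string[]): ReadableStream<string> {
  return new ReadableStream<string>({
    start(controller) {
      for (const part of parts) {
        controller.enqueue(part);
      }
      controller.close();
    },
  });
}

const encoder = new TextEncoder();

// Pipe the string stream through an encoding TransformStream so the
// consumer sees Uint8Array chunks instead of strings.
const byteStream: ReadableStream<Uint8Array> = makeStringStream(['usage: ', '42']).pipeThrough(
  new TransformStream<string, Uint8Array>({
    transform(chunk, controller) {
      controller.enqueue(encoder.encode(chunk));
    },
  }),
);
```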