diff --git a/front/lib/api/assistant/generation.ts b/front/lib/api/assistant/generation.ts
index 363546ef24ac..59f4450046ce 100644
--- a/front/lib/api/assistant/generation.ts
+++ b/front/lib/api/assistant/generation.ts
@@ -328,15 +328,13 @@ export async function* runGeneration(
     return;
   }
 
-  const model = c.model;
-
   const contextSize = getSupportedModelConfig(c.model).contextSize;
 
   const MIN_GENERATION_TOKENS = 2048;
 
   if (contextSize < MIN_GENERATION_TOKENS) {
     throw new Error(
-      `Model contextSize unexpectedly small for model: ${model.providerId} ${model.modelId}`
+      `Model contextSize unexpectedly small for model: ${c.model.providerId} ${c.model.modelId}`
     );
   }
 
@@ -345,7 +343,7 @@ export async function* runGeneration(
   // Turn the conversation into a digest that can be presented to the model.
   const modelConversationRes = await renderConversationForModel({
     conversation,
-    model,
+    model: c.model,
     prompt,
     allowedTokenCount: contextSize - MIN_GENERATION_TOKENS,
   });
@@ -358,7 +356,7 @@ export async function* runGeneration(
       messageId: agentMessage.sId,
       error: {
         code: "internal_server_error",
-        message: `Failed tokenization for ${model.providerId} ${model.modelId}: ${modelConversationRes.error.message}`,
+        message: `Failed tokenization for ${c.model.providerId} ${c.model.modelId}: ${modelConversationRes.error.message}`,
       },
     };
     return;
@@ -367,8 +365,8 @@ export async function* runGeneration(
   const config = cloneBaseConfig(
     DustProdActionRegistry["assistant-v2-generator"].config
   );
-  config.MODEL.provider_id = model.providerId;
-  config.MODEL.model_id = model.modelId;
+  config.MODEL.provider_id = c.model.providerId;
+  config.MODEL.model_id = c.model.modelId;
   config.MODEL.temperature = c.temperature;
 
   // This is the console.log you want to uncomment to generate inputs for the generator app.
@@ -383,7 +381,7 @@ export async function* runGeneration(
     {
       workspaceId: conversation.owner.sId,
       conversationId: conversation.sId,
-      model: model,
+      model: c.model,
       temperature: c.temperature,
     },
     "[ASSISTANT_TRACE] Generation exection"
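
As context for the refactor above (which inlines the local `model` alias into direct `c.model` accesses), the unchanged logic around these hunks implements a token-budget guard: reserve `MIN_GENERATION_TOKENS` of the model's context window for the generated output and hand the remainder to the conversation renderer as `allowedTokenCount`. Below is a minimal sketch of that pattern; the `ModelConfig` interface and the `allowedPromptTokens` helper are hypothetical stand-ins for illustration, not identifiers from this repository.

```typescript
// Minimal sketch (not the Dust implementation) of the token-budget guard
// shown in the diff's context lines. All names here are hypothetical.

interface ModelConfig {
  providerId: string;
  modelId: string;
  contextSize: number; // total context window, in tokens
}

const MIN_GENERATION_TOKENS = 2048;

function allowedPromptTokens(model: ModelConfig): number {
  // Fail fast if the window cannot even hold the reserved generation budget.
  if (model.contextSize < MIN_GENERATION_TOKENS) {
    throw new Error(
      `Model contextSize unexpectedly small for model: ${model.providerId} ${model.modelId}`
    );
  }
  // Whatever remains above the reserved budget is available for the
  // rendered conversation and prompt.
  return model.contextSize - MIN_GENERATION_TOKENS;
}

// Example: an 8192-token window leaves 6144 tokens for the conversation digest.
console.log(
  allowedPromptTokens({ providerId: "openai", modelId: "gpt-4", contextSize: 8192 })
);
```

Reserving a fixed generation budget up front means the conversation digest can never crowd the completion out of the context window, which is why the guard throws rather than silently truncating when the window is smaller than the reserve.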