@@ -16,9 +16,6 @@ import { RequestContext, Message } from '../domain.js';
 import { ApplicationTool } from '../tools.js';
 import type { ChatModel, AssistantResponse, ModelUsage } from './types.js';
 
-const DEFAULT_MAX_TOKENS = 1024;
-const DEFAULT_MAX_STEPS = 5;
-
 // Workaround for memory issue happening when sending image attachment. The attachments get inefficiently serialised causing a memory spike.
 const VERCEL_AI_SHARED_OPTIONS = {
   experimental_telemetry: {
@@ -127,9 +124,9 @@ export class VercelChatModelAdapter implements ChatModel {
     const result = await streamText({
       ...VERCEL_AI_SHARED_OPTIONS,
       model: this._options.languageModel,
-      maxTokens: this._options.maxTokens ?? DEFAULT_MAX_TOKENS,
-      maxSteps: this._options.maxSteps ?? DEFAULT_MAX_STEPS,
       messages: context.messages,
+      maxTokens: this._options.maxTokens,
+      maxSteps: this._options.maxSteps,
       tools: context.tools,
     });
 
@@ -160,9 +157,9 @@ export class VercelChatModelAdapter implements ChatModel {
     const result = await generateText({
       ...VERCEL_AI_SHARED_OPTIONS,
       model: this._options.languageModel,
-      maxTokens: this._options.maxTokens ?? DEFAULT_MAX_TOKENS,
-      maxSteps: this._options.maxSteps ?? DEFAULT_MAX_STEPS,
       messages: context.messages,
+      maxTokens: this._options.maxTokens,
+      maxSteps: this._options.maxSteps,
       tools: context.tools,
     });
     const responseTime = performance.now() - startTime;
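
The net effect of the change above: `maxTokens` and `maxSteps` are now forwarded to `streamText`/`generateText` exactly as the caller provided them, so when they are left `undefined` the Vercel AI SDK applies its own defaults instead of the adapter's former hard-coded `1024`/`5`. A minimal usage sketch, assuming the adapter is constructed with the same options object the diff reads from `this._options` (the constructor shape and import path are not shown in the diff and are hypothetical):

```ts
import { openai } from '@ai-sdk/openai';
// Path assumed; the diff does not show where the adapter is exported from.
import { VercelChatModelAdapter } from './vercel-chat-model-adapter.js';

const chatModel = new VercelChatModelAdapter({
  languageModel: openai('gpt-4o'),
  // Optional after this change: pass these explicitly to restore the old caps.
  maxTokens: 1024,
  maxSteps: 5,
});
```

One behavioural note: with the fallback removed, omitting `maxSteps` leaves the SDK's default of a single step, so a tool call is no longer automatically followed by a second generation unless the caller sets `maxSteps` themselves.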