basic-local-chatbot-flowise.json
{
  "nodes": [
    {
      "id": "conversationChain_0",
      "position": {
        "x": 1426.8618616435535,
        "y": 263.00609403762263
      },
      "type": "customNode",
      "data": {
        "id": "conversationChain_0",
        "label": "Conversation Chain",
        "version": 3,
        "name": "conversationChain",
        "type": "ConversationChain",
        "baseClasses": [
          "ConversationChain",
          "LLMChain",
          "BaseChain",
          "Runnable"
        ],
        "category": "Chains",
        "description": "Chat models specific conversational chain with memory",
        "inputParams": [
          {
            "label": "System Message",
            "name": "systemMessagePrompt",
            "type": "string",
            "rows": 4,
            "description": "If Chat Prompt Template is provided, this will be ignored",
            "additionalParams": true,
            "optional": true,
            "default": "The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.",
            "placeholder": "The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.",
            "id": "conversationChain_0-input-systemMessagePrompt-string"
          }
        ],
        "inputAnchors": [
          {
            "label": "Chat Model",
            "name": "model",
            "type": "BaseChatModel",
            "id": "conversationChain_0-input-model-BaseChatModel"
          },
          {
            "label": "Memory",
            "name": "memory",
            "type": "BaseMemory",
            "id": "conversationChain_0-input-memory-BaseMemory"
          },
          {
            "label": "Chat Prompt Template",
            "name": "chatPromptTemplate",
            "type": "ChatPromptTemplate",
"description": "Override existing prompt with Chat Prompt Template. Human Message must includes {input} variable",
"optional": true,
"id": "conversationChain_0-input-chatPromptTemplate-ChatPromptTemplate"
},
{
"label": "Input Moderation",
"description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
"name": "inputModeration",
"type": "Moderation",
"optional": true,
"list": true,
"id": "conversationChain_0-input-inputModeration-Moderation"
}
],
"inputs": {
"model": "{{chatOllama_0.data.instance}}",
"memory": "{{bufferMemory_0.data.instance}}",
"chatPromptTemplate": "",
"inputModeration": "",
"systemMessagePrompt": "The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know."
},
"outputAnchors": [
{
"id": "conversationChain_0-output-conversationChain-ConversationChain|LLMChain|BaseChain|Runnable",
"name": "conversationChain",
"label": "ConversationChain",
"description": "Chat models specific conversational chain with memory",
"type": "ConversationChain | LLMChain | BaseChain | Runnable"
}
],
"outputs": {},
"selected": false
},
"width": 300,
"height": 435,
"selected": false,
"positionAbsolute": {
"x": 1426.8618616435535,
"y": 263.00609403762263
},
"dragging": false
},
{
"id": "bufferMemory_0",
"position": {
"x": 1002.7673440362969,
"y": 135.8882509962489
},
"type": "customNode",
"data": {
"id": "bufferMemory_0",
"label": "Buffer Memory",
"version": 2,
"name": "bufferMemory",
"type": "BufferMemory",
"baseClasses": [
"BufferMemory",
"BaseChatMemory",
"BaseMemory"
],
"category": "Memory",
"description": "Retrieve chat messages stored in database",
"inputParams": [
{
"label": "Session Id",
"name": "sessionId",
"type": "string",
"description": "If not specified, a random id will be used. Learn <a target=\"_blank\" href=\"https://docs.flowiseai.com/memory#ui-and-embedded-chat\">more</a>",
"default": "",
"additionalParams": true,
"optional": true,
"id": "bufferMemory_0-input-sessionId-string"
},
{
"label": "Memory Key",
"name": "memoryKey",
"type": "string",
"default": "chat_history",
"additionalParams": true,
"id": "bufferMemory_0-input-memoryKey-string"
}
],
"inputAnchors": [],
"inputs": {
"sessionId": "",
"memoryKey": "chat_history"
},
"outputAnchors": [
{
"id": "bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory",
"name": "bufferMemory",
"label": "BufferMemory",
"description": "Retrieve chat messages stored in database",
"type": "BufferMemory | BaseChatMemory | BaseMemory"
}
],
"outputs": {},
"selected": false
},
"width": 300,
"height": 253,
"selected": false,
"positionAbsolute": {
"x": 1002.7673440362969,
"y": 135.8882509962489
},
"dragging": false
},
{
"id": "chatOllama_0",
"position": {
"x": 527.7825642464799,
"y": 187.2894095306179
},
"type": "customNode",
"data": {
"id": "chatOllama_0",
"label": "ChatOllama",
"version": 3,
"name": "chatOllama",
"type": "ChatOllama",
"baseClasses": [
"ChatOllama",
"BaseChatModel",
"BaseLanguageModel",
"Runnable"
],
"category": "Chat Models",
"description": "Chat completion using open-source LLM on Ollama",
"inputParams": [
{
"label": "Base URL",
"name": "baseUrl",
"type": "string",
"default": "http://localhost:11434",
"id": "chatOllama_0-input-baseUrl-string"
},
{
"label": "Model Name",
"name": "modelName",
"type": "string",
"placeholder": "llama2",
"id": "chatOllama_0-input-modelName-string"
},
{
"label": "Temperature",
"name": "temperature",
"type": "number",
"description": "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8). Refer to <a target=\"_blank\" href=\"https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values\">docs</a> for more details",
"step": 0.1,
"default": 0.9,
"optional": true,
"id": "chatOllama_0-input-temperature-number"
},
{
"label": "Keep Alive",
"name": "keepAlive",
"type": "string",
"description": "How long to keep connection alive. A duration string (such as \"10m\" or \"24h\")",
"default": "5m",
"optional": true,
"additionalParams": true,
"id": "chatOllama_0-input-keepAlive-string"
},
{
"label": "Top P",
"name": "topP",
"type": "number",
"description": "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9). Refer to <a target=\"_blank\" href=\"https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values\">docs</a> for more details",
"step": 0.1,
"optional": true,
"additionalParams": true,
"id": "chatOllama_0-input-topP-number"
},
{
"label": "Top K",
"name": "topK",
"type": "number",
"description": "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40). Refer to <a target=\"_blank\" href=\"https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values\">docs</a> for more details",
"step": 1,
"optional": true,
"additionalParams": true,
"id": "chatOllama_0-input-topK-number"
},
{
"label": "Mirostat",
"name": "mirostat",
"type": "number",
"description": "Enable Mirostat sampling for controlling perplexity. (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0). Refer to <a target=\"_blank\" href=\"https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values\">docs</a> for more details",
"step": 1,
"optional": true,
"additionalParams": true,
"id": "chatOllama_0-input-mirostat-number"
},
{
"label": "Mirostat ETA",
"name": "mirostatEta",
"type": "number",
"description": "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1) Refer to <a target=\"_blank\" href=\"https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values\">docs</a> for more details",
"step": 0.1,
"optional": true,
"additionalParams": true,
"id": "chatOllama_0-input-mirostatEta-number"
},
{
"label": "Mirostat TAU",
"name": "mirostatTau",
"type": "number",
"description": "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0) Refer to <a target=\"_blank\" href=\"https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values\">docs</a> for more details",
"step": 0.1,
"optional": true,
"additionalParams": true,
"id": "chatOllama_0-input-mirostatTau-number"
},
{
"label": "Context Window Size",
"name": "numCtx",
"type": "number",
"description": "Sets the size of the context window used to generate the next token. (Default: 2048) Refer to <a target=\"_blank\" href=\"https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values\">docs</a> for more details",
"step": 1,
"optional": true,
"additionalParams": true,
"id": "chatOllama_0-input-numCtx-number"
},
{
"label": "Number of GPU",
"name": "numGpu",
"type": "number",
"description": "The number of layers to send to the GPU(s). On macOS it defaults to 1 to enable metal support, 0 to disable. Refer to <a target=\"_blank\" href=\"https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values\">docs</a> for more details",
"step": 1,
"optional": true,
"additionalParams": true,
"id": "chatOllama_0-input-numGpu-number"
},
{
"label": "Number of Thread",
"name": "numThread",
"type": "number",
"description": "Sets the number of threads to use during computation. By default, Ollama will detect this for optimal performance. It is recommended to set this value to the number of physical CPU cores your system has (as opposed to the logical number of cores). Refer to <a target=\"_blank\" href=\"https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values\">docs</a> for more details",
"step": 1,
"optional": true,
"additionalParams": true,
"id": "chatOllama_0-input-numThread-number"
},
{
"label": "Repeat Last N",
"name": "repeatLastN",
"type": "number",
"description": "Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx). Refer to <a target=\"_blank\" href=\"https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values\">docs</a> for more details",
"step": 1,
"optional": true,
"additionalParams": true,
"id": "chatOllama_0-input-repeatLastN-number"
},
{
"label": "Repeat Penalty",
"name": "repeatPenalty",
"type": "number",
"description": "Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1). Refer to <a target=\"_blank\" href=\"https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values\">docs</a> for more details",
"step": 0.1,
"optional": true,
"additionalParams": true,
"id": "chatOllama_0-input-repeatPenalty-number"
},
{
"label": "Stop Sequence",
"name": "stop",
"type": "string",
"rows": 4,
"placeholder": "AI assistant:",
"description": "Sets the stop sequences to use. Use comma to seperate different sequences. Refer to <a target=\"_blank\" href=\"https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values\">docs</a> for more details",
"optional": true,
"additionalParams": true,
"id": "chatOllama_0-input-stop-string"
},
{
"label": "Tail Free Sampling",
"name": "tfsZ",
"type": "number",
"description": "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (Default: 1). Refer to <a target=\"_blank\" href=\"https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values\">docs</a> for more details",
"step": 0.1,
"optional": true,
"additionalParams": true,
"id": "chatOllama_0-input-tfsZ-number"
}
],
"inputAnchors": [
{
"label": "Cache",
"name": "cache",
"type": "BaseCache",
"optional": true,
"id": "chatOllama_0-input-cache-BaseCache"
}
],
"inputs": {
"cache": "",
"baseUrl": "http://host.docker.internal:11434",
"modelName": "SARA-llama3.2:latest",
"temperature": "0.3",
"keepAlive": "5m",
"topP": "",
"topK": "",
"mirostat": "",
"mirostatEta": "",
"mirostatTau": "",
"numCtx": "",
"numGpu": "",
"numThread": "",
"repeatLastN": "",
"repeatPenalty": "",
"stop": "",
"tfsZ": ""
},
"outputAnchors": [
{
"id": "chatOllama_0-output-chatOllama-ChatOllama|BaseChatModel|BaseLanguageModel|Runnable",
"name": "chatOllama",
"label": "ChatOllama",
"description": "Chat completion using open-source LLM on Ollama",
"type": "ChatOllama | BaseChatModel | BaseLanguageModel | Runnable"
}
],
"outputs": {},
"selected": false
},
"width": 300,
"height": 580,
"selected": false,
"positionAbsolute": {
"x": 527.7825642464799,
"y": 187.2894095306179
},
"dragging": false
}
],
"edges": [
{
"source": "bufferMemory_0",
"sourceHandle": "bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory",
"target": "conversationChain_0",
"targetHandle": "conversationChain_0-input-memory-BaseMemory",
"type": "buttonedge",
"id": "bufferMemory_0-bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory-conversationChain_0-conversationChain_0-input-memory-BaseMemory"
},
{
"source": "chatOllama_0",
"sourceHandle": "chatOllama_0-output-chatOllama-ChatOllama|BaseChatModel|BaseLanguageModel|Runnable",
"target": "conversationChain_0",
"targetHandle": "conversationChain_0-input-model-BaseChatModel",
"type": "buttonedge",
"id": "chatOllama_0-chatOllama_0-output-chatOllama-ChatOllama|BaseChatModel|BaseLanguageModel|Runnable-conversationChain_0-conversationChain_0-input-model-BaseChatModel"
}
]
}
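
A note on running this flow, plus a minimal sketch of calling it. The ChatOllama node's baseUrl of http://host.docker.internal:11434 assumes Flowise runs in Docker while Ollama listens on the host; the model name SARA-llama3.2:latest suggests a custom Ollama model (e.g., created from a llama3.2 Modelfile with `ollama create`), which must exist locally before the flow can answer. The TypeScript below is a hedged sketch of invoking the imported chatflow through Flowise's standard prediction endpoint; the base URL, the <chatflow-id> placeholder, and the session id are assumptions to replace with your own values.

// Minimal sketch: calling this chatflow via Flowise's prediction REST API.
// Assumptions (not part of the JSON above): Flowise on its default port 3000,
// and <chatflow-id> standing in for the id Flowise assigns after import.

const FLOWISE_URL = "http://localhost:3000";
const CHATFLOW_ID = "<chatflow-id>"; // placeholder: copy the real id from the Flowise UI

async function ask(question: string, sessionId?: string): Promise<string> {
  const res = await fetch(`${FLOWISE_URL}/api/v1/prediction/${CHATFLOW_ID}`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({
      question,
      // Buffer Memory keys history by session id; pass a stable value so
      // separate conversations do not share context.
      overrideConfig: sessionId ? { sessionId } : undefined,
    }),
  });
  if (!res.ok) throw new Error(`Flowise request failed: ${res.status}`);
  const data = await res.json();
  return data.text; // Flowise returns the chain's reply in the "text" field
}

// Example usage (Node 18+, global fetch):
ask("What can you help me with?", "demo-session").then(console.log);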