.env.example
# GLHF Configuration
# API key for authenticating with the GLHF service
VITE_API_KEY_GLHF=your_api_key_here
# Base URL of the GLHF chat service
VITE_GLHF_CHAT_BASE_URL=https://glhf.chat
# Full URL of GLHF's OpenAI-compatible chat completions endpoint
VITE_API_URL_GLHF=https://glhf.chat/api/openai/v1/chat/completions
# Base URL for GLHF requests; point this at a local proxy if you route requests through one
VITE_BASE_URL_GLHF=https://glhf.chat
# Fast, lighter model for quick processing
VITE_AI_FAST_MODEL_GLHF=gpt-3.5-turbo
# More accurate but slower model for precise results
VITE_AI_PRECISE_MODEL_GLHF=gpt-4
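For illustration, here is a minimal sketch of how a Vite client might consume the GLHF variables above, assuming a standard OpenAI-style chat completions request. `askGlhf` and its `fast` flag are hypothetical names introduced for this example, not part of any existing code.

```ts
// Hypothetical sketch: calling GLHF's OpenAI-compatible endpoint with the
// variables defined above. Vite exposes VITE_-prefixed vars on import.meta.env.
export async function askGlhf(prompt: string, fast = true): Promise<string> {
  const response = await fetch(import.meta.env.VITE_API_URL_GLHF, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${import.meta.env.VITE_API_KEY_GLHF}`,
    },
    body: JSON.stringify({
      // Choose between the fast and precise model identifiers.
      model: fast
        ? import.meta.env.VITE_AI_FAST_MODEL_GLHF
        : import.meta.env.VITE_AI_PRECISE_MODEL_GLHF,
      messages: [{ role: "user", content: prompt }],
    }),
  });
  if (!response.ok) throw new Error(`GLHF request failed: ${response.status}`);
  const data = await response.json();
  // Standard OpenAI-style response shape.
  return data.choices[0].message.content;
}
```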
# LMStudio Configuration
# Base URL of the local LMStudio server's OpenAI-compatible API
VITE_API_URL_LMSTUDIO=http://localhost:1234/v1
# Model identifier for fast processing in LMStudio
VITE_AI_FAST_MODEL_LMSTUDIO=local-model
# Model identifier for precise processing in LMStudio
VITE_AI_PRECISE_MODEL_LMSTUDIO=local-model
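Because LMStudio serves the same OpenAI-compatible API, a client can reuse the request shape from the GLHF sketch against the local base URL, appending the `/chat/completions` path; a local server needs no API key. `askLmStudio` is again a hypothetical name.

```ts
// Hypothetical sketch: the same OpenAI-style request against a local LMStudio
// server. Only the base URL and model name change; no Authorization header.
export async function askLmStudio(prompt: string): Promise<string> {
  const url = `${import.meta.env.VITE_API_URL_LMSTUDIO}/chat/completions`;
  const response = await fetch(url, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({
      model: import.meta.env.VITE_AI_FAST_MODEL_LMSTUDIO,
      messages: [{ role: "user", content: prompt }],
    }),
  });
  if (!response.ok) throw new Error(`LMStudio request failed: ${response.status}`);
  const data = await response.json();
  return data.choices[0].message.content;
}
```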
# Local LM Configuration
# Ollama API endpoint for local model inference
VITE_API_URL_LOCAL_LM=http://localhost:11434/api/chat
# Fast model identifier for Ollama
VITE_AI_FAST_MODEL_LOCAL_LM=llama2
# Precise model identifier for Ollama
VITE_AI_PRECISE_MODEL_LOCAL_LM=llama2
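Unlike the two providers above, Ollama's native `/api/chat` endpoint uses its own payload and response shape: the reply arrives in `message.content` rather than a `choices` array. A sketch under the same assumptions (`askOllama` is a hypothetical name):

```ts
// Hypothetical sketch: calling Ollama's native chat endpoint as configured
// above. With stream: false, Ollama returns a single JSON object whose reply
// lives in message.content.
export async function askOllama(prompt: string): Promise<string> {
  const response = await fetch(import.meta.env.VITE_API_URL_LOCAL_LM, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({
      model: import.meta.env.VITE_AI_FAST_MODEL_LOCAL_LM,
      messages: [{ role: "user", content: prompt }],
      stream: false, // return one JSON object instead of a token stream
    }),
  });
  if (!response.ok) throw new Error(`Ollama request failed: ${response.status}`);
  const data = await response.json();
  return data.message.content;
}
```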