Commit 2e5c7ff

Merge pull request #494 from innogames/md_openai_config

openai: add more config flags

2 parents: 5391a0e + 14e55db

7 files changed: +42 −26 lines

command/openai/api.go (12 additions, 13 deletions)

```diff
@@ -13,21 +13,20 @@ const (
 	roleAssistant = "assistant"
 )
 
-// https://platform.openai.com/docs/guides/chat/chat-completions-beta
 // https://platform.openai.com/docs/api-reference/chat
 type ChatRequest struct {
-	Model            string         `json:"model"`
-	Messages         []ChatMessage  `json:"messages"`
-	Temperature      float32        `json:"temperature,omitempty"`
-	TopP             float32        `json:"top_p,omitempty"`
-	N                int            `json:"n,omitempty"`
-	Stop             []string       `json:"stop,omitempty"`
-	Stream           bool           `json:"stream,omitempty"`
-	MaxTokens        int            `json:"max_tokens,omitempty"`
-	PresencePenalty  float32        `json:"presence_penalty,omitempty"`
-	FrequencyPenalty float32        `json:"frequency_penalty,omitempty"`
-	LogitBias        map[string]int `json:"logit_bias,omitempty"`
-	User             string         `json:"user,omitempty"`
+	Model            string        `json:"model"`
+	Messages         []ChatMessage `json:"messages"`
+	Temperature      float32       `json:"temperature,omitempty"`
+	TopP             float32       `json:"top_p,omitempty"`
+	N                int           `json:"n,omitempty"`
+	Stop             []string      `json:"stop,omitempty"`
+	Stream           bool          `json:"stream,omitempty"`
+	MaxTokens        int           `json:"max_tokens,omitempty"`
+	PresencePenalty  float32       `json:"presence_penalty,omitempty"`
+	FrequencyPenalty float32       `json:"frequency_penalty,omitempty"`
+	User             string        `json:"user,omitempty"`
+	Seed             string        `json:"seed,omitempty"`
 }
 
 type ChatMessage struct {
```
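
The new `Seed` field joins the other optional fields, all tagged `omitempty`. A minimal sketch (not part of this commit) of what that means on the wire: fields left at their zero value stay out of the request payload entirely, so the API defaults still apply.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed-down copy of ChatRequest with just the fields needed for the demo.
type ChatRequest struct {
	Model     string `json:"model"`
	MaxTokens int    `json:"max_tokens,omitempty"`
	Seed      string `json:"seed,omitempty"`
}

func main() {
	// Seed and MaxTokens set: both fields appear in the request body.
	withSeed, _ := json.Marshal(ChatRequest{Model: "gpt-3.5-turbo", MaxTokens: 512, Seed: "1234"})
	fmt.Println(string(withSeed)) // {"model":"gpt-3.5-turbo","max_tokens":512,"seed":"1234"}

	// Left at their zero values, omitempty drops them entirely.
	bare, _ := json.Marshal(ChatRequest{Model: "gpt-3.5-turbo"})
	fmt.Println(string(bare)) // {"model":"gpt-3.5-turbo"}
}
```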

command/openai/client.go (2 additions, 0 deletions)

```diff
@@ -18,6 +18,8 @@ func CallChatGPT(cfg Config, inputMessages []ChatMessage, stream bool) (<-chan s
 	jsonData, _ := json.Marshal(ChatRequest{
 		Model:       cfg.Model,
 		Temperature: cfg.Temperature,
+		Seed:        cfg.Seed,
+		MaxTokens:   cfg.MaxTokens,
 		Stream:      stream,
 		Messages:    inputMessages,
 	})
```
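
The surrounding request code is not part of this diff; as a rough sketch of where the marshaled body would go, assuming the standard public chat-completions endpoint (the `apiKey` parameter here is a placeholder, not a value from the commit):

```go
package openai

import (
	"bytes"
	"net/http"
)

// callChatCompletions is an illustrative sketch, not code from this commit:
// it shows where a body marshaled from ChatRequest would typically be sent.
// The endpoint URL and headers follow the public OpenAI API docs; the
// apiKey parameter is an assumption, not a value shown in this diff.
func callChatCompletions(apiKey string, jsonData []byte) (*http.Response, error) {
	req, err := http.NewRequest(http.MethodPost,
		"https://api.openai.com/v1/chat/completions",
		bytes.NewReader(jsonData))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer "+apiKey)
	return http.DefaultClient.Do(req)
}
```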

command/openai/command.go (11 additions, 4 deletions)

```diff
@@ -101,7 +101,9 @@ func (c *chatGPTCommand) startConversation(message msg.Ref, text string) bool {
 			})
 		}
 		storageIdentifier = getIdentifier(message.GetChannel(), message.GetThread())
-		log.Infof("openai thread context: %s", messageHistory)
+		if c.cfg.LogTexts {
+			log.Infof("openai thread context: %s", messageHistory)
+		}
 	} else if linkRe.MatchString(text) {
 		// a link to another thread was posted -> use this messages as context
 		link := linkRe.FindStringSubmatch(text)
@@ -242,14 +244,19 @@ func (c *chatGPTCommand) callAndStore(messages []ChatMessage, storageIdentifier
 		stats.Increase("openai_output_tokens", estimateTokensForMessage(responseText.String()))
 
 		log.Infof(
-			"Openai %s call took %s with %d sub messages (%d tokens). Message: '%s'. Response: '%s'",
+			"Openai %s call took %s with %d sub messages (%d tokens).",
 			c.cfg.Model,
 			util.FormatDuration(time.Since(startTime)),
 			len(messages),
 			inputTokens,
-			inputText,
-			responseText.String(),
 		)
+		if c.cfg.LogTexts {
+			log.Infof(
+				"Openai texts. Input: '%s'. Response: '%s'",
+				inputText,
+				responseText.String(),
+			)
+		}
 	}()
 }
 
```
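
Both hunks follow the same opt-in pattern: call metadata is always logged, message texts only when `log_texts` is enabled. A self-contained sketch of the idea, assuming a logrus-style logger as the `log.Infof` calls suggest:

```go
package main

import (
	log "github.com/sirupsen/logrus"
)

// logCall always records call metadata, but emits the potentially personal
// message texts only when the operator opted in via log_texts.
func logCall(logTexts bool, model, input, response string, tokens int) {
	log.Infof("Openai %s call (%d tokens).", model, tokens)
	if logTexts {
		log.Infof("Openai texts. Input: '%s'. Response: '%s'", input, response)
	}
}

func main() {
	logCall(false, "gpt-3.5-turbo", "hi", "hello!", 3) // texts stay out of the log
}
```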

command/openai/config.go (5 additions, 0 deletions)

```diff
@@ -13,6 +13,8 @@ type Config struct {
 	InitialSystemMessage string  `mapstructure:"initial_system_message"`
 	Model                string  `mapstructure:"model"`
 	Temperature          float32 `mapstructure:"temperature"`
+	Seed                 string  `mapstructure:"seed"`
+	MaxTokens            int     `mapstructure:"max_tokens"`
 
 	// number of thread messages stored which are used as a context for further requests
 	HistorySize int `mapstructure:"history_size"`
@@ -22,6 +24,9 @@ type Config struct {
 
 	// maximum update frequency of slack messages when "stream" is active
 	UpdateInterval time.Duration `mapstructure:"update_interval"`
+
+	// log all input+output text to the logger. This could include personal information, therefore disabled by default!
+	LogTexts bool `mapstructure:"log_texts"`
 }
 
 // IsEnabled checks if token is set
```
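
The `mapstructure` tags suggest the struct is decoded from parsed YAML config. A rough sketch (using github.com/mitchellh/mapstructure directly; the project's actual config loading is not shown in this diff) of how the new keys map onto the struct:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// Subset of the Config struct from this commit, for illustration.
type Config struct {
	Model       string  `mapstructure:"model"`
	Temperature float32 `mapstructure:"temperature"`
	Seed        string  `mapstructure:"seed"`
	MaxTokens   int     `mapstructure:"max_tokens"`
	LogTexts    bool    `mapstructure:"log_texts"`
}

func main() {
	// Values as they might arrive from a parsed YAML config file.
	raw := map[string]any{
		"model":      "gpt-3.5-turbo",
		"seed":       "1234",
		"max_tokens": 512,
		"log_texts":  true, // opt in, off by default
	}

	var cfg Config
	if err := mapstructure.Decode(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg)
}
```

Running it prints `{Model:gpt-3.5-turbo Temperature:0 Seed:1234 MaxTokens:512 LogTexts:true}`.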

command/openai/tokens.go (8 additions, 6 deletions)

```diff
@@ -9,15 +9,17 @@ import (
 var maxTokens = map[string]int{
 	"gpt-4":                8192,
 	"gpt-4-32k":            32768,
-	"gpt-3.5-turbo-16k":    16385,
-	"gpt-3.5-turbo":        4096,
 	"gpt-4-1106-preview":   128000,
 	"gpt-4-vision-preview": 128000,
+	"gpt-3.5-turbo-16k":    16385,
+	"gpt-3.5-turbo":        4096,
 	"dummy-test":           100, // just for testing
 }
 
 var modelDateRe = regexp.MustCompile(`-\d{4}`)
 
+// truncateMessages will truncate the messages to fit into the max tokens limit of the model
+// we always try to keep the last message, so we will truncate the first messages
 func truncateMessages(model string, inputMessages []ChatMessage) ([]ChatMessage, int, int) {
 	outputMessages := make([]ChatMessage, 0, len(inputMessages))
 
@@ -47,12 +49,12 @@ func getMaxTokensForModel(model string) int {
 		return getMaxTokensForModel(modelDateRe.ReplaceAllString(model, ""))
 	}
 
-	// we need some default
-	return 4000
+	// we need some default, keep it high, as new models will most likely support more tokens
+	return 128000
 }
 
+// to lower the dependency to heavy external libs we use the rule of thumbs which is totally fine here
+// https://platform.openai.com/tokenizer
 func estimateTokensForMessage(message string) int {
-	// to lower the dependency to heavy external libs we use the rule of thumbs which is totally fine here
-	// https://platform.openai.com/tokenizer
 	return len(message) / 4
 }
```
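
Both helpers are easy to sanity-check in isolation. A self-contained sketch reproducing the lookup-with-fallback and the 4-characters-per-token rule of thumb; note the `MatchString` guard is an assumption about the part of `getMaxTokensForModel` the diff does not show:

```go
package main

import (
	"fmt"
	"regexp"
)

var maxTokens = map[string]int{
	"gpt-4":     8192,
	"gpt-4-32k": 32768,
}

var modelDateRe = regexp.MustCompile(`-\d{4}`)

// Exact lookup first, then retry with the date suffix (e.g. "-0613")
// stripped, else the high default introduced by this commit.
func getMaxTokensForModel(model string) int {
	if tokens, ok := maxTokens[model]; ok {
		return tokens
	}
	if modelDateRe.MatchString(model) {
		return getMaxTokensForModel(modelDateRe.ReplaceAllString(model, ""))
	}
	return 128000
}

// The 4-characters-per-token rule of thumb from tokens.go.
func estimateTokensForMessage(message string) int {
	return len(message) / 4
}

func main() {
	fmt.Println(getMaxTokensForModel("gpt-4-0613"))                          // 8192 after stripping "-0613"
	fmt.Println(getMaxTokensForModel("jolo"))                                // 128000 default
	fmt.Println(estimateTokensForMessage("Hello, how are you doing today?")) // 31 bytes -> 7 tokens
}
```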

command/openai/tokens_test.go (2 additions, 2 deletions)

```diff
@@ -11,8 +11,8 @@ func TestModels(t *testing.T) {
 		input    string
 		expected int
 	}{
-		{"", 4000},
-		{"jolo", 4000},
+		{"", 128000},
+		{"jolo", 128000},
 		{"gpt-4", 8192},
 		{"gpt-4-0613", 8192},
 		{"gpt-4-32k-0613", 32768},
```

readme.md (2 additions, 1 deletion)

````diff
@@ -49,7 +49,7 @@ oauth_config:
       - reactions:read
       - reactions:write
       - users:read
-      - files:write
+      - files:read
 settings:
   event_subscriptions:
     bot_events:
@@ -302,6 +302,7 @@ openai:
     update_interval: '3s' # fewer Slack messages update during generation
     model: gpt-3.5-turbo
     temperature: 0.8
+    log_texts: true # opt in: log all input/output text to the log
 ```
 
 When using the "openai XXX" command within a existing thread, the previous messages are used as context for further calls.
````
