messages.go

package anthropic

import (
    "context"
    "net/http"
)

// MessagesService handles communication with the messages-related methods
// of the API.
type MessagesService service

// Message represents a single message in a conversation, consisting of a
// role (such as "user" or "assistant") and its content.
type Message struct {
    Content string `json:"content"`
    Role    string `json:"role"`
}

// Content represents a single block of content in a message response.
type Content struct {
    Type string `json:"type"`
    Text string `json:"text"`
}

// CreateMessageInput defines the request parameters for creating a new
// message.
type CreateMessageInput struct {
    // Temperature defines the amount of randomness injected into the
    // response. Note that even with a temperature of 0.0, results will not
    // be fully deterministic.
    Temperature *float64 `json:"temperature,omitempty"`

    // TopK is used to remove long-tail low-probability responses by only
    // sampling from the top K options for each subsequent token.
    // Recommended for advanced use cases only; you usually only need to use
    // Temperature.
    TopK *int `json:"top_k,omitempty"`

    // TopP is the nucleus-sampling parameter. Use either Temperature or
    // TopP, but not both.
    // Recommended for advanced use cases only; you usually only need to use
    // Temperature.
    TopP *float64 `json:"top_p,omitempty"`

    // Model defines the language model that will be used to complete the
    // prompt. See model.go for a list of available models.
    Model LanguageModel `json:"model"`

    // System provides a means of specifying context and instructions to the
    // model, such as a particular goal or role.
    System string `json:"system,omitempty"`

    // Messages are the input messages. Models are trained to operate on
    // alternating user and assistant conversational turns. When creating a
    // new message, prior conversational turns can be specified with this
    // field, and the model generates the next Message in the conversation.
    Messages []Message `json:"messages"`

    // StopSequences defines custom text sequences that will cause the model
    // to stop generating. If the model encounters any of the sequences, the
    // response StopReason field will be set to "stop_sequence" and the
    // response StopSequence field will be set to the sequence that caused
    // the model to stop.
    StopSequences []string `json:"stop_sequences,omitempty"`

    // MaxTokens defines the maximum number of tokens to generate before
    // stopping. Token generation may stop before reaching this limit; this
    // only specifies the absolute maximum number of tokens to generate.
    // Different models have different maximum token limits.
    MaxTokens int `json:"max_tokens"`
}
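
// exampleSingleTurnInput is an illustrative sketch, not part of the original
// file, showing how a minimal single-turn request could be assembled. It
// assumes LanguageModel is a string-based type defined in model.go; the
// model identifier below is a hypothetical placeholder.
func exampleSingleTurnInput() *CreateMessageInput {
    return &CreateMessageInput{
        // Hypothetical model identifier; see model.go for real constants.
        Model:     LanguageModel("claude-3-haiku-20240307"),
        MaxTokens: 1024,
        Messages: []Message{
            {Role: "user", Content: "Hello, Claude!"},
        },
    }
}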

// CreateMessageOutput defines the response from creating a new message.
type CreateMessageOutput struct {
    ID           *string    `json:"id"`
    Type         *string    `json:"type"`
    Role         *string    `json:"role"`
    Model        *string    `json:"model"`
    StopSequence *string    `json:"stop_sequence"`
    StopReason   *string    `json:"stop_reason"`
    Usage        *Usage     `json:"usage"`
    Content      []*Content `json:"content"`
}

// String implements the fmt.Stringer interface for CreateMessageOutput. It
// returns the text of the first content block, or "" if there is none.
func (c *CreateMessageOutput) String() string {
    if c == nil || len(c.Content) == 0 {
        return ""
    }
    return c.Content[0].Text
}

// Create creates a new message using the provided options.
func (c *MessagesService) Create(
    ctx context.Context,
    in *CreateMessageInput,
) (*CreateMessageOutput, *http.Response, error) {
    req, err := c.client.NewRequest(http.MethodPost, "messages", in)
    if err != nil {
        return nil, nil, err
    }

    out := new(CreateMessageOutput)
    resp, err := c.client.Do(ctx, req, out)
    if err != nil {
        return nil, resp, err
    }

    return out, resp, nil
}
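
// exampleCreate is an illustrative sketch, not part of the original file,
// showing an end-to-end call against an already-constructed MessagesService
// using the hypothetical exampleSingleTurnInput above. How the service is
// obtained (e.g. from a package-level client) is outside this file.
func exampleCreate(ctx context.Context, svc *MessagesService) (string, error) {
    out, _, err := svc.Create(ctx, exampleSingleTurnInput())
    if err != nil {
        return "", err
    }
    // CreateMessageOutput implements fmt.Stringer, so this returns the text
    // of the first content block.
    return out.String(), nil
}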