@@ -13,39 +13,13 @@ type Message struct {
13
13
Role string `json:"role"`
14
14
}
15
15
16
- // Usage defines billing and rate-limit usage information.
17
- // Billing and rate-limiting are driven by token counts, since tokens represent
18
- // the underlying cost to Anthropic.
19
- type Usage struct {
20
- InputTokens int `json:"input_tokens"`
21
- OutputTokens int `json:"output_tokens"`
22
- }
23
-
24
16
// Content is a single block of message content returned by or sent to the
// model, carrying a type discriminator and its textual payload.
type Content struct {
	// Type identifies the kind of content block — presumably "text";
	// TODO(review): confirm the full set of values against the API.
	Type string `json:"type"`
	// Text is the textual content of the block.
	Text string `json:"text"`
}
28
20
29
21
// CreateMessageInput defines a structured list of input messages.
30
22
type CreateMessageInput struct {
31
- // MaxTokens defines the maximum number of tokens to generate before
32
- // stopping. Token generation may stop before reaching this limit, this only
33
- // specifies the absolute maximum number of tokens to generate. Different
34
- // models have different maximum token limits.
35
- MaxTokens int `json:"max_tokens"`
36
- // Messages are the input messages, models are trained to operate on
37
- // alternating user and assistant conversational turns. When creating a new
38
- // message, prior conversational turns can be specified with this field.
39
- Messages []Message `json:"messages"`
40
- // Model defines the language model that will be used to complete the
41
- // prompt. See model.go for a list of available models.
42
- Model LanguageModel `json:"model"`
43
- // StopSequences defines custom text sequences that will cause the model to
44
- // stop generating.
45
- StopSequences []string `json:"stop_sequences,omitempty"`
46
- // System provides a means of specifying context and instructions to the
47
- // model, such as specifying a particular goal or role.
48
- System string `json:"system,omitempty"`
49
23
// Temperature defines the amount of randomness injected into the response.
50
24
// Note that even with a temperature of 0.0, results will not be fully
51
25
// deterministic.
@@ -54,23 +28,45 @@ type CreateMessageInput struct {
54
28
// Recommended for advanced use cases only. You usually only need to use
55
29
// Temperature.
56
30
TopK * int `json:"top_k,omitempty"`
57
- // TopP (nucleus-sampling) defines the cumulative probability of the highest probability.
31
+ // TopP (nucleus-sampling) defines the cumulative probability of the highest
32
+ // probability.
58
33
// Recommended for advanced use cases only. You usually only need to use
59
34
// Temperature.
60
35
TopP * float64 `json:"top_p,omitempty"`
36
+ // Model defines the language model that will be used to complete the
37
+ // prompt. See model.go for a list of available models.
38
+ Model LanguageModel `json:"model"`
39
+ // System provides a means of specifying context and instructions to the
40
+ // model, such as specifying a particular goal or role.
41
+ System string `json:"system,omitempty"`
42
+ // Messages are the input messages, models are trained to operate on
43
+ // alternating user and assistant conversational turns. When creating a new
44
+ // message, prior conversational turns can be specified with this field.
45
+ Messages []Message `json:"messages"`
46
+ // StopSequences defines custom text sequences that will cause the model to
47
+ // stop generating.
48
+ StopSequences []string `json:"stop_sequences,omitempty"`
49
+ // MaxTokens defines the maximum number of tokens to generate before
50
+ // stopping. Token generation may stop before reaching this limit, this only
51
+ // specifies the absolute maximum number of tokens to generate. Different
52
+ // models have different maximum token limits.
53
+ MaxTokens int `json:"max_tokens"`
61
54
}
62
55
56
// CreateMessageOutput defines the response from creating a new message.
//
// Fields are pointers so that callers can distinguish values absent from the
// API response from genuine zero values after JSON decoding.
type CreateMessageOutput struct {
	// ID is the identifier of the created message (JSON "id").
	ID *string `json:"id"`
	// Type is the object type reported by the API (JSON "type").
	Type *string `json:"type"`
	// Role is the role of the generated message (JSON "role").
	Role *string `json:"role"`
	// Model is the model that produced the response (JSON "model").
	Model *string `json:"model"`
	// StopSequence is the stop sequence that ended generation, if any
	// (JSON "stop_sequence").
	StopSequence *string `json:"stop_sequence"`
	// StopReason is the reason generation stopped (JSON "stop_reason").
	StopReason *string `json:"stop_reason"`
	// Usage reports token usage for the request.
	// NOTE(review): the Usage struct was removed from this file in this
	// change — confirm it is still declared elsewhere in the package.
	Usage *Usage `json:"usage"`
	// Content is a list of generated messages.
	Content []*Content `json:"content"`
}
73
68
69
+ // String implements the fmt.Stringer interface for CreateMessageOutput.
74
70
func (c * CreateMessageOutput ) String () string {
75
71
return c .Content [0 ].Text
76
72
}
0 commit comments