Skip to content

Commit

Permalink
Release v0.3.21
Browse files Browse the repository at this point in the history
  • Loading branch information
fern-api[bot] committed Apr 9, 2024
1 parent 37554ea commit 7f04e76
Show file tree
Hide file tree
Showing 2 changed files with 38 additions and 1 deletion.
2 changes: 1 addition & 1 deletion core/client_option.go
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,6 @@ func (c *ClientOptions) cloneHeader() http.Header {
headers := c.HTTPHeader.Clone()
headers.Set("X-Fern-Language", "Go")
headers.Set("X-Fern-SDK-Name", "github.com/vellum-ai/vellum-client-go")
headers.Set("X-Fern-SDK-Version", "v0.3.20")
headers.Set("X-Fern-SDK-Version", "v0.3.21")
return headers
}
37 changes: 37 additions & 0 deletions types.go
Original file line number Diff line number Diff line change
Expand Up @@ -3093,6 +3093,7 @@ func (f *FulfilledFunctionCall) String() string {
// FulfilledPromptExecutionMeta carries execution metadata for a prompt
// execution in the fulfilled state (per the type name).
// NOTE(review): field semantics below are inferred from names/JSON tags —
// confirm against the Vellum API docs.
type FulfilledPromptExecutionMeta struct {
// Execution latency; the unit (ms vs ns) is not visible in this chunk — TODO confirm.
Latency *int `json:"latency,omitempty"`
// Reason the model reported for finishing generation.
FinishReason *FinishReasonEnum `json:"finish_reason,omitempty"`
// Model-host usage accounting (tokens/characters), when present in the response.
Usage *MlModelUsage `json:"usage,omitempty"`

// Raw payload as received — presumably set by a companion UnmarshalJSON
// (not visible in this chunk) so String can echo it verbatim; verify.
_rawJSON json.RawMessage
}
Expand Down Expand Up @@ -4216,6 +4217,39 @@ func (m *MetadataFilterRuleRequest) String() string {
return fmt.Sprintf("%#v", m)
}

// MlModelUsage captures usage accounting reported by a model host:
// token and character counts for input/output plus compute time.
type MlModelUsage struct {
OutputTokenCount *int `json:"output_token_count,omitempty"`
InputTokenCount *int `json:"input_token_count,omitempty"`
InputCharCount *int `json:"input_char_count,omitempty"`
OutputCharCount *int `json:"output_char_count,omitempty"`
// Compute duration in nanoseconds, per the field name — TODO confirm unit.
ComputeNanos *int `json:"compute_nanos,omitempty"`

// Exact payload received from the API; UnmarshalJSON sets it and
// String prefers it over re-marshaling when non-empty.
_rawJSON json.RawMessage
}

// UnmarshalJSON decodes data into m and retains the raw payload so that
// String can later reproduce the server's exact JSON.
func (m *MlModelUsage) UnmarshalJSON(data []byte) error {
	// Decode via a local alias type: it shares MlModelUsage's fields but not
	// its methods, which prevents infinite recursion into this method.
	type plain MlModelUsage
	var decoded plain
	if err := json.Unmarshal(data, &decoded); err != nil {
		return err
	}
	*m = MlModelUsage(decoded)
	m._rawJSON = json.RawMessage(data)
	return nil
}

// String renders m as JSON, preferring the raw payload captured at decode
// time; it falls back to marshaling the struct, then to Go syntax.
func (m *MlModelUsage) String() string {
	if raw := m._rawJSON; len(raw) > 0 {
		if s, err := core.StringifyJSON(raw); err == nil {
			return s
		}
	}
	if s, err := core.StringifyJSON(m); err == nil {
		return s
	}
	// Last resort: Go-syntax representation.
	return fmt.Sprintf("%#v", m)
}

type ModelVersionBuildConfig struct {
// The name of the base model used to create this model version, as identified by the LLM provider.
BaseModel string `json:"base_model"`
Expand Down Expand Up @@ -6074,6 +6108,8 @@ type PromptDeploymentExpandMetaRequestRequest struct {
PromptVersionId *bool `json:"prompt_version_id,omitempty"`
// If enabled, the response will include the reason provided by the model for why the execution finished.
FinishReason *bool `json:"finish_reason,omitempty"`
// If enabled, the response will include model host usage tracking. This may increase latency for some model hosts.
Usage *bool `json:"usage,omitempty"`

_rawJSON json.RawMessage
}
Expand Down Expand Up @@ -6206,6 +6242,7 @@ func (p *PromptDeploymentInputRequest) Accept(visitor PromptDeploymentInputReque

// The subset of the metadata tracked by Vellum during prompt execution that the request opted into with `expand_meta`.
type PromptExecutionMeta struct {
Usage *MlModelUsage `json:"usage,omitempty"`
ModelName *string `json:"model_name,omitempty"`
Latency *int `json:"latency,omitempty"`
DeploymentReleaseTag *string `json:"deployment_release_tag,omitempty"`
Expand Down

0 comments on commit 7f04e76

Please sign in to comment.