diff --git a/pkg/component/ai/openai/v0/README.mdx b/pkg/component/ai/openai/v0/README.mdx
index 87d07510e..78abe6a01 100644
--- a/pkg/component/ai/openai/v0/README.mdx
+++ b/pkg/component/ai/openai/v0/README.mdx
@@ -62,7 +62,7 @@ OpenAI's text generation models (often called generative pre-trained transformer
 | Input | Field ID | Type | Description |
 | :--- | :--- | :--- | :--- |
 | Task ID (required) | `task` | string | `TASK_TEXT_GENERATION` |
-| Model (required) | `model` | string | ID of the model to use. <br/><details><summary><strong>Enum values</strong></summary><ul><li>`o1-preview`</li><li>`o1-mini`</li><li>`gpt-4o-mini`</li><li>`gpt-4o`</li></ul></details> |
+| Model (required) | `model` | string | ID of the model to use. <br/><details><summary><strong>Enum values</strong></summary><ul><li>`o1`</li><li>`o1-preview`</li><li>`o1-mini`</li><li>`gpt-4o-mini`</li><li>`gpt-4o`</li></ul></details> |
 | Prompt (required) | `prompt` | string | The prompt text. |
 | System Message | `system-message` | string | The system message helps set the behavior of the assistant. For example, you can modify the personality of the assistant or provide specific instructions about how it should behave throughout the conversation. By default, the model’s behavior is using a generic message as "You are a helpful assistant.". |
 | Image | `images` | array[string] | The images. |
@@ -74,6 +74,9 @@ OpenAI's text generation models (often called generative pre-trained transformer
 | Top P | `top-p` | number | An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. |
 | Presence Penalty | `presence-penalty` | number | Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. |
 | Frequency Penalty | `frequency-penalty` | number | Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. |
+| [Prediction](#text-generation-prediction) | `prediction` | object | Configuration for a Predicted Output, which can greatly improve response times when large parts of the model response are known ahead of time. This is most common when you are regenerating a file with only minor changes to most of the content. |
+| [Tools](#text-generation-tools) | `tools` | array[object] | A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported. |
+| Tool Choice | `tool-choice` | any | Controls which (if any) tool is called by the model. 'none' means the model will not call any tool and instead generates a message. 'auto' means the model can pick between generating a message or calling one or more tools. 'required' means the model must call one or more tools. |
@@ -113,6 +116,39 @@ The image URL
 | :--- | :--- | :--- | :--- |
 | URL | `url` | string | Either a URL of the image or the base64 encoded image data. |
+
+<h4 id="text-generation-prediction">Prediction</h4>
+
+Configuration for a Predicted Output, which can greatly improve response times when large parts of the model response are known ahead of time. This is most common when you are regenerating a file with only minor changes to most of the content.
+
+<div class="markdown-col-no-wrap" data-col-1 data-col-2>
+
+| Field | Field ID | Type | Note |
+| :--- | :--- | :--- | :--- |
+| Content | `content` | string | The content that should be matched when generating a model response. If generated tokens would match this content, the entire model response can be returned much more quickly. |
+</div>
+
+<h4 id="text-generation-tools">Tools</h4>
+
+A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported.
+
+<div class="markdown-col-no-wrap" data-col-1 data-col-2>
+
+| Field | Field ID | Type | Note |
+| :--- | :--- | :--- | :--- |
+| [Function](#text-generation-function) | `function` | object | The function to call. |
+</div>
+
+<h4 id="text-generation-function">Function</h4>
+
+The function to call.
+
+<div class="markdown-col-no-wrap" data-col-1 data-col-2>
+
+| Field | Field ID | Type | Note |
+| :--- | :--- | :--- | :--- |
+| Description | `description` | string | A description of what the function does, used by the model to choose when and how to call the function. |
+| Name | `name` | string | The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. |
+| Parameters | `parameters` | object | The parameters the functions accepts, described as a JSON Schema object. Omitting parameters defines a function with an empty parameter list. |
+| Strict | `strict` | boolean | Whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact schema defined in the parameters field. |
+</div>
@@ -156,22 +192,67 @@
 | Output | Field ID | Type | Description |
 | :--- | :--- | :--- | :--- |
 | Texts | `texts` | array[string] | Texts. |
+| [Tool Calls](#text-generation-tool-calls) (optional) | `tool-calls` | array[object] | The tool calls generated by the model, such as function calls. |
 | [Usage](#text-generation-usage) (optional) | `usage` | object | Usage statistics related to the query. |
 
 <details>
 <summary> Output Objects in Text Generation</summary>
 
+<h4 id="text-generation-tool-calls">Tool Calls</h4>
+
+<div class="markdown-col-no-wrap" data-col-1 data-col-2>
+
+| Field | Field ID | Type | Note |
+| :--- | :--- | :--- | :--- |
+| [Function](#text-generation-function) | `function` | object | The function that the model called. |
+| Type | `type` | string | The type of the tool. Currently, only function is supported. |
+</div>
+
+<h4 id="text-generation-function">Function</h4>
+
+<div class="markdown-col-no-wrap" data-col-1 data-col-2>
+
+| Field | Field ID | Type | Note |
+| :--- | :--- | :--- | :--- |
+| Arguments | `arguments` | string | The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. |
+| Name | `name` | string | The name of the function to call. |
+</div>
+
 <h4 id="text-generation-usage">Usage</h4>
 
 <div class="markdown-col-no-wrap" data-col-1 data-col-2>
 
 | Field | Field ID | Type | Note |
 | :--- | :--- | :--- | :--- |
+| [Completion token details](#text-generation-completion-token-details) | `completion-token-details` | object | Breakdown of tokens used in a completion. |
 | Completion tokens | `completion-tokens` | integer | Total number of tokens used (completion). |
+| [Prompt token details](#text-generation-prompt-token-details) | `prompt-token-details` | object | Breakdown of tokens used in the prompt. |
 | Prompt tokens | `prompt-tokens` | integer | Total number of tokens used (prompt). |
 | Total tokens | `total-tokens` | integer | Total number of tokens used (prompt + completion). |
 </div>
+
+<h4 id="text-generation-prompt-token-details">Prompt Token Details</h4>
+
+<div class="markdown-col-no-wrap" data-col-1 data-col-2>
+
+| Field | Field ID | Type | Note |
+| :--- | :--- | :--- | :--- |
+| Audio tokens | `audio-tokens` | integer | Audio input tokens present in the prompt. |
+| Cached tokens | `cached-tokens` | integer | Cached tokens present in the prompt. |
+</div>
+
+<h4 id="text-generation-completion-token-details">Completion Token Details</h4>
+
+<div class="markdown-col-no-wrap" data-col-1 data-col-2>
+
+| Field | Field ID | Type | Note |
+| :--- | :--- | :--- | :--- |
+| Accepted prediction tokens | `accepted-prediction-tokens` | integer | When using Predicted Outputs, the number of tokens in the prediction that appeared in the completion. |
+| Audio tokens | `audio-tokens` | integer | Audio input tokens generated by the model. |
+| Reasoning tokens | `reasoning-tokens` | integer | Tokens generated by the model for reasoning. |
+| Rejected prediction tokens | `rejected-prediction-tokens` | integer | When using Predicted Outputs, the number of tokens in the prediction that did not appear in the completion. However, like reasoning tokens, these tokens are still counted in the total completion tokens for purposes of billing, output, and context window limits. |
+</div>
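The three new inputs map directly onto Chat Completions request fields (`prediction`, `tools`, `tool_choice`). Below is a minimal sketch of the JSON body this would produce, using trimmed copies of the request structs this diff adds in `task_text_generation.go`; the `get_weather` function and its schema are hypothetical examples, not part of the change.

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// Trimmed copies of the request types added in task_text_generation.go.
type predictionReqStruct struct {
	Type    string `json:"type"`    // always "content" for Predicted Outputs
	Content string `json:"content"` // text expected to reappear in the response
}

type functionReqStruct struct {
	Description string         `json:"description"`
	Name        string         `json:"name"`
	Parameters  map[string]any `json:"parameters"`
	Strict      *bool          `json:"strict"`
}

type toolReqStruct struct {
	Type     string            `json:"type"` // only "function" is supported today
	Function functionReqStruct `json:"function"`
}

func main() {
	strict := true
	body := map[string]any{
		"model": "gpt-4o",
		// Predicted Output: tokens matching this content can be returned
		// without being regenerated, speeding up near-identical rewrites.
		"prediction": predictionReqStruct{
			Type:    "content",
			Content: "func add(a, b int) int { return a + b }",
		},
		"tools": []toolReqStruct{{
			Type: "function",
			Function: functionReqStruct{
				Name:        "get_weather", // hypothetical function
				Description: "Look up the current weather for a city.",
				Parameters: map[string]any{
					"type": "object",
					"properties": map[string]any{
						"city": map[string]any{"type": "string"},
					},
					"required": []string{"city"},
				},
				Strict: &strict,
			},
		}},
		// tool_choice is either a string ("none" | "auto" | "required") or an
		// object forcing one function, which is why the input field is `any`.
		"tool_choice": map[string]any{
			"type":     "function",
			"function": map[string]string{"name": "get_weather"},
		},
	}

	enc := json.NewEncoder(os.Stdout)
	enc.SetIndent("", "  ")
	if err := enc.Encode(body); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```

`strict` is a pointer for the same reason as the `*bool` field in the diff: an unset value can be distinguished from an explicit `false`.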
diff --git a/pkg/component/ai/openai/v0/config/tasks.yaml b/pkg/component/ai/openai/v0/config/tasks.yaml index 4609b192c..2476f1cd3 100644 --- a/pkg/component/ai/openai/v0/config/tasks.yaml +++ b/pkg/component/ai/openai/v0/config/tasks.yaml @@ -190,6 +190,7 @@ TASK_TEXT_GENERATION: model: description: ID of the model to use. enum: + - o1 - o1-preview - o1-mini - gpt-4o-mini @@ -221,6 +222,7 @@ TASK_TEXT_GENERATION: uiOrder: 0 instillCredentialMap: values: + - o1 - o1-preview - o1-mini - gpt-4o @@ -353,6 +355,94 @@ TASK_TEXT_GENERATION: shortDescription: An alternative to sampling with temperature, called nucleus sampling uiOrder: 9 title: Top P + prediction: + description: Configuration for a Predicted Output, which can greatly improve response times when large parts of the model response are known ahead + of time. This is most common when you are regenerating a file with only minor changes to most of the content. + type: object + uiOrder: 12 + title: Prediction + properties: + content: + description: The content that should be matched when generating a model response. If generated tokens would match this content, the entire model + response can be returned much more quickly. + type: string + uiOrder: 0 + title: Content + tools: + description: A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the + model may generate JSON inputs for. A max of 128 functions are supported. + type: array + uiOrder: 13 + title: Tools + items: + type: object + required: + - function + properties: + function: + uiOrder: 0 + title: Function + type: object + description: The function to call. + required: + - name + properties: + description: + type: string + uiOrder: 0 + title: Description + description: A description of what the function does, used by the model to choose when and how to call the function. + name: + type: string + uiOrder: 1 + title: Name + description: The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of + 64. + parameters: + type: object + uiOrder: 2 + title: Parameters + description: The parameters the functions accepts, described as a JSON Schema object. Omitting parameters defines a function with an empty + parameter list. + strict: + type: boolean + default: false + uiOrder: 3 + title: Strict + description: Whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact + schema defined in the parameters field. + tool-choice: + description: Controls which (if any) tool is called by the model. 'none' means the model will not call any tool and instead generates a message. + 'auto' means the model can pick between generating a message or calling one or more tools. 'required' means the model must call one or more tools. + uiOrder: 14 + title: Tool Choice + oneOf: + - type: string + enum: [none, auto, required] + uiOrder: 0 + title: Tool Choice + description: none means the model will not call any tool and instead generates a message. auto means the model can pick between generating a + message or calling one or more tools. required means the model must call one or more tools. + - type: object + uiOrder: 0 + title: Tool Choice + description: Specifies a tool the model should use. Use to force the model to call a specific function. + required: + - function + properties: + function: + uiOrder: 0 + title: Function + description: The function to call. 
+ type: object + required: + - name + properties: + name: + type: string + uiOrder: 0 + title: Name + description: The name of the function to call. required: - model - prompt @@ -369,6 +459,37 @@ TASK_TEXT_GENERATION: description: Texts. title: Texts type: array + tool-calls: + description: The tool calls generated by the model, such as function calls. + uiOrder: 1 + items: + type: object + properties: + type: + type: string + uiOrder: 0 + title: Type + description: The type of the tool. Currently, only function is supported. + function: + type: object + uiOrder: 1 + title: Function + description: The function that the model called. + properties: + name: + type: string + uiOrder: 0 + title: Name + description: The name of the function to call. + arguments: + type: string + uiOrder: 1 + title: Arguments + description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate + valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your + function. + title: Tool Calls + type: array usage: description: Usage statistics related to the query. uiOrder: 1 @@ -388,6 +509,49 @@ TASK_TEXT_GENERATION: description: Total number of tokens used (prompt). uiOrder: 2 type: integer + prompt-token-details: + title: Prompt token details + description: Breakdown of tokens used in the prompt. + uiOrder: 3 + type: object + properties: + audio-tokens: + title: Audio tokens + description: Audio input tokens present in the prompt. + uiOrder: 0 + type: integer + cached-tokens: + title: Cached tokens + description: Cached tokens present in the prompt. + uiOrder: 1 + type: integer + completion-token-details: + title: Completion token details + description: Breakdown of tokens used in a completion. + uiOrder: 4 + type: object + properties: + reasoning-tokens: + title: Reasoning tokens + description: Tokens generated by the model for reasoning. + uiOrder: 0 + type: integer + audio-tokens: + title: Audio tokens + description: Audio input tokens generated by the model. + uiOrder: 1 + type: integer + accepted-prediction-tokens: + title: Accepted prediction tokens + description: When using Predicted Outputs, the number of tokens in the prediction that appeared in the completion. + uiOrder: 2 + type: integer + rejected-prediction-tokens: + title: Rejected prediction tokens + description: When using Predicted Outputs, the number of tokens in the prediction that did not appear in the completion. However, like reasoning + tokens, these tokens are still counted in the total completion tokens for purposes of billing, output, and context window limits. 
+ uiOrder: 3 + type: integer required: - total-tokens title: Usage diff --git a/pkg/component/ai/openai/v0/io.go b/pkg/component/ai/openai/v0/io.go index 196a8905e..353e091dd 100644 --- a/pkg/component/ai/openai/v0/io.go +++ b/pkg/component/ai/openai/v0/io.go @@ -18,17 +18,45 @@ type taskTextGenerationInput struct { PresencePenalty *float32 `instill:"presence-penalty,default=0"` FrequencyPenalty *float32 `instill:"frequency-penalty,default=0"` ResponseFormat *responseFormatInputStruct `instill:"response-format"` + Prediction *predictionStruct `instill:"prediction"` + Tools []toolStruct `instill:"tools"` + ToolChoice format.Value `instill:"tool-choice"` } type taskTextGenerationOutput struct { - Texts []string `instill:"texts"` - Usage usage `instill:"usage"` + Texts []string `instill:"texts"` + ToolCalls []toolCall `instill:"tool-calls"` + Usage usage `instill:"usage"` +} + +type toolCall struct { + Type string `instill:"type"` + Function functionCall `instill:"function"` +} + +type functionCall struct { + Name string `instill:"name"` + Arguments string `instill:"arguments"` } type usage struct { - PromptTokens int `instill:"prompt-tokens"` - CompletionTokens int `instill:"completion-tokens"` - TotalTokens int `instill:"total-tokens"` + PromptTokens int `instill:"prompt-tokens"` + CompletionTokens int `instill:"completion-tokens"` + TotalTokens int `instill:"total-tokens"` + CompletionTokenDetails *completionTokenDetails `instill:"completion-token-details"` + PromptTokenDetails *promptTokenDetails `instill:"prompt-token-details"` +} + +type promptTokenDetails struct { + AudioTokens int `instill:"audio-tokens"` + CachedTokens int `instill:"cached-tokens"` +} + +type completionTokenDetails struct { + ReasoningTokens int `instill:"reasoning-tokens"` + AudioTokens int `instill:"audio-tokens"` + AcceptedPredictionTokens int `instill:"accepted-prediction-tokens"` + RejectedPredictionTokens int `instill:"rejected-prediction-tokens"` } type responseFormatInputStruct struct { @@ -36,6 +64,21 @@ type responseFormatInputStruct struct { JSONSchema string `instill:"json-schema"` } +type predictionStruct struct { + Content string `instill:"content"` +} + +type toolStruct struct { + Function functionStruct `instill:"function"` +} + +type functionStruct struct { + Description string `instill:"description"` + Name string `instill:"name"` + Parameters map[string]format.Value `instill:"parameters"` + Strict *bool `instill:"strict,default=false"` +} + type textMessage struct { Role string `instill:"role"` Content []textMessageContent `instill:"content"` diff --git a/pkg/component/ai/openai/v0/listmodels.go b/pkg/component/ai/openai/v0/list_models.go similarity index 100% rename from pkg/component/ai/openai/v0/listmodels.go rename to pkg/component/ai/openai/v0/list_models.go diff --git a/pkg/component/ai/openai/v0/main.go b/pkg/component/ai/openai/v0/main.go index 5a3323ed6..efb305127 100644 --- a/pkg/component/ai/openai/v0/main.go +++ b/pkg/component/ai/openai/v0/main.go @@ -21,6 +21,7 @@ import ( "github.com/instill-ai/pipeline-backend/pkg/component/internal/util/httpclient" "github.com/instill-ai/pipeline-backend/pkg/component/resources/schemas" "github.com/instill-ai/pipeline-backend/pkg/data" + "github.com/instill-ai/pipeline-backend/pkg/data/format" "github.com/instill-ai/x/errmsg" ) @@ -214,6 +215,39 @@ func (e *execution) worker(ctx context.Context, client *httpclient.Client, job * } messages = append(messages, multiModalMessage{Role: "user", Content: userContents}) + tools := make([]toolReqStruct, 
len(inputStruct.Tools)) + for i, tool := range inputStruct.Tools { + params := make(map[string]any) + for k, v := range tool.Function.Parameters { + params[k], err = v.ToJSONValue() + if err != nil { + job.Error.Error(ctx, err) + return + } + } + tools[i] = toolReqStruct{ + Type: "function", + Function: functionReqStruct{ + Name: tool.Function.Name, + Parameters: params, + Strict: tool.Function.Strict, + Description: tool.Function.Description, + }, + } + } + + var toolChoice any + switch choice := inputStruct.ToolChoice.(type) { + case data.Map: + toolChoice, err = choice.ToJSONValue() + if err != nil { + job.Error.Error(ctx, err) + return + } + case format.String: + toolChoice = choice.String() + } + body := textCompletionReq{ Messages: messages, Model: inputStruct.Model, @@ -229,6 +263,19 @@ func (e *execution) worker(ctx context.Context, client *httpclient.Client, job * }, } + if inputStruct.Prediction != nil { + body.Prediction = &predictionReqStruct{ + Type: "content", + Content: inputStruct.Prediction.Content, + } + } + if len(tools) > 0 { + body.Tools = tools + } + if toolChoice != nil { + body.ToolChoice = toolChoice + } + // workaround, the OpenAI service can not accept this param if inputStruct.Model != "gpt-4-vision-preview" { if inputStruct.ResponseFormat != nil { @@ -277,11 +324,11 @@ func (e *execution) worker(ctx context.Context, client *httpclient.Client, job * scanner := bufio.NewScanner(restyResp.RawResponse.Body) outputStruct := taskTextGenerationOutput{} + toolCalls := make(map[int]*toolCall) u := usage{} count := 0 for scanner.Scan() { - res := scanner.Text() if len(res) == 0 { @@ -289,8 +336,8 @@ func (e *execution) worker(ctx context.Context, client *httpclient.Client, job * } res = strings.Replace(res, "data: ", "", 1) - // Note: Since we haven’t provided delta updates for the - // messages, we’re reducing the number of event streams by + // Note: Since we haven't provided delta updates for the + // messages, we're reducing the number of event streams by // returning the response every ten iterations. if count == 3 || res == "[DONE]" { err = job.Output.WriteData(ctx, outputStruct) @@ -313,23 +360,55 @@ func (e *execution) worker(ctx context.Context, client *httpclient.Client, job * } for _, c := range response.Choices { - // Now, there is no document to describe it. - // But, when we test it, we found that the choices idx is not in order. - // So, we need to get idx from the choice, and the len of the choices is always 1. 
responseIdx := c.Index if len(outputStruct.Texts) <= responseIdx { outputStruct.Texts = append(outputStruct.Texts, "") } outputStruct.Texts[responseIdx] += c.Delta.Content + // Collect tool calls + for _, t := range c.Delta.ToolCalls { + if _, exists := toolCalls[t.Index]; !exists { + toolCalls[t.Index] = &toolCall{ + Type: t.Type, + Function: functionCall{ + Name: t.Function.Name, + Arguments: t.Function.Arguments, + }, + } + } else { + // Append arguments for existing tool call + toolCalls[t.Index].Function.Arguments += t.Function.Arguments + } + } } u = usage{ PromptTokens: response.Usage.PromptTokens, CompletionTokens: response.Usage.CompletionTokens, TotalTokens: response.Usage.TotalTokens, + PromptTokenDetails: &promptTokenDetails{ + AudioTokens: response.Usage.PromptTokenDetails.AudioTokens, + CachedTokens: response.Usage.PromptTokenDetails.CachedTokens, + }, + CompletionTokenDetails: &completionTokenDetails{ + ReasoningTokens: response.Usage.CompletionTokenDetails.ReasoningTokens, + AudioTokens: response.Usage.CompletionTokenDetails.AudioTokens, + AcceptedPredictionTokens: response.Usage.CompletionTokenDetails.AcceptedPredictionTokens, + RejectedPredictionTokens: response.Usage.CompletionTokenDetails.RejectedPredictionTokens, + }, } + } + // Convert collected tool calls to output format + for _, tc := range toolCalls { + outputStruct.ToolCalls = append(outputStruct.ToolCalls, toolCall{ + Type: tc.Type, + Function: functionCall{ + Name: tc.Function.Name, + Arguments: tc.Function.Arguments, + }, + }) } outputStruct.Usage = u diff --git a/pkg/component/ai/openai/v0/audiotranscriptions.go b/pkg/component/ai/openai/v0/task_audio_transcriptions.go similarity index 100% rename from pkg/component/ai/openai/v0/audiotranscriptions.go rename to pkg/component/ai/openai/v0/task_audio_transcriptions.go diff --git a/pkg/component/ai/openai/v0/textembeddings.go b/pkg/component/ai/openai/v0/task_text_embeddings.go similarity index 100% rename from pkg/component/ai/openai/v0/textembeddings.go rename to pkg/component/ai/openai/v0/task_text_embeddings.go diff --git a/pkg/component/ai/openai/v0/textgeneration.go b/pkg/component/ai/openai/v0/task_text_generation.go similarity index 53% rename from pkg/component/ai/openai/v0/textgeneration.go rename to pkg/component/ai/openai/v0/task_text_generation.go index 7bb3b460a..8e867b484 100644 --- a/pkg/component/ai/openai/v0/textgeneration.go +++ b/pkg/component/ai/openai/v0/task_text_generation.go @@ -22,6 +22,26 @@ type textCompletionReq struct { ResponseFormat *responseFormatReqStruct `json:"response_format,omitempty"` Stream bool `json:"stream"` StreamOptions *streamOptions `json:"stream_options,omitempty"` + Prediction *predictionReqStruct `json:"prediction,omitempty"` + Tools []toolReqStruct `json:"tools,omitempty"` + ToolChoice any `json:"tool_choice,omitempty"` +} + +type predictionReqStruct struct { + Type string `json:"type"` + Content string `json:"content"` +} + +type toolReqStruct struct { + Type string `json:"type"` + Function functionReqStruct `json:"function"` +} + +type functionReqStruct struct { + Description string `json:"description"` + Name string `json:"name"` + Parameters map[string]any `json:"parameters"` + Strict *bool `json:"strict"` } type streamOptions struct { @@ -62,8 +82,21 @@ type textCompletionStreamResp struct { } type outputMessage struct { - Role string `json:"role"` - Content string `json:"content"` + Role string `json:"role"` + Content string `json:"content"` + ToolCalls []toolCallResp `json:"tool_calls,omitempty"` +} + 
+type toolCallResp struct { + Index int `json:"index"` + ID string `json:"id"` + Type string `json:"type"` + Function functionCallResp `json:"function"` +} + +type functionCallResp struct { + Name string `json:"name"` + Arguments string `json:"arguments"` } type streamChoices struct { @@ -73,7 +106,21 @@ type streamChoices struct { } type usageOpenAI struct { - PromptTokens int `json:"prompt_tokens"` - CompletionTokens int `json:"completion_tokens"` - TotalTokens int `json:"total_tokens"` + PromptTokens int `json:"prompt_tokens"` + CompletionTokens int `json:"completion_tokens"` + TotalTokens int `json:"total_tokens"` + PromptTokenDetails promptTokenDetailsOpenAI `json:"prompt_token_details"` + CompletionTokenDetails completionTokenDetailsOpenAI `json:"completion_tokens_details"` +} + +type promptTokenDetailsOpenAI struct { + AudioTokens int `json:"audio_tokens"` + CachedTokens int `json:"cached_tokens"` +} + +type completionTokenDetailsOpenAI struct { + ReasoningTokens int `json:"reasoning_tokens"` + AudioTokens int `json:"audio_tokens"` + AcceptedPredictionTokens int `json:"accepted_prediction_tokens"` + RejectedPredictionTokens int `json:"rejected_prediction_tokens"` } diff --git a/pkg/component/ai/openai/v0/texttoimage.go b/pkg/component/ai/openai/v0/task_text_to_image.go similarity index 100% rename from pkg/component/ai/openai/v0/texttoimage.go rename to pkg/component/ai/openai/v0/task_text_to_image.go diff --git a/pkg/component/ai/openai/v0/texttospeech.go b/pkg/component/ai/openai/v0/task_text_to_speech.go similarity index 100% rename from pkg/component/ai/openai/v0/texttospeech.go rename to pkg/component/ai/openai/v0/task_text_to_speech.go diff --git a/pkg/component/application/github/v0/event_utils.go b/pkg/component/application/github/v0/event_utils.go index 7934846ca..d9d22cb8f 100644 --- a/pkg/component/application/github/v0/event_utils.go +++ b/pkg/component/application/github/v0/event_utils.go @@ -40,26 +40,7 @@ func convertRawRepository(r rawRepository) repository { } func convertRawUser(r rawUser) user { - return user{ - Login: r.Login, - ID: r.ID, - NodeID: r.NodeID, - AvatarURL: r.AvatarURL, - GravatarID: r.GravatarID, - URL: r.URL, - HTMLURL: r.HTMLURL, - FollowersURL: r.FollowersURL, - FollowingURL: r.FollowingURL, - GistsURL: r.GistsURL, - StarredURL: r.StarredURL, - SubscriptionsURL: r.SubscriptionsURL, - OrganizationsURL: r.OrganizationsURL, - ReposURL: r.ReposURL, - EventsURL: r.EventsURL, - ReceivedEventsURL: r.ReceivedEventsURL, - Type: r.Type, - SiteAdmin: r.SiteAdmin, - } + return user(r) } func convertRawLicense(r *rawLicense) *license { diff --git a/pkg/component/application/smartlead/v0/task_add_leads.go b/pkg/component/application/smartlead/v0/task_add_leads.go index f10f263aa..19f4012ab 100644 --- a/pkg/component/application/smartlead/v0/task_add_leads.go +++ b/pkg/component/application/smartlead/v0/task_add_leads.go @@ -64,13 +64,7 @@ func (e *execution) addLeads(ctx context.Context, job *base.Job) error { return err } - outputStruct := addLeadsOutput{ - UploadCount: response.UploadCount, - TotalLeads: response.TotalLeads, - AlreadyAddedToCampaign: response.AlreadyAddedToCampaign, - InvalidEmailCount: response.InvalidEmailCount, - Error: response.Error, - } + outputStruct := addLeadsOutput(response) err = job.Output.WriteData(ctx, outputStruct) diff --git a/pkg/component/application/smartlead/v0/task_create_campaign.go b/pkg/component/application/smartlead/v0/task_create_campaign.go index f4e854903..1f0ccdfe5 100644 --- 
a/pkg/component/application/smartlead/v0/task_create_campaign.go +++ b/pkg/component/application/smartlead/v0/task_create_campaign.go @@ -67,9 +67,7 @@ func (e *execution) createCampaign(ctx context.Context, job *base.Job) error { } func buildCreateCampaignRequest(input createCampaignInput) createCampaignReq { - return createCampaignReq{ - Name: input.Name, - } + return createCampaignReq(input) } type createCampaignReq struct { diff --git a/pkg/component/tools/compogen/pkg/gen/schema.go b/pkg/component/tools/compogen/pkg/gen/schema.go index f9ea22ce3..5b5de4d91 100644 --- a/pkg/component/tools/compogen/pkg/gen/schema.go +++ b/pkg/component/tools/compogen/pkg/gen/schema.go @@ -30,7 +30,7 @@ type property struct { type objectSchema struct { Description string `json:"description"` - Properties map[string]property `json:"properties" validate:"gt=0,dive"` + Properties map[string]property `json:"properties" validate:"dive"` Title string `json:"title" validate:"required"` Required []string `json:"required"` } diff --git a/pkg/component/tools/compogen/pkg/gen/schema_test.go b/pkg/component/tools/compogen/pkg/gen/schema_test.go index d33cdc9f2..5abb19528 100644 --- a/pkg/component/tools/compogen/pkg/gen/schema_test.go +++ b/pkg/component/tools/compogen/pkg/gen/schema_test.go @@ -46,13 +46,6 @@ func TestObjectSchema_Validate(t *testing.T) { modifier func(*objectSchema) wantErr string }{ - { - name: "nok - no properties", - modifier: func(rs *objectSchema) { - rs.Properties = map[string]property{} - }, - wantErr: "objectSchema.Properties: Properties field doesn't reach the minimum value / number of elements", - }, { name: "nok - no title", modifier: func(rs *objectSchema) {
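With streaming enabled, a tool call arrives in fragments: the first delta for a given index carries the call's type and function name, and later deltas append chunks of the JSON `arguments` string. The worker in `main.go` collects these in a `map[int]*toolCall` keyed by delta index. Here is a self-contained sketch of that accumulation pattern, with simplified stand-ins for the PR's types and a hypothetical two-chunk stream:

```go
package main

import "fmt"

// Simplified stand-ins for the toolCall / functionCall types in io.go.
type functionCall struct {
	Name      string
	Arguments string
}

type toolCall struct {
	Type     string
	Function functionCall
}

// delta mirrors the per-chunk tool-call fragment in the streamed response.
type delta struct {
	Index    int
	Type     string
	Function functionCall
}

// accumulate applies the worker's rule: the first chunk for an index creates
// the call; later chunks only extend the arguments string.
func accumulate(stream []delta) map[int]*toolCall {
	calls := map[int]*toolCall{}
	for _, d := range stream {
		if _, exists := calls[d.Index]; !exists {
			calls[d.Index] = &toolCall{
				Type:     d.Type,
				Function: functionCall{Name: d.Function.Name, Arguments: d.Function.Arguments},
			}
			continue
		}
		calls[d.Index].Function.Arguments += d.Function.Arguments
	}
	return calls
}

func main() {
	// A hypothetical stream delivering one function call in two chunks.
	stream := []delta{
		{Index: 0, Type: "function", Function: functionCall{Name: "get_weather", Arguments: `{"ci`}},
		{Index: 0, Function: functionCall{Arguments: `ty":"Paris"}`}},
	}
	for i, c := range accumulate(stream) {
		fmt.Printf("call %d: %s(%s)\n", i, c.Function.Name, c.Function.Arguments)
	}
}
```

One consequence of collecting into a map is that Go randomizes map iteration order, so the final `tool-calls` slice is not guaranteed to be sorted by delta index.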
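The `convertRawUser` and smartlead refactors above rely on Go's struct conversion rule: a value converts directly between two struct types whose fields match in name, order, and type, with struct tags ignored for convertibility purposes (Go 1.8+). A hypothetical minimal pair showing why `user(r)` compiles (the real types carry many more fields):

```go
package main

import "fmt"

// Hypothetical trimmed versions of rawUser/user; only the tags differ.
type rawUser struct {
	Login string `json:"login"`
	ID    int64  `json:"id"`
}

type user struct {
	Login string `instill:"login"`
	ID    int64  `instill:"id"`
}

func main() {
	r := rawUser{Login: "octocat", ID: 1}
	u := user(r) // replaces the field-by-field copy removed in this diff
	fmt.Println(u.Login, u.ID)
}
```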