Skip to content

Commit

Permalink
feat: Updated OpenAPI spec
Browse files Browse the repository at this point in the history
  • Loading branch information
github-actions[bot] committed Feb 4, 2025
1 parent f323429 commit 5d3bc73
Show file tree
Hide file tree
Showing 6 changed files with 32 additions and 9 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -228,8 +228,7 @@ partial void ProcessOpenaiCompletionsResponseContent(
/// </param>
/// <param name="maxTokens">
/// The maximum number of tokens to generate in the completion.<br/>
/// The total length of input tokens and generated tokens is limited by the model's context length.If explicitly set to None it will be the model's max context length minus input length.<br/>
/// Default Value: 512
/// The total length of input tokens and generated tokens is limited by the model's context length. If explicitly set to None it will be the model's max context length minus input length.
/// </param>
/// <param name="temperature">
/// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic<br/>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -35,8 +35,7 @@ public partial interface IDeepInfraApi
/// </param>
/// <param name="maxTokens">
/// The maximum number of tokens to generate in the completion.<br/>
/// The total length of input tokens and generated tokens is limited by the model's context length.If explicitly set to None it will be the model's max context length minus input length.<br/>
/// Default Value: 512
/// The total length of input tokens and generated tokens is limited by the model's context length. If explicitly set to None it will be the model's max context length minus input length.
/// </param>
/// <param name="temperature">
/// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic<br/>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,18 @@ public enum ModelDocBlockKey
/// <summary>
///
/// </summary>
OpenaiImagesHttp,
/// <summary>
///
/// </summary>
OpenaiImagesPython,
/// <summary>
///
/// </summary>
OpenaiImagesJs,
/// <summary>
///
/// </summary>
OpenaiCompPython,
/// <summary>
///
Expand Down Expand Up @@ -170,6 +182,9 @@ public static string ToValueString(this ModelDocBlockKey value)
ModelDocBlockKey.OpenaiSpeechHttp => "openai-speech-http",
ModelDocBlockKey.OpenaiSpeechPython => "openai-speech-python",
ModelDocBlockKey.OpenaiSpeechJs => "openai-speech-js",
ModelDocBlockKey.OpenaiImagesHttp => "openai-images-http",
ModelDocBlockKey.OpenaiImagesPython => "openai-images-python",
ModelDocBlockKey.OpenaiImagesJs => "openai-images-js",
ModelDocBlockKey.OpenaiCompPython => "openai-comp-python",
ModelDocBlockKey.OpenaiCompHttp => "openai-comp-http",
ModelDocBlockKey.OpenaiCompJs => "openai-comp-js",
Expand Down Expand Up @@ -214,6 +229,9 @@ public static string ToValueString(this ModelDocBlockKey value)
"openai-speech-http" => ModelDocBlockKey.OpenaiSpeechHttp,
"openai-speech-python" => ModelDocBlockKey.OpenaiSpeechPython,
"openai-speech-js" => ModelDocBlockKey.OpenaiSpeechJs,
"openai-images-http" => ModelDocBlockKey.OpenaiImagesHttp,
"openai-images-python" => ModelDocBlockKey.OpenaiImagesPython,
"openai-images-js" => ModelDocBlockKey.OpenaiImagesJs,
"openai-comp-python" => ModelDocBlockKey.OpenaiCompPython,
"openai-comp-http" => ModelDocBlockKey.OpenaiCompHttp,
"openai-comp-js" => ModelDocBlockKey.OpenaiCompJs,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -28,8 +28,7 @@ public sealed partial class OpenAICompletionsIn

/// <summary>
/// The maximum number of tokens to generate in the completion.<br/>
/// The total length of input tokens and generated tokens is limited by the model's context length.If explicitly set to None it will be the model's max context length minus input length.<br/>
/// Default Value: 512
/// The total length of input tokens and generated tokens is limited by the model's context length. If explicitly set to None it will be the model's max context length minus input length.
/// </summary>
[global::System.Text.Json.Serialization.JsonPropertyName("max_tokens")]
public int? MaxTokens { get; set; }
Expand Down Expand Up @@ -152,8 +151,7 @@ public sealed partial class OpenAICompletionsIn
/// </param>
/// <param name="maxTokens">
/// The maximum number of tokens to generate in the completion.<br/>
/// The total length of input tokens and generated tokens is limited by the model's context length.If explicitly set to None it will be the model's max context length minus input length.<br/>
/// Default Value: 512
/// The total length of input tokens and generated tokens is limited by the model's context length. If explicitly set to None it will be the model's max context length minus input length.
/// </param>
/// <param name="temperature">
/// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic<br/>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,10 @@ public enum SchemaVariantKey
/// <summary>
///
/// </summary>
OpenaiImages,
/// <summary>
///
/// </summary>
CreateVoice,
/// <summary>
///
Expand Down Expand Up @@ -71,6 +75,7 @@ public static string ToValueString(this SchemaVariantKey value)
SchemaVariantKey.OpenaiChatCompletions => "openai-chat-completions",
SchemaVariantKey.OpenaiEmbeddings => "openai-embeddings",
SchemaVariantKey.OpenaiSpeechToText => "openai-speech-to-text",
SchemaVariantKey.OpenaiImages => "openai-images",
SchemaVariantKey.CreateVoice => "create-voice",
SchemaVariantKey.ReadVoice => "read-voice",
SchemaVariantKey.UpdateVoice => "update-voice",
Expand All @@ -92,6 +97,7 @@ public static string ToValueString(this SchemaVariantKey value)
"openai-chat-completions" => SchemaVariantKey.OpenaiChatCompletions,
"openai-embeddings" => SchemaVariantKey.OpenaiEmbeddings,
"openai-speech-to-text" => SchemaVariantKey.OpenaiSpeechToText,
"openai-images" => SchemaVariantKey.OpenaiImages,
"create-voice" => SchemaVariantKey.CreateVoice,
"read-voice" => SchemaVariantKey.ReadVoice,
"update-voice" => SchemaVariantKey.UpdateVoice,
Expand Down
5 changes: 4 additions & 1 deletion src/libs/DeepInfra/openapi.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -4384,6 +4384,9 @@ components:
- openai-speech-http
- openai-speech-python
- openai-speech-js
- openai-images-http
- openai-images-python
- openai-images-js
- openai-comp-python
- openai-comp-http
- openai-comp-js
Expand Down Expand Up @@ -5075,7 +5078,6 @@ components:
exclusiveMinimum: true
type: integer
description: "The maximum number of tokens to generate in the completion.\n\nThe total length of input tokens and generated tokens is limited by the model's context length. If explicitly set to None it will be the model's max context length minus input length."
default: 512
temperature:
title: Temperature
maximum: 2.0
Expand Down Expand Up @@ -5500,6 +5502,7 @@ components:
- openai-chat-completions
- openai-embeddings
- openai-speech-to-text
- openai-images
- create-voice
- read-voice
- update-voice
Expand Down

0 comments on commit 5d3bc73

Please sign in to comment.