diff --git a/.mock/definition/api.yml b/.mock/definition/api.yml index 79b9583..a7923d9 100644 --- a/.mock/definition/api.yml +++ b/.mock/definition/api.yml @@ -1,12 +1,12 @@ name: api error-discrimination: - strategy: status-code + strategy: status-code default-environment: Production environments: - Production: https://api.hume.ai + Production: https://api.hume.ai auth: HeaderAuthScheme auth-schemes: - HeaderAuthScheme: - name: apiKey - header: X-Hume-Api-Key - type: optional + HeaderAuthScheme: + name: apiKey + header: X-Hume-Api-Key + type: optional diff --git a/.mock/definition/empathic-voice/__package__.yml b/.mock/definition/empathic-voice/__package__.yml index e52dd1c..982f528 100644 --- a/.mock/definition/empathic-voice/__package__.yml +++ b/.mock/definition/empathic-voice/__package__.yml @@ -1,2778 +1,2778 @@ errors: - BadRequestError: - status-code: 400 - type: ErrorResponse - docs: Bad Request + BadRequestError: + status-code: 400 + type: ErrorResponse + docs: Bad Request types: - ErrorResponse: - properties: - error: optional - message: optional - source: - openapi: stenographer-openapi.json - ReturnUserDefinedToolToolType: - enum: - - BUILTIN - - FUNCTION - docs: >- - Type of Tool. Either `BUILTIN` for natively implemented tools, like web - search, or `FUNCTION` for user-defined tools. - source: - openapi: stenographer-openapi.json - ReturnUserDefinedToolVersionType: - enum: - - FIXED - - LATEST - docs: >- - Versioning method for a Tool. Either `FIXED` for using a fixed version - number or `LATEST` for auto-updating to the latest version. - source: - openapi: stenographer-openapi.json - ReturnUserDefinedTool: - docs: A specific tool version returned from the server - properties: - tool_type: - type: ReturnUserDefinedToolToolType - docs: >- - Type of Tool. Either `BUILTIN` for natively implemented tools, like - web search, or `FUNCTION` for user-defined tools. - id: - type: string - docs: Identifier for a Tool. Formatted as a UUID. - version: - type: integer - docs: >- - Version number for a Tool. - - - Tools, Configs, Custom Voices, and Prompts are versioned. This - versioning system supports iterative development, allowing you to - progressively refine tools and revert to previous versions if needed. - - - Version numbers are integer values representing different iterations - of the Tool. Each update to the Tool increments its version number. - version_type: - type: ReturnUserDefinedToolVersionType - docs: >- - Versioning method for a Tool. Either `FIXED` for using a fixed version - number or `LATEST` for auto-updating to the latest version. - version_description: - type: optional - docs: An optional description of the Tool version. - name: - type: string - docs: Name applied to all versions of a particular Tool. - created_on: - type: long - docs: >- - Time at which the Tool was created. Measured in seconds since the Unix - epoch. - modified_on: - type: long - docs: >- - Time at which the Tool was last modified. Measured in seconds since - the Unix epoch. - fallback_content: - type: optional - docs: >- - Optional text passed to the supplemental LLM in place of the tool call - result. The LLM then uses this text to generate a response back to the - user, ensuring continuity in the conversation if the Tool errors. - description: - type: optional - docs: >- - An optional description of what the Tool does, used by the - supplemental LLM to choose when and how to call the function. 
- parameters: - type: string - docs: >- - Stringified JSON defining the parameters used by this version of the - Tool. - - - These parameters define the inputs needed for the Tool’s execution, - including the expected data type and description for each input field. - Structured as a stringified JSON schema, this format ensures the tool - receives data in the expected format. - source: - openapi: stenographer-openapi.json - ReturnPromptVersionType: - enum: - - FIXED - - LATEST - docs: >- - Versioning method for a Prompt. Either `FIXED` for using a fixed version - number or `LATEST` for auto-updating to the latest version. - source: - openapi: stenographer-openapi.json - ReturnPrompt: - docs: A Prompt associated with this Config. - properties: - id: - type: string - docs: Identifier for a Prompt. Formatted as a UUID. - version: - type: integer - docs: >- - Version number for a Prompt. - - - Prompts, Configs, Custom Voices, and Tools are versioned. This - versioning system supports iterative development, allowing you to - progressively refine prompts and revert to previous versions if - needed. - - - Version numbers are integer values representing different iterations - of the Prompt. Each update to the Prompt increments its version - number. - version_type: - type: ReturnPromptVersionType - docs: >- - Versioning method for a Prompt. Either `FIXED` for using a fixed - version number or `LATEST` for auto-updating to the latest version. - version_description: - type: optional - docs: An optional description of the Prompt version. - name: - type: string - docs: Name applied to all versions of a particular Prompt. - created_on: - type: long - docs: >- - Time at which the Prompt was created. Measured in seconds since the - Unix epoch. - modified_on: - type: long - docs: >- - Time at which the Prompt was last modified. Measured in seconds since - the Unix epoch. - text: - type: string - docs: >- - Instructions used to shape EVI’s behavior, responses, and style. - - - You can use the Prompt to define a specific goal or role for EVI, - specifying how it should act or what it should focus on during the - conversation. For example, EVI can be instructed to act as a customer - support representative, a fitness coach, or a travel advisor, each - with its own set of behaviors and response styles. - - - For help writing a system prompt, see our [Prompting - Guide](/docs/empathic-voice-interface-evi/prompting). - source: - openapi: stenographer-openapi.json - PostedCustomVoiceBaseVoice: - enum: - - ITO - - KORA - - DACHER - - AURA - - FINN - - STELLA - - WHIMSY + ErrorResponse: + properties: + error: optional + message: optional + source: + openapi: stenographer-openapi.json + ReturnUserDefinedToolToolType: + enum: + - BUILTIN + - FUNCTION + docs: >- + Type of Tool. Either `BUILTIN` for natively implemented tools, like web + search, or `FUNCTION` for user-defined tools. + source: + openapi: stenographer-openapi.json + ReturnUserDefinedToolVersionType: + enum: + - FIXED + - LATEST + docs: >- + Versioning method for a Tool. Either `FIXED` for using a fixed version + number or `LATEST` for auto-updating to the latest version. + source: + openapi: stenographer-openapi.json + ReturnUserDefinedTool: + docs: A specific tool version returned from the server + properties: + tool_type: + type: ReturnUserDefinedToolToolType + docs: >- + Type of Tool. Either `BUILTIN` for natively implemented tools, like + web search, or `FUNCTION` for user-defined tools. + id: + type: string + docs: Identifier for a Tool. 
Formatted as a UUID. + version: + type: integer + docs: >- + Version number for a Tool. + + + Tools, Configs, Custom Voices, and Prompts are versioned. This + versioning system supports iterative development, allowing you to + progressively refine tools and revert to previous versions if needed. + + + Version numbers are integer values representing different iterations + of the Tool. Each update to the Tool increments its version number. + version_type: + type: ReturnUserDefinedToolVersionType + docs: >- + Versioning method for a Tool. Either `FIXED` for using a fixed version + number or `LATEST` for auto-updating to the latest version. + version_description: + type: optional + docs: An optional description of the Tool version. + name: + type: string + docs: Name applied to all versions of a particular Tool. + created_on: + type: long + docs: >- + Time at which the Tool was created. Measured in seconds since the Unix + epoch. + modified_on: + type: long + docs: >- + Time at which the Tool was last modified. Measured in seconds since + the Unix epoch. + fallback_content: + type: optional + docs: >- + Optional text passed to the supplemental LLM in place of the tool call + result. The LLM then uses this text to generate a response back to the + user, ensuring continuity in the conversation if the Tool errors. + description: + type: optional + docs: >- + An optional description of what the Tool does, used by the + supplemental LLM to choose when and how to call the function. + parameters: + type: string + docs: >- + Stringified JSON defining the parameters used by this version of the + Tool. + + + These parameters define the inputs needed for the Tool’s execution, + including the expected data type and description for each input field. + Structured as a stringified JSON schema, this format ensures the tool + receives data in the expected format. + source: + openapi: stenographer-openapi.json + ReturnPromptVersionType: + enum: + - FIXED + - LATEST + docs: >- + Versioning method for a Prompt. Either `FIXED` for using a fixed version + number or `LATEST` for auto-updating to the latest version. + source: + openapi: stenographer-openapi.json + ReturnPrompt: + docs: A Prompt associated with this Config. + properties: + id: + type: string + docs: Identifier for a Prompt. Formatted as a UUID. + version: + type: integer + docs: >- + Version number for a Prompt. + + + Prompts, Configs, Custom Voices, and Tools are versioned. This + versioning system supports iterative development, allowing you to + progressively refine prompts and revert to previous versions if + needed. + + + Version numbers are integer values representing different iterations + of the Prompt. Each update to the Prompt increments its version + number. + version_type: + type: ReturnPromptVersionType + docs: >- + Versioning method for a Prompt. Either `FIXED` for using a fixed + version number or `LATEST` for auto-updating to the latest version. + version_description: + type: optional + docs: An optional description of the Prompt version. + name: + type: string + docs: Name applied to all versions of a particular Prompt. + created_on: + type: long + docs: >- + Time at which the Prompt was created. Measured in seconds since the + Unix epoch. + modified_on: + type: long + docs: >- + Time at which the Prompt was last modified. Measured in seconds since + the Unix epoch. + text: + type: string + docs: >- + Instructions used to shape EVI’s behavior, responses, and style. 
+ + + You can use the Prompt to define a specific goal or role for EVI, + specifying how it should act or what it should focus on during the + conversation. For example, EVI can be instructed to act as a customer + support representative, a fitness coach, or a travel advisor, each + with its own set of behaviors and response styles. + + + For help writing a system prompt, see our [Prompting + Guide](/docs/empathic-voice-interface-evi/prompting). + source: + openapi: stenographer-openapi.json + PostedCustomVoiceBaseVoice: + enum: + - ITO + - KORA + - DACHER + - AURA + - FINN + - STELLA + - WHIMSY + docs: Specifies the base voice used to create the Custom Voice. + source: + openapi: stenographer-openapi.json + PostedCustomVoiceParameters: + docs: >- + The specified attributes of a Custom Voice. + + + If no parameters are specified then all attributes will be set to their + defaults, meaning no modfications will be made to the base voice. + properties: + gender: + type: optional + docs: >- + The vocalization of gender, ranging between masculine and feminine. + + + The default value is `0`, with a minimum of `-100` (more masculine) + and a maximum of `100` (more feminine). A value of `0` leaves this + parameter unchanged from the base voice. + huskiness: + type: optional + docs: >- + The texture of the voice, ranging between bright and husky. + + + The default value is `0`, with a minimum of `-100` (brighter) and a + maximum of `100` (huskier). A value of `0` leaves this parameter + unchanged from the base voice. + nasality: + type: optional + docs: >- + The openness of the voice, ranging between resonant and nasal. + + + The default value is `0`, with a minimum of `-100` (more resonant) and + a maximum of `100` (more nasal). A value of `0` leaves this parameter + unchanged from the base voice. + pitch: + type: optional + docs: >- + The frequency of the voice, ranging between low and high. + + + The default value is `0`, with a minimum of `-100` (lower) and a + maximum of `100` (higher). A value of `0` leaves this parameter + unchanged from the base voice. + source: + openapi: stenographer-openapi.json + PostedCustomVoice: + docs: >- + A Custom Voice specification to be associated with this Config. + + + If a Custom Voice specification is not provided then the + [name](/reference/empathic-voice-interface-evi/configs/create-config#request.body.voice.name) + of a base voice or previously created Custom Voice must be provided. + + See our [Voices guide](/docs/empathic-voice-interface-evi/voices) for a tutorial on how to craft a Custom Voice. + properties: + name: + type: string + docs: >- + The name of the Custom Voice. Maximum length of 75 characters. Will be + converted to all-uppercase. (e.g., "sample voice" becomes "SAMPLE + VOICE") + base_voice: + type: PostedCustomVoiceBaseVoice docs: Specifies the base voice used to create the Custom Voice. - source: - openapi: stenographer-openapi.json - PostedCustomVoiceParameters: - docs: >- - The specified attributes of a Custom Voice. - - - If no parameters are specified then all attributes will be set to their - defaults, meaning no modfications will be made to the base voice. - properties: - gender: - type: optional - docs: >- - The vocalization of gender, ranging between masculine and feminine. - - - The default value is `0`, with a minimum of `-100` (more masculine) - and a maximum of `100` (more feminine). A value of `0` leaves this - parameter unchanged from the base voice. 
- huskiness: - type: optional - docs: >- - The texture of the voice, ranging between bright and husky. - - - The default value is `0`, with a minimum of `-100` (brighter) and a - maximum of `100` (huskier). A value of `0` leaves this parameter - unchanged from the base voice. - nasality: - type: optional - docs: >- - The openness of the voice, ranging between resonant and nasal. - - - The default value is `0`, with a minimum of `-100` (more resonant) and - a maximum of `100` (more nasal). A value of `0` leaves this parameter - unchanged from the base voice. - pitch: - type: optional - docs: >- - The frequency of the voice, ranging between low and high. - - - The default value is `0`, with a minimum of `-100` (lower) and a - maximum of `100` (higher). A value of `0` leaves this parameter - unchanged from the base voice. - source: - openapi: stenographer-openapi.json - PostedCustomVoice: - docs: >- - A Custom Voice specification to be associated with this Config. - - - If a Custom Voice specification is not provided then the - [name](/reference/empathic-voice-interface-evi/configs/create-config#request.body.voice.name) - of a base voice or previously created Custom Voice must be provided. - - See our [Voices guide](/docs/empathic-voice-interface-evi/voices) for a tutorial on how to craft a Custom Voice. - properties: - name: - type: string - docs: >- - The name of the Custom Voice. Maximum length of 75 characters. Will be - converted to all-uppercase. (e.g., "sample voice" becomes "SAMPLE - VOICE") - base_voice: - type: PostedCustomVoiceBaseVoice - docs: Specifies the base voice used to create the Custom Voice. - parameter_model: - type: literal<"20240715-4parameter"> - docs: >- - The name of the parameter model used to define which attributes are - used by the `parameters` field. Currently, only `20240715-4parameter` - is supported as the parameter model. - parameters: - type: optional - docs: >- - The specified attributes of a Custom Voice. - - - If no parameters are specified then all attributes will be set to - their defaults, meaning no modfications will be made to the base - voice. - source: - openapi: stenographer-openapi.json - ReturnCustomVoiceBaseVoice: - enum: - - ITO - - KORA - - DACHER - - AURA - - FINN - - STELLA - - WHIMSY + parameter_model: + type: literal<"20240715-4parameter"> + docs: >- + The name of the parameter model used to define which attributes are + used by the `parameters` field. Currently, only `20240715-4parameter` + is supported as the parameter model. + parameters: + type: optional + docs: >- + The specified attributes of a Custom Voice. + + + If no parameters are specified then all attributes will be set to + their defaults, meaning no modfications will be made to the base + voice. + source: + openapi: stenographer-openapi.json + ReturnCustomVoiceBaseVoice: + enum: + - ITO + - KORA + - DACHER + - AURA + - FINN + - STELLA + - WHIMSY + docs: The base voice used to create the Custom Voice. + source: + openapi: stenographer-openapi.json + ReturnCustomVoiceParameters: + docs: >- + The specified attributes of a Custom Voice. If a parameter's value is `0` + (default), it will not be included in the response. + properties: + gender: + type: optional + docs: >- + The vocalization of gender, ranging between masculine and feminine. + + + The default value is `0`, with a minimum of `-100` (more masculine) + and a maximum of `100` (more feminine). A value of `0` leaves this + parameter unchanged from the base voice. 
+ huskiness: + type: optional + docs: >- + The texture of the voice, ranging between bright and husky. + + + The default value is `0`, with a minimum of `-100` (brighter) and a + maximum of `100` (huskier). A value of `0` leaves this parameter + unchanged from the base voice. + nasality: + type: optional + docs: >- + The openness of the voice, ranging between resonant and nasal. + + + The default value is `0`, with a minimum of `-100` (more resonant) and + a maximum of `100` (more nasal). A value of `0` leaves this parameter + unchanged from the base voice. + pitch: + type: optional + docs: >- + The frequency of the voice, ranging between low and high. + + + The default value is `0`, with a minimum of `-100` (lower) and a + maximum of `100` (higher). A value of `0` leaves this parameter + unchanged from the base voice. + source: + openapi: stenographer-openapi.json + ReturnCustomVoice: + docs: A Custom Voice specification associated with this Config. + properties: + id: + type: string + docs: Identifier for a Custom Voice. Formatted as a UUID. + version: + type: integer + docs: >- + Version number for a Custom Voice. + + + Custom Voices, Prompts, Configs, and Tools are versioned. This + versioning system supports iterative development, allowing you to + progressively refine configurations and revert to previous versions if + needed. + + + Version numbers are integer values representing different iterations + of the Custom Voice. Each update to the Custom Voice increments its + version number. + name: + type: string + docs: The name of the Custom Voice. Maximum length of 75 characters. + created_on: + type: long + docs: >- + Time at which the Custom Voice was created. Measured in seconds since + the Unix epoch. + modified_on: + type: long + docs: >- + Time at which the Custom Voice was last modified. Measured in seconds + since the Unix epoch. + base_voice: + type: ReturnCustomVoiceBaseVoice docs: The base voice used to create the Custom Voice. - source: - openapi: stenographer-openapi.json - ReturnCustomVoiceParameters: - docs: >- - The specified attributes of a Custom Voice. If a parameter's value is `0` - (default), it will not be included in the response. - properties: - gender: - type: optional - docs: >- - The vocalization of gender, ranging between masculine and feminine. - - - The default value is `0`, with a minimum of `-100` (more masculine) - and a maximum of `100` (more feminine). A value of `0` leaves this - parameter unchanged from the base voice. - huskiness: - type: optional - docs: >- - The texture of the voice, ranging between bright and husky. - - - The default value is `0`, with a minimum of `-100` (brighter) and a - maximum of `100` (huskier). A value of `0` leaves this parameter - unchanged from the base voice. - nasality: - type: optional - docs: >- - The openness of the voice, ranging between resonant and nasal. - - - The default value is `0`, with a minimum of `-100` (more resonant) and - a maximum of `100` (more nasal). A value of `0` leaves this parameter - unchanged from the base voice. - pitch: - type: optional - docs: >- - The frequency of the voice, ranging between low and high. - - - The default value is `0`, with a minimum of `-100` (lower) and a - maximum of `100` (higher). A value of `0` leaves this parameter - unchanged from the base voice. - source: - openapi: stenographer-openapi.json - ReturnCustomVoice: - docs: A Custom Voice specification associated with this Config. - properties: - id: - type: string - docs: Identifier for a Custom Voice. 
Formatted as a UUID. - version: - type: integer - docs: >- - Version number for a Custom Voice. - - - Custom Voices, Prompts, Configs, and Tools are versioned. This - versioning system supports iterative development, allowing you to - progressively refine configurations and revert to previous versions if - needed. - - - Version numbers are integer values representing different iterations - of the Custom Voice. Each update to the Custom Voice increments its - version number. - name: - type: string - docs: The name of the Custom Voice. Maximum length of 75 characters. - created_on: - type: long - docs: >- - Time at which the Custom Voice was created. Measured in seconds since - the Unix epoch. - modified_on: - type: long - docs: >- - Time at which the Custom Voice was last modified. Measured in seconds - since the Unix epoch. - base_voice: - type: ReturnCustomVoiceBaseVoice - docs: The base voice used to create the Custom Voice. - parameter_model: - type: literal<"20240715-4parameter"> - docs: >- - The name of the parameter model used to define which attributes are - used by the `parameters` field. Currently, only `20240715-4parameter` - is supported as the parameter model. - parameters: - type: ReturnCustomVoiceParameters - docs: >- - The specified attributes of a Custom Voice. If a parameter's value is - `0` (default), it will not be included in the response. - source: - openapi: stenographer-openapi.json - PostedBuiltinToolName: - enum: - - web_search - - hang_up - docs: >- - Name of the built-in tool to use. Hume supports the following built-in - tools: - - - - **web_search:** enables EVI to search the web for up-to-date information - when applicable. - - - **hang_up:** closes the WebSocket connection when appropriate (e.g., - after detecting a farewell in the conversation). - - - For more information, see our guide on [using built-in - tools](/docs/empathic-voice-interface-evi/tool-use#using-built-in-tools). - source: - openapi: stenographer-openapi.json - PostedBuiltinTool: - docs: A configuration of a built-in tool to be posted to the server - properties: - name: - type: PostedBuiltinToolName - docs: >- - Name of the built-in tool to use. Hume supports the following built-in - tools: - - - - **web_search:** enables EVI to search the web for up-to-date - information when applicable. - - - **hang_up:** closes the WebSocket connection when appropriate (e.g., - after detecting a farewell in the conversation). - - - For more information, see our guide on [using built-in - tools](/docs/empathic-voice-interface-evi/tool-use#using-built-in-tools). - fallback_content: - type: optional - docs: >- - Optional text passed to the supplemental LLM in place of the tool call - result. The LLM then uses this text to generate a response back to the - user, ensuring continuity in the conversation if the Tool errors. - source: - openapi: stenographer-openapi.json - PostedConfigPromptSpec: - docs: >- - Identifies which prompt to use in a a config OR how to create a new prompt - to use in the config - properties: - id: - type: optional - docs: Identifier for a Prompt. Formatted as a UUID. - version: - type: optional - docs: >- - Version number for a Prompt. Version numbers should be integers. The - combination of configId and version number is unique. - text: - type: optional - docs: Text used to create a new prompt for a particular config. 
- source: - openapi: stenographer-openapi.json - PostedEllmModel: - docs: A eLLM model configuration to be posted to the server - properties: - allow_short_responses: - type: optional - docs: |- - Boolean indicating if the eLLM is allowed to generate short responses. - - If omitted, short responses from the eLLM are enabled by default. - source: - openapi: stenographer-openapi.json - PostedEventMessageSpec: - docs: Settings for a specific event_message to be posted to the server - properties: - enabled: - type: boolean - docs: >- - Boolean indicating if this event message is enabled. - - - If set to `true`, a message will be sent when the circumstances for - the specific event are met. - text: - type: optional - docs: >- - Text to use as the event message when the corresponding event occurs. - If no text is specified, EVI will generate an appropriate message - based on its current context and the system prompt. - source: - openapi: stenographer-openapi.json - PostedEventMessageSpecs: - docs: >- - Collection of event messages returned by the server. - - - Event messages are sent by the server when specific events occur during a - chat session. These messages are used to configure behaviors for EVI, such - as controlling how EVI starts a new conversation. - properties: - on_new_chat: - type: optional - docs: >- - Specifies the initial message EVI provides when a new chat is started, - such as a greeting or welcome message. - on_inactivity_timeout: - type: optional - docs: >- - Specifies the message EVI provides when the chat is about to be - disconnected due to a user inactivity timeout, such as a message - mentioning a lack of user input for a period of time. - - - Enabling an inactivity message allows developers to use this message - event for "checking in" with the user if they are not responding to - see if they are still active. - - - If the user does not respond in the number of seconds specified in the - `inactivity_timeout` field, then EVI will say the message and the user - has 15 seconds to respond. If they respond in time, the conversation - will continue; if not, the conversation will end. - - - However, if the inactivity message is not enabled, then reaching the - inactivity timeout will immediately end the connection. - on_max_duration_timeout: - type: optional - docs: >- - Specifies the message EVI provides when the chat is disconnected due - to reaching the maximum chat duration, such as a message mentioning - the time limit for the chat has been reached. - source: - openapi: stenographer-openapi.json - PostedLanguageModelModelProvider: - enum: - - OPEN_AI - - CUSTOM_LANGUAGE_MODEL - - ANTHROPIC - - FIREWORKS - - GROQ - - GOOGLE + parameter_model: + type: literal<"20240715-4parameter"> + docs: >- + The name of the parameter model used to define which attributes are + used by the `parameters` field. Currently, only `20240715-4parameter` + is supported as the parameter model. + parameters: + type: ReturnCustomVoiceParameters + docs: >- + The specified attributes of a Custom Voice. If a parameter's value is + `0` (default), it will not be included in the response. + source: + openapi: stenographer-openapi.json + PostedBuiltinToolName: + enum: + - web_search + - hang_up + docs: >- + Name of the built-in tool to use. Hume supports the following built-in + tools: + + + - **web_search:** enables EVI to search the web for up-to-date information + when applicable. 
+ + - **hang_up:** closes the WebSocket connection when appropriate (e.g., + after detecting a farewell in the conversation). + + + For more information, see our guide on [using built-in + tools](/docs/empathic-voice-interface-evi/tool-use#using-built-in-tools). + source: + openapi: stenographer-openapi.json + PostedBuiltinTool: + docs: A configuration of a built-in tool to be posted to the server + properties: + name: + type: PostedBuiltinToolName + docs: >- + Name of the built-in tool to use. Hume supports the following built-in + tools: + + + - **web_search:** enables EVI to search the web for up-to-date + information when applicable. + + - **hang_up:** closes the WebSocket connection when appropriate (e.g., + after detecting a farewell in the conversation). + + + For more information, see our guide on [using built-in + tools](/docs/empathic-voice-interface-evi/tool-use#using-built-in-tools). + fallback_content: + type: optional + docs: >- + Optional text passed to the supplemental LLM in place of the tool call + result. The LLM then uses this text to generate a response back to the + user, ensuring continuity in the conversation if the Tool errors. + source: + openapi: stenographer-openapi.json + PostedConfigPromptSpec: + docs: >- + Identifies which prompt to use in a a config OR how to create a new prompt + to use in the config + properties: + id: + type: optional + docs: Identifier for a Prompt. Formatted as a UUID. + version: + type: optional + docs: >- + Version number for a Prompt. Version numbers should be integers. The + combination of configId and version number is unique. + text: + type: optional + docs: Text used to create a new prompt for a particular config. + source: + openapi: stenographer-openapi.json + PostedEllmModel: + docs: A eLLM model configuration to be posted to the server + properties: + allow_short_responses: + type: optional + docs: |- + Boolean indicating if the eLLM is allowed to generate short responses. + + If omitted, short responses from the eLLM are enabled by default. + source: + openapi: stenographer-openapi.json + PostedEventMessageSpec: + docs: Settings for a specific event_message to be posted to the server + properties: + enabled: + type: boolean + docs: >- + Boolean indicating if this event message is enabled. + + + If set to `true`, a message will be sent when the circumstances for + the specific event are met. + text: + type: optional + docs: >- + Text to use as the event message when the corresponding event occurs. + If no text is specified, EVI will generate an appropriate message + based on its current context and the system prompt. + source: + openapi: stenographer-openapi.json + PostedEventMessageSpecs: + docs: >- + Collection of event messages returned by the server. + + + Event messages are sent by the server when specific events occur during a + chat session. These messages are used to configure behaviors for EVI, such + as controlling how EVI starts a new conversation. + properties: + on_new_chat: + type: optional + docs: >- + Specifies the initial message EVI provides when a new chat is started, + such as a greeting or welcome message. + on_inactivity_timeout: + type: optional + docs: >- + Specifies the message EVI provides when the chat is about to be + disconnected due to a user inactivity timeout, such as a message + mentioning a lack of user input for a period of time. 
+ + + Enabling an inactivity message allows developers to use this message + event for "checking in" with the user if they are not responding to + see if they are still active. + + + If the user does not respond in the number of seconds specified in the + `inactivity_timeout` field, then EVI will say the message and the user + has 15 seconds to respond. If they respond in time, the conversation + will continue; if not, the conversation will end. + + + However, if the inactivity message is not enabled, then reaching the + inactivity timeout will immediately end the connection. + on_max_duration_timeout: + type: optional + docs: >- + Specifies the message EVI provides when the chat is disconnected due + to reaching the maximum chat duration, such as a message mentioning + the time limit for the chat has been reached. + source: + openapi: stenographer-openapi.json + PostedLanguageModelModelProvider: + enum: + - OPEN_AI + - CUSTOM_LANGUAGE_MODEL + - ANTHROPIC + - FIREWORKS + - GROQ + - GOOGLE + docs: The provider of the supplemental language model. + source: + openapi: stenographer-openapi.json + PostedLanguageModel: + docs: A LanguageModel to be posted to the server + properties: + model_provider: + type: optional docs: The provider of the supplemental language model. - source: - openapi: stenographer-openapi.json - PostedLanguageModel: - docs: A LanguageModel to be posted to the server - properties: - model_provider: - type: optional - docs: The provider of the supplemental language model. - model_resource: - type: optional - docs: String that specifies the language model to use with `model_provider`. - temperature: - type: optional - docs: >- - The model temperature, with values between 0 to 1 (inclusive). - - - Controls the randomness of the LLM’s output, with values closer to 0 - yielding focused, deterministic responses and values closer to 1 - producing more creative, diverse responses. - source: - openapi: stenographer-openapi.json - PostedTimeoutSpec: - docs: Settings for a specific timeout to be posted to the server - properties: - enabled: - type: boolean - docs: Boolean indicating if this event message is enabled. - duration_secs: - type: optional - docs: Duration in seconds for the timeout. - source: - openapi: stenographer-openapi.json - PostedTimeoutSpecsInactivity: - docs: >- - Specifies the duration of user inactivity (in seconds) after which the EVI - WebSocket connection will be automatically disconnected. Default is 600 - seconds (10 minutes). - - - Accepts a minimum value of 1 second and a maximum value of 1,800 seconds. - properties: - enabled: - type: boolean - docs: >- - Boolean indicating if this timeout is enabled. - - - If set to false, EVI will not timeout due to a specified duration of - user inactivity being reached. However, the conversation will - eventually disconnect after 1,800 seconds (30 minutes), which is the - maximum WebSocket duration limit for EVI. - duration_secs: - type: optional - docs: >- - Duration in seconds for the timeout (e.g. 600 seconds represents 10 - minutes). - source: - openapi: stenographer-openapi.json - PostedTimeoutSpecsMaxDuration: - docs: >- - Specifies the maximum allowed duration (in seconds) for an EVI WebSocket - connection before it is automatically disconnected. Default is 1,800 - seconds (30 minutes). - - - Accepts a minimum value of 1 second and a maximum value of 1,800 seconds. - properties: - enabled: - type: boolean - docs: >- - Boolean indicating if this timeout is enabled. 
- - - If set to false, EVI will not timeout due to a specified maximum - duration being reached. However, the conversation will eventually - disconnect after 1,800 seconds (30 minutes), which is the maximum - WebSocket duration limit for EVI. - duration_secs: - type: optional - docs: >- - Duration in seconds for the timeout (e.g. 600 seconds represents 10 - minutes). - source: - openapi: stenographer-openapi.json - PostedTimeoutSpecs: - docs: >- - Collection of timeout specifications returned by the server. - - - Timeouts are sent by the server when specific time-based events occur - during a chat session. These specifications set the inactivity timeout and - the maximum duration an EVI WebSocket connection can stay open before it - is automatically disconnected. - properties: - inactivity: - type: optional - docs: >- - Specifies the duration of user inactivity (in seconds) after which the - EVI WebSocket connection will be automatically disconnected. Default - is 600 seconds (10 minutes). - - - Accepts a minimum value of 1 second and a maximum value of 1,800 - seconds. - max_duration: - type: optional - docs: >- - Specifies the maximum allowed duration (in seconds) for an EVI - WebSocket connection before it is automatically disconnected. Default - is 1,800 seconds (30 minutes). - - - Accepts a minimum value of 1 second and a maximum value of 1,800 - seconds. - source: - openapi: stenographer-openapi.json - PostedUserDefinedToolSpec: - docs: A specific tool identifier to be posted to the server - properties: - id: - type: string - docs: Identifier for a Tool. Formatted as a UUID. - version: - type: optional - docs: >- - Version number for a Tool. - - - Tools, Configs, Custom Voices, and Prompts are versioned. This - versioning system supports iterative development, allowing you to - progressively refine tools and revert to previous versions if needed. - - - Version numbers are integer values representing different iterations - of the Tool. Each update to the Tool increments its version number. - source: - openapi: stenographer-openapi.json - PostedVoice: - docs: A Voice specification posted to the server - properties: - provider: - type: literal<"HUME_AI"> - docs: >- - The provider of the voice to use. Currently, only `HUME_AI` is - supported as the voice provider. - name: - type: optional - docs: >- - Specifies the name of the voice to use. - - - This can be either the name of a previously created Custom Voice or - one of our 7 base voices: `ITO`, `KORA`, `DACHER`, `AURA`, `FINN`, - `WHIMSY`, or `STELLA`. - - - The name will be automatically converted to uppercase (e.g., "Ito" - becomes "ITO"). If a name is not specified, then a [Custom - Voice](/reference/empathic-voice-interface-evi/configs/create-config#request.body.voice.custom_voice) - specification must be provided. - custom_voice: optional - source: - openapi: stenographer-openapi.json - ReturnBuiltinToolToolType: - enum: - - BUILTIN - - FUNCTION - docs: >- - Type of Tool. Either `BUILTIN` for natively implemented tools, like web - search, or `FUNCTION` for user-defined tools. - source: - openapi: stenographer-openapi.json - ReturnBuiltinTool: - docs: A specific builtin tool version returned from the server - properties: - tool_type: - type: ReturnBuiltinToolToolType - docs: >- - Type of Tool. Either `BUILTIN` for natively implemented tools, like - web search, or `FUNCTION` for user-defined tools. - name: - type: string - docs: Name applied to all versions of a particular Tool. 
- fallback_content: - type: optional - docs: >- - Optional text passed to the supplemental LLM in place of the tool call - result. The LLM then uses this text to generate a response back to the - user, ensuring continuity in the conversation if the Tool errors. - source: - openapi: stenographer-openapi.json - ReturnConfig: - docs: A specific config version returned from the server - properties: - id: - type: optional - docs: Identifier for a Config. Formatted as a UUID. - version: - type: optional - docs: >- - Version number for a Config. - - - Configs, Prompts, Custom Voices, and Tools are versioned. This - versioning system supports iterative development, allowing you to - progressively refine configurations and revert to previous versions if - needed. - - - Version numbers are integer values representing different iterations - of the Config. Each update to the Config increments its version - number. - evi_version: - type: optional - docs: >- - Specifies the EVI version to use. Use `"1"` for version 1, or `"2"` - for the latest enhanced version. For a detailed comparison of the two - versions, refer to our - [guide](/docs/empathic-voice-interface-evi/evi-2). - version_description: - type: optional - docs: An optional description of the Config version. - name: - type: optional - docs: Name applied to all versions of a particular Config. - created_on: - type: optional - docs: >- - Time at which the Config was created. Measured in seconds since the - Unix epoch. - modified_on: - type: optional - docs: >- - Time at which the Config was last modified. Measured in seconds since - the Unix epoch. - prompt: optional - voice: - type: optional - docs: A voice specification associated with this Config. - language_model: - type: optional - docs: >- - The supplemental language model associated with this Config. - - - This model is used to generate longer, more detailed responses from - EVI. Choosing an appropriate supplemental language model for your use - case is crucial for generating fast, high-quality responses from EVI. - ellm_model: - type: optional - docs: >- - The eLLM setup associated with this Config. - - - Hume's eLLM (empathic Large Language Model) is a multimodal language - model that takes into account both expression measures and language. - The eLLM generates short, empathic language responses and guides - text-to-speech (TTS) prosody. - tools: - type: optional>> - docs: List of user-defined tools associated with this Config. - builtin_tools: - type: optional>> - docs: List of built-in tools associated with this Config. - event_messages: optional - timeouts: optional - source: - openapi: stenographer-openapi.json - ReturnEllmModel: - docs: A specific eLLM Model configuration - properties: - allow_short_responses: - type: boolean - docs: |- - Boolean indicating if the eLLM is allowed to generate short responses. - - If omitted, short responses from the eLLM are enabled by default. - source: - openapi: stenographer-openapi.json - ReturnEventMessageSpec: - docs: A specific event message configuration to be returned from the server - properties: - enabled: - type: boolean - docs: >- - Boolean indicating if this event message is enabled. - - - If set to `true`, a message will be sent when the circumstances for - the specific event are met. - text: - type: optional - docs: >- - Text to use as the event message when the corresponding event occurs. - If no text is specified, EVI will generate an appropriate message - based on its current context and the system prompt. 
- source: - openapi: stenographer-openapi.json - ReturnEventMessageSpecs: - docs: >- - Collection of event messages returned by the server. - - - Event messages are sent by the server when specific events occur during a - chat session. These messages are used to configure behaviors for EVI, such - as controlling how EVI starts a new conversation. - properties: - on_new_chat: - type: optional - docs: >- - Specifies the initial message EVI provides when a new chat is started, - such as a greeting or welcome message. - on_inactivity_timeout: - type: optional - docs: >- - Specifies the message EVI provides when the chat is about to be - disconnected due to a user inactivity timeout, such as a message - mentioning a lack of user input for a period of time. - - - Enabling an inactivity message allows developers to use this message - event for "checking in" with the user if they are not responding to - see if they are still active. - - - If the user does not respond in the number of seconds specified in the - `inactivity_timeout` field, then EVI will say the message and the user - has 15 seconds to respond. If they respond in time, the conversation - will continue; if not, the conversation will end. - - - However, if the inactivity message is not enabled, then reaching the - inactivity timeout will immediately end the connection. - on_max_duration_timeout: - type: optional - docs: >- - Specifies the message EVI provides when the chat is disconnected due - to reaching the maximum chat duration, such as a message mentioning - the time limit for the chat has been reached. - source: - openapi: stenographer-openapi.json - ReturnLanguageModelModelProvider: - enum: - - OPEN_AI - - CUSTOM_LANGUAGE_MODEL - - ANTHROPIC - - FIREWORKS - - GROQ - - GOOGLE + model_resource: + type: optional + docs: String that specifies the language model to use with `model_provider`. + temperature: + type: optional + docs: >- + The model temperature, with values between 0 to 1 (inclusive). + + + Controls the randomness of the LLM’s output, with values closer to 0 + yielding focused, deterministic responses and values closer to 1 + producing more creative, diverse responses. + source: + openapi: stenographer-openapi.json + PostedTimeoutSpec: + docs: Settings for a specific timeout to be posted to the server + properties: + enabled: + type: boolean + docs: Boolean indicating if this event message is enabled. + duration_secs: + type: optional + docs: Duration in seconds for the timeout. + source: + openapi: stenographer-openapi.json + PostedTimeoutSpecsInactivity: + docs: >- + Specifies the duration of user inactivity (in seconds) after which the EVI + WebSocket connection will be automatically disconnected. Default is 600 + seconds (10 minutes). + + + Accepts a minimum value of 1 second and a maximum value of 1,800 seconds. + properties: + enabled: + type: boolean + docs: >- + Boolean indicating if this timeout is enabled. + + + If set to false, EVI will not timeout due to a specified duration of + user inactivity being reached. However, the conversation will + eventually disconnect after 1,800 seconds (30 minutes), which is the + maximum WebSocket duration limit for EVI. + duration_secs: + type: optional + docs: >- + Duration in seconds for the timeout (e.g. 600 seconds represents 10 + minutes). + source: + openapi: stenographer-openapi.json + PostedTimeoutSpecsMaxDuration: + docs: >- + Specifies the maximum allowed duration (in seconds) for an EVI WebSocket + connection before it is automatically disconnected. 
Default is 1,800 + seconds (30 minutes). + + + Accepts a minimum value of 1 second and a maximum value of 1,800 seconds. + properties: + enabled: + type: boolean + docs: >- + Boolean indicating if this timeout is enabled. + + + If set to false, EVI will not timeout due to a specified maximum + duration being reached. However, the conversation will eventually + disconnect after 1,800 seconds (30 minutes), which is the maximum + WebSocket duration limit for EVI. + duration_secs: + type: optional + docs: >- + Duration in seconds for the timeout (e.g. 600 seconds represents 10 + minutes). + source: + openapi: stenographer-openapi.json + PostedTimeoutSpecs: + docs: >- + Collection of timeout specifications returned by the server. + + + Timeouts are sent by the server when specific time-based events occur + during a chat session. These specifications set the inactivity timeout and + the maximum duration an EVI WebSocket connection can stay open before it + is automatically disconnected. + properties: + inactivity: + type: optional + docs: >- + Specifies the duration of user inactivity (in seconds) after which the + EVI WebSocket connection will be automatically disconnected. Default + is 600 seconds (10 minutes). + + + Accepts a minimum value of 1 second and a maximum value of 1,800 + seconds. + max_duration: + type: optional + docs: >- + Specifies the maximum allowed duration (in seconds) for an EVI + WebSocket connection before it is automatically disconnected. Default + is 1,800 seconds (30 minutes). + + + Accepts a minimum value of 1 second and a maximum value of 1,800 + seconds. + source: + openapi: stenographer-openapi.json + PostedUserDefinedToolSpec: + docs: A specific tool identifier to be posted to the server + properties: + id: + type: string + docs: Identifier for a Tool. Formatted as a UUID. + version: + type: optional + docs: >- + Version number for a Tool. + + + Tools, Configs, Custom Voices, and Prompts are versioned. This + versioning system supports iterative development, allowing you to + progressively refine tools and revert to previous versions if needed. + + + Version numbers are integer values representing different iterations + of the Tool. Each update to the Tool increments its version number. + source: + openapi: stenographer-openapi.json + PostedVoice: + docs: A Voice specification posted to the server + properties: + provider: + type: literal<"HUME_AI"> + docs: >- + The provider of the voice to use. Currently, only `HUME_AI` is + supported as the voice provider. + name: + type: optional + docs: >- + Specifies the name of the voice to use. + + + This can be either the name of a previously created Custom Voice or + one of our 7 base voices: `ITO`, `KORA`, `DACHER`, `AURA`, `FINN`, + `WHIMSY`, or `STELLA`. + + + The name will be automatically converted to uppercase (e.g., "Ito" + becomes "ITO"). If a name is not specified, then a [Custom + Voice](/reference/empathic-voice-interface-evi/configs/create-config#request.body.voice.custom_voice) + specification must be provided. + custom_voice: optional + source: + openapi: stenographer-openapi.json + ReturnBuiltinToolToolType: + enum: + - BUILTIN + - FUNCTION + docs: >- + Type of Tool. Either `BUILTIN` for natively implemented tools, like web + search, or `FUNCTION` for user-defined tools. + source: + openapi: stenographer-openapi.json + ReturnBuiltinTool: + docs: A specific builtin tool version returned from the server + properties: + tool_type: + type: ReturnBuiltinToolToolType + docs: >- + Type of Tool. 
Either `BUILTIN` for natively implemented tools, like + web search, or `FUNCTION` for user-defined tools. + name: + type: string + docs: Name applied to all versions of a particular Tool. + fallback_content: + type: optional + docs: >- + Optional text passed to the supplemental LLM in place of the tool call + result. The LLM then uses this text to generate a response back to the + user, ensuring continuity in the conversation if the Tool errors. + source: + openapi: stenographer-openapi.json + ReturnConfig: + docs: A specific config version returned from the server + properties: + id: + type: optional + docs: Identifier for a Config. Formatted as a UUID. + version: + type: optional + docs: >- + Version number for a Config. + + + Configs, Prompts, Custom Voices, and Tools are versioned. This + versioning system supports iterative development, allowing you to + progressively refine configurations and revert to previous versions if + needed. + + + Version numbers are integer values representing different iterations + of the Config. Each update to the Config increments its version + number. + evi_version: + type: optional + docs: >- + Specifies the EVI version to use. Use `"1"` for version 1, or `"2"` + for the latest enhanced version. For a detailed comparison of the two + versions, refer to our + [guide](/docs/empathic-voice-interface-evi/evi-2). + version_description: + type: optional + docs: An optional description of the Config version. + name: + type: optional + docs: Name applied to all versions of a particular Config. + created_on: + type: optional + docs: >- + Time at which the Config was created. Measured in seconds since the + Unix epoch. + modified_on: + type: optional + docs: >- + Time at which the Config was last modified. Measured in seconds since + the Unix epoch. + prompt: optional + voice: + type: optional + docs: A voice specification associated with this Config. + language_model: + type: optional + docs: >- + The supplemental language model associated with this Config. + + + This model is used to generate longer, more detailed responses from + EVI. Choosing an appropriate supplemental language model for your use + case is crucial for generating fast, high-quality responses from EVI. + ellm_model: + type: optional + docs: >- + The eLLM setup associated with this Config. + + + Hume's eLLM (empathic Large Language Model) is a multimodal language + model that takes into account both expression measures and language. + The eLLM generates short, empathic language responses and guides + text-to-speech (TTS) prosody. + tools: + type: optional>> + docs: List of user-defined tools associated with this Config. + builtin_tools: + type: optional>> + docs: List of built-in tools associated with this Config. + event_messages: optional + timeouts: optional + source: + openapi: stenographer-openapi.json + ReturnEllmModel: + docs: A specific eLLM Model configuration + properties: + allow_short_responses: + type: boolean + docs: |- + Boolean indicating if the eLLM is allowed to generate short responses. + + If omitted, short responses from the eLLM are enabled by default. + source: + openapi: stenographer-openapi.json + ReturnEventMessageSpec: + docs: A specific event message configuration to be returned from the server + properties: + enabled: + type: boolean + docs: >- + Boolean indicating if this event message is enabled. + + + If set to `true`, a message will be sent when the circumstances for + the specific event are met. 
+ text: + type: optional + docs: >- + Text to use as the event message when the corresponding event occurs. + If no text is specified, EVI will generate an appropriate message + based on its current context and the system prompt. + source: + openapi: stenographer-openapi.json + ReturnEventMessageSpecs: + docs: >- + Collection of event messages returned by the server. + + + Event messages are sent by the server when specific events occur during a + chat session. These messages are used to configure behaviors for EVI, such + as controlling how EVI starts a new conversation. + properties: + on_new_chat: + type: optional + docs: >- + Specifies the initial message EVI provides when a new chat is started, + such as a greeting or welcome message. + on_inactivity_timeout: + type: optional + docs: >- + Specifies the message EVI provides when the chat is about to be + disconnected due to a user inactivity timeout, such as a message + mentioning a lack of user input for a period of time. + + + Enabling an inactivity message allows developers to use this message + event for "checking in" with the user if they are not responding to + see if they are still active. + + + If the user does not respond in the number of seconds specified in the + `inactivity_timeout` field, then EVI will say the message and the user + has 15 seconds to respond. If they respond in time, the conversation + will continue; if not, the conversation will end. + + + However, if the inactivity message is not enabled, then reaching the + inactivity timeout will immediately end the connection. + on_max_duration_timeout: + type: optional + docs: >- + Specifies the message EVI provides when the chat is disconnected due + to reaching the maximum chat duration, such as a message mentioning + the time limit for the chat has been reached. + source: + openapi: stenographer-openapi.json + ReturnLanguageModelModelProvider: + enum: + - OPEN_AI + - CUSTOM_LANGUAGE_MODEL + - ANTHROPIC + - FIREWORKS + - GROQ + - GOOGLE + docs: The provider of the supplemental language model. + source: + openapi: stenographer-openapi.json + ReturnLanguageModel: + docs: A specific LanguageModel + properties: + model_provider: + type: optional docs: The provider of the supplemental language model. - source: - openapi: stenographer-openapi.json - ReturnLanguageModel: - docs: A specific LanguageModel - properties: - model_provider: - type: optional - docs: The provider of the supplemental language model. - model_resource: - type: optional - docs: String that specifies the language model to use with `model_provider`. - temperature: - type: optional - docs: >- - The model temperature, with values between 0 to 1 (inclusive). - - - Controls the randomness of the LLM’s output, with values closer to 0 - yielding focused, deterministic responses and values closer to 1 - producing more creative, diverse responses. - source: - openapi: stenographer-openapi.json - ReturnTimeoutSpec: - docs: A specific timeout configuration to be returned from the server - properties: - enabled: - type: boolean - docs: >- - Boolean indicating if this timeout is enabled. - - - If set to false, EVI will not timeout due to a specified duration - being reached. However, the conversation will eventually disconnect - after 1,800 seconds (30 minutes), which is the maximum WebSocket - duration limit for EVI. - duration_secs: - type: optional - docs: >- - Duration in seconds for the timeout (e.g. 600 seconds represents 10 - minutes). 
- source: - openapi: stenographer-openapi.json - ReturnTimeoutSpecs: - docs: >- - Collection of timeout specifications returned by the server. - - - Timeouts are sent by the server when specific time-based events occur - during a chat session. These specifications set the inactivity timeout and - the maximum duration an EVI WebSocket connection can stay open before it - is automatically disconnected. - properties: - inactivity: - type: ReturnTimeoutSpec - docs: >- - Specifies the duration of user inactivity (in seconds) after which the - EVI WebSocket connection will be automatically disconnected. Default - is 600 seconds (10 minutes). - - - Accepts a minimum value of 1 second and a maximum value of 1,800 - seconds. - max_duration: - type: ReturnTimeoutSpec - docs: >- - Specifies the maximum allowed duration (in seconds) for an EVI - WebSocket connection before it is automatically disconnected. Default - is 1,800 seconds (30 minutes). - - - Accepts a minimum value of 1 second and a maximum value of 1,800 - seconds. - source: - openapi: stenographer-openapi.json - ReturnVoice: - docs: A specific voice specification - properties: - provider: - type: literal<"HUME_AI"> - docs: >- - The provider of the voice to use. Currently, only `HUME_AI` is - supported as the voice provider. - name: - type: optional - docs: >- - The name of the specified voice. - - - This will either be the name of a previously created Custom Voice or - one of our 7 base voices: `ITO`, `KORA`, `DACHER`, `AURA`, `FINN`, - `WHIMSY`, or `STELLA`. - custom_voice: ReturnCustomVoice - source: - openapi: stenographer-openapi.json - ReturnPagedUserDefinedTools: - docs: A paginated list of user defined tool versions returned from the server - properties: - page_number: - type: integer - docs: >- - The page number of the returned list. - - - This value corresponds to the `page_number` parameter specified in the - request. Pagination uses zero-based indexing. - page_size: - type: integer - docs: >- - The maximum number of items returned per page. - - - This value corresponds to the `page_size` parameter specified in the - request. - total_pages: - type: integer - docs: The total number of pages in the collection. - tools_page: - docs: >- - List of tools returned for the specified `page_number` and - `page_size`. - type: list> - source: - openapi: stenographer-openapi.json - ReturnPagedPrompts: - docs: A paginated list of prompt versions returned from the server - properties: - page_number: - type: integer - docs: >- - The page number of the returned list. - - - This value corresponds to the `page_number` parameter specified in the - request. Pagination uses zero-based indexing. - page_size: - type: integer - docs: >- - The maximum number of items returned per page. - - - This value corresponds to the `page_size` parameter specified in the - request. - total_pages: - type: integer - docs: The total number of pages in the collection. - prompts_page: - docs: >- - List of prompts returned for the specified `page_number` and - `page_size`. - type: list> - source: - openapi: stenographer-openapi.json - ReturnPagedCustomVoices: - docs: A paginated list of custom voices returned from the server - properties: - page_number: - type: integer - docs: >- - The page number of the returned list. - - - This value corresponds to the `page_number` parameter specified in the - request. Pagination uses zero-based indexing. - page_size: - type: integer - docs: >- - The maximum number of items returned per page. 
- - - This value corresponds to the `page_size` parameter specified in the - request. - total_pages: - type: integer - docs: The total number of pages in the collection. - custom_voices_page: - docs: List of Custom Voices for the specified `page_number` and `page_size`. - type: list - source: - openapi: stenographer-openapi.json - ReturnPagedConfigs: - docs: A paginated list of config versions returned from the server - properties: - page_number: - type: optional - docs: >- - The page number of the returned list. - - - This value corresponds to the `page_number` parameter specified in the - request. Pagination uses zero-based indexing. - page_size: - type: optional - docs: >- - The maximum number of items returned per page. - - - This value corresponds to the `page_size` parameter specified in the - request. - total_pages: - type: integer - docs: The total number of pages in the collection. - configs_page: - type: optional> - docs: >- - List of configs returned for the specified `page_number` and - `page_size`. - source: - openapi: stenographer-openapi.json - ReturnChatStatus: - enum: - - ACTIVE - - USER_ENDED - - USER_TIMEOUT - - MAX_DURATION_TIMEOUT - - INACTIVITY_TIMEOUT - - ERROR - docs: >- - Indicates the current state of the chat. There are six possible statuses: - - - - `ACTIVE`: The chat is currently active and ongoing. - - - - `USER_ENDED`: The chat was manually ended by the user. - - - - `USER_TIMEOUT`: The chat ended due to a user-defined timeout. - - - - `MAX_DURATION_TIMEOUT`: The chat ended because it reached the maximum - allowed duration. - - - - `INACTIVITY_TIMEOUT`: The chat ended due to an inactivity timeout. - - - - `ERROR`: The chat ended unexpectedly due to an error. - source: - openapi: stenographer-openapi.json - ReturnChat: - docs: A description of chat and its status - properties: - id: - type: string - docs: Identifier for a Chat. Formatted as a UUID. - chat_group_id: - type: string - docs: >- - Identifier for the Chat Group. Any chat resumed from this Chat will - have the same `chat_group_id`. Formatted as a UUID. - status: - type: ReturnChatStatus - docs: >- - Indicates the current state of the chat. There are six possible - statuses: - - - - `ACTIVE`: The chat is currently active and ongoing. - - - - `USER_ENDED`: The chat was manually ended by the user. - - - - `USER_TIMEOUT`: The chat ended due to a user-defined timeout. - - - - `MAX_DURATION_TIMEOUT`: The chat ended because it reached the - maximum allowed duration. - - - - `INACTIVITY_TIMEOUT`: The chat ended due to an inactivity timeout. - - - - `ERROR`: The chat ended unexpectedly due to an error. - start_timestamp: - type: long - docs: >- - Time at which the Chat started. Measured in seconds since the Unix - epoch. - end_timestamp: - type: optional - docs: >- - Time at which the Chat ended. Measured in seconds since the Unix - epoch. - event_count: - type: optional - docs: The total number of events currently in this chat. - metadata: - type: optional - docs: Stringified JSON with additional metadata about the chat. - config: optional - source: - openapi: stenographer-openapi.json - ReturnConfigSpec: - docs: The Config associated with this Chat. - properties: - id: - type: string - docs: Identifier for a Config. Formatted as a UUID. - version: - type: optional - docs: >- - Version number for a Config. - - - Configs, Prompts, Custom Voices, and Tools are versioned. 
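All of the paged list types in this file share the same envelope: a zero-based `page_number`, the requested `page_size`, and a `total_pages` count. The sketch below walks every page of the tools listing; the endpoint path is an assumption for illustration, while the field and parameter names come from the definitions above.

```typescript
// Sketch: iterate a paged listing until the last page is reached.
// The /v0/evi/tools path is an assumption for illustration; `page_number`,
// `page_size`, `total_pages`, `tools_page`, and the X-Hume-Api-Key header
// follow this API definition.

interface PagedTools<T> {
  page_number: number; // zero-based
  page_size: number;
  total_pages: number;
  tools_page: T[];
}

async function listAllTools(apiKey: string): Promise<unknown[]> {
  const pageSize = 50;
  const all: unknown[] = [];
  for (let page = 0; ; page++) {
    const url = `https://api.hume.ai/v0/evi/tools?page_number=${page}&page_size=${pageSize}`;
    const res = await fetch(url, { headers: { "X-Hume-Api-Key": apiKey } });
    if (!res.ok) throw new Error(`Failed to list tools: HTTP ${res.status}`);
    const body = (await res.json()) as PagedTools<unknown>;
    all.push(...body.tools_page);
    if (page + 1 >= body.total_pages) break; // zero-based, so the last page is total_pages - 1
  }
  return all;
}
```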
This - versioning system supports iterative development, allowing you to - progressively refine configurations and revert to previous versions if - needed. + model_resource: + type: optional + docs: String that specifies the language model to use with `model_provider`. + temperature: + type: optional + docs: >- + The model temperature, with values between 0 to 1 (inclusive). + + + Controls the randomness of the LLM’s output, with values closer to 0 + yielding focused, deterministic responses and values closer to 1 + producing more creative, diverse responses. + source: + openapi: stenographer-openapi.json + ReturnTimeoutSpec: + docs: A specific timeout configuration to be returned from the server + properties: + enabled: + type: boolean + docs: >- + Boolean indicating if this timeout is enabled. + + + If set to false, EVI will not timeout due to a specified duration + being reached. However, the conversation will eventually disconnect + after 1,800 seconds (30 minutes), which is the maximum WebSocket + duration limit for EVI. + duration_secs: + type: optional + docs: >- + Duration in seconds for the timeout (e.g. 600 seconds represents 10 + minutes). + source: + openapi: stenographer-openapi.json + ReturnTimeoutSpecs: + docs: >- + Collection of timeout specifications returned by the server. + + + Timeouts are sent by the server when specific time-based events occur + during a chat session. These specifications set the inactivity timeout and + the maximum duration an EVI WebSocket connection can stay open before it + is automatically disconnected. + properties: + inactivity: + type: ReturnTimeoutSpec + docs: >- + Specifies the duration of user inactivity (in seconds) after which the + EVI WebSocket connection will be automatically disconnected. Default + is 600 seconds (10 minutes). + + + Accepts a minimum value of 1 second and a maximum value of 1,800 + seconds. + max_duration: + type: ReturnTimeoutSpec + docs: >- + Specifies the maximum allowed duration (in seconds) for an EVI + WebSocket connection before it is automatically disconnected. Default + is 1,800 seconds (30 minutes). + + + Accepts a minimum value of 1 second and a maximum value of 1,800 + seconds. + source: + openapi: stenographer-openapi.json + ReturnVoice: + docs: A specific voice specification + properties: + provider: + type: literal<"HUME_AI"> + docs: >- + The provider of the voice to use. Currently, only `HUME_AI` is + supported as the voice provider. + name: + type: optional + docs: >- + The name of the specified voice. + + + This will either be the name of a previously created Custom Voice or + one of our 7 base voices: `ITO`, `KORA`, `DACHER`, `AURA`, `FINN`, + `WHIMSY`, or `STELLA`. + custom_voice: ReturnCustomVoice + source: + openapi: stenographer-openapi.json + ReturnPagedUserDefinedTools: + docs: A paginated list of user defined tool versions returned from the server + properties: + page_number: + type: integer + docs: >- + The page number of the returned list. + + + This value corresponds to the `page_number` parameter specified in the + request. Pagination uses zero-based indexing. + page_size: + type: integer + docs: >- + The maximum number of items returned per page. + + + This value corresponds to the `page_size` parameter specified in the + request. + total_pages: + type: integer + docs: The total number of pages in the collection. + tools_page: + docs: >- + List of tools returned for the specified `page_number` and + `page_size`. 
+ type: list> + source: + openapi: stenographer-openapi.json + ReturnPagedPrompts: + docs: A paginated list of prompt versions returned from the server + properties: + page_number: + type: integer + docs: >- + The page number of the returned list. + + + This value corresponds to the `page_number` parameter specified in the + request. Pagination uses zero-based indexing. + page_size: + type: integer + docs: >- + The maximum number of items returned per page. + + + This value corresponds to the `page_size` parameter specified in the + request. + total_pages: + type: integer + docs: The total number of pages in the collection. + prompts_page: + docs: >- + List of prompts returned for the specified `page_number` and + `page_size`. + type: list> + source: + openapi: stenographer-openapi.json + ReturnPagedCustomVoices: + docs: A paginated list of custom voices returned from the server + properties: + page_number: + type: integer + docs: >- + The page number of the returned list. + + + This value corresponds to the `page_number` parameter specified in the + request. Pagination uses zero-based indexing. + page_size: + type: integer + docs: >- + The maximum number of items returned per page. + + + This value corresponds to the `page_size` parameter specified in the + request. + total_pages: + type: integer + docs: The total number of pages in the collection. + custom_voices_page: + docs: List of Custom Voices for the specified `page_number` and `page_size`. + type: list + source: + openapi: stenographer-openapi.json + ReturnPagedConfigs: + docs: A paginated list of config versions returned from the server + properties: + page_number: + type: optional + docs: >- + The page number of the returned list. + + + This value corresponds to the `page_number` parameter specified in the + request. Pagination uses zero-based indexing. + page_size: + type: optional + docs: >- + The maximum number of items returned per page. + + + This value corresponds to the `page_size` parameter specified in the + request. + total_pages: + type: integer + docs: The total number of pages in the collection. + configs_page: + type: optional> + docs: >- + List of configs returned for the specified `page_number` and + `page_size`. + source: + openapi: stenographer-openapi.json + ReturnChatStatus: + enum: + - ACTIVE + - USER_ENDED + - USER_TIMEOUT + - MAX_DURATION_TIMEOUT + - INACTIVITY_TIMEOUT + - ERROR + docs: >- + Indicates the current state of the chat. There are six possible statuses: + + + - `ACTIVE`: The chat is currently active and ongoing. + + + - `USER_ENDED`: The chat was manually ended by the user. + + + - `USER_TIMEOUT`: The chat ended due to a user-defined timeout. + + + - `MAX_DURATION_TIMEOUT`: The chat ended because it reached the maximum + allowed duration. + + + - `INACTIVITY_TIMEOUT`: The chat ended due to an inactivity timeout. + + + - `ERROR`: The chat ended unexpectedly due to an error. + source: + openapi: stenographer-openapi.json + ReturnChat: + docs: A description of chat and its status + properties: + id: + type: string + docs: Identifier for a Chat. Formatted as a UUID. + chat_group_id: + type: string + docs: >- + Identifier for the Chat Group. Any chat resumed from this Chat will + have the same `chat_group_id`. Formatted as a UUID. + status: + type: ReturnChatStatus + docs: >- + Indicates the current state of the chat. There are six possible + statuses: + + + - `ACTIVE`: The chat is currently active and ongoing. + + + - `USER_ENDED`: The chat was manually ended by the user. 
+ + + - `USER_TIMEOUT`: The chat ended due to a user-defined timeout. + + + - `MAX_DURATION_TIMEOUT`: The chat ended because it reached the + maximum allowed duration. - Version numbers are integer values representing different iterations - of the Config. Each update to the Config increments its version - number. - source: - openapi: stenographer-openapi.json - ReturnPagedChatsPaginationDirection: - enum: - - ASC - - DESC + - `INACTIVITY_TIMEOUT`: The chat ended due to an inactivity timeout. + + + - `ERROR`: The chat ended unexpectedly due to an error. + start_timestamp: + type: long + docs: >- + Time at which the Chat started. Measured in seconds since the Unix + epoch. + end_timestamp: + type: optional + docs: >- + Time at which the Chat ended. Measured in seconds since the Unix + epoch. + event_count: + type: optional + docs: The total number of events currently in this chat. + metadata: + type: optional + docs: Stringified JSON with additional metadata about the chat. + config: optional + source: + openapi: stenographer-openapi.json + ReturnConfigSpec: + docs: The Config associated with this Chat. + properties: + id: + type: string + docs: Identifier for a Config. Formatted as a UUID. + version: + type: optional + docs: >- + Version number for a Config. + + + Configs, Prompts, Custom Voices, and Tools are versioned. This + versioning system supports iterative development, allowing you to + progressively refine configurations and revert to previous versions if + needed. + + + Version numbers are integer values representing different iterations + of the Config. Each update to the Config increments its version + number. + source: + openapi: stenographer-openapi.json + ReturnPagedChatsPaginationDirection: + enum: + - ASC + - DESC + docs: >- + Indicates the order in which the paginated results are presented, based on + their creation date. + + + It shows `ASC` for ascending order (chronological, with the oldest records + first) or `DESC` for descending order (reverse-chronological, with the + newest records first). This value corresponds to the `ascending_order` + query parameter used in the request. + source: + openapi: stenographer-openapi.json + ReturnPagedChats: + docs: A paginated list of chats returned from the server + properties: + page_number: + type: integer + docs: >- + The page number of the returned list. + + + This value corresponds to the `page_number` parameter specified in the + request. Pagination uses zero-based indexing. + page_size: + type: integer + docs: >- + The maximum number of items returned per page. + + + This value corresponds to the `page_size` parameter specified in the + request. + total_pages: + type: integer + docs: The total number of pages in the collection. + pagination_direction: + type: ReturnPagedChatsPaginationDirection + docs: >- + Indicates the order in which the paginated results are presented, + based on their creation date. + + + It shows `ASC` for ascending order (chronological, with the oldest + records first) or `DESC` for descending order (reverse-chronological, + with the newest records first). This value corresponds to the + `ascending_order` query parameter used in the request. + chats_page: + docs: >- + List of Chats and their metadata returned for the specified + `page_number` and `page_size`. + type: list + source: + openapi: stenographer-openapi.json + ReturnChatEventRole: + enum: + - USER + - AGENT + - SYSTEM + - TOOL + docs: >- + The role of the entity which generated the Chat Event. 
There are four + possible values: + + + - `USER`: The user, capable of sending user messages and interruptions. + + + - `AGENT`: The assistant, capable of sending agent messages. + + + - `SYSTEM`: The backend server, capable of transmitting errors. + + + - `TOOL`: The function calling mechanism. + source: + openapi: stenographer-openapi.json + ReturnChatEventType: + enum: + - SYSTEM_PROMPT + - USER_MESSAGE + - USER_INTERRUPTION + - AGENT_MESSAGE + - FUNCTION_CALL + - FUNCTION_CALL_RESPONSE + docs: >- + Type of Chat Event. There are six possible values: + + + - `SYSTEM_PROMPT`: Contains the system prompt for use in the session. + + + - `USER_MESSAGE`: Contains the message sent by the user. + + + - `USER_INTERRUPTION`: Contains an interruption made by the user while the + agent is speaking. + + + - `AGENT_MESSAGE`: Contains the assistant’s message, generated by Hume’s + eLLM and supplemental LLM. + + + - `FUNCTION_CALL`: Contains the invocation of a tool. + + + - `FUNCTION_CALL_RESPONSE`: Contains the tool response. + source: + openapi: stenographer-openapi.json + ReturnChatEvent: + docs: A description of a single event in a chat returned from the server + properties: + id: + type: string + docs: Identifier for a Chat Event. Formatted as a UUID. + chat_id: + type: string + docs: Identifier for the Chat this event occurred in. Formatted as a UUID. + timestamp: + type: long + docs: >- + Time at which the Chat Event occurred. Measured in seconds since the + Unix epoch. + role: + type: ReturnChatEventRole + docs: >- + The role of the entity which generated the Chat Event. There are four + possible values: + + + - `USER`: The user, capable of sending user messages and + interruptions. + + + - `AGENT`: The assistant, capable of sending agent messages. + + + - `SYSTEM`: The backend server, capable of transmitting errors. + + + - `TOOL`: The function calling mechanism. + type: + type: ReturnChatEventType + docs: >- + Type of Chat Event. There are six possible values: + + + - `SYSTEM_PROMPT`: Contains the system prompt for use in the session. + + + - `USER_MESSAGE`: Contains the message sent by the user. + + + - `USER_INTERRUPTION`: Contains an interruption made by the user while + the agent is speaking. + + + - `AGENT_MESSAGE`: Contains the assistant’s message, generated by + Hume’s eLLM and supplemental LLM. + + + - `FUNCTION_CALL`: Contains the invocation of a tool. + + + - `FUNCTION_CALL_RESPONSE`: Contains the tool response. + message_text: + type: optional + docs: >- + The text of the Chat Event. This field contains the message content + for each event type listed in the `type` field. + emotion_features: + type: optional + docs: >- + Stringified JSON containing the prosody model inference results. + + + EVI uses the prosody model to measure 48 expressions related to speech + and vocal characteristics. These results contain a detailed emotional + and tonal analysis of the audio. Scores typically range from 0 to 1, + with higher values indicating a stronger confidence level in the + measured attribute. + metadata: + type: optional + docs: Stringified JSON with additional metadata about the chat event. + source: + openapi: stenographer-openapi.json + ReturnChatPagedEventsStatus: + enum: + - ACTIVE + - USER_ENDED + - USER_TIMEOUT + - MAX_DURATION_TIMEOUT + - INACTIVITY_TIMEOUT + - ERROR + docs: >- + Indicates the current state of the chat. There are six possible statuses: + + + - `ACTIVE`: The chat is currently active and ongoing. + + + - `USER_ENDED`: The chat was manually ended by the user. 
+ + + - `USER_TIMEOUT`: The chat ended due to a user-defined timeout. + + + - `MAX_DURATION_TIMEOUT`: The chat ended because it reached the maximum + allowed duration. + + + - `INACTIVITY_TIMEOUT`: The chat ended due to an inactivity timeout. + + + - `ERROR`: The chat ended unexpectedly due to an error. + source: + openapi: stenographer-openapi.json + ReturnChatPagedEventsPaginationDirection: + enum: + - ASC + - DESC + docs: >- + Indicates the order in which the paginated results are presented, based on + their creation date. + + + It shows `ASC` for ascending order (chronological, with the oldest records + first) or `DESC` for descending order (reverse-chronological, with the + newest records first). This value corresponds to the `ascending_order` + query parameter used in the request. + source: + openapi: stenographer-openapi.json + ReturnChatPagedEvents: + docs: >- + A description of chat status with a paginated list of chat events returned + from the server + properties: + id: + type: string + docs: Identifier for a Chat. Formatted as a UUID. + chat_group_id: + type: string + docs: >- + Identifier for the Chat Group. Any chat resumed from this Chat will + have the same `chat_group_id`. Formatted as a UUID. + status: + type: ReturnChatPagedEventsStatus + docs: >- + Indicates the current state of the chat. There are six possible + statuses: + + + - `ACTIVE`: The chat is currently active and ongoing. + + + - `USER_ENDED`: The chat was manually ended by the user. + + + - `USER_TIMEOUT`: The chat ended due to a user-defined timeout. + + + - `MAX_DURATION_TIMEOUT`: The chat ended because it reached the + maximum allowed duration. + + + - `INACTIVITY_TIMEOUT`: The chat ended due to an inactivity timeout. + + + - `ERROR`: The chat ended unexpectedly due to an error. + start_timestamp: + type: long + docs: >- + Time at which the Chat started. Measured in seconds since the Unix + epoch. + end_timestamp: + type: optional docs: >- - Indicates the order in which the paginated results are presented, based on - their creation date. + Time at which the Chat ended. Measured in seconds since the Unix + epoch. + pagination_direction: + type: ReturnChatPagedEventsPaginationDirection + docs: >- + Indicates the order in which the paginated results are presented, + based on their creation date. + + + It shows `ASC` for ascending order (chronological, with the oldest + records first) or `DESC` for descending order (reverse-chronological, + with the newest records first). This value corresponds to the + `ascending_order` query parameter used in the request. + events_page: + docs: List of Chat Events for the specified `page_number` and `page_size`. + type: list + metadata: + type: optional + docs: Stringified JSON with additional metadata about the chat. + page_number: + type: integer + docs: >- + The page number of the returned list. - It shows `ASC` for ascending order (chronological, with the oldest records - first) or `DESC` for descending order (reverse-chronological, with the - newest records first). This value corresponds to the `ascending_order` - query parameter used in the request. - source: - openapi: stenographer-openapi.json - ReturnPagedChats: - docs: A paginated list of chats returned from the server - properties: - page_number: - type: integer - docs: >- - The page number of the returned list. - - - This value corresponds to the `page_number` parameter specified in the - request. Pagination uses zero-based indexing. 
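`emotion_features` on a Chat Event arrives as stringified JSON of prosody scores rather than a structured object, so clients have to parse it themselves. The sketch below assumes a flat expression-name-to-score map, which is an inference from the docs (48 expressions, scores roughly 0 to 1) rather than a shape spelled out in this schema.

```typescript
// Sketch: parse `emotion_features` and pick the strongest expressions.
// The flat { expressionName: score } shape is an assumption; only the field
// names (`type`, `message_text`, `emotion_features`) come from the schema.

interface ChatEventLike {
  type: string;              // e.g. "USER_MESSAGE", "AGENT_MESSAGE"
  message_text?: string;
  emotion_features?: string; // stringified JSON of prosody scores
}

function topExpressions(event: ChatEventLike, n = 3): Array<[string, number]> {
  if (!event.emotion_features) return [];
  const scores = JSON.parse(event.emotion_features) as Record<string, number>;
  return Object.entries(scores)
    .sort(([, a], [, b]) => b - a) // higher score = higher confidence
    .slice(0, n);
}

// Example: log the strongest expressions for each user message on a page of events.
function summarizeEvents(events: ChatEventLike[]): void {
  for (const event of events) {
    if (event.type !== "USER_MESSAGE") continue;
    console.log(event.message_text, topExpressions(event));
  }
}
```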
- page_size: - type: integer - docs: >- - The maximum number of items returned per page. - - - This value corresponds to the `page_size` parameter specified in the - request. - total_pages: - type: integer - docs: The total number of pages in the collection. - pagination_direction: - type: ReturnPagedChatsPaginationDirection - docs: >- - Indicates the order in which the paginated results are presented, - based on their creation date. + This value corresponds to the `page_number` parameter specified in the + request. Pagination uses zero-based indexing. + page_size: + type: integer + docs: >- + The maximum number of items returned per page. + + + This value corresponds to the `page_size` parameter specified in the + request. + total_pages: + type: integer + docs: The total number of pages in the collection. + config: optional + source: + openapi: stenographer-openapi.json + ReturnActiveChatCount: + docs: A description of current chat chat sessions for a user + properties: + timestamp: + type: long + docs: >- + The timestamp for when chat status was measured. Formatted as a Unix + epoch milliseconds. + total_user_active_chats: + type: integer + docs: The total number of active chats for this user. + max_allowed_active_chats: + type: optional + docs: The maximum number of concurrent active chats for this user. + more_active_chats_allowed: + type: boolean + docs: Boolean indicating if the user is allowed to start more chats. + per_tag: + type: optional>> + docs: Optional List of chat counts per tag. + source: + openapi: stenographer-openapi.json + ReturnActiveChatCountPerTag: + docs: A description of current chat chat sessions per tag + properties: + tag: + type: string + docs: User tag applied to a chat. + total_tag_active_chats: + type: integer + docs: The total number of active chats for this user with the specified tag. + source: + openapi: stenographer-openapi.json + ReturnChatGroup: + docs: A description of chat_group and its status + properties: + id: + type: string + docs: >- + Identifier for the Chat Group. Any Chat resumed from this Chat Group + will have the same `chat_group_id`. Formatted as a UUID. + first_start_timestamp: + type: long + docs: >- + Time at which the first Chat in this Chat Group was created. Measured + in seconds since the Unix epoch. + most_recent_start_timestamp: + type: long + docs: >- + Time at which the most recent Chat in this Chat Group was created. + Measured in seconds since the Unix epoch. + most_recent_chat_id: + type: optional + docs: >- + The `chat_id` of the most recent Chat in this Chat Group. Formatted as + a UUID. + num_chats: + type: integer + docs: The total number of Chats in this Chat Group. + active: + type: optional + docs: >- + Denotes whether there is an active Chat associated with this Chat + Group. + source: + openapi: stenographer-openapi.json + ReturnPagedChatGroupsPaginationDirection: + enum: + - ASC + - DESC + docs: >- + Indicates the order in which the paginated results are presented, based on + their creation date. + + + It shows `ASC` for ascending order (chronological, with the oldest records + first) or `DESC` for descending order (reverse-chronological, with the + newest records first). This value corresponds to the `ascending_order` + query parameter used in the request. + source: + openapi: stenographer-openapi.json + ReturnPagedChatGroups: + docs: A paginated list of chat_groups returned from the server + properties: + page_number: + type: integer + docs: >- + The page number of the returned list. 
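`ReturnActiveChatCount` gives a backend enough information to gate its own users before opening another EVI connection. A minimal sketch of that check follows; how the payload is fetched is out of scope here, and the inner types of the `optional` fields are assumptions based on the docs.

```typescript
// Sketch: decide whether to open another chat from a ReturnActiveChatCount-style
// payload. Field names follow the schema above; inner types are assumed.

interface ActiveChatCount {
  timestamp: number;                 // Unix epoch milliseconds, per the docs
  total_user_active_chats: number;
  max_allowed_active_chats?: number;
  more_active_chats_allowed: boolean;
  per_tag?: Array<{ tag: string; total_tag_active_chats: number }>;
}

function canStartChat(counts: ActiveChatCount, tag?: string): boolean {
  if (!counts.more_active_chats_allowed) return false;
  if (tag && counts.per_tag) {
    // Callers can layer their own per-tag budget on top of the global check.
    const entry = counts.per_tag.find((t) => t.tag === tag);
    console.log(`Active chats tagged "${tag}":`, entry?.total_tag_active_chats ?? 0);
  }
  return true;
}
```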
+ + This value corresponds to the `page_number` parameter specified in the + request. Pagination uses zero-based indexing. + page_size: + type: integer + docs: >- + The maximum number of items returned per page. - It shows `ASC` for ascending order (chronological, with the oldest - records first) or `DESC` for descending order (reverse-chronological, - with the newest records first). This value corresponds to the - `ascending_order` query parameter used in the request. - chats_page: - docs: >- - List of Chats and their metadata returned for the specified - `page_number` and `page_size`. - type: list - source: - openapi: stenographer-openapi.json - ReturnChatEventRole: - enum: - - USER - - AGENT - - SYSTEM - - TOOL + + This value corresponds to the `page_size` parameter specified in the + request. + total_pages: + type: integer + docs: The total number of pages in the collection. + pagination_direction: + type: ReturnPagedChatGroupsPaginationDirection + docs: >- + Indicates the order in which the paginated results are presented, + based on their creation date. + + + It shows `ASC` for ascending order (chronological, with the oldest + records first) or `DESC` for descending order (reverse-chronological, + with the newest records first). This value corresponds to the + `ascending_order` query parameter used in the request. + chat_groups_page: + docs: >- + List of Chat Groups and their metadata returned for the specified + `page_number` and `page_size`. + type: list + source: + openapi: stenographer-openapi.json + ReturnChatGroupPagedChatsPaginationDirection: + enum: + - ASC + - DESC + docs: >- + Indicates the order in which the paginated results are presented, based on + their creation date. + + + It shows `ASC` for ascending order (chronological, with the oldest records + first) or `DESC` for descending order (reverse-chronological, with the + newest records first). This value corresponds to the `ascending_order` + query parameter used in the request. + source: + openapi: stenographer-openapi.json + ReturnChatGroupPagedChats: + docs: >- + A description of chat_group and its status with a paginated list of each + chat in the chat_group + properties: + id: + type: string docs: >- - The role of the entity which generated the Chat Event. There are four - possible values: + Identifier for the Chat Group. Any Chat resumed from this Chat Group + will have the same `chat_group_id`. Formatted as a UUID. + first_start_timestamp: + type: long + docs: >- + Time at which the first Chat in this Chat Group was created. Measured + in seconds since the Unix epoch. + most_recent_start_timestamp: + type: long + docs: >- + Time at which the most recent Chat in this Chat Group was created. + Measured in seconds since the Unix epoch. + num_chats: + type: integer + docs: The total number of Chats associated with this Chat Group. + page_number: + type: integer + docs: >- + The page number of the returned list. - - `USER`: The user, capable of sending user messages and interruptions. + This value corresponds to the `page_number` parameter specified in the + request. Pagination uses zero-based indexing. + page_size: + type: integer + docs: >- + The maximum number of items returned per page. - - `AGENT`: The assistant, capable of sending agent messages. + This value corresponds to the `page_size` parameter specified in the + request. + total_pages: + type: integer + docs: The total number of pages in the collection. 
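Chat and Chat Group timestamps in this file are expressed in seconds since the Unix epoch (unlike `ReturnActiveChatCount.timestamp`, which is in milliseconds), so JavaScript clients need to convert before constructing a `Date`. A short sketch:

```typescript
// Sketch: Chat / Chat Group timestamps are Unix *seconds*; JavaScript's Date
// expects milliseconds, so multiply before formatting.

function unixSecondsToDate(unixSeconds: number): Date {
  return new Date(unixSeconds * 1_000);
}

function chatDurationSecs(chat: { start_timestamp: number; end_timestamp?: number }): number | undefined {
  if (chat.end_timestamp === undefined) return undefined; // chat may still be active
  return chat.end_timestamp - chat.start_timestamp;
}

// Example values are illustrative.
const chat = { start_timestamp: 1_716_240_000, end_timestamp: 1_716_240_600 };
console.log(unixSecondsToDate(chat.start_timestamp).toISOString());
console.log(chatDurationSecs(chat)); // 600 seconds
```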
+ pagination_direction: + type: ReturnChatGroupPagedChatsPaginationDirection + docs: >- + Indicates the order in which the paginated results are presented, + based on their creation date. + + + It shows `ASC` for ascending order (chronological, with the oldest + records first) or `DESC` for descending order (reverse-chronological, + with the newest records first). This value corresponds to the + `ascending_order` query parameter used in the request. + chats_page: + docs: List of Chats for the specified `page_number` and `page_size`. + type: list + active: + type: optional + docs: >- + Denotes whether there is an active Chat associated with this Chat + Group. + source: + openapi: stenographer-openapi.json + ReturnChatGroupPagedEventsPaginationDirection: + enum: + - ASC + - DESC + docs: >- + Indicates the order in which the paginated results are presented, based on + their creation date. + + + It shows `ASC` for ascending order (chronological, with the oldest records + first) or `DESC` for descending order (reverse-chronological, with the + newest records first). This value corresponds to the `ascending_order` + query parameter used in the request. + source: + openapi: stenographer-openapi.json + ReturnChatGroupPagedEvents: + docs: >- + A paginated list of chat events that occurred across chats in this + chat_group from the server + properties: + id: + type: string + docs: >- + Identifier for the Chat Group. Any Chat resumed from this Chat Group + will have the same `chat_group_id`. Formatted as a UUID. + page_number: + type: integer + docs: >- + The page number of the returned list. - - `SYSTEM`: The backend server, capable of transmitting errors. + This value corresponds to the `page_number` parameter specified in the + request. Pagination uses zero-based indexing. + page_size: + type: integer + docs: >- + The maximum number of items returned per page. - - `TOOL`: The function calling mechanism. - source: - openapi: stenographer-openapi.json - ReturnChatEventType: - enum: - - SYSTEM_PROMPT - - USER_MESSAGE - - USER_INTERRUPTION - - AGENT_MESSAGE - - FUNCTION_CALL - - FUNCTION_CALL_RESPONSE + This value corresponds to the `page_size` parameter specified in the + request. + total_pages: + type: integer + docs: The total number of pages in the collection. + pagination_direction: + type: ReturnChatGroupPagedEventsPaginationDirection + docs: >- + Indicates the order in which the paginated results are presented, + based on their creation date. + + + It shows `ASC` for ascending order (chronological, with the oldest + records first) or `DESC` for descending order (reverse-chronological, + with the newest records first). This value corresponds to the + `ascending_order` query parameter used in the request. + events_page: + docs: List of Chat Events for the specified `page_number` and `page_size`. + type: list + source: + openapi: stenographer-openapi.json + PostedPromptSpec: + docs: A Prompt associated with this Config. + properties: + version: optional + source: + openapi: stenographer-openapi.json + AssistantInput: + docs: When provided, the input is spoken by EVI. + properties: + type: + type: literal<"assistant_input"> + docs: >- + The type of message sent through the socket; must be `assistant_input` + for our server to correctly identify and process it as an Assistant + Input message. + custom_session_id: + type: optional + docs: >- + Used to manage conversational state, correlate frontend and backend + data, and persist conversations across EVI sessions. 
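`ReturnChatGroupPagedEvents` exposes every event across the chats in a group, which makes it possible to rebuild a transcript for a whole resumed conversation. The sketch below pages through those events; the endpoint path and the boolean form of `ascending_order` are assumptions for illustration, while the field names come from the schema above.

```typescript
// Sketch: rebuild a transcript for a Chat Group by paging through its events.
// The chat_groups events path is an assumption; `events_page`, `page_number`,
// `total_pages`, `role`, `type`, and `message_text` follow the schema above.

interface GroupEventsPage {
  page_number: number;
  total_pages: number;
  events_page: Array<{ role: string; type: string; message_text?: string }>;
}

async function chatGroupTranscript(chatGroupId: string, apiKey: string): Promise<string[]> {
  const lines: string[] = [];
  for (let page = 0; ; page++) {
    const url =
      `https://api.hume.ai/v0/evi/chat_groups/${chatGroupId}/events` + // assumed path
      `?page_number=${page}&page_size=100&ascending_order=true`;       // ASC = chronological
    const res = await fetch(url, { headers: { "X-Hume-Api-Key": apiKey } });
    if (!res.ok) throw new Error(`Failed to list events: HTTP ${res.status}`);
    const body = (await res.json()) as GroupEventsPage;
    for (const event of body.events_page) {
      if (event.type === "USER_MESSAGE" || event.type === "AGENT_MESSAGE") {
        lines.push(`${event.role}: ${event.message_text ?? ""}`);
      }
    }
    if (page + 1 >= body.total_pages) break;
  }
  return lines;
}
```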
+ text: + type: string docs: >- - Type of Chat Event. There are six possible values: + Assistant text to synthesize into spoken audio and insert into the + conversation. + + + EVI uses this text to generate spoken audio using our proprietary + expressive text-to-speech model. Our model adds appropriate emotional + inflections and tones to the text based on the user’s expressions and + the context of the conversation. The synthesized audio is streamed + back to the user as an [Assistant + Message](/reference/empathic-voice-interface-evi/chat/chat#receive.Assistant%20Message.type). + source: + openapi: assistant-asyncapi.json + AudioConfiguration: + properties: + encoding: + type: Encoding + docs: Encoding format of the audio input, such as `linear16`. + channels: + type: integer + docs: Number of audio channels. + sample_rate: + type: integer + docs: >- + Audio sample rate. Number of samples per second in the audio input, + measured in Hertz. + source: + openapi: assistant-asyncapi.json + AudioInput: + docs: When provided, the input is audio. + properties: + type: + type: literal<"audio_input"> + docs: >- + The type of message sent through the socket; must be `audio_input` for + our server to correctly identify and process it as an Audio Input + message. - - `SYSTEM_PROMPT`: Contains the system prompt for use in the session. + This message is used for sending audio input data to EVI for + processing and expression measurement. Audio data should be sent as a + continuous stream, encoded in Base64. + custom_session_id: + type: optional + docs: >- + Used to manage conversational state, correlate frontend and backend + data, and persist conversations across EVI sessions. + data: + type: string + docs: >- + Base64 encoded audio input to insert into the conversation. + + + The content of an Audio Input message is treated as the user’s speech + to EVI and must be streamed continuously. Pre-recorded audio files are + not supported. + + + For optimal transcription quality, the audio data should be + transmitted in small chunks. + + + Hume recommends streaming audio with a buffer window of 20 + milliseconds (ms), or 100 milliseconds (ms) for web applications. + source: + openapi: assistant-asyncapi.json + BuiltInTool: + enum: + - web_search + - hang_up + docs: >- + Name of the built-in tool. Set to `web_search` to equip EVI with the + built-in Web Search tool. + source: + openapi: assistant-asyncapi.json + BuiltinToolConfig: + properties: + name: + type: BuiltInTool + fallback_content: + type: optional + docs: >- + Optional text passed to the supplemental LLM if the tool call fails. + The LLM then uses this text to generate a response back to the user, + ensuring continuity in the conversation. + source: + openapi: assistant-asyncapi.json + Context: + properties: + type: + type: optional + docs: >- + The persistence level of the injected context. Specifies how long the + injected context will remain active in the session. - - `USER_MESSAGE`: Contains the message sent by the user. + There are three possible context types: - - `USER_INTERRUPTION`: Contains an interruption made by the user while the - agent is speaking. + - **Persistent**: The context is appended to all user messages for the + duration of the session. - - `AGENT_MESSAGE`: Contains the assistant’s message, generated by Hume’s - eLLM and supplemental LLM. + - **Temporary**: The context is appended only to the next user + message. + - **Editable**: The original context is updated to reflect the new context. 
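`audio_input` messages carry base64-encoded audio and are meant to be streamed continuously in small chunks (around 20 ms, or about 100 ms for web applications). A sketch of the sending side, assuming an already-open WebSocket and a hypothetical capture hook for raw linear16 PCM:

```typescript
// Sketch: stream raw linear16 PCM as `audio_input` messages. `getNextPcmChunk`
// is a hypothetical stand-in for a real capture pipeline; base64 encoding and
// the ~100 ms cadence for web apps follow the docs above.

declare function getNextPcmChunk(): Uint8Array | null; // hypothetical capture hook

function sendAudioChunk(socket: WebSocket, pcm: Uint8Array, customSessionId?: string): void {
  const base64 = Buffer.from(pcm).toString("base64"); // use btoa(...) in browsers
  socket.send(
    JSON.stringify({
      type: "audio_input",
      data: base64,
      ...(customSessionId ? { custom_session_id: customSessionId } : {}),
    }),
  );
}

function startStreaming(socket: WebSocket): ReturnType<typeof setInterval> {
  // ~100 ms buffer window, the guidance given for web applications.
  return setInterval(() => {
    const chunk = getNextPcmChunk();
    if (chunk) sendAudioChunk(socket, chunk);
  }, 100);
}
```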
- - `FUNCTION_CALL`: Contains the invocation of a tool. + If the type is not specified, it will default to `temporary`. + text: + type: string + docs: >- + The context to be injected into the conversation. Helps inform the + LLM's response by providing relevant information about the ongoing + conversation. + + + This text will be appended to the end of user messages based on the + chosen persistence level. For example, if you want to remind EVI of + its role as a helpful weather assistant, the context you insert will + be appended to the end of user messages as `{Context: You are a + helpful weather assistant}`. + source: + openapi: assistant-asyncapi.json + ContextType: + enum: + - editable + - persistent + - temporary + source: + openapi: assistant-asyncapi.json + Encoding: + type: literal<"linear16"> + ErrorLevel: + type: literal<"warn"> + PauseAssistantMessage: + docs: >- + Pause responses from EVI. Chat history is still saved and sent after + resuming. + properties: + type: + type: literal<"pause_assistant_message"> + docs: >- + The type of message sent through the socket; must be + `pause_assistant_message` for our server to correctly identify and + process it as a Pause Assistant message. + + + Once this message is sent, EVI will not respond until a [Resume + Assistant + message](/reference/empathic-voice-interface-evi/chat/chat#send.Resume%20Assistant%20Message.type) + is sent. When paused, EVI won’t respond, but transcriptions of your + audio inputs will still be recorded. + custom_session_id: + type: optional + docs: >- + Used to manage conversational state, correlate frontend and backend + data, and persist conversations across EVI sessions. + source: + openapi: assistant-asyncapi.json + ResumeAssistantMessage: + docs: >- + Resume responses from EVI. Chat history sent while paused will now be + sent. + properties: + type: + type: literal<"resume_assistant_message"> + docs: >- + The type of message sent through the socket; must be + `resume_assistant_message` for our server to correctly identify and + process it as a Resume Assistant message. + + + Upon resuming, if any audio input was sent during the pause, EVI will + retain context from all messages sent but only respond to the last + user message. (e.g., If you ask EVI two questions while paused and + then send a `resume_assistant_message`, EVI will respond to the second + question and have added the first question to its conversation + context.) + custom_session_id: + type: optional + docs: >- + Used to manage conversational state, correlate frontend and backend + data, and persist conversations across EVI sessions. + source: + openapi: assistant-asyncapi.json + SessionSettings: + docs: Settings for this chat session. + properties: + type: + type: literal<"session_settings"> + docs: >- + The type of message sent through the socket; must be + `session_settings` for our server to correctly identify and process it + as a Session Settings message. - - `FUNCTION_CALL_RESPONSE`: Contains the tool response. - source: - openapi: stenographer-openapi.json - ReturnChatEvent: - docs: A description of a single event in a chat returned from the server - properties: - id: - type: string - docs: Identifier for a Chat Event. Formatted as a UUID. - chat_id: - type: string - docs: Identifier for the Chat this event occurred in. Formatted as a UUID. - timestamp: - type: long - docs: >- - Time at which the Chat Event occurred. Measured in seconds since the - Unix epoch. 
- role: - type: ReturnChatEventRole - docs: >- - The role of the entity which generated the Chat Event. There are four - possible values: + Session settings are temporary and apply only to the current Chat + session. These settings can be adjusted dynamically based on the + requirements of each session to ensure optimal performance and user + experience. - - `USER`: The user, capable of sending user messages and - interruptions. + For more information, please refer to the [Session Settings + section](/docs/empathic-voice-interface-evi/configuration#session-settings) + on the EVI Configuration page. + custom_session_id: + type: optional + docs: >- + Unique identifier for the session. Used to manage conversational + state, correlate frontend and backend data, and persist conversations + across EVI sessions. - - `AGENT`: The assistant, capable of sending agent messages. + If included, the response sent from Hume to your backend will include + this ID. This allows you to correlate frontend users with their + incoming messages. - - `SYSTEM`: The backend server, capable of transmitting errors. + It is recommended to pass a `custom_session_id` if you are using a + Custom Language Model. Please see our guide to [using a custom + language + model](/docs/empathic-voice-interface-evi/custom-language-model) with + EVI to learn more. + system_prompt: + type: optional + docs: >- + Instructions used to shape EVI’s behavior, responses, and style for + the session. - - `TOOL`: The function calling mechanism. - type: - type: ReturnChatEventType - docs: >- - Type of Chat Event. There are six possible values: + When included in a Session Settings message, the provided Prompt + overrides the existing one specified in the EVI configuration. If no + Prompt was defined in the configuration, this Prompt will be the one + used for the session. - - `SYSTEM_PROMPT`: Contains the system prompt for use in the session. + You can use the Prompt to define a specific goal or role for EVI, + specifying how it should act or what it should focus on during the + conversation. For example, EVI can be instructed to act as a customer + support representative, a fitness coach, or a travel advisor, each + with its own set of behaviors and response styles. - - `USER_MESSAGE`: Contains the message sent by the user. + For help writing a system prompt, see our [Prompting + Guide](/docs/empathic-voice-interface-evi/prompting). + context: + type: optional + docs: >- + Allows developers to inject additional context into the conversation, + which is appended to the end of user messages for the session. - - `USER_INTERRUPTION`: Contains an interruption made by the user while - the agent is speaking. + When included in a Session Settings message, the provided context can + be used to remind the LLM of its role in every user message, prevent + it from forgetting important details, or add new relevant information + to the conversation. - - `AGENT_MESSAGE`: Contains the assistant’s message, generated by - Hume’s eLLM and supplemental LLM. + Set to `null` to disable context injection. + audio: + type: optional + docs: >- + Configuration details for the audio input used during the session. + Ensures the audio is being correctly set up for processing. + + + This optional field is only required when the audio input is encoded + in PCM Linear 16 (16-bit, little-endian, signed PCM WAV data). 
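The pause and resume messages described above let a client temporarily mute EVI's responses without dropping the socket: audio sent while paused is still transcribed, and after resuming EVI replies only to the last user message. A minimal toggle sketch:

```typescript
// Sketch: pause and resume EVI's responses over an open WebSocket. While
// paused, transcription continues; after resuming, EVI answers only the most
// recent user message sent during the pause.

function pauseAssistant(socket: WebSocket, customSessionId?: string): void {
  socket.send(
    JSON.stringify({
      type: "pause_assistant_message",
      ...(customSessionId ? { custom_session_id: customSessionId } : {}),
    }),
  );
}

function resumeAssistant(socket: WebSocket, customSessionId?: string): void {
  socket.send(
    JSON.stringify({
      type: "resume_assistant_message",
      ...(customSessionId ? { custom_session_id: customSessionId } : {}),
    }),
  );
}
```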
For + detailed instructions on how to configure session settings for PCM + Linear 16 audio, please refer to the [Session Settings + section](/docs/empathic-voice-interface-evi/configuration#session-settings) + on the EVI Configuration page. + language_model_api_key: + type: optional + docs: >- + Third party API key for the supplemental language model. - - `FUNCTION_CALL`: Contains the invocation of a tool. + When provided, EVI will use this key instead of Hume’s API key for the + supplemental LLM. This allows you to bypass rate limits and utilize + your own API key as needed. + tools: + type: optional> + docs: >- + List of user-defined tools to enable for the session. - - `FUNCTION_CALL_RESPONSE`: Contains the tool response. - message_text: - type: optional - docs: >- - The text of the Chat Event. This field contains the message content - for each event type listed in the `type` field. - emotion_features: - type: optional - docs: >- - Stringified JSON containing the prosody model inference results. + Tools are resources used by EVI to perform various tasks, such as + searching the web or calling external APIs. Built-in tools, like web + search, are natively integrated, while user-defined tools are created + and invoked by the user. To learn more, see our [Tool Use + Guide](/docs/empathic-voice-interface-evi/tool-use). + builtin_tools: + type: optional> + docs: >- + List of built-in tools to enable for the session. + + + Tools are resources used by EVI to perform various tasks, such as + searching the web or calling external APIs. Built-in tools, like web + search, are natively integrated, while user-defined tools are created + and invoked by the user. To learn more, see our [Tool Use + Guide](/docs/empathic-voice-interface-evi/tool-use). + + + Currently, the only built-in tool Hume provides is **Web Search**. + When enabled, Web Search equips EVI with the ability to search the web + for up-to-date information. + metadata: + type: optional> + variables: + type: optional> + docs: Dynamic values that can be used to populate EVI prompts. + source: + openapi: assistant-asyncapi.json + Tool: + properties: + type: + type: ToolType + docs: Type of tool. Set to `function` for user-defined tools. + name: + type: string + docs: Name of the user-defined tool to be enabled. + parameters: + type: string + docs: >- + Parameters of the tool. Is a stringified JSON schema. - EVI uses the prosody model to measure 48 expressions related to speech - and vocal characteristics. These results contain a detailed emotional - and tonal analysis of the audio. Scores typically range from 0 to 1, - with higher values indicating a stronger confidence level in the - measured attribute. - metadata: - type: optional - docs: Stringified JSON with additional metadata about the chat event. - source: - openapi: stenographer-openapi.json - ReturnChatPagedEventsStatus: - enum: - - ACTIVE - - USER_ENDED - - USER_TIMEOUT - - MAX_DURATION_TIMEOUT - - INACTIVITY_TIMEOUT - - ERROR + These parameters define the inputs needed for the tool’s execution, + including the expected data type and description for each input field. + Structured as a JSON schema, this format ensures the tool receives + data in the expected format. + description: + type: optional docs: >- - Indicates the current state of the chat. There are six possible statuses: + An optional description of what the tool does, used by the + supplemental LLM to choose when and how to call the function. 
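Putting several of the session-settings fields together: the sketch below builds one `session_settings` message that sets a system prompt, injects temporary context, declares linear16 audio, and registers a user-defined tool whose `parameters` field is a stringified JSON schema. All concrete values (prompt text, tool name, schema, sample rate) are illustrative.

```typescript
// Sketch: a `session_settings` message combining fields described above.
// Every concrete value here is illustrative, not prescribed by the API.

const sessionSettings = {
  type: "session_settings",
  custom_session_id: "user-1234",  // lets a backend correlate messages with a user
  system_prompt: "You are a concise, friendly weather assistant.",
  context: {
    type: "temporary",             // appended only to the next user message
    text: "The user is planning a weekend trip to Kyoto.",
  },
  audio: {
    encoding: "linear16",          // 16-bit, little-endian, signed PCM
    channels: 1,
    sample_rate: 16_000,           // illustrative sample rate
  },
  tools: [
    {
      type: "function",
      name: "get_current_weather", // illustrative user-defined tool
      description: "Look up the current weather for a city.",
      parameters: JSON.stringify({ // stringified JSON schema, per the docs
        type: "object",
        properties: { city: { type: "string" } },
        required: ["city"],
      }),
      fallback_content: "Weather data is unavailable right now.",
    },
  ],
};

// socket.send(JSON.stringify(sessionSettings));
```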
+ fallback_content: + type: optional + docs: >- + Optional text passed to the supplemental LLM if the tool call fails. + The LLM then uses this text to generate a response back to the user, + ensuring continuity in the conversation. + source: + openapi: assistant-asyncapi.json + ToolErrorMessage: + docs: When provided, the output is a function call error. + properties: + type: + type: literal<"tool_error"> + docs: >- + The type of message sent through the socket; for a Tool Error message, + this must be `tool_error`. - - `ACTIVE`: The chat is currently active and ongoing. + Upon receiving a [Tool Call + message](/reference/empathic-voice-interface-evi/chat/chat#receive.Tool%20Call%20Message.type) + and failing to invoke the function, this message is sent to notify EVI + of the tool's failure. + custom_session_id: + type: optional + docs: >- + Used to manage conversational state, correlate frontend and backend + data, and persist conversations across EVI sessions. + tool_type: + type: optional + docs: >- + Type of tool called. Either `builtin` for natively implemented tools, + like web search, or `function` for user-defined tools. + tool_call_id: + type: string + docs: >- + The unique identifier for a specific tool call instance. - - `USER_ENDED`: The chat was manually ended by the user. + This ID is used to track the request and response of a particular tool + invocation, ensuring that the Tool Error message is linked to the + appropriate tool call request. The specified `tool_call_id` must match + the one received in the [Tool Call + message](/reference/empathic-voice-interface-evi/chat/chat#receive.Tool%20Call%20Message.type). + content: + type: optional + docs: >- + Optional text passed to the supplemental LLM in place of the tool call + result. The LLM then uses this text to generate a response back to the + user, ensuring continuity in the conversation if the tool errors. + error: + type: string + docs: Error message from the tool call, not exposed to the LLM or user. + code: + type: optional + docs: Error code. Identifies the type of error encountered. + level: + type: optional + docs: >- + Indicates the severity of an error; for a Tool Error message, this + must be `warn` to signal an unexpected event. + source: + openapi: assistant-asyncapi.json + ToolResponseMessage: + docs: When provided, the output is a function call response. + properties: + type: + type: literal<"tool_response"> + docs: >- + The type of message sent through the socket; for a Tool Response + message, this must be `tool_response`. - - `USER_TIMEOUT`: The chat ended due to a user-defined timeout. + Upon receiving a [Tool Call + message](/reference/empathic-voice-interface-evi/chat/chat#receive.Tool%20Call%20Message.type) + and successfully invoking the function, this message is sent to convey + the result of the function call back to EVI. + custom_session_id: + type: optional + docs: >- + Used to manage conversational state, correlate frontend and backend + data, and persist conversations across EVI sessions. + tool_call_id: + type: string + docs: >- + The unique identifier for a specific tool call instance. - - `MAX_DURATION_TIMEOUT`: The chat ended because it reached the maximum - allowed duration. + This ID is used to track the request and response of a particular tool + invocation, ensuring that the correct response is linked to the + appropriate request. 
The specified `tool_call_id` must match the one + received in the [Tool Call + message](/reference/empathic-voice-interface-evi/chat/chat#receive.Tool%20Call%20Message.tool_call_id). + content: + type: string + docs: >- + Return value of the tool call. Contains the output generated by the + tool to pass back to EVI. + tool_name: + type: optional + docs: >- + Name of the tool. - - `INACTIVITY_TIMEOUT`: The chat ended due to an inactivity timeout. + Include this optional field to help the supplemental LLM identify + which tool generated the response. The specified `tool_name` must + match the one received in the [Tool Call + message](/reference/empathic-voice-interface-evi/chat/chat#receive.Tool%20Call%20Message.type). + tool_type: + type: optional + docs: >- + Type of tool called. Either `builtin` for natively implemented tools, + like web search, or `function` for user-defined tools. + source: + openapi: assistant-asyncapi.json + ToolType: + enum: + - builtin + - function + source: + openapi: assistant-asyncapi.json + UserInput: + docs: User text to insert into the conversation. + properties: + type: + type: literal<"user_input"> + docs: >- + The type of message sent through the socket; must be `user_input` for + our server to correctly identify and process it as a User Input + message. + custom_session_id: + type: optional + docs: >- + Used to manage conversational state, correlate frontend and backend + data, and persist conversations across EVI sessions. + text: + type: string + docs: >- + User text to insert into the conversation. Text sent through a User + Input message is treated as the user’s speech to EVI. EVI processes + this input and provides a corresponding response. + + + Expression measurement results are not available for User Input + messages, as the prosody model relies on audio input and cannot + process text alone. + source: + openapi: assistant-asyncapi.json + AssistantEnd: + docs: When provided, the output is an assistant end message. + properties: + type: + type: literal<"assistant_end"> + docs: >- + The type of message sent through the socket; for an Assistant End + message, this must be `assistant_end`. - - `ERROR`: The chat ended unexpectedly due to an error. - source: - openapi: stenographer-openapi.json - ReturnChatPagedEventsPaginationDirection: - enum: - - ASC - - DESC + This message indicates the conclusion of the assistant’s response, + signaling that the assistant has finished speaking for the current + conversational turn. + custom_session_id: + type: optional docs: >- - Indicates the order in which the paginated results are presented, based on - their creation date. + Used to manage conversational state, correlate frontend and backend + data, and persist conversations across EVI sessions. + source: + openapi: assistant-asyncapi.json + AssistantMessage: + docs: When provided, the output is an assistant message. + properties: + type: + type: literal<"assistant_message"> + docs: >- + The type of message sent through the socket; for an Assistant Message, + this must be `assistant_message`. - It shows `ASC` for ascending order (chronological, with the oldest records - first) or `DESC` for descending order (reverse-chronological, with the - newest records first). This value corresponds to the `ascending_order` - query parameter used in the request. 
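When EVI asks for a user-defined tool to be run (via the Tool Call message referenced above, which is not defined in this file), the client invokes its own function and replies with `tool_response` on success or `tool_error` on failure, echoing the same `tool_call_id`. The incoming call shape and the `runTool` dispatcher below are assumptions for illustration; the outgoing message fields follow the schema above.

```typescript
// Sketch: answer a tool call with `tool_response` or `tool_error`, echoing the
// same `tool_call_id` so the reply is linked to the right invocation. The
// incoming shape and `runTool` are stand-ins for application code.

interface ToolCallLike {
  tool_call_id: string;
  name: string;
  parameters: string; // stringified JSON arguments (assumed shape)
}

declare function runTool(name: string, args: unknown): Promise<string>; // hypothetical dispatcher

async function handleToolCall(socket: WebSocket, call: ToolCallLike): Promise<void> {
  try {
    const result = await runTool(call.name, JSON.parse(call.parameters));
    socket.send(
      JSON.stringify({
        type: "tool_response",
        tool_call_id: call.tool_call_id,
        content: result,    // passed back to EVI / the supplemental LLM
        tool_name: call.name,
        tool_type: "function",
      }),
    );
  } catch (err) {
    socket.send(
      JSON.stringify({
        type: "tool_error",
        tool_call_id: call.tool_call_id,
        error: String(err), // not exposed to the LLM or the user
        content: "The tool failed; please carry on without it.",
        level: "warn",
        tool_type: "function",
      }),
    );
  }
}
```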
- source: - openapi: stenographer-openapi.json - ReturnChatPagedEvents: + This message contains both a transcript of the assistant’s response + and the expression measurement predictions of the assistant’s audio + output. + custom_session_id: + type: optional + docs: >- + Used to manage conversational state, correlate frontend and backend + data, and persist conversations across EVI sessions. + id: + type: optional + docs: >- + ID of the assistant message. Allows the Assistant Message to be + tracked and referenced. + message: + type: ChatMessage + docs: Transcript of the message. + models: + type: Inference + docs: Inference model results. + from_text: + type: boolean docs: >- - A description of chat status with a paginated list of chat events returned - from the server - properties: - id: - type: string - docs: Identifier for a Chat. Formatted as a UUID. - chat_group_id: - type: string - docs: >- - Identifier for the Chat Group. Any chat resumed from this Chat will - have the same `chat_group_id`. Formatted as a UUID. - status: - type: ReturnChatPagedEventsStatus - docs: >- - Indicates the current state of the chat. There are six possible - statuses: - - - - `ACTIVE`: The chat is currently active and ongoing. - - - - `USER_ENDED`: The chat was manually ended by the user. - - - - `USER_TIMEOUT`: The chat ended due to a user-defined timeout. - - - - `MAX_DURATION_TIMEOUT`: The chat ended because it reached the - maximum allowed duration. - - - - `INACTIVITY_TIMEOUT`: The chat ended due to an inactivity timeout. - - - - `ERROR`: The chat ended unexpectedly due to an error. - start_timestamp: - type: long - docs: >- - Time at which the Chat started. Measured in seconds since the Unix - epoch. - end_timestamp: - type: optional - docs: >- - Time at which the Chat ended. Measured in seconds since the Unix - epoch. - pagination_direction: - type: ReturnChatPagedEventsPaginationDirection - docs: >- - Indicates the order in which the paginated results are presented, - based on their creation date. - - - It shows `ASC` for ascending order (chronological, with the oldest - records first) or `DESC` for descending order (reverse-chronological, - with the newest records first). This value corresponds to the - `ascending_order` query parameter used in the request. - events_page: - docs: List of Chat Events for the specified `page_number` and `page_size`. - type: list - metadata: - type: optional - docs: Stringified JSON with additional metadata about the chat. - page_number: - type: integer - docs: >- - The page number of the returned list. - - - This value corresponds to the `page_number` parameter specified in the - request. Pagination uses zero-based indexing. - page_size: - type: integer - docs: >- - The maximum number of items returned per page. - - - This value corresponds to the `page_size` parameter specified in the - request. - total_pages: - type: integer - docs: The total number of pages in the collection. - config: optional - source: - openapi: stenographer-openapi.json - ReturnActiveChatCount: - docs: A description of current chat chat sessions for a user - properties: - timestamp: - type: long - docs: >- - The timestamp for when chat status was measured. Formatted as a Unix - epoch milliseconds. - total_user_active_chats: - type: integer - docs: The total number of active chats for this user. - max_allowed_active_chats: - type: optional - docs: The maximum number of concurrent active chats for this user. 
- more_active_chats_allowed: - type: boolean - docs: Boolean indicating if the user is allowed to start more chats. - per_tag: - type: optional>> - docs: Optional List of chat counts per tag. - source: - openapi: stenographer-openapi.json - ReturnActiveChatCountPerTag: - docs: A description of current chat chat sessions per tag - properties: - tag: - type: string - docs: User tag applied to a chat. - total_tag_active_chats: - type: integer - docs: The total number of active chats for this user with the specified tag. - source: - openapi: stenographer-openapi.json - ReturnChatGroup: - docs: A description of chat_group and its status - properties: - id: - type: string - docs: >- - Identifier for the Chat Group. Any Chat resumed from this Chat Group - will have the same `chat_group_id`. Formatted as a UUID. - first_start_timestamp: - type: long - docs: >- - Time at which the first Chat in this Chat Group was created. Measured - in seconds since the Unix epoch. - most_recent_start_timestamp: - type: long - docs: >- - Time at which the most recent Chat in this Chat Group was created. - Measured in seconds since the Unix epoch. - most_recent_chat_id: - type: optional - docs: >- - The `chat_id` of the most recent Chat in this Chat Group. Formatted as - a UUID. - num_chats: - type: integer - docs: The total number of Chats in this Chat Group. - active: - type: optional - docs: >- - Denotes whether there is an active Chat associated with this Chat - Group. - source: - openapi: stenographer-openapi.json - ReturnPagedChatGroupsPaginationDirection: - enum: - - ASC - - DESC - docs: >- - Indicates the order in which the paginated results are presented, based on - their creation date. - - - It shows `ASC` for ascending order (chronological, with the oldest records - first) or `DESC` for descending order (reverse-chronological, with the - newest records first). This value corresponds to the `ascending_order` - query parameter used in the request. - source: - openapi: stenographer-openapi.json - ReturnPagedChatGroups: - docs: A paginated list of chat_groups returned from the server - properties: - page_number: - type: integer - docs: >- - The page number of the returned list. - - - This value corresponds to the `page_number` parameter specified in the - request. Pagination uses zero-based indexing. - page_size: - type: integer - docs: >- - The maximum number of items returned per page. - - - This value corresponds to the `page_size` parameter specified in the - request. - total_pages: - type: integer - docs: The total number of pages in the collection. - pagination_direction: - type: ReturnPagedChatGroupsPaginationDirection - docs: >- - Indicates the order in which the paginated results are presented, - based on their creation date. - - - It shows `ASC` for ascending order (chronological, with the oldest - records first) or `DESC` for descending order (reverse-chronological, - with the newest records first). This value corresponds to the - `ascending_order` query parameter used in the request. - chat_groups_page: - docs: >- - List of Chat Groups and their metadata returned for the specified - `page_number` and `page_size`. - type: list - source: - openapi: stenographer-openapi.json - ReturnChatGroupPagedChatsPaginationDirection: - enum: - - ASC - - DESC - docs: >- - Indicates the order in which the paginated results are presented, based on - their creation date. 
- - - It shows `ASC` for ascending order (chronological, with the oldest records - first) or `DESC` for descending order (reverse-chronological, with the - newest records first). This value corresponds to the `ascending_order` - query parameter used in the request. - source: - openapi: stenographer-openapi.json - ReturnChatGroupPagedChats: - docs: >- - A description of chat_group and its status with a paginated list of each - chat in the chat_group - properties: - id: - type: string - docs: >- - Identifier for the Chat Group. Any Chat resumed from this Chat Group - will have the same `chat_group_id`. Formatted as a UUID. - first_start_timestamp: - type: long - docs: >- - Time at which the first Chat in this Chat Group was created. Measured - in seconds since the Unix epoch. - most_recent_start_timestamp: - type: long - docs: >- - Time at which the most recent Chat in this Chat Group was created. - Measured in seconds since the Unix epoch. - num_chats: - type: integer - docs: The total number of Chats associated with this Chat Group. - page_number: - type: integer - docs: >- - The page number of the returned list. - - - This value corresponds to the `page_number` parameter specified in the - request. Pagination uses zero-based indexing. - page_size: - type: integer - docs: >- - The maximum number of items returned per page. - - - This value corresponds to the `page_size` parameter specified in the - request. - total_pages: - type: integer - docs: The total number of pages in the collection. - pagination_direction: - type: ReturnChatGroupPagedChatsPaginationDirection - docs: >- - Indicates the order in which the paginated results are presented, - based on their creation date. - - - It shows `ASC` for ascending order (chronological, with the oldest - records first) or `DESC` for descending order (reverse-chronological, - with the newest records first). This value corresponds to the - `ascending_order` query parameter used in the request. - chats_page: - docs: List of Chats for the specified `page_number` and `page_size`. - type: list - active: - type: optional - docs: >- - Denotes whether there is an active Chat associated with this Chat - Group. - source: - openapi: stenographer-openapi.json - ReturnChatGroupPagedEventsPaginationDirection: - enum: - - ASC - - DESC - docs: >- - Indicates the order in which the paginated results are presented, based on - their creation date. - - - It shows `ASC` for ascending order (chronological, with the oldest records - first) or `DESC` for descending order (reverse-chronological, with the - newest records first). This value corresponds to the `ascending_order` - query parameter used in the request. - source: - openapi: stenographer-openapi.json - ReturnChatGroupPagedEvents: - docs: >- - A paginated list of chat events that occurred across chats in this - chat_group from the server - properties: - id: - type: string - docs: >- - Identifier for the Chat Group. Any Chat resumed from this Chat Group - will have the same `chat_group_id`. Formatted as a UUID. - page_number: - type: integer - docs: >- - The page number of the returned list. - - - This value corresponds to the `page_number` parameter specified in the - request. Pagination uses zero-based indexing. - page_size: - type: integer - docs: >- - The maximum number of items returned per page. - - - This value corresponds to the `page_size` parameter specified in the - request. - total_pages: - type: integer - docs: The total number of pages in the collection. 
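The paged `Return*` schemas above all share the same pagination fields (`page_number`, `page_size`, `total_pages`, `pagination_direction`). As a minimal sketch of how those fields fit together (not part of this definition file; `fetch_page` is a hypothetical callable that performs the actual request and returns the parsed JSON body):

```python
# Sketch: walk a zero-indexed paged response such as ReturnChatGroupPagedChats.
def iter_chats(fetch_page, page_size=10):
    page_number = 0
    while True:
        body = fetch_page(page_number=page_number, page_size=page_size)
        for chat in body.get("chats_page", []):
            yield chat
        # page_number is zero-based, so the final page is total_pages - 1.
        if page_number >= body["total_pages"] - 1:
            break
        page_number += 1
```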
- pagination_direction: - type: ReturnChatGroupPagedEventsPaginationDirection - docs: >- - Indicates the order in which the paginated results are presented, - based on their creation date. - - - It shows `ASC` for ascending order (chronological, with the oldest - records first) or `DESC` for descending order (reverse-chronological, - with the newest records first). This value corresponds to the - `ascending_order` query parameter used in the request. - events_page: - docs: List of Chat Events for the specified `page_number` and `page_size`. - type: list - source: - openapi: stenographer-openapi.json - PostedPromptSpec: - docs: A Prompt associated with this Config. - properties: - version: optional - source: - openapi: stenographer-openapi.json - AssistantInput: - docs: When provided, the input is spoken by EVI. - properties: - type: - type: literal<"assistant_input"> - docs: >- - The type of message sent through the socket; must be `assistant_input` - for our server to correctly identify and process it as an Assistant - Input message. - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - text: - type: string - docs: >- - Assistant text to synthesize into spoken audio and insert into the - conversation. - - - EVI uses this text to generate spoken audio using our proprietary - expressive text-to-speech model. Our model adds appropriate emotional - inflections and tones to the text based on the user’s expressions and - the context of the conversation. The synthesized audio is streamed - back to the user as an [Assistant - Message](/reference/empathic-voice-interface-evi/chat/chat#receive.Assistant%20Message.type). - source: - openapi: assistant-asyncapi.json - AudioConfiguration: - properties: - encoding: - type: Encoding - docs: Encoding format of the audio input, such as `linear16`. - channels: - type: integer - docs: Number of audio channels. - sample_rate: - type: integer - docs: >- - Audio sample rate. Number of samples per second in the audio input, - measured in Hertz. - source: - openapi: assistant-asyncapi.json - AudioInput: - docs: When provided, the input is audio. - properties: - type: - type: literal<"audio_input"> - docs: >- - The type of message sent through the socket; must be `audio_input` for - our server to correctly identify and process it as an Audio Input - message. - - - This message is used for sending audio input data to EVI for - processing and expression measurement. Audio data should be sent as a - continuous stream, encoded in Base64. - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - data: - type: string - docs: >- - Base64 encoded audio input to insert into the conversation. - - - The content of an Audio Input message is treated as the user’s speech - to EVI and must be streamed continuously. Pre-recorded audio files are - not supported. - - - For optimal transcription quality, the audio data should be - transmitted in small chunks. - - - Hume recommends streaming audio with a buffer window of 20 - milliseconds (ms), or 100 milliseconds (ms) for web applications. - source: - openapi: assistant-asyncapi.json - BuiltInTool: - enum: - - web_search - - hang_up - docs: >- - Name of the built-in tool. Set to `web_search` to equip EVI with the - built-in Web Search tool. 
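To illustrate the `audio_input` schema above, the sketch below base64-encodes one chunk of raw audio and wraps it in the expected envelope. The chunk bytes are a placeholder; real audio must be streamed continuously in small chunks (roughly 20 ms each, per the docs above), not sent as a pre-recorded file.

```python
import base64
import json
from typing import Optional

def audio_input_message(chunk: bytes, custom_session_id: Optional[str] = None) -> str:
    # Audio Input data must be base64-encoded before it is sent over the socket.
    payload = {
        "type": "audio_input",
        "data": base64.b64encode(chunk).decode("ascii"),
    }
    if custom_session_id is not None:
        payload["custom_session_id"] = custom_session_id
    return json.dumps(payload)

# Example: a 20 ms chunk of 16 kHz, 16-bit mono PCM is 640 bytes.
message = audio_input_message(b"\x00" * 640)
```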
- source: - openapi: assistant-asyncapi.json - BuiltinToolConfig: - properties: - name: - type: BuiltInTool - fallback_content: - type: optional - docs: >- - Optional text passed to the supplemental LLM if the tool call fails. - The LLM then uses this text to generate a response back to the user, - ensuring continuity in the conversation. - source: - openapi: assistant-asyncapi.json - Context: - properties: - type: - type: optional - docs: >- - The persistence level of the injected context. Specifies how long the - injected context will remain active in the session. - - - There are three possible context types: - - - - **Persistent**: The context is appended to all user messages for the - duration of the session. - - - - **Temporary**: The context is appended only to the next user - message. - - - **Editable**: The original context is updated to reflect the new context. - - If the type is not specified, it will default to `temporary`. - text: - type: string - docs: >- - The context to be injected into the conversation. Helps inform the - LLM's response by providing relevant information about the ongoing - conversation. - - - This text will be appended to the end of user messages based on the - chosen persistence level. For example, if you want to remind EVI of - its role as a helpful weather assistant, the context you insert will - be appended to the end of user messages as `{Context: You are a - helpful weather assistant}`. - source: - openapi: assistant-asyncapi.json - ContextType: - enum: - - editable - - persistent - - temporary - source: - openapi: assistant-asyncapi.json - Encoding: - type: literal<"linear16"> - ErrorLevel: - type: literal<"warn"> - PauseAssistantMessage: - docs: >- - Pause responses from EVI. Chat history is still saved and sent after - resuming. - properties: - type: - type: literal<"pause_assistant_message"> - docs: >- - The type of message sent through the socket; must be - `pause_assistant_message` for our server to correctly identify and - process it as a Pause Assistant message. - - - Once this message is sent, EVI will not respond until a [Resume - Assistant - message](/reference/empathic-voice-interface-evi/chat/chat#send.Resume%20Assistant%20Message.type) - is sent. When paused, EVI won’t respond, but transcriptions of your - audio inputs will still be recorded. - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - source: - openapi: assistant-asyncapi.json - ResumeAssistantMessage: - docs: >- - Resume responses from EVI. Chat history sent while paused will now be - sent. - properties: - type: - type: literal<"resume_assistant_message"> - docs: >- - The type of message sent through the socket; must be - `resume_assistant_message` for our server to correctly identify and - process it as a Resume Assistant message. - - - Upon resuming, if any audio input was sent during the pause, EVI will - retain context from all messages sent but only respond to the last - user message. (e.g., If you ask EVI two questions while paused and - then send a `resume_assistant_message`, EVI will respond to the second - question and have added the first question to its conversation - context.) - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. 
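The `pause_assistant_message` and `resume_assistant_message` types above carry nothing beyond their `type` (and an optional `custom_session_id`), so pausing and resuming EVI amounts to sending these minimal envelopes over the already-open socket. A sketch; the send call is left as a comment because it depends on whichever WebSocket client you use:

```python
import json

# While paused, EVI keeps transcribing audio input but does not respond
# until a resume message is sent.
pause = json.dumps({"type": "pause_assistant_message"})
resume = json.dumps({"type": "resume_assistant_message"})

# websocket.send(pause)   # hypothetical send over an open /v0/evi/chat connection
# websocket.send(resume)
```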
- source: - openapi: assistant-asyncapi.json - SessionSettings: - docs: Settings for this chat session. - properties: - type: - type: literal<"session_settings"> - docs: >- - The type of message sent through the socket; must be - `session_settings` for our server to correctly identify and process it - as a Session Settings message. - - - Session settings are temporary and apply only to the current Chat - session. These settings can be adjusted dynamically based on the - requirements of each session to ensure optimal performance and user - experience. - - - For more information, please refer to the [Session Settings - section](/docs/empathic-voice-interface-evi/configuration#session-settings) - on the EVI Configuration page. - custom_session_id: - type: optional - docs: >- - Unique identifier for the session. Used to manage conversational - state, correlate frontend and backend data, and persist conversations - across EVI sessions. - - - If included, the response sent from Hume to your backend will include - this ID. This allows you to correlate frontend users with their - incoming messages. - - - It is recommended to pass a `custom_session_id` if you are using a - Custom Language Model. Please see our guide to [using a custom - language - model](/docs/empathic-voice-interface-evi/custom-language-model) with - EVI to learn more. - system_prompt: - type: optional - docs: >- - Instructions used to shape EVI’s behavior, responses, and style for - the session. - - - When included in a Session Settings message, the provided Prompt - overrides the existing one specified in the EVI configuration. If no - Prompt was defined in the configuration, this Prompt will be the one - used for the session. - - - You can use the Prompt to define a specific goal or role for EVI, - specifying how it should act or what it should focus on during the - conversation. For example, EVI can be instructed to act as a customer - support representative, a fitness coach, or a travel advisor, each - with its own set of behaviors and response styles. - - - For help writing a system prompt, see our [Prompting - Guide](/docs/empathic-voice-interface-evi/prompting). - context: - type: optional - docs: >- - Allows developers to inject additional context into the conversation, - which is appended to the end of user messages for the session. - - - When included in a Session Settings message, the provided context can - be used to remind the LLM of its role in every user message, prevent - it from forgetting important details, or add new relevant information - to the conversation. - - - Set to `null` to disable context injection. - audio: - type: optional - docs: >- - Configuration details for the audio input used during the session. - Ensures the audio is being correctly set up for processing. - - - This optional field is only required when the audio input is encoded - in PCM Linear 16 (16-bit, little-endian, signed PCM WAV data). For - detailed instructions on how to configure session settings for PCM - Linear 16 audio, please refer to the [Session Settings - section](/docs/empathic-voice-interface-evi/configuration#session-settings) - on the EVI Configuration page. - language_model_api_key: - type: optional - docs: >- - Third party API key for the supplemental language model. - - - When provided, EVI will use this key instead of Hume’s API key for the - supplemental LLM. This allows you to bypass rate limits and utilize - your own API key as needed. 
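As a hedged example of the `session_settings` fields described above, the payload below overrides the system prompt, declares PCM Linear 16 audio, and injects temporary context. The prompt text, sample rate, and context string are illustrative values, not defaults taken from this definition.

```python
import json

session_settings = {
    "type": "session_settings",
    # Overrides the Prompt from the EVI configuration for this session only.
    "system_prompt": "You are a concise, friendly weather assistant.",
    # Required when streaming PCM Linear 16 (16-bit, little-endian) audio.
    "audio": {"encoding": "linear16", "channels": 1, "sample_rate": 16000},
    # Appended to the next user message only ("temporary" persistence).
    "context": {"type": "temporary", "text": "The user is located in Boston."},
}
message = json.dumps(session_settings)
```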
- tools: - type: optional> - docs: >- - List of user-defined tools to enable for the session. - - - Tools are resources used by EVI to perform various tasks, such as - searching the web or calling external APIs. Built-in tools, like web - search, are natively integrated, while user-defined tools are created - and invoked by the user. To learn more, see our [Tool Use - Guide](/docs/empathic-voice-interface-evi/tool-use). - builtin_tools: - type: optional> - docs: >- - List of built-in tools to enable for the session. - - - Tools are resources used by EVI to perform various tasks, such as - searching the web or calling external APIs. Built-in tools, like web - search, are natively integrated, while user-defined tools are created - and invoked by the user. To learn more, see our [Tool Use - Guide](/docs/empathic-voice-interface-evi/tool-use). - - - Currently, the only built-in tool Hume provides is **Web Search**. - When enabled, Web Search equips EVI with the ability to search the web - for up-to-date information. - metadata: - type: optional> - variables: - type: optional> - docs: Dynamic values that can be used to populate EVI prompts. - source: - openapi: assistant-asyncapi.json - Tool: - properties: - type: - type: ToolType - docs: Type of tool. Set to `function` for user-defined tools. - name: - type: string - docs: Name of the user-defined tool to be enabled. - parameters: - type: string - docs: >- - Parameters of the tool. Is a stringified JSON schema. - - - These parameters define the inputs needed for the tool’s execution, - including the expected data type and description for each input field. - Structured as a JSON schema, this format ensures the tool receives - data in the expected format. - description: - type: optional - docs: >- - An optional description of what the tool does, used by the - supplemental LLM to choose when and how to call the function. - fallback_content: - type: optional - docs: >- - Optional text passed to the supplemental LLM if the tool call fails. - The LLM then uses this text to generate a response back to the user, - ensuring continuity in the conversation. - source: - openapi: assistant-asyncapi.json - ToolErrorMessage: - docs: When provided, the output is a function call error. - properties: - type: - type: literal<"tool_error"> - docs: >- - The type of message sent through the socket; for a Tool Error message, - this must be `tool_error`. - - - Upon receiving a [Tool Call - message](/reference/empathic-voice-interface-evi/chat/chat#receive.Tool%20Call%20Message.type) - and failing to invoke the function, this message is sent to notify EVI - of the tool's failure. - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - tool_type: - type: optional - docs: >- - Type of tool called. Either `builtin` for natively implemented tools, - like web search, or `function` for user-defined tools. - tool_call_id: - type: string - docs: >- - The unique identifier for a specific tool call instance. - - - This ID is used to track the request and response of a particular tool - invocation, ensuring that the Tool Error message is linked to the - appropriate tool call request. The specified `tool_call_id` must match - the one received in the [Tool Call - message](/reference/empathic-voice-interface-evi/chat/chat#receive.Tool%20Call%20Message.type). 
- content: - type: optional - docs: >- - Optional text passed to the supplemental LLM in place of the tool call - result. The LLM then uses this text to generate a response back to the - user, ensuring continuity in the conversation if the tool errors. - error: - type: string - docs: Error message from the tool call, not exposed to the LLM or user. - code: - type: optional - docs: Error code. Identifies the type of error encountered. - level: - type: optional - docs: >- - Indicates the severity of an error; for a Tool Error message, this - must be `warn` to signal an unexpected event. - source: - openapi: assistant-asyncapi.json - ToolResponseMessage: - docs: When provided, the output is a function call response. - properties: - type: - type: literal<"tool_response"> - docs: >- - The type of message sent through the socket; for a Tool Response - message, this must be `tool_response`. - - - Upon receiving a [Tool Call - message](/reference/empathic-voice-interface-evi/chat/chat#receive.Tool%20Call%20Message.type) - and successfully invoking the function, this message is sent to convey - the result of the function call back to EVI. - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - tool_call_id: - type: string - docs: >- - The unique identifier for a specific tool call instance. - - - This ID is used to track the request and response of a particular tool - invocation, ensuring that the correct response is linked to the - appropriate request. The specified `tool_call_id` must match the one - received in the [Tool Call - message](/reference/empathic-voice-interface-evi/chat/chat#receive.Tool%20Call%20Message.tool_call_id). - content: - type: string - docs: >- - Return value of the tool call. Contains the output generated by the - tool to pass back to EVI. - tool_name: - type: optional - docs: >- - Name of the tool. - - - Include this optional field to help the supplemental LLM identify - which tool generated the response. The specified `tool_name` must - match the one received in the [Tool Call - message](/reference/empathic-voice-interface-evi/chat/chat#receive.Tool%20Call%20Message.type). - tool_type: - type: optional - docs: >- - Type of tool called. Either `builtin` for natively implemented tools, - like web search, or `function` for user-defined tools. - source: - openapi: assistant-asyncapi.json - ToolType: - enum: - - builtin - - function - source: - openapi: assistant-asyncapi.json - UserInput: - docs: User text to insert into the conversation. - properties: - type: - type: literal<"user_input"> - docs: >- - The type of message sent through the socket; must be `user_input` for - our server to correctly identify and process it as a User Input - message. - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - text: - type: string - docs: >- - User text to insert into the conversation. Text sent through a User - Input message is treated as the user’s speech to EVI. EVI processes - this input and provides a corresponding response. - - - Expression measurement results are not available for User Input - messages, as the prosody model relies on audio input and cannot - process text alone. - source: - openapi: assistant-asyncapi.json - AssistantEnd: - docs: When provided, the output is an assistant end message. 
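Given the `tool_response` and `tool_error` schemas above, a handler for an incoming tool call might report either outcome as sketched below. `lookup_weather` is a placeholder for a real tool implementation, and the `content` strings are illustrative.

```python
import json

def lookup_weather(params: dict) -> dict:
    # Placeholder tool implementation.
    return {"temperature_f": 68, "conditions": "clear"}

def handle_tool_call(tool_call: dict) -> str:
    """Build a tool_response on success, or a tool_error on failure."""
    try:
        result = lookup_weather(json.loads(tool_call["parameters"]))
        return json.dumps({
            "type": "tool_response",
            "tool_call_id": tool_call["tool_call_id"],  # must echo the received ID
            "content": json.dumps(result),
        })
    except Exception as exc:
        return json.dumps({
            "type": "tool_error",
            "tool_call_id": tool_call["tool_call_id"],
            "error": str(exc),  # not exposed to the LLM or user
            "content": "The weather lookup failed; apologize and offer to retry.",
        })
```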
- properties: - type: - type: literal<"assistant_end"> - docs: >- - The type of message sent through the socket; for an Assistant End - message, this must be `assistant_end`. - - - This message indicates the conclusion of the assistant’s response, - signaling that the assistant has finished speaking for the current - conversational turn. - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - source: - openapi: assistant-asyncapi.json - AssistantMessage: - docs: When provided, the output is an assistant message. - properties: - type: - type: literal<"assistant_message"> - docs: >- - The type of message sent through the socket; for an Assistant Message, - this must be `assistant_message`. - - - This message contains both a transcript of the assistant’s response - and the expression measurement predictions of the assistant’s audio - output. - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - id: - type: optional - docs: >- - ID of the assistant message. Allows the Assistant Message to be - tracked and referenced. - message: - type: ChatMessage - docs: Transcript of the message. - models: - type: Inference - docs: Inference model results. - from_text: - type: boolean - docs: >- - Indicates if this message was inserted into the conversation as text - from an [Assistant Input - message](/reference/empathic-voice-interface-evi/chat/chat#send.Assistant%20Input.text). - source: - openapi: assistant-asyncapi.json - AudioOutput: - docs: When provided, the output is audio. - properties: - type: - type: literal<"audio_output"> - docs: >- - The type of message sent through the socket; for an Audio Output - message, this must be `audio_output`. - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - id: - type: string - docs: >- - ID of the audio output. Allows the Audio Output message to be tracked - and referenced. - index: - type: integer - docs: Index of the chunk of audio relative to the whole audio segment. - data: - type: string - docs: >- - Base64 encoded audio output. This encoded audio is transmitted to the - client, where it can be decoded and played back as part of the user - interaction. - source: - openapi: assistant-asyncapi.json - ChatMessageToolResult: - discriminated: false + Indicates if this message was inserted into the conversation as text + from an [Assistant Input + message](/reference/empathic-voice-interface-evi/chat/chat#send.Assistant%20Input.text). + source: + openapi: assistant-asyncapi.json + AudioOutput: + docs: When provided, the output is audio. + properties: + type: + type: literal<"audio_output"> + docs: >- + The type of message sent through the socket; for an Audio Output + message, this must be `audio_output`. + custom_session_id: + type: optional + docs: >- + Used to manage conversational state, correlate frontend and backend + data, and persist conversations across EVI sessions. + id: + type: string + docs: >- + ID of the audio output. Allows the Audio Output message to be tracked + and referenced. + index: + type: integer + docs: Index of the chunk of audio relative to the whole audio segment. + data: + type: string + docs: >- + Base64 encoded audio output. 
This encoded audio is transmitted to the + client, where it can be decoded and played back as part of the user + interaction. + source: + openapi: assistant-asyncapi.json + ChatMessageToolResult: + discriminated: false + docs: Function call response from client. + union: + - type: ToolResponseMessage + - type: ToolErrorMessage + source: + openapi: assistant-asyncapi.json + ChatMessage: + properties: + role: + type: Role + docs: Role of who is providing the message. + content: + type: optional + docs: Transcript of the message. + tool_call: + type: optional + docs: Function call name and arguments. + tool_result: + type: optional docs: Function call response from client. - union: - - type: ToolResponseMessage - - type: ToolErrorMessage - source: - openapi: assistant-asyncapi.json - ChatMessage: - properties: - role: - type: Role - docs: Role of who is providing the message. - content: - type: optional - docs: Transcript of the message. - tool_call: - type: optional - docs: Function call name and arguments. - tool_result: - type: optional - docs: Function call response from client. - source: - openapi: assistant-asyncapi.json - ChatMetadata: - docs: When provided, the output is a chat metadata message. - properties: - type: - type: literal<"chat_metadata"> - docs: >- - The type of message sent through the socket; for a Chat Metadata - message, this must be `chat_metadata`. - - - The Chat Metadata message is the first message you receive after - establishing a connection with EVI and contains important identifiers - for the current Chat session. - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - chat_group_id: - type: string - docs: >- - ID of the Chat Group. - - - Used to resume a Chat when passed in the - [resumed_chat_group_id](/reference/empathic-voice-interface-evi/chat/chat#request.query.resumed_chat_group_id) - query parameter of a subsequent connection request. This allows EVI to - continue the conversation from where it left off within the Chat - Group. - - - Learn more about [supporting chat - resumability](/docs/empathic-voice-interface-evi/faq#does-evi-support-chat-resumability) - from the EVI FAQ. - chat_id: - type: string - docs: >- - ID of the Chat session. Allows the Chat session to be tracked and - referenced. - request_id: - type: optional - docs: ID of the initiating request. 
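The receive-side messages above are distinguished by their `type` field. A minimal dispatch sketch might persist the `chat_group_id` from the initial `chat_metadata` message (for later resumption) and decode `audio_output` chunks for playback; the `state` dict is a placeholder for whatever storage the client uses.

```python
import base64
import json

def handle_server_message(raw: str, state: dict) -> None:
    msg = json.loads(raw)
    if msg["type"] == "chat_metadata":
        # First message after connecting; keep the group ID so the conversation
        # can be resumed later via the resumed_chat_group_id query parameter.
        state["chat_group_id"] = msg["chat_group_id"]
    elif msg["type"] == "audio_output":
        # Chunks arrive base64-encoded and ordered by `index`.
        pcm_bytes = base64.b64decode(msg["data"])
        state.setdefault("audio_chunks", []).append((msg["index"], pcm_bytes))
```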
- source: - openapi: assistant-asyncapi.json - EmotionScores: - properties: - Admiration: double - Adoration: double - Aesthetic Appreciation: double - Amusement: double - Anger: double - Anxiety: double - Awe: double - Awkwardness: double - Boredom: double - Calmness: double - Concentration: double - Confusion: double - Contemplation: double - Contempt: double - Contentment: double - Craving: double - Desire: double - Determination: double - Disappointment: double - Disgust: double - Distress: double - Doubt: double - Ecstasy: double - Embarrassment: double - Empathic Pain: double - Entrancement: double - Envy: double - Excitement: double - Fear: double - Guilt: double - Horror: double - Interest: double - Joy: double - Love: double - Nostalgia: double - Pain: double - Pride: double - Realization: double - Relief: double - Romance: double - Sadness: double - Satisfaction: double - Shame: double - Surprise (negative): double - Surprise (positive): double - Sympathy: double - Tiredness: double - Triumph: double - source: - openapi: assistant-asyncapi.json - WebSocketError: - docs: When provided, the output is an error message. - properties: - type: - type: literal<"error"> - docs: >- - The type of message sent through the socket; for a Web Socket Error - message, this must be `error`. - - - This message indicates a disruption in the WebSocket connection, such - as an unexpected disconnection, protocol error, or data transmission - issue. - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - code: - type: string - docs: Error code. Identifies the type of error encountered. - slug: - type: string - docs: >- - Short, human-readable identifier and description for the error. See a - complete list of error slugs on the [Errors - page](/docs/resources/errors). - message: - type: string - docs: Detailed description of the error. - source: - openapi: assistant-asyncapi.json - Inference: - properties: - prosody: - type: optional - docs: >- - Prosody model inference results. - - - EVI uses the prosody model to measure 48 emotions related to speech - and vocal characteristics within a given expression. - source: - openapi: assistant-asyncapi.json - MillisecondInterval: - properties: - begin: - type: integer - docs: Start time of the interval in milliseconds. - end: - type: integer - docs: End time of the interval in milliseconds. - source: - openapi: assistant-asyncapi.json - ProsodyInference: - properties: - scores: - type: EmotionScores - docs: >- - The confidence scores for 48 emotions within the detected expression - of an audio sample. - - - Scores typically range from 0 to 1, with higher values indicating a - stronger confidence level in the measured attribute. - - - See our guide on [interpreting expression measurement - results](/docs/expression-measurement/faq#how-do-i-interpret-my-results) - to learn more. - source: - openapi: assistant-asyncapi.json - Role: - enum: - - assistant - - system - - user - - all - - tool - source: - openapi: assistant-asyncapi.json - ToolCallMessage: - docs: When provided, the output is a tool call. - properties: - name: - type: string - docs: Name of the tool called. - parameters: - type: string - docs: >- - Parameters of the tool. - - - These parameters define the inputs needed for the tool’s execution, - including the expected data type and description for each input field. 
- Structured as a stringified JSON schema, this format ensures the tool - receives data in the expected format. - tool_call_id: - type: string - docs: >- - The unique identifier for a specific tool call instance. - - - This ID is used to track the request and response of a particular tool - invocation, ensuring that the correct response is linked to the - appropriate request. - type: - type: literal<"tool_call"> - docs: >- - The type of message sent through the socket; for a Tool Call message, - this must be `tool_call`. - - - This message indicates that the supplemental LLM has detected a need - to invoke the specified tool. - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - tool_type: - type: optional - docs: >- - Type of tool called. Either `builtin` for natively implemented tools, - like web search, or `function` for user-defined tools. - response_required: - type: boolean - docs: >- - Indicates whether a response to the tool call is required from the - developer, either in the form of a [Tool Response - message](/reference/empathic-voice-interface-evi/chat/chat#send.Tool%20Response%20Message.type) - or a [Tool Error - message](/reference/empathic-voice-interface-evi/chat/chat#send.Tool%20Error%20Message.type). - source: - openapi: assistant-asyncapi.json - UserInterruption: - docs: When provided, the output is an interruption. - properties: - type: - type: literal<"user_interruption"> - docs: >- - The type of message sent through the socket; for a User Interruption - message, this must be `user_interruption`. - - - This message indicates the user has interrupted the assistant’s - response. EVI detects the interruption in real-time and sends this - message to signal the interruption event. This message allows the - system to stop the current audio playback, clear the audio queue, and - prepare to handle new user input. - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - time: - type: integer - docs: Unix timestamp of the detected user interruption. - source: - openapi: assistant-asyncapi.json - UserMessage: - docs: When provided, the output is a user message. - properties: - type: - type: literal<"user_message"> - docs: >- - The type of message sent through the socket; for a User Message, this - must be `user_message`. - - - This message contains both a transcript of the user’s input and the - expression measurement predictions if the input was sent as an [Audio - Input - message](/reference/empathic-voice-interface-evi/chat/chat#send.Audio%20Input.type). - Expression measurement predictions are not provided for a [User Input - message](/reference/empathic-voice-interface-evi/chat/chat#send.User%20Input.type), - as the prosody model relies on audio input and cannot process text - alone. - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - message: - type: ChatMessage - docs: Transcript of the message. - models: - type: Inference - docs: Inference model results. - time: - type: MillisecondInterval - docs: Start and End time of user message. 
- from_text: - type: boolean - docs: >- - Indicates if this message was inserted into the conversation as text - from a [User - Input](/reference/empathic-voice-interface-evi/chat/chat#send.User%20Input.text) - message. - source: - openapi: assistant-asyncapi.json - JsonMessage: - discriminated: false - union: - - type: AssistantEnd - - type: AssistantMessage - - type: ChatMetadata - - type: WebSocketError - - type: UserInterruption - - type: UserMessage - - type: ToolCallMessage - - type: ToolResponseMessage - - type: ToolErrorMessage - source: - openapi: assistant-asyncapi.json - TtsInput: - properties: - type: optional> - source: - openapi: assistant-asyncapi.json - TextInput: - properties: - type: optional> - source: - openapi: assistant-asyncapi.json - FunctionCallResponseInput: - properties: - type: optional> - source: - openapi: assistant-asyncapi.json - ExtendedVoiceArgs: - properties: - text: string - use_s2a: - type: optional - default: true - voice_args: - type: VoiceArgs - source: - openapi: assistant-openapi.json - HTTPValidationError: - properties: - detail: - type: optional> - source: - openapi: assistant-openapi.json - ValidationErrorLocItem: - discriminated: false - union: - - string - - integer - source: - openapi: assistant-openapi.json - ValidationError: - properties: - loc: - type: list - msg: string - type: string - source: - openapi: assistant-openapi.json - VoiceArgs: - properties: - voice: - type: optional - speech_rate_multiplier: - type: optional - default: 1 - validation: - min: 0.5 - max: 2 - baseline: - type: optional - default: false - reconstruct: - type: optional - default: false - additive: - type: optional - default: false - drift: - type: optional - default: true - use_v2: - type: optional - default: true - source: - openapi: assistant-openapi.json - VoiceNameEnum: - enum: - - ITO - - KORA - - DACHER - - AURA - - FINN - - SIENNA - - WILLOW - - SCOUT - - WHIMSY - - ACE - - JUNO - - STELLA - - HIRO - source: - openapi: assistant-openapi.json + source: + openapi: assistant-asyncapi.json + ChatMetadata: + docs: When provided, the output is a chat metadata message. + properties: + type: + type: literal<"chat_metadata"> + docs: >- + The type of message sent through the socket; for a Chat Metadata + message, this must be `chat_metadata`. + + + The Chat Metadata message is the first message you receive after + establishing a connection with EVI and contains important identifiers + for the current Chat session. + custom_session_id: + type: optional + docs: >- + Used to manage conversational state, correlate frontend and backend + data, and persist conversations across EVI sessions. + chat_group_id: + type: string + docs: >- + ID of the Chat Group. + + + Used to resume a Chat when passed in the + [resumed_chat_group_id](/reference/empathic-voice-interface-evi/chat/chat#request.query.resumed_chat_group_id) + query parameter of a subsequent connection request. This allows EVI to + continue the conversation from where it left off within the Chat + Group. + + + Learn more about [supporting chat + resumability](/docs/empathic-voice-interface-evi/faq#does-evi-support-chat-resumability) + from the EVI FAQ. + chat_id: + type: string + docs: >- + ID of the Chat session. Allows the Chat session to be tracked and + referenced. + request_id: + type: optional + docs: ID of the initiating request. 
+ source: + openapi: assistant-asyncapi.json + EmotionScores: + properties: + Admiration: double + Adoration: double + Aesthetic Appreciation: double + Amusement: double + Anger: double + Anxiety: double + Awe: double + Awkwardness: double + Boredom: double + Calmness: double + Concentration: double + Confusion: double + Contemplation: double + Contempt: double + Contentment: double + Craving: double + Desire: double + Determination: double + Disappointment: double + Disgust: double + Distress: double + Doubt: double + Ecstasy: double + Embarrassment: double + Empathic Pain: double + Entrancement: double + Envy: double + Excitement: double + Fear: double + Guilt: double + Horror: double + Interest: double + Joy: double + Love: double + Nostalgia: double + Pain: double + Pride: double + Realization: double + Relief: double + Romance: double + Sadness: double + Satisfaction: double + Shame: double + Surprise (negative): double + Surprise (positive): double + Sympathy: double + Tiredness: double + Triumph: double + source: + openapi: assistant-asyncapi.json + WebSocketError: + docs: When provided, the output is an error message. + properties: + type: + type: literal<"error"> + docs: >- + The type of message sent through the socket; for a Web Socket Error + message, this must be `error`. + + + This message indicates a disruption in the WebSocket connection, such + as an unexpected disconnection, protocol error, or data transmission + issue. + custom_session_id: + type: optional + docs: >- + Used to manage conversational state, correlate frontend and backend + data, and persist conversations across EVI sessions. + code: + type: string + docs: Error code. Identifies the type of error encountered. + slug: + type: string + docs: >- + Short, human-readable identifier and description for the error. See a + complete list of error slugs on the [Errors + page](/docs/resources/errors). + message: + type: string + docs: Detailed description of the error. + source: + openapi: assistant-asyncapi.json + Inference: + properties: + prosody: + type: optional + docs: >- + Prosody model inference results. + + + EVI uses the prosody model to measure 48 emotions related to speech + and vocal characteristics within a given expression. + source: + openapi: assistant-asyncapi.json + MillisecondInterval: + properties: + begin: + type: integer + docs: Start time of the interval in milliseconds. + end: + type: integer + docs: End time of the interval in milliseconds. + source: + openapi: assistant-asyncapi.json + ProsodyInference: + properties: + scores: + type: EmotionScores + docs: >- + The confidence scores for 48 emotions within the detected expression + of an audio sample. + + + Scores typically range from 0 to 1, with higher values indicating a + stronger confidence level in the measured attribute. + + + See our guide on [interpreting expression measurement + results](/docs/expression-measurement/faq#how-do-i-interpret-my-results) + to learn more. + source: + openapi: assistant-asyncapi.json + Role: + enum: + - assistant + - system + - user + - all + - tool + source: + openapi: assistant-asyncapi.json + ToolCallMessage: + docs: When provided, the output is a tool call. + properties: + name: + type: string + docs: Name of the tool called. + parameters: + type: string + docs: >- + Parameters of the tool. + + + These parameters define the inputs needed for the tool’s execution, + including the expected data type and description for each input field. 
+ Structured as a stringified JSON schema, this format ensures the tool + receives data in the expected format. + tool_call_id: + type: string + docs: >- + The unique identifier for a specific tool call instance. + + + This ID is used to track the request and response of a particular tool + invocation, ensuring that the correct response is linked to the + appropriate request. + type: + type: literal<"tool_call"> + docs: >- + The type of message sent through the socket; for a Tool Call message, + this must be `tool_call`. + + + This message indicates that the supplemental LLM has detected a need + to invoke the specified tool. + custom_session_id: + type: optional + docs: >- + Used to manage conversational state, correlate frontend and backend + data, and persist conversations across EVI sessions. + tool_type: + type: optional + docs: >- + Type of tool called. Either `builtin` for natively implemented tools, + like web search, or `function` for user-defined tools. + response_required: + type: boolean + docs: >- + Indicates whether a response to the tool call is required from the + developer, either in the form of a [Tool Response + message](/reference/empathic-voice-interface-evi/chat/chat#send.Tool%20Response%20Message.type) + or a [Tool Error + message](/reference/empathic-voice-interface-evi/chat/chat#send.Tool%20Error%20Message.type). + source: + openapi: assistant-asyncapi.json + UserInterruption: + docs: When provided, the output is an interruption. + properties: + type: + type: literal<"user_interruption"> + docs: >- + The type of message sent through the socket; for a User Interruption + message, this must be `user_interruption`. + + + This message indicates the user has interrupted the assistant’s + response. EVI detects the interruption in real-time and sends this + message to signal the interruption event. This message allows the + system to stop the current audio playback, clear the audio queue, and + prepare to handle new user input. + custom_session_id: + type: optional + docs: >- + Used to manage conversational state, correlate frontend and backend + data, and persist conversations across EVI sessions. + time: + type: integer + docs: Unix timestamp of the detected user interruption. + source: + openapi: assistant-asyncapi.json + UserMessage: + docs: When provided, the output is a user message. + properties: + type: + type: literal<"user_message"> + docs: >- + The type of message sent through the socket; for a User Message, this + must be `user_message`. + + + This message contains both a transcript of the user’s input and the + expression measurement predictions if the input was sent as an [Audio + Input + message](/reference/empathic-voice-interface-evi/chat/chat#send.Audio%20Input.type). + Expression measurement predictions are not provided for a [User Input + message](/reference/empathic-voice-interface-evi/chat/chat#send.User%20Input.type), + as the prosody model relies on audio input and cannot process text + alone. + custom_session_id: + type: optional + docs: >- + Used to manage conversational state, correlate frontend and backend + data, and persist conversations across EVI sessions. + message: + type: ChatMessage + docs: Transcript of the message. + models: + type: Inference + docs: Inference model results. + time: + type: MillisecondInterval + docs: Start and End time of user message. 
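Building on the `user_message` and `user_interruption` schemas above, a client might surface the strongest prosody scores for each transcript and clear queued audio when the user interrupts. The playback queue is a placeholder for the client's own audio buffer.

```python
import json

def handle_transcript_message(raw: str, playback_queue: list) -> None:
    msg = json.loads(raw)
    if msg["type"] == "user_message":
        scores = msg.get("models", {}).get("prosody", {}).get("scores", {})
        # Top three of the 48 emotion confidence scores, highest first.
        top = sorted(scores.items(), key=lambda kv: kv[1], reverse=True)[:3]
        print(msg["message"].get("content"), top)
    elif msg["type"] == "user_interruption":
        # Stop current playback and drop any queued audio_output chunks.
        playback_queue.clear()
```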
+ from_text: + type: boolean + docs: >- + Indicates if this message was inserted into the conversation as text + from a [User + Input](/reference/empathic-voice-interface-evi/chat/chat#send.User%20Input.text) + message. + source: + openapi: assistant-asyncapi.json + JsonMessage: + discriminated: false + union: + - type: AssistantEnd + - type: AssistantMessage + - type: ChatMetadata + - type: WebSocketError + - type: UserInterruption + - type: UserMessage + - type: ToolCallMessage + - type: ToolResponseMessage + - type: ToolErrorMessage + source: + openapi: assistant-asyncapi.json + TtsInput: + properties: + type: optional> + source: + openapi: assistant-asyncapi.json + TextInput: + properties: + type: optional> + source: + openapi: assistant-asyncapi.json + FunctionCallResponseInput: + properties: + type: optional> + source: + openapi: assistant-asyncapi.json + ExtendedVoiceArgs: + properties: + text: string + use_s2a: + type: optional + default: true + voice_args: + type: VoiceArgs + source: + openapi: assistant-openapi.json + HTTPValidationError: + properties: + detail: + type: optional> + source: + openapi: assistant-openapi.json + ValidationErrorLocItem: + discriminated: false + union: + - string + - integer + source: + openapi: assistant-openapi.json + ValidationError: + properties: + loc: + type: list + msg: string + type: string + source: + openapi: assistant-openapi.json + VoiceArgs: + properties: + voice: + type: optional + speech_rate_multiplier: + type: optional + default: 1 + validation: + min: 0.5 + max: 2 + baseline: + type: optional + default: false + reconstruct: + type: optional + default: false + additive: + type: optional + default: false + drift: + type: optional + default: true + use_v2: + type: optional + default: true + source: + openapi: assistant-openapi.json + VoiceNameEnum: + enum: + - ITO + - KORA + - DACHER + - AURA + - FINN + - SIENNA + - WILLOW + - SCOUT + - WHIMSY + - ACE + - JUNO + - STELLA + - HIRO + source: + openapi: assistant-openapi.json diff --git a/.mock/definition/empathic-voice/chat.yml b/.mock/definition/empathic-voice/chat.yml index d9fab0b..143a85c 100644 --- a/.mock/definition/empathic-voice/chat.yml +++ b/.mock/definition/empathic-voice/chat.yml @@ -1,146 +1,146 @@ channel: - path: /v0/evi/chat - auth: false - query-parameters: - config_id: - type: optional - docs: >- - The unique identifier for an EVI configuration. - - - Include this ID in your connection request to equip EVI with the Prompt, - Language Model, Voice, and Tools associated with the specified - configuration. If omitted, EVI will apply [default configuration - settings](/docs/empathic-voice-interface-evi/configuration#default-configuration). - - - For help obtaining this ID, see our [Configuration - Guide](/docs/empathic-voice-interface-evi/configuration). - config_version: - type: optional - docs: >- - The version number of the EVI configuration specified by the - `config_id`. - - - Configs, as well as Prompts and Tools, are versioned. This versioning - system supports iterative development, allowing you to progressively - refine configurations and revert to previous versions if needed. - - - Include this parameter to apply a specific version of an EVI - configuration. If omitted, the latest version will be applied. - resumed_chat_group_id: - type: optional - docs: >- - The unique identifier for a Chat Group. Use this field to preserve - context from a previous Chat session. - - - A Chat represents a single session from opening to closing a WebSocket - connection. 
In contrast, a Chat Group is a series of resumed Chats that - collectively represent a single conversation spanning multiple sessions. - Each Chat includes a Chat Group ID, which is used to preserve the - context of previous Chat sessions when starting a new one. - - - Including the Chat Group ID in the `resumed_chat_group_id` query - parameter is useful for seamlessly resuming a Chat after unexpected - network disconnections and for picking up conversations exactly where - you left off at a later time. This ensures preserved context across - multiple sessions. - - - There are three ways to obtain the Chat Group ID: - - - - [Chat - Metadata](/reference/empathic-voice-interface-evi/chat/chat#receive.Chat%20Metadata.type): - Upon establishing a WebSocket connection with EVI, the user receives a - Chat Metadata message. This message contains a `chat_group_id`, which - can be used to resume conversations within this chat group in future - sessions. - - - - [List Chats - endpoint](/reference/empathic-voice-interface-evi/chats/list-chats): Use - the GET `/v0/evi/chats` endpoint to obtain the Chat Group ID of - individual Chat sessions. This endpoint lists all available Chat - sessions and their associated Chat Group ID. - - - - [List Chat Groups - endpoint](/reference/empathic-voice-interface-evi/chat-groups/list-chat-groups): - Use the GET `/v0/evi/chat_groups` endpoint to obtain the Chat Group IDs - of all Chat Groups associated with an API key. This endpoint returns a - list of all available chat groups. - access_token: - type: optional - docs: >- - Access token used for authenticating the client. If not provided, an - `api_key` must be provided to authenticate. - - - The access token is generated using both an API key and a Secret key, - which provides an additional layer of security compared to using just an - API key. - - - For more details, refer to the [Authentication Strategies - Guide](/docs/introduction/api-key#authentication-strategies). - api_key: - type: optional - docs: >- - API key used for authenticating the client. If not provided, an - `access_token` must be provided to authenticate. - - - For more details, refer to the [Authentication Strategies - Guide](/docs/introduction/api-key#authentication-strategies). - messages: - subscribe: - origin: server - body: SubscribeEvent - publish: - origin: client - body: PublishEvent - examples: - - messages: - - type: publish - body: - type: audio_input - data: data - - type: subscribe - body: - type: assistant_end + path: /v0/evi/chat + auth: false + query-parameters: + config_id: + type: optional + docs: >- + The unique identifier for an EVI configuration. + + + Include this ID in your connection request to equip EVI with the Prompt, + Language Model, Voice, and Tools associated with the specified + configuration. If omitted, EVI will apply [default configuration + settings](/docs/empathic-voice-interface-evi/configuration#default-configuration). + + + For help obtaining this ID, see our [Configuration + Guide](/docs/empathic-voice-interface-evi/configuration). + config_version: + type: optional + docs: >- + The version number of the EVI configuration specified by the + `config_id`. + + + Configs, as well as Prompts and Tools, are versioned. This versioning + system supports iterative development, allowing you to progressively + refine configurations and revert to previous versions if needed. + + + Include this parameter to apply a specific version of an EVI + configuration. If omitted, the latest version will be applied. 
+ resumed_chat_group_id: + type: optional + docs: >- + The unique identifier for a Chat Group. Use this field to preserve + context from a previous Chat session. + + + A Chat represents a single session from opening to closing a WebSocket + connection. In contrast, a Chat Group is a series of resumed Chats that + collectively represent a single conversation spanning multiple sessions. + Each Chat includes a Chat Group ID, which is used to preserve the + context of previous Chat sessions when starting a new one. + + + Including the Chat Group ID in the `resumed_chat_group_id` query + parameter is useful for seamlessly resuming a Chat after unexpected + network disconnections and for picking up conversations exactly where + you left off at a later time. This ensures preserved context across + multiple sessions. + + + There are three ways to obtain the Chat Group ID: + + + - [Chat + Metadata](/reference/empathic-voice-interface-evi/chat/chat#receive.Chat%20Metadata.type): + Upon establishing a WebSocket connection with EVI, the user receives a + Chat Metadata message. This message contains a `chat_group_id`, which + can be used to resume conversations within this chat group in future + sessions. + + + - [List Chats + endpoint](/reference/empathic-voice-interface-evi/chats/list-chats): Use + the GET `/v0/evi/chats` endpoint to obtain the Chat Group ID of + individual Chat sessions. This endpoint lists all available Chat + sessions and their associated Chat Group ID. + + + - [List Chat Groups + endpoint](/reference/empathic-voice-interface-evi/chat-groups/list-chat-groups): + Use the GET `/v0/evi/chat_groups` endpoint to obtain the Chat Group IDs + of all Chat Groups associated with an API key. This endpoint returns a + list of all available chat groups. + access_token: + type: optional + docs: >- + Access token used for authenticating the client. If not provided, an + `api_key` must be provided to authenticate. + + + The access token is generated using both an API key and a Secret key, + which provides an additional layer of security compared to using just an + API key. + + + For more details, refer to the [Authentication Strategies + Guide](/docs/introduction/api-key#authentication-strategies). + api_key: + type: optional + docs: >- + API key used for authenticating the client. If not provided, an + `access_token` must be provided to authenticate. + + + For more details, refer to the [Authentication Strategies + Guide](/docs/introduction/api-key#authentication-strategies). 
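Putting the query parameters above together, a connection URL for the `/v0/evi/chat` channel could be assembled as below. The host is assumed to be the Production environment of this API over `wss://`, the IDs reuse example UUIDs from this file, and the token is a placeholder; supply either `access_token` or `api_key`, not both.

```python
from urllib.parse import urlencode

params = {
    "config_id": "1b60e1a0-cc59-424a-8d2c-189d354db3f3",               # optional EVI configuration
    "resumed_chat_group_id": "697056f0-6c7e-487d-9bd8-9c19df79f05f",   # optional, resumes a Chat Group
    "access_token": "<token generated from your API key and Secret key>",
}
url = "wss://api.hume.ai/v0/evi/chat?" + urlencode(params)
```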
+ messages: + subscribe: + origin: server + body: SubscribeEvent + publish: + origin: client + body: PublishEvent + examples: + - messages: + - type: publish + body: + type: audio_input + data: data + - type: subscribe + body: + type: assistant_end imports: - root: __package__.yml + root: __package__.yml types: - SubscribeEvent: - discriminated: false - union: - - type: root.AssistantEnd - - type: root.AssistantMessage - - type: root.AudioOutput - - type: root.ChatMetadata - - type: root.WebSocketError - - type: root.UserInterruption - - type: root.UserMessage - - type: root.ToolCallMessage - - type: root.ToolResponseMessage - - type: root.ToolErrorMessage - source: - openapi: assistant-asyncapi.json - PublishEvent: - discriminated: false - union: - - type: root.AudioInput - - type: root.SessionSettings - - type: root.UserInput - - type: root.AssistantInput - - type: root.ToolResponseMessage - - type: root.ToolErrorMessage - - type: root.PauseAssistantMessage - - type: root.ResumeAssistantMessage - source: - openapi: assistant-asyncapi.json + SubscribeEvent: + discriminated: false + union: + - type: root.AssistantEnd + - type: root.AssistantMessage + - type: root.AudioOutput + - type: root.ChatMetadata + - type: root.WebSocketError + - type: root.UserInterruption + - type: root.UserMessage + - type: root.ToolCallMessage + - type: root.ToolResponseMessage + - type: root.ToolErrorMessage + source: + openapi: assistant-asyncapi.json + PublishEvent: + discriminated: false + union: + - type: root.AudioInput + - type: root.SessionSettings + - type: root.UserInput + - type: root.AssistantInput + - type: root.ToolResponseMessage + - type: root.ToolErrorMessage + - type: root.PauseAssistantMessage + - type: root.ResumeAssistantMessage + source: + openapi: assistant-asyncapi.json diff --git a/.mock/definition/empathic-voice/chatGroups.yml b/.mock/definition/empathic-voice/chatGroups.yml index 98bc496..7d3a5e9 100644 --- a/.mock/definition/empathic-voice/chatGroups.yml +++ b/.mock/definition/empathic-voice/chatGroups.yml @@ -1,526 +1,526 @@ imports: - root: __package__.yml + root: __package__.yml service: - auth: false - base-path: "" - endpoints: - list-chat-groups: - path: /v0/evi/chat_groups - method: GET - auth: true - docs: Fetches a paginated list of **Chat Groups**. - display-name: List chat_groups - request: - name: ChatGroupsListChatGroupsRequest - query-parameters: - page_number: - type: optional - docs: >- - Specifies the page number to retrieve, enabling pagination. + auth: false + base-path: '' + endpoints: + list-chat-groups: + path: /v0/evi/chat_groups + method: GET + auth: true + docs: Fetches a paginated list of **Chat Groups**. + display-name: List chat_groups + request: + name: ChatGroupsListChatGroupsRequest + query-parameters: + page_number: + type: optional + docs: >- + Specifies the page number to retrieve, enabling pagination. - This parameter uses zero-based indexing. For example, setting - `page_number` to 0 retrieves the first page of results (items 0-9 - if `page_size` is 10), setting `page_number` to 1 retrieves the - second page (items 10-19), and so on. Defaults to 0, which - retrieves the first page. - page_size: - type: optional - docs: >- - Specifies the maximum number of results to include per page, - enabling pagination. The value must be between 1 and 100, - inclusive. + This parameter uses zero-based indexing. 
For example, setting + `page_number` to 0 retrieves the first page of results (items 0-9 + if `page_size` is 10), setting `page_number` to 1 retrieves the + second page (items 10-19), and so on. Defaults to 0, which + retrieves the first page. + page_size: + type: optional + docs: >- + Specifies the maximum number of results to include per page, + enabling pagination. The value must be between 1 and 100, + inclusive. - For example, if `page_size` is set to 10, each page will include - up to 10 items. Defaults to 10. - ascending_order: - type: optional - docs: >- - Specifies the sorting order of the results based on their creation - date. Set to true for ascending order (chronological, with the - oldest records first) and false for descending order - (reverse-chronological, with the newest records first). Defaults - to true. - config_id: - type: optional - docs: >- - The unique identifier for an EVI configuration. + For example, if `page_size` is set to 10, each page will include + up to 10 items. Defaults to 10. + ascending_order: + type: optional + docs: >- + Specifies the sorting order of the results based on their creation + date. Set to true for ascending order (chronological, with the + oldest records first) and false for descending order + (reverse-chronological, with the newest records first). Defaults + to true. + config_id: + type: optional + docs: >- + The unique identifier for an EVI configuration. - Filter Chat Groups to only include Chats that used this - `config_id` in their most recent Chat. - response: - docs: Success - type: root.ReturnPagedChatGroups - errors: - - root.BadRequestError - examples: - - query-parameters: - page_number: 0 - page_size: 1 - ascending_order: true - config_id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - response: - body: - page_number: 0 - page_size: 1 - total_pages: 1 - pagination_direction: ASC - chat_groups_page: - - id: 697056f0-6c7e-487d-9bd8-9c19df79f05f - first_start_timestamp: 1721844196397 - most_recent_start_timestamp: 1721861821717 - active: false - most_recent_chat_id: dfdbdd4d-0ddf-418b-8fc4-80a266579d36 - num_chats: 5 - get-chat-group: - path: /v0/evi/chat_groups/{id} - method: GET - auth: true + Filter Chat Groups to only include Chats that used this + `config_id` in their most recent Chat. + response: + docs: Success + type: root.ReturnPagedChatGroups + errors: + - root.BadRequestError + examples: + - query-parameters: + page_number: 0 + page_size: 1 + ascending_order: true + config_id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 + response: + body: + page_number: 0 + page_size: 1 + total_pages: 1 + pagination_direction: ASC + chat_groups_page: + - id: 697056f0-6c7e-487d-9bd8-9c19df79f05f + first_start_timestamp: 1721844196397 + most_recent_start_timestamp: 1721861821717 + active: false + most_recent_chat_id: dfdbdd4d-0ddf-418b-8fc4-80a266579d36 + num_chats: 5 + get-chat-group: + path: /v0/evi/chat_groups/{id} + method: GET + auth: true + docs: >- + Fetches a **ChatGroup** by ID, including a paginated list of **Chats** + associated with the **ChatGroup**. + path-parameters: + id: + type: string + docs: Identifier for a Chat Group. Formatted as a UUID. + display-name: Get chat_group + request: + name: ChatGroupsGetChatGroupRequest + query-parameters: + page_size: + type: optional docs: >- - Fetches a **ChatGroup** by ID, including a paginated list of **Chats** - associated with the **ChatGroup**. - path-parameters: - id: - type: string - docs: Identifier for a Chat Group. Formatted as a UUID. 
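For orientation, the `list-chat-groups` endpoint above might be called as in the sketch below; the base URL and the API-key request header are assumptions, since authentication details are defined outside this hunk:

```typescript
// Sketch: fetch the first page of Chat Groups whose most recent Chat used a
// given EVI configuration. The auth header name is assumed for illustration.
async function listChatGroups(apiKey: string, configId: string) {
  const query = new URLSearchParams({
    page_number: "0",        // zero-based page index
    page_size: "10",         // between 1 and 100, inclusive
    ascending_order: "true", // oldest Chat Groups first
    config_id: configId,     // filter by the config used in the most recent Chat
  });
  const response = await fetch(`https://api.hume.ai/v0/evi/chat_groups?${query}`, {
    headers: { "X-Hume-Api-Key": apiKey }, // assumed header-based auth
  });
  if (!response.ok) throw new Error(`list-chat-groups failed: ${response.status}`);
  return response.json(); // ReturnPagedChatGroups: { chat_groups_page, total_pages, ... }
}
```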
- display-name: Get chat_group - request: - name: ChatGroupsGetChatGroupRequest - query-parameters: - page_size: - type: optional - docs: >- - Specifies the maximum number of results to include per page, - enabling pagination. The value must be between 1 and 100, - inclusive. + Specifies the maximum number of results to include per page, + enabling pagination. The value must be between 1 and 100, + inclusive. - For example, if `page_size` is set to 10, each page will include - up to 10 items. Defaults to 10. - page_number: - type: optional - docs: >- - Specifies the page number to retrieve, enabling pagination. + For example, if `page_size` is set to 10, each page will include + up to 10 items. Defaults to 10. + page_number: + type: optional + docs: >- + Specifies the page number to retrieve, enabling pagination. - This parameter uses zero-based indexing. For example, setting - `page_number` to 0 retrieves the first page of results (items 0-9 - if `page_size` is 10), setting `page_number` to 1 retrieves the - second page (items 10-19), and so on. Defaults to 0, which - retrieves the first page. - ascending_order: - type: optional - docs: >- - Specifies the sorting order of the results based on their creation - date. Set to true for ascending order (chronological, with the - oldest records first) and false for descending order - (reverse-chronological, with the newest records first). Defaults - to true. - response: - docs: Success - type: root.ReturnChatGroupPagedChats - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 697056f0-6c7e-487d-9bd8-9c19df79f05f - query-parameters: - page_number: 0 - page_size: 1 - ascending_order: true - response: - body: - id: 369846cf-6ad5-404d-905e-a8acb5cdfc78 - first_start_timestamp: 1712334213647 - most_recent_start_timestamp: 1712334213647 - num_chats: 1 - page_number: 0 - page_size: 1 - total_pages: 1 - pagination_direction: ASC - chats_page: - - id: 6375d4f8-cd3e-4d6b-b13b-ace66b7c8aaa - chat_group_id: 369846cf-6ad5-404d-905e-a8acb5cdfc78 - status: USER_ENDED - start_timestamp: 1712334213647 - end_timestamp: 1712334332571 - event_count: 0 - active: false - list-chat-group-events: - path: /v0/evi/chat_groups/{id}/events - method: GET - auth: true + This parameter uses zero-based indexing. For example, setting + `page_number` to 0 retrieves the first page of results (items 0-9 + if `page_size` is 10), setting `page_number` to 1 retrieves the + second page (items 10-19), and so on. Defaults to 0, which + retrieves the first page. + ascending_order: + type: optional + docs: >- + Specifies the sorting order of the results based on their creation + date. Set to true for ascending order (chronological, with the + oldest records first) and false for descending order + (reverse-chronological, with the newest records first). Defaults + to true. 
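Because the response below reports `total_pages` alongside `chats_page`, a client can walk an entire Chat Group page by page. A rough sketch, with the base URL and auth header again assumed:

```typescript
// Sketch: collect every Chat in a Chat Group by advancing page_number until
// total_pages is exhausted. Field names follow the example response below.
async function getAllChatsInGroup(apiKey: string, chatGroupId: string) {
  const chats: unknown[] = [];
  let pageNumber = 0;
  let totalPages = 1;
  while (pageNumber < totalPages) {
    const query = new URLSearchParams({
      page_number: String(pageNumber),
      page_size: "100",        // maximum allowed page size
      ascending_order: "true", // oldest Chats first
    });
    const response = await fetch(
      `https://api.hume.ai/v0/evi/chat_groups/${chatGroupId}?${query}`,
      { headers: { "X-Hume-Api-Key": apiKey } }, // assumed header-based auth
    );
    const body = await response.json();
    chats.push(...body.chats_page);
    totalPages = body.total_pages;
    pageNumber += 1;
  }
  return chats;
}
```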
+ response: + docs: Success + type: root.ReturnChatGroupPagedChats + errors: + - root.BadRequestError + examples: + - path-parameters: + id: 697056f0-6c7e-487d-9bd8-9c19df79f05f + query-parameters: + page_number: 0 + page_size: 1 + ascending_order: true + response: + body: + id: 369846cf-6ad5-404d-905e-a8acb5cdfc78 + first_start_timestamp: 1712334213647 + most_recent_start_timestamp: 1712334213647 + num_chats: 1 + page_number: 0 + page_size: 1 + total_pages: 1 + pagination_direction: ASC + chats_page: + - id: 6375d4f8-cd3e-4d6b-b13b-ace66b7c8aaa + chat_group_id: 369846cf-6ad5-404d-905e-a8acb5cdfc78 + status: USER_ENDED + start_timestamp: 1712334213647 + end_timestamp: 1712334332571 + event_count: 0 + active: false + list-chat-group-events: + path: /v0/evi/chat_groups/{id}/events + method: GET + auth: true + docs: >- + Fetches a paginated list of **Chat** events associated with a **Chat + Group**. + path-parameters: + id: + type: string + docs: Identifier for a Chat Group. Formatted as a UUID. + display-name: List chat events from a specific chat_group + request: + name: ChatGroupsListChatGroupEventsRequest + query-parameters: + page_size: + type: optional docs: >- - Fetches a paginated list of **Chat** events associated with a **Chat - Group**. - path-parameters: - id: - type: string - docs: Identifier for a Chat Group. Formatted as a UUID. - display-name: List chat events from a specific chat_group - request: - name: ChatGroupsListChatGroupEventsRequest - query-parameters: - page_size: - type: optional - docs: >- - Specifies the maximum number of results to include per page, - enabling pagination. The value must be between 1 and 100, - inclusive. + Specifies the maximum number of results to include per page, + enabling pagination. The value must be between 1 and 100, + inclusive. - For example, if `page_size` is set to 10, each page will include - up to 10 items. Defaults to 10. - page_number: - type: optional - docs: >- - Specifies the page number to retrieve, enabling pagination. + For example, if `page_size` is set to 10, each page will include + up to 10 items. Defaults to 10. + page_number: + type: optional + docs: >- + Specifies the page number to retrieve, enabling pagination. - This parameter uses zero-based indexing. For example, setting - `page_number` to 0 retrieves the first page of results (items 0-9 - if `page_size` is 10), setting `page_number` to 1 retrieves the - second page (items 10-19), and so on. Defaults to 0, which - retrieves the first page. - ascending_order: - type: optional - docs: >- - Specifies the sorting order of the results based on their creation - date. Set to true for ascending order (chronological, with the - oldest records first) and false for descending order - (reverse-chronological, with the newest records first). Defaults - to true. - response: - docs: Success - type: root.ReturnChatGroupPagedEvents - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 697056f0-6c7e-487d-9bd8-9c19df79f05f - query-parameters: - page_number: 0 - page_size: 3 - ascending_order: true - response: - body: - id: 697056f0-6c7e-487d-9bd8-9c19df79f05f - page_number: 0 - page_size: 3 - total_pages: 1 - pagination_direction: ASC - events_page: - - id: 5d44bdbb-49a3-40fb-871d-32bf7e76efe7 - chat_id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 - timestamp: 1716244940762 - role: SYSTEM - type: SYSTEM_PROMPT - message_text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. 
Respond to user - queries concisely and clearly. Use simple language and avoid - technical jargon. Provide temperature, precipitation, wind - conditions, and any weather alerts. Include helpful tips if - severe weather is expected. - emotion_features: "" - metadata: "" - - id: 5976ddf6-d093-4bb9-ba60-8f6c25832dde - chat_id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 - timestamp: 1716244956278 - role: USER - type: USER_MESSAGE - message_text: Hello. - emotion_features: >- - {"Admiration": 0.09906005859375, "Adoration": - 0.12213134765625, "Aesthetic Appreciation": - 0.05035400390625, "Amusement": 0.16552734375, "Anger": - 0.0037384033203125, "Anxiety": 0.010101318359375, "Awe": - 0.058197021484375, "Awkwardness": 0.10552978515625, - "Boredom": 0.1141357421875, "Calmness": 0.115234375, - "Concentration": 0.00444793701171875, "Confusion": - 0.0343017578125, "Contemplation": 0.00812530517578125, - "Contempt": 0.009002685546875, "Contentment": - 0.087158203125, "Craving": 0.00818634033203125, "Desire": - 0.018310546875, "Determination": 0.003238677978515625, - "Disappointment": 0.024169921875, "Disgust": - 0.00702667236328125, "Distress": 0.00936126708984375, - "Doubt": 0.00632476806640625, "Ecstasy": 0.0293731689453125, - "Embarrassment": 0.01800537109375, "Empathic Pain": - 0.0088348388671875, "Entrancement": 0.013397216796875, - "Envy": 0.02557373046875, "Excitement": 0.12109375, "Fear": - 0.004413604736328125, "Guilt": 0.016571044921875, "Horror": - 0.00274658203125, "Interest": 0.2142333984375, "Joy": - 0.29638671875, "Love": 0.16015625, "Nostalgia": - 0.007843017578125, "Pain": 0.007160186767578125, "Pride": - 0.00508880615234375, "Realization": 0.054229736328125, - "Relief": 0.048736572265625, "Romance": 0.026397705078125, - "Sadness": 0.0265350341796875, "Satisfaction": - 0.051361083984375, "Shame": 0.00974273681640625, "Surprise - (negative)": 0.0218963623046875, "Surprise (positive)": - 0.216064453125, "Sympathy": 0.021728515625, "Tiredness": - 0.0173797607421875, "Triumph": 0.004520416259765625} - metadata: >- - {"segments": [{"content": "Hello.", "embedding": - [0.6181640625, 0.1763916015625, -30.921875, 1.2705078125, - 0.927734375, 0.63720703125, 2.865234375, 0.1080322265625, - 0.2978515625, 1.0107421875, 1.34375, 0.74560546875, - 0.416259765625, 0.99462890625, -0.333740234375, - 0.361083984375, -1.388671875, 1.0107421875, 1.3173828125, - 0.55615234375, 0.541015625, -0.1837158203125, 1.697265625, - 0.228515625, 2.087890625, -0.311767578125, - 0.053680419921875, 1.3349609375, 0.95068359375, - 0.00441741943359375, 0.705078125, 1.8916015625, - -0.939453125, 0.93701171875, -0.28955078125, 1.513671875, - 0.5595703125, 1.0126953125, -0.1624755859375, 1.4072265625, - -0.28857421875, -0.4560546875, -0.1500244140625, - -0.1102294921875, -0.222412109375, 0.8779296875, - 1.275390625, 1.6689453125, 0.80712890625, -0.34814453125, - -0.325439453125, 0.412841796875, 0.81689453125, - 0.55126953125, 1.671875, 0.6611328125, 0.7451171875, - 1.50390625, 1.0224609375, -1.671875, 0.7373046875, - 2.1328125, 2.166015625, 0.41015625, -0.127685546875, - 1.9345703125, -4.2734375, 0.332275390625, 0.26171875, - 0.76708984375, 0.2685546875, 0.468017578125, 1.208984375, - -1.517578125, 1.083984375, 0.84814453125, 1.0244140625, - -0.0072174072265625, 1.34375, 1.0712890625, 1.517578125, - -0.52001953125, 0.59228515625, 0.8154296875, -0.951171875, - -0.07757568359375, 1.3330078125, 1.125, 0.61181640625, - 1.494140625, 0.357421875, 1.1796875, 1.482421875, 0.8046875, - 0.1536865234375, 1.8076171875, 0.68115234375, 
-15.171875, - 1.2294921875, 0.319091796875, 0.499755859375, 1.5771484375, - 0.94677734375, -0.2490234375, 0.88525390625, 3.47265625, - 0.75927734375, 0.71044921875, 1.2333984375, 1.4169921875, - -0.56640625, -1.8095703125, 1.37109375, 0.428955078125, - 1.89453125, -0.39013671875, 0.1734619140625, 1.5595703125, - -1.2294921875, 2.552734375, 0.58349609375, 0.2156982421875, - -0.00984954833984375, -0.6865234375, -0.0272979736328125, - -0.2264404296875, 2.853515625, 1.3896484375, 0.52978515625, - 0.783203125, 3.0390625, 0.75537109375, 0.219970703125, - 0.384521484375, 0.385986328125, 2.0546875, - -0.10443115234375, 1.5146484375, 1.4296875, 1.9716796875, - 1.1318359375, 0.31591796875, 0.338623046875, 1.654296875, - -0.88037109375, -0.21484375, 1.45703125, 1.0380859375, - -0.52294921875, -0.47802734375, 0.1650390625, 1.2392578125, - -1.138671875, 0.56787109375, 1.318359375, 0.4287109375, - 0.1981201171875, 2.4375, 0.281005859375, 0.89404296875, - -0.1552734375, 0.6474609375, -0.08331298828125, - 0.00740814208984375, -0.045501708984375, -0.578125, - 2.02734375, 0.59228515625, 0.35693359375, 1.2919921875, - 1.22265625, 1.0537109375, 0.145263671875, 1.05859375, - -0.369140625, 0.207275390625, 0.78857421875, 0.599609375, - 0.99072265625, 0.24462890625, 1.26953125, 0.08404541015625, - 1.349609375, 0.73291015625, 1.3212890625, 0.388916015625, - 1.0869140625, 0.9931640625, -1.5673828125, 0.0462646484375, - 0.650390625, 0.253662109375, 0.58251953125, 1.8134765625, - 0.8642578125, 2.591796875, 0.7314453125, 0.85986328125, - 0.5615234375, 0.9296875, 0.04144287109375, 1.66015625, - 1.99609375, 1.171875, 1.181640625, 1.5126953125, - 0.0224456787109375, 0.58349609375, -1.4931640625, - 0.81884765625, 0.732421875, -0.6455078125, -0.62451171875, - 1.7802734375, 0.01526641845703125, -0.423095703125, - 0.461669921875, 4.87890625, 1.2392578125, -0.6953125, - 0.6689453125, 0.62451171875, -1.521484375, 1.7685546875, - 0.810546875, 0.65478515625, 0.26123046875, 1.6396484375, - 0.87548828125, 1.7353515625, 2.046875, 1.5634765625, - 0.69384765625, 1.375, 0.8916015625, 1.0107421875, - 0.1304931640625, 2.009765625, 0.06402587890625, - -0.08428955078125, 0.04351806640625, -1.7529296875, - 2.02734375, 3.521484375, 0.404541015625, 1.6337890625, - -0.276611328125, 0.8837890625, -0.1287841796875, - 0.91064453125, 0.8193359375, 0.701171875, 0.036529541015625, - 1.26171875, 1.0478515625, -0.1422119140625, 1.0634765625, - 0.61083984375, 1.3505859375, 1.208984375, 0.57275390625, - 1.3623046875, 2.267578125, 0.484375, 0.9150390625, - 0.56787109375, -0.70068359375, 0.27587890625, - -0.70654296875, 0.8466796875, 0.57568359375, 1.6162109375, - 0.87939453125, 2.248046875, -0.5458984375, 1.7744140625, - 1.328125, 1.232421875, 0.6806640625, 0.9365234375, - 1.052734375, -1.08984375, 1.8330078125, -0.4033203125, - 1.0673828125, 0.297607421875, 1.5703125, 1.67578125, - 1.34765625, 2.8203125, 2.025390625, -0.48583984375, - 0.7626953125, 0.01007843017578125, 1.435546875, - 0.007205963134765625, 0.05157470703125, -0.9853515625, - 0.26708984375, 1.16796875, 1.2041015625, 1.99609375, - -0.07916259765625, 1.244140625, -0.32080078125, - 0.6748046875, 0.419921875, 1.3212890625, 1.291015625, - 0.599609375, 0.0550537109375, 0.9599609375, 0.93505859375, - 0.111083984375, 1.302734375, 0.0833740234375, 2.244140625, - 1.25390625, 1.6015625, 0.58349609375, 1.7568359375, - -0.263427734375, -0.019866943359375, -0.24658203125, - -0.1871337890625, 0.927734375, 0.62255859375, - 0.275146484375, 0.79541015625, 1.1796875, 1.1767578125, - -0.26123046875, 
-0.268310546875, 1.8994140625, 1.318359375, - 2.1875, 0.2469482421875, 1.41015625, 0.03973388671875, - 1.2685546875, 1.1025390625, 0.9560546875, 0.865234375, - -1.92578125, 1.154296875, 0.389892578125, 1.130859375, - 0.95947265625, 0.72314453125, 2.244140625, - 0.048553466796875, 0.626953125, 0.42919921875, - 0.82275390625, 0.311767578125, -0.320556640625, - 0.01041412353515625, 0.1483154296875, 0.10809326171875, - -0.3173828125, 1.1337890625, -0.8642578125, 1.4033203125, - 0.048828125, 1.1787109375, 0.98779296875, 1.818359375, - 1.1552734375, 0.6015625, 1.2392578125, -1.2685546875, - 0.39208984375, 0.83251953125, 0.224365234375, - 0.0019989013671875, 0.87548828125, 1.6572265625, - 1.107421875, 0.434814453125, 1.8251953125, 0.442626953125, - 1.2587890625, 0.09320068359375, -0.896484375, 1.8017578125, - 1.451171875, -0.0755615234375, 0.6083984375, 2.06640625, - 0.673828125, -0.33740234375, 0.192138671875, 0.21435546875, - 0.80224609375, -1.490234375, 0.9501953125, 0.86083984375, - -0.40283203125, 4.109375, 2.533203125, 1.2529296875, - 0.8271484375, 0.225830078125, 1.0478515625, -1.9755859375, - 0.841796875, 0.392822265625, 0.525390625, 0.33935546875, - -0.79443359375, 0.71630859375, 0.97998046875, - -0.175537109375, 0.97705078125, 1.705078125, 0.29638671875, - 0.68359375, 0.54150390625, 0.435791015625, 0.99755859375, - -0.369140625, 1.009765625, -0.140380859375, 0.426513671875, - 0.189697265625, 1.8193359375, 1.1201171875, -0.5009765625, - -0.331298828125, 0.759765625, -0.09442138671875, 0.74609375, - -1.947265625, 1.3544921875, -3.935546875, 2.544921875, - 1.359375, 0.1363525390625, 0.79296875, 0.79931640625, - -0.3466796875, 1.1396484375, -0.33447265625, 2.0078125, - -0.241455078125, 0.6318359375, 0.365234375, 0.296142578125, - 0.830078125, 1.0458984375, 0.5830078125, 0.61572265625, - 14.0703125, -2.0078125, -0.381591796875, 1.228515625, - 0.08282470703125, -0.67822265625, -0.04339599609375, - 0.397216796875, 0.1656494140625, 0.137451171875, - 0.244873046875, 1.1611328125, -1.3818359375, 0.8447265625, - 1.171875, 0.36328125, 0.252685546875, 0.1197509765625, - 0.232177734375, -0.020172119140625, 0.64404296875, - -0.01100921630859375, -1.9267578125, 0.222412109375, - 0.56005859375, 1.3046875, 1.1630859375, 1.197265625, - 1.02734375, 1.6806640625, -0.043731689453125, 1.4697265625, - 0.81201171875, 1.5390625, 1.240234375, -0.7353515625, - 1.828125, 1.115234375, 1.931640625, -0.517578125, - 0.77880859375, 1.0546875, 0.95361328125, 3.42578125, - 0.0160369873046875, 0.875, 0.56005859375, 1.2421875, - 1.986328125, 1.4814453125, 0.0948486328125, 1.115234375, - 0.00665283203125, 2.09375, 0.3544921875, -0.52783203125, - 1.2099609375, 0.45068359375, 0.65625, 0.1112060546875, - 1.0751953125, -0.9521484375, -0.30029296875, 1.4462890625, - 2.046875, 3.212890625, 1.68359375, 1.07421875, - -0.5263671875, 0.74560546875, 1.37890625, 0.15283203125, - 0.2440185546875, 0.62646484375, -0.1280517578125, - 0.7646484375, -0.515625, -0.35693359375, 1.2958984375, - 0.96923828125, 0.58935546875, 1.3701171875, 1.0673828125, - 0.2337646484375, 0.93115234375, 0.66357421875, 6.0, - 1.1025390625, -0.51708984375, -0.38330078125, 0.7197265625, - 0.246826171875, -0.45166015625, 1.9521484375, 0.5546875, - 0.08807373046875, 0.18505859375, 0.8857421875, - -0.57177734375, 0.251708984375, 0.234375, 2.57421875, - 0.9599609375, 0.5029296875, 0.10382080078125, - 0.08331298828125, 0.66748046875, -0.349609375, 1.287109375, - 0.259765625, 2.015625, 2.828125, -0.3095703125, - -0.164306640625, -0.3408203125, 0.486572265625, - 
0.8466796875, 1.9130859375, 0.09088134765625, 0.66552734375, - 0.00972747802734375, -0.83154296875, 1.755859375, - 0.654296875, 0.173828125, 0.27587890625, -0.47607421875, - -0.264404296875, 0.7529296875, 0.6533203125, 0.7275390625, - 0.499755859375, 0.833984375, -0.44775390625, -0.05078125, - -0.454833984375, 0.75439453125, 0.68505859375, - 0.210693359375, -0.283935546875, -0.53564453125, - 0.96826171875, 0.861328125, -3.33984375, -0.26171875, - 0.77734375, 0.26513671875, -0.14111328125, -0.042236328125, - -0.84814453125, 0.2137451171875, 0.94921875, 0.65185546875, - -0.5380859375, 0.1529541015625, -0.360595703125, - -0.0333251953125, -0.69189453125, 0.8974609375, 0.7109375, - 0.81494140625, -0.259521484375, 1.1904296875, 0.62158203125, - 1.345703125, 0.89404296875, 0.70556640625, 1.0673828125, - 1.392578125, 0.5068359375, 0.962890625, 0.736328125, - 1.55078125, 0.50390625, -0.398681640625, 2.361328125, - 0.345947265625, -0.61962890625, 0.330078125, 0.75439453125, - -0.673828125, -0.2379150390625, 1.5673828125, 1.369140625, - 0.1119384765625, -0.1834716796875, 1.4599609375, - -0.77587890625, 0.5556640625, 0.09954833984375, - 0.0285186767578125, 0.58935546875, -0.501953125, - 0.212890625, 0.02679443359375, 0.1715087890625, - 0.03466796875, -0.564453125, 2.029296875, 2.45703125, - -0.72216796875, 2.138671875, 0.50830078125, - -0.09356689453125, 0.230224609375, 1.6943359375, - 1.5126953125, 0.39453125, 0.411376953125, 1.07421875, - -0.8046875, 0.51416015625, 0.2271728515625, -0.283447265625, - 0.38427734375, 0.73388671875, 0.6962890625, 1.4990234375, - 0.02813720703125, 0.40478515625, 1.2451171875, 1.1162109375, - -5.5703125, 0.76171875, 0.322021484375, 1.0361328125, - 1.197265625, 0.1163330078125, 0.2425537109375, 1.5595703125, - 1.5791015625, -0.0921630859375, 0.484619140625, - 1.9052734375, 5.31640625, 1.6337890625, 0.95947265625, - -0.1751708984375, 0.466552734375, 0.8330078125, 1.03125, - 0.2044677734375, 0.31298828125, -1.1220703125, 0.5517578125, - 0.93505859375, 0.45166015625, 1.951171875, 0.65478515625, - 1.30859375, 1.0859375, 0.56494140625, 2.322265625, - 0.242919921875, 1.81640625, -0.469970703125, -0.841796875, - 0.90869140625, 1.5361328125, 0.923828125, 1.0595703125, - 0.356689453125, -0.46142578125, 2.134765625, 1.3037109375, - -0.32373046875, -9.2265625, 0.4521484375, 0.88037109375, - -0.53955078125, 0.96484375, 0.7705078125, 0.84521484375, - 1.580078125, -0.1448974609375, 0.7607421875, 1.0166015625, - -0.086669921875, 1.611328125, 0.05938720703125, 0.5078125, - 0.8427734375, 2.431640625, 0.66357421875, 3.203125, - 0.132080078125, 0.461181640625, 0.779296875, 1.9482421875, - 1.8720703125, 0.845703125, -1.3837890625, -0.138916015625, - 0.35546875, 0.2457275390625, 0.75341796875, 1.828125, - 1.4169921875, 0.60791015625, 1.0068359375, 1.109375, - 0.484130859375, -0.302001953125, 0.4951171875, 0.802734375, - 1.9482421875, 0.916015625, 0.1646728515625, 2.599609375, - 1.7177734375, -0.2374267578125, 0.98046875, 0.39306640625, - -1.1396484375, 1.6533203125, 0.375244140625], "scores": - [0.09906005859375, 0.12213134765625, 0.05035400390625, - 0.16552734375, 0.0037384033203125, 0.010101318359375, - 0.058197021484375, 0.10552978515625, 0.1141357421875, - 0.115234375, 0.00444793701171875, 0.00812530517578125, - 0.0343017578125, 0.009002685546875, 0.087158203125, - 0.00818634033203125, 0.003238677978515625, 0.024169921875, - 0.00702667236328125, 0.00936126708984375, - 0.00632476806640625, 0.0293731689453125, 0.01800537109375, - 0.0088348388671875, 0.013397216796875, 0.02557373046875, - 
0.12109375, 0.004413604736328125, 0.016571044921875, - 0.00274658203125, 0.2142333984375, 0.29638671875, - 0.16015625, 0.007843017578125, 0.007160186767578125, - 0.00508880615234375, 0.054229736328125, 0.048736572265625, - 0.026397705078125, 0.0265350341796875, 0.051361083984375, - 0.018310546875, 0.00974273681640625, 0.0218963623046875, - 0.216064453125, 0.021728515625, 0.0173797607421875, - 0.004520416259765625], "stoks": [52, 52, 52, 52, 52, 41, 41, - 374, 303, 303, 303, 427], "time": {"begin_ms": 640, - "end_ms": 1140}}]} - - id: 7645a0d1-2e64-410d-83a8-b96040432e9a - chat_id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 - timestamp: 1716244957031 - role: AGENT - type: AGENT_MESSAGE - message_text: Hello! - emotion_features: >- - {"Admiration": 0.044921875, "Adoration": 0.0253753662109375, - "Aesthetic Appreciation": 0.03265380859375, "Amusement": - 0.118408203125, "Anger": 0.06719970703125, "Anxiety": - 0.0411376953125, "Awe": 0.03802490234375, "Awkwardness": - 0.056549072265625, "Boredom": 0.04217529296875, "Calmness": - 0.08709716796875, "Concentration": 0.070556640625, - "Confusion": 0.06964111328125, "Contemplation": - 0.0343017578125, "Contempt": 0.037689208984375, - "Contentment": 0.059417724609375, "Craving": - 0.01132965087890625, "Desire": 0.01406097412109375, - "Determination": 0.1143798828125, "Disappointment": - 0.051177978515625, "Disgust": 0.028594970703125, "Distress": - 0.054901123046875, "Doubt": 0.04638671875, "Ecstasy": - 0.0258026123046875, "Embarrassment": 0.0222015380859375, - "Empathic Pain": 0.015777587890625, "Entrancement": - 0.0160980224609375, "Envy": 0.0163421630859375, - "Excitement": 0.129638671875, "Fear": 0.03125, "Guilt": - 0.01483917236328125, "Horror": 0.0194549560546875, - "Interest": 0.1341552734375, "Joy": 0.0738525390625, "Love": - 0.0216522216796875, "Nostalgia": 0.0210418701171875, "Pain": - 0.020721435546875, "Pride": 0.05499267578125, "Realization": - 0.0728759765625, "Relief": 0.04052734375, "Romance": - 0.0129241943359375, "Sadness": 0.0254669189453125, - "Satisfaction": 0.07159423828125, "Shame": 0.01495361328125, - "Surprise (negative)": 0.05560302734375, "Surprise - (positive)": 0.07965087890625, "Sympathy": - 0.022247314453125, "Tiredness": 0.0194549560546875, - "Triumph": 0.04107666015625} - metadata: "" - source: - openapi: stenographer-openapi.json + This parameter uses zero-based indexing. For example, setting + `page_number` to 0 retrieves the first page of results (items 0-9 + if `page_size` is 10), setting `page_number` to 1 retrieves the + second page (items 10-19), and so on. Defaults to 0, which + retrieves the first page. + ascending_order: + type: optional + docs: >- + Specifies the sorting order of the results based on their creation + date. Set to true for ascending order (chronological, with the + oldest records first) and false for descending order + (reverse-chronological, with the newest records first). Defaults + to true. 
+ response: + docs: Success + type: root.ReturnChatGroupPagedEvents + errors: + - root.BadRequestError + examples: + - path-parameters: + id: 697056f0-6c7e-487d-9bd8-9c19df79f05f + query-parameters: + page_number: 0 + page_size: 3 + ascending_order: true + response: + body: + id: 697056f0-6c7e-487d-9bd8-9c19df79f05f + page_number: 0 + page_size: 3 + total_pages: 1 + pagination_direction: ASC + events_page: + - id: 5d44bdbb-49a3-40fb-871d-32bf7e76efe7 + chat_id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 + timestamp: 1716244940762 + role: SYSTEM + type: SYSTEM_PROMPT + message_text: >- + You are an AI weather assistant providing users with + accurate and up-to-date weather information. Respond to user + queries concisely and clearly. Use simple language and avoid + technical jargon. Provide temperature, precipitation, wind + conditions, and any weather alerts. Include helpful tips if + severe weather is expected. + emotion_features: '' + metadata: '' + - id: 5976ddf6-d093-4bb9-ba60-8f6c25832dde + chat_id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 + timestamp: 1716244956278 + role: USER + type: USER_MESSAGE + message_text: Hello. + emotion_features: >- + {"Admiration": 0.09906005859375, "Adoration": + 0.12213134765625, "Aesthetic Appreciation": + 0.05035400390625, "Amusement": 0.16552734375, "Anger": + 0.0037384033203125, "Anxiety": 0.010101318359375, "Awe": + 0.058197021484375, "Awkwardness": 0.10552978515625, + "Boredom": 0.1141357421875, "Calmness": 0.115234375, + "Concentration": 0.00444793701171875, "Confusion": + 0.0343017578125, "Contemplation": 0.00812530517578125, + "Contempt": 0.009002685546875, "Contentment": + 0.087158203125, "Craving": 0.00818634033203125, "Desire": + 0.018310546875, "Determination": 0.003238677978515625, + "Disappointment": 0.024169921875, "Disgust": + 0.00702667236328125, "Distress": 0.00936126708984375, + "Doubt": 0.00632476806640625, "Ecstasy": 0.0293731689453125, + "Embarrassment": 0.01800537109375, "Empathic Pain": + 0.0088348388671875, "Entrancement": 0.013397216796875, + "Envy": 0.02557373046875, "Excitement": 0.12109375, "Fear": + 0.004413604736328125, "Guilt": 0.016571044921875, "Horror": + 0.00274658203125, "Interest": 0.2142333984375, "Joy": + 0.29638671875, "Love": 0.16015625, "Nostalgia": + 0.007843017578125, "Pain": 0.007160186767578125, "Pride": + 0.00508880615234375, "Realization": 0.054229736328125, + "Relief": 0.048736572265625, "Romance": 0.026397705078125, + "Sadness": 0.0265350341796875, "Satisfaction": + 0.051361083984375, "Shame": 0.00974273681640625, "Surprise + (negative)": 0.0218963623046875, "Surprise (positive)": + 0.216064453125, "Sympathy": 0.021728515625, "Tiredness": + 0.0173797607421875, "Triumph": 0.004520416259765625} + metadata: >- + {"segments": [{"content": "Hello.", "embedding": + [0.6181640625, 0.1763916015625, -30.921875, 1.2705078125, + 0.927734375, 0.63720703125, 2.865234375, 0.1080322265625, + 0.2978515625, 1.0107421875, 1.34375, 0.74560546875, + 0.416259765625, 0.99462890625, -0.333740234375, + 0.361083984375, -1.388671875, 1.0107421875, 1.3173828125, + 0.55615234375, 0.541015625, -0.1837158203125, 1.697265625, + 0.228515625, 2.087890625, -0.311767578125, + 0.053680419921875, 1.3349609375, 0.95068359375, + 0.00441741943359375, 0.705078125, 1.8916015625, + -0.939453125, 0.93701171875, -0.28955078125, 1.513671875, + 0.5595703125, 1.0126953125, -0.1624755859375, 1.4072265625, + -0.28857421875, -0.4560546875, -0.1500244140625, + -0.1102294921875, -0.222412109375, 0.8779296875, + 1.275390625, 1.6689453125, 0.80712890625, 
-0.34814453125, + -0.325439453125, 0.412841796875, 0.81689453125, + 0.55126953125, 1.671875, 0.6611328125, 0.7451171875, + 1.50390625, 1.0224609375, -1.671875, 0.7373046875, + 2.1328125, 2.166015625, 0.41015625, -0.127685546875, + 1.9345703125, -4.2734375, 0.332275390625, 0.26171875, + 0.76708984375, 0.2685546875, 0.468017578125, 1.208984375, + -1.517578125, 1.083984375, 0.84814453125, 1.0244140625, + -0.0072174072265625, 1.34375, 1.0712890625, 1.517578125, + -0.52001953125, 0.59228515625, 0.8154296875, -0.951171875, + -0.07757568359375, 1.3330078125, 1.125, 0.61181640625, + 1.494140625, 0.357421875, 1.1796875, 1.482421875, 0.8046875, + 0.1536865234375, 1.8076171875, 0.68115234375, -15.171875, + 1.2294921875, 0.319091796875, 0.499755859375, 1.5771484375, + 0.94677734375, -0.2490234375, 0.88525390625, 3.47265625, + 0.75927734375, 0.71044921875, 1.2333984375, 1.4169921875, + -0.56640625, -1.8095703125, 1.37109375, 0.428955078125, + 1.89453125, -0.39013671875, 0.1734619140625, 1.5595703125, + -1.2294921875, 2.552734375, 0.58349609375, 0.2156982421875, + -0.00984954833984375, -0.6865234375, -0.0272979736328125, + -0.2264404296875, 2.853515625, 1.3896484375, 0.52978515625, + 0.783203125, 3.0390625, 0.75537109375, 0.219970703125, + 0.384521484375, 0.385986328125, 2.0546875, + -0.10443115234375, 1.5146484375, 1.4296875, 1.9716796875, + 1.1318359375, 0.31591796875, 0.338623046875, 1.654296875, + -0.88037109375, -0.21484375, 1.45703125, 1.0380859375, + -0.52294921875, -0.47802734375, 0.1650390625, 1.2392578125, + -1.138671875, 0.56787109375, 1.318359375, 0.4287109375, + 0.1981201171875, 2.4375, 0.281005859375, 0.89404296875, + -0.1552734375, 0.6474609375, -0.08331298828125, + 0.00740814208984375, -0.045501708984375, -0.578125, + 2.02734375, 0.59228515625, 0.35693359375, 1.2919921875, + 1.22265625, 1.0537109375, 0.145263671875, 1.05859375, + -0.369140625, 0.207275390625, 0.78857421875, 0.599609375, + 0.99072265625, 0.24462890625, 1.26953125, 0.08404541015625, + 1.349609375, 0.73291015625, 1.3212890625, 0.388916015625, + 1.0869140625, 0.9931640625, -1.5673828125, 0.0462646484375, + 0.650390625, 0.253662109375, 0.58251953125, 1.8134765625, + 0.8642578125, 2.591796875, 0.7314453125, 0.85986328125, + 0.5615234375, 0.9296875, 0.04144287109375, 1.66015625, + 1.99609375, 1.171875, 1.181640625, 1.5126953125, + 0.0224456787109375, 0.58349609375, -1.4931640625, + 0.81884765625, 0.732421875, -0.6455078125, -0.62451171875, + 1.7802734375, 0.01526641845703125, -0.423095703125, + 0.461669921875, 4.87890625, 1.2392578125, -0.6953125, + 0.6689453125, 0.62451171875, -1.521484375, 1.7685546875, + 0.810546875, 0.65478515625, 0.26123046875, 1.6396484375, + 0.87548828125, 1.7353515625, 2.046875, 1.5634765625, + 0.69384765625, 1.375, 0.8916015625, 1.0107421875, + 0.1304931640625, 2.009765625, 0.06402587890625, + -0.08428955078125, 0.04351806640625, -1.7529296875, + 2.02734375, 3.521484375, 0.404541015625, 1.6337890625, + -0.276611328125, 0.8837890625, -0.1287841796875, + 0.91064453125, 0.8193359375, 0.701171875, 0.036529541015625, + 1.26171875, 1.0478515625, -0.1422119140625, 1.0634765625, + 0.61083984375, 1.3505859375, 1.208984375, 0.57275390625, + 1.3623046875, 2.267578125, 0.484375, 0.9150390625, + 0.56787109375, -0.70068359375, 0.27587890625, + -0.70654296875, 0.8466796875, 0.57568359375, 1.6162109375, + 0.87939453125, 2.248046875, -0.5458984375, 1.7744140625, + 1.328125, 1.232421875, 0.6806640625, 0.9365234375, + 1.052734375, -1.08984375, 1.8330078125, -0.4033203125, + 1.0673828125, 0.297607421875, 1.5703125, 
1.67578125, + 1.34765625, 2.8203125, 2.025390625, -0.48583984375, + 0.7626953125, 0.01007843017578125, 1.435546875, + 0.007205963134765625, 0.05157470703125, -0.9853515625, + 0.26708984375, 1.16796875, 1.2041015625, 1.99609375, + -0.07916259765625, 1.244140625, -0.32080078125, + 0.6748046875, 0.419921875, 1.3212890625, 1.291015625, + 0.599609375, 0.0550537109375, 0.9599609375, 0.93505859375, + 0.111083984375, 1.302734375, 0.0833740234375, 2.244140625, + 1.25390625, 1.6015625, 0.58349609375, 1.7568359375, + -0.263427734375, -0.019866943359375, -0.24658203125, + -0.1871337890625, 0.927734375, 0.62255859375, + 0.275146484375, 0.79541015625, 1.1796875, 1.1767578125, + -0.26123046875, -0.268310546875, 1.8994140625, 1.318359375, + 2.1875, 0.2469482421875, 1.41015625, 0.03973388671875, + 1.2685546875, 1.1025390625, 0.9560546875, 0.865234375, + -1.92578125, 1.154296875, 0.389892578125, 1.130859375, + 0.95947265625, 0.72314453125, 2.244140625, + 0.048553466796875, 0.626953125, 0.42919921875, + 0.82275390625, 0.311767578125, -0.320556640625, + 0.01041412353515625, 0.1483154296875, 0.10809326171875, + -0.3173828125, 1.1337890625, -0.8642578125, 1.4033203125, + 0.048828125, 1.1787109375, 0.98779296875, 1.818359375, + 1.1552734375, 0.6015625, 1.2392578125, -1.2685546875, + 0.39208984375, 0.83251953125, 0.224365234375, + 0.0019989013671875, 0.87548828125, 1.6572265625, + 1.107421875, 0.434814453125, 1.8251953125, 0.442626953125, + 1.2587890625, 0.09320068359375, -0.896484375, 1.8017578125, + 1.451171875, -0.0755615234375, 0.6083984375, 2.06640625, + 0.673828125, -0.33740234375, 0.192138671875, 0.21435546875, + 0.80224609375, -1.490234375, 0.9501953125, 0.86083984375, + -0.40283203125, 4.109375, 2.533203125, 1.2529296875, + 0.8271484375, 0.225830078125, 1.0478515625, -1.9755859375, + 0.841796875, 0.392822265625, 0.525390625, 0.33935546875, + -0.79443359375, 0.71630859375, 0.97998046875, + -0.175537109375, 0.97705078125, 1.705078125, 0.29638671875, + 0.68359375, 0.54150390625, 0.435791015625, 0.99755859375, + -0.369140625, 1.009765625, -0.140380859375, 0.426513671875, + 0.189697265625, 1.8193359375, 1.1201171875, -0.5009765625, + -0.331298828125, 0.759765625, -0.09442138671875, 0.74609375, + -1.947265625, 1.3544921875, -3.935546875, 2.544921875, + 1.359375, 0.1363525390625, 0.79296875, 0.79931640625, + -0.3466796875, 1.1396484375, -0.33447265625, 2.0078125, + -0.241455078125, 0.6318359375, 0.365234375, 0.296142578125, + 0.830078125, 1.0458984375, 0.5830078125, 0.61572265625, + 14.0703125, -2.0078125, -0.381591796875, 1.228515625, + 0.08282470703125, -0.67822265625, -0.04339599609375, + 0.397216796875, 0.1656494140625, 0.137451171875, + 0.244873046875, 1.1611328125, -1.3818359375, 0.8447265625, + 1.171875, 0.36328125, 0.252685546875, 0.1197509765625, + 0.232177734375, -0.020172119140625, 0.64404296875, + -0.01100921630859375, -1.9267578125, 0.222412109375, + 0.56005859375, 1.3046875, 1.1630859375, 1.197265625, + 1.02734375, 1.6806640625, -0.043731689453125, 1.4697265625, + 0.81201171875, 1.5390625, 1.240234375, -0.7353515625, + 1.828125, 1.115234375, 1.931640625, -0.517578125, + 0.77880859375, 1.0546875, 0.95361328125, 3.42578125, + 0.0160369873046875, 0.875, 0.56005859375, 1.2421875, + 1.986328125, 1.4814453125, 0.0948486328125, 1.115234375, + 0.00665283203125, 2.09375, 0.3544921875, -0.52783203125, + 1.2099609375, 0.45068359375, 0.65625, 0.1112060546875, + 1.0751953125, -0.9521484375, -0.30029296875, 1.4462890625, + 2.046875, 3.212890625, 1.68359375, 1.07421875, + -0.5263671875, 0.74560546875, 
1.37890625, 0.15283203125, + 0.2440185546875, 0.62646484375, -0.1280517578125, + 0.7646484375, -0.515625, -0.35693359375, 1.2958984375, + 0.96923828125, 0.58935546875, 1.3701171875, 1.0673828125, + 0.2337646484375, 0.93115234375, 0.66357421875, 6.0, + 1.1025390625, -0.51708984375, -0.38330078125, 0.7197265625, + 0.246826171875, -0.45166015625, 1.9521484375, 0.5546875, + 0.08807373046875, 0.18505859375, 0.8857421875, + -0.57177734375, 0.251708984375, 0.234375, 2.57421875, + 0.9599609375, 0.5029296875, 0.10382080078125, + 0.08331298828125, 0.66748046875, -0.349609375, 1.287109375, + 0.259765625, 2.015625, 2.828125, -0.3095703125, + -0.164306640625, -0.3408203125, 0.486572265625, + 0.8466796875, 1.9130859375, 0.09088134765625, 0.66552734375, + 0.00972747802734375, -0.83154296875, 1.755859375, + 0.654296875, 0.173828125, 0.27587890625, -0.47607421875, + -0.264404296875, 0.7529296875, 0.6533203125, 0.7275390625, + 0.499755859375, 0.833984375, -0.44775390625, -0.05078125, + -0.454833984375, 0.75439453125, 0.68505859375, + 0.210693359375, -0.283935546875, -0.53564453125, + 0.96826171875, 0.861328125, -3.33984375, -0.26171875, + 0.77734375, 0.26513671875, -0.14111328125, -0.042236328125, + -0.84814453125, 0.2137451171875, 0.94921875, 0.65185546875, + -0.5380859375, 0.1529541015625, -0.360595703125, + -0.0333251953125, -0.69189453125, 0.8974609375, 0.7109375, + 0.81494140625, -0.259521484375, 1.1904296875, 0.62158203125, + 1.345703125, 0.89404296875, 0.70556640625, 1.0673828125, + 1.392578125, 0.5068359375, 0.962890625, 0.736328125, + 1.55078125, 0.50390625, -0.398681640625, 2.361328125, + 0.345947265625, -0.61962890625, 0.330078125, 0.75439453125, + -0.673828125, -0.2379150390625, 1.5673828125, 1.369140625, + 0.1119384765625, -0.1834716796875, 1.4599609375, + -0.77587890625, 0.5556640625, 0.09954833984375, + 0.0285186767578125, 0.58935546875, -0.501953125, + 0.212890625, 0.02679443359375, 0.1715087890625, + 0.03466796875, -0.564453125, 2.029296875, 2.45703125, + -0.72216796875, 2.138671875, 0.50830078125, + -0.09356689453125, 0.230224609375, 1.6943359375, + 1.5126953125, 0.39453125, 0.411376953125, 1.07421875, + -0.8046875, 0.51416015625, 0.2271728515625, -0.283447265625, + 0.38427734375, 0.73388671875, 0.6962890625, 1.4990234375, + 0.02813720703125, 0.40478515625, 1.2451171875, 1.1162109375, + -5.5703125, 0.76171875, 0.322021484375, 1.0361328125, + 1.197265625, 0.1163330078125, 0.2425537109375, 1.5595703125, + 1.5791015625, -0.0921630859375, 0.484619140625, + 1.9052734375, 5.31640625, 1.6337890625, 0.95947265625, + -0.1751708984375, 0.466552734375, 0.8330078125, 1.03125, + 0.2044677734375, 0.31298828125, -1.1220703125, 0.5517578125, + 0.93505859375, 0.45166015625, 1.951171875, 0.65478515625, + 1.30859375, 1.0859375, 0.56494140625, 2.322265625, + 0.242919921875, 1.81640625, -0.469970703125, -0.841796875, + 0.90869140625, 1.5361328125, 0.923828125, 1.0595703125, + 0.356689453125, -0.46142578125, 2.134765625, 1.3037109375, + -0.32373046875, -9.2265625, 0.4521484375, 0.88037109375, + -0.53955078125, 0.96484375, 0.7705078125, 0.84521484375, + 1.580078125, -0.1448974609375, 0.7607421875, 1.0166015625, + -0.086669921875, 1.611328125, 0.05938720703125, 0.5078125, + 0.8427734375, 2.431640625, 0.66357421875, 3.203125, + 0.132080078125, 0.461181640625, 0.779296875, 1.9482421875, + 1.8720703125, 0.845703125, -1.3837890625, -0.138916015625, + 0.35546875, 0.2457275390625, 0.75341796875, 1.828125, + 1.4169921875, 0.60791015625, 1.0068359375, 1.109375, + 0.484130859375, -0.302001953125, 0.4951171875, 
0.802734375, + 1.9482421875, 0.916015625, 0.1646728515625, 2.599609375, + 1.7177734375, -0.2374267578125, 0.98046875, 0.39306640625, + -1.1396484375, 1.6533203125, 0.375244140625], "scores": + [0.09906005859375, 0.12213134765625, 0.05035400390625, + 0.16552734375, 0.0037384033203125, 0.010101318359375, + 0.058197021484375, 0.10552978515625, 0.1141357421875, + 0.115234375, 0.00444793701171875, 0.00812530517578125, + 0.0343017578125, 0.009002685546875, 0.087158203125, + 0.00818634033203125, 0.003238677978515625, 0.024169921875, + 0.00702667236328125, 0.00936126708984375, + 0.00632476806640625, 0.0293731689453125, 0.01800537109375, + 0.0088348388671875, 0.013397216796875, 0.02557373046875, + 0.12109375, 0.004413604736328125, 0.016571044921875, + 0.00274658203125, 0.2142333984375, 0.29638671875, + 0.16015625, 0.007843017578125, 0.007160186767578125, + 0.00508880615234375, 0.054229736328125, 0.048736572265625, + 0.026397705078125, 0.0265350341796875, 0.051361083984375, + 0.018310546875, 0.00974273681640625, 0.0218963623046875, + 0.216064453125, 0.021728515625, 0.0173797607421875, + 0.004520416259765625], "stoks": [52, 52, 52, 52, 52, 41, 41, + 374, 303, 303, 303, 427], "time": {"begin_ms": 640, + "end_ms": 1140}}]} + - id: 7645a0d1-2e64-410d-83a8-b96040432e9a + chat_id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 + timestamp: 1716244957031 + role: AGENT + type: AGENT_MESSAGE + message_text: Hello! + emotion_features: >- + {"Admiration": 0.044921875, "Adoration": 0.0253753662109375, + "Aesthetic Appreciation": 0.03265380859375, "Amusement": + 0.118408203125, "Anger": 0.06719970703125, "Anxiety": + 0.0411376953125, "Awe": 0.03802490234375, "Awkwardness": + 0.056549072265625, "Boredom": 0.04217529296875, "Calmness": + 0.08709716796875, "Concentration": 0.070556640625, + "Confusion": 0.06964111328125, "Contemplation": + 0.0343017578125, "Contempt": 0.037689208984375, + "Contentment": 0.059417724609375, "Craving": + 0.01132965087890625, "Desire": 0.01406097412109375, + "Determination": 0.1143798828125, "Disappointment": + 0.051177978515625, "Disgust": 0.028594970703125, "Distress": + 0.054901123046875, "Doubt": 0.04638671875, "Ecstasy": + 0.0258026123046875, "Embarrassment": 0.0222015380859375, + "Empathic Pain": 0.015777587890625, "Entrancement": + 0.0160980224609375, "Envy": 0.0163421630859375, + "Excitement": 0.129638671875, "Fear": 0.03125, "Guilt": + 0.01483917236328125, "Horror": 0.0194549560546875, + "Interest": 0.1341552734375, "Joy": 0.0738525390625, "Love": + 0.0216522216796875, "Nostalgia": 0.0210418701171875, "Pain": + 0.020721435546875, "Pride": 0.05499267578125, "Realization": + 0.0728759765625, "Relief": 0.04052734375, "Romance": + 0.0129241943359375, "Sadness": 0.0254669189453125, + "Satisfaction": 0.07159423828125, "Shame": 0.01495361328125, + "Surprise (negative)": 0.05560302734375, "Surprise + (positive)": 0.07965087890625, "Sympathy": + 0.022247314453125, "Tiredness": 0.0194549560546875, + "Triumph": 0.04107666015625} + metadata: '' + source: + openapi: stenographer-openapi.json diff --git a/.mock/definition/empathic-voice/chats.yml b/.mock/definition/empathic-voice/chats.yml index 1486a27..ebda1a7 100644 --- a/.mock/definition/empathic-voice/chats.yml +++ b/.mock/definition/empathic-voice/chats.yml @@ -1,459 +1,459 @@ imports: - root: __package__.yml + root: __package__.yml service: - auth: false - base-path: "" - endpoints: - list-chats: - path: /v0/evi/chats - method: GET - auth: true - docs: Fetches a paginated list of **Chats**. 
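The `list-chats` and `list-chat-events` endpoints in this file pair naturally: list recent Chats, then pull the event log of a specific Chat by its ID. A loose sketch, with the base URL and auth header assumed as above:

```typescript
// Sketch: grab the most recent Chat, then fetch its first page of events via
// GET /v0/evi/chats/{id}. Uses ascending_order=false for newest-first listing.
async function latestChatEvents(apiKey: string) {
  const headers = { "X-Hume-Api-Key": apiKey }; // assumed header-based auth
  const chatsRes = await fetch(
    "https://api.hume.ai/v0/evi/chats?page_number=0&page_size=1&ascending_order=false",
    { headers },
  );
  const { chats_page } = await chatsRes.json();
  if (!chats_page?.length) return [];

  const eventsRes = await fetch(
    `https://api.hume.ai/v0/evi/chats/${chats_page[0].id}?page_number=0&page_size=10`,
    { headers },
  );
  const { events_page } = await eventsRes.json();
  return events_page; // SYSTEM_PROMPT, USER_MESSAGE, AGENT_MESSAGE, ... events
}
```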
- pagination: - offset: $request.page_number - results: $response.chats_page - display-name: List chats - request: - name: ChatsListChatsRequest - query-parameters: - page_number: - type: optional - docs: >- - Specifies the page number to retrieve, enabling pagination. + auth: false + base-path: '' + endpoints: + list-chats: + path: /v0/evi/chats + method: GET + auth: true + docs: Fetches a paginated list of **Chats**. + pagination: + offset: $request.page_number + results: $response.chats_page + display-name: List chats + request: + name: ChatsListChatsRequest + query-parameters: + page_number: + type: optional + docs: >- + Specifies the page number to retrieve, enabling pagination. - This parameter uses zero-based indexing. For example, setting - `page_number` to 0 retrieves the first page of results (items 0-9 - if `page_size` is 10), setting `page_number` to 1 retrieves the - second page (items 10-19), and so on. Defaults to 0, which - retrieves the first page. - page_size: - type: optional - docs: >- - Specifies the maximum number of results to include per page, - enabling pagination. The value must be between 1 and 100, - inclusive. + This parameter uses zero-based indexing. For example, setting + `page_number` to 0 retrieves the first page of results (items 0-9 + if `page_size` is 10), setting `page_number` to 1 retrieves the + second page (items 10-19), and so on. Defaults to 0, which + retrieves the first page. + page_size: + type: optional + docs: >- + Specifies the maximum number of results to include per page, + enabling pagination. The value must be between 1 and 100, + inclusive. - For example, if `page_size` is set to 10, each page will include - up to 10 items. Defaults to 10. - ascending_order: - type: optional - docs: >- - Specifies the sorting order of the results based on their creation - date. Set to true for ascending order (chronological, with the - oldest records first) and false for descending order - (reverse-chronological, with the newest records first). Defaults - to true. - response: - docs: Success - type: root.ReturnPagedChats - errors: - - root.BadRequestError - examples: - - query-parameters: - page_number: 0 - page_size: 1 - ascending_order: true - response: - body: - page_number: 0 - page_size: 1 - total_pages: 1 - pagination_direction: ASC - chats_page: - - id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 - chat_group_id: 9fc18597-3567-42d5-94d6-935bde84bf2f - status: USER_ENDED - start_timestamp: 1716244940648 - end_timestamp: 1716244958546 - event_count: 3 - metadata: "" - config: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - version: 0 - list-chat-events: - path: /v0/evi/chats/{id} - method: GET - auth: true - docs: Fetches a paginated list of **Chat** events. - pagination: - offset: $request.page_number - results: $response.events_page - path-parameters: - id: - type: string - docs: Identifier for a Chat. Formatted as a UUID. - display-name: List chat events - request: - name: ChatsListChatEventsRequest - query-parameters: - page_size: - type: optional - docs: >- - Specifies the maximum number of results to include per page, - enabling pagination. The value must be between 1 and 100, - inclusive. + For example, if `page_size` is set to 10, each page will include + up to 10 items. Defaults to 10. + ascending_order: + type: optional + docs: >- + Specifies the sorting order of the results based on their creation + date. 
Set to true for ascending order (chronological, with the + oldest records first) and false for descending order + (reverse-chronological, with the newest records first). Defaults + to true. + response: + docs: Success + type: root.ReturnPagedChats + errors: + - root.BadRequestError + examples: + - query-parameters: + page_number: 0 + page_size: 1 + ascending_order: true + response: + body: + page_number: 0 + page_size: 1 + total_pages: 1 + pagination_direction: ASC + chats_page: + - id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 + chat_group_id: 9fc18597-3567-42d5-94d6-935bde84bf2f + status: USER_ENDED + start_timestamp: 1716244940648 + end_timestamp: 1716244958546 + event_count: 3 + metadata: '' + config: + id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 + version: 0 + list-chat-events: + path: /v0/evi/chats/{id} + method: GET + auth: true + docs: Fetches a paginated list of **Chat** events. + pagination: + offset: $request.page_number + results: $response.events_page + path-parameters: + id: + type: string + docs: Identifier for a Chat. Formatted as a UUID. + display-name: List chat events + request: + name: ChatsListChatEventsRequest + query-parameters: + page_size: + type: optional + docs: >- + Specifies the maximum number of results to include per page, + enabling pagination. The value must be between 1 and 100, + inclusive. - For example, if `page_size` is set to 10, each page will include - up to 10 items. Defaults to 10. - page_number: - type: optional - docs: >- - Specifies the page number to retrieve, enabling pagination. + For example, if `page_size` is set to 10, each page will include + up to 10 items. Defaults to 10. + page_number: + type: optional + docs: >- + Specifies the page number to retrieve, enabling pagination. - This parameter uses zero-based indexing. For example, setting - `page_number` to 0 retrieves the first page of results (items 0-9 - if `page_size` is 10), setting `page_number` to 1 retrieves the - second page (items 10-19), and so on. Defaults to 0, which - retrieves the first page. - ascending_order: - type: optional - docs: >- - Specifies the sorting order of the results based on their creation - date. Set to true for ascending order (chronological, with the - oldest records first) and false for descending order - (reverse-chronological, with the newest records first). Defaults - to true. - response: - docs: Success - type: root.ReturnChatPagedEvents - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 - query-parameters: - page_number: 0 - page_size: 3 - ascending_order: true - response: - body: - id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 - chat_group_id: 9fc18597-3567-42d5-94d6-935bde84bf2f - status: USER_ENDED - start_timestamp: 1716244940648 - pagination_direction: ASC - events_page: - - id: 5d44bdbb-49a3-40fb-871d-32bf7e76efe7 - chat_id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 - timestamp: 1716244940762 - role: SYSTEM - type: SYSTEM_PROMPT - message_text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to user - queries concisely and clearly. Use simple language and avoid - technical jargon. Provide temperature, precipitation, wind - conditions, and any weather alerts. Include helpful tips if - severe weather is expected. - emotion_features: "" - metadata: "" - - id: 5976ddf6-d093-4bb9-ba60-8f6c25832dde - chat_id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 - timestamp: 1716244956278 - role: USER - type: USER_MESSAGE - message_text: Hello. 
- emotion_features: >- - {"Admiration": 0.09906005859375, "Adoration": - 0.12213134765625, "Aesthetic Appreciation": - 0.05035400390625, "Amusement": 0.16552734375, "Anger": - 0.0037384033203125, "Anxiety": 0.010101318359375, "Awe": - 0.058197021484375, "Awkwardness": 0.10552978515625, - "Boredom": 0.1141357421875, "Calmness": 0.115234375, - "Concentration": 0.00444793701171875, "Confusion": - 0.0343017578125, "Contemplation": 0.00812530517578125, - "Contempt": 0.009002685546875, "Contentment": - 0.087158203125, "Craving": 0.00818634033203125, "Desire": - 0.018310546875, "Determination": 0.003238677978515625, - "Disappointment": 0.024169921875, "Disgust": - 0.00702667236328125, "Distress": 0.00936126708984375, - "Doubt": 0.00632476806640625, "Ecstasy": 0.0293731689453125, - "Embarrassment": 0.01800537109375, "Empathic Pain": - 0.0088348388671875, "Entrancement": 0.013397216796875, - "Envy": 0.02557373046875, "Excitement": 0.12109375, "Fear": - 0.004413604736328125, "Guilt": 0.016571044921875, "Horror": - 0.00274658203125, "Interest": 0.2142333984375, "Joy": - 0.29638671875, "Love": 0.16015625, "Nostalgia": - 0.007843017578125, "Pain": 0.007160186767578125, "Pride": - 0.00508880615234375, "Realization": 0.054229736328125, - "Relief": 0.048736572265625, "Romance": 0.026397705078125, - "Sadness": 0.0265350341796875, "Satisfaction": - 0.051361083984375, "Shame": 0.00974273681640625, "Surprise - (negative)": 0.0218963623046875, "Surprise (positive)": - 0.216064453125, "Sympathy": 0.021728515625, "Tiredness": - 0.0173797607421875, "Triumph": 0.004520416259765625} - metadata: >- - {"segments": [{"content": "Hello.", "embedding": - [0.6181640625, 0.1763916015625, -30.921875, 1.2705078125, - 0.927734375, 0.63720703125, 2.865234375, 0.1080322265625, - 0.2978515625, 1.0107421875, 1.34375, 0.74560546875, - 0.416259765625, 0.99462890625, -0.333740234375, - 0.361083984375, -1.388671875, 1.0107421875, 1.3173828125, - 0.55615234375, 0.541015625, -0.1837158203125, 1.697265625, - 0.228515625, 2.087890625, -0.311767578125, - 0.053680419921875, 1.3349609375, 0.95068359375, - 0.00441741943359375, 0.705078125, 1.8916015625, - -0.939453125, 0.93701171875, -0.28955078125, 1.513671875, - 0.5595703125, 1.0126953125, -0.1624755859375, 1.4072265625, - -0.28857421875, -0.4560546875, -0.1500244140625, - -0.1102294921875, -0.222412109375, 0.8779296875, - 1.275390625, 1.6689453125, 0.80712890625, -0.34814453125, - -0.325439453125, 0.412841796875, 0.81689453125, - 0.55126953125, 1.671875, 0.6611328125, 0.7451171875, - 1.50390625, 1.0224609375, -1.671875, 0.7373046875, - 2.1328125, 2.166015625, 0.41015625, -0.127685546875, - 1.9345703125, -4.2734375, 0.332275390625, 0.26171875, - 0.76708984375, 0.2685546875, 0.468017578125, 1.208984375, - -1.517578125, 1.083984375, 0.84814453125, 1.0244140625, - -0.0072174072265625, 1.34375, 1.0712890625, 1.517578125, - -0.52001953125, 0.59228515625, 0.8154296875, -0.951171875, - -0.07757568359375, 1.3330078125, 1.125, 0.61181640625, - 1.494140625, 0.357421875, 1.1796875, 1.482421875, 0.8046875, - 0.1536865234375, 1.8076171875, 0.68115234375, -15.171875, - 1.2294921875, 0.319091796875, 0.499755859375, 1.5771484375, - 0.94677734375, -0.2490234375, 0.88525390625, 3.47265625, - 0.75927734375, 0.71044921875, 1.2333984375, 1.4169921875, - -0.56640625, -1.8095703125, 1.37109375, 0.428955078125, - 1.89453125, -0.39013671875, 0.1734619140625, 1.5595703125, - -1.2294921875, 2.552734375, 0.58349609375, 0.2156982421875, - -0.00984954833984375, -0.6865234375, -0.0272979736328125, - -0.2264404296875, 
2.853515625, 1.3896484375, 0.52978515625, - 0.783203125, 3.0390625, 0.75537109375, 0.219970703125, - 0.384521484375, 0.385986328125, 2.0546875, - -0.10443115234375, 1.5146484375, 1.4296875, 1.9716796875, - 1.1318359375, 0.31591796875, 0.338623046875, 1.654296875, - -0.88037109375, -0.21484375, 1.45703125, 1.0380859375, - -0.52294921875, -0.47802734375, 0.1650390625, 1.2392578125, - -1.138671875, 0.56787109375, 1.318359375, 0.4287109375, - 0.1981201171875, 2.4375, 0.281005859375, 0.89404296875, - -0.1552734375, 0.6474609375, -0.08331298828125, - 0.00740814208984375, -0.045501708984375, -0.578125, - 2.02734375, 0.59228515625, 0.35693359375, 1.2919921875, - 1.22265625, 1.0537109375, 0.145263671875, 1.05859375, - -0.369140625, 0.207275390625, 0.78857421875, 0.599609375, - 0.99072265625, 0.24462890625, 1.26953125, 0.08404541015625, - 1.349609375, 0.73291015625, 1.3212890625, 0.388916015625, - 1.0869140625, 0.9931640625, -1.5673828125, 0.0462646484375, - 0.650390625, 0.253662109375, 0.58251953125, 1.8134765625, - 0.8642578125, 2.591796875, 0.7314453125, 0.85986328125, - 0.5615234375, 0.9296875, 0.04144287109375, 1.66015625, - 1.99609375, 1.171875, 1.181640625, 1.5126953125, - 0.0224456787109375, 0.58349609375, -1.4931640625, - 0.81884765625, 0.732421875, -0.6455078125, -0.62451171875, - 1.7802734375, 0.01526641845703125, -0.423095703125, - 0.461669921875, 4.87890625, 1.2392578125, -0.6953125, - 0.6689453125, 0.62451171875, -1.521484375, 1.7685546875, - 0.810546875, 0.65478515625, 0.26123046875, 1.6396484375, - 0.87548828125, 1.7353515625, 2.046875, 1.5634765625, - 0.69384765625, 1.375, 0.8916015625, 1.0107421875, - 0.1304931640625, 2.009765625, 0.06402587890625, - -0.08428955078125, 0.04351806640625, -1.7529296875, - 2.02734375, 3.521484375, 0.404541015625, 1.6337890625, - -0.276611328125, 0.8837890625, -0.1287841796875, - 0.91064453125, 0.8193359375, 0.701171875, 0.036529541015625, - 1.26171875, 1.0478515625, -0.1422119140625, 1.0634765625, - 0.61083984375, 1.3505859375, 1.208984375, 0.57275390625, - 1.3623046875, 2.267578125, 0.484375, 0.9150390625, - 0.56787109375, -0.70068359375, 0.27587890625, - -0.70654296875, 0.8466796875, 0.57568359375, 1.6162109375, - 0.87939453125, 2.248046875, -0.5458984375, 1.7744140625, - 1.328125, 1.232421875, 0.6806640625, 0.9365234375, - 1.052734375, -1.08984375, 1.8330078125, -0.4033203125, - 1.0673828125, 0.297607421875, 1.5703125, 1.67578125, - 1.34765625, 2.8203125, 2.025390625, -0.48583984375, - 0.7626953125, 0.01007843017578125, 1.435546875, - 0.007205963134765625, 0.05157470703125, -0.9853515625, - 0.26708984375, 1.16796875, 1.2041015625, 1.99609375, - -0.07916259765625, 1.244140625, -0.32080078125, - 0.6748046875, 0.419921875, 1.3212890625, 1.291015625, - 0.599609375, 0.0550537109375, 0.9599609375, 0.93505859375, - 0.111083984375, 1.302734375, 0.0833740234375, 2.244140625, - 1.25390625, 1.6015625, 0.58349609375, 1.7568359375, - -0.263427734375, -0.019866943359375, -0.24658203125, - -0.1871337890625, 0.927734375, 0.62255859375, - 0.275146484375, 0.79541015625, 1.1796875, 1.1767578125, - -0.26123046875, -0.268310546875, 1.8994140625, 1.318359375, - 2.1875, 0.2469482421875, 1.41015625, 0.03973388671875, - 1.2685546875, 1.1025390625, 0.9560546875, 0.865234375, - -1.92578125, 1.154296875, 0.389892578125, 1.130859375, - 0.95947265625, 0.72314453125, 2.244140625, - 0.048553466796875, 0.626953125, 0.42919921875, - 0.82275390625, 0.311767578125, -0.320556640625, - 0.01041412353515625, 0.1483154296875, 0.10809326171875, - -0.3173828125, 1.1337890625, -0.8642578125, 
1.4033203125, - 0.048828125, 1.1787109375, 0.98779296875, 1.818359375, - 1.1552734375, 0.6015625, 1.2392578125, -1.2685546875, - 0.39208984375, 0.83251953125, 0.224365234375, - 0.0019989013671875, 0.87548828125, 1.6572265625, - 1.107421875, 0.434814453125, 1.8251953125, 0.442626953125, - 1.2587890625, 0.09320068359375, -0.896484375, 1.8017578125, - 1.451171875, -0.0755615234375, 0.6083984375, 2.06640625, - 0.673828125, -0.33740234375, 0.192138671875, 0.21435546875, - 0.80224609375, -1.490234375, 0.9501953125, 0.86083984375, - -0.40283203125, 4.109375, 2.533203125, 1.2529296875, - 0.8271484375, 0.225830078125, 1.0478515625, -1.9755859375, - 0.841796875, 0.392822265625, 0.525390625, 0.33935546875, - -0.79443359375, 0.71630859375, 0.97998046875, - -0.175537109375, 0.97705078125, 1.705078125, 0.29638671875, - 0.68359375, 0.54150390625, 0.435791015625, 0.99755859375, - -0.369140625, 1.009765625, -0.140380859375, 0.426513671875, - 0.189697265625, 1.8193359375, 1.1201171875, -0.5009765625, - -0.331298828125, 0.759765625, -0.09442138671875, 0.74609375, - -1.947265625, 1.3544921875, -3.935546875, 2.544921875, - 1.359375, 0.1363525390625, 0.79296875, 0.79931640625, - -0.3466796875, 1.1396484375, -0.33447265625, 2.0078125, - -0.241455078125, 0.6318359375, 0.365234375, 0.296142578125, - 0.830078125, 1.0458984375, 0.5830078125, 0.61572265625, - 14.0703125, -2.0078125, -0.381591796875, 1.228515625, - 0.08282470703125, -0.67822265625, -0.04339599609375, - 0.397216796875, 0.1656494140625, 0.137451171875, - 0.244873046875, 1.1611328125, -1.3818359375, 0.8447265625, - 1.171875, 0.36328125, 0.252685546875, 0.1197509765625, - 0.232177734375, -0.020172119140625, 0.64404296875, - -0.01100921630859375, -1.9267578125, 0.222412109375, - 0.56005859375, 1.3046875, 1.1630859375, 1.197265625, - 1.02734375, 1.6806640625, -0.043731689453125, 1.4697265625, - 0.81201171875, 1.5390625, 1.240234375, -0.7353515625, - 1.828125, 1.115234375, 1.931640625, -0.517578125, - 0.77880859375, 1.0546875, 0.95361328125, 3.42578125, - 0.0160369873046875, 0.875, 0.56005859375, 1.2421875, - 1.986328125, 1.4814453125, 0.0948486328125, 1.115234375, - 0.00665283203125, 2.09375, 0.3544921875, -0.52783203125, - 1.2099609375, 0.45068359375, 0.65625, 0.1112060546875, - 1.0751953125, -0.9521484375, -0.30029296875, 1.4462890625, - 2.046875, 3.212890625, 1.68359375, 1.07421875, - -0.5263671875, 0.74560546875, 1.37890625, 0.15283203125, - 0.2440185546875, 0.62646484375, -0.1280517578125, - 0.7646484375, -0.515625, -0.35693359375, 1.2958984375, - 0.96923828125, 0.58935546875, 1.3701171875, 1.0673828125, - 0.2337646484375, 0.93115234375, 0.66357421875, 6.0, - 1.1025390625, -0.51708984375, -0.38330078125, 0.7197265625, - 0.246826171875, -0.45166015625, 1.9521484375, 0.5546875, - 0.08807373046875, 0.18505859375, 0.8857421875, - -0.57177734375, 0.251708984375, 0.234375, 2.57421875, - 0.9599609375, 0.5029296875, 0.10382080078125, - 0.08331298828125, 0.66748046875, -0.349609375, 1.287109375, - 0.259765625, 2.015625, 2.828125, -0.3095703125, - -0.164306640625, -0.3408203125, 0.486572265625, - 0.8466796875, 1.9130859375, 0.09088134765625, 0.66552734375, - 0.00972747802734375, -0.83154296875, 1.755859375, - 0.654296875, 0.173828125, 0.27587890625, -0.47607421875, - -0.264404296875, 0.7529296875, 0.6533203125, 0.7275390625, - 0.499755859375, 0.833984375, -0.44775390625, -0.05078125, - -0.454833984375, 0.75439453125, 0.68505859375, - 0.210693359375, -0.283935546875, -0.53564453125, - 0.96826171875, 0.861328125, -3.33984375, -0.26171875, - 0.77734375, 
0.26513671875, -0.14111328125, -0.042236328125, - -0.84814453125, 0.2137451171875, 0.94921875, 0.65185546875, - -0.5380859375, 0.1529541015625, -0.360595703125, - -0.0333251953125, -0.69189453125, 0.8974609375, 0.7109375, - 0.81494140625, -0.259521484375, 1.1904296875, 0.62158203125, - 1.345703125, 0.89404296875, 0.70556640625, 1.0673828125, - 1.392578125, 0.5068359375, 0.962890625, 0.736328125, - 1.55078125, 0.50390625, -0.398681640625, 2.361328125, - 0.345947265625, -0.61962890625, 0.330078125, 0.75439453125, - -0.673828125, -0.2379150390625, 1.5673828125, 1.369140625, - 0.1119384765625, -0.1834716796875, 1.4599609375, - -0.77587890625, 0.5556640625, 0.09954833984375, - 0.0285186767578125, 0.58935546875, -0.501953125, - 0.212890625, 0.02679443359375, 0.1715087890625, - 0.03466796875, -0.564453125, 2.029296875, 2.45703125, - -0.72216796875, 2.138671875, 0.50830078125, - -0.09356689453125, 0.230224609375, 1.6943359375, - 1.5126953125, 0.39453125, 0.411376953125, 1.07421875, - -0.8046875, 0.51416015625, 0.2271728515625, -0.283447265625, - 0.38427734375, 0.73388671875, 0.6962890625, 1.4990234375, - 0.02813720703125, 0.40478515625, 1.2451171875, 1.1162109375, - -5.5703125, 0.76171875, 0.322021484375, 1.0361328125, - 1.197265625, 0.1163330078125, 0.2425537109375, 1.5595703125, - 1.5791015625, -0.0921630859375, 0.484619140625, - 1.9052734375, 5.31640625, 1.6337890625, 0.95947265625, - -0.1751708984375, 0.466552734375, 0.8330078125, 1.03125, - 0.2044677734375, 0.31298828125, -1.1220703125, 0.5517578125, - 0.93505859375, 0.45166015625, 1.951171875, 0.65478515625, - 1.30859375, 1.0859375, 0.56494140625, 2.322265625, - 0.242919921875, 1.81640625, -0.469970703125, -0.841796875, - 0.90869140625, 1.5361328125, 0.923828125, 1.0595703125, - 0.356689453125, -0.46142578125, 2.134765625, 1.3037109375, - -0.32373046875, -9.2265625, 0.4521484375, 0.88037109375, - -0.53955078125, 0.96484375, 0.7705078125, 0.84521484375, - 1.580078125, -0.1448974609375, 0.7607421875, 1.0166015625, - -0.086669921875, 1.611328125, 0.05938720703125, 0.5078125, - 0.8427734375, 2.431640625, 0.66357421875, 3.203125, - 0.132080078125, 0.461181640625, 0.779296875, 1.9482421875, - 1.8720703125, 0.845703125, -1.3837890625, -0.138916015625, - 0.35546875, 0.2457275390625, 0.75341796875, 1.828125, - 1.4169921875, 0.60791015625, 1.0068359375, 1.109375, - 0.484130859375, -0.302001953125, 0.4951171875, 0.802734375, - 1.9482421875, 0.916015625, 0.1646728515625, 2.599609375, - 1.7177734375, -0.2374267578125, 0.98046875, 0.39306640625, - -1.1396484375, 1.6533203125, 0.375244140625], "scores": - [0.09906005859375, 0.12213134765625, 0.05035400390625, - 0.16552734375, 0.0037384033203125, 0.010101318359375, - 0.058197021484375, 0.10552978515625, 0.1141357421875, - 0.115234375, 0.00444793701171875, 0.00812530517578125, - 0.0343017578125, 0.009002685546875, 0.087158203125, - 0.00818634033203125, 0.003238677978515625, 0.024169921875, - 0.00702667236328125, 0.00936126708984375, - 0.00632476806640625, 0.0293731689453125, 0.01800537109375, - 0.0088348388671875, 0.013397216796875, 0.02557373046875, - 0.12109375, 0.004413604736328125, 0.016571044921875, - 0.00274658203125, 0.2142333984375, 0.29638671875, - 0.16015625, 0.007843017578125, 0.007160186767578125, - 0.00508880615234375, 0.054229736328125, 0.048736572265625, - 0.026397705078125, 0.0265350341796875, 0.051361083984375, - 0.018310546875, 0.00974273681640625, 0.0218963623046875, - 0.216064453125, 0.021728515625, 0.0173797607421875, - 0.004520416259765625], "stoks": [52, 52, 52, 52, 52, 41, 41, - 374, 
303, 303, 303, 427], "time": {"begin_ms": 640, - "end_ms": 1140}}]} - - id: 7645a0d1-2e64-410d-83a8-b96040432e9a - chat_id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 - timestamp: 1716244957031 - role: AGENT - type: AGENT_MESSAGE - message_text: Hello! - emotion_features: >- - {"Admiration": 0.044921875, "Adoration": 0.0253753662109375, - "Aesthetic Appreciation": 0.03265380859375, "Amusement": - 0.118408203125, "Anger": 0.06719970703125, "Anxiety": - 0.0411376953125, "Awe": 0.03802490234375, "Awkwardness": - 0.056549072265625, "Boredom": 0.04217529296875, "Calmness": - 0.08709716796875, "Concentration": 0.070556640625, - "Confusion": 0.06964111328125, "Contemplation": - 0.0343017578125, "Contempt": 0.037689208984375, - "Contentment": 0.059417724609375, "Craving": - 0.01132965087890625, "Desire": 0.01406097412109375, - "Determination": 0.1143798828125, "Disappointment": - 0.051177978515625, "Disgust": 0.028594970703125, "Distress": - 0.054901123046875, "Doubt": 0.04638671875, "Ecstasy": - 0.0258026123046875, "Embarrassment": 0.0222015380859375, - "Empathic Pain": 0.015777587890625, "Entrancement": - 0.0160980224609375, "Envy": 0.0163421630859375, - "Excitement": 0.129638671875, "Fear": 0.03125, "Guilt": - 0.01483917236328125, "Horror": 0.0194549560546875, - "Interest": 0.1341552734375, "Joy": 0.0738525390625, "Love": - 0.0216522216796875, "Nostalgia": 0.0210418701171875, "Pain": - 0.020721435546875, "Pride": 0.05499267578125, "Realization": - 0.0728759765625, "Relief": 0.04052734375, "Romance": - 0.0129241943359375, "Sadness": 0.0254669189453125, - "Satisfaction": 0.07159423828125, "Shame": 0.01495361328125, - "Surprise (negative)": 0.05560302734375, "Surprise - (positive)": 0.07965087890625, "Sympathy": - 0.022247314453125, "Tiredness": 0.0194549560546875, - "Triumph": 0.04107666015625} - metadata: "" - page_number: 0 - page_size: 3 - total_pages: 1 - end_timestamp: 1716244958546 - metadata: "" - config: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - version: 0 - source: - openapi: stenographer-openapi.json + This parameter uses zero-based indexing. For example, setting + `page_number` to 0 retrieves the first page of results (items 0-9 + if `page_size` is 10), setting `page_number` to 1 retrieves the + second page (items 10-19), and so on. Defaults to 0, which + retrieves the first page. + ascending_order: + type: optional + docs: >- + Specifies the sorting order of the results based on their creation + date. Set to true for ascending order (chronological, with the + oldest records first) and false for descending order + (reverse-chronological, with the newest records first). Defaults + to true. + response: + docs: Success + type: root.ReturnChatPagedEvents + errors: + - root.BadRequestError + examples: + - path-parameters: + id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 + query-parameters: + page_number: 0 + page_size: 3 + ascending_order: true + response: + body: + id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 + chat_group_id: 9fc18597-3567-42d5-94d6-935bde84bf2f + status: USER_ENDED + start_timestamp: 1716244940648 + pagination_direction: ASC + events_page: + - id: 5d44bdbb-49a3-40fb-871d-32bf7e76efe7 + chat_id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 + timestamp: 1716244940762 + role: SYSTEM + type: SYSTEM_PROMPT + message_text: >- + You are an AI weather assistant providing users with + accurate and up-to-date weather information. Respond to user + queries concisely and clearly. Use simple language and avoid + technical jargon. 
Provide temperature, precipitation, wind + conditions, and any weather alerts. Include helpful tips if + severe weather is expected. + emotion_features: '' + metadata: '' + - id: 5976ddf6-d093-4bb9-ba60-8f6c25832dde + chat_id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 + timestamp: 1716244956278 + role: USER + type: USER_MESSAGE + message_text: Hello. + emotion_features: >- + {"Admiration": 0.09906005859375, "Adoration": + 0.12213134765625, "Aesthetic Appreciation": + 0.05035400390625, "Amusement": 0.16552734375, "Anger": + 0.0037384033203125, "Anxiety": 0.010101318359375, "Awe": + 0.058197021484375, "Awkwardness": 0.10552978515625, + "Boredom": 0.1141357421875, "Calmness": 0.115234375, + "Concentration": 0.00444793701171875, "Confusion": + 0.0343017578125, "Contemplation": 0.00812530517578125, + "Contempt": 0.009002685546875, "Contentment": + 0.087158203125, "Craving": 0.00818634033203125, "Desire": + 0.018310546875, "Determination": 0.003238677978515625, + "Disappointment": 0.024169921875, "Disgust": + 0.00702667236328125, "Distress": 0.00936126708984375, + "Doubt": 0.00632476806640625, "Ecstasy": 0.0293731689453125, + "Embarrassment": 0.01800537109375, "Empathic Pain": + 0.0088348388671875, "Entrancement": 0.013397216796875, + "Envy": 0.02557373046875, "Excitement": 0.12109375, "Fear": + 0.004413604736328125, "Guilt": 0.016571044921875, "Horror": + 0.00274658203125, "Interest": 0.2142333984375, "Joy": + 0.29638671875, "Love": 0.16015625, "Nostalgia": + 0.007843017578125, "Pain": 0.007160186767578125, "Pride": + 0.00508880615234375, "Realization": 0.054229736328125, + "Relief": 0.048736572265625, "Romance": 0.026397705078125, + "Sadness": 0.0265350341796875, "Satisfaction": + 0.051361083984375, "Shame": 0.00974273681640625, "Surprise + (negative)": 0.0218963623046875, "Surprise (positive)": + 0.216064453125, "Sympathy": 0.021728515625, "Tiredness": + 0.0173797607421875, "Triumph": 0.004520416259765625} + metadata: >- + {"segments": [{"content": "Hello.", "embedding": + [0.6181640625, 0.1763916015625, -30.921875, 1.2705078125, + 0.927734375, 0.63720703125, 2.865234375, 0.1080322265625, + 0.2978515625, 1.0107421875, 1.34375, 0.74560546875, + 0.416259765625, 0.99462890625, -0.333740234375, + 0.361083984375, -1.388671875, 1.0107421875, 1.3173828125, + 0.55615234375, 0.541015625, -0.1837158203125, 1.697265625, + 0.228515625, 2.087890625, -0.311767578125, + 0.053680419921875, 1.3349609375, 0.95068359375, + 0.00441741943359375, 0.705078125, 1.8916015625, + -0.939453125, 0.93701171875, -0.28955078125, 1.513671875, + 0.5595703125, 1.0126953125, -0.1624755859375, 1.4072265625, + -0.28857421875, -0.4560546875, -0.1500244140625, + -0.1102294921875, -0.222412109375, 0.8779296875, + 1.275390625, 1.6689453125, 0.80712890625, -0.34814453125, + -0.325439453125, 0.412841796875, 0.81689453125, + 0.55126953125, 1.671875, 0.6611328125, 0.7451171875, + 1.50390625, 1.0224609375, -1.671875, 0.7373046875, + 2.1328125, 2.166015625, 0.41015625, -0.127685546875, + 1.9345703125, -4.2734375, 0.332275390625, 0.26171875, + 0.76708984375, 0.2685546875, 0.468017578125, 1.208984375, + -1.517578125, 1.083984375, 0.84814453125, 1.0244140625, + -0.0072174072265625, 1.34375, 1.0712890625, 1.517578125, + -0.52001953125, 0.59228515625, 0.8154296875, -0.951171875, + -0.07757568359375, 1.3330078125, 1.125, 0.61181640625, + 1.494140625, 0.357421875, 1.1796875, 1.482421875, 0.8046875, + 0.1536865234375, 1.8076171875, 0.68115234375, -15.171875, + 1.2294921875, 0.319091796875, 0.499755859375, 1.5771484375, + 0.94677734375, -0.2490234375, 
0.88525390625, 3.47265625, + 0.75927734375, 0.71044921875, 1.2333984375, 1.4169921875, + -0.56640625, -1.8095703125, 1.37109375, 0.428955078125, + 1.89453125, -0.39013671875, 0.1734619140625, 1.5595703125, + -1.2294921875, 2.552734375, 0.58349609375, 0.2156982421875, + -0.00984954833984375, -0.6865234375, -0.0272979736328125, + -0.2264404296875, 2.853515625, 1.3896484375, 0.52978515625, + 0.783203125, 3.0390625, 0.75537109375, 0.219970703125, + 0.384521484375, 0.385986328125, 2.0546875, + -0.10443115234375, 1.5146484375, 1.4296875, 1.9716796875, + 1.1318359375, 0.31591796875, 0.338623046875, 1.654296875, + -0.88037109375, -0.21484375, 1.45703125, 1.0380859375, + -0.52294921875, -0.47802734375, 0.1650390625, 1.2392578125, + -1.138671875, 0.56787109375, 1.318359375, 0.4287109375, + 0.1981201171875, 2.4375, 0.281005859375, 0.89404296875, + -0.1552734375, 0.6474609375, -0.08331298828125, + 0.00740814208984375, -0.045501708984375, -0.578125, + 2.02734375, 0.59228515625, 0.35693359375, 1.2919921875, + 1.22265625, 1.0537109375, 0.145263671875, 1.05859375, + -0.369140625, 0.207275390625, 0.78857421875, 0.599609375, + 0.99072265625, 0.24462890625, 1.26953125, 0.08404541015625, + 1.349609375, 0.73291015625, 1.3212890625, 0.388916015625, + 1.0869140625, 0.9931640625, -1.5673828125, 0.0462646484375, + 0.650390625, 0.253662109375, 0.58251953125, 1.8134765625, + 0.8642578125, 2.591796875, 0.7314453125, 0.85986328125, + 0.5615234375, 0.9296875, 0.04144287109375, 1.66015625, + 1.99609375, 1.171875, 1.181640625, 1.5126953125, + 0.0224456787109375, 0.58349609375, -1.4931640625, + 0.81884765625, 0.732421875, -0.6455078125, -0.62451171875, + 1.7802734375, 0.01526641845703125, -0.423095703125, + 0.461669921875, 4.87890625, 1.2392578125, -0.6953125, + 0.6689453125, 0.62451171875, -1.521484375, 1.7685546875, + 0.810546875, 0.65478515625, 0.26123046875, 1.6396484375, + 0.87548828125, 1.7353515625, 2.046875, 1.5634765625, + 0.69384765625, 1.375, 0.8916015625, 1.0107421875, + 0.1304931640625, 2.009765625, 0.06402587890625, + -0.08428955078125, 0.04351806640625, -1.7529296875, + 2.02734375, 3.521484375, 0.404541015625, 1.6337890625, + -0.276611328125, 0.8837890625, -0.1287841796875, + 0.91064453125, 0.8193359375, 0.701171875, 0.036529541015625, + 1.26171875, 1.0478515625, -0.1422119140625, 1.0634765625, + 0.61083984375, 1.3505859375, 1.208984375, 0.57275390625, + 1.3623046875, 2.267578125, 0.484375, 0.9150390625, + 0.56787109375, -0.70068359375, 0.27587890625, + -0.70654296875, 0.8466796875, 0.57568359375, 1.6162109375, + 0.87939453125, 2.248046875, -0.5458984375, 1.7744140625, + 1.328125, 1.232421875, 0.6806640625, 0.9365234375, + 1.052734375, -1.08984375, 1.8330078125, -0.4033203125, + 1.0673828125, 0.297607421875, 1.5703125, 1.67578125, + 1.34765625, 2.8203125, 2.025390625, -0.48583984375, + 0.7626953125, 0.01007843017578125, 1.435546875, + 0.007205963134765625, 0.05157470703125, -0.9853515625, + 0.26708984375, 1.16796875, 1.2041015625, 1.99609375, + -0.07916259765625, 1.244140625, -0.32080078125, + 0.6748046875, 0.419921875, 1.3212890625, 1.291015625, + 0.599609375, 0.0550537109375, 0.9599609375, 0.93505859375, + 0.111083984375, 1.302734375, 0.0833740234375, 2.244140625, + 1.25390625, 1.6015625, 0.58349609375, 1.7568359375, + -0.263427734375, -0.019866943359375, -0.24658203125, + -0.1871337890625, 0.927734375, 0.62255859375, + 0.275146484375, 0.79541015625, 1.1796875, 1.1767578125, + -0.26123046875, -0.268310546875, 1.8994140625, 1.318359375, + 2.1875, 0.2469482421875, 1.41015625, 0.03973388671875, + 
1.2685546875, 1.1025390625, 0.9560546875, 0.865234375, + -1.92578125, 1.154296875, 0.389892578125, 1.130859375, + 0.95947265625, 0.72314453125, 2.244140625, + 0.048553466796875, 0.626953125, 0.42919921875, + 0.82275390625, 0.311767578125, -0.320556640625, + 0.01041412353515625, 0.1483154296875, 0.10809326171875, + -0.3173828125, 1.1337890625, -0.8642578125, 1.4033203125, + 0.048828125, 1.1787109375, 0.98779296875, 1.818359375, + 1.1552734375, 0.6015625, 1.2392578125, -1.2685546875, + 0.39208984375, 0.83251953125, 0.224365234375, + 0.0019989013671875, 0.87548828125, 1.6572265625, + 1.107421875, 0.434814453125, 1.8251953125, 0.442626953125, + 1.2587890625, 0.09320068359375, -0.896484375, 1.8017578125, + 1.451171875, -0.0755615234375, 0.6083984375, 2.06640625, + 0.673828125, -0.33740234375, 0.192138671875, 0.21435546875, + 0.80224609375, -1.490234375, 0.9501953125, 0.86083984375, + -0.40283203125, 4.109375, 2.533203125, 1.2529296875, + 0.8271484375, 0.225830078125, 1.0478515625, -1.9755859375, + 0.841796875, 0.392822265625, 0.525390625, 0.33935546875, + -0.79443359375, 0.71630859375, 0.97998046875, + -0.175537109375, 0.97705078125, 1.705078125, 0.29638671875, + 0.68359375, 0.54150390625, 0.435791015625, 0.99755859375, + -0.369140625, 1.009765625, -0.140380859375, 0.426513671875, + 0.189697265625, 1.8193359375, 1.1201171875, -0.5009765625, + -0.331298828125, 0.759765625, -0.09442138671875, 0.74609375, + -1.947265625, 1.3544921875, -3.935546875, 2.544921875, + 1.359375, 0.1363525390625, 0.79296875, 0.79931640625, + -0.3466796875, 1.1396484375, -0.33447265625, 2.0078125, + -0.241455078125, 0.6318359375, 0.365234375, 0.296142578125, + 0.830078125, 1.0458984375, 0.5830078125, 0.61572265625, + 14.0703125, -2.0078125, -0.381591796875, 1.228515625, + 0.08282470703125, -0.67822265625, -0.04339599609375, + 0.397216796875, 0.1656494140625, 0.137451171875, + 0.244873046875, 1.1611328125, -1.3818359375, 0.8447265625, + 1.171875, 0.36328125, 0.252685546875, 0.1197509765625, + 0.232177734375, -0.020172119140625, 0.64404296875, + -0.01100921630859375, -1.9267578125, 0.222412109375, + 0.56005859375, 1.3046875, 1.1630859375, 1.197265625, + 1.02734375, 1.6806640625, -0.043731689453125, 1.4697265625, + 0.81201171875, 1.5390625, 1.240234375, -0.7353515625, + 1.828125, 1.115234375, 1.931640625, -0.517578125, + 0.77880859375, 1.0546875, 0.95361328125, 3.42578125, + 0.0160369873046875, 0.875, 0.56005859375, 1.2421875, + 1.986328125, 1.4814453125, 0.0948486328125, 1.115234375, + 0.00665283203125, 2.09375, 0.3544921875, -0.52783203125, + 1.2099609375, 0.45068359375, 0.65625, 0.1112060546875, + 1.0751953125, -0.9521484375, -0.30029296875, 1.4462890625, + 2.046875, 3.212890625, 1.68359375, 1.07421875, + -0.5263671875, 0.74560546875, 1.37890625, 0.15283203125, + 0.2440185546875, 0.62646484375, -0.1280517578125, + 0.7646484375, -0.515625, -0.35693359375, 1.2958984375, + 0.96923828125, 0.58935546875, 1.3701171875, 1.0673828125, + 0.2337646484375, 0.93115234375, 0.66357421875, 6.0, + 1.1025390625, -0.51708984375, -0.38330078125, 0.7197265625, + 0.246826171875, -0.45166015625, 1.9521484375, 0.5546875, + 0.08807373046875, 0.18505859375, 0.8857421875, + -0.57177734375, 0.251708984375, 0.234375, 2.57421875, + 0.9599609375, 0.5029296875, 0.10382080078125, + 0.08331298828125, 0.66748046875, -0.349609375, 1.287109375, + 0.259765625, 2.015625, 2.828125, -0.3095703125, + -0.164306640625, -0.3408203125, 0.486572265625, + 0.8466796875, 1.9130859375, 0.09088134765625, 0.66552734375, + 0.00972747802734375, -0.83154296875, 1.755859375, + 
0.654296875, 0.173828125, 0.27587890625, -0.47607421875, + -0.264404296875, 0.7529296875, 0.6533203125, 0.7275390625, + 0.499755859375, 0.833984375, -0.44775390625, -0.05078125, + -0.454833984375, 0.75439453125, 0.68505859375, + 0.210693359375, -0.283935546875, -0.53564453125, + 0.96826171875, 0.861328125, -3.33984375, -0.26171875, + 0.77734375, 0.26513671875, -0.14111328125, -0.042236328125, + -0.84814453125, 0.2137451171875, 0.94921875, 0.65185546875, + -0.5380859375, 0.1529541015625, -0.360595703125, + -0.0333251953125, -0.69189453125, 0.8974609375, 0.7109375, + 0.81494140625, -0.259521484375, 1.1904296875, 0.62158203125, + 1.345703125, 0.89404296875, 0.70556640625, 1.0673828125, + 1.392578125, 0.5068359375, 0.962890625, 0.736328125, + 1.55078125, 0.50390625, -0.398681640625, 2.361328125, + 0.345947265625, -0.61962890625, 0.330078125, 0.75439453125, + -0.673828125, -0.2379150390625, 1.5673828125, 1.369140625, + 0.1119384765625, -0.1834716796875, 1.4599609375, + -0.77587890625, 0.5556640625, 0.09954833984375, + 0.0285186767578125, 0.58935546875, -0.501953125, + 0.212890625, 0.02679443359375, 0.1715087890625, + 0.03466796875, -0.564453125, 2.029296875, 2.45703125, + -0.72216796875, 2.138671875, 0.50830078125, + -0.09356689453125, 0.230224609375, 1.6943359375, + 1.5126953125, 0.39453125, 0.411376953125, 1.07421875, + -0.8046875, 0.51416015625, 0.2271728515625, -0.283447265625, + 0.38427734375, 0.73388671875, 0.6962890625, 1.4990234375, + 0.02813720703125, 0.40478515625, 1.2451171875, 1.1162109375, + -5.5703125, 0.76171875, 0.322021484375, 1.0361328125, + 1.197265625, 0.1163330078125, 0.2425537109375, 1.5595703125, + 1.5791015625, -0.0921630859375, 0.484619140625, + 1.9052734375, 5.31640625, 1.6337890625, 0.95947265625, + -0.1751708984375, 0.466552734375, 0.8330078125, 1.03125, + 0.2044677734375, 0.31298828125, -1.1220703125, 0.5517578125, + 0.93505859375, 0.45166015625, 1.951171875, 0.65478515625, + 1.30859375, 1.0859375, 0.56494140625, 2.322265625, + 0.242919921875, 1.81640625, -0.469970703125, -0.841796875, + 0.90869140625, 1.5361328125, 0.923828125, 1.0595703125, + 0.356689453125, -0.46142578125, 2.134765625, 1.3037109375, + -0.32373046875, -9.2265625, 0.4521484375, 0.88037109375, + -0.53955078125, 0.96484375, 0.7705078125, 0.84521484375, + 1.580078125, -0.1448974609375, 0.7607421875, 1.0166015625, + -0.086669921875, 1.611328125, 0.05938720703125, 0.5078125, + 0.8427734375, 2.431640625, 0.66357421875, 3.203125, + 0.132080078125, 0.461181640625, 0.779296875, 1.9482421875, + 1.8720703125, 0.845703125, -1.3837890625, -0.138916015625, + 0.35546875, 0.2457275390625, 0.75341796875, 1.828125, + 1.4169921875, 0.60791015625, 1.0068359375, 1.109375, + 0.484130859375, -0.302001953125, 0.4951171875, 0.802734375, + 1.9482421875, 0.916015625, 0.1646728515625, 2.599609375, + 1.7177734375, -0.2374267578125, 0.98046875, 0.39306640625, + -1.1396484375, 1.6533203125, 0.375244140625], "scores": + [0.09906005859375, 0.12213134765625, 0.05035400390625, + 0.16552734375, 0.0037384033203125, 0.010101318359375, + 0.058197021484375, 0.10552978515625, 0.1141357421875, + 0.115234375, 0.00444793701171875, 0.00812530517578125, + 0.0343017578125, 0.009002685546875, 0.087158203125, + 0.00818634033203125, 0.003238677978515625, 0.024169921875, + 0.00702667236328125, 0.00936126708984375, + 0.00632476806640625, 0.0293731689453125, 0.01800537109375, + 0.0088348388671875, 0.013397216796875, 0.02557373046875, + 0.12109375, 0.004413604736328125, 0.016571044921875, + 0.00274658203125, 0.2142333984375, 0.29638671875, + 
0.16015625, 0.007843017578125, 0.007160186767578125, + 0.00508880615234375, 0.054229736328125, 0.048736572265625, + 0.026397705078125, 0.0265350341796875, 0.051361083984375, + 0.018310546875, 0.00974273681640625, 0.0218963623046875, + 0.216064453125, 0.021728515625, 0.0173797607421875, + 0.004520416259765625], "stoks": [52, 52, 52, 52, 52, 41, 41, + 374, 303, 303, 303, 427], "time": {"begin_ms": 640, + "end_ms": 1140}}]} + - id: 7645a0d1-2e64-410d-83a8-b96040432e9a + chat_id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 + timestamp: 1716244957031 + role: AGENT + type: AGENT_MESSAGE + message_text: Hello! + emotion_features: >- + {"Admiration": 0.044921875, "Adoration": 0.0253753662109375, + "Aesthetic Appreciation": 0.03265380859375, "Amusement": + 0.118408203125, "Anger": 0.06719970703125, "Anxiety": + 0.0411376953125, "Awe": 0.03802490234375, "Awkwardness": + 0.056549072265625, "Boredom": 0.04217529296875, "Calmness": + 0.08709716796875, "Concentration": 0.070556640625, + "Confusion": 0.06964111328125, "Contemplation": + 0.0343017578125, "Contempt": 0.037689208984375, + "Contentment": 0.059417724609375, "Craving": + 0.01132965087890625, "Desire": 0.01406097412109375, + "Determination": 0.1143798828125, "Disappointment": + 0.051177978515625, "Disgust": 0.028594970703125, "Distress": + 0.054901123046875, "Doubt": 0.04638671875, "Ecstasy": + 0.0258026123046875, "Embarrassment": 0.0222015380859375, + "Empathic Pain": 0.015777587890625, "Entrancement": + 0.0160980224609375, "Envy": 0.0163421630859375, + "Excitement": 0.129638671875, "Fear": 0.03125, "Guilt": + 0.01483917236328125, "Horror": 0.0194549560546875, + "Interest": 0.1341552734375, "Joy": 0.0738525390625, "Love": + 0.0216522216796875, "Nostalgia": 0.0210418701171875, "Pain": + 0.020721435546875, "Pride": 0.05499267578125, "Realization": + 0.0728759765625, "Relief": 0.04052734375, "Romance": + 0.0129241943359375, "Sadness": 0.0254669189453125, + "Satisfaction": 0.07159423828125, "Shame": 0.01495361328125, + "Surprise (negative)": 0.05560302734375, "Surprise + (positive)": 0.07965087890625, "Sympathy": + 0.022247314453125, "Tiredness": 0.0194549560546875, + "Triumph": 0.04107666015625} + metadata: '' + page_number: 0 + page_size: 3 + total_pages: 1 + end_timestamp: 1716244958546 + metadata: '' + config: + id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 + version: 0 + source: + openapi: stenographer-openapi.json diff --git a/.mock/definition/empathic-voice/configs.yml b/.mock/definition/empathic-voice/configs.yml index 1b5bf27..c2ceedd 100644 --- a/.mock/definition/empathic-voice/configs.yml +++ b/.mock/definition/empathic-voice/configs.yml @@ -1,880 +1,880 @@ imports: - root: __package__.yml + root: __package__.yml service: - auth: false - base-path: "" - endpoints: - list-configs: - path: /v0/evi/configs - method: GET - auth: true + auth: false + base-path: '' + endpoints: + list-configs: + path: /v0/evi/configs + method: GET + auth: true + docs: >- + Fetches a paginated list of **Configs**. + + + For more details on configuration options and how to configure EVI, see + our [configuration + guide](/docs/empathic-voice-interface-evi/configuration). + display-name: List configs + request: + name: ConfigsListConfigsRequest + query-parameters: + page_number: + type: optional docs: >- - Fetches a paginated list of **Configs**. - - - For more details on configuration options and how to configure EVI, see - our [configuration - guide](/docs/empathic-voice-interface-evi/configuration). 
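Before moving on to the configs service, one practical note on the paged chat events example above: `emotion_features` and `metadata` are returned as stringified JSON rather than nested objects, so clients have to parse them before use. A minimal Python sketch (the helper name is illustrative, not part of the API):

import json

def summarize_event(event: dict) -> None:
    # `emotion_features` and `metadata` arrive as stringified JSON; empty
    # strings (as on the SYSTEM_PROMPT event above) mean no data was attached.
    if event.get("emotion_features"):
        scores = json.loads(event["emotion_features"])
        top = sorted(scores.items(), key=lambda kv: kv[1], reverse=True)[:3]
        print(event["role"], event["message_text"], "->", top)
    if event.get("metadata"):
        for segment in json.loads(event["metadata"]).get("segments", []):
            t = segment["time"]
            print(f'  "{segment["content"]}" spans {t["begin_ms"]}-{t["end_ms"]} ms')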
- display-name: List configs - request: - name: ConfigsListConfigsRequest - query-parameters: - page_number: - type: optional - docs: >- - Specifies the page number to retrieve, enabling pagination. - - - This parameter uses zero-based indexing. For example, setting - `page_number` to 0 retrieves the first page of results (items 0-9 - if `page_size` is 10), setting `page_number` to 1 retrieves the - second page (items 10-19), and so on. Defaults to 0, which - retrieves the first page. - page_size: - type: optional - docs: >- - Specifies the maximum number of results to include per page, - enabling pagination. The value must be between 1 and 100, - inclusive. - - - For example, if `page_size` is set to 10, each page will include - up to 10 items. Defaults to 10. - restrict_to_most_recent: - type: optional - docs: >- - By default, `restrict_to_most_recent` is set to true, returning - only the latest version of each config. To include all versions of - each config in the list, set `restrict_to_most_recent` to false. - name: - type: optional - docs: Filter to only include configs with this name. - response: - docs: Success - type: root.ReturnPagedConfigs - errors: - - root.BadRequestError - examples: - - query-parameters: - page_number: 0 - page_size: 1 - response: - body: - page_number: 0 - page_size: 1 - total_pages: 1 - configs_page: - - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - version: 0 - version_description: "" - name: Weather Assistant Config - created_on: 1715267200693 - modified_on: 1715267200693 - evi_version: "2" - prompt: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - version_type: FIXED - version_description: "" - name: Weather Assistant Prompt - created_on: 1715267200693 - modified_on: 1715267200693 - text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to - user queries concisely and clearly. Use simple language - and avoid technical jargon. Provide temperature, - precipitation, wind conditions, and any weather alerts. - Include helpful tips if severe weather is expected. - voice: - provider: HUME_AI - name: SAMPLE VOICE - custom_voice: - id: 00aa8ee9-c50e-4ea1-9af0-7b08ad451704 - version: 1 - name: SAMPLE VOICE - created_on: 1724704587367 - modified_on: 1725489961583 - base_voice: KORA - parameter_model: 20240715-4parameter - parameters: - gender: -7 - huskiness: -2 - nasality: -8 - pitch: -9 - language_model: - model_provider: ANTHROPIC - model_resource: claude-3-5-sonnet-20240620 - temperature: 1 - ellm_model: - allow_short_responses: false - tools: [] - builtin_tools: [] - event_messages: - on_new_chat: - enabled: false - text: "" - on_inactivity_timeout: - enabled: false - text: "" - on_max_duration_timeout: - enabled: false - text: "" - timeouts: - inactivity: - enabled: true - duration_secs: 600 - max_duration: - enabled: true - duration_secs: 1800 - create-config: - path: /v0/evi/configs - method: POST - auth: true - docs: >- - Creates a **Config** which can be applied to EVI. - - - For more details on configuration options and how to configure EVI, see - our [configuration - guide](/docs/empathic-voice-interface-evi/configuration). - display-name: Create config - request: - name: PostedConfig - body: - properties: - evi_version: - type: string - docs: >- - Specifies the EVI version to use. Use `"1"` for version 1, or - `"2"` for the latest enhanced version. For a detailed comparison - of the two versions, refer to our - [guide](/docs/empathic-voice-interface-evi/evi-2). 
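As a concrete illustration of the `list-configs` request defined in this hunk, the sketch below calls GET /v0/evi/configs with the documented pagination parameters. The base URL, the `X-Hume-Api-Key` header, and the `HUME_API_KEY` environment variable are assumptions here, not taken from this file; adjust them to your own environment and auth setup.

import os
import requests

BASE_URL = "https://api.hume.ai"  # assumed production host
HEADERS = {"X-Hume-Api-Key": os.environ["HUME_API_KEY"]}  # assumed header-key auth

# GET /v0/evi/configs with zero-based pagination; restrict_to_most_recent=True
# returns only the latest version of each Config (page 0 holds items 0-9 when
# page_size is 10).
resp = requests.get(
    f"{BASE_URL}/v0/evi/configs",
    headers=HEADERS,
    params={"page_number": 0, "page_size": 10, "restrict_to_most_recent": True},
)
resp.raise_for_status()
for config in resp.json()["configs_page"]:
    print(config["id"], config["version"], config["name"])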
- name: - type: string - docs: Name applied to all versions of a particular Config. - version_description: - type: optional - docs: An optional description of the Config version. - prompt: optional - voice: - type: optional - docs: A voice specification associated with this Config. - language_model: - type: optional - docs: >- - The supplemental language model associated with this Config. - - - This model is used to generate longer, more detailed responses - from EVI. Choosing an appropriate supplemental language model - for your use case is crucial for generating fast, high-quality - responses from EVI. - ellm_model: - type: optional - docs: >- - The eLLM setup associated with this Config. - - - Hume's eLLM (empathic Large Language Model) is a multimodal - language model that takes into account both expression measures - and language. The eLLM generates short, empathic language - responses and guides text-to-speech (TTS) prosody. - tools: - type: optional>> - docs: List of user-defined tools associated with this Config. - builtin_tools: - type: optional>> - docs: List of built-in tools associated with this Config. - event_messages: optional - timeouts: optional - content-type: application/json - response: - docs: Created - type: root.ReturnConfig - errors: - - root.BadRequestError - examples: - - request: - name: Weather Assistant Config - prompt: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - evi_version: "2" - voice: - provider: HUME_AI - name: SAMPLE VOICE - language_model: - model_provider: ANTHROPIC - model_resource: claude-3-5-sonnet-20240620 - temperature: 1 - event_messages: - on_new_chat: - enabled: false - text: "" - on_inactivity_timeout: - enabled: false - text: "" - on_max_duration_timeout: - enabled: false - text: "" - response: - body: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - version: 0 - version_description: "" - name: Weather Assistant Config - created_on: 1715275452390 - modified_on: 1715275452390 - evi_version: "2" - prompt: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - version_type: FIXED - version_description: "" - name: Weather Assistant Prompt - created_on: 1715267200693 - modified_on: 1715267200693 - text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to user - queries concisely and clearly. Use simple language and avoid - technical jargon. Provide temperature, precipitation, wind - conditions, and any weather alerts. Include helpful tips if - severe weather is expected. - voice: - provider: HUME_AI - name: SAMPLE VOICE - custom_voice: - id: 00aa8ee9-c50e-4ea1-9af0-7b08ad451704 - version: 1 - name: SAMPLE VOICE - created_on: 1724704587367 - modified_on: 1725489961583 - base_voice: KORA - parameter_model: 20240715-4parameter - parameters: - gender: -7 - huskiness: -2 - nasality: -8 - pitch: -9 - language_model: - model_provider: ANTHROPIC - model_resource: claude-3-5-sonnet-20240620 - temperature: 1 - ellm_model: - allow_short_responses: false - tools: [] - builtin_tools: [] - event_messages: - on_new_chat: - enabled: false - text: "" - on_inactivity_timeout: - enabled: false - text: "" - on_max_duration_timeout: - enabled: false - text: "" - timeouts: - inactivity: - enabled: true - duration_secs: 600 - max_duration: - enabled: true - duration_secs: 1800 - list-config-versions: - path: /v0/evi/configs/{id} - method: GET - auth: true - docs: >- - Fetches a list of a **Config's** versions. 
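To show the `create-config` endpoint in use, here is a minimal sketch that mirrors the example request body in this spec. Base URL, auth header, and environment variable are again assumptions; the prompt ID and voice name are the example values shown above.

import os
import requests

BASE_URL = "https://api.hume.ai"  # assumed
HEADERS = {"X-Hume-Api-Key": os.environ["HUME_API_KEY"]}  # assumed

# POST /v0/evi/configs — mirrors the example request body above.
payload = {
    "name": "Weather Assistant Config",
    "evi_version": "2",
    "prompt": {"id": "af699d45-2985-42cc-91b9-af9e5da3bac5", "version": 0},
    "voice": {"provider": "HUME_AI", "name": "SAMPLE VOICE"},
    "language_model": {
        "model_provider": "ANTHROPIC",
        "model_resource": "claude-3-5-sonnet-20240620",
        "temperature": 1,
    },
}
resp = requests.post(f"{BASE_URL}/v0/evi/configs", headers=HEADERS, json=payload)
resp.raise_for_status()
config = resp.json()
print(config["id"], config["version"])  # a newly created Config starts at version 0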
- - - For more details on configuration options and how to configure EVI, see - our [configuration - guide](/docs/empathic-voice-interface-evi/configuration). - path-parameters: - id: - type: string - docs: Identifier for a Config. Formatted as a UUID. - display-name: List config versions - request: - name: ConfigsListConfigVersionsRequest - query-parameters: - page_number: - type: optional - docs: >- - Specifies the page number to retrieve, enabling pagination. - - - This parameter uses zero-based indexing. For example, setting - `page_number` to 0 retrieves the first page of results (items 0-9 - if `page_size` is 10), setting `page_number` to 1 retrieves the - second page (items 10-19), and so on. Defaults to 0, which - retrieves the first page. - page_size: - type: optional - docs: >- - Specifies the maximum number of results to include per page, - enabling pagination. The value must be between 1 and 100, - inclusive. - - - For example, if `page_size` is set to 10, each page will include - up to 10 items. Defaults to 10. - restrict_to_most_recent: - type: optional - docs: >- - By default, `restrict_to_most_recent` is set to true, returning - only the latest version of each config. To include all versions of - each config in the list, set `restrict_to_most_recent` to false. - response: - docs: Success - type: root.ReturnPagedConfigs - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - response: - body: - page_number: 0 - page_size: 10 - total_pages: 1 - configs_page: - - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - version: 0 - version_description: "" - name: Weather Assistant Config - created_on: 1715275452390 - modified_on: 1715275452390 - evi_version: "2" - prompt: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - version_type: FIXED - version_description: "" - name: Weather Assistant Prompt - created_on: 1715267200693 - modified_on: 1715267200693 - text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to - user queries concisely and clearly. Use simple language - and avoid technical jargon. Provide temperature, - precipitation, wind conditions, and any weather alerts. - Include helpful tips if severe weather is expected. - voice: - provider: HUME_AI - name: SAMPLE VOICE - custom_voice: - id: 00aa8ee9-c50e-4ea1-9af0-7b08ad451704 - version: 1 - name: SAMPLE VOICE - created_on: 1724704587367 - modified_on: 1725489961583 - base_voice: KORA - parameter_model: 20240715-4parameter - parameters: - gender: -7 - huskiness: -2 - nasality: -8 - pitch: -9 - language_model: - model_provider: ANTHROPIC - model_resource: claude-3-5-sonnet-20240620 - temperature: 1 - ellm_model: - allow_short_responses: false - tools: [] - builtin_tools: [] - event_messages: - on_new_chat: - enabled: false - text: "" - on_inactivity_timeout: - enabled: false - text: "" - on_max_duration_timeout: - enabled: false - text: "" - timeouts: - inactivity: - enabled: true - duration_secs: 600 - max_duration: - enabled: true - duration_secs: 1800 - create-config-version: - path: /v0/evi/configs/{id} - method: POST - auth: true - docs: >- - Updates a **Config** by creating a new version of the **Config**. - - - For more details on configuration options and how to configure EVI, see - our [configuration - guide](/docs/empathic-voice-interface-evi/configuration). - path-parameters: - id: - type: string - docs: Identifier for a Config. Formatted as a UUID. 
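A quick sketch of the `list-config-versions` request (GET /v0/evi/configs/{id}) follows; setting `restrict_to_most_recent` to false pages through every version of the Config rather than only the latest. The base URL and auth header are assumptions; the config ID is the example UUID used throughout this spec.

import os
import requests

BASE_URL = "https://api.hume.ai"  # assumed
HEADERS = {"X-Hume-Api-Key": os.environ["HUME_API_KEY"]}  # assumed

config_id = "1b60e1a0-cc59-424a-8d2c-189d354db3f3"  # example id from this spec

# GET /v0/evi/configs/{id} — list all versions of one Config.
resp = requests.get(
    f"{BASE_URL}/v0/evi/configs/{config_id}",
    headers=HEADERS,
    params={"page_number": 0, "page_size": 10, "restrict_to_most_recent": False},
)
resp.raise_for_status()
for version in resp.json()["configs_page"]:
    print(version["version"], version["version_description"] or "(no description)")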
- display-name: Create config version - request: - name: PostedConfigVersion - body: - properties: - evi_version: - type: string - docs: The version of the EVI used with this config. - version_description: - type: optional - docs: An optional description of the Config version. - prompt: optional - voice: - type: optional - docs: A voice specification associated with this Config version. - language_model: - type: optional - docs: >- - The supplemental language model associated with this Config - version. - - - This model is used to generate longer, more detailed responses - from EVI. Choosing an appropriate supplemental language model - for your use case is crucial for generating fast, high-quality - responses from EVI. - ellm_model: - type: optional - docs: >- - The eLLM setup associated with this Config version. - - - Hume's eLLM (empathic Large Language Model) is a multimodal - language model that takes into account both expression measures - and language. The eLLM generates short, empathic language - responses and guides text-to-speech (TTS) prosody. - tools: - type: optional>> - docs: List of user-defined tools associated with this Config version. - builtin_tools: - type: optional>> - docs: List of built-in tools associated with this Config version. - event_messages: optional - timeouts: optional - content-type: application/json - response: - docs: Created - type: root.ReturnConfig - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - request: - version_description: This is an updated version of the Weather Assistant Config. - evi_version: "2" - prompt: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - voice: - provider: HUME_AI - name: ITO - language_model: - model_provider: ANTHROPIC - model_resource: claude-3-5-sonnet-20240620 - temperature: 1 - ellm_model: - allow_short_responses: true - event_messages: - on_new_chat: - enabled: false - text: "" - on_inactivity_timeout: - enabled: false - text: "" - on_max_duration_timeout: - enabled: false - text: "" - response: - body: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - version: 1 - version_description: This is an updated version of the Weather Assistant Config. - name: Weather Assistant Config - created_on: 1715275452390 - modified_on: 1722642242998 - evi_version: "2" - prompt: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - version_type: FIXED - version_description: "" - name: Weather Assistant Prompt - created_on: 1715267200693 - modified_on: 1715267200693 - text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to user - queries concisely and clearly. Use simple language and avoid - technical jargon. Provide temperature, precipitation, wind - conditions, and any weather alerts. Include helpful tips if - severe weather is expected. 
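Since `create-config-version` updates a Config by adding a new version rather than mutating an existing one, a client sketch looks like the create call but is POSTed to /v0/evi/configs/{id}. Connection details are assumptions as before; the body mirrors the example request above.

import os
import requests

BASE_URL = "https://api.hume.ai"  # assumed
HEADERS = {"X-Hume-Api-Key": os.environ["HUME_API_KEY"]}  # assumed

config_id = "1b60e1a0-cc59-424a-8d2c-189d354db3f3"  # example id from this spec

# POST /v0/evi/configs/{id} — creates a new version; the response carries the
# incremented version number.
payload = {
    "version_description": "This is an updated version of the Weather Assistant Config.",
    "evi_version": "2",
    "prompt": {"id": "af699d45-2985-42cc-91b9-af9e5da3bac5", "version": 0},
    "voice": {"provider": "HUME_AI", "name": "ITO"},
    "ellm_model": {"allow_short_responses": True},
}
resp = requests.post(f"{BASE_URL}/v0/evi/configs/{config_id}", headers=HEADERS, json=payload)
resp.raise_for_status()
print(resp.json()["version"])  # 1 in the example response above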
- voice: - provider: HUME_AI - name: ITO - custom_voice: - id: 00aa8ee9-c50e-4ea1-9af0-7b08ad451704 - version: 1 - name: SAMPLE VOICE - created_on: 1724704587367 - modified_on: 1725489961583 - base_voice: KORA - parameter_model: 20240715-4parameter - parameters: - gender: -7 - huskiness: -2 - nasality: -8 - pitch: -9 - language_model: - model_provider: ANTHROPIC - model_resource: claude-3-5-sonnet-20240620 - temperature: 1 - ellm_model: - allow_short_responses: true - tools: [] - builtin_tools: [] - event_messages: - on_new_chat: - enabled: false - text: "" - on_inactivity_timeout: - enabled: false - text: "" - on_max_duration_timeout: - enabled: false - text: "" - timeouts: - inactivity: - enabled: true - duration_secs: 600 - max_duration: - enabled: true - duration_secs: 1800 - delete-config: - path: /v0/evi/configs/{id} - method: DELETE - auth: true - docs: >- - Deletes a **Config** and its versions. - - - For more details on configuration options and how to configure EVI, see - our [configuration - guide](/docs/empathic-voice-interface-evi/configuration). - path-parameters: - id: - type: string - docs: Identifier for a Config. Formatted as a UUID. - display-name: Delete config - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - update-config-name: - path: /v0/evi/configs/{id} - method: PATCH - auth: true + Specifies the page number to retrieve, enabling pagination. + + + This parameter uses zero-based indexing. For example, setting + `page_number` to 0 retrieves the first page of results (items 0-9 + if `page_size` is 10), setting `page_number` to 1 retrieves the + second page (items 10-19), and so on. Defaults to 0, which + retrieves the first page. + page_size: + type: optional docs: >- - Updates the name of a **Config**. - - - For more details on configuration options and how to configure EVI, see - our [configuration - guide](/docs/empathic-voice-interface-evi/configuration). - path-parameters: - id: - type: string - docs: Identifier for a Config. Formatted as a UUID. - display-name: Update config name - request: - name: PostedConfigName - body: - properties: - name: - type: string - docs: Name applied to all versions of a particular Config. - content-type: application/json - response: - docs: Success - type: text - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - request: - name: Updated Weather Assistant Config Name - get-config-version: - path: /v0/evi/configs/{id}/version/{version} - method: GET - auth: true + Specifies the maximum number of results to include per page, + enabling pagination. The value must be between 1 and 100, + inclusive. + + + For example, if `page_size` is set to 10, each page will include + up to 10 items. Defaults to 10. + restrict_to_most_recent: + type: optional docs: >- - Fetches a specified version of a **Config**. - - - For more details on configuration options and how to configure EVI, see - our [configuration - guide](/docs/empathic-voice-interface-evi/configuration). - path-parameters: - id: - type: string - docs: Identifier for a Config. Formatted as a UUID. - version: - type: integer - docs: >- - Version number for a Config. - - - Configs, Prompts, Custom Voices, and Tools are versioned. This - versioning system supports iterative development, allowing you to - progressively refine configurations and revert to previous versions - if needed. 
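The `update-config-name` and `delete-config` endpoints defined here both operate on the Config as a whole, so a short combined sketch is enough. Note that the rename response is documented as plain text, not JSON. Base URL and auth header remain assumptions.

import os
import requests

BASE_URL = "https://api.hume.ai"  # assumed
HEADERS = {"X-Hume-Api-Key": os.environ["HUME_API_KEY"]}  # assumed

config_id = "1b60e1a0-cc59-424a-8d2c-189d354db3f3"  # example id from this spec

# PATCH /v0/evi/configs/{id} — renames every version of the Config; the
# response body is plain text rather than JSON.
resp = requests.patch(
    f"{BASE_URL}/v0/evi/configs/{config_id}",
    headers=HEADERS,
    json={"name": "Updated Weather Assistant Config Name"},
)
resp.raise_for_status()

# DELETE /v0/evi/configs/{id} — removes the Config and all of its versions.
requests.delete(f"{BASE_URL}/v0/evi/configs/{config_id}", headers=HEADERS).raise_for_status()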
- - - Version numbers are integer values representing different iterations - of the Config. Each update to the Config increments its version - number. - display-name: Get config version - response: - docs: Success - type: root.ReturnConfig - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 + By default, `restrict_to_most_recent` is set to true, returning + only the latest version of each config. To include all versions of + each config in the list, set `restrict_to_most_recent` to false. + name: + type: optional + docs: Filter to only include configs with this name. + response: + docs: Success + type: root.ReturnPagedConfigs + errors: + - root.BadRequestError + examples: + - query-parameters: + page_number: 0 + page_size: 1 + response: + body: + page_number: 0 + page_size: 1 + total_pages: 1 + configs_page: + - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 + version: 0 + version_description: '' + name: Weather Assistant Config + created_on: 1715267200693 + modified_on: 1715267200693 + evi_version: '2' + prompt: + id: af699d45-2985-42cc-91b9-af9e5da3bac5 + version: 0 + version_type: FIXED + version_description: '' + name: Weather Assistant Prompt + created_on: 1715267200693 + modified_on: 1715267200693 + text: >- + You are an AI weather assistant providing users with + accurate and up-to-date weather information. Respond to + user queries concisely and clearly. Use simple language + and avoid technical jargon. Provide temperature, + precipitation, wind conditions, and any weather alerts. + Include helpful tips if severe weather is expected. + voice: + provider: HUME_AI + name: SAMPLE VOICE + custom_voice: + id: 00aa8ee9-c50e-4ea1-9af0-7b08ad451704 version: 1 - response: - body: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - version: 1 - version_description: "" - name: Weather Assistant Config - created_on: 1715275452390 - modified_on: 1715275452390 - evi_version: "2" - prompt: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - version_type: FIXED - version_description: "" - name: Weather Assistant Prompt - created_on: 1715267200693 - modified_on: 1715267200693 - text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to user - queries concisely and clearly. Use simple language and avoid - technical jargon. Provide temperature, precipitation, wind - conditions, and any weather alerts. Include helpful tips if - severe weather is expected. 
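For `get-config-version`, pinning an explicit version is useful when a caller must not pick up newer revisions automatically. A minimal sketch under the same assumed connection details:

import os
import requests

BASE_URL = "https://api.hume.ai"  # assumed
HEADERS = {"X-Hume-Api-Key": os.environ["HUME_API_KEY"]}  # assumed

config_id = "1b60e1a0-cc59-424a-8d2c-189d354db3f3"  # example id from this spec
version = 1

# GET /v0/evi/configs/{id}/version/{version} — fetch one specific version.
resp = requests.get(
    f"{BASE_URL}/v0/evi/configs/{config_id}/version/{version}", headers=HEADERS
)
resp.raise_for_status()
config = resp.json()
print(config["name"], config["evi_version"], config["prompt"]["text"][:60])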
- voice: - provider: HUME_AI - name: SAMPLE VOICE - custom_voice: - id: 00aa8ee9-c50e-4ea1-9af0-7b08ad451704 - version: 1 - name: SAMPLE VOICE - created_on: 1724704587367 - modified_on: 1725489961583 - base_voice: KORA - parameter_model: 20240715-4parameter - parameters: - gender: -7 - huskiness: -2 - nasality: -8 - pitch: -9 - language_model: - model_provider: ANTHROPIC - model_resource: claude-3-5-sonnet-20240620 - temperature: 1 - ellm_model: - allow_short_responses: false - tools: [] - builtin_tools: [] - event_messages: - on_new_chat: - enabled: false - text: "" - on_inactivity_timeout: - enabled: false - text: "" - on_max_duration_timeout: - enabled: false - text: "" - timeouts: - inactivity: - enabled: true - duration_secs: 600 - max_duration: - enabled: true - duration_secs: 1800 - delete-config-version: - path: /v0/evi/configs/{id}/version/{version} - method: DELETE - auth: true + name: SAMPLE VOICE + created_on: 1724704587367 + modified_on: 1725489961583 + base_voice: KORA + parameter_model: 20240715-4parameter + parameters: + gender: -7 + huskiness: -2 + nasality: -8 + pitch: -9 + language_model: + model_provider: ANTHROPIC + model_resource: claude-3-5-sonnet-20240620 + temperature: 1 + ellm_model: + allow_short_responses: false + tools: [] + builtin_tools: [] + event_messages: + on_new_chat: + enabled: false + text: '' + on_inactivity_timeout: + enabled: false + text: '' + on_max_duration_timeout: + enabled: false + text: '' + timeouts: + inactivity: + enabled: true + duration_secs: 600 + max_duration: + enabled: true + duration_secs: 1800 + create-config: + path: /v0/evi/configs + method: POST + auth: true + docs: >- + Creates a **Config** which can be applied to EVI. + + + For more details on configuration options and how to configure EVI, see + our [configuration + guide](/docs/empathic-voice-interface-evi/configuration). + display-name: Create config + request: + name: PostedConfig + body: + properties: + evi_version: + type: string + docs: >- + Specifies the EVI version to use. Use `"1"` for version 1, or + `"2"` for the latest enhanced version. For a detailed comparison + of the two versions, refer to our + [guide](/docs/empathic-voice-interface-evi/evi-2). + name: + type: string + docs: Name applied to all versions of a particular Config. + version_description: + type: optional + docs: An optional description of the Config version. + prompt: optional + voice: + type: optional + docs: A voice specification associated with this Config. + language_model: + type: optional + docs: >- + The supplemental language model associated with this Config. + + + This model is used to generate longer, more detailed responses + from EVI. Choosing an appropriate supplemental language model + for your use case is crucial for generating fast, high-quality + responses from EVI. + ellm_model: + type: optional + docs: >- + The eLLM setup associated with this Config. + + + Hume's eLLM (empathic Large Language Model) is a multimodal + language model that takes into account both expression measures + and language. The eLLM generates short, empathic language + responses and guides text-to-speech (TTS) prosody. + tools: + type: optional>> + docs: List of user-defined tools associated with this Config. + builtin_tools: + type: optional>> + docs: List of built-in tools associated with this Config. 
+ event_messages: optional + timeouts: optional + content-type: application/json + response: + docs: Created + type: root.ReturnConfig + errors: + - root.BadRequestError + examples: + - request: + name: Weather Assistant Config + prompt: + id: af699d45-2985-42cc-91b9-af9e5da3bac5 + version: 0 + evi_version: '2' + voice: + provider: HUME_AI + name: SAMPLE VOICE + language_model: + model_provider: ANTHROPIC + model_resource: claude-3-5-sonnet-20240620 + temperature: 1 + event_messages: + on_new_chat: + enabled: false + text: '' + on_inactivity_timeout: + enabled: false + text: '' + on_max_duration_timeout: + enabled: false + text: '' + response: + body: + id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 + version: 0 + version_description: '' + name: Weather Assistant Config + created_on: 1715275452390 + modified_on: 1715275452390 + evi_version: '2' + prompt: + id: af699d45-2985-42cc-91b9-af9e5da3bac5 + version: 0 + version_type: FIXED + version_description: '' + name: Weather Assistant Prompt + created_on: 1715267200693 + modified_on: 1715267200693 + text: >- + You are an AI weather assistant providing users with + accurate and up-to-date weather information. Respond to user + queries concisely and clearly. Use simple language and avoid + technical jargon. Provide temperature, precipitation, wind + conditions, and any weather alerts. Include helpful tips if + severe weather is expected. + voice: + provider: HUME_AI + name: SAMPLE VOICE + custom_voice: + id: 00aa8ee9-c50e-4ea1-9af0-7b08ad451704 + version: 1 + name: SAMPLE VOICE + created_on: 1724704587367 + modified_on: 1725489961583 + base_voice: KORA + parameter_model: 20240715-4parameter + parameters: + gender: -7 + huskiness: -2 + nasality: -8 + pitch: -9 + language_model: + model_provider: ANTHROPIC + model_resource: claude-3-5-sonnet-20240620 + temperature: 1 + ellm_model: + allow_short_responses: false + tools: [] + builtin_tools: [] + event_messages: + on_new_chat: + enabled: false + text: '' + on_inactivity_timeout: + enabled: false + text: '' + on_max_duration_timeout: + enabled: false + text: '' + timeouts: + inactivity: + enabled: true + duration_secs: 600 + max_duration: + enabled: true + duration_secs: 1800 + list-config-versions: + path: /v0/evi/configs/{id} + method: GET + auth: true + docs: >- + Fetches a list of a **Config's** versions. + + + For more details on configuration options and how to configure EVI, see + our [configuration + guide](/docs/empathic-voice-interface-evi/configuration). + path-parameters: + id: + type: string + docs: Identifier for a Config. Formatted as a UUID. + display-name: List config versions + request: + name: ConfigsListConfigVersionsRequest + query-parameters: + page_number: + type: optional docs: >- - Deletes a specified version of a **Config**. - - - For more details on configuration options and how to configure EVI, see - our [configuration - guide](/docs/empathic-voice-interface-evi/configuration). - path-parameters: - id: - type: string - docs: Identifier for a Config. Formatted as a UUID. - version: - type: integer - docs: >- - Version number for a Config. - - - Configs, Prompts, Custom Voices, and Tools are versioned. This - versioning system supports iterative development, allowing you to - progressively refine configurations and revert to previous versions - if needed. - - - Version numbers are integer values representing different iterations - of the Config. Each update to the Config increments its version - number. 
- display-name: Delete config version - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - version: 1 - update-config-description: - path: /v0/evi/configs/{id}/version/{version} - method: PATCH - auth: true + Specifies the page number to retrieve, enabling pagination. + + + This parameter uses zero-based indexing. For example, setting + `page_number` to 0 retrieves the first page of results (items 0-9 + if `page_size` is 10), setting `page_number` to 1 retrieves the + second page (items 10-19), and so on. Defaults to 0, which + retrieves the first page. + page_size: + type: optional + docs: >- + Specifies the maximum number of results to include per page, + enabling pagination. The value must be between 1 and 100, + inclusive. + + + For example, if `page_size` is set to 10, each page will include + up to 10 items. Defaults to 10. + restrict_to_most_recent: + type: optional docs: >- - Updates the description of a **Config**. - - - For more details on configuration options and how to configure EVI, see - our [configuration - guide](/docs/empathic-voice-interface-evi/configuration). - path-parameters: - id: - type: string - docs: Identifier for a Config. Formatted as a UUID. - version: - type: integer - docs: >- - Version number for a Config. - - - Configs, Prompts, Custom Voices, and Tools are versioned. This - versioning system supports iterative development, allowing you to - progressively refine configurations and revert to previous versions - if needed. - - - Version numbers are integer values representing different iterations - of the Config. Each update to the Config increments its version - number. - display-name: Update config description - request: - name: PostedConfigVersionDescription - body: - properties: - version_description: - type: optional - docs: An optional description of the Config version. - content-type: application/json - response: - docs: Success - type: root.ReturnConfig - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 + By default, `restrict_to_most_recent` is set to true, returning + only the latest version of each config. To include all versions of + each config in the list, set `restrict_to_most_recent` to false. + response: + docs: Success + type: root.ReturnPagedConfigs + errors: + - root.BadRequestError + examples: + - path-parameters: + id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 + response: + body: + page_number: 0 + page_size: 10 + total_pages: 1 + configs_page: + - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 + version: 0 + version_description: '' + name: Weather Assistant Config + created_on: 1715275452390 + modified_on: 1715275452390 + evi_version: '2' + prompt: + id: af699d45-2985-42cc-91b9-af9e5da3bac5 + version: 0 + version_type: FIXED + version_description: '' + name: Weather Assistant Prompt + created_on: 1715267200693 + modified_on: 1715267200693 + text: >- + You are an AI weather assistant providing users with + accurate and up-to-date weather information. Respond to + user queries concisely and clearly. Use simple language + and avoid technical jargon. Provide temperature, + precipitation, wind conditions, and any weather alerts. + Include helpful tips if severe weather is expected. + voice: + provider: HUME_AI + name: SAMPLE VOICE + custom_voice: + id: 00aa8ee9-c50e-4ea1-9af0-7b08ad451704 version: 1 - request: - version_description: This is an updated version_description. 
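Finally, `update-config-description` and `delete-config-version` act on a single version via /v0/evi/configs/{id}/version/{version}. A brief sketch, with the same assumed base URL and auth header:

import os
import requests

BASE_URL = "https://api.hume.ai"  # assumed
HEADERS = {"X-Hume-Api-Key": os.environ["HUME_API_KEY"]}  # assumed

config_id = "1b60e1a0-cc59-424a-8d2c-189d354db3f3"  # example id from this spec
version = 1

# PATCH /v0/evi/configs/{id}/version/{version} — annotate a version in place.
resp = requests.patch(
    f"{BASE_URL}/v0/evi/configs/{config_id}/version/{version}",
    headers=HEADERS,
    json={"version_description": "This is an updated version_description."},
)
resp.raise_for_status()

# DELETE /v0/evi/configs/{id}/version/{version} — removes only this version,
# leaving the rest of the Config's history intact.
requests.delete(
    f"{BASE_URL}/v0/evi/configs/{config_id}/version/{version}", headers=HEADERS
).raise_for_status()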
- response: - body: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - version: 1 - version_description: This is an updated version_description. - name: Weather Assistant Config - created_on: 1715275452390 - modified_on: 1715275452390 - evi_version: "2" - prompt: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - version_type: FIXED - version_description: "" - name: Weather Assistant Prompt - created_on: 1715267200693 - modified_on: 1715267200693 - text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to user - queries concisely and clearly. Use simple language and avoid - technical jargon. Provide temperature, precipitation, wind - conditions, and any weather alerts. Include helpful tips if - severe weather is expected. - voice: - provider: HUME_AI - name: SAMPLE VOICE - custom_voice: - id: 00aa8ee9-c50e-4ea1-9af0-7b08ad451704 - version: 1 - name: SAMPLE VOICE - created_on: 1724704587367 - modified_on: 1725489961583 - base_voice: KORA - parameter_model: 20240715-4parameter - parameters: - gender: -7 - huskiness: -2 - nasality: -8 - pitch: -9 - language_model: - model_provider: ANTHROPIC - model_resource: claude-3-5-sonnet-20240620 - temperature: 1 - ellm_model: - allow_short_responses: false - tools: [] - builtin_tools: [] - event_messages: - on_new_chat: - enabled: false - text: "" - on_inactivity_timeout: - enabled: false - text: "" - on_max_duration_timeout: - enabled: false - text: "" - timeouts: - inactivity: - enabled: true - duration_secs: 600 - max_duration: - enabled: true - duration_secs: 1800 - source: - openapi: stenographer-openapi.json + name: SAMPLE VOICE + created_on: 1724704587367 + modified_on: 1725489961583 + base_voice: KORA + parameter_model: 20240715-4parameter + parameters: + gender: -7 + huskiness: -2 + nasality: -8 + pitch: -9 + language_model: + model_provider: ANTHROPIC + model_resource: claude-3-5-sonnet-20240620 + temperature: 1 + ellm_model: + allow_short_responses: false + tools: [] + builtin_tools: [] + event_messages: + on_new_chat: + enabled: false + text: '' + on_inactivity_timeout: + enabled: false + text: '' + on_max_duration_timeout: + enabled: false + text: '' + timeouts: + inactivity: + enabled: true + duration_secs: 600 + max_duration: + enabled: true + duration_secs: 1800 + create-config-version: + path: /v0/evi/configs/{id} + method: POST + auth: true + docs: >- + Updates a **Config** by creating a new version of the **Config**. + + + For more details on configuration options and how to configure EVI, see + our [configuration + guide](/docs/empathic-voice-interface-evi/configuration). + path-parameters: + id: + type: string + docs: Identifier for a Config. Formatted as a UUID. + display-name: Create config version + request: + name: PostedConfigVersion + body: + properties: + evi_version: + type: string + docs: The version of the EVI used with this config. + version_description: + type: optional + docs: An optional description of the Config version. + prompt: optional + voice: + type: optional + docs: A voice specification associated with this Config version. + language_model: + type: optional + docs: >- + The supplemental language model associated with this Config + version. + + + This model is used to generate longer, more detailed responses + from EVI. Choosing an appropriate supplemental language model + for your use case is crucial for generating fast, high-quality + responses from EVI. 
+ ellm_model: + type: optional + docs: >- + The eLLM setup associated with this Config version. + + + Hume's eLLM (empathic Large Language Model) is a multimodal + language model that takes into account both expression measures + and language. The eLLM generates short, empathic language + responses and guides text-to-speech (TTS) prosody. + tools: + type: optional>> + docs: List of user-defined tools associated with this Config version. + builtin_tools: + type: optional>> + docs: List of built-in tools associated with this Config version. + event_messages: optional + timeouts: optional + content-type: application/json + response: + docs: Created + type: root.ReturnConfig + errors: + - root.BadRequestError + examples: + - path-parameters: + id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 + request: + version_description: This is an updated version of the Weather Assistant Config. + evi_version: '2' + prompt: + id: af699d45-2985-42cc-91b9-af9e5da3bac5 + version: 0 + voice: + provider: HUME_AI + name: ITO + language_model: + model_provider: ANTHROPIC + model_resource: claude-3-5-sonnet-20240620 + temperature: 1 + ellm_model: + allow_short_responses: true + event_messages: + on_new_chat: + enabled: false + text: '' + on_inactivity_timeout: + enabled: false + text: '' + on_max_duration_timeout: + enabled: false + text: '' + response: + body: + id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 + version: 1 + version_description: This is an updated version of the Weather Assistant Config. + name: Weather Assistant Config + created_on: 1715275452390 + modified_on: 1722642242998 + evi_version: '2' + prompt: + id: af699d45-2985-42cc-91b9-af9e5da3bac5 + version: 0 + version_type: FIXED + version_description: '' + name: Weather Assistant Prompt + created_on: 1715267200693 + modified_on: 1715267200693 + text: >- + You are an AI weather assistant providing users with + accurate and up-to-date weather information. Respond to user + queries concisely and clearly. Use simple language and avoid + technical jargon. Provide temperature, precipitation, wind + conditions, and any weather alerts. Include helpful tips if + severe weather is expected. + voice: + provider: HUME_AI + name: ITO + custom_voice: + id: 00aa8ee9-c50e-4ea1-9af0-7b08ad451704 + version: 1 + name: SAMPLE VOICE + created_on: 1724704587367 + modified_on: 1725489961583 + base_voice: KORA + parameter_model: 20240715-4parameter + parameters: + gender: -7 + huskiness: -2 + nasality: -8 + pitch: -9 + language_model: + model_provider: ANTHROPIC + model_resource: claude-3-5-sonnet-20240620 + temperature: 1 + ellm_model: + allow_short_responses: true + tools: [] + builtin_tools: [] + event_messages: + on_new_chat: + enabled: false + text: '' + on_inactivity_timeout: + enabled: false + text: '' + on_max_duration_timeout: + enabled: false + text: '' + timeouts: + inactivity: + enabled: true + duration_secs: 600 + max_duration: + enabled: true + duration_secs: 1800 + delete-config: + path: /v0/evi/configs/{id} + method: DELETE + auth: true + docs: >- + Deletes a **Config** and its versions. + + + For more details on configuration options and how to configure EVI, see + our [configuration + guide](/docs/empathic-voice-interface-evi/configuration). + path-parameters: + id: + type: string + docs: Identifier for a Config. Formatted as a UUID. 
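The `delete-config` endpoint being defined here takes only a Config's UUID as a path parameter and removes the Config together with all of its versions. As a minimal sketch of calling it, assuming Python's `requests` library, an API key read from a `HUME_API_KEY` environment variable, and an assumed production host and API-key header name (none of these are taken from this definition):

import os
import requests

BASE_URL = "https://api.hume.ai"          # assumed host
API_KEY_HEADER = "X-Hume-Api-Key"         # assumed auth header name
config_id = "1b60e1a0-cc59-424a-8d2c-189d354db3f3"  # example UUID from this spec

# DELETE /v0/evi/configs/{id} removes the Config and all of its versions.
resp = requests.delete(
    f"{BASE_URL}/v0/evi/configs/{config_id}",
    headers={API_KEY_HEADER: os.environ["HUME_API_KEY"]},
)
resp.raise_for_status()  # a 400 corresponds to the BadRequestError declared here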
+ display-name: Delete config + errors: + - root.BadRequestError + examples: + - path-parameters: + id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 + update-config-name: + path: /v0/evi/configs/{id} + method: PATCH + auth: true + docs: >- + Updates the name of a **Config**. + + + For more details on configuration options and how to configure EVI, see + our [configuration + guide](/docs/empathic-voice-interface-evi/configuration). + path-parameters: + id: + type: string + docs: Identifier for a Config. Formatted as a UUID. + display-name: Update config name + request: + name: PostedConfigName + body: + properties: + name: + type: string + docs: Name applied to all versions of a particular Config. + content-type: application/json + response: + docs: Success + type: text + errors: + - root.BadRequestError + examples: + - path-parameters: + id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 + request: + name: Updated Weather Assistant Config Name + get-config-version: + path: /v0/evi/configs/{id}/version/{version} + method: GET + auth: true + docs: >- + Fetches a specified version of a **Config**. + + + For more details on configuration options and how to configure EVI, see + our [configuration + guide](/docs/empathic-voice-interface-evi/configuration). + path-parameters: + id: + type: string + docs: Identifier for a Config. Formatted as a UUID. + version: + type: integer + docs: >- + Version number for a Config. + + + Configs, Prompts, Custom Voices, and Tools are versioned. This + versioning system supports iterative development, allowing you to + progressively refine configurations and revert to previous versions + if needed. + + + Version numbers are integer values representing different iterations + of the Config. Each update to the Config increments its version + number. + display-name: Get config version + response: + docs: Success + type: root.ReturnConfig + errors: + - root.BadRequestError + examples: + - path-parameters: + id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 + version: 1 + response: + body: + id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 + version: 1 + version_description: '' + name: Weather Assistant Config + created_on: 1715275452390 + modified_on: 1715275452390 + evi_version: '2' + prompt: + id: af699d45-2985-42cc-91b9-af9e5da3bac5 + version: 0 + version_type: FIXED + version_description: '' + name: Weather Assistant Prompt + created_on: 1715267200693 + modified_on: 1715267200693 + text: >- + You are an AI weather assistant providing users with + accurate and up-to-date weather information. Respond to user + queries concisely and clearly. Use simple language and avoid + technical jargon. Provide temperature, precipitation, wind + conditions, and any weather alerts. Include helpful tips if + severe weather is expected. 
+ voice: + provider: HUME_AI + name: SAMPLE VOICE + custom_voice: + id: 00aa8ee9-c50e-4ea1-9af0-7b08ad451704 + version: 1 + name: SAMPLE VOICE + created_on: 1724704587367 + modified_on: 1725489961583 + base_voice: KORA + parameter_model: 20240715-4parameter + parameters: + gender: -7 + huskiness: -2 + nasality: -8 + pitch: -9 + language_model: + model_provider: ANTHROPIC + model_resource: claude-3-5-sonnet-20240620 + temperature: 1 + ellm_model: + allow_short_responses: false + tools: [] + builtin_tools: [] + event_messages: + on_new_chat: + enabled: false + text: '' + on_inactivity_timeout: + enabled: false + text: '' + on_max_duration_timeout: + enabled: false + text: '' + timeouts: + inactivity: + enabled: true + duration_secs: 600 + max_duration: + enabled: true + duration_secs: 1800 + delete-config-version: + path: /v0/evi/configs/{id}/version/{version} + method: DELETE + auth: true + docs: >- + Deletes a specified version of a **Config**. + + + For more details on configuration options and how to configure EVI, see + our [configuration + guide](/docs/empathic-voice-interface-evi/configuration). + path-parameters: + id: + type: string + docs: Identifier for a Config. Formatted as a UUID. + version: + type: integer + docs: >- + Version number for a Config. + + + Configs, Prompts, Custom Voices, and Tools are versioned. This + versioning system supports iterative development, allowing you to + progressively refine configurations and revert to previous versions + if needed. + + + Version numbers are integer values representing different iterations + of the Config. Each update to the Config increments its version + number. + display-name: Delete config version + errors: + - root.BadRequestError + examples: + - path-parameters: + id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 + version: 1 + update-config-description: + path: /v0/evi/configs/{id}/version/{version} + method: PATCH + auth: true + docs: >- + Updates the description of a **Config**. + + + For more details on configuration options and how to configure EVI, see + our [configuration + guide](/docs/empathic-voice-interface-evi/configuration). + path-parameters: + id: + type: string + docs: Identifier for a Config. Formatted as a UUID. + version: + type: integer + docs: >- + Version number for a Config. + + + Configs, Prompts, Custom Voices, and Tools are versioned. This + versioning system supports iterative development, allowing you to + progressively refine configurations and revert to previous versions + if needed. + + + Version numbers are integer values representing different iterations + of the Config. Each update to the Config increments its version + number. + display-name: Update config description + request: + name: PostedConfigVersionDescription + body: + properties: + version_description: + type: optional + docs: An optional description of the Config version. + content-type: application/json + response: + docs: Success + type: root.ReturnConfig + errors: + - root.BadRequestError + examples: + - path-parameters: + id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 + version: 1 + request: + version_description: This is an updated version_description. + response: + body: + id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 + version: 1 + version_description: This is an updated version_description. 
+ name: Weather Assistant Config + created_on: 1715275452390 + modified_on: 1715275452390 + evi_version: '2' + prompt: + id: af699d45-2985-42cc-91b9-af9e5da3bac5 + version: 0 + version_type: FIXED + version_description: '' + name: Weather Assistant Prompt + created_on: 1715267200693 + modified_on: 1715267200693 + text: >- + You are an AI weather assistant providing users with + accurate and up-to-date weather information. Respond to user + queries concisely and clearly. Use simple language and avoid + technical jargon. Provide temperature, precipitation, wind + conditions, and any weather alerts. Include helpful tips if + severe weather is expected. + voice: + provider: HUME_AI + name: SAMPLE VOICE + custom_voice: + id: 00aa8ee9-c50e-4ea1-9af0-7b08ad451704 + version: 1 + name: SAMPLE VOICE + created_on: 1724704587367 + modified_on: 1725489961583 + base_voice: KORA + parameter_model: 20240715-4parameter + parameters: + gender: -7 + huskiness: -2 + nasality: -8 + pitch: -9 + language_model: + model_provider: ANTHROPIC + model_resource: claude-3-5-sonnet-20240620 + temperature: 1 + ellm_model: + allow_short_responses: false + tools: [] + builtin_tools: [] + event_messages: + on_new_chat: + enabled: false + text: '' + on_inactivity_timeout: + enabled: false + text: '' + on_max_duration_timeout: + enabled: false + text: '' + timeouts: + inactivity: + enabled: true + duration_secs: 600 + max_duration: + enabled: true + duration_secs: 1800 + source: + openapi: stenographer-openapi.json diff --git a/.mock/definition/empathic-voice/customVoices.yml b/.mock/definition/empathic-voice/customVoices.yml index a48799d..0556302 100644 --- a/.mock/definition/empathic-voice/customVoices.yml +++ b/.mock/definition/empathic-voice/customVoices.yml @@ -1,238 +1,238 @@ imports: - root: __package__.yml + root: __package__.yml service: - auth: false - base-path: "" - endpoints: - list-custom-voices: - path: /v0/evi/custom_voices - method: GET - auth: true + auth: false + base-path: '' + endpoints: + list-custom-voices: + path: /v0/evi/custom_voices + method: GET + auth: true + docs: >- + Fetches a paginated list of **Custom Voices**. + + + Refer to our [voices guide](/docs/empathic-voice-interface-evi/voices) + for details on creating a custom voice. + display-name: List custom voices + request: + name: CustomVoicesListCustomVoicesRequest + query-parameters: + page_number: + type: optional docs: >- - Fetches a paginated list of **Custom Voices**. - - - Refer to our [voices guide](/docs/empathic-voice-interface-evi/voices) - for details on creating a custom voice. - display-name: List custom voices - request: - name: CustomVoicesListCustomVoicesRequest - query-parameters: - page_number: - type: optional - docs: >- - Specifies the page number to retrieve, enabling pagination. - - - This parameter uses zero-based indexing. For example, setting - `page_number` to 0 retrieves the first page of results (items 0-9 - if `page_size` is 10), setting `page_number` to 1 retrieves the - second page (items 10-19), and so on. Defaults to 0, which - retrieves the first page. - page_size: - type: optional - docs: >- - Specifies the maximum number of results to include per page, - enabling pagination. The value must be between 1 and 100, - inclusive. - - - For example, if `page_size` is set to 10, each page will include - up to 10 items. Defaults to 10. - name: - type: optional - docs: Filter to only include custom voices with this name. 
- response: - docs: Success - type: root.ReturnPagedCustomVoices - errors: - - root.BadRequestError - examples: - - response: - body: - page_number: 1 - page_size: 1 - total_pages: 1 - custom_voices_page: - - id: id - version: 1 - name: name - created_on: 1000000 - modified_on: 1000000 - base_voice: ITO - parameter_model: 20240715-4parameter - parameters: {} - create-custom-voice: - path: /v0/evi/custom_voices - method: POST - auth: true - docs: >- - Creates a **Custom Voice** that can be added to an [EVI - configuration](/reference/empathic-voice-interface-evi/configs/create-config). - - - Refer to our [voices guide](/docs/empathic-voice-interface-evi/voices) - for details on creating a custom voice. - display-name: Create custom voice - request: - body: root.PostedCustomVoice - content-type: application/json - response: - docs: Created - type: root.ReturnCustomVoice - errors: - - root.BadRequestError - examples: - - request: - name: name - base_voice: ITO - parameter_model: 20240715-4parameter - response: - body: - id: id - version: 1 - name: name - created_on: 1000000 - modified_on: 1000000 - base_voice: ITO - parameter_model: 20240715-4parameter - parameters: - gender: 1 - huskiness: 1 - nasality: 1 - pitch: 1 - get-custom-voice: - path: /v0/evi/custom_voices/{id} - method: GET - auth: true - docs: >- - Fetches a specific **Custom Voice** by ID. - - - Refer to our [voices guide](/docs/empathic-voice-interface-evi/voices) - for details on creating a custom voice. - path-parameters: - id: - type: string - docs: Identifier for a Custom Voice. Formatted as a UUID. - display-name: Get specific custom voice by ID - response: - docs: Success - type: root.ReturnCustomVoice - errors: - - root.BadRequestError - examples: - - path-parameters: - id: id - response: - body: - id: id - version: 1 - name: name - created_on: 1000000 - modified_on: 1000000 - base_voice: ITO - parameter_model: 20240715-4parameter - parameters: - gender: 1 - huskiness: 1 - nasality: 1 - pitch: 1 - create-custom-voice-version: - path: /v0/evi/custom_voices/{id} - method: POST - auth: true - docs: >- - Updates a **Custom Voice** by creating a new version of the **Custom - Voice**. - - - Refer to our [voices guide](/docs/empathic-voice-interface-evi/voices) - for details on creating a custom voice. - path-parameters: - id: - type: string - docs: Identifier for a Custom Voice. Formatted as a UUID. - display-name: Create new version of existing custom voice - request: - body: root.PostedCustomVoice - content-type: application/json - response: - docs: Created - type: root.ReturnCustomVoice - errors: - - root.BadRequestError - examples: - - path-parameters: - id: id - request: - name: name - base_voice: ITO - parameter_model: 20240715-4parameter - response: - body: - id: id - version: 1 - name: name - created_on: 1000000 - modified_on: 1000000 - base_voice: ITO - parameter_model: 20240715-4parameter - parameters: - gender: 1 - huskiness: 1 - nasality: 1 - pitch: 1 - delete-custom-voice: - path: /v0/evi/custom_voices/{id} - method: DELETE - auth: true - docs: >- - Deletes a **Custom Voice** and its versions. - - - Refer to our [voices guide](/docs/empathic-voice-interface-evi/voices) - for details on creating a custom voice. - path-parameters: - id: - type: string - docs: Identifier for a Custom Voice. Formatted as a UUID. 
- display-name: Delete a custom voice - errors: - - root.BadRequestError - examples: - - path-parameters: - id: id - update-custom-voice-name: - path: /v0/evi/custom_voices/{id} - method: PATCH - auth: true + Specifies the page number to retrieve, enabling pagination. + + + This parameter uses zero-based indexing. For example, setting + `page_number` to 0 retrieves the first page of results (items 0-9 + if `page_size` is 10), setting `page_number` to 1 retrieves the + second page (items 10-19), and so on. Defaults to 0, which + retrieves the first page. + page_size: + type: optional docs: >- - Updates the name of a **Custom Voice**. - - - Refer to our [voices guide](/docs/empathic-voice-interface-evi/voices) - for details on creating a custom voice. - path-parameters: - id: - type: string - docs: Identifier for a Custom Voice. Formatted as a UUID. - display-name: Update custom voice name - request: - name: PostedCustomVoiceName - body: - properties: - name: - type: string - docs: >- - The name of the Custom Voice. Maximum length of 75 characters. - Will be converted to all-uppercase. (e.g., "sample voice" - becomes "SAMPLE VOICE") - content-type: application/json - response: - docs: Success - type: text - errors: - - root.BadRequestError - source: - openapi: stenographer-openapi.json + Specifies the maximum number of results to include per page, + enabling pagination. The value must be between 1 and 100, + inclusive. + + + For example, if `page_size` is set to 10, each page will include + up to 10 items. Defaults to 10. + name: + type: optional + docs: Filter to only include custom voices with this name. + response: + docs: Success + type: root.ReturnPagedCustomVoices + errors: + - root.BadRequestError + examples: + - response: + body: + page_number: 1 + page_size: 1 + total_pages: 1 + custom_voices_page: + - id: id + version: 1 + name: name + created_on: 1000000 + modified_on: 1000000 + base_voice: ITO + parameter_model: 20240715-4parameter + parameters: {} + create-custom-voice: + path: /v0/evi/custom_voices + method: POST + auth: true + docs: >- + Creates a **Custom Voice** that can be added to an [EVI + configuration](/reference/empathic-voice-interface-evi/configs/create-config). + + + Refer to our [voices guide](/docs/empathic-voice-interface-evi/voices) + for details on creating a custom voice. + display-name: Create custom voice + request: + body: root.PostedCustomVoice + content-type: application/json + response: + docs: Created + type: root.ReturnCustomVoice + errors: + - root.BadRequestError + examples: + - request: + name: name + base_voice: ITO + parameter_model: 20240715-4parameter + response: + body: + id: id + version: 1 + name: name + created_on: 1000000 + modified_on: 1000000 + base_voice: ITO + parameter_model: 20240715-4parameter + parameters: + gender: 1 + huskiness: 1 + nasality: 1 + pitch: 1 + get-custom-voice: + path: /v0/evi/custom_voices/{id} + method: GET + auth: true + docs: >- + Fetches a specific **Custom Voice** by ID. + + + Refer to our [voices guide](/docs/empathic-voice-interface-evi/voices) + for details on creating a custom voice. + path-parameters: + id: + type: string + docs: Identifier for a Custom Voice. Formatted as a UUID. 
+ display-name: Get specific custom voice by ID + response: + docs: Success + type: root.ReturnCustomVoice + errors: + - root.BadRequestError + examples: + - path-parameters: + id: id + response: + body: + id: id + version: 1 + name: name + created_on: 1000000 + modified_on: 1000000 + base_voice: ITO + parameter_model: 20240715-4parameter + parameters: + gender: 1 + huskiness: 1 + nasality: 1 + pitch: 1 + create-custom-voice-version: + path: /v0/evi/custom_voices/{id} + method: POST + auth: true + docs: >- + Updates a **Custom Voice** by creating a new version of the **Custom + Voice**. + + + Refer to our [voices guide](/docs/empathic-voice-interface-evi/voices) + for details on creating a custom voice. + path-parameters: + id: + type: string + docs: Identifier for a Custom Voice. Formatted as a UUID. + display-name: Create new version of existing custom voice + request: + body: root.PostedCustomVoice + content-type: application/json + response: + docs: Created + type: root.ReturnCustomVoice + errors: + - root.BadRequestError + examples: + - path-parameters: + id: id + request: + name: name + base_voice: ITO + parameter_model: 20240715-4parameter + response: + body: + id: id + version: 1 + name: name + created_on: 1000000 + modified_on: 1000000 + base_voice: ITO + parameter_model: 20240715-4parameter + parameters: + gender: 1 + huskiness: 1 + nasality: 1 + pitch: 1 + delete-custom-voice: + path: /v0/evi/custom_voices/{id} + method: DELETE + auth: true + docs: >- + Deletes a **Custom Voice** and its versions. + + + Refer to our [voices guide](/docs/empathic-voice-interface-evi/voices) + for details on creating a custom voice. + path-parameters: + id: + type: string + docs: Identifier for a Custom Voice. Formatted as a UUID. + display-name: Delete a custom voice + errors: + - root.BadRequestError + examples: + - path-parameters: + id: id + update-custom-voice-name: + path: /v0/evi/custom_voices/{id} + method: PATCH + auth: true + docs: >- + Updates the name of a **Custom Voice**. + + + Refer to our [voices guide](/docs/empathic-voice-interface-evi/voices) + for details on creating a custom voice. + path-parameters: + id: + type: string + docs: Identifier for a Custom Voice. Formatted as a UUID. + display-name: Update custom voice name + request: + name: PostedCustomVoiceName + body: + properties: + name: + type: string + docs: >- + The name of the Custom Voice. Maximum length of 75 characters. + Will be converted to all-uppercase. (e.g., "sample voice" + becomes "SAMPLE VOICE") + content-type: application/json + response: + docs: Success + type: text + errors: + - root.BadRequestError + source: + openapi: stenographer-openapi.json diff --git a/.mock/definition/empathic-voice/prompts.yml b/.mock/definition/empathic-voice/prompts.yml index eb96396..9551af7 100644 --- a/.mock/definition/empathic-voice/prompts.yml +++ b/.mock/definition/empathic-voice/prompts.yml @@ -1,533 +1,533 @@ imports: - root: __package__.yml + root: __package__.yml service: - auth: false - base-path: "" - endpoints: - list-prompts: - path: /v0/evi/prompts - method: GET - auth: true + auth: false + base-path: '' + endpoints: + list-prompts: + path: /v0/evi/prompts + method: GET + auth: true + docs: >- + Fetches a paginated list of **Prompts**. + + + See our [prompting + guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on + crafting your system prompt. 
+ pagination: + offset: $request.page_number + results: $response.prompts_page + display-name: List prompts + request: + name: PromptsListPromptsRequest + query-parameters: + page_number: + type: optional docs: >- - Fetches a paginated list of **Prompts**. - - - See our [prompting - guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on - crafting your system prompt. - pagination: - offset: $request.page_number - results: $response.prompts_page - display-name: List prompts - request: - name: PromptsListPromptsRequest - query-parameters: - page_number: - type: optional - docs: >- - Specifies the page number to retrieve, enabling pagination. - - - This parameter uses zero-based indexing. For example, setting - `page_number` to 0 retrieves the first page of results (items 0-9 - if `page_size` is 10), setting `page_number` to 1 retrieves the - second page (items 10-19), and so on. Defaults to 0, which - retrieves the first page. - page_size: - type: optional - docs: >- - Specifies the maximum number of results to include per page, - enabling pagination. The value must be between 1 and 100, - inclusive. - - - For example, if `page_size` is set to 10, each page will include - up to 10 items. Defaults to 10. - restrict_to_most_recent: - type: optional - docs: >- - By default, `restrict_to_most_recent` is set to true, returning - only the latest version of each prompt. To include all versions of - each prompt in the list, set `restrict_to_most_recent` to false. - name: - type: optional - docs: Filter to only include prompts with this name. - response: - docs: Success - type: root.ReturnPagedPrompts - errors: - - root.BadRequestError - examples: - - query-parameters: - page_number: 0 - page_size: 2 - response: - body: - page_number: 0 - page_size: 2 - total_pages: 1 - prompts_page: - - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - version_type: FIXED - version_description: "" - name: Weather Assistant Prompt - created_on: 1715267200693 - modified_on: 1715267200693 - text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to user - queries concisely and clearly. Use simple language and avoid - technical jargon. Provide temperature, precipitation, wind - conditions, and any weather alerts. Include helpful tips if - severe weather is expected. - - id: 616b2b4c-a096-4445-9c23-64058b564fc2 - version: 0 - version_type: FIXED - version_description: "" - name: Web Search Assistant Prompt - created_on: 1715267200693 - modified_on: 1715267200693 - text: >- - You are an AI web search assistant designed to help - users find accurate and relevant information on the web. - Respond to user queries promptly, using the built-in web - search tool to retrieve up-to-date results. Present - information clearly and concisely, summarizing key points - where necessary. Use simple language and avoid technical - jargon. If needed, provide helpful tips for refining search - queries to obtain better results. - create-prompt: - path: /v0/evi/prompts - method: POST - auth: true - docs: >- - Creates a **Prompt** that can be added to an [EVI - configuration](/reference/empathic-voice-interface-evi/configs/create-config). - - - See our [prompting - guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on - crafting your system prompt. - display-name: Create prompt - request: - name: PostedPrompt - body: - properties: - name: - type: string - docs: Name applied to all versions of a particular Prompt. 
- version_description: - type: optional - docs: An optional description of the Prompt version. - text: - type: string - docs: >- - Instructions used to shape EVI’s behavior, responses, and style. - - - You can use the Prompt to define a specific goal or role for - EVI, specifying how it should act or what it should focus on - during the conversation. For example, EVI can be instructed to - act as a customer support representative, a fitness coach, or a - travel advisor, each with its own set of behaviors and response - styles. - - - For help writing a system prompt, see our [Prompting - Guide](/docs/empathic-voice-interface-evi/prompting). - content-type: application/json - response: - docs: Created - type: optional - errors: - - root.BadRequestError - examples: - - request: - name: Weather Assistant Prompt - text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to user - queries concisely and clearly. Use simple language and avoid - technical jargon. Provide temperature, precipitation, wind - conditions, and any weather alerts. Include helpful tips if severe - weather is expected. - response: - body: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - version_type: FIXED - name: Weather Assistant Prompt - created_on: 1722633247488 - modified_on: 1722633247488 - text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to user - queries concisely and clearly. Use simple language and avoid - technical jargon. Provide temperature, precipitation, wind - conditions, and any weather alerts. Include helpful tips if - severe weather is expected. - list-prompt-versions: - path: /v0/evi/prompts/{id} - method: GET - auth: true - docs: >- - Fetches a list of a **Prompt's** versions. - - - See our [prompting - guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on - crafting your system prompt. - path-parameters: - id: - type: string - docs: Identifier for a Prompt. Formatted as a UUID. - display-name: List prompt versions - request: - name: PromptsListPromptVersionsRequest - query-parameters: - page_number: - type: optional - docs: >- - Specifies the page number to retrieve, enabling pagination. - - - This parameter uses zero-based indexing. For example, setting - `page_number` to 0 retrieves the first page of results (items 0-9 - if `page_size` is 10), setting `page_number` to 1 retrieves the - second page (items 10-19), and so on. Defaults to 0, which - retrieves the first page. - page_size: - type: optional - docs: >- - Specifies the maximum number of results to include per page, - enabling pagination. The value must be between 1 and 100, - inclusive. - - - For example, if `page_size` is set to 10, each page will include - up to 10 items. Defaults to 10. - restrict_to_most_recent: - type: optional - docs: >- - By default, `restrict_to_most_recent` is set to true, returning - only the latest version of each prompt. To include all versions of - each prompt in the list, set `restrict_to_most_recent` to false. 
- response: - docs: Success - type: root.ReturnPagedPrompts - errors: - - root.BadRequestError - examples: - - path-parameters: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - response: - body: - page_number: 0 - page_size: 10 - total_pages: 1 - prompts_page: - - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - version_type: FIXED - version_description: "" - name: Weather Assistant Prompt - created_on: 1722633247488 - modified_on: 1722633247488 - text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to user - queries concisely and clearly. Use simple language and avoid - technical jargon. Provide temperature, precipitation, wind - conditions, and any weather alerts. Include helpful tips if - severe weather is expected. - create-prompt-verison: - path: /v0/evi/prompts/{id} - method: POST - auth: true - docs: >- - Updates a **Prompt** by creating a new version of the **Prompt**. - - - See our [prompting - guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on - crafting your system prompt. - path-parameters: - id: - type: string - docs: Identifier for a Prompt. Formatted as a UUID. - display-name: Create prompt version - request: - name: PostedPromptVersion - body: - properties: - version_description: - type: optional - docs: An optional description of the Prompt version. - text: - type: string - docs: >- - Instructions used to shape EVI’s behavior, responses, and style - for this version of the Prompt. - - - You can use the Prompt to define a specific goal or role for - EVI, specifying how it should act or what it should focus on - during the conversation. For example, EVI can be instructed to - act as a customer support representative, a fitness coach, or a - travel advisor, each with its own set of behaviors and response - styles. - - - For help writing a system prompt, see our [Prompting - Guide](/docs/empathic-voice-interface-evi/prompting). - content-type: application/json - response: - docs: Created - type: optional - errors: - - root.BadRequestError - examples: - - path-parameters: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - request: - text: >- - You are an updated version of an AI weather assistant - providing users with accurate and up-to-date weather information. - Respond to user queries concisely and clearly. Use simple language - and avoid technical jargon. Provide temperature, precipitation, - wind conditions, and any weather alerts. Include helpful tips if - severe weather is expected. - version_description: This is an updated version of the Weather Assistant Prompt. - response: - body: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 1 - version_type: FIXED - version_description: This is an updated version of the Weather Assistant Prompt. - name: Weather Assistant Prompt - created_on: 1722633247488 - modified_on: 1722635140150 - text: >- - You are an updated version of an AI weather assistant - providing users with accurate and up-to-date weather - information. Respond to user queries concisely and clearly. Use - simple language and avoid technical jargon. Provide temperature, - precipitation, wind conditions, and any weather alerts. Include - helpful tips if severe weather is expected. - delete-prompt: - path: /v0/evi/prompts/{id} - method: DELETE - auth: true + Specifies the page number to retrieve, enabling pagination. + + + This parameter uses zero-based indexing. 
For example, setting + `page_number` to 0 retrieves the first page of results (items 0-9 + if `page_size` is 10), setting `page_number` to 1 retrieves the + second page (items 10-19), and so on. Defaults to 0, which + retrieves the first page. + page_size: + type: optional docs: >- - Deletes a **Prompt** and its versions. - - - See our [prompting - guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on - crafting your system prompt. - path-parameters: - id: - type: string - docs: Identifier for a Prompt. Formatted as a UUID. - display-name: Delete prompt - errors: - - root.BadRequestError - examples: - - path-parameters: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - update-prompt-name: - path: /v0/evi/prompts/{id} - method: PATCH - auth: true + Specifies the maximum number of results to include per page, + enabling pagination. The value must be between 1 and 100, + inclusive. + + + For example, if `page_size` is set to 10, each page will include + up to 10 items. Defaults to 10. + restrict_to_most_recent: + type: optional docs: >- - Updates the name of a **Prompt**. - - - See our [prompting - guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on - crafting your system prompt. - path-parameters: - id: - type: string - docs: Identifier for a Prompt. Formatted as a UUID. - display-name: Update prompt name - request: - name: PostedPromptName - body: - properties: - name: - type: string - docs: Name applied to all versions of a particular Prompt. - content-type: application/json - response: - docs: Success - type: text - errors: - - root.BadRequestError - examples: - - path-parameters: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - request: - name: Updated Weather Assistant Prompt Name - get-prompt-version: - path: /v0/evi/prompts/{id}/version/{version} - method: GET - auth: true + By default, `restrict_to_most_recent` is set to true, returning + only the latest version of each prompt. To include all versions of + each prompt in the list, set `restrict_to_most_recent` to false. + name: + type: optional + docs: Filter to only include prompts with this name. + response: + docs: Success + type: root.ReturnPagedPrompts + errors: + - root.BadRequestError + examples: + - query-parameters: + page_number: 0 + page_size: 2 + response: + body: + page_number: 0 + page_size: 2 + total_pages: 1 + prompts_page: + - id: af699d45-2985-42cc-91b9-af9e5da3bac5 + version: 0 + version_type: FIXED + version_description: '' + name: Weather Assistant Prompt + created_on: 1715267200693 + modified_on: 1715267200693 + text: >- + You are an AI weather assistant providing users with + accurate and up-to-date weather information. Respond to user + queries concisely and clearly. Use simple language and avoid + technical jargon. Provide temperature, precipitation, wind + conditions, and any weather alerts. Include helpful tips if + severe weather is expected. + - id: 616b2b4c-a096-4445-9c23-64058b564fc2 + version: 0 + version_type: FIXED + version_description: '' + name: Web Search Assistant Prompt + created_on: 1715267200693 + modified_on: 1715267200693 + text: >- + You are an AI web search assistant designed to help + users find accurate and relevant information on the web. + Respond to user queries promptly, using the built-in web + search tool to retrieve up-to-date results. Present + information clearly and concisely, summarizing key points + where necessary. Use simple language and avoid technical + jargon. If needed, provide helpful tips for refining search + queries to obtain better results. 
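To make the paging behavior of `list-prompts` concrete, here is a small sketch that passes the documented `page_number`/`page_size`/`restrict_to_most_recent` query parameters and reads `prompts_page` from the paged response. Only the path, method, query parameters, and response field come from this definition; the host, header name, and `HUME_API_KEY` variable are assumptions.

import os
import requests

BASE_URL = "https://api.hume.ai"  # assumed host
HEADERS = {"X-Hume-Api-Key": os.environ["HUME_API_KEY"]}  # assumed header name

def list_prompts(page_number=0, page_size=10):
    """Fetch one page of Prompts (GET /v0/evi/prompts)."""
    resp = requests.get(
        f"{BASE_URL}/v0/evi/prompts",
        headers=HEADERS,
        params={
            "page_number": page_number,       # zero-based, per the docs above
            "page_size": page_size,           # must be between 1 and 100
            "restrict_to_most_recent": True,  # only the latest version of each prompt
        },
    )
    resp.raise_for_status()
    return resp.json()["prompts_page"]

for prompt in list_prompts(page_number=0, page_size=2):
    print(prompt["id"], prompt["name"])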
+ create-prompt: + path: /v0/evi/prompts + method: POST + auth: true + docs: >- + Creates a **Prompt** that can be added to an [EVI + configuration](/reference/empathic-voice-interface-evi/configs/create-config). + + + See our [prompting + guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on + crafting your system prompt. + display-name: Create prompt + request: + name: PostedPrompt + body: + properties: + name: + type: string + docs: Name applied to all versions of a particular Prompt. + version_description: + type: optional + docs: An optional description of the Prompt version. + text: + type: string + docs: >- + Instructions used to shape EVI’s behavior, responses, and style. + + + You can use the Prompt to define a specific goal or role for + EVI, specifying how it should act or what it should focus on + during the conversation. For example, EVI can be instructed to + act as a customer support representative, a fitness coach, or a + travel advisor, each with its own set of behaviors and response + styles. + + + For help writing a system prompt, see our [Prompting + Guide](/docs/empathic-voice-interface-evi/prompting). + content-type: application/json + response: + docs: Created + type: optional + errors: + - root.BadRequestError + examples: + - request: + name: Weather Assistant Prompt + text: >- + You are an AI weather assistant providing users with + accurate and up-to-date weather information. Respond to user + queries concisely and clearly. Use simple language and avoid + technical jargon. Provide temperature, precipitation, wind + conditions, and any weather alerts. Include helpful tips if severe + weather is expected. + response: + body: + id: af699d45-2985-42cc-91b9-af9e5da3bac5 + version: 0 + version_type: FIXED + name: Weather Assistant Prompt + created_on: 1722633247488 + modified_on: 1722633247488 + text: >- + You are an AI weather assistant providing users with + accurate and up-to-date weather information. Respond to user + queries concisely and clearly. Use simple language and avoid + technical jargon. Provide temperature, precipitation, wind + conditions, and any weather alerts. Include helpful tips if + severe weather is expected. + list-prompt-versions: + path: /v0/evi/prompts/{id} + method: GET + auth: true + docs: >- + Fetches a list of a **Prompt's** versions. + + + See our [prompting + guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on + crafting your system prompt. + path-parameters: + id: + type: string + docs: Identifier for a Prompt. Formatted as a UUID. + display-name: List prompt versions + request: + name: PromptsListPromptVersionsRequest + query-parameters: + page_number: + type: optional docs: >- - Fetches a specified version of a **Prompt**. - - - See our [prompting - guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on - crafting your system prompt. - path-parameters: - id: - type: string - docs: Identifier for a Prompt. Formatted as a UUID. - version: - type: integer - docs: >- - Version number for a Prompt. - - - Prompts, Configs, Custom Voices, and Tools are versioned. This - versioning system supports iterative development, allowing you to - progressively refine prompts and revert to previous versions if - needed. - - - Version numbers are integer values representing different iterations - of the Prompt. Each update to the Prompt increments its version - number. 
- display-name: Get prompt version - response: - docs: Success - type: optional - errors: - - root.BadRequestError - examples: - - path-parameters: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - response: - body: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - version_type: FIXED - version_description: "" - name: Weather Assistant Prompt - created_on: 1722633247488 - modified_on: 1722633247488 - text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to user - queries concisely and clearly. Use simple language and avoid - technical jargon. Provide temperature, precipitation, wind - conditions, and any weather alerts. Include helpful tips if - severe weather is expected. - delete-prompt-version: - path: /v0/evi/prompts/{id}/version/{version} - method: DELETE - auth: true + Specifies the page number to retrieve, enabling pagination. + + + This parameter uses zero-based indexing. For example, setting + `page_number` to 0 retrieves the first page of results (items 0-9 + if `page_size` is 10), setting `page_number` to 1 retrieves the + second page (items 10-19), and so on. Defaults to 0, which + retrieves the first page. + page_size: + type: optional docs: >- - Deletes a specified version of a **Prompt**. - - - See our [prompting - guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on - crafting your system prompt. - path-parameters: - id: - type: string - docs: Identifier for a Prompt. Formatted as a UUID. - version: - type: integer - docs: >- - Version number for a Prompt. - - - Prompts, Configs, Custom Voices, and Tools are versioned. This - versioning system supports iterative development, allowing you to - progressively refine prompts and revert to previous versions if - needed. - - - Version numbers are integer values representing different iterations - of the Prompt. Each update to the Prompt increments its version - number. - display-name: Delete prompt version - errors: - - root.BadRequestError - examples: - - path-parameters: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 1 - update-prompt-description: - path: /v0/evi/prompts/{id}/version/{version} - method: PATCH - auth: true + Specifies the maximum number of results to include per page, + enabling pagination. The value must be between 1 and 100, + inclusive. + + + For example, if `page_size` is set to 10, each page will include + up to 10 items. Defaults to 10. + restrict_to_most_recent: + type: optional docs: >- - Updates the description of a **Prompt**. - - - See our [prompting - guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on - crafting your system prompt. - path-parameters: - id: - type: string - docs: Identifier for a Prompt. Formatted as a UUID. - version: - type: integer - docs: >- - Version number for a Prompt. - - - Prompts, Configs, Custom Voices, and Tools are versioned. This - versioning system supports iterative development, allowing you to - progressively refine prompts and revert to previous versions if - needed. - - - Version numbers are integer values representing different iterations - of the Prompt. Each update to the Prompt increments its version - number. - display-name: Update prompt description - request: - name: PostedPromptVersionDescription - body: - properties: - version_description: - type: optional - docs: An optional description of the Prompt version. 
- content-type: application/json - response: - docs: Success - type: optional - errors: - - root.BadRequestError - examples: - - path-parameters: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 1 - request: - version_description: This is an updated version_description. - response: - body: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 1 - version_type: FIXED - version_description: This is an updated version_description. - name: string - created_on: 1722633247488 - modified_on: 1722634770585 - text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to user - queries concisely and clearly. Use simple language and avoid - technical jargon. Provide temperature, precipitation, wind - conditions, and any weather alerts. Include helpful tips if - severe weather is expected. - source: - openapi: stenographer-openapi.json + By default, `restrict_to_most_recent` is set to true, returning + only the latest version of each prompt. To include all versions of + each prompt in the list, set `restrict_to_most_recent` to false. + response: + docs: Success + type: root.ReturnPagedPrompts + errors: + - root.BadRequestError + examples: + - path-parameters: + id: af699d45-2985-42cc-91b9-af9e5da3bac5 + response: + body: + page_number: 0 + page_size: 10 + total_pages: 1 + prompts_page: + - id: af699d45-2985-42cc-91b9-af9e5da3bac5 + version: 0 + version_type: FIXED + version_description: '' + name: Weather Assistant Prompt + created_on: 1722633247488 + modified_on: 1722633247488 + text: >- + You are an AI weather assistant providing users with + accurate and up-to-date weather information. Respond to user + queries concisely and clearly. Use simple language and avoid + technical jargon. Provide temperature, precipitation, wind + conditions, and any weather alerts. Include helpful tips if + severe weather is expected. + create-prompt-verison: + path: /v0/evi/prompts/{id} + method: POST + auth: true + docs: >- + Updates a **Prompt** by creating a new version of the **Prompt**. + + + See our [prompting + guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on + crafting your system prompt. + path-parameters: + id: + type: string + docs: Identifier for a Prompt. Formatted as a UUID. + display-name: Create prompt version + request: + name: PostedPromptVersion + body: + properties: + version_description: + type: optional + docs: An optional description of the Prompt version. + text: + type: string + docs: >- + Instructions used to shape EVI’s behavior, responses, and style + for this version of the Prompt. + + + You can use the Prompt to define a specific goal or role for + EVI, specifying how it should act or what it should focus on + during the conversation. For example, EVI can be instructed to + act as a customer support representative, a fitness coach, or a + travel advisor, each with its own set of behaviors and response + styles. + + + For help writing a system prompt, see our [Prompting + Guide](/docs/empathic-voice-interface-evi/prompting). + content-type: application/json + response: + docs: Created + type: optional + errors: + - root.BadRequestError + examples: + - path-parameters: + id: af699d45-2985-42cc-91b9-af9e5da3bac5 + request: + text: >- + You are an updated version of an AI weather assistant + providing users with accurate and up-to-date weather information. + Respond to user queries concisely and clearly. Use simple language + and avoid technical jargon. 
Provide temperature, precipitation, + wind conditions, and any weather alerts. Include helpful tips if + severe weather is expected. + version_description: This is an updated version of the Weather Assistant Prompt. + response: + body: + id: af699d45-2985-42cc-91b9-af9e5da3bac5 + version: 1 + version_type: FIXED + version_description: This is an updated version of the Weather Assistant Prompt. + name: Weather Assistant Prompt + created_on: 1722633247488 + modified_on: 1722635140150 + text: >- + You are an updated version of an AI weather assistant + providing users with accurate and up-to-date weather + information. Respond to user queries concisely and clearly. Use + simple language and avoid technical jargon. Provide temperature, + precipitation, wind conditions, and any weather alerts. Include + helpful tips if severe weather is expected. + delete-prompt: + path: /v0/evi/prompts/{id} + method: DELETE + auth: true + docs: >- + Deletes a **Prompt** and its versions. + + + See our [prompting + guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on + crafting your system prompt. + path-parameters: + id: + type: string + docs: Identifier for a Prompt. Formatted as a UUID. + display-name: Delete prompt + errors: + - root.BadRequestError + examples: + - path-parameters: + id: af699d45-2985-42cc-91b9-af9e5da3bac5 + update-prompt-name: + path: /v0/evi/prompts/{id} + method: PATCH + auth: true + docs: >- + Updates the name of a **Prompt**. + + + See our [prompting + guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on + crafting your system prompt. + path-parameters: + id: + type: string + docs: Identifier for a Prompt. Formatted as a UUID. + display-name: Update prompt name + request: + name: PostedPromptName + body: + properties: + name: + type: string + docs: Name applied to all versions of a particular Prompt. + content-type: application/json + response: + docs: Success + type: text + errors: + - root.BadRequestError + examples: + - path-parameters: + id: af699d45-2985-42cc-91b9-af9e5da3bac5 + request: + name: Updated Weather Assistant Prompt Name + get-prompt-version: + path: /v0/evi/prompts/{id}/version/{version} + method: GET + auth: true + docs: >- + Fetches a specified version of a **Prompt**. + + + See our [prompting + guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on + crafting your system prompt. + path-parameters: + id: + type: string + docs: Identifier for a Prompt. Formatted as a UUID. + version: + type: integer + docs: >- + Version number for a Prompt. + + + Prompts, Configs, Custom Voices, and Tools are versioned. This + versioning system supports iterative development, allowing you to + progressively refine prompts and revert to previous versions if + needed. + + + Version numbers are integer values representing different iterations + of the Prompt. Each update to the Prompt increments its version + number. + display-name: Get prompt version + response: + docs: Success + type: optional + errors: + - root.BadRequestError + examples: + - path-parameters: + id: af699d45-2985-42cc-91b9-af9e5da3bac5 + version: 0 + response: + body: + id: af699d45-2985-42cc-91b9-af9e5da3bac5 + version: 0 + version_type: FIXED + version_description: '' + name: Weather Assistant Prompt + created_on: 1722633247488 + modified_on: 1722633247488 + text: >- + You are an AI weather assistant providing users with + accurate and up-to-date weather information. Respond to user + queries concisely and clearly. Use simple language and avoid + technical jargon. 
Provide temperature, precipitation, wind + conditions, and any weather alerts. Include helpful tips if + severe weather is expected. + delete-prompt-version: + path: /v0/evi/prompts/{id}/version/{version} + method: DELETE + auth: true + docs: >- + Deletes a specified version of a **Prompt**. + + + See our [prompting + guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on + crafting your system prompt. + path-parameters: + id: + type: string + docs: Identifier for a Prompt. Formatted as a UUID. + version: + type: integer + docs: >- + Version number for a Prompt. + + + Prompts, Configs, Custom Voices, and Tools are versioned. This + versioning system supports iterative development, allowing you to + progressively refine prompts and revert to previous versions if + needed. + + + Version numbers are integer values representing different iterations + of the Prompt. Each update to the Prompt increments its version + number. + display-name: Delete prompt version + errors: + - root.BadRequestError + examples: + - path-parameters: + id: af699d45-2985-42cc-91b9-af9e5da3bac5 + version: 1 + update-prompt-description: + path: /v0/evi/prompts/{id}/version/{version} + method: PATCH + auth: true + docs: >- + Updates the description of a **Prompt**. + + + See our [prompting + guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on + crafting your system prompt. + path-parameters: + id: + type: string + docs: Identifier for a Prompt. Formatted as a UUID. + version: + type: integer + docs: >- + Version number for a Prompt. + + + Prompts, Configs, Custom Voices, and Tools are versioned. This + versioning system supports iterative development, allowing you to + progressively refine prompts and revert to previous versions if + needed. + + + Version numbers are integer values representing different iterations + of the Prompt. Each update to the Prompt increments its version + number. + display-name: Update prompt description + request: + name: PostedPromptVersionDescription + body: + properties: + version_description: + type: optional + docs: An optional description of the Prompt version. + content-type: application/json + response: + docs: Success + type: optional + errors: + - root.BadRequestError + examples: + - path-parameters: + id: af699d45-2985-42cc-91b9-af9e5da3bac5 + version: 1 + request: + version_description: This is an updated version_description. + response: + body: + id: af699d45-2985-42cc-91b9-af9e5da3bac5 + version: 1 + version_type: FIXED + version_description: This is an updated version_description. + name: string + created_on: 1722633247488 + modified_on: 1722634770585 + text: >- + You are an AI weather assistant providing users with + accurate and up-to-date weather information. Respond to user + queries concisely and clearly. Use simple language and avoid + technical jargon. Provide temperature, precipitation, wind + conditions, and any weather alerts. Include helpful tips if + severe weather is expected. 
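A short sketch of the `update-prompt-description` call defined above: a PATCH to `/v0/evi/prompts/{id}/version/{version}` whose JSON body carries only `version_description`. The UUID and description reuse the example values in this definition; the host, header name, and `HUME_API_KEY` variable are assumptions.

import os
import requests

BASE_URL = "https://api.hume.ai"  # assumed host
HEADERS = {"X-Hume-Api-Key": os.environ["HUME_API_KEY"]}  # assumed header name

prompt_id = "af699d45-2985-42cc-91b9-af9e5da3bac5"  # example UUID from this spec
version = 1

# PATCH /v0/evi/prompts/{id}/version/{version} updates only the version description.
resp = requests.patch(
    f"{BASE_URL}/v0/evi/prompts/{prompt_id}/version/{version}",
    json={"version_description": "This is an updated version_description."},
    headers=HEADERS,
)
resp.raise_for_status()
print(resp.json()["version_description"])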
+ source: + openapi: stenographer-openapi.json diff --git a/.mock/definition/empathic-voice/tools.yml b/.mock/definition/empathic-voice/tools.yml index 9280765..bf2a290 100644 --- a/.mock/definition/empathic-voice/tools.yml +++ b/.mock/definition/empathic-voice/tools.yml @@ -1,596 +1,596 @@ imports: - root: __package__.yml + root: __package__.yml service: - auth: false - base-path: "" - endpoints: - list-tools: - path: /v0/evi/tools - method: GET - auth: true + auth: false + base-path: '' + endpoints: + list-tools: + path: /v0/evi/tools + method: GET + auth: true + docs: >- + Fetches a paginated list of **Tools**. + + + Refer to our [tool + use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide + for comprehensive instructions on defining and integrating tools into + EVI. + pagination: + offset: $request.page_number + results: $response.tools_page + display-name: List tools + request: + name: ToolsListToolsRequest + query-parameters: + page_number: + type: optional docs: >- - Fetches a paginated list of **Tools**. - - - Refer to our [tool - use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide - for comprehensive instructions on defining and integrating tools into - EVI. - pagination: - offset: $request.page_number - results: $response.tools_page - display-name: List tools - request: - name: ToolsListToolsRequest - query-parameters: - page_number: - type: optional - docs: >- - Specifies the page number to retrieve, enabling pagination. - - - This parameter uses zero-based indexing. For example, setting - `page_number` to 0 retrieves the first page of results (items 0-9 - if `page_size` is 10), setting `page_number` to 1 retrieves the - second page (items 10-19), and so on. Defaults to 0, which - retrieves the first page. - page_size: - type: optional - docs: >- - Specifies the maximum number of results to include per page, - enabling pagination. The value must be between 1 and 100, - inclusive. - - - For example, if `page_size` is set to 10, each page will include - up to 10 items. Defaults to 10. - restrict_to_most_recent: - type: optional - docs: >- - By default, `restrict_to_most_recent` is set to true, returning - only the latest version of each tool. To include all versions of - each tool in the list, set `restrict_to_most_recent` to false. - name: - type: optional - docs: Filter to only include tools with this name. - response: - docs: Success - type: root.ReturnPagedUserDefinedTools - errors: - - root.BadRequestError - examples: - - query-parameters: - page_number: 0 - page_size: 2 - response: - body: - page_number: 0 - page_size: 2 - total_pages: 1 - tools_page: - - tool_type: FUNCTION - id: d20827af-5d8d-4f66-b6b9-ce2e3e1ea2b2 - version: 0 - version_type: FIXED - version_description: Fetches user's current location. - name: get_current_location - created_on: 1715267200693 - modified_on: 1715267200693 - fallback_content: Unable to fetch location. - description: Fetches user's current location. - parameters: >- - { "type": "object", "properties": { "location": { "type": - "string", "description": "The city and state, e.g. San - Francisco, CA" }}, "required": ["location"] } - - tool_type: FUNCTION - id: 4442f3ea-9038-40e3-a2ce-1522b7de770f - version: 0 - version_type: FIXED - version_description: >- - Fetches current weather and uses celsius or fahrenheit based - on location of user. - name: get_current_weather - created_on: 1715266126705 - modified_on: 1715266126705 - fallback_content: Unable to fetch location. 
- description: >- - Fetches current weather and uses celsius or fahrenheit based - on location of user. - parameters: >- - { "type": "object", "properties": { "location": { "type": - "string", "description": "The city and state, e.g. San - Francisco, CA" }, "format": { "type": "string", "enum": - ["celsius", "fahrenheit"], "description": "The temperature - unit to use. Infer this from the users location." } }, - "required": ["location", "format"] } - create-tool: - path: /v0/evi/tools - method: POST - auth: true - docs: >- - Creates a **Tool** that can be added to an [EVI - configuration](/reference/empathic-voice-interface-evi/configs/create-config). - - - Refer to our [tool - use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide - for comprehensive instructions on defining and integrating tools into - EVI. - display-name: Create tool - request: - name: PostedUserDefinedTool - body: - properties: - name: - type: string - docs: Name applied to all versions of a particular Tool. - version_description: - type: optional - docs: An optional description of the Tool version. - description: - type: optional - docs: >- - An optional description of what the Tool does, used by the - supplemental LLM to choose when and how to call the function. - parameters: - type: string - docs: >- - Stringified JSON defining the parameters used by this version of - the Tool. - - - These parameters define the inputs needed for the Tool’s - execution, including the expected data type and description for - each input field. Structured as a stringified JSON schema, this - format ensures the Tool receives data in the expected format. - fallback_content: - type: optional - docs: >- - Optional text passed to the supplemental LLM in place of the - tool call result. The LLM then uses this text to generate a - response back to the user, ensuring continuity in the - conversation if the Tool errors. - content-type: application/json - response: - docs: Created - type: optional - errors: - - root.BadRequestError - examples: - - request: - name: get_current_weather - parameters: >- - { "type": "object", "properties": { "location": { "type": - "string", "description": "The city and state, e.g. San Francisco, - CA" }, "format": { "type": "string", "enum": ["celsius", - "fahrenheit"], "description": "The temperature unit to use. Infer - this from the users location." } }, "required": ["location", - "format"] } - version_description: >- - Fetches current weather and uses celsius or fahrenheit based on - location of user. - description: This tool is for getting the current weather. - fallback_content: Unable to fetch current weather. - response: - body: - tool_type: FUNCTION - id: aa9b71c4-723c-47ff-9f83-1a1829e74376 - version: 0 - version_type: FIXED - version_description: >- - Fetches current weather and uses celsius or fahrenheit based on - location of user. - name: get_current_weather - created_on: 1715275452390 - modified_on: 1715275452390 - fallback_content: Unable to fetch current weather. - description: This tool is for getting the current weather. - parameters: >- - { "type": "object", "properties": { "location": { "type": - "string", "description": "The city and state, e.g. San - Francisco, CA" }, "format": { "type": "string", "enum": - ["celsius", "fahrenheit"], "description": "The temperature unit - to use. Infer this from the users location." 
} }, "required": - ["location", "format"] } - list-tool-versions: - path: /v0/evi/tools/{id} - method: GET - auth: true - docs: >- - Fetches a list of a **Tool's** versions. - - - Refer to our [tool - use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide - for comprehensive instructions on defining and integrating tools into - EVI. - path-parameters: - id: - type: string - docs: Identifier for a Tool. Formatted as a UUID. - display-name: List tool versions - request: - name: ToolsListToolVersionsRequest - query-parameters: - page_number: - type: optional - docs: >- - Specifies the page number to retrieve, enabling pagination. - - - This parameter uses zero-based indexing. For example, setting - `page_number` to 0 retrieves the first page of results (items 0-9 - if `page_size` is 10), setting `page_number` to 1 retrieves the - second page (items 10-19), and so on. Defaults to 0, which - retrieves the first page. - page_size: - type: optional - docs: >- - Specifies the maximum number of results to include per page, - enabling pagination. The value must be between 1 and 100, - inclusive. - - - For example, if `page_size` is set to 10, each page will include - up to 10 items. Defaults to 10. - restrict_to_most_recent: - type: optional - docs: >- - By default, `restrict_to_most_recent` is set to true, returning - only the latest version of each tool. To include all versions of - each tool in the list, set `restrict_to_most_recent` to false. - response: - docs: Success - type: root.ReturnPagedUserDefinedTools - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 00183a3f-79ba-413d-9f3b-609864268bea - response: - body: - page_number: 0 - page_size: 10 - total_pages: 1 - tools_page: - - tool_type: FUNCTION - id: 00183a3f-79ba-413d-9f3b-609864268bea - version: 1 - version_type: FIXED - version_description: >- - Fetches current weather and uses celsius, fahrenheit, or - kelvin based on location of user. - name: get_current_weather - created_on: 1715277014228 - modified_on: 1715277602313 - fallback_content: Unable to fetch current weather. - description: This tool is for getting the current weather. - parameters: >- - { "type": "object", "properties": { "location": { "type": - "string", "description": "The city and state, e.g. San - Francisco, CA" }, "format": { "type": "string", "enum": - ["celsius", "fahrenheit", "kelvin"], "description": "The - temperature unit to use. Infer this from the users - location." } }, "required": ["location", "format"] } - create-tool-version: - path: /v0/evi/tools/{id} - method: POST - auth: true - docs: >- - Updates a **Tool** by creating a new version of the **Tool**. - - - Refer to our [tool - use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide - for comprehensive instructions on defining and integrating tools into - EVI. - path-parameters: - id: - type: string - docs: Identifier for a Tool. Formatted as a UUID. - display-name: Create tool version - request: - name: PostedUserDefinedToolVersion - body: - properties: - version_description: - type: optional - docs: An optional description of the Tool version. - description: - type: optional - docs: >- - An optional description of what the Tool does, used by the - supplemental LLM to choose when and how to call the function. - parameters: - type: string - docs: >- - Stringified JSON defining the parameters used by this version of - the Tool. 
- - - These parameters define the inputs needed for the Tool’s - execution, including the expected data type and description for - each input field. Structured as a stringified JSON schema, this - format ensures the Tool receives data in the expected format. - fallback_content: - type: optional - docs: >- - Optional text passed to the supplemental LLM in place of the - tool call result. The LLM then uses this text to generate a - response back to the user, ensuring continuity in the - conversation if the Tool errors. - content-type: application/json - response: - docs: Created - type: optional - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 00183a3f-79ba-413d-9f3b-609864268bea - request: - parameters: >- - { "type": "object", "properties": { "location": { "type": - "string", "description": "The city and state, e.g. San Francisco, - CA" }, "format": { "type": "string", "enum": ["celsius", - "fahrenheit", "kelvin"], "description": "The temperature unit to - use. Infer this from the users location." } }, "required": - ["location", "format"] } - version_description: >- - Fetches current weather and uses celsius, fahrenheit, or kelvin - based on location of user. - fallback_content: Unable to fetch current weather. - description: This tool is for getting the current weather. - response: - body: - tool_type: FUNCTION - id: 00183a3f-79ba-413d-9f3b-609864268bea - version: 1 - version_type: FIXED - version_description: >- - Fetches current weather and uses celsius, fahrenheit, or kelvin - based on location of user. - name: get_current_weather - created_on: 1715277014228 - modified_on: 1715277602313 - fallback_content: Unable to fetch current weather. - description: This tool is for getting the current weather. - parameters: >- - { "type": "object", "properties": { "location": { "type": - "string", "description": "The city and state, e.g. San - Francisco, CA" }, "format": { "type": "string", "enum": - ["celsius", "fahrenheit", "kelvin"], "description": "The - temperature unit to use. Infer this from the users location." } - }, "required": ["location", "format"] } - delete-tool: - path: /v0/evi/tools/{id} - method: DELETE - auth: true + Specifies the page number to retrieve, enabling pagination. + + + This parameter uses zero-based indexing. For example, setting + `page_number` to 0 retrieves the first page of results (items 0-9 + if `page_size` is 10), setting `page_number` to 1 retrieves the + second page (items 10-19), and so on. Defaults to 0, which + retrieves the first page. + page_size: + type: optional docs: >- - Deletes a **Tool** and its versions. - - - Refer to our [tool - use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide - for comprehensive instructions on defining and integrating tools into - EVI. - path-parameters: - id: - type: string - docs: Identifier for a Tool. Formatted as a UUID. - display-name: Delete tool - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 00183a3f-79ba-413d-9f3b-609864268bea - update-tool-name: - path: /v0/evi/tools/{id} - method: PATCH - auth: true + Specifies the maximum number of results to include per page, + enabling pagination. The value must be between 1 and 100, + inclusive. + + + For example, if `page_size` is set to 10, each page will include + up to 10 items. Defaults to 10. + restrict_to_most_recent: + type: optional docs: >- - Updates the name of a **Tool**. 
- - - Refer to our [tool - use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide - for comprehensive instructions on defining and integrating tools into - EVI. - path-parameters: - id: - type: string - docs: Identifier for a Tool. Formatted as a UUID. - display-name: Update tool name - request: - name: PostedUserDefinedToolName - body: - properties: - name: - type: string - docs: Name applied to all versions of a particular Tool. - content-type: application/json - response: - docs: Success - type: text - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 00183a3f-79ba-413d-9f3b-609864268bea - request: - name: get_current_temperature - get-tool-version: - path: /v0/evi/tools/{id}/version/{version} - method: GET - auth: true + By default, `restrict_to_most_recent` is set to true, returning + only the latest version of each tool. To include all versions of + each tool in the list, set `restrict_to_most_recent` to false. + name: + type: optional + docs: Filter to only include tools with this name. + response: + docs: Success + type: root.ReturnPagedUserDefinedTools + errors: + - root.BadRequestError + examples: + - query-parameters: + page_number: 0 + page_size: 2 + response: + body: + page_number: 0 + page_size: 2 + total_pages: 1 + tools_page: + - tool_type: FUNCTION + id: d20827af-5d8d-4f66-b6b9-ce2e3e1ea2b2 + version: 0 + version_type: FIXED + version_description: Fetches user's current location. + name: get_current_location + created_on: 1715267200693 + modified_on: 1715267200693 + fallback_content: Unable to fetch location. + description: Fetches user's current location. + parameters: >- + { "type": "object", "properties": { "location": { "type": + "string", "description": "The city and state, e.g. San + Francisco, CA" }}, "required": ["location"] } + - tool_type: FUNCTION + id: 4442f3ea-9038-40e3-a2ce-1522b7de770f + version: 0 + version_type: FIXED + version_description: >- + Fetches current weather and uses celsius or fahrenheit based + on location of user. + name: get_current_weather + created_on: 1715266126705 + modified_on: 1715266126705 + fallback_content: Unable to fetch location. + description: >- + Fetches current weather and uses celsius or fahrenheit based + on location of user. + parameters: >- + { "type": "object", "properties": { "location": { "type": + "string", "description": "The city and state, e.g. San + Francisco, CA" }, "format": { "type": "string", "enum": + ["celsius", "fahrenheit"], "description": "The temperature + unit to use. Infer this from the users location." } }, + "required": ["location", "format"] } + create-tool: + path: /v0/evi/tools + method: POST + auth: true + docs: >- + Creates a **Tool** that can be added to an [EVI + configuration](/reference/empathic-voice-interface-evi/configs/create-config). + + + Refer to our [tool + use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide + for comprehensive instructions on defining and integrating tools into + EVI. + display-name: Create tool + request: + name: PostedUserDefinedTool + body: + properties: + name: + type: string + docs: Name applied to all versions of a particular Tool. + version_description: + type: optional + docs: An optional description of the Tool version. + description: + type: optional + docs: >- + An optional description of what the Tool does, used by the + supplemental LLM to choose when and how to call the function. 
+ parameters: + type: string + docs: >- + Stringified JSON defining the parameters used by this version of + the Tool. + + + These parameters define the inputs needed for the Tool’s + execution, including the expected data type and description for + each input field. Structured as a stringified JSON schema, this + format ensures the Tool receives data in the expected format. + fallback_content: + type: optional + docs: >- + Optional text passed to the supplemental LLM in place of the + tool call result. The LLM then uses this text to generate a + response back to the user, ensuring continuity in the + conversation if the Tool errors. + content-type: application/json + response: + docs: Created + type: optional + errors: + - root.BadRequestError + examples: + - request: + name: get_current_weather + parameters: >- + { "type": "object", "properties": { "location": { "type": + "string", "description": "The city and state, e.g. San Francisco, + CA" }, "format": { "type": "string", "enum": ["celsius", + "fahrenheit"], "description": "The temperature unit to use. Infer + this from the users location." } }, "required": ["location", + "format"] } + version_description: >- + Fetches current weather and uses celsius or fahrenheit based on + location of user. + description: This tool is for getting the current weather. + fallback_content: Unable to fetch current weather. + response: + body: + tool_type: FUNCTION + id: aa9b71c4-723c-47ff-9f83-1a1829e74376 + version: 0 + version_type: FIXED + version_description: >- + Fetches current weather and uses celsius or fahrenheit based on + location of user. + name: get_current_weather + created_on: 1715275452390 + modified_on: 1715275452390 + fallback_content: Unable to fetch current weather. + description: This tool is for getting the current weather. + parameters: >- + { "type": "object", "properties": { "location": { "type": + "string", "description": "The city and state, e.g. San + Francisco, CA" }, "format": { "type": "string", "enum": + ["celsius", "fahrenheit"], "description": "The temperature unit + to use. Infer this from the users location." } }, "required": + ["location", "format"] } + list-tool-versions: + path: /v0/evi/tools/{id} + method: GET + auth: true + docs: >- + Fetches a list of a **Tool's** versions. + + + Refer to our [tool + use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide + for comprehensive instructions on defining and integrating tools into + EVI. + path-parameters: + id: + type: string + docs: Identifier for a Tool. Formatted as a UUID. + display-name: List tool versions + request: + name: ToolsListToolVersionsRequest + query-parameters: + page_number: + type: optional docs: >- - Fetches a specified version of a **Tool**. - - - Refer to our [tool - use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide - for comprehensive instructions on defining and integrating tools into - EVI. - path-parameters: - id: - type: string - docs: Identifier for a Tool. Formatted as a UUID. - version: - type: integer - docs: >- - Version number for a Tool. - - - Tools, Configs, Custom Voices, and Prompts are versioned. This - versioning system supports iterative development, allowing you to - progressively refine tools and revert to previous versions if - needed. - - - Version numbers are integer values representing different iterations - of the Tool. Each update to the Tool increments its version number. 
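One detail of the Create tool endpoint that is easy to miss in the definitions above: `parameters` is a *stringified* JSON schema, not a nested object, so the schema has to be serialized before it goes into the request body. A hedged sketch using raw HTTP rather than the SDK, with the same assumed base URL, auth header, and `HUME_API_KEY` variable as before:

```python
import json
import os
import requests

BASE_URL = "https://api.hume.ai"                           # assumed Production environment
HEADERS = {"X-Hume-Api-Key": os.environ["HUME_API_KEY"]}   # assumed auth header name

# Build the tool's input schema as a dict, then serialize it explicitly,
# because the `parameters` field expects a stringified JSON schema.
parameters = {
    "type": "object",
    "properties": {
        "location": {
            "type": "string",
            "description": "The city and state, e.g. San Francisco, CA",
        },
        "format": {
            "type": "string",
            "enum": ["celsius", "fahrenheit"],
            "description": "The temperature unit to use. Infer this from the users location.",
        },
    },
    "required": ["location", "format"],
}

# POST /v0/evi/tools: create the first version (version 0) of the tool.
resp = requests.post(
    f"{BASE_URL}/v0/evi/tools",
    headers=HEADERS,
    json={
        "name": "get_current_weather",
        "parameters": json.dumps(parameters),
        "description": "This tool is for getting the current weather.",
        "fallback_content": "Unable to fetch current weather.",
    },
)
resp.raise_for_status()
tool = resp.json()
print(tool["id"], tool["version"])  # new Tool UUID and version 0
```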
- display-name: Get tool version - response: - docs: Success - type: optional - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 00183a3f-79ba-413d-9f3b-609864268bea - version: 1 - response: - body: - tool_type: FUNCTION - id: 00183a3f-79ba-413d-9f3b-609864268bea - version: 1 - version_type: FIXED - version_description: >- - Fetches current weather and uses celsius, fahrenheit, or kelvin - based on location of user. - name: string - created_on: 1715277014228 - modified_on: 1715277602313 - fallback_content: Unable to fetch current weather. - description: This tool is for getting the current weather. - parameters: >- - { "type": "object", "properties": { "location": { "type": - "string", "description": "The city and state, e.g. San - Francisco, CA" }, "format": { "type": "string", "enum": - ["celsius", "fahrenheit", "kelvin"], "description": "The - temperature unit to use. Infer this from the users location." } - }, "required": ["location", "format"] } - delete-tool-version: - path: /v0/evi/tools/{id}/version/{version} - method: DELETE - auth: true + Specifies the page number to retrieve, enabling pagination. + + + This parameter uses zero-based indexing. For example, setting + `page_number` to 0 retrieves the first page of results (items 0-9 + if `page_size` is 10), setting `page_number` to 1 retrieves the + second page (items 10-19), and so on. Defaults to 0, which + retrieves the first page. + page_size: + type: optional docs: >- - Deletes a specified version of a **Tool**. - - - Refer to our [tool - use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide - for comprehensive instructions on defining and integrating tools into - EVI. - path-parameters: - id: - type: string - docs: Identifier for a Tool. Formatted as a UUID. - version: - type: integer - docs: >- - Version number for a Tool. - - - Tools, Configs, Custom Voices, and Prompts are versioned. This - versioning system supports iterative development, allowing you to - progressively refine tools and revert to previous versions if - needed. - - - Version numbers are integer values representing different iterations - of the Tool. Each update to the Tool increments its version number. - display-name: Delete tool version - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 00183a3f-79ba-413d-9f3b-609864268bea - version: 1 - update-tool-description: - path: /v0/evi/tools/{id}/version/{version} - method: PATCH - auth: true + Specifies the maximum number of results to include per page, + enabling pagination. The value must be between 1 and 100, + inclusive. + + + For example, if `page_size` is set to 10, each page will include + up to 10 items. Defaults to 10. + restrict_to_most_recent: + type: optional docs: >- - Updates the description of a specified **Tool** version. - - - Refer to our [tool - use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide - for comprehensive instructions on defining and integrating tools into - EVI. - path-parameters: - id: - type: string - docs: Identifier for a Tool. Formatted as a UUID. - version: - type: integer - docs: >- - Version number for a Tool. - - - Tools, Configs, Custom Voices, and Prompts are versioned. This - versioning system supports iterative development, allowing you to - progressively refine tools and revert to previous versions if - needed. - - - Version numbers are integer values representing different iterations - of the Tool. Each update to the Tool increments its version number. 
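The version-scoped Tool operations in this file (Get, Delete, and Update tool description) all target `/v0/evi/tools/{id}/version/{version}` and differ only by method, which makes them easy to wrap together. A sketch under the same assumptions as the earlier examples (assumed base URL, `X-Hume-Api-Key` header, and `HUME_API_KEY` variable; the example UUID comes from the spec):

```python
import os
import requests

BASE_URL = "https://api.hume.ai"                           # assumed Production environment
HEADERS = {"X-Hume-Api-Key": os.environ["HUME_API_KEY"]}   # assumed auth header name

tool_id = "00183a3f-79ba-413d-9f3b-609864268bea"  # example UUID from the spec
url = f"{BASE_URL}/v0/evi/tools/{tool_id}/version/1"

# GET: fetch this specific version of the Tool.
version = requests.get(url, headers=HEADERS)
version.raise_for_status()
print(version.json()["version_description"])

# PATCH: replace only the version description, leaving everything else untouched.
updated = requests.patch(
    url,
    headers=HEADERS,
    json={"version_description": "Fetches current weather conditions."},
)
updated.raise_for_status()

# DELETE: remove this version; other versions of the Tool remain available.
requests.delete(url, headers=HEADERS).raise_for_status()
```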
- display-name: Update tool description - request: - name: PostedUserDefinedToolVersionDescription - body: - properties: - version_description: - type: optional - docs: An optional description of the Tool version. - content-type: application/json - response: - docs: Success - type: optional - errors: - - root.BadRequestError - examples: - - path-parameters: - id: 00183a3f-79ba-413d-9f3b-609864268bea - version: 1 - request: - version_description: >- - Fetches current temperature, precipitation, wind speed, AQI, and - other weather conditions. Uses Celsius, Fahrenheit, or kelvin - depending on user's region. - response: - body: - tool_type: FUNCTION - id: 00183a3f-79ba-413d-9f3b-609864268bea - version: 1 - version_type: FIXED - version_description: >- - Fetches current temperature, precipitation, wind speed, AQI, and - other weather conditions. Uses Celsius, Fahrenheit, or kelvin - depending on user's region. - name: string - created_on: 1715277014228 - modified_on: 1715277602313 - fallback_content: Unable to fetch current weather. - description: This tool is for getting the current weather. - parameters: >- - { "type": "object", "properties": { "location": { "type": - "string", "description": "The city and state, e.g. San - Francisco, CA" }, "format": { "type": "string", "enum": - ["celsius", "fahrenheit", "kelvin"], "description": "The - temperature unit to use. Infer this from the users location." } - }, "required": ["location", "format"] } - source: - openapi: stenographer-openapi.json + By default, `restrict_to_most_recent` is set to true, returning + only the latest version of each tool. To include all versions of + each tool in the list, set `restrict_to_most_recent` to false. + response: + docs: Success + type: root.ReturnPagedUserDefinedTools + errors: + - root.BadRequestError + examples: + - path-parameters: + id: 00183a3f-79ba-413d-9f3b-609864268bea + response: + body: + page_number: 0 + page_size: 10 + total_pages: 1 + tools_page: + - tool_type: FUNCTION + id: 00183a3f-79ba-413d-9f3b-609864268bea + version: 1 + version_type: FIXED + version_description: >- + Fetches current weather and uses celsius, fahrenheit, or + kelvin based on location of user. + name: get_current_weather + created_on: 1715277014228 + modified_on: 1715277602313 + fallback_content: Unable to fetch current weather. + description: This tool is for getting the current weather. + parameters: >- + { "type": "object", "properties": { "location": { "type": + "string", "description": "The city and state, e.g. San + Francisco, CA" }, "format": { "type": "string", "enum": + ["celsius", "fahrenheit", "kelvin"], "description": "The + temperature unit to use. Infer this from the users + location." } }, "required": ["location", "format"] } + create-tool-version: + path: /v0/evi/tools/{id} + method: POST + auth: true + docs: >- + Updates a **Tool** by creating a new version of the **Tool**. + + + Refer to our [tool + use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide + for comprehensive instructions on defining and integrating tools into + EVI. + path-parameters: + id: + type: string + docs: Identifier for a Tool. Formatted as a UUID. + display-name: Create tool version + request: + name: PostedUserDefinedToolVersion + body: + properties: + version_description: + type: optional + docs: An optional description of the Tool version. + description: + type: optional + docs: >- + An optional description of what the Tool does, used by the + supplemental LLM to choose when and how to call the function. 
+ parameters: + type: string + docs: >- + Stringified JSON defining the parameters used by this version of + the Tool. + + + These parameters define the inputs needed for the Tool’s + execution, including the expected data type and description for + each input field. Structured as a stringified JSON schema, this + format ensures the Tool receives data in the expected format. + fallback_content: + type: optional + docs: >- + Optional text passed to the supplemental LLM in place of the + tool call result. The LLM then uses this text to generate a + response back to the user, ensuring continuity in the + conversation if the Tool errors. + content-type: application/json + response: + docs: Created + type: optional + errors: + - root.BadRequestError + examples: + - path-parameters: + id: 00183a3f-79ba-413d-9f3b-609864268bea + request: + parameters: >- + { "type": "object", "properties": { "location": { "type": + "string", "description": "The city and state, e.g. San Francisco, + CA" }, "format": { "type": "string", "enum": ["celsius", + "fahrenheit", "kelvin"], "description": "The temperature unit to + use. Infer this from the users location." } }, "required": + ["location", "format"] } + version_description: >- + Fetches current weather and uses celsius, fahrenheit, or kelvin + based on location of user. + fallback_content: Unable to fetch current weather. + description: This tool is for getting the current weather. + response: + body: + tool_type: FUNCTION + id: 00183a3f-79ba-413d-9f3b-609864268bea + version: 1 + version_type: FIXED + version_description: >- + Fetches current weather and uses celsius, fahrenheit, or kelvin + based on location of user. + name: get_current_weather + created_on: 1715277014228 + modified_on: 1715277602313 + fallback_content: Unable to fetch current weather. + description: This tool is for getting the current weather. + parameters: >- + { "type": "object", "properties": { "location": { "type": + "string", "description": "The city and state, e.g. San + Francisco, CA" }, "format": { "type": "string", "enum": + ["celsius", "fahrenheit", "kelvin"], "description": "The + temperature unit to use. Infer this from the users location." } + }, "required": ["location", "format"] } + delete-tool: + path: /v0/evi/tools/{id} + method: DELETE + auth: true + docs: >- + Deletes a **Tool** and its versions. + + + Refer to our [tool + use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide + for comprehensive instructions on defining and integrating tools into + EVI. + path-parameters: + id: + type: string + docs: Identifier for a Tool. Formatted as a UUID. + display-name: Delete tool + errors: + - root.BadRequestError + examples: + - path-parameters: + id: 00183a3f-79ba-413d-9f3b-609864268bea + update-tool-name: + path: /v0/evi/tools/{id} + method: PATCH + auth: true + docs: >- + Updates the name of a **Tool**. + + + Refer to our [tool + use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide + for comprehensive instructions on defining and integrating tools into + EVI. + path-parameters: + id: + type: string + docs: Identifier for a Tool. Formatted as a UUID. + display-name: Update tool name + request: + name: PostedUserDefinedToolName + body: + properties: + name: + type: string + docs: Name applied to all versions of a particular Tool. 
+ content-type: application/json + response: + docs: Success + type: text + errors: + - root.BadRequestError + examples: + - path-parameters: + id: 00183a3f-79ba-413d-9f3b-609864268bea + request: + name: get_current_temperature + get-tool-version: + path: /v0/evi/tools/{id}/version/{version} + method: GET + auth: true + docs: >- + Fetches a specified version of a **Tool**. + + + Refer to our [tool + use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide + for comprehensive instructions on defining and integrating tools into + EVI. + path-parameters: + id: + type: string + docs: Identifier for a Tool. Formatted as a UUID. + version: + type: integer + docs: >- + Version number for a Tool. + + + Tools, Configs, Custom Voices, and Prompts are versioned. This + versioning system supports iterative development, allowing you to + progressively refine tools and revert to previous versions if + needed. + + + Version numbers are integer values representing different iterations + of the Tool. Each update to the Tool increments its version number. + display-name: Get tool version + response: + docs: Success + type: optional + errors: + - root.BadRequestError + examples: + - path-parameters: + id: 00183a3f-79ba-413d-9f3b-609864268bea + version: 1 + response: + body: + tool_type: FUNCTION + id: 00183a3f-79ba-413d-9f3b-609864268bea + version: 1 + version_type: FIXED + version_description: >- + Fetches current weather and uses celsius, fahrenheit, or kelvin + based on location of user. + name: string + created_on: 1715277014228 + modified_on: 1715277602313 + fallback_content: Unable to fetch current weather. + description: This tool is for getting the current weather. + parameters: >- + { "type": "object", "properties": { "location": { "type": + "string", "description": "The city and state, e.g. San + Francisco, CA" }, "format": { "type": "string", "enum": + ["celsius", "fahrenheit", "kelvin"], "description": "The + temperature unit to use. Infer this from the users location." } + }, "required": ["location", "format"] } + delete-tool-version: + path: /v0/evi/tools/{id}/version/{version} + method: DELETE + auth: true + docs: >- + Deletes a specified version of a **Tool**. + + + Refer to our [tool + use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide + for comprehensive instructions on defining and integrating tools into + EVI. + path-parameters: + id: + type: string + docs: Identifier for a Tool. Formatted as a UUID. + version: + type: integer + docs: >- + Version number for a Tool. + + + Tools, Configs, Custom Voices, and Prompts are versioned. This + versioning system supports iterative development, allowing you to + progressively refine tools and revert to previous versions if + needed. + + + Version numbers are integer values representing different iterations + of the Tool. Each update to the Tool increments its version number. + display-name: Delete tool version + errors: + - root.BadRequestError + examples: + - path-parameters: + id: 00183a3f-79ba-413d-9f3b-609864268bea + version: 1 + update-tool-description: + path: /v0/evi/tools/{id}/version/{version} + method: PATCH + auth: true + docs: >- + Updates the description of a specified **Tool** version. + + + Refer to our [tool + use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide + for comprehensive instructions on defining and integrating tools into + EVI. + path-parameters: + id: + type: string + docs: Identifier for a Tool. Formatted as a UUID. 
+ version: + type: integer + docs: >- + Version number for a Tool. + + + Tools, Configs, Custom Voices, and Prompts are versioned. This + versioning system supports iterative development, allowing you to + progressively refine tools and revert to previous versions if + needed. + + + Version numbers are integer values representing different iterations + of the Tool. Each update to the Tool increments its version number. + display-name: Update tool description + request: + name: PostedUserDefinedToolVersionDescription + body: + properties: + version_description: + type: optional + docs: An optional description of the Tool version. + content-type: application/json + response: + docs: Success + type: optional + errors: + - root.BadRequestError + examples: + - path-parameters: + id: 00183a3f-79ba-413d-9f3b-609864268bea + version: 1 + request: + version_description: >- + Fetches current temperature, precipitation, wind speed, AQI, and + other weather conditions. Uses Celsius, Fahrenheit, or kelvin + depending on user's region. + response: + body: + tool_type: FUNCTION + id: 00183a3f-79ba-413d-9f3b-609864268bea + version: 1 + version_type: FIXED + version_description: >- + Fetches current temperature, precipitation, wind speed, AQI, and + other weather conditions. Uses Celsius, Fahrenheit, or kelvin + depending on user's region. + name: string + created_on: 1715277014228 + modified_on: 1715277602313 + fallback_content: Unable to fetch current weather. + description: This tool is for getting the current weather. + parameters: >- + { "type": "object", "properties": { "location": { "type": + "string", "description": "The city and state, e.g. San + Francisco, CA" }, "format": { "type": "string", "enum": + ["celsius", "fahrenheit", "kelvin"], "description": "The + temperature unit to use. Infer this from the users location." } + }, "required": ["location", "format"] } + source: + openapi: stenographer-openapi.json diff --git a/.mock/definition/expression-measurement/batch/__package__.yml b/.mock/definition/expression-measurement/batch/__package__.yml index 3e0f931..53803c5 100644 --- a/.mock/definition/expression-measurement/batch/__package__.yml +++ b/.mock/definition/expression-measurement/batch/__package__.yml @@ -1,1768 +1,1768 @@ service: - auth: false - base-path: "" - endpoints: - list-jobs: - path: /v0/batch/jobs - method: GET - auth: true - docs: Sort and filter jobs. - display-name: List jobs - request: - name: BatchListJobsRequest - query-parameters: - limit: - type: optional - docs: The maximum number of jobs to include in the response. - status: - type: optional - allow-multiple: true - docs: >- - Include only jobs of this status in the response. There are four - possible statuses: - - - - `QUEUED`: The job has been received and is waiting to be - processed. - - - - `IN_PROGRESS`: The job is currently being processed. - - - - `COMPLETED`: The job has finished processing. - - - - `FAILED`: The job encountered an error and could not be - completed successfully. - when: - type: optional - docs: >- - Specify whether to include jobs created before or after a given - `timestamp_ms`. - timestamp_ms: - type: optional - docs: |- - Provide a timestamp in milliseconds to filter jobs. - - When combined with the `when` parameter, you can filter jobs before or after the given timestamp. Defaults to the current Unix timestamp if one is not provided. - sort_by: - type: optional - docs: >- - Specify which timestamp to sort the jobs by. 
- - - - `created`: Sort jobs by the time of creation, indicated by - `created_timestamp_ms`. - - - - `started`: Sort jobs by the time processing started, indicated - by `started_timestamp_ms`. - - - - `ended`: Sort jobs by the time processing ended, indicated by - `ended_timestamp_ms`. - direction: - type: optional - docs: >- - Specify the order in which to sort the jobs. Defaults to - descending order. - - - - `asc`: Sort in ascending order (chronological, with the oldest - records first). - - - - `desc`: Sort in descending order (reverse-chronological, with - the newest records first). - response: - docs: "" - type: list - examples: - - response: - body: - - job_id: job_id - request: - files: - - filename: filename - md5sum: md5sum - content_type: content_type - models: - burst: {} - face: - fps_pred: 3 - identify_faces: false - min_face_size: 60 - prob_threshold: 0.99 - save_faces: false - facemesh: {} - language: - granularity: word - identify_speakers: false - ner: - identify_speakers: false - prosody: - granularity: utterance - identify_speakers: false - notify: true - text: [] - urls: - - https://hume-tutorials.s3.amazonaws.com/faces.zip - state: - created_timestamp_ms: 1712587158717 - ended_timestamp_ms: 1712587159274 - num_errors: 0 - num_predictions: 10 - started_timestamp_ms: 1712587158800 - status: COMPLETED - type: INFERENCE - start-inference-job: - path: /v0/batch/jobs - method: POST - auth: true - docs: Start a new measurement inference job. - display-name: Start inference job - request: - body: InferenceBaseRequest - content-type: application/json - response: - docs: "" - type: JobId - property: job_id - examples: - - request: - urls: - - https://hume-tutorials.s3.amazonaws.com/faces.zip - notify: true - response: - body: - job_id: job_id - get-job-details: - path: /v0/batch/jobs/{id} - method: GET - auth: true - docs: Get the request details and state of a given job. - path-parameters: - id: - type: string - docs: The unique identifier for the job. - display-name: Get job details - response: - docs: "" - type: UnionJob - examples: - - name: Inference - path-parameters: - id: job_id - response: - body: - type: INFERENCE - job_id: job_id - request: - files: [] - models: - burst: {} - face: - fps_pred: 3 - identify_faces: false - min_face_size: 60 - prob_threshold: 0.99 - save_faces: false - facemesh: {} - language: - granularity: word - identify_speakers: false - ner: - identify_speakers: false - prosody: - granularity: utterance - identify_speakers: false - notify: true - text: [] - urls: - - https://hume-tutorials.s3.amazonaws.com/faces.zip - state: - created_timestamp_ms: 1712590457884 - ended_timestamp_ms: 1712590462252 - num_errors: 0 - num_predictions: 10 - started_timestamp_ms: 1712590457995 - status: COMPLETED - get-job-predictions: - path: /v0/batch/jobs/{id}/predictions - method: GET - auth: true - docs: Get the JSON predictions of a completed inference job. - path-parameters: - id: - type: string - docs: The unique identifier for the job. 
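Start inference job returns only a `job_id`, and the job then moves from `QUEUED` through `IN_PROGRESS` to `COMPLETED` or `FAILED`, so callers typically poll Get job details until the state settles. A rough polling sketch against the batch endpoints defined in this file — the base URL, auth header, `HUME_API_KEY` variable, empty `face` model settings, and 5-second poll interval are all assumptions of this example, not requirements of the API:

```python
import os
import time
import requests

BASE_URL = "https://api.hume.ai"                           # assumed Production environment
HEADERS = {"X-Hume-Api-Key": os.environ["HUME_API_KEY"]}   # assumed auth header name

# POST /v0/batch/jobs: start a job against a hosted archive of images.
start = requests.post(
    f"{BASE_URL}/v0/batch/jobs",
    headers=HEADERS,
    json={
        "urls": ["https://hume-tutorials.s3.amazonaws.com/faces.zip"],
        "models": {"face": {}},  # request the Face model with default settings
        "notify": True,
    },
)
start.raise_for_status()
job_id = start.json()["job_id"]

# GET /v0/batch/jobs/{id}: poll until the job leaves QUEUED / IN_PROGRESS.
while True:
    details = requests.get(f"{BASE_URL}/v0/batch/jobs/{job_id}", headers=HEADERS)
    details.raise_for_status()
    status = details.json()["state"]["status"]
    if status in ("COMPLETED", "FAILED"):
        break
    time.sleep(5)

print(job_id, status)
```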
- display-name: Get job predictions - response: - docs: "" - type: list - examples: - - path-parameters: - id: job_id - response: - body: - - source: - type: url - url: https://hume-tutorials.s3.amazonaws.com/faces.zip - results: - predictions: - - file: faces/100.jpg - models: - face: - grouped_predictions: - - id: unknown - predictions: - - frame: 0 - time: 0 - prob: 0.9994111061096191 - box: - x: 1187.885986328125 - "y": 1397.697509765625 - w: 1401.668701171875 - h: 1961.424560546875 - emotions: - - name: Admiration - score: 0.10722749680280685 - - name: Adoration - score: 0.06395940482616425 - - name: Aesthetic Appreciation - score: 0.05811462551355362 - - name: Amusement - score: 0.14187128841876984 - - name: Anger - score: 0.02804684266448021 - - name: Anxiety - score: 0.2713485360145569 - - name: Awe - score: 0.33812594413757324 - - name: Awkwardness - score: 0.1745193600654602 - - name: Boredom - score: 0.23600080609321594 - - name: Calmness - score: 0.18988418579101562 - - name: Concentration - score: 0.44288986921310425 - - name: Confusion - score: 0.39346569776535034 - - name: Contemplation - score: 0.31002455949783325 - - name: Contempt - score: 0.048870109021663666 - - name: Contentment - score: 0.0579497292637825 - - name: Craving - score: 0.06544201076030731 - - name: Desire - score: 0.05526508390903473 - - name: Determination - score: 0.08590991795063019 - - name: Disappointment - score: 0.19508258998394012 - - name: Disgust - score: 0.031529419124126434 - - name: Distress - score: 0.23210826516151428 - - name: Doubt - score: 0.3284550905227661 - - name: Ecstasy - score: 0.040716782212257385 - - name: Embarrassment - score: 0.1467227339744568 - - name: Empathic Pain - score: 0.07633581757545471 - - name: Entrancement - score: 0.16245244443416595 - - name: Envy - score: 0.03267110139131546 - - name: Excitement - score: 0.10656816512346268 - - name: Fear - score: 0.3115977346897125 - - name: Guilt - score: 0.11615975946187973 - - name: Horror - score: 0.19795553386211395 - - name: Interest - score: 0.3136432468891144 - - name: Joy - score: 0.06285581737756729 - - name: Love - score: 0.06339752674102783 - - name: Nostalgia - score: 0.05866732448339462 - - name: Pain - score: 0.07684041559696198 - - name: Pride - score: 0.026822954416275024 - - name: Realization - score: 0.30000734329223633 - - name: Relief - score: 0.04414166510105133 - - name: Romance - score: 0.042728863656520844 - - name: Sadness - score: 0.14773206412792206 - - name: Satisfaction - score: 0.05902980640530586 - - name: Shame - score: 0.08103451132774353 - - name: Surprise (negative) - score: 0.25518184900283813 - - name: Surprise (positive) - score: 0.28845661878585815 - - name: Sympathy - score: 0.062488824129104614 - - name: Tiredness - score: 0.1559651643037796 - - name: Triumph - score: 0.01955239288508892 - errors: [] - get-job-artifacts: - path: /v0/batch/jobs/{id}/artifacts - method: GET - auth: true - docs: Get the artifacts ZIP of a completed inference job. - path-parameters: - id: - type: string - docs: The unique identifier for the job. - display-name: Get job artifacts - response: - docs: "" - type: file - start-inference-job-from-local-file: - path: /v0/batch/jobs - method: POST - auth: true - docs: Start a new batch inference job. - display-name: Start inference job from local file - request: - name: BatchStartInferenceJobFromLocalFileRequest - body: - properties: - json: - type: optional - docs: >- - Stringified JSON object containing the inference job - configuration. 
- file: - type: list - docs: >- - Local media files (see recommended input filetypes) to be - processed. - - - If you wish to supply more than 100 files, consider providing - them as an archive (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`). - content-type: multipart/form-data - response: - docs: "" - type: JobId - property: job_id - examples: - - request: {} - response: - body: - job_id: job_id - source: - openapi: batch-files-openapi.yml -types: - Alternative: literal<"language_only"> - Bcp47Tag: - enum: - - zh - - da - - nl - - en - - value: en-AU - name: EnAu - - value: en-IN - name: EnIn - - value: en-NZ - name: EnNz - - value: en-GB - name: EnGb - - fr - - value: fr-CA - name: FrCa - - de - - hi - - value: hi-Latn - name: HiLatn - - id - - it - - ja - - ko - - "no" - - pl - - pt - - value: pt-BR - name: PtBr - - value: pt-PT - name: PtPt - - ru - - es - - value: es-419 - name: Es419 - - sv - - ta - - tr - - uk - source: - openapi: batch-openapi.json - BoundingBox: - docs: A bounding box around a face. - properties: - x: - type: double - docs: x-coordinate of bounding box top left corner. - "y": - type: double - docs: y-coordinate of bounding box top left corner. - w: - type: double - docs: Bounding box width. - h: - type: double - docs: Bounding box height. - source: - openapi: batch-openapi.json - BurstPrediction: - properties: - time: TimeInterval - emotions: - docs: A high-dimensional embedding in emotion space. - type: list - descriptions: - docs: Modality-specific descriptive features and their scores. - type: list - source: - openapi: batch-openapi.json - Classification: map - CompletedEmbeddingGeneration: - properties: - created_timestamp_ms: - type: long - docs: When this job was created (Unix timestamp in milliseconds). - started_timestamp_ms: - type: long - docs: When this job started (Unix timestamp in milliseconds). - ended_timestamp_ms: - type: long - docs: When this job ended (Unix timestamp in milliseconds). - source: - openapi: batch-openapi.json - CompletedInference: - properties: - created_timestamp_ms: - type: long - docs: When this job was created (Unix timestamp in milliseconds). - started_timestamp_ms: - type: long - docs: When this job started (Unix timestamp in milliseconds). - ended_timestamp_ms: - type: long - docs: When this job ended (Unix timestamp in milliseconds). - num_predictions: - type: uint64 - docs: The number of predictions that were generated by this job. - num_errors: - type: uint64 - docs: The number of errors that occurred while running this job. - source: - openapi: batch-openapi.json - CompletedTlInference: - properties: - created_timestamp_ms: - type: long - docs: When this job was created (Unix timestamp in milliseconds). - started_timestamp_ms: - type: long - docs: When this job started (Unix timestamp in milliseconds). - ended_timestamp_ms: - type: long - docs: When this job ended (Unix timestamp in milliseconds). - num_predictions: - type: uint64 - docs: The number of predictions that were generated by this job. - num_errors: - type: uint64 - docs: The number of errors that occurred while running this job. - source: - openapi: batch-openapi.json - CompletedTraining: - properties: - created_timestamp_ms: - type: long - docs: When this job was created (Unix timestamp in milliseconds). - started_timestamp_ms: - type: long - docs: When this job started (Unix timestamp in milliseconds). - ended_timestamp_ms: - type: long - docs: When this job ended (Unix timestamp in milliseconds). 
- custom_model: TrainingCustomModel - alternatives: optional> - source: - openapi: batch-openapi.json - CustomModelPrediction: - properties: - output: map - error: string - task_type: string - source: - openapi: batch-openapi.json - CustomModelRequest: - properties: - name: string - description: optional - tags: optional> - source: - openapi: batch-openapi.json - Dataset: - discriminated: false - union: - - DatasetId - - DatasetVersionId - source: - openapi: batch-openapi.json - DatasetId: - properties: - id: - type: string - validation: - format: uuid - source: - openapi: batch-openapi.json - DatasetVersionId: - properties: - version_id: - type: string - validation: - format: uuid - source: - openapi: batch-openapi.json - DescriptionsScore: - properties: - name: - type: string - docs: Name of the descriptive feature being expressed. - score: - type: float - docs: Embedding value for the descriptive feature being expressed. - source: - openapi: batch-openapi.json - Direction: - enum: - - asc - - desc - source: - openapi: batch-openapi.json - EmbeddingGenerationBaseRequest: - properties: - registry_file_details: - type: optional> - docs: File ID and File URL pairs for an asset registry file - source: - openapi: batch-openapi.json - EmotionScore: - properties: - name: - type: string - docs: Name of the emotion being expressed. - score: - type: float - docs: Embedding value for the emotion being expressed. - source: - openapi: batch-openapi.json - Error: - properties: - message: - type: string - docs: An error message. + auth: false + base-path: '' + endpoints: + list-jobs: + path: /v0/batch/jobs + method: GET + auth: true + docs: Sort and filter jobs. + display-name: List jobs + request: + name: BatchListJobsRequest + query-parameters: + limit: + type: optional + docs: The maximum number of jobs to include in the response. + status: + type: optional + allow-multiple: true + docs: >- + Include only jobs of this status in the response. There are four + possible statuses: + + + - `QUEUED`: The job has been received and is waiting to be + processed. + + + - `IN_PROGRESS`: The job is currently being processed. + + + - `COMPLETED`: The job has finished processing. + + + - `FAILED`: The job encountered an error and could not be + completed successfully. + when: + type: optional + docs: >- + Specify whether to include jobs created before or after a given + `timestamp_ms`. + timestamp_ms: + type: optional + docs: |- + Provide a timestamp in milliseconds to filter jobs. + + When combined with the `when` parameter, you can filter jobs before or after the given timestamp. Defaults to the current Unix timestamp if one is not provided. + sort_by: + type: optional + docs: >- + Specify which timestamp to sort the jobs by. + + + - `created`: Sort jobs by the time of creation, indicated by + `created_timestamp_ms`. + + + - `started`: Sort jobs by the time processing started, indicated + by `started_timestamp_ms`. + + + - `ended`: Sort jobs by the time processing ended, indicated by + `ended_timestamp_ms`. + direction: + type: optional + docs: >- + Specify the order in which to sort the jobs. Defaults to + descending order. + + + - `asc`: Sort in ascending order (chronological, with the oldest + records first). + + + - `desc`: Sort in descending order (reverse-chronological, with + the newest records first). 
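The List jobs filters just described (`limit`, `status`, `when`/`timestamp_ms`, `sort_by`, `direction`) are all plain query parameters, and `status` may be repeated because it is marked `allow-multiple`. A small sketch of a filtered, sorted listing, under the same assumed base URL, auth header, and `HUME_API_KEY` variable as the other examples:

```python
import os
import requests

BASE_URL = "https://api.hume.ai"                           # assumed Production environment
HEADERS = {"X-Hume-Api-Key": os.environ["HUME_API_KEY"]}   # assumed auth header name

# GET /v0/batch/jobs: newest completed jobs first, capped at 10 results.
resp = requests.get(
    f"{BASE_URL}/v0/batch/jobs",
    headers=HEADERS,
    params={
        "limit": 10,
        "status": "COMPLETED",   # pass a list, e.g. ["COMPLETED", "FAILED"], to repeat it
        "sort_by": "created",    # sort on created_timestamp_ms
        "direction": "desc",     # reverse-chronological (the default)
    },
)
resp.raise_for_status()
for job in resp.json():
    print(job["job_id"], job["state"]["status"], job["state"]["created_timestamp_ms"])
```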
+ response: + docs: '' + type: list + examples: + - response: + body: + - job_id: job_id + request: + files: + - filename: filename + md5sum: md5sum + content_type: content_type + models: + burst: {} + face: + fps_pred: 3 + identify_faces: false + min_face_size: 60 + prob_threshold: 0.99 + save_faces: false + facemesh: {} + language: + granularity: word + identify_speakers: false + ner: + identify_speakers: false + prosody: + granularity: utterance + identify_speakers: false + notify: true + text: [] + urls: + - https://hume-tutorials.s3.amazonaws.com/faces.zip + state: + created_timestamp_ms: 1712587158717 + ended_timestamp_ms: 1712587159274 + num_errors: 0 + num_predictions: 10 + started_timestamp_ms: 1712587158800 + status: COMPLETED + type: INFERENCE + start-inference-job: + path: /v0/batch/jobs + method: POST + auth: true + docs: Start a new measurement inference job. + display-name: Start inference job + request: + body: InferenceBaseRequest + content-type: application/json + response: + docs: '' + type: JobId + property: job_id + examples: + - request: + urls: + - https://hume-tutorials.s3.amazonaws.com/faces.zip + notify: true + response: + body: + job_id: job_id + get-job-details: + path: /v0/batch/jobs/{id} + method: GET + auth: true + docs: Get the request details and state of a given job. + path-parameters: + id: + type: string + docs: The unique identifier for the job. + display-name: Get job details + response: + docs: '' + type: UnionJob + examples: + - name: Inference + path-parameters: + id: job_id + response: + body: + type: INFERENCE + job_id: job_id + request: + files: [] + models: + burst: {} + face: + fps_pred: 3 + identify_faces: false + min_face_size: 60 + prob_threshold: 0.99 + save_faces: false + facemesh: {} + language: + granularity: word + identify_speakers: false + ner: + identify_speakers: false + prosody: + granularity: utterance + identify_speakers: false + notify: true + text: [] + urls: + - https://hume-tutorials.s3.amazonaws.com/faces.zip + state: + created_timestamp_ms: 1712590457884 + ended_timestamp_ms: 1712590462252 + num_errors: 0 + num_predictions: 10 + started_timestamp_ms: 1712590457995 + status: COMPLETED + get-job-predictions: + path: /v0/batch/jobs/{id}/predictions + method: GET + auth: true + docs: Get the JSON predictions of a completed inference job. + path-parameters: + id: + type: string + docs: The unique identifier for the job. 
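The predictions payload returned by `/v0/batch/jobs/{id}/predictions` is deeply nested, as the example responses in this file show: each source holds per-file predictions, and the Face model groups frame-level results by face identifier, each with a full embedding of emotion scores. A sketch that walks that structure and reports the top-scoring emotion per face and frame (same assumed base URL, auth header, and `HUME_API_KEY` variable; `job_id` is the placeholder used in the spec's examples):

```python
import os
import requests

BASE_URL = "https://api.hume.ai"                           # assumed Production environment
HEADERS = {"X-Hume-Api-Key": os.environ["HUME_API_KEY"]}   # assumed auth header name

job_id = "job_id"  # placeholder from the spec's examples

resp = requests.get(f"{BASE_URL}/v0/batch/jobs/{job_id}/predictions", headers=HEADERS)
resp.raise_for_status()

# Walk the nested structure from the example response: source -> results ->
# predictions -> models.face -> grouped_predictions -> per-frame predictions.
for source in resp.json():
    for prediction in source["results"]["predictions"]:
        face = prediction["models"].get("face")
        if not face:
            continue  # this file was not processed by the Face model
        for group in face["grouped_predictions"]:
            for frame in group["predictions"]:
                top = max(frame["emotions"], key=lambda e: e["score"])
                print(prediction["file"], group["id"], top["name"], round(top["score"], 3))
```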
+ display-name: Get job predictions + response: + docs: '' + type: list + examples: + - path-parameters: + id: job_id + response: + body: + - source: + type: url + url: https://hume-tutorials.s3.amazonaws.com/faces.zip + results: + predictions: + - file: faces/100.jpg + models: + face: + grouped_predictions: + - id: unknown + predictions: + - frame: 0 + time: 0 + prob: 0.9994111061096191 + box: + x: 1187.885986328125 + 'y': 1397.697509765625 + w: 1401.668701171875 + h: 1961.424560546875 + emotions: + - name: Admiration + score: 0.10722749680280685 + - name: Adoration + score: 0.06395940482616425 + - name: Aesthetic Appreciation + score: 0.05811462551355362 + - name: Amusement + score: 0.14187128841876984 + - name: Anger + score: 0.02804684266448021 + - name: Anxiety + score: 0.2713485360145569 + - name: Awe + score: 0.33812594413757324 + - name: Awkwardness + score: 0.1745193600654602 + - name: Boredom + score: 0.23600080609321594 + - name: Calmness + score: 0.18988418579101562 + - name: Concentration + score: 0.44288986921310425 + - name: Confusion + score: 0.39346569776535034 + - name: Contemplation + score: 0.31002455949783325 + - name: Contempt + score: 0.048870109021663666 + - name: Contentment + score: 0.0579497292637825 + - name: Craving + score: 0.06544201076030731 + - name: Desire + score: 0.05526508390903473 + - name: Determination + score: 0.08590991795063019 + - name: Disappointment + score: 0.19508258998394012 + - name: Disgust + score: 0.031529419124126434 + - name: Distress + score: 0.23210826516151428 + - name: Doubt + score: 0.3284550905227661 + - name: Ecstasy + score: 0.040716782212257385 + - name: Embarrassment + score: 0.1467227339744568 + - name: Empathic Pain + score: 0.07633581757545471 + - name: Entrancement + score: 0.16245244443416595 + - name: Envy + score: 0.03267110139131546 + - name: Excitement + score: 0.10656816512346268 + - name: Fear + score: 0.3115977346897125 + - name: Guilt + score: 0.11615975946187973 + - name: Horror + score: 0.19795553386211395 + - name: Interest + score: 0.3136432468891144 + - name: Joy + score: 0.06285581737756729 + - name: Love + score: 0.06339752674102783 + - name: Nostalgia + score: 0.05866732448339462 + - name: Pain + score: 0.07684041559696198 + - name: Pride + score: 0.026822954416275024 + - name: Realization + score: 0.30000734329223633 + - name: Relief + score: 0.04414166510105133 + - name: Romance + score: 0.042728863656520844 + - name: Sadness + score: 0.14773206412792206 + - name: Satisfaction + score: 0.05902980640530586 + - name: Shame + score: 0.08103451132774353 + - name: Surprise (negative) + score: 0.25518184900283813 + - name: Surprise (positive) + score: 0.28845661878585815 + - name: Sympathy + score: 0.062488824129104614 + - name: Tiredness + score: 0.1559651643037796 + - name: Triumph + score: 0.01955239288508892 + errors: [] + get-job-artifacts: + path: /v0/batch/jobs/{id}/artifacts + method: GET + auth: true + docs: Get the artifacts ZIP of a completed inference job. + path-parameters: + id: + type: string + docs: The unique identifier for the job. + display-name: Get job artifacts + response: + docs: '' + type: file + start-inference-job-from-local-file: + path: /v0/batch/jobs + method: POST + auth: true + docs: Start a new batch inference job. + display-name: Start inference job from local file + request: + name: BatchStartInferenceJobFromLocalFileRequest + body: + properties: + json: + type: optional + docs: >- + Stringified JSON object containing the inference job + configuration. 
file: - type: string - docs: A file path relative to the top level source URL or file. - source: - openapi: batch-openapi.json - EvaluationArgs: - properties: - validation: optional - source: - openapi: batch-openapi.json - Face: + type: list + docs: >- + Local media files (see recommended input filetypes) to be + processed. + + + If you wish to supply more than 100 files, consider providing + them as an archive (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`). + content-type: multipart/form-data + response: + docs: '' + type: JobId + property: job_id + examples: + - request: {} + response: + body: + job_id: job_id + source: + openapi: batch-files-openapi.yml +types: + Alternative: literal<"language_only"> + Bcp47Tag: + enum: + - zh + - da + - nl + - en + - value: en-AU + name: EnAu + - value: en-IN + name: EnIn + - value: en-NZ + name: EnNz + - value: en-GB + name: EnGb + - fr + - value: fr-CA + name: FrCa + - de + - hi + - value: hi-Latn + name: HiLatn + - id + - it + - ja + - ko + - 'no' + - pl + - pt + - value: pt-BR + name: PtBr + - value: pt-PT + name: PtPt + - ru + - es + - value: es-419 + name: Es419 + - sv + - ta + - tr + - uk + source: + openapi: batch-openapi.json + BoundingBox: + docs: A bounding box around a face. + properties: + x: + type: double + docs: x-coordinate of bounding box top left corner. + 'y': + type: double + docs: y-coordinate of bounding box top left corner. + w: + type: double + docs: Bounding box width. + h: + type: double + docs: Bounding box height. + source: + openapi: batch-openapi.json + BurstPrediction: + properties: + time: TimeInterval + emotions: + docs: A high-dimensional embedding in emotion space. + type: list + descriptions: + docs: Modality-specific descriptive features and their scores. + type: list + source: + openapi: batch-openapi.json + Classification: map + CompletedEmbeddingGeneration: + properties: + created_timestamp_ms: + type: long + docs: When this job was created (Unix timestamp in milliseconds). + started_timestamp_ms: + type: long + docs: When this job started (Unix timestamp in milliseconds). + ended_timestamp_ms: + type: long + docs: When this job ended (Unix timestamp in milliseconds). + source: + openapi: batch-openapi.json + CompletedInference: + properties: + created_timestamp_ms: + type: long + docs: When this job was created (Unix timestamp in milliseconds). + started_timestamp_ms: + type: long + docs: When this job started (Unix timestamp in milliseconds). + ended_timestamp_ms: + type: long + docs: When this job ended (Unix timestamp in milliseconds). + num_predictions: + type: uint64 + docs: The number of predictions that were generated by this job. + num_errors: + type: uint64 + docs: The number of errors that occurred while running this job. + source: + openapi: batch-openapi.json + CompletedTlInference: + properties: + created_timestamp_ms: + type: long + docs: When this job was created (Unix timestamp in milliseconds). + started_timestamp_ms: + type: long + docs: When this job started (Unix timestamp in milliseconds). + ended_timestamp_ms: + type: long + docs: When this job ended (Unix timestamp in milliseconds). + num_predictions: + type: uint64 + docs: The number of predictions that were generated by this job. + num_errors: + type: uint64 + docs: The number of errors that occurred while running this job. + source: + openapi: batch-openapi.json + CompletedTraining: + properties: + created_timestamp_ms: + type: long + docs: When this job was created (Unix timestamp in milliseconds). 
+ started_timestamp_ms: + type: long + docs: When this job started (Unix timestamp in milliseconds). + ended_timestamp_ms: + type: long + docs: When this job ended (Unix timestamp in milliseconds). + custom_model: TrainingCustomModel + alternatives: optional> + source: + openapi: batch-openapi.json + CustomModelPrediction: + properties: + output: map + error: string + task_type: string + source: + openapi: batch-openapi.json + CustomModelRequest: + properties: + name: string + description: optional + tags: optional> + source: + openapi: batch-openapi.json + Dataset: + discriminated: false + union: + - DatasetId + - DatasetVersionId + source: + openapi: batch-openapi.json + DatasetId: + properties: + id: + type: string + validation: + format: uuid + source: + openapi: batch-openapi.json + DatasetVersionId: + properties: + version_id: + type: string + validation: + format: uuid + source: + openapi: batch-openapi.json + DescriptionsScore: + properties: + name: + type: string + docs: Name of the descriptive feature being expressed. + score: + type: float + docs: Embedding value for the descriptive feature being expressed. + source: + openapi: batch-openapi.json + Direction: + enum: + - asc + - desc + source: + openapi: batch-openapi.json + EmbeddingGenerationBaseRequest: + properties: + registry_file_details: + type: optional> + docs: File ID and File URL pairs for an asset registry file + source: + openapi: batch-openapi.json + EmotionScore: + properties: + name: + type: string + docs: Name of the emotion being expressed. + score: + type: float + docs: Embedding value for the emotion being expressed. + source: + openapi: batch-openapi.json + Error: + properties: + message: + type: string + docs: An error message. + file: + type: string + docs: A file path relative to the top level source URL or file. + source: + openapi: batch-openapi.json + EvaluationArgs: + properties: + validation: optional + source: + openapi: batch-openapi.json + Face: + docs: >- + The Facial Emotional Expression model analyzes human facial expressions in + images and videos. Results will be provided per frame for video files. + + + Recommended input file types: `.png`, `.jpeg`, `.mp4` + properties: + fps_pred: + type: optional docs: >- - The Facial Emotional Expression model analyzes human facial expressions in - images and videos. Results will be provided per frame for video files. - - - Recommended input file types: `.png`, `.jpeg`, `.mp4` - properties: - fps_pred: - type: optional - docs: >- - Number of frames per second to process. Other frames will be omitted - from the response. Set to `0` to process every frame. - default: 3 - prob_threshold: - type: optional - docs: >- - Face detection probability threshold. Faces detected with a - probability less than this threshold will be omitted from the - response. - default: 0.99 - validation: - min: 0 - max: 1 - identify_faces: - type: optional - docs: >- - Whether to return identifiers for faces across frames. If `true`, - unique identifiers will be assigned to face bounding boxes to - differentiate different faces. If `false`, all faces will be tagged - with an `unknown` ID. - default: false - min_face_size: - type: optional - docs: >- - Minimum bounding box side length in pixels to treat as a face. Faces - detected with a bounding box side length in pixels less than this - threshold will be omitted from the response. 
- facs: optional - descriptions: optional - save_faces: - type: optional - docs: >- - Whether to extract and save the detected faces in the artifacts zip - created by each job. - default: false - source: - openapi: batch-openapi.json - FacePrediction: - properties: - frame: - type: uint64 - docs: Frame number - time: - type: double - docs: Time in seconds when face detection occurred. - prob: - type: double - docs: The predicted probability that a detected face was actually a face. - box: BoundingBox - emotions: - docs: A high-dimensional embedding in emotion space. - type: list - facs: - type: optional> - docs: FACS 2.0 features and their scores. - descriptions: - type: optional> - docs: Modality-specific descriptive features and their scores. - source: - openapi: batch-openapi.json - FacemeshPrediction: - properties: - emotions: - docs: A high-dimensional embedding in emotion space. - type: list - source: - openapi: batch-openapi.json - FacsScore: - properties: - name: - type: string - docs: Name of the FACS 2.0 feature being expressed. - score: - type: float - docs: Embedding value for the FACS 2.0 feature being expressed. - source: - openapi: batch-openapi.json - Failed: - properties: - created_timestamp_ms: - type: long - docs: When this job was created (Unix timestamp in milliseconds). - started_timestamp_ms: - type: long - docs: When this job started (Unix timestamp in milliseconds). - ended_timestamp_ms: - type: long - docs: When this job ended (Unix timestamp in milliseconds). - message: - type: string - docs: An error message. - source: - openapi: batch-openapi.json - File: - docs: The list of files submitted for analysis. - properties: - filename: - type: optional - docs: The name of the file. - content_type: - type: optional - docs: The content type of the file. - md5sum: - type: string - docs: The MD5 checksum of the file. - source: - openapi: batch-openapi.json - Granularity: - enum: - - word - - sentence - - utterance - - conversational_turn + Number of frames per second to process. Other frames will be omitted + from the response. Set to `0` to process every frame. + default: 3 + prob_threshold: + type: optional docs: >- - The granularity at which to generate predictions. The `granularity` field - is ignored if transcription is not enabled or if the `window` field has - been set. - - - - `word`: At the word level, our model provides a separate output for each - word, offering the most granular insight into emotional expression during - speech. - - - - `sentence`: At the sentence level of granularity, we annotate the - emotional tone of each spoken sentence with our Prosody and Emotional - Language models. - - - - `utterance`: Utterance-level granularity is between word- and - sentence-level. It takes into account natural pauses or breaks in speech, - providing more rapidly updated measures of emotional expression within a - flowing conversation. For text inputs, utterance-level granularity will - produce results identical to sentence-level granularity. - - - - `conversational_turn`: Conversational turn-level granularity provides a - distinct output for each change in speaker. It captures the full sequence - of words and sentences spoken uninterrupted by each person. This approach - provides a higher-level view of the emotional dynamics in a - multi-participant dialogue. For text inputs, specifying conversational - turn-level granularity for our Emotional Language model will produce - results for the entire passage. 
- source: - openapi: batch-openapi.json - GroupedPredictionsBurstPrediction: - properties: - id: - type: string - docs: >- - An automatically generated label to identify individuals in your media - file. Will be `unknown` if you have chosen to disable identification, - or if the model is unable to distinguish between individuals. - predictions: list - source: - openapi: batch-openapi.json - GroupedPredictionsFacePrediction: - properties: - id: - type: string - docs: >- - An automatically generated label to identify individuals in your media - file. Will be `unknown` if you have chosen to disable identification, - or if the model is unable to distinguish between individuals. - predictions: list - source: - openapi: batch-openapi.json - GroupedPredictionsFacemeshPrediction: - properties: - id: - type: string - docs: >- - An automatically generated label to identify individuals in your media - file. Will be `unknown` if you have chosen to disable identification, - or if the model is unable to distinguish between individuals. - predictions: list - source: - openapi: batch-openapi.json - GroupedPredictionsLanguagePrediction: - properties: - id: - type: string - docs: >- - An automatically generated label to identify individuals in your media - file. Will be `unknown` if you have chosen to disable identification, - or if the model is unable to distinguish between individuals. - predictions: list - source: - openapi: batch-openapi.json - GroupedPredictionsNerPrediction: - properties: - id: - type: string - docs: >- - An automatically generated label to identify individuals in your media - file. Will be `unknown` if you have chosen to disable identification, - or if the model is unable to distinguish between individuals. - predictions: list - source: - openapi: batch-openapi.json - GroupedPredictionsProsodyPrediction: - properties: - id: - type: string - docs: >- - An automatically generated label to identify individuals in your media - file. Will be `unknown` if you have chosen to disable identification, - or if the model is unable to distinguish between individuals. - predictions: list - source: - openapi: batch-openapi.json - InProgress: - properties: - created_timestamp_ms: - type: long - docs: When this job was created (Unix timestamp in milliseconds). - started_timestamp_ms: - type: long - docs: When this job started (Unix timestamp in milliseconds). - source: - openapi: batch-openapi.json - InferenceBaseRequest: - properties: - models: - type: optional - docs: >- - Specify the models to use for inference. - - - If this field is not explicitly set, then all models will run by - default. - transcription: optional - urls: - type: optional> - docs: >- - URLs to the media files to be processed. Each must be a valid public - URL to a media file (see recommended input filetypes) or an archive - (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`) of media files. - - - If you wish to supply more than 100 URLs, consider providing them as - an archive (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`). - text: - type: optional> - docs: >- - Text supplied directly to our Emotional Language and NER models for - analysis. - callback_url: - type: optional - docs: >- - If provided, a `POST` request will be made to the URL with the - generated predictions on completion or the error message on failure. - notify: - type: optional - docs: >- - Whether to send an email notification to the user upon job - completion/failure. 
- default: false - source: - openapi: batch-openapi.json - InferencePrediction: - properties: - file: - type: string - docs: A file path relative to the top level source URL or file. - models: ModelsPredictions - source: - openapi: batch-openapi.json - InferenceRequest: - properties: - models: optional - transcription: optional - urls: - type: optional> - docs: >- - URLs to the media files to be processed. Each must be a valid public - URL to a media file (see recommended input filetypes) or an archive - (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`) of media files. - - - If you wish to supply more than 100 URLs, consider providing them as - an archive (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`). - text: - type: optional> - docs: Text to supply directly to our language and NER models. - callback_url: - type: optional - docs: >- - If provided, a `POST` request will be made to the URL with the - generated predictions on completion or the error message on failure. - notify: - type: optional - docs: >- - Whether to send an email notification to the user upon job - completion/failure. - default: false - files: list - source: - openapi: batch-openapi.json - InferenceResults: - properties: - predictions: list - errors: list - source: - openapi: batch-openapi.json - InferenceSourcePredictResult: - properties: - source: Source - results: optional - error: - type: optional - docs: An error message. - source: - openapi: batch-openapi.json - JobEmbeddingGeneration: - properties: - job_id: - type: string - docs: The ID associated with this job. - validation: - format: uuid - user_id: - type: string - validation: - format: uuid - request: EmbeddingGenerationBaseRequest - state: StateEmbeddingGeneration - source: - openapi: batch-openapi.json - JobInference: - properties: - job_id: - type: string - docs: The ID associated with this job. - validation: - format: uuid - request: - type: InferenceRequest - docs: The request that initiated the job. - state: - type: StateInference - docs: The current state of the job. - source: - openapi: batch-openapi.json - JobTlInference: - properties: - job_id: - type: string - docs: The ID associated with this job. - validation: - format: uuid - user_id: - type: string - validation: - format: uuid - request: TlInferenceBaseRequest - state: StateTlInference - source: - openapi: batch-openapi.json - JobTraining: - properties: - job_id: - type: string - docs: The ID associated with this job. - validation: - format: uuid - user_id: - type: string - validation: - format: uuid - request: TrainingBaseRequest - state: StateTraining - source: - openapi: batch-openapi.json - JobId: - properties: - job_id: - type: string - docs: The ID of the started job. - validation: - format: uuid - source: - openapi: batch-files-openapi.yml - Language: + Face detection probability threshold. Faces detected with a + probability less than this threshold will be omitted from the + response. + default: 0.99 + validation: + min: 0 + max: 1 + identify_faces: + type: optional + docs: >- + Whether to return identifiers for faces across frames. If `true`, + unique identifiers will be assigned to face bounding boxes to + differentiate different faces. If `false`, all faces will be tagged + with an `unknown` ID. + default: false + min_face_size: + type: optional docs: >- - The Emotional Language model analyzes passages of text. This also supports - audio and video files by transcribing and then directly analyzing the - transcribed text. 
- - - Recommended input filetypes: `.txt`, `.mp3`, `.wav`, `.mp4` - properties: - granularity: optional - sentiment: optional - toxicity: optional - identify_speakers: - type: optional - docs: >- - Whether to return identifiers for speakers over time. If `true`, - unique identifiers will be assigned to spoken words to differentiate - different speakers. If `false`, all speakers will be tagged with an - `unknown` ID. - default: false - source: - openapi: batch-openapi.json - LanguagePrediction: - properties: - text: - type: string - docs: A segment of text (like a word or a sentence). - position: PositionInterval - time: optional - confidence: - type: optional - docs: >- - Value between `0.0` and `1.0` that indicates our transcription model's - relative confidence in this text. - speaker_confidence: - type: optional - docs: >- - Value between `0.0` and `1.0` that indicates our transcription model's - relative confidence that this text was spoken by this speaker. - emotions: - docs: A high-dimensional embedding in emotion space. - type: list - sentiment: - type: optional> - docs: >- - Sentiment predictions returned as a distribution. This model predicts - the probability that a given text could be interpreted as having each - sentiment level from `1` (negative) to `9` (positive). - - - Compared to returning one estimate of sentiment, this enables a more - nuanced analysis of a text's meaning. For example, a text with very - neutral sentiment would have an average rating of `5`. But also a text - that could be interpreted as having very positive sentiment or very - negative sentiment would also have an average rating of `5`. The - average sentiment is less informative than the distribution over - sentiment, so this API returns a value for each sentiment level. - toxicity: - type: optional> - docs: >- - Toxicity predictions returned as probabilities that the text can be - classified into the following categories: `toxic`, `severe_toxic`, - `obscene`, `threat`, `insult`, and `identity_hate`. - source: - openapi: batch-openapi.json - Models: - docs: The models used for inference. - properties: - face: optional - burst: optional - prosody: optional - language: optional - ner: optional - facemesh: optional - source: - openapi: batch-openapi.json - ModelsPredictions: - properties: - face: optional - burst: optional - prosody: optional - language: optional - ner: optional - facemesh: optional - source: - openapi: batch-openapi.json - Ner: + Minimum bounding box side length in pixels to treat as a face. Faces + detected with a bounding box side length in pixels less than this + threshold will be omitted from the response. + facs: optional + descriptions: optional + save_faces: + type: optional docs: >- - The NER (Named-entity Recognition) model identifies real-world objects and - concepts in passages of text. This also supports audio and video files by - transcribing and then directly analyzing the transcribed text. - - - Recommended input filetypes: `.txt`, `.mp3`, `.wav`, `.mp4` - properties: - identify_speakers: - type: optional - docs: >- - Whether to return identifiers for speakers over time. If `true`, - unique identifiers will be assigned to spoken words to differentiate - different speakers. If `false`, all speakers will be tagged with an - `unknown` ID. - default: false - source: - openapi: batch-openapi.json - NerPrediction: - properties: - entity: - type: string - docs: The recognized topic or entity. 
- position: PositionInterval - entity_confidence: - type: double - docs: Our NER model's relative confidence in the recognized topic or entity. - support: - type: double - docs: A measure of how often the entity is linked to by other entities. - uri: - type: string - docs: >- - A URL which provides more information about the recognized topic or - entity. - link_word: - type: string - docs: The specific word to which the emotion predictions are linked. - time: optional - confidence: - type: optional - docs: >- - Value between `0.0` and `1.0` that indicates our transcription model's - relative confidence in this text. - speaker_confidence: - type: optional - docs: >- - Value between `0.0` and `1.0` that indicates our transcription model's - relative confidence that this text was spoken by this speaker. - emotions: - docs: A high-dimensional embedding in emotion space. - type: list - source: - openapi: batch-openapi.json - "Null": - type: map - docs: No associated metadata for this model. Value will be `null`. - PositionInterval: + Whether to extract and save the detected faces in the artifacts zip + created by each job. + default: false + source: + openapi: batch-openapi.json + FacePrediction: + properties: + frame: + type: uint64 + docs: Frame number + time: + type: double + docs: Time in seconds when face detection occurred. + prob: + type: double + docs: The predicted probability that a detected face was actually a face. + box: BoundingBox + emotions: + docs: A high-dimensional embedding in emotion space. + type: list + facs: + type: optional> + docs: FACS 2.0 features and their scores. + descriptions: + type: optional> + docs: Modality-specific descriptive features and their scores. + source: + openapi: batch-openapi.json + FacemeshPrediction: + properties: + emotions: + docs: A high-dimensional embedding in emotion space. + type: list + source: + openapi: batch-openapi.json + FacsScore: + properties: + name: + type: string + docs: Name of the FACS 2.0 feature being expressed. + score: + type: float + docs: Embedding value for the FACS 2.0 feature being expressed. + source: + openapi: batch-openapi.json + Failed: + properties: + created_timestamp_ms: + type: long + docs: When this job was created (Unix timestamp in milliseconds). + started_timestamp_ms: + type: long + docs: When this job started (Unix timestamp in milliseconds). + ended_timestamp_ms: + type: long + docs: When this job ended (Unix timestamp in milliseconds). + message: + type: string + docs: An error message. + source: + openapi: batch-openapi.json + File: + docs: The list of files submitted for analysis. + properties: + filename: + type: optional + docs: The name of the file. + content_type: + type: optional + docs: The content type of the file. + md5sum: + type: string + docs: The MD5 checksum of the file. + source: + openapi: batch-openapi.json + Granularity: + enum: + - word + - sentence + - utterance + - conversational_turn + docs: >- + The granularity at which to generate predictions. The `granularity` field + is ignored if transcription is not enabled or if the `window` field has + been set. + + + - `word`: At the word level, our model provides a separate output for each + word, offering the most granular insight into emotional expression during + speech. + + + - `sentence`: At the sentence level of granularity, we annotate the + emotional tone of each spoken sentence with our Prosody and Emotional + Language models. + + + - `utterance`: Utterance-level granularity is between word- and + sentence-level. 
It takes into account natural pauses or breaks in speech, + providing more rapidly updated measures of emotional expression within a + flowing conversation. For text inputs, utterance-level granularity will + produce results identical to sentence-level granularity. + + + - `conversational_turn`: Conversational turn-level granularity provides a + distinct output for each change in speaker. It captures the full sequence + of words and sentences spoken uninterrupted by each person. This approach + provides a higher-level view of the emotional dynamics in a + multi-participant dialogue. For text inputs, specifying conversational + turn-level granularity for our Emotional Language model will produce + results for the entire passage. + source: + openapi: batch-openapi.json + GroupedPredictionsBurstPrediction: + properties: + id: + type: string docs: >- - Position of a segment of text within a larger document, measured in - characters. Uses zero-based indexing. The beginning index is inclusive and - the end index is exclusive. - properties: - begin: - type: uint64 - docs: The index of the first character in the text segment, inclusive. - end: - type: uint64 - docs: The index of the last character in the text segment, exclusive. - source: - openapi: batch-openapi.json - PredictionsOptionalNullBurstPrediction: - properties: - metadata: optional - grouped_predictions: list - source: - openapi: batch-openapi.json - PredictionsOptionalNullFacePrediction: - properties: - metadata: optional - grouped_predictions: list - source: - openapi: batch-openapi.json - PredictionsOptionalNullFacemeshPrediction: - properties: - metadata: optional - grouped_predictions: list - source: - openapi: batch-openapi.json - PredictionsOptionalTranscriptionMetadataLanguagePrediction: - properties: - metadata: optional - grouped_predictions: list - source: - openapi: batch-openapi.json - PredictionsOptionalTranscriptionMetadataNerPrediction: - properties: - metadata: optional - grouped_predictions: list - source: - openapi: batch-openapi.json - PredictionsOptionalTranscriptionMetadataProsodyPrediction: - properties: - metadata: optional - grouped_predictions: list - source: - openapi: batch-openapi.json - Prosody: + An automatically generated label to identify individuals in your media + file. Will be `unknown` if you have chosen to disable identification, + or if the model is unable to distinguish between individuals. + predictions: list + source: + openapi: batch-openapi.json + GroupedPredictionsFacePrediction: + properties: + id: + type: string docs: >- - The Speech Prosody model analyzes the intonation, stress, and rhythm of - spoken word. - - - Recommended input file types: `.wav`, `.mp3`, `.mp4` - properties: - granularity: optional - window: optional - identify_speakers: - type: optional - docs: >- - Whether to return identifiers for speakers over time. If `true`, - unique identifiers will be assigned to spoken words to differentiate - different speakers. If `false`, all speakers will be tagged with an - `unknown` ID. - default: false - source: - openapi: batch-openapi.json - ProsodyPrediction: - properties: - text: - type: optional - docs: A segment of text (like a word or a sentence). - time: TimeInterval - confidence: - type: optional - docs: >- - Value between `0.0` and `1.0` that indicates our transcription model's - relative confidence in this text. 
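To make the `Granularity` options documented above concrete, here is an illustrative, non-normative `models` block that requests sentence-level output from the Emotional Language model and utterance-level output from Speech Prosody. Field names mirror the `Language`, `Prosody`, and `Models` types defined in this file; the surrounding request structure is assumed from `InferenceBaseRequest`.

```python
# Illustrative sketch: combining granularity settings across models.
# Per the docs above, `granularity` is ignored if transcription is not enabled,
# or if the prosody `window` field has been set.
models_config = {
    "language": {"granularity": "sentence", "identify_speakers": False},
    "prosody": {"granularity": "utterance"},
}
```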
- speaker_confidence: - type: optional - docs: >- - Value between `0.0` and `1.0` that indicates our transcription model's - relative confidence that this text was spoken by this speaker. - emotions: - docs: A high-dimensional embedding in emotion space. - type: list - source: - openapi: batch-openapi.json - Queued: - properties: - created_timestamp_ms: - type: long - docs: When this job was created (Unix timestamp in milliseconds). - source: - openapi: batch-openapi.json - RegistryFileDetail: - properties: - file_id: - type: string - docs: File ID in the Asset Registry - file_url: - type: string - docs: URL to the file in the Asset Registry - source: - openapi: batch-openapi.json - Regression: map - SentimentScore: - properties: - name: - type: string - docs: Level of sentiment, ranging from `1` (negative) to `9` (positive) - score: - type: float - docs: Prediction for this level of sentiment - source: - openapi: batch-openapi.json - SortBy: - enum: - - created - - started - - ended - source: - openapi: batch-openapi.json - Source: - discriminant: type - base-properties: {} - union: - url: SourceUrl - file: SourceFile - text: SourceTextSource - source: - openapi: batch-openapi.json - SourceFile: - properties: {} - extends: - - File - source: - openapi: batch-openapi.json - SourceTextSource: - properties: {} - source: - openapi: batch-openapi.json - SourceUrl: - properties: {} - extends: - - Url - source: - openapi: batch-openapi.json - Url: - properties: - url: - type: string - docs: The URL of the source media file. - source: - openapi: batch-openapi.json - StateEmbeddingGeneration: - discriminant: status - base-properties: {} - union: - QUEUED: StateEmbeddingGenerationQueued - IN_PROGRESS: StateEmbeddingGenerationInProgress - COMPLETED: StateEmbeddingGenerationCompletedEmbeddingGeneration - FAILED: StateEmbeddingGenerationFailed - source: - openapi: batch-openapi.json - StateEmbeddingGenerationCompletedEmbeddingGeneration: - properties: {} - extends: - - CompletedEmbeddingGeneration - source: - openapi: batch-openapi.json - StateEmbeddingGenerationFailed: - properties: {} - extends: - - Failed - source: - openapi: batch-openapi.json - StateEmbeddingGenerationInProgress: - properties: {} - extends: - - InProgress - source: - openapi: batch-openapi.json - StateEmbeddingGenerationQueued: - properties: {} - extends: - - Queued - source: - openapi: batch-openapi.json - StateInference: - discriminant: status - base-properties: {} - union: - QUEUED: QueuedState - IN_PROGRESS: InProgressState - COMPLETED: CompletedState - FAILED: FailedState - source: - openapi: batch-openapi.json - CompletedState: - properties: {} - extends: - - CompletedInference - source: - openapi: batch-openapi.json - FailedState: - properties: {} - extends: - - Failed - source: - openapi: batch-openapi.json - InProgressState: - properties: {} - extends: - - InProgress - source: - openapi: batch-openapi.json - QueuedState: - properties: {} - extends: - - Queued - source: - openapi: batch-openapi.json - StateTlInference: - discriminant: status - base-properties: {} - union: - QUEUED: StateTlInferenceQueued - IN_PROGRESS: StateTlInferenceInProgress - COMPLETED: StateTlInferenceCompletedTlInference - FAILED: StateTlInferenceFailed - source: - openapi: batch-openapi.json - StateTlInferenceCompletedTlInference: - properties: {} - extends: - - CompletedTlInference - source: - openapi: batch-openapi.json - StateTlInferenceFailed: - properties: {} - extends: - - Failed - source: - openapi: batch-openapi.json - 
StateTlInferenceInProgress: - properties: {} - extends: - - InProgress - source: - openapi: batch-openapi.json - StateTlInferenceQueued: - properties: {} - extends: - - Queued - source: - openapi: batch-openapi.json - StateTraining: - discriminant: status - base-properties: {} - union: - QUEUED: StateTrainingQueued - IN_PROGRESS: StateTrainingInProgress - COMPLETED: StateTrainingCompletedTraining - FAILED: StateTrainingFailed - source: - openapi: batch-openapi.json - StateTrainingCompletedTraining: - properties: {} - extends: - - CompletedTraining - source: - openapi: batch-openapi.json - StateTrainingFailed: - properties: {} - extends: - - Failed - source: - openapi: batch-openapi.json - StateTrainingInProgress: - properties: {} - extends: - - InProgress - source: - openapi: batch-openapi.json - StateTrainingQueued: - properties: {} - extends: - - Queued - source: - openapi: batch-openapi.json - Status: - enum: - - QUEUED - - IN_PROGRESS - - COMPLETED - - FAILED - source: - openapi: batch-openapi.json - TlInferencePrediction: - properties: - file: - type: string - docs: A file path relative to the top level source URL or file. - file_type: string - custom_models: map - source: - openapi: batch-openapi.json - TlInferenceResults: - properties: - predictions: list - errors: list - source: - openapi: batch-openapi.json - TlInferenceSourcePredictResult: - properties: - source: Source - results: optional - error: - type: optional - docs: An error message. - source: - openapi: batch-openapi.json - Tag: - properties: - key: string - value: string - source: - openapi: batch-openapi.json - Target: - discriminated: false - union: - - long - - double - - string - source: - openapi: batch-openapi.json - Task: - discriminant: type - base-properties: {} - union: - classification: TaskClassification - regression: TaskRegression - source: - openapi: batch-openapi.json - TaskClassification: - properties: {} - source: - openapi: batch-openapi.json - TaskRegression: - properties: {} - source: - openapi: batch-openapi.json - TextSource: map - TimeInterval: - docs: A time range with a beginning and end, measured in seconds. - properties: - begin: - type: double - docs: Beginning of time range in seconds. - end: - type: double - docs: End of time range in seconds. - source: - openapi: batch-openapi.json - TlInferenceBaseRequest: - properties: - custom_model: CustomModel - urls: - type: optional> - docs: >- - URLs to the media files to be processed. Each must be a valid public - URL to a media file (see recommended input filetypes) or an archive - (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`) of media files. - - - If you wish to supply more than 100 URLs, consider providing them as - an archive (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`). - callback_url: - type: optional - docs: >- - If provided, a `POST` request will be made to the URL with the - generated predictions on completion or the error message on failure. - notify: - type: optional - docs: >- - Whether to send an email notification to the user upon job - completion/failure. - default: false - source: - openapi: batch-openapi.json - CustomModel: - discriminated: false - union: - - CustomModelId - - CustomModelVersionId - source: - openapi: batch-openapi.json - CustomModelId: - properties: - id: string - source: - openapi: batch-openapi.json - CustomModelVersionId: - properties: - version_id: string - source: - openapi: batch-openapi.json - ToxicityScore: - properties: - name: - type: string - docs: Category of toxicity. 
- score: - type: float - docs: Prediction for this category of toxicity - source: - openapi: batch-openapi.json - TrainingBaseRequest: - properties: - custom_model: CustomModelRequest - dataset: Dataset - target_feature: - type: optional - default: label - task: optional - evaluation: optional - alternatives: optional> - callback_url: optional - notify: - type: optional - default: false - source: - openapi: batch-openapi.json - TrainingCustomModel: - properties: - id: string - version_id: optional - source: - openapi: batch-openapi.json - Transcription: - docs: |- - Transcription-related configuration options. - - To disable transcription, explicitly set this field to `null`. - properties: - language: - type: optional - docs: >- - By default, we use an automated language detection method for our - Speech Prosody, Language, and NER models. However, if you know what - language is being spoken in your media samples, you can specify it via - its BCP-47 tag and potentially obtain more accurate results. - - - You can specify any of the following languages: - - - Chinese: `zh` - - - Danish: `da` - - - Dutch: `nl` - - - English: `en` - - - English (Australia): `en-AU` - - - English (India): `en-IN` - - - English (New Zealand): `en-NZ` - - - English (United Kingdom): `en-GB` - - - French: `fr` - - - French (Canada): `fr-CA` - - - German: `de` - - - Hindi: `hi` - - - Hindi (Roman Script): `hi-Latn` - - - Indonesian: `id` - - - Italian: `it` - - - Japanese: `ja` - - - Korean: `ko` - - - Norwegian: `no` - - - Polish: `pl` - - - Portuguese: `pt` - - - Portuguese (Brazil): `pt-BR` - - - Portuguese (Portugal): `pt-PT` - - - Russian: `ru` - - - Spanish: `es` - - - Spanish (Latin America): `es-419` - - - Swedish: `sv` - - - Tamil: `ta` - - - Turkish: `tr` - - - Ukrainian: `uk` - identify_speakers: - type: optional - docs: >- - Whether to return identifiers for speakers over time. If `true`, - unique identifiers will be assigned to spoken words to differentiate - different speakers. If `false`, all speakers will be tagged with an - `unknown` ID. - default: false - confidence_threshold: - type: optional - docs: >- - Transcript confidence threshold. Transcripts generated with a - confidence less than this threshold will be considered invalid and not - used as an input for model inference. - default: 0.5 - validation: - min: 0 - max: 1 - source: - openapi: batch-openapi.json - TranscriptionMetadata: - docs: Transcription metadata for your media file. - properties: - confidence: - type: double - docs: >- - Value between `0.0` and `1.0` indicating our transcription model's - relative confidence in the transcription of your media file. - detected_language: optional - source: - openapi: batch-openapi.json - Type: - enum: - - EMBEDDING_GENERATION - - INFERENCE - - TL_INFERENCE - - TRAINING - source: - openapi: batch-openapi.json - Unconfigurable: - type: map + An automatically generated label to identify individuals in your media + file. Will be `unknown` if you have chosen to disable identification, + or if the model is unable to distinguish between individuals. + predictions: list + source: + openapi: batch-openapi.json + GroupedPredictionsFacemeshPrediction: + properties: + id: + type: string + docs: >- + An automatically generated label to identify individuals in your media + file. Will be `unknown` if you have chosen to disable identification, + or if the model is unable to distinguish between individuals. 
+ predictions: list + source: + openapi: batch-openapi.json + GroupedPredictionsLanguagePrediction: + properties: + id: + type: string + docs: >- + An automatically generated label to identify individuals in your media + file. Will be `unknown` if you have chosen to disable identification, + or if the model is unable to distinguish between individuals. + predictions: list + source: + openapi: batch-openapi.json + GroupedPredictionsNerPrediction: + properties: + id: + type: string + docs: >- + An automatically generated label to identify individuals in your media + file. Will be `unknown` if you have chosen to disable identification, + or if the model is unable to distinguish between individuals. + predictions: list + source: + openapi: batch-openapi.json + GroupedPredictionsProsodyPrediction: + properties: + id: + type: string + docs: >- + An automatically generated label to identify individuals in your media + file. Will be `unknown` if you have chosen to disable identification, + or if the model is unable to distinguish between individuals. + predictions: list + source: + openapi: batch-openapi.json + InProgress: + properties: + created_timestamp_ms: + type: long + docs: When this job was created (Unix timestamp in milliseconds). + started_timestamp_ms: + type: long + docs: When this job started (Unix timestamp in milliseconds). + source: + openapi: batch-openapi.json + InferenceBaseRequest: + properties: + models: + type: optional docs: >- - To include predictions for this model type, set this field to `{}`. It is - currently not configurable further. - UnionJob: InferenceJob - EmbeddingGenerationJob: - properties: - type: string - extends: - - JobEmbeddingGeneration - source: - openapi: batch-openapi.json - InferenceJob: - properties: - type: - type: string - docs: >- - Denotes the job type. - - - Jobs created with the Expression Measurement API will have this field - set to `INFERENCE`. - extends: - - JobInference - source: - openapi: batch-openapi.json - CustomModelsInferenceJob: - properties: - type: string - extends: - - JobTlInference - source: - openapi: batch-openapi.json - CustomModelsTrainingJob: - properties: - type: string - extends: - - JobTraining - source: - openapi: batch-openapi.json - UnionPredictResult: InferenceSourcePredictResult - ValidationArgs: - properties: - positive_label: optional - source: - openapi: batch-openapi.json - When: - enum: - - created_before - - created_after - source: - openapi: batch-openapi.json - Window: + Specify the models to use for inference. + + + If this field is not explicitly set, then all models will run by + default. + transcription: optional + urls: + type: optional> docs: >- - Generate predictions based on time. - - - Setting the `window` field allows for a 'sliding window' approach, where a - fixed-size window moves across the audio or video file in defined steps. - This enables continuous analysis of prosody within subsets of the file, - providing dynamic and localized insights into emotional expression. - properties: - length: - type: optional - docs: The length of the sliding window. - default: 4 - validation: - min: 0.5 - step: - type: optional - docs: The step size of the sliding window. - default: 1 - validation: - min: 0.5 - source: - openapi: batch-openapi.json + URLs to the media files to be processed. Each must be a valid public + URL to a media file (see recommended input filetypes) or an archive + (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`) of media files. 
+ + + If you wish to supply more than 100 URLs, consider providing them as + an archive (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`). + text: + type: optional> + docs: >- + Text supplied directly to our Emotional Language and NER models for + analysis. + callback_url: + type: optional + docs: >- + If provided, a `POST` request will be made to the URL with the + generated predictions on completion or the error message on failure. + notify: + type: optional + docs: >- + Whether to send an email notification to the user upon job + completion/failure. + default: false + source: + openapi: batch-openapi.json + InferencePrediction: + properties: + file: + type: string + docs: A file path relative to the top level source URL or file. + models: ModelsPredictions + source: + openapi: batch-openapi.json + InferenceRequest: + properties: + models: optional + transcription: optional + urls: + type: optional> + docs: >- + URLs to the media files to be processed. Each must be a valid public + URL to a media file (see recommended input filetypes) or an archive + (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`) of media files. + + + If you wish to supply more than 100 URLs, consider providing them as + an archive (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`). + text: + type: optional> + docs: Text to supply directly to our language and NER models. + callback_url: + type: optional + docs: >- + If provided, a `POST` request will be made to the URL with the + generated predictions on completion or the error message on failure. + notify: + type: optional + docs: >- + Whether to send an email notification to the user upon job + completion/failure. + default: false + files: list + source: + openapi: batch-openapi.json + InferenceResults: + properties: + predictions: list + errors: list + source: + openapi: batch-openapi.json + InferenceSourcePredictResult: + properties: + source: Source + results: optional + error: + type: optional + docs: An error message. + source: + openapi: batch-openapi.json + JobEmbeddingGeneration: + properties: + job_id: + type: string + docs: The ID associated with this job. + validation: + format: uuid + user_id: + type: string + validation: + format: uuid + request: EmbeddingGenerationBaseRequest + state: StateEmbeddingGeneration + source: + openapi: batch-openapi.json + JobInference: + properties: + job_id: + type: string + docs: The ID associated with this job. + validation: + format: uuid + request: + type: InferenceRequest + docs: The request that initiated the job. + state: + type: StateInference + docs: The current state of the job. + source: + openapi: batch-openapi.json + JobTlInference: + properties: + job_id: + type: string + docs: The ID associated with this job. + validation: + format: uuid + user_id: + type: string + validation: + format: uuid + request: TlInferenceBaseRequest + state: StateTlInference + source: + openapi: batch-openapi.json + JobTraining: + properties: + job_id: + type: string + docs: The ID associated with this job. + validation: + format: uuid + user_id: + type: string + validation: + format: uuid + request: TrainingBaseRequest + state: StateTraining + source: + openapi: batch-openapi.json + JobId: + properties: + job_id: + type: string + docs: The ID of the started job. + validation: + format: uuid + source: + openapi: batch-files-openapi.yml + Language: + docs: >- + The Emotional Language model analyzes passages of text. This also supports + audio and video files by transcribing and then directly analyzing the + transcribed text. 
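Following the `InferenceBaseRequest` definition above, a hedged example of a job configuration for hosted media URLs. Only documented fields are used (`urls`, `models`, `notify`); the face-model values shown are the documented defaults plus an illustrative `identify_faces` toggle, the sample URL is taken from the predictions example earlier in this file, and sending this body directly as JSON to `POST /v0/batch/jobs` is an assumption of this sketch rather than something stated in this excerpt.

```python
# Sketch of an InferenceBaseRequest body for a job over hosted media URLs.
inference_request = {
    "urls": ["https://hume-tutorials.s3.amazonaws.com/faces.zip"],
    "models": {
        "face": {
            "fps_pred": 3,           # documented default
            "prob_threshold": 0.99,  # documented default; must fall within [0, 1]
            "identify_faces": True,  # assign stable IDs to faces across frames
        },
        "language": {},
    },
    "notify": False,
    # "callback_url": "https://example.com/hume-callback",  # optional: receives a POST on completion/failure
}
```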
+ + + Recommended input filetypes: `.txt`, `.mp3`, `.wav`, `.mp4` + properties: + granularity: optional + sentiment: optional + toxicity: optional + identify_speakers: + type: optional + docs: >- + Whether to return identifiers for speakers over time. If `true`, + unique identifiers will be assigned to spoken words to differentiate + different speakers. If `false`, all speakers will be tagged with an + `unknown` ID. + default: false + source: + openapi: batch-openapi.json + LanguagePrediction: + properties: + text: + type: string + docs: A segment of text (like a word or a sentence). + position: PositionInterval + time: optional + confidence: + type: optional + docs: >- + Value between `0.0` and `1.0` that indicates our transcription model's + relative confidence in this text. + speaker_confidence: + type: optional + docs: >- + Value between `0.0` and `1.0` that indicates our transcription model's + relative confidence that this text was spoken by this speaker. + emotions: + docs: A high-dimensional embedding in emotion space. + type: list + sentiment: + type: optional> + docs: >- + Sentiment predictions returned as a distribution. This model predicts + the probability that a given text could be interpreted as having each + sentiment level from `1` (negative) to `9` (positive). + + + Compared to returning one estimate of sentiment, this enables a more + nuanced analysis of a text's meaning. For example, a text with very + neutral sentiment would have an average rating of `5`. But also a text + that could be interpreted as having very positive sentiment or very + negative sentiment would also have an average rating of `5`. The + average sentiment is less informative than the distribution over + sentiment, so this API returns a value for each sentiment level. + toxicity: + type: optional> + docs: >- + Toxicity predictions returned as probabilities that the text can be + classified into the following categories: `toxic`, `severe_toxic`, + `obscene`, `threat`, `insult`, and `identity_hate`. + source: + openapi: batch-openapi.json + Models: + docs: The models used for inference. + properties: + face: optional + burst: optional + prosody: optional + language: optional + ner: optional + facemesh: optional + source: + openapi: batch-openapi.json + ModelsPredictions: + properties: + face: optional + burst: optional + prosody: optional + language: optional + ner: optional + facemesh: optional + source: + openapi: batch-openapi.json + Ner: + docs: >- + The NER (Named-entity Recognition) model identifies real-world objects and + concepts in passages of text. This also supports audio and video files by + transcribing and then directly analyzing the transcribed text. + + + Recommended input filetypes: `.txt`, `.mp3`, `.wav`, `.mp4` + properties: + identify_speakers: + type: optional + docs: >- + Whether to return identifiers for speakers over time. If `true`, + unique identifiers will be assigned to spoken words to differentiate + different speakers. If `false`, all speakers will be tagged with an + `unknown` ID. + default: false + source: + openapi: batch-openapi.json + NerPrediction: + properties: + entity: + type: string + docs: The recognized topic or entity. + position: PositionInterval + entity_confidence: + type: double + docs: Our NER model's relative confidence in the recognized topic or entity. + support: + type: double + docs: A measure of how often the entity is linked to by other entities. 
+ uri: + type: string + docs: >- + A URL which provides more information about the recognized topic or + entity. + link_word: + type: string + docs: The specific word to which the emotion predictions are linked. + time: optional + confidence: + type: optional + docs: >- + Value between `0.0` and `1.0` that indicates our transcription model's + relative confidence in this text. + speaker_confidence: + type: optional + docs: >- + Value between `0.0` and `1.0` that indicates our transcription model's + relative confidence that this text was spoken by this speaker. + emotions: + docs: A high-dimensional embedding in emotion space. + type: list + source: + openapi: batch-openapi.json + 'Null': + type: map + docs: No associated metadata for this model. Value will be `null`. + PositionInterval: + docs: >- + Position of a segment of text within a larger document, measured in + characters. Uses zero-based indexing. The beginning index is inclusive and + the end index is exclusive. + properties: + begin: + type: uint64 + docs: The index of the first character in the text segment, inclusive. + end: + type: uint64 + docs: The index of the last character in the text segment, exclusive. + source: + openapi: batch-openapi.json + PredictionsOptionalNullBurstPrediction: + properties: + metadata: optional + grouped_predictions: list + source: + openapi: batch-openapi.json + PredictionsOptionalNullFacePrediction: + properties: + metadata: optional + grouped_predictions: list + source: + openapi: batch-openapi.json + PredictionsOptionalNullFacemeshPrediction: + properties: + metadata: optional + grouped_predictions: list + source: + openapi: batch-openapi.json + PredictionsOptionalTranscriptionMetadataLanguagePrediction: + properties: + metadata: optional + grouped_predictions: list + source: + openapi: batch-openapi.json + PredictionsOptionalTranscriptionMetadataNerPrediction: + properties: + metadata: optional + grouped_predictions: list + source: + openapi: batch-openapi.json + PredictionsOptionalTranscriptionMetadataProsodyPrediction: + properties: + metadata: optional + grouped_predictions: list + source: + openapi: batch-openapi.json + Prosody: + docs: >- + The Speech Prosody model analyzes the intonation, stress, and rhythm of + spoken word. + + + Recommended input file types: `.wav`, `.mp3`, `.mp4` + properties: + granularity: optional + window: optional + identify_speakers: + type: optional + docs: >- + Whether to return identifiers for speakers over time. If `true`, + unique identifiers will be assigned to spoken words to differentiate + different speakers. If `false`, all speakers will be tagged with an + `unknown` ID. + default: false + source: + openapi: batch-openapi.json + ProsodyPrediction: + properties: + text: + type: optional + docs: A segment of text (like a word or a sentence). + time: TimeInterval + confidence: + type: optional + docs: >- + Value between `0.0` and `1.0` that indicates our transcription model's + relative confidence in this text. + speaker_confidence: + type: optional + docs: >- + Value between `0.0` and `1.0` that indicates our transcription model's + relative confidence that this text was spoken by this speaker. + emotions: + docs: A high-dimensional embedding in emotion space. + type: list + source: + openapi: batch-openapi.json + Queued: + properties: + created_timestamp_ms: + type: long + docs: When this job was created (Unix timestamp in milliseconds). 
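A quick illustration of the `PositionInterval` convention documented above (zero-based, begin inclusive, end exclusive): it maps directly onto Python slice notation. The sample text and offsets are made up.

```python
text = "Hello from Hume"
position = {"begin": 6, "end": 10}                 # hypothetical PositionInterval from a prediction
segment = text[position["begin"]:position["end"]]  # half-open slice -> "from"
```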
+ source: + openapi: batch-openapi.json + RegistryFileDetail: + properties: + file_id: + type: string + docs: File ID in the Asset Registry + file_url: + type: string + docs: URL to the file in the Asset Registry + source: + openapi: batch-openapi.json + Regression: map + SentimentScore: + properties: + name: + type: string + docs: Level of sentiment, ranging from `1` (negative) to `9` (positive) + score: + type: float + docs: Prediction for this level of sentiment + source: + openapi: batch-openapi.json + SortBy: + enum: + - created + - started + - ended + source: + openapi: batch-openapi.json + Source: + discriminant: type + base-properties: {} + union: + url: SourceUrl + file: SourceFile + text: SourceTextSource + source: + openapi: batch-openapi.json + SourceFile: + properties: {} + extends: + - File + source: + openapi: batch-openapi.json + SourceTextSource: + properties: {} + source: + openapi: batch-openapi.json + SourceUrl: + properties: {} + extends: + - Url + source: + openapi: batch-openapi.json + Url: + properties: + url: + type: string + docs: The URL of the source media file. + source: + openapi: batch-openapi.json + StateEmbeddingGeneration: + discriminant: status + base-properties: {} + union: + QUEUED: StateEmbeddingGenerationQueued + IN_PROGRESS: StateEmbeddingGenerationInProgress + COMPLETED: StateEmbeddingGenerationCompletedEmbeddingGeneration + FAILED: StateEmbeddingGenerationFailed + source: + openapi: batch-openapi.json + StateEmbeddingGenerationCompletedEmbeddingGeneration: + properties: {} + extends: + - CompletedEmbeddingGeneration + source: + openapi: batch-openapi.json + StateEmbeddingGenerationFailed: + properties: {} + extends: + - Failed + source: + openapi: batch-openapi.json + StateEmbeddingGenerationInProgress: + properties: {} + extends: + - InProgress + source: + openapi: batch-openapi.json + StateEmbeddingGenerationQueued: + properties: {} + extends: + - Queued + source: + openapi: batch-openapi.json + StateInference: + discriminant: status + base-properties: {} + union: + QUEUED: QueuedState + IN_PROGRESS: InProgressState + COMPLETED: CompletedState + FAILED: FailedState + source: + openapi: batch-openapi.json + CompletedState: + properties: {} + extends: + - CompletedInference + source: + openapi: batch-openapi.json + FailedState: + properties: {} + extends: + - Failed + source: + openapi: batch-openapi.json + InProgressState: + properties: {} + extends: + - InProgress + source: + openapi: batch-openapi.json + QueuedState: + properties: {} + extends: + - Queued + source: + openapi: batch-openapi.json + StateTlInference: + discriminant: status + base-properties: {} + union: + QUEUED: StateTlInferenceQueued + IN_PROGRESS: StateTlInferenceInProgress + COMPLETED: StateTlInferenceCompletedTlInference + FAILED: StateTlInferenceFailed + source: + openapi: batch-openapi.json + StateTlInferenceCompletedTlInference: + properties: {} + extends: + - CompletedTlInference + source: + openapi: batch-openapi.json + StateTlInferenceFailed: + properties: {} + extends: + - Failed + source: + openapi: batch-openapi.json + StateTlInferenceInProgress: + properties: {} + extends: + - InProgress + source: + openapi: batch-openapi.json + StateTlInferenceQueued: + properties: {} + extends: + - Queued + source: + openapi: batch-openapi.json + StateTraining: + discriminant: status + base-properties: {} + union: + QUEUED: StateTrainingQueued + IN_PROGRESS: StateTrainingInProgress + COMPLETED: StateTrainingCompletedTraining + FAILED: StateTrainingFailed + source: + openapi: 
batch-openapi.json + StateTrainingCompletedTraining: + properties: {} + extends: + - CompletedTraining + source: + openapi: batch-openapi.json + StateTrainingFailed: + properties: {} + extends: + - Failed + source: + openapi: batch-openapi.json + StateTrainingInProgress: + properties: {} + extends: + - InProgress + source: + openapi: batch-openapi.json + StateTrainingQueued: + properties: {} + extends: + - Queued + source: + openapi: batch-openapi.json + Status: + enum: + - QUEUED + - IN_PROGRESS + - COMPLETED + - FAILED + source: + openapi: batch-openapi.json + TlInferencePrediction: + properties: + file: + type: string + docs: A file path relative to the top level source URL or file. + file_type: string + custom_models: map + source: + openapi: batch-openapi.json + TlInferenceResults: + properties: + predictions: list + errors: list + source: + openapi: batch-openapi.json + TlInferenceSourcePredictResult: + properties: + source: Source + results: optional + error: + type: optional + docs: An error message. + source: + openapi: batch-openapi.json + Tag: + properties: + key: string + value: string + source: + openapi: batch-openapi.json + Target: + discriminated: false + union: + - long + - double + - string + source: + openapi: batch-openapi.json + Task: + discriminant: type + base-properties: {} + union: + classification: TaskClassification + regression: TaskRegression + source: + openapi: batch-openapi.json + TaskClassification: + properties: {} + source: + openapi: batch-openapi.json + TaskRegression: + properties: {} + source: + openapi: batch-openapi.json + TextSource: map + TimeInterval: + docs: A time range with a beginning and end, measured in seconds. + properties: + begin: + type: double + docs: Beginning of time range in seconds. + end: + type: double + docs: End of time range in seconds. + source: + openapi: batch-openapi.json + TlInferenceBaseRequest: + properties: + custom_model: CustomModel + urls: + type: optional> + docs: >- + URLs to the media files to be processed. Each must be a valid public + URL to a media file (see recommended input filetypes) or an archive + (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`) of media files. + + + If you wish to supply more than 100 URLs, consider providing them as + an archive (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`). + callback_url: + type: optional + docs: >- + If provided, a `POST` request will be made to the URL with the + generated predictions on completion or the error message on failure. + notify: + type: optional + docs: >- + Whether to send an email notification to the user upon job + completion/failure. + default: false + source: + openapi: batch-openapi.json + CustomModel: + discriminated: false + union: + - CustomModelId + - CustomModelVersionId + source: + openapi: batch-openapi.json + CustomModelId: + properties: + id: string + source: + openapi: batch-openapi.json + CustomModelVersionId: + properties: + version_id: string + source: + openapi: batch-openapi.json + ToxicityScore: + properties: + name: + type: string + docs: Category of toxicity. 
+ score: + type: float + docs: Prediction for this category of toxicity + source: + openapi: batch-openapi.json + TrainingBaseRequest: + properties: + custom_model: CustomModelRequest + dataset: Dataset + target_feature: + type: optional + default: label + task: optional + evaluation: optional + alternatives: optional> + callback_url: optional + notify: + type: optional + default: false + source: + openapi: batch-openapi.json + TrainingCustomModel: + properties: + id: string + version_id: optional + source: + openapi: batch-openapi.json + Transcription: + docs: |- + Transcription-related configuration options. + + To disable transcription, explicitly set this field to `null`. + properties: + language: + type: optional + docs: >- + By default, we use an automated language detection method for our + Speech Prosody, Language, and NER models. However, if you know what + language is being spoken in your media samples, you can specify it via + its BCP-47 tag and potentially obtain more accurate results. + + + You can specify any of the following languages: + + - Chinese: `zh` + + - Danish: `da` + + - Dutch: `nl` + + - English: `en` + + - English (Australia): `en-AU` + + - English (India): `en-IN` + + - English (New Zealand): `en-NZ` + + - English (United Kingdom): `en-GB` + + - French: `fr` + + - French (Canada): `fr-CA` + + - German: `de` + + - Hindi: `hi` + + - Hindi (Roman Script): `hi-Latn` + + - Indonesian: `id` + + - Italian: `it` + + - Japanese: `ja` + + - Korean: `ko` + + - Norwegian: `no` + + - Polish: `pl` + + - Portuguese: `pt` + + - Portuguese (Brazil): `pt-BR` + + - Portuguese (Portugal): `pt-PT` + + - Russian: `ru` + + - Spanish: `es` + + - Spanish (Latin America): `es-419` + + - Swedish: `sv` + + - Tamil: `ta` + + - Turkish: `tr` + + - Ukrainian: `uk` + identify_speakers: + type: optional + docs: >- + Whether to return identifiers for speakers over time. If `true`, + unique identifiers will be assigned to spoken words to differentiate + different speakers. If `false`, all speakers will be tagged with an + `unknown` ID. + default: false + confidence_threshold: + type: optional + docs: >- + Transcript confidence threshold. Transcripts generated with a + confidence less than this threshold will be considered invalid and not + used as an input for model inference. + default: 0.5 + validation: + min: 0 + max: 1 + source: + openapi: batch-openapi.json + TranscriptionMetadata: + docs: Transcription metadata for your media file. + properties: + confidence: + type: double + docs: >- + Value between `0.0` and `1.0` indicating our transcription model's + relative confidence in the transcription of your media file. + detected_language: optional + source: + openapi: batch-openapi.json + Type: + enum: + - EMBEDDING_GENERATION + - INFERENCE + - TL_INFERENCE + - TRAINING + source: + openapi: batch-openapi.json + Unconfigurable: + type: map + docs: >- + To include predictions for this model type, set this field to `{}`. It is + currently not configurable further. + UnionJob: InferenceJob + EmbeddingGenerationJob: + properties: + type: string + extends: + - JobEmbeddingGeneration + source: + openapi: batch-openapi.json + InferenceJob: + properties: + type: + type: string + docs: >- + Denotes the job type. + + + Jobs created with the Expression Measurement API will have this field + set to `INFERENCE`. 
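As a small aside to the `Transcription` type above, a hedged sketch of a transcription configuration inside a job request. The field names, defaults, and the "set to `null` to disable" behavior come from the definition above; nesting it under a top-level `transcription` key follows `InferenceBaseRequest`.

```python
# Transcription settings for a batch job (see the Transcription type above).
transcription = {
    "language": "en-GB",          # optional BCP-47 tag; omit to use automatic language detection
    "identify_speakers": True,    # label speakers instead of tagging them all `unknown`
    "confidence_threshold": 0.5,  # documented default; lower-confidence transcripts are not used for inference
}

# To disable transcription entirely, the docs say to set the field explicitly to null:
request_without_transcription = {"urls": ["..."], "transcription": None}
```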
+ extends: + - JobInference + source: + openapi: batch-openapi.json + CustomModelsInferenceJob: + properties: + type: string + extends: + - JobTlInference + source: + openapi: batch-openapi.json + CustomModelsTrainingJob: + properties: + type: string + extends: + - JobTraining + source: + openapi: batch-openapi.json + UnionPredictResult: InferenceSourcePredictResult + ValidationArgs: + properties: + positive_label: optional + source: + openapi: batch-openapi.json + When: + enum: + - created_before + - created_after + source: + openapi: batch-openapi.json + Window: + docs: >- + Generate predictions based on time. + + + Setting the `window` field allows for a 'sliding window' approach, where a + fixed-size window moves across the audio or video file in defined steps. + This enables continuous analysis of prosody within subsets of the file, + providing dynamic and localized insights into emotional expression. + properties: + length: + type: optional + docs: The length of the sliding window. + default: 4 + validation: + min: 0.5 + step: + type: optional + docs: The step size of the sliding window. + default: 1 + validation: + min: 0.5 + source: + openapi: batch-openapi.json diff --git a/.mock/definition/expression-measurement/stream/__package__.yml b/.mock/definition/expression-measurement/stream/__package__.yml index 5ef4fe0..d43dd43 100644 --- a/.mock/definition/expression-measurement/stream/__package__.yml +++ b/.mock/definition/expression-measurement/stream/__package__.yml @@ -1,528 +1,528 @@ channel: - path: /v0/stream/models - auth: false - headers: - X-Hume-Api-Key: - type: string - name: humeApiKey - messages: - subscribe: - origin: server - body: SubscribeEvent - publish: - origin: client - body: - type: StreamModelsEndpointPayload - docs: Models endpoint payload - examples: - - messages: - - type: publish - body: {} - - type: subscribe - body: {} + path: /v0/stream/models + auth: false + headers: + X-Hume-Api-Key: + type: string + name: humeApiKey + messages: + subscribe: + origin: server + body: SubscribeEvent + publish: + origin: client + body: + type: StreamModelsEndpointPayload + docs: Models endpoint payload + examples: + - messages: + - type: publish + body: {} + - type: subscribe + body: {} types: - StreamModelPredictionsJobDetails: + StreamModelPredictionsJobDetails: + docs: > + If the job_details flag was set in the request, details about the current + streaming job will be returned in the response body. + properties: + job_id: + type: optional + docs: ID of the current streaming job. + source: + openapi: streaming-asyncapi.yml + StreamModelPredictionsBurstPredictionsItem: + properties: + time: optional + emotions: optional + source: + openapi: streaming-asyncapi.yml + StreamModelPredictionsBurst: + docs: Response for the vocal burst emotion model. + properties: + predictions: optional> + source: + openapi: streaming-asyncapi.yml + StreamModelPredictionsFacePredictionsItem: + properties: + frame: + type: optional + docs: Frame number + time: + type: optional + docs: Time in seconds when face detection occurred. + bbox: optional + prob: + type: optional + docs: The predicted probability that a detected face was actually a face. + face_id: + type: optional + docs: >- + Identifier for a face. Not that this defaults to `unknown` unless face + identification is enabled in the face model configuration. 
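Given the channel definition above (path /v0/stream/models, API key passed via the X-Hume-Api-Key header, client publishes a StreamModelsEndpointPayload and the server replies with SubscribeEvent messages), a bare-bones connection could look like this sketch. It assumes Node.js with the `ws` package installed and that the production host is api.hume.ai; adjust both to your setup.

import WebSocket from "ws";

// Sketch: open the /v0/stream/models channel with the X-Hume-Api-Key header.
const socket = new WebSocket("wss://api.hume.ai/v0/stream/models", {
  headers: { "X-Hume-Api-Key": process.env.HUME_API_KEY ?? "" },
});

socket.on("open", () => {
  // The channel's own example publishes an empty payload `{}` to start with.
  socket.send(JSON.stringify({}));
});

socket.on("message", (raw) => {
  // Each server message is a SubscribeEvent (predictions, error, or warning).
  console.log(JSON.parse(raw.toString()));
});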
+ emotions: optional + facs: optional + descriptions: optional + source: + openapi: streaming-asyncapi.yml + StreamModelPredictionsFace: + docs: Response for the facial expression emotion model. + properties: + predictions: optional> + source: + openapi: streaming-asyncapi.yml + StreamModelPredictionsFacemeshPredictionsItem: + properties: + emotions: optional + source: + openapi: streaming-asyncapi.yml + StreamModelPredictionsFacemesh: + docs: Response for the facemesh emotion model. + properties: + predictions: optional> + source: + openapi: streaming-asyncapi.yml + StreamModelPredictionsLanguagePredictionsItem: + properties: + text: + type: optional + docs: A segment of text (like a word or a sentence). + position: optional + emotions: optional + sentiment: optional + toxicity: optional + source: + openapi: streaming-asyncapi.yml + StreamModelPredictionsLanguage: + docs: Response for the language emotion model. + properties: + predictions: optional> + source: + openapi: streaming-asyncapi.yml + StreamModelPredictionsProsodyPredictionsItem: + properties: + time: optional + emotions: optional + source: + openapi: streaming-asyncapi.yml + StreamModelPredictionsProsody: + docs: Response for the speech prosody emotion model. + properties: + predictions: optional> + source: + openapi: streaming-asyncapi.yml + StreamModelPredictions: + docs: Model predictions + properties: + payload_id: + type: optional docs: > - If the job_details flag was set in the request, details about the current - streaming job will be returned in the response body. - properties: - job_id: - type: optional - docs: ID of the current streaming job. - source: - openapi: streaming-asyncapi.yml - StreamModelPredictionsBurstPredictionsItem: - properties: - time: optional - emotions: optional - source: - openapi: streaming-asyncapi.yml - StreamModelPredictionsBurst: + If a payload ID was passed in the request, the same payload ID will be + sent back in the response body. + job_details: + type: optional + docs: > + If the job_details flag was set in the request, details about the + current streaming job will be returned in the response body. + burst: + type: optional docs: Response for the vocal burst emotion model. - properties: - predictions: optional> - source: - openapi: streaming-asyncapi.yml - StreamModelPredictionsFacePredictionsItem: - properties: - frame: - type: optional - docs: Frame number - time: - type: optional - docs: Time in seconds when face detection occurred. - bbox: optional - prob: - type: optional - docs: The predicted probability that a detected face was actually a face. - face_id: - type: optional - docs: >- - Identifier for a face. Not that this defaults to `unknown` unless face - identification is enabled in the face model configuration. - emotions: optional - facs: optional - descriptions: optional - source: - openapi: streaming-asyncapi.yml - StreamModelPredictionsFace: + face: + type: optional docs: Response for the facial expression emotion model. - properties: - predictions: optional> - source: - openapi: streaming-asyncapi.yml - StreamModelPredictionsFacemeshPredictionsItem: - properties: - emotions: optional - source: - openapi: streaming-asyncapi.yml - StreamModelPredictionsFacemesh: + facemesh: + type: optional docs: Response for the facemesh emotion model. - properties: - predictions: optional> - source: - openapi: streaming-asyncapi.yml - StreamModelPredictionsLanguagePredictionsItem: - properties: - text: - type: optional - docs: A segment of text (like a word or a sentence). 
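To show how the prediction shapes above fit together, here is a small, hedged sketch that walks the face predictions of a StreamModelPredictions message. The interfaces are trimmed to just the fields used and are not the SDK's own exported types.

// Sketch: reading facial-expression results out of a StreamModelPredictions
// message (face.predictions[].emotions[] with name/score pairs).
interface EmotionScore { name?: string; score?: number }
interface FacePrediction { face_id?: string; prob?: number; emotions?: EmotionScore[] }
interface PredictionsMessage { face?: { predictions?: FacePrediction[] } }

function topEmotionPerFace(message: PredictionsMessage): void {
  for (const prediction of message.face?.predictions ?? []) {
    const top = [...(prediction.emotions ?? [])].sort(
      (a, b) => (b.score ?? 0) - (a.score ?? 0),
    )[0];
    console.log(prediction.face_id ?? "unknown", top?.name, top?.score);
  }
}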
- position: optional - emotions: optional - sentiment: optional - toxicity: optional - source: - openapi: streaming-asyncapi.yml - StreamModelPredictionsLanguage: + language: + type: optional docs: Response for the language emotion model. - properties: - predictions: optional> - source: - openapi: streaming-asyncapi.yml - StreamModelPredictionsProsodyPredictionsItem: - properties: - time: optional - emotions: optional - source: - openapi: streaming-asyncapi.yml - StreamModelPredictionsProsody: + prosody: + type: optional docs: Response for the speech prosody emotion model. - properties: - predictions: optional> - source: - openapi: streaming-asyncapi.yml - StreamModelPredictions: - docs: Model predictions - properties: - payload_id: - type: optional - docs: > - If a payload ID was passed in the request, the same payload ID will be - sent back in the response body. - job_details: - type: optional - docs: > - If the job_details flag was set in the request, details about the - current streaming job will be returned in the response body. - burst: - type: optional - docs: Response for the vocal burst emotion model. - face: - type: optional - docs: Response for the facial expression emotion model. - facemesh: - type: optional - docs: Response for the facemesh emotion model. - language: - type: optional - docs: Response for the language emotion model. - prosody: - type: optional - docs: Response for the speech prosody emotion model. - source: - openapi: streaming-asyncapi.yml - JobDetails: + source: + openapi: streaming-asyncapi.yml + JobDetails: + docs: > + If the job_details flag was set in the request, details about the current + streaming job will be returned in the response body. + properties: + job_id: + type: optional + docs: ID of the current streaming job. + source: + openapi: streaming-asyncapi.yml + StreamErrorMessage: + docs: Error message + properties: + error: + type: optional + docs: Error message text. + code: + type: optional + docs: Unique identifier for the error. + payload_id: + type: optional docs: > - If the job_details flag was set in the request, details about the current - streaming job will be returned in the response body. - properties: - job_id: - type: optional - docs: ID of the current streaming job. - source: - openapi: streaming-asyncapi.yml - StreamErrorMessage: - docs: Error message - properties: - error: - type: optional - docs: Error message text. - code: - type: optional - docs: Unique identifier for the error. - payload_id: - type: optional - docs: > - If a payload ID was passed in the request, the same payload ID will be - sent back in the response body. - job_details: - type: optional - docs: > - If the job_details flag was set in the request, details about the - current streaming job will be returned in the response body. - source: - openapi: streaming-asyncapi.yml - StreamWarningMessageJobDetails: + If a payload ID was passed in the request, the same payload ID will be + sent back in the response body. + job_details: + type: optional docs: > - If the job_details flag was set in the request, details about the current - streaming job will be returned in the response body. - properties: - job_id: - type: optional - docs: ID of the current streaming job. - source: - openapi: streaming-asyncapi.yml - StreamWarningMessage: - docs: Warning message - properties: - warning: - type: optional - docs: Warning message text. - code: - type: optional - docs: Unique identifier for the error. 
- payload_id: - type: optional - docs: > - If a payload ID was passed in the request, the same payload ID will be - sent back in the response body. - job_details: - type: optional - docs: > - If the job_details flag was set in the request, details about the - current streaming job will be returned in the response body. - source: - openapi: streaming-asyncapi.yml - SubscribeEvent: - discriminated: false - union: - - type: StreamModelPredictions - docs: Model predictions - - type: StreamErrorMessage - docs: Error message - - type: StreamWarningMessage - docs: Warning message - source: - openapi: streaming-asyncapi.yml - StreamFace: + If the job_details flag was set in the request, details about the + current streaming job will be returned in the response body. + source: + openapi: streaming-asyncapi.yml + StreamWarningMessageJobDetails: + docs: > + If the job_details flag was set in the request, details about the current + streaming job will be returned in the response body. + properties: + job_id: + type: optional + docs: ID of the current streaming job. + source: + openapi: streaming-asyncapi.yml + StreamWarningMessage: + docs: Warning message + properties: + warning: + type: optional + docs: Warning message text. + code: + type: optional + docs: Unique identifier for the error. + payload_id: + type: optional docs: > - Configuration for the facial expression emotion model. + If a payload ID was passed in the request, the same payload ID will be + sent back in the response body. + job_details: + type: optional + docs: > + If the job_details flag was set in the request, details about the + current streaming job will be returned in the response body. + source: + openapi: streaming-asyncapi.yml + SubscribeEvent: + discriminated: false + union: + - type: StreamModelPredictions + docs: Model predictions + - type: StreamErrorMessage + docs: Error message + - type: StreamWarningMessage + docs: Warning message + source: + openapi: streaming-asyncapi.yml + StreamFace: + docs: > + Configuration for the facial expression emotion model. - Note: Using the `reset_stream` parameter does not have any effect on face - identification. A single face identifier cache is maintained over a full - session whether `reset_stream` is used or not. - properties: - facs: - type: optional> - docs: >- - Configuration for FACS predictions. If missing or null, no FACS - predictions will be generated. - descriptions: - type: optional> - docs: >- - Configuration for Descriptions predictions. If missing or null, no - Descriptions predictions will be generated. - identify_faces: - type: optional - docs: > - Whether to return identifiers for faces across frames. If true, unique - identifiers will be assigned to face bounding boxes to differentiate - different faces. If false, all faces will be tagged with an "unknown" - ID. - default: false - fps_pred: - type: optional - docs: > - Number of frames per second to process. Other frames will be omitted - from the response. - default: 3 - prob_threshold: - type: optional - docs: > - Face detection probability threshold. Faces detected with a - probability less than this threshold will be omitted from the - response. - default: 3 - min_face_size: - type: optional - docs: > - Minimum bounding box side length in pixels to treat as a face. Faces - detected with a bounding box side length in pixels less than this - threshold will be omitted from the response. - default: 3 - source: - openapi: streaming-asyncapi.yml - StreamLanguage: - docs: Configuration for the language emotion model. 
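Since SubscribeEvent above is an undiscriminated union of predictions, errors, and warnings, client code typically branches on which field is present. The sketch below uses hand-written interfaces that mirror the definitions; the real SDK types may differ.

// Sketch: distinguishing the three SubscribeEvent variants by keying off the
// `error` or `warning` field; everything else is treated as model predictions.
interface StreamErrorMessage { error?: string; code?: string; payload_id?: string }
interface StreamWarningMessage { warning?: string; code?: string; payload_id?: string }
interface StreamModelPredictions { payload_id?: string; face?: unknown; language?: unknown; prosody?: unknown }

type SubscribeEvent = StreamModelPredictions | StreamErrorMessage | StreamWarningMessage;

function handleEvent(event: SubscribeEvent): void {
  if ("error" in event && event.error) {
    console.error(`stream error ${event.code ?? ""}: ${event.error}`);
  } else if ("warning" in event && event.warning) {
    console.warn(`stream warning ${event.code ?? ""}: ${event.warning}`);
  } else {
    console.log("model predictions", event);
  }
}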
- properties: - sentiment: - type: optional> - docs: >- - Configuration for sentiment predictions. If missing or null, no - sentiment predictions will be generated. - toxicity: - type: optional> - docs: >- - Configuration for toxicity predictions. If missing or null, no - toxicity predictions will be generated. - granularity: - type: optional - docs: >- - The granularity at which to generate predictions. Values are `word`, - `sentence`, `utterance`, or `passage`. To get a single prediction for - the entire text of your streaming payload use `passage`. Default value - is `word`. - source: - openapi: streaming-asyncapi.yml - Config: + Note: Using the `reset_stream` parameter does not have any effect on face + identification. A single face identifier cache is maintained over a full + session whether `reset_stream` is used or not. + properties: + facs: + type: optional> + docs: >- + Configuration for FACS predictions. If missing or null, no FACS + predictions will be generated. + descriptions: + type: optional> + docs: >- + Configuration for Descriptions predictions. If missing or null, no + Descriptions predictions will be generated. + identify_faces: + type: optional + docs: > + Whether to return identifiers for faces across frames. If true, unique + identifiers will be assigned to face bounding boxes to differentiate + different faces. If false, all faces will be tagged with an "unknown" + ID. + default: false + fps_pred: + type: optional + docs: > + Number of frames per second to process. Other frames will be omitted + from the response. + default: 3 + prob_threshold: + type: optional + docs: > + Face detection probability threshold. Faces detected with a + probability less than this threshold will be omitted from the + response. + default: 3 + min_face_size: + type: optional docs: > - Configuration used to specify which models should be used and with what - settings. - properties: - burst: - type: optional> - docs: | - Configuration for the vocal burst emotion model. + Minimum bounding box side length in pixels to treat as a face. Faces + detected with a bounding box side length in pixels less than this + threshold will be omitted from the response. + default: 3 + source: + openapi: streaming-asyncapi.yml + StreamLanguage: + docs: Configuration for the language emotion model. + properties: + sentiment: + type: optional> + docs: >- + Configuration for sentiment predictions. If missing or null, no + sentiment predictions will be generated. + toxicity: + type: optional> + docs: >- + Configuration for toxicity predictions. If missing or null, no + toxicity predictions will be generated. + granularity: + type: optional + docs: >- + The granularity at which to generate predictions. Values are `word`, + `sentence`, `utterance`, or `passage`. To get a single prediction for + the entire text of your streaming payload use `passage`. Default value + is `word`. + source: + openapi: streaming-asyncapi.yml + Config: + docs: > + Configuration used to specify which models should be used and with what + settings. + properties: + burst: + type: optional> + docs: | + Configuration for the vocal burst emotion model. - Note: Model configuration is not currently available in streaming. + Note: Model configuration is not currently available in streaming. - Please use the default configuration by passing an empty object `{}`. - face: - type: optional - docs: > - Configuration for the facial expression emotion model. + Please use the default configuration by passing an empty object `{}`. 
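Putting the Config, StreamFace, and StreamLanguage definitions together, a `models` object for the streaming payload could look like the sketch below; values are illustrative and follow the documented defaults and enums.

// Sketch of a `models` configuration. Unconfigurable models (burst, facemesh,
// prosody) take an empty object `{}`; the rest use the documented fields.
const models = {
  burst: {},                       // default configuration only
  face: {
    identify_faces: true,          // tag bounding boxes with stable face IDs
    fps_pred: 3,                   // frames per second to process
  },
  language: {
    sentiment: {},                 // enable sentiment predictions
    toxicity: {},                  // enable toxicity predictions
    granularity: "sentence",       // word | sentence | utterance | passage
  },
};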
+ face: + type: optional + docs: > + Configuration for the facial expression emotion model. - Note: Using the `reset_stream` parameter does not have any effect on - face identification. A single face identifier cache is maintained over - a full session whether `reset_stream` is used or not. - facemesh: - type: optional> - docs: | - Configuration for the facemesh emotion model. + Note: Using the `reset_stream` parameter does not have any effect on + face identification. A single face identifier cache is maintained over + a full session whether `reset_stream` is used or not. + facemesh: + type: optional> + docs: | + Configuration for the facemesh emotion model. - Note: Model configuration is not currently available in streaming. + Note: Model configuration is not currently available in streaming. - Please use the default configuration by passing an empty object `{}`. - language: - type: optional - docs: Configuration for the language emotion model. - prosody: - type: optional> - docs: | - Configuration for the speech prosody emotion model. + Please use the default configuration by passing an empty object `{}`. + language: + type: optional + docs: Configuration for the language emotion model. + prosody: + type: optional> + docs: | + Configuration for the speech prosody emotion model. - Note: Model configuration is not currently available in streaming. + Note: Model configuration is not currently available in streaming. - Please use the default configuration by passing an empty object `{}`. - source: - openapi: streaming-asyncapi.yml - StreamModelsEndpointPayload: - docs: Models endpoint payload - properties: - data: - type: optional - models: - type: optional - docs: > - Configuration used to specify which models should be used and with - what settings. - stream_window_ms: - type: optional - docs: > - Length in milliseconds of streaming sliding window. + Please use the default configuration by passing an empty object `{}`. + source: + openapi: streaming-asyncapi.yml + StreamModelsEndpointPayload: + docs: Models endpoint payload + properties: + data: + type: optional + models: + type: optional + docs: > + Configuration used to specify which models should be used and with + what settings. + stream_window_ms: + type: optional + docs: > + Length in milliseconds of streaming sliding window. - Extending the length of this window will prepend media context from - past payloads into the current payload. + Extending the length of this window will prepend media context from + past payloads into the current payload. - For example, if on the first payload you send 500ms of data and on the - second payload you send an additional 500ms of data, a window of at - least 1000ms will allow the model to process all 1000ms of stream - data. + For example, if on the first payload you send 500ms of data and on the + second payload you send an additional 500ms of data, a window of at + least 1000ms will allow the model to process all 1000ms of stream + data. - A window of 600ms would append the full 500ms of the second payload to - the last 100ms of the first payload. + A window of 600ms would append the full 500ms of the second payload to + the last 100ms of the first payload. - Note: This feature is currently only supported for audio data and - audio models. For other file types and models this parameter will be - ignored. - default: 5000 - validation: - min: 500 - max: 10000 - reset_stream: - type: optional - docs: > - Whether to reset the streaming sliding window before processing the - current payload. 
+ Note: This feature is currently only supported for audio data and + audio models. For other file types and models this parameter will be + ignored. + default: 5000 + validation: + min: 500 + max: 10000 + reset_stream: + type: optional + docs: > + Whether to reset the streaming sliding window before processing the + current payload. - If this parameter is set to `true` then past context will be deleted - before processing the current payload. + If this parameter is set to `true` then past context will be deleted + before processing the current payload. - Use reset_stream when one audio file is done being processed and you - do not want context to leak across files. - default: false - raw_text: - type: optional - docs: > - Set to `true` to enable the data parameter to be parsed as raw text - rather than base64 encoded bytes. + Use reset_stream when one audio file is done being processed and you + do not want context to leak across files. + default: false + raw_text: + type: optional + docs: > + Set to `true` to enable the data parameter to be parsed as raw text + rather than base64 encoded bytes. - This parameter is useful if you want to send text to be processed by - the language model, but it cannot be used with other file types like - audio, image, or video. - default: false - job_details: - type: optional - docs: > - Set to `true` to get details about the job. + This parameter is useful if you want to send text to be processed by + the language model, but it cannot be used with other file types like + audio, image, or video. + default: false + job_details: + type: optional + docs: > + Set to `true` to get details about the job. - This parameter can be set in the same payload as data or it can be set - without data and models configuration to get the job details between - payloads. + This parameter can be set in the same payload as data or it can be set + without data and models configuration to get the job details between + payloads. - This parameter is useful to get the unique job ID. - default: false - payload_id: - type: optional - docs: > - Pass an arbitrary string as the payload ID and get it back at the top - level of the socket response. + This parameter is useful to get the unique job ID. + default: false + payload_id: + type: optional + docs: > + Pass an arbitrary string as the payload ID and get it back at the top + level of the socket response. - This can be useful if you have multiple requests running - asynchronously and want to disambiguate responses as they are - received. - source: - openapi: streaming-asyncapi.yml - EmotionEmbeddingItem: - properties: - name: - type: optional - docs: Name of the emotion being expressed. - score: - type: optional - docs: Embedding value for the emotion being expressed. - source: - openapi: streaming-asyncapi.yml - EmotionEmbedding: - docs: A high-dimensional embedding in emotion space. - type: list - StreamBoundingBox: - docs: A bounding box around a face. - properties: - x: - type: optional - docs: x-coordinate of bounding box top left corner. - validation: - min: 0 - "y": - type: optional - docs: y-coordinate of bounding box top left corner. - validation: - min: 0 - w: - type: optional - docs: Bounding box width. - validation: - min: 0 - h: - type: optional - docs: Bounding box height. - validation: - min: 0 - source: - openapi: streaming-asyncapi.yml - TimeRange: - docs: A time range with a beginning and end, measured in seconds. - properties: - begin: - type: optional - docs: Beginning of time range in seconds. 
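A complete StreamModelsEndpointPayload combining the fields documented above might look like the following sketch. It sends raw text, so the sliding-window fields are included only for illustration (they apply to audio payloads); all values are placeholders.

// Sketch of a full publish payload for /v0/stream/models. `data` would be
// base64-encoded media unless `raw_text` is true.
const payload = {
  data: "It was the best of times, it was the worst of times.",
  raw_text: true,                                     // parse `data` as text
  models: { language: { granularity: "sentence" } },
  stream_window_ms: 5000,                             // default; 500–10000 ms, audio only
  reset_stream: false,                                // true between unrelated files
  job_details: true,                                  // return the streaming job ID
  payload_id: "payload-1",                            // echoed back in the response
};

// socket.send(JSON.stringify(payload));  // reusing the socket from the earlier sketch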
- validation: - min: 0 - end: - type: optional - docs: End of time range in seconds. - validation: - min: 0 - source: - openapi: streaming-asyncapi.yml - TextPosition: - docs: > - Position of a segment of text within a larger document, measured in - characters. Uses zero-based indexing. The beginning index is inclusive and - the end index is exclusive. - properties: - begin: - type: optional - docs: The index of the first character in the text segment, inclusive. - validation: - min: 0 - end: - type: optional - docs: The index of the last character in the text segment, exclusive. - validation: - min: 0 - source: - openapi: streaming-asyncapi.yml - SentimentItem: - properties: - name: - type: optional - docs: Level of sentiment, ranging from 1 (negative) to 9 (positive) - score: - type: optional - docs: Prediction for this level of sentiment - source: - openapi: streaming-asyncapi.yml - Sentiment: - docs: >- - Sentiment predictions returned as a distribution. This model predicts the - probability that a given text could be interpreted as having each - sentiment level from 1 (negative) to 9 (positive). + This can be useful if you have multiple requests running + asynchronously and want to disambiguate responses as they are + received. + source: + openapi: streaming-asyncapi.yml + EmotionEmbeddingItem: + properties: + name: + type: optional + docs: Name of the emotion being expressed. + score: + type: optional + docs: Embedding value for the emotion being expressed. + source: + openapi: streaming-asyncapi.yml + EmotionEmbedding: + docs: A high-dimensional embedding in emotion space. + type: list + StreamBoundingBox: + docs: A bounding box around a face. + properties: + x: + type: optional + docs: x-coordinate of bounding box top left corner. + validation: + min: 0 + 'y': + type: optional + docs: y-coordinate of bounding box top left corner. + validation: + min: 0 + w: + type: optional + docs: Bounding box width. + validation: + min: 0 + h: + type: optional + docs: Bounding box height. + validation: + min: 0 + source: + openapi: streaming-asyncapi.yml + TimeRange: + docs: A time range with a beginning and end, measured in seconds. + properties: + begin: + type: optional + docs: Beginning of time range in seconds. + validation: + min: 0 + end: + type: optional + docs: End of time range in seconds. + validation: + min: 0 + source: + openapi: streaming-asyncapi.yml + TextPosition: + docs: > + Position of a segment of text within a larger document, measured in + characters. Uses zero-based indexing. The beginning index is inclusive and + the end index is exclusive. + properties: + begin: + type: optional + docs: The index of the first character in the text segment, inclusive. + validation: + min: 0 + end: + type: optional + docs: The index of the last character in the text segment, exclusive. + validation: + min: 0 + source: + openapi: streaming-asyncapi.yml + SentimentItem: + properties: + name: + type: optional + docs: Level of sentiment, ranging from 1 (negative) to 9 (positive) + score: + type: optional + docs: Prediction for this level of sentiment + source: + openapi: streaming-asyncapi.yml + Sentiment: + docs: >- + Sentiment predictions returned as a distribution. This model predicts the + probability that a given text could be interpreted as having each + sentiment level from 1 (negative) to 9 (positive). - Compared to returning one estimate of sentiment, this enables a more - nuanced analysis of a text's meaning. 
For example, a text with very - neutral sentiment would have an average rating of 5. But also a text that - could be interpreted as having very positive sentiment or very negative - sentiment would also have an average rating of 5. The average sentiment is - less informative than the distribution over sentiment, so this API returns - a value for each sentiment level. - type: list - ToxicityItem: - properties: - name: - type: optional - docs: Category of toxicity. - score: - type: optional - docs: Prediction for this category of toxicity - source: - openapi: streaming-asyncapi.yml - Toxicity: - docs: >- - Toxicity predictions returned as probabilities that the text can be - classified into the following categories: toxic, severe_toxic, obscene, - threat, insult, and identity_hate. - type: list + Compared to returning one estimate of sentiment, this enables a more + nuanced analysis of a text's meaning. For example, a text with very + neutral sentiment would have an average rating of 5. But also a text that + could be interpreted as having very positive sentiment or very negative + sentiment would also have an average rating of 5. The average sentiment is + less informative than the distribution over sentiment, so this API returns + a value for each sentiment level. + type: list + ToxicityItem: + properties: + name: + type: optional + docs: Category of toxicity. + score: + type: optional + docs: Prediction for this category of toxicity + source: + openapi: streaming-asyncapi.yml + Toxicity: + docs: >- + Toxicity predictions returned as probabilities that the text can be + classified into the following categories: toxic, severe_toxic, obscene, + threat, insult, and identity_hate. + type: list diff --git a/.mock/fern.config.json b/.mock/fern.config.json index b84e570..335f627 100644 --- a/.mock/fern.config.json +++ b/.mock/fern.config.json @@ -1,4 +1,4 @@ { - "organization": "hume", - "version": "0.43.5" -} + "organization" : "hume", + "version" : "0.43.5" +} \ No newline at end of file diff --git a/package.json b/package.json index dd32a7b..52a667e 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "hume", - "version": "0.9.0", + "version": "0.9.1", "private": false, "repository": "https://github.com/HumeAI/hume-typescript-sdk", "main": "./index.js", diff --git a/src/api/resources/empathicVoice/resources/chatGroups/client/Client.ts b/src/api/resources/empathicVoice/resources/chatGroups/client/Client.ts index 4f8c59e..ffd419b 100644 --- a/src/api/resources/empathicVoice/resources/chatGroups/client/Client.ts +++ b/src/api/resources/empathicVoice/resources/chatGroups/client/Client.ts @@ -76,8 +76,8 @@ export class ChatGroups { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.11", - "User-Agent": "hume/0.8.11", + "X-Fern-SDK-Version": "0.9.1", + "User-Agent": "hume/0.9.1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -176,8 +176,8 @@ export class ChatGroups { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.11", - "User-Agent": "hume/0.8.11", + "X-Fern-SDK-Version": "0.9.1", + "User-Agent": "hume/0.9.1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -276,8 +276,8 @@ export class ChatGroups { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.11", 
- "User-Agent": "hume/0.8.11", + "X-Fern-SDK-Version": "0.9.1", + "User-Agent": "hume/0.9.1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), diff --git a/src/api/resources/empathicVoice/resources/chats/client/Client.ts b/src/api/resources/empathicVoice/resources/chats/client/Client.ts index a05733f..72e1e4a 100644 --- a/src/api/resources/empathicVoice/resources/chats/client/Client.ts +++ b/src/api/resources/empathicVoice/resources/chats/client/Client.ts @@ -71,8 +71,8 @@ export class Chats { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.11", - "User-Agent": "hume/0.8.11", + "X-Fern-SDK-Version": "0.9.1", + "User-Agent": "hume/0.9.1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -180,8 +180,8 @@ export class Chats { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.11", - "User-Agent": "hume/0.8.11", + "X-Fern-SDK-Version": "0.9.1", + "User-Agent": "hume/0.9.1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), diff --git a/src/api/resources/empathicVoice/resources/configs/client/Client.ts b/src/api/resources/empathicVoice/resources/configs/client/Client.ts index d177c3c..d7593a3 100644 --- a/src/api/resources/empathicVoice/resources/configs/client/Client.ts +++ b/src/api/resources/empathicVoice/resources/configs/client/Client.ts @@ -76,8 +76,8 @@ export class Configs { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.11", - "User-Agent": "hume/0.8.11", + "X-Fern-SDK-Version": "0.9.1", + "User-Agent": "hume/0.9.1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -188,8 +188,8 @@ export class Configs { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.11", - "User-Agent": "hume/0.8.11", + "X-Fern-SDK-Version": "0.9.1", + "User-Agent": "hume/0.9.1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -286,8 +286,8 @@ export class Configs { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.11", - "User-Agent": "hume/0.8.11", + "X-Fern-SDK-Version": "0.9.1", + "User-Agent": "hume/0.9.1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -403,8 +403,8 @@ export class Configs { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.11", - "User-Agent": "hume/0.8.11", + "X-Fern-SDK-Version": "0.9.1", + "User-Agent": "hume/0.9.1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -484,8 +484,8 @@ export class Configs { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.11", - "User-Agent": "hume/0.8.11", + "X-Fern-SDK-Version": "0.9.1", + "User-Agent": "hume/0.9.1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -564,8 +564,8 @@ export class Configs { headers: 
{ "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.11", - "User-Agent": "hume/0.8.11", + "X-Fern-SDK-Version": "0.9.1", + "User-Agent": "hume/0.9.1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -648,8 +648,8 @@ export class Configs { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.11", - "User-Agent": "hume/0.8.11", + "X-Fern-SDK-Version": "0.9.1", + "User-Agent": "hume/0.9.1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -735,8 +735,8 @@ export class Configs { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.11", - "User-Agent": "hume/0.8.11", + "X-Fern-SDK-Version": "0.9.1", + "User-Agent": "hume/0.9.1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -821,8 +821,8 @@ export class Configs { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.11", - "User-Agent": "hume/0.8.11", + "X-Fern-SDK-Version": "0.9.1", + "User-Agent": "hume/0.9.1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), diff --git a/src/api/resources/empathicVoice/resources/customVoices/client/Client.ts b/src/api/resources/empathicVoice/resources/customVoices/client/Client.ts index 85c1378..7154669 100644 --- a/src/api/resources/empathicVoice/resources/customVoices/client/Client.ts +++ b/src/api/resources/empathicVoice/resources/customVoices/client/Client.ts @@ -69,8 +69,8 @@ export class CustomVoices { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.11", - "User-Agent": "hume/0.8.11", + "X-Fern-SDK-Version": "0.9.1", + "User-Agent": "hume/0.9.1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -155,8 +155,8 @@ export class CustomVoices { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.11", - "User-Agent": "hume/0.8.11", + "X-Fern-SDK-Version": "0.9.1", + "User-Agent": "hume/0.9.1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -237,8 +237,8 @@ export class CustomVoices { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.11", - "User-Agent": "hume/0.8.11", + "X-Fern-SDK-Version": "0.9.1", + "User-Agent": "hume/0.9.1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -324,8 +324,8 @@ export class CustomVoices { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.11", - "User-Agent": "hume/0.8.11", + "X-Fern-SDK-Version": "0.9.1", + "User-Agent": "hume/0.9.1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -403,8 +403,8 @@ export class CustomVoices { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.11", - "User-Agent": "hume/0.8.11", + "X-Fern-SDK-Version": "0.9.1", + 
"User-Agent": "hume/0.9.1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -483,8 +483,8 @@ export class CustomVoices { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.11", - "User-Agent": "hume/0.8.11", + "X-Fern-SDK-Version": "0.9.1", + "User-Agent": "hume/0.9.1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), diff --git a/src/api/resources/empathicVoice/resources/prompts/client/Client.ts b/src/api/resources/empathicVoice/resources/prompts/client/Client.ts index 6977c31..ac8fb57 100644 --- a/src/api/resources/empathicVoice/resources/prompts/client/Client.ts +++ b/src/api/resources/empathicVoice/resources/prompts/client/Client.ts @@ -75,8 +75,8 @@ export class Prompts { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.11", - "User-Agent": "hume/0.8.11", + "X-Fern-SDK-Version": "0.9.1", + "User-Agent": "hume/0.9.1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -169,8 +169,8 @@ export class Prompts { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.11", - "User-Agent": "hume/0.8.11", + "X-Fern-SDK-Version": "0.9.1", + "User-Agent": "hume/0.9.1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -267,8 +267,8 @@ export class Prompts { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.11", - "User-Agent": "hume/0.8.11", + "X-Fern-SDK-Version": "0.9.1", + "User-Agent": "hume/0.9.1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -354,8 +354,8 @@ export class Prompts { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.11", - "User-Agent": "hume/0.8.11", + "X-Fern-SDK-Version": "0.9.1", + "User-Agent": "hume/0.9.1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -435,8 +435,8 @@ export class Prompts { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.11", - "User-Agent": "hume/0.8.11", + "X-Fern-SDK-Version": "0.9.1", + "User-Agent": "hume/0.9.1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -515,8 +515,8 @@ export class Prompts { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.11", - "User-Agent": "hume/0.8.11", + "X-Fern-SDK-Version": "0.9.1", + "User-Agent": "hume/0.9.1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -599,8 +599,8 @@ export class Prompts { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.11", - "User-Agent": "hume/0.8.11", + "X-Fern-SDK-Version": "0.9.1", + "User-Agent": "hume/0.9.1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -686,8 +686,8 @@ export class Prompts { 
headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.11", - "User-Agent": "hume/0.8.11", + "X-Fern-SDK-Version": "0.9.1", + "User-Agent": "hume/0.9.1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -772,8 +772,8 @@ export class Prompts { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.11", - "User-Agent": "hume/0.8.11", + "X-Fern-SDK-Version": "0.9.1", + "User-Agent": "hume/0.9.1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), diff --git a/src/api/resources/empathicVoice/resources/tools/client/Client.ts b/src/api/resources/empathicVoice/resources/tools/client/Client.ts index c063263..0e1b599 100644 --- a/src/api/resources/empathicVoice/resources/tools/client/Client.ts +++ b/src/api/resources/empathicVoice/resources/tools/client/Client.ts @@ -75,8 +75,8 @@ export class Tools { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.11", - "User-Agent": "hume/0.8.11", + "X-Fern-SDK-Version": "0.9.1", + "User-Agent": "hume/0.9.1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -175,8 +175,8 @@ export class Tools { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.11", - "User-Agent": "hume/0.8.11", + "X-Fern-SDK-Version": "0.9.1", + "User-Agent": "hume/0.9.1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -275,8 +275,8 @@ export class Tools { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.11", - "User-Agent": "hume/0.8.11", + "X-Fern-SDK-Version": "0.9.1", + "User-Agent": "hume/0.9.1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -364,8 +364,8 @@ export class Tools { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.11", - "User-Agent": "hume/0.8.11", + "X-Fern-SDK-Version": "0.9.1", + "User-Agent": "hume/0.9.1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -445,8 +445,8 @@ export class Tools { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.11", - "User-Agent": "hume/0.8.11", + "X-Fern-SDK-Version": "0.9.1", + "User-Agent": "hume/0.9.1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -525,8 +525,8 @@ export class Tools { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.11", - "User-Agent": "hume/0.8.11", + "X-Fern-SDK-Version": "0.9.1", + "User-Agent": "hume/0.9.1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -611,8 +611,8 @@ export class Tools { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.11", - "User-Agent": "hume/0.8.11", + "X-Fern-SDK-Version": "0.9.1", + "User-Agent": "hume/0.9.1", "X-Fern-Runtime": core.RUNTIME.type, 
"X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -694,8 +694,8 @@ export class Tools { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.11", - "User-Agent": "hume/0.8.11", + "X-Fern-SDK-Version": "0.9.1", + "User-Agent": "hume/0.9.1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -780,8 +780,8 @@ export class Tools { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.11", - "User-Agent": "hume/0.8.11", + "X-Fern-SDK-Version": "0.9.1", + "User-Agent": "hume/0.9.1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), diff --git a/src/api/resources/expressionMeasurement/resources/batch/client/Client.ts b/src/api/resources/expressionMeasurement/resources/batch/client/Client.ts index c72634b..fee323b 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/client/Client.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/client/Client.ts @@ -84,8 +84,8 @@ export class Batch { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.11", - "User-Agent": "hume/0.8.11", + "X-Fern-SDK-Version": "0.9.1", + "User-Agent": "hume/0.9.1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -153,8 +153,8 @@ export class Batch { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.11", - "User-Agent": "hume/0.8.11", + "X-Fern-SDK-Version": "0.9.1", + "User-Agent": "hume/0.9.1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -221,8 +221,8 @@ export class Batch { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.11", - "User-Agent": "hume/0.8.11", + "X-Fern-SDK-Version": "0.9.1", + "User-Agent": "hume/0.9.1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -286,8 +286,8 @@ export class Batch { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.11", - "User-Agent": "hume/0.8.11", + "X-Fern-SDK-Version": "0.9.1", + "User-Agent": "hume/0.9.1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -342,8 +342,8 @@ export class Batch { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.11", - "User-Agent": "hume/0.8.11", + "X-Fern-SDK-Version": "0.9.1", + "User-Agent": "hume/0.9.1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -415,8 +415,8 @@ export class Batch { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.11", - "User-Agent": "hume/0.8.11", + "X-Fern-SDK-Version": "0.9.1", + "User-Agent": "hume/0.9.1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), diff --git a/src/version.ts b/src/version.ts index 2b00a52..9ba59d6 100644 --- a/src/version.ts +++ 
b/src/version.ts @@ -1 +1 @@ -export const SDK_VERSION = "0.8.11"; +export const SDK_VERSION = "0.9.1";