diff --git a/fastagency/studio/models/agents/assistant.py b/fastagency/studio/models/agents/assistant.py
index b323744b..201a2d7c 100644
--- a/fastagency/studio/models/agents/assistant.py
+++ b/fastagency/studio/models/agents/assistant.py
@@ -14,7 +14,12 @@ class AssistantAgent(AgentBaseModel):
     system_message: Annotated[
         str,
         Field(
-            description="The system message of the agent. This message is used to inform the agent about his role in the conversation"
+            description="The system message of the agent. This message is used to inform the agent about his role in the conversation",
+            json_schema_extra={
+                "metadata": {
+                    "tooltip_message": "The system message defines the agent's role and influences its responses. For example, telling the agent 'You are an expert in travel advice' will make its responses focus on travel."
+                }
+            },
         ),
     ] = "You are a helpful assistant. After you successfully answer all questions and there are no new questions asked after your response (e.g. there is no specific direction or question asked after you give a response), terminate the chat by outputting 'TERMINATE' (IMPORTANT: use all caps)"
diff --git a/fastagency/studio/models/agents/base.py b/fastagency/studio/models/agents/base.py
index 66f142ae..74af2ec3 100644
--- a/fastagency/studio/models/agents/base.py
+++ b/fastagency/studio/models/agents/base.py
@@ -24,6 +24,11 @@ class AgentBaseModel(Model):
         Field(
             title="LLM",
             description="LLM used by the agent for producing responses",
+            json_schema_extra={
+                "metadata": {
+                    "tooltip_message": "Choose the LLM the agent will use to generate responses."
+                }
+            },
         ),
     ]
 
@@ -32,6 +37,11 @@ class AgentBaseModel(Model):
         Field(
             title="Toolbox",
             description="Toolbox used by the agent for producing responses",
+            json_schema_extra={
+                "metadata": {
+                    "tooltip_message": "Choose the toolbox that the agent will use automatically when needed to solve user queries."
+                }
+            },
         ),
     ] = None
 
@@ -40,6 +50,11 @@ class AgentBaseModel(Model):
         Field(
             title="Toolbox",
             description="Toolbox used by the agent for producing responses",
+            json_schema_extra={
+                "metadata": {
+                    "tooltip_message": "Choose the toolbox that the agent will use automatically when needed to solve user queries."
+                }
+            },
         ),
     ] = None
 
@@ -48,6 +63,11 @@ class AgentBaseModel(Model):
         Field(
             title="Toolbox",
             description="Toolbox used by the agent for producing responses",
+            json_schema_extra={
+                "metadata": {
+                    "tooltip_message": "Choose the toolbox that the agent will use automatically when needed to solve user queries."
+                }
+            },
         ),
     ] = None
diff --git a/fastagency/studio/models/agents/user_proxy.py b/fastagency/studio/models/agents/user_proxy.py
index ebe6c381..010d8887 100644
--- a/fastagency/studio/models/agents/user_proxy.py
+++ b/fastagency/studio/models/agents/user_proxy.py
@@ -14,7 +14,12 @@ class UserProxyAgent(Model):
     max_consecutive_auto_reply: Annotated[
         Optional[int],
         Field(
-            description="The maximum number of consecutive auto-replies the agent can make"
+            description="The maximum number of consecutive auto-replies the agent can make",
+            json_schema_extra={
+                "metadata": {
+                    "tooltip_message": "Set the maximum number of consecutive auto replies the agent can make before requiring human approval. A higher value gives the agent more autonomy, while leaving it blank prompts permission for each reply. For example, if you set this to 2, the agent will reply twice and then require human approval before replying again."
+                }
+            },
         ),
     ] = None
diff --git a/fastagency/studio/models/agents/web_surfer.py b/fastagency/studio/models/agents/web_surfer.py
index 88ef8c08..240915cc 100644
--- a/fastagency/studio/models/agents/web_surfer.py
+++ b/fastagency/studio/models/agents/web_surfer.py
@@ -16,7 +16,18 @@
 
 @register("secret")
 class BingAPIKey(Model):
-    api_key: Annotated[str, Field(title="API Key", description="The API Key from Bing")]
+    api_key: Annotated[
+        str,
+        Field(
+            title="API Key",
+            description="The API Key from Bing",
+            json_schema_extra={
+                "metadata": {
+                    "tooltip_message": "The API key specified here will be used to authenticate requests to Bing services."
+                }
+            },
+        ),
+    ]
 
     @classmethod
     async def create_autogen(cls, model_id: UUID, user_id: UUID, **kwargs: Any) -> str:
@@ -87,14 +98,35 @@ class WebSurferAgent(AgentBaseModel):
         Field(
             title="Summarizer LLM",
             description="This LLM will be used to generated summary of all pages visited",
+            json_schema_extra={
+                "metadata": {
+                    "tooltip_message": "Select the summarizer LLM, which is used for generating precise and accurate summaries of web pages, while the LLM chosen above is used for handling regular web searches."
+                }
+            },
         ),
     ]
     viewport_size: Annotated[
-        int, Field(description="The viewport size of the browser")
+        int,
+        Field(
+            description="The viewport size of the browser",
+            json_schema_extra={
+                "metadata": {
+                    "tooltip_message": "Viewport size refers to the visible area of a webpage in the browser. Default is 4096. Modify only if a custom size is needed."
+                }
+            },
+        ),
     ] = 4096
     bing_api_key: Annotated[
         Optional[BingAPIKeyRef],
-        Field(title="Bing API Key", description="The Bing API key for the browser"),
+        Field(
+            title="Bing API Key",
+            description="The Bing API key for the browser",
+            json_schema_extra={
+                "metadata": {
+                    "tooltip_message": "Choose a Bing API key to allow the browser to access Bing's search and data services, improving information retrieval."
+                }
+            },
+        ),
     ] = None
 
     @classmethod
diff --git a/fastagency/studio/models/deployments/deployment.py b/fastagency/studio/models/deployments/deployment.py
index 98b6e552..fa68f08f 100644
--- a/fastagency/studio/models/deployments/deployment.py
+++ b/fastagency/studio/models/deployments/deployment.py
@@ -29,7 +29,14 @@ class Deployment(Model):
     name: Annotated[
         str,
         Field(
-            ..., description="The application name to use on the website.", min_length=1
+            ...,
+            description="The name of the SaaS application.",
+            min_length=1,
+            json_schema_extra={
+                "metadata": {
+                    "tooltip_message": "The application name to be used in the deployed SaaS application."
+                }
+            },
         ),
     ]
 
@@ -39,7 +46,12 @@ class Deployment(Model):
             ...,
             description="The name of the GitHub repository.",
             min_length=1,
-            json_schema_extra={"metadata": {"immutable_after_creation": True}},
+            json_schema_extra={
+                "metadata": {
+                    "immutable_after_creation": True,
+                    "tooltip_message": "The GitHub repository to be created. If the name contains spaces or special characters, GitHub will adjust it according to its naming rules. A random suffix will be added if the repository name already exists.",
+                }
+            },
         ),
     ]
 
@@ -50,7 +62,12 @@ class Deployment(Model):
             description="The name of the Fly.io application.",
             min_length=1,
             max_length=30,
-            json_schema_extra={"metadata": {"immutable_after_creation": True}},
+            json_schema_extra={
+                "metadata": {
+                    "immutable_after_creation": True,
+                    "tooltip_message": "The Fly.io application. This will be used to create and deploy your React, Node.js, and PostgreSQL apps to Fly.io.",
+                }
+            },
         ),
     ]
 
@@ -59,6 +76,11 @@ class Deployment(Model):
         Field(
             title="Team Name",
             description="The team that is used in the deployment",
+            json_schema_extra={
+                "metadata": {
+                    "tooltip_message": "Choose the team to be used for deployment. User messages are sent to the Initial agent of the chosen team, and the agent's responses are sent back to the user. This field can be updated anytime to switch teams, with changes reflected in real-time in your deployments."
+                }
+            },
         ),
     ]
     gh_token: Annotated[
         GitHubTokenRef,
         Field(
             title="GH Token",
             description="The GitHub token to use for creating a new repository",
-            json_schema_extra={"metadata": {"immutable_after_creation": True}},
+            json_schema_extra={
+                "metadata": {
+                    "immutable_after_creation": True,
+                    "tooltip_message": "Choose the GitHub token used for authenticating and managing access to your GitHub account.",
+                }
+            },
         ),
     ]
     fly_token: Annotated[
         FlyTokenRef,
         Field(
             title="Fly Token",
             description="The Fly.io token to use for deploying the deployment",
-            json_schema_extra={"metadata": {"immutable_after_creation": True}},
+            json_schema_extra={
+                "metadata": {
+                    "immutable_after_creation": True,
+                    "tooltip_message": "Choose the Fly.io token used for authenticating and managing access to your Fly.io account.",
+                }
+            },
         ),
     ]
diff --git a/fastagency/studio/models/llms/anthropic.py b/fastagency/studio/models/llms/anthropic.py
index feb53d9c..36bd2c1d 100644
--- a/fastagency/studio/models/llms/anthropic.py
+++ b/fastagency/studio/models/llms/anthropic.py
@@ -28,6 +28,11 @@ class AnthropicAPIKey(Model):
         Field(
             title="API Key",
             description="The API Key from Anthropic",
+            json_schema_extra={
+                "metadata": {
+                    "tooltip_message": "The API key specified here will be used to authenticate requests to Anthropic services."
+                }
+            },
         ),
     ]
 
@@ -57,7 +62,12 @@ class Anthropic(Model):
     model: Annotated[  # type: ignore[valid-type]
         AnthropicModels,
         Field(
-            description="The model to use for the Anthropic API, e.g. 'claude-3-5-sonnet-20240620'"
+            description="The model to use for the Anthropic API, e.g. 'claude-3-5-sonnet-20240620'",
+            json_schema_extra={
+                "metadata": {
+                    "tooltip_message": "Choose the model that the LLM should use to generate responses."
+                }
+            },
         ),
     ] = "claude-3-5-sonnet-20240620"
 
@@ -66,11 +76,25 @@ class Anthropic(Model):
         Field(
             title="API Key",
             description="The API Key from Anthropic",
+            json_schema_extra={
+                "metadata": {
+                    "tooltip_message": "Choose the API key that will be used to authenticate requests to Anthropic services."
+                }
+            },
         ),
     ]
 
     base_url: Annotated[
-        URL, Field(title="Base URL", description="The base URL of the Anthropic API")
+        URL,
+        Field(
+            title="Base URL",
+            description="The base URL of the Anthropic API",
+            json_schema_extra={
+                "metadata": {
+                    "tooltip_message": "The base URL that the LLM uses to interact with Anthropic services."
+                }
+            },
+        ),
     ] = URL(url="https://api.anthropic.com/v1")
 
     api_type: Annotated[
@@ -82,6 +106,11 @@ class Anthropic(Model):
         float,
         Field(
             description="The temperature to use for the model, must be between 0 and 2",
+            json_schema_extra={
+                "metadata": {
+                    "tooltip_message": "Adjust the temperature to change the response style. Lower values lead to more consistent answers, while higher values make the responses more creative. The values must be between 0 and 2."
+                }
+            },
             ge=0.0,
             le=2.0,
         ),
diff --git a/fastagency/studio/models/llms/azure.py b/fastagency/studio/models/llms/azure.py
index c6a6d348..938ce141 100644
--- a/fastagency/studio/models/llms/azure.py
+++ b/fastagency/studio/models/llms/azure.py
@@ -28,7 +28,16 @@
 @register("secret")
 class AzureOAIAPIKey(Model):
     api_key: Annotated[
-        str, Field(title="API Key", description="The API Key from Azure OpenAI")
+        str,
+        Field(
+            title="API Key",
+            description="The API Key from Azure OpenAI",
+            json_schema_extra={
+                "metadata": {
+                    "tooltip_message": "The API key specified here will be used to authenticate requests to Azure OpenAI services."
+                }
+            },
+        ),
     ]
 
     @classmethod
@@ -57,17 +66,39 @@ class AzureOAI(Model):
     model: Annotated[
         str,
         Field(
-            description="The model to use for the Azure OpenAI API, e.g. 'gpt-3.5-turbo'"
+            description="The model to use for the Azure OpenAI API, e.g. 'gpt-3.5-turbo'",
+            json_schema_extra={
+                "metadata": {
+                    "tooltip_message": "The model that the LLM uses to interact with Azure OpenAI services."
+                }
+            },
         ),
     ] = "gpt-3.5-turbo"
 
     api_key: Annotated[
         AzureOAIAPIKeyRef,
-        Field(title="API Key", description="The API Key from Azure OpenAI"),
+        Field(
+            title="API Key",
+            description="The API Key from Azure OpenAI",
+            json_schema_extra={
+                "metadata": {
+                    "tooltip_message": "Choose the API key that will be used to authenticate requests to Azure OpenAI services."
+                }
+            },
+        ),
     ]
 
     base_url: Annotated[
-        URL, Field(title="Base URL", description="The base URL of the Azure OpenAI API")
+        URL,
+        Field(
+            title="Base URL",
+            description="The base URL of the Azure OpenAI API",
+            json_schema_extra={
+                "metadata": {
+                    "tooltip_message": "The base URL that the LLM uses to interact with Azure OpenAI services."
+                }
+            },
+        ),
     ] = UrlModel(url="https://{your-resource-name}.openai.azure.com").url
 
     api_type: Annotated[
@@ -80,6 +111,11 @@ class AzureOAI(Model):
         Field(
             title="API Version",
             description="The version of the Azure OpenAI API, e.g. '2024-02-01'",
+            json_schema_extra={
+                "metadata": {
+                    "tooltip_message": "The version of the Azure OpenAI API that the LLM uses to interact with Azure OpenAI services."
+                }
+            },
         ),
     ] = "2024-02-01"
 
@@ -87,6 +123,11 @@ class AzureOAI(Model):
         float,
         Field(
             description="The temperature to use for the model, must be between 0 and 2",
+            json_schema_extra={
+                "metadata": {
+                    "tooltip_message": "Adjust the temperature to change the response style. Lower values lead to more consistent answers, while higher values make the responses more creative. The values must be between 0 and 2."
+                }
+            },
             ge=0.0,
             le=2.0,
         ),
diff --git a/fastagency/studio/models/llms/openai.py b/fastagency/studio/models/llms/openai.py
index 27fb8036..abdafec7 100644
--- a/fastagency/studio/models/llms/openai.py
+++ b/fastagency/studio/models/llms/openai.py
@@ -43,6 +43,11 @@ class OpenAIAPIKey(Model):
         Field(
             title="API Key",
             description="The API Key from OpenAI",
+            json_schema_extra={
+                "metadata": {
+                    "tooltip_message": "The API key specified here will be used to authenticate requests to OpenAI services."
+                }
+            },
         ),
     ]
 
@@ -74,7 +79,14 @@ def validate_api_key(cls: type["OpenAIAPIKey"], value: Any) -> Any:
 class OpenAI(Model):
     model: Annotated[  # type: ignore[valid-type]
         OpenAIModels,
-        Field(description="The model to use for the OpenAI API, e.g. 'gpt-3.5-turbo'"),
+        Field(
+            description="The model to use for the OpenAI API, e.g. 'gpt-3.5-turbo'",
+            json_schema_extra={
+                "metadata": {
+                    "tooltip_message": "Choose the model that the LLM uses to interact with OpenAI services."
+                }
+            },
+        ),
     ] = "gpt-3.5-turbo"
 
     api_key: Annotated[
@@ -82,11 +94,25 @@ class OpenAI(Model):
         Field(
             title="API Key",
             description="The API Key from OpenAI",
+            json_schema_extra={
+                "metadata": {
+                    "tooltip_message": "Choose the API key that will be used to authenticate requests to OpenAI services."
+                }
+            },
         ),
     ]
 
     base_url: Annotated[
-        URL, Field(title="Base URL", description="The base URL of the OpenAI API")
+        URL,
+        Field(
+            title="Base URL",
+            description="The base URL of the OpenAI API",
+            json_schema_extra={
+                "metadata": {
+                    "tooltip_message": "The base URL that the LLM uses to interact with OpenAI services."
+                }
+            },
+        ),
     ] = URL(url="https://api.openai.com/v1")
 
     api_type: Annotated[
@@ -98,6 +124,11 @@ class OpenAI(Model):
         float,
         Field(
             description="The temperature to use for the model, must be between 0 and 2",
+            json_schema_extra={
+                "metadata": {
+                    "tooltip_message": "Adjust the temperature to change the response style. Lower values lead to more consistent answers, while higher values make the responses more creative. The values must be between 0 and 2."
+                }
+            },
             ge=0.0,
             le=2.0,
         ),
diff --git a/fastagency/studio/models/llms/together.py b/fastagency/studio/models/llms/together.py
index 05404687..4af8e8d5 100644
--- a/fastagency/studio/models/llms/together.py
+++ b/fastagency/studio/models/llms/together.py
@@ -115,7 +115,12 @@ class TogetherAIAPIKey(Model):
         str,
         Field(
             title="API Key",
-            description="The API Key from Together.ai",
+            description="The API Key from Together AI",
+            json_schema_extra={
+                "metadata": {
+                    "tooltip_message": "The API key specified here will be used to authenticate requests to Together AI services."
+                }
+            },
             min_length=64,
             max_length=64,
         ),
@@ -139,16 +144,40 @@ async def create_autogen(cls, model_id: UUID, user_id: UUID, **kwargs: Any) -> s
 class TogetherAI(Model):
     model: Annotated[  # type: ignore[valid-type]
         TogetherModels,
-        Field(description="The model to use for the Together API"),
+        Field(
+            description="The model to use for the Together API",
+            json_schema_extra={
+                "metadata": {
+                    "tooltip_message": "Choose the model that the LLM uses to interact with Together AI services."
+                }
+            },
+        ),
     ] = "Meta Llama 3 70B Instruct Reference"
 
     api_key: Annotated[
         TogetherAIAPIKeyRef,
-        Field(title="API Key", description="The API Key from Together.ai"),
+        Field(
+            title="API Key",
+            description="The API Key from Together.ai",
+            json_schema_extra={
+                "metadata": {
+                    "tooltip_message": "Choose the API key that will be used to authenticate requests to Together AI services."
+                }
+            },
+        ),
     ]
 
     base_url: Annotated[
-        URL, Field(title="Base URL", description="The base URL of the OpenAI API")
+        URL,
+        Field(
+            title="Base URL",
+            description="The base URL of the OpenAI API",
+            json_schema_extra={
+                "metadata": {
+                    "tooltip_message": "The base URL that the LLM uses to interact with Together AI services."
+                }
+            },
+        ),
     ] = URL(url="https://api.together.xyz/v1")
 
     api_type: Annotated[
@@ -162,6 +191,11 @@ class TogetherAI(Model):
         float,
         Field(
             description="The temperature to use for the model, must be between 0 and 2",
+            json_schema_extra={
+                "metadata": {
+                    "tooltip_message": "Adjust the temperature to change the response style. Lower values lead to more consistent answers, while higher values make the responses more creative. The values must be between 0 and 2."
+                }
+            },
             ge=0.0,
             le=2.0,
         ),
diff --git a/fastagency/studio/models/secrets/fly_token.py b/fastagency/studio/models/secrets/fly_token.py
index 44970148..793fb83c 100644
--- a/fastagency/studio/models/secrets/fly_token.py
+++ b/fastagency/studio/models/secrets/fly_token.py
@@ -16,6 +16,11 @@ class FlyToken(Model):
         Field(
             title="Fly Token",
             description="The Fly.io token to use for deploying the deployment",
+            json_schema_extra={
+                "metadata": {
+                    "tooltip_message": "The token specified here will be used to authenticate your access to Fly.io services."
+                }
+            },
         ),
     ]
 
diff --git a/fastagency/studio/models/secrets/github_token.py b/fastagency/studio/models/secrets/github_token.py
index 7d15b9d1..8d7fe456 100644
--- a/fastagency/studio/models/secrets/github_token.py
+++ b/fastagency/studio/models/secrets/github_token.py
@@ -16,6 +16,11 @@ class GitHubToken(Model):
         Field(
             title="GH Token",
             description="The GitHub token to use for creating a new repository",
+            json_schema_extra={
+                "metadata": {
+                    "tooltip_message": "The token specified here will be used to authenticate your access to GitHub services."
+                }
+            },
         ),
     ]
 
diff --git a/fastagency/studio/models/teams/base.py b/fastagency/studio/models/teams/base.py
index 61b7bcca..ea776cc4 100644
--- a/fastagency/studio/models/teams/base.py
+++ b/fastagency/studio/models/teams/base.py
@@ -21,7 +21,12 @@ class TeamBaseModel(Model):
     is_termination_msg_regex: Annotated[
         str,
         Field(
-            description="Whether the message is a termination message or not. If it is a termination message, the chat will terminate."
+            description="Whether the message is a termination message or not. If it is a termination message, the chat will terminate.",
+            json_schema_extra={
+                "metadata": {
+                    "tooltip_message": "The termination message regular expression format. The LLM uses this pattern to decide when to end the chat if the message matches."
+                }
+            },
         ),
     ] = "TERMINATE"
 
@@ -30,6 +35,11 @@ class TeamBaseModel(Model):
         Field(
             title="Human Input Mode",
             description="Mode for human input",
+            json_schema_extra={
+                "metadata": {
+                    "tooltip_message": "Select the human input mode to control the level of human involvement. Modes include NEVER (full autonomy), TERMINATE (human input requested upon termination), and ALWAYS (input required after every message)."
+                }
+            },
         ),
     ] = "ALWAYS"
 
diff --git a/fastagency/studio/models/teams/two_agent_teams.py b/fastagency/studio/models/teams/two_agent_teams.py
index 4592c618..b74408a1 100644
--- a/fastagency/studio/models/teams/two_agent_teams.py
+++ b/fastagency/studio/models/teams/two_agent_teams.py
@@ -43,6 +43,11 @@ class TwoAgentTeam(TeamBaseModel):
         Field(
             title="Initial Agent",
             description="Agent that starts the conversation",
+            json_schema_extra={
+                "metadata": {
+                    "tooltip_message": "Select the Initial Agent, the agent responsible for task orchestration. It interacts with users and assigns tasks to Secondary Agent, enhancing the efficiency of complex operations."
+                }
+            },
         ),
     ]
     secondary_agent: Annotated[
@@ -50,6 +55,11 @@ class TwoAgentTeam(TeamBaseModel):
         Field(
             title="Secondary Agent",
             description="Agent that continues the conversation",
+            json_schema_extra={
+                "metadata": {
+                    "tooltip_message": "Select the Secondary Agent, the agent responsible for collaborating with the Initial Agent in performing specialized tasks. Secondary Agents enhance efficiency by focusing on specific roles, such as data analysis or code execution."
+                }
+            },
         ),
     ]
 
diff --git a/fastagency/studio/models/toolboxes/toolbox.py b/fastagency/studio/models/toolboxes/toolbox.py
index b1113099..d3b8b956 100644
--- a/fastagency/studio/models/toolboxes/toolbox.py
+++ b/fastagency/studio/models/toolboxes/toolbox.py
@@ -27,6 +27,11 @@ class OpenAPIAuthToken(Model):
         str,
         Field(
             description="Authentication token for OpenAPI routes",
+            json_schema_extra={
+                "metadata": {
+                    "tooltip_message": "The token specified here will be used to authenticate requests to OpenAPI routes."
+                }
+            },
         ),
     ]
 
@@ -72,6 +77,11 @@ class Toolbox(Model):
         Field(
             title="OpenAPI URL",
             description="The URL of OpenAPI specification file",
+            json_schema_extra={
+                "metadata": {
+                    "tooltip_message": "Enter the URL of the openapi.json file for your hosted OpenAPI docs. For example, if your docs are hosted at 'https://weather.tools.fastagency.ai/docs', enter 'https://weather.tools.fastagency.ai/openapi.json'."
+                }
+            },
         ),
     ]
     openapi_auth: Annotated[
@@ -79,6 +89,11 @@ class Toolbox(Model):
         Field(
             title="OpenAPI Auth",
             description="Authentication information for the API mentioned in the OpenAPI specification",
+            json_schema_extra={
+                "metadata": {
+                    "tooltip_message": "Choose the authentication method that will be used to authenticate requests to the above OpenAPI routes. Leave this field as it is if the OpenAPI routes do not require authentication."
+                }
+            },
         ),
     ] = None
 
diff --git a/tests/studio/models/agents/test_assistant.py b/tests/studio/models/agents/test_assistant.py
index 1c47309e..078abf45 100644
--- a/tests/studio/models/agents/test_assistant.py
+++ b/tests/studio/models/agents/test_assistant.py
@@ -188,29 +188,44 @@ def test_assistant_model_schema(self) -> None:
                         {"$ref": "#/$defs/TogetherAIRef"},
                     ],
                     "description": "LLM used by the agent for producing responses",
+                    "metadata": {
+                        "tooltip_message": "Choose the LLM the agent will use to generate responses."
+                    },
                     "title": "LLM",
                 },
                 "toolbox_1": {
                     "anyOf": [{"$ref": "#/$defs/ToolboxRef"}, {"type": "null"}],
                     "default": None,
                     "description": "Toolbox used by the agent for producing responses",
+                    "metadata": {
+                        "tooltip_message": "Choose the toolbox that the agent will use automatically when needed to solve user queries."
+                    },
                     "title": "Toolbox",
                 },
                 "toolbox_2": {
                     "anyOf": [{"$ref": "#/$defs/ToolboxRef"}, {"type": "null"}],
                     "default": None,
                     "description": "Toolbox used by the agent for producing responses",
+                    "metadata": {
+                        "tooltip_message": "Choose the toolbox that the agent will use automatically when needed to solve user queries."
+                    },
                     "title": "Toolbox",
                 },
                 "toolbox_3": {
                     "anyOf": [{"$ref": "#/$defs/ToolboxRef"}, {"type": "null"}],
                     "default": None,
                     "description": "Toolbox used by the agent for producing responses",
+                    "metadata": {
+                        "tooltip_message": "Choose the toolbox that the agent will use automatically when needed to solve user queries."
+                    },
                     "title": "Toolbox",
                 },
                 "system_message": {
                     "default": "You are a helpful assistant. After you successfully answer all questions and there are no new questions asked after your response (e.g. there is no specific direction or question asked after you give a response), terminate the chat by outputting 'TERMINATE' (IMPORTANT: use all caps)",
                     "description": "The system message of the agent. This message is used to inform the agent about his role in the conversation",
+                    "metadata": {
+                        "tooltip_message": "The system message defines the agent's role and influences its responses. For example, telling the agent 'You are an expert in travel advice' will make its responses focus on travel."
+                    },
                     "title": "System Message",
                     "type": "string",
                 },
diff --git a/tests/studio/models/agents/test_web_surfer.py b/tests/studio/models/agents/test_web_surfer.py
index 45b31603..01292201 100644
--- a/tests/studio/models/agents/test_web_surfer.py
+++ b/tests/studio/models/agents/test_web_surfer.py
@@ -236,24 +236,36 @@ def test_web_surfer_model_schema(self) -> None:
                         {"$ref": "#/$defs/TogetherAIRef"},
                     ],
                     "description": "LLM used by the agent for producing responses",
+                    "metadata": {
+                        "tooltip_message": "Choose the LLM the agent will use to generate responses."
+                    },
                     "title": "LLM",
                 },
                 "toolbox_1": {
                     "anyOf": [{"$ref": "#/$defs/ToolboxRef"}, {"type": "null"}],
                     "default": None,
                     "description": "Toolbox used by the agent for producing responses",
+                    "metadata": {
+                        "tooltip_message": "Choose the toolbox that the agent will use automatically when needed to solve user queries."
+                    },
                     "title": "Toolbox",
                 },
                 "toolbox_2": {
                     "anyOf": [{"$ref": "#/$defs/ToolboxRef"}, {"type": "null"}],
                     "default": None,
                     "description": "Toolbox used by the agent for producing responses",
+                    "metadata": {
+                        "tooltip_message": "Choose the toolbox that the agent will use automatically when needed to solve user queries."
+                    },
                     "title": "Toolbox",
                 },
                 "toolbox_3": {
                     "anyOf": [{"$ref": "#/$defs/ToolboxRef"}, {"type": "null"}],
                     "default": None,
                     "description": "Toolbox used by the agent for producing responses",
+                    "metadata": {
+                        "tooltip_message": "Choose the toolbox that the agent will use automatically when needed to solve user queries."
+                    },
                     "title": "Toolbox",
                 },
                 "summarizer_llm": {
@@ -264,11 +276,17 @@ def test_web_surfer_model_schema(self) -> None:
                         {"$ref": "#/$defs/TogetherAIRef"},
                     ],
                     "description": "This LLM will be used to generated summary of all pages visited",
+                    "metadata": {
+                        "tooltip_message": "Select the summarizer LLM, which is used for generating precise and accurate summaries of web pages, while the LLM chosen above is used for handling regular web searches."
+                    },
                     "title": "Summarizer LLM",
                 },
                 "viewport_size": {
                     "default": 4096,
                     "description": "The viewport size of the browser",
+                    "metadata": {
+                        "tooltip_message": "Viewport size refers to the visible area of a webpage in the browser. Default is 4096. Modify only if a custom size is needed."
+                    },
                     "title": "Viewport Size",
                     "type": "integer",
                 },
@@ -276,6 +294,9 @@ def test_web_surfer_model_schema(self) -> None:
                     "anyOf": [{"$ref": "#/$defs/BingAPIKeyRef"}, {"type": "null"}],
                     "default": None,
                     "description": "The Bing API key for the browser",
+                    "metadata": {
+                        "tooltip_message": "Choose a Bing API key to allow the browser to access Bing's search and data services, improving information retrieval."
+                    },
                     "title": "Bing API Key",
                 },
             },
diff --git a/tests/studio/models/deployments/test_deployment.py b/tests/studio/models/deployments/test_deployment.py
index 34acfc4d..5b4aa341 100644
--- a/tests/studio/models/deployments/test_deployment.py
+++ b/tests/studio/models/deployments/test_deployment.py
@@ -141,14 +141,20 @@ def test_deployment_model_schema(self, pydantic_version: float) -> None:
             },
             "properties": {
                 "name": {
-                    "description": "The application name to use on the website.",
+                    "description": "The name of the SaaS application.",
+                    "metadata": {
+                        "tooltip_message": "The application name to be used in the deployed SaaS application."
+                    },
                     "minLength": 1,
                     "title": "Name",
                     "type": "string",
                 },
                 "repo_name": {
                     "description": "The name of the GitHub repository.",
-                    "metadata": {"immutable_after_creation": True},
+                    "metadata": {
+                        "immutable_after_creation": True,
+                        "tooltip_message": "The GitHub repository to be created. If the name contains spaces or special characters, GitHub will adjust it according to its naming rules. A random suffix will be added if the repository name already exists.",
+                    },
                     "minLength": 1,
                     "title": "Repo Name",
                     "type": "string",
@@ -156,7 +162,10 @@ def test_deployment_model_schema(self, pydantic_version: float) -> None:
                 "fly_app_name": {
                     "description": "The name of the Fly.io application.",
                     "maxLength": 30,
-                    "metadata": {"immutable_after_creation": True},
+                    "metadata": {
+                        "immutable_after_creation": True,
+                        "tooltip_message": "The Fly.io application. This will be used to create and deploy your React, Node.js, and PostgreSQL apps to Fly.io.",
+                    },
                     "minLength": 1,
                     "title": "Fly App Name",
                     "type": "string",
@@ -164,18 +173,27 @@ def test_deployment_model_schema(self, pydantic_version: float) -> None:
                 "team": {
                     "$ref": "#/$defs/TwoAgentTeamRef",
                     "description": "The team that is used in the deployment",
+                    "metadata": {
+                        "tooltip_message": "Choose the team to be used for deployment. User messages are sent to the Initial agent of the chosen team, and the agent's responses are sent back to the user. This field can be updated anytime to switch teams, with changes reflected in real-time in your deployments."
+                    },
                     "title": "Team Name",
                 },
                 "gh_token": {
                     "$ref": "#/$defs/GitHubTokenRef",
                     "description": "The GitHub token to use for creating a new repository",
-                    "metadata": {"immutable_after_creation": True},
+                    "metadata": {
+                        "immutable_after_creation": True,
+                        "tooltip_message": "Choose the GitHub token used for authenticating and managing access to your GitHub account.",
+                    },
                     "title": "GH Token",
                 },
                 "fly_token": {
                     "$ref": "#/$defs/FlyTokenRef",
                     "description": "The Fly.io token to use for deploying the deployment",
-                    "metadata": {"immutable_after_creation": True},
+                    "metadata": {
+                        "immutable_after_creation": True,
+                        "tooltip_message": "Choose the Fly.io token used for authenticating and managing access to your Fly.io account.",
+                    },
                     "title": "Fly Token",
                 },
             },
diff --git a/tests/studio/models/llms/test_anthropic.py b/tests/studio/models/llms/test_anthropic.py
index df1622dd..1fe2510d 100644
--- a/tests/studio/models/llms/test_anthropic.py
+++ b/tests/studio/models/llms/test_anthropic.py
@@ -101,12 +101,18 @@ def test_anthropic_model_schema(self, pydantic_version: float) -> None:
                         "claude-3-sonnet-20240229",
                         "claude-3-haiku-20240307",
                     ],
+                    "metadata": {
+                        "tooltip_message": "Choose the model that the LLM should use to generate responses."
+                    },
                     "title": "Model",
                     "type": "string",
                 },
                 "api_key": {
                     "$ref": "#/$defs/AnthropicAPIKeyRef",
                     "description": "The API Key from Anthropic",
+                    "metadata": {
+                        "tooltip_message": "Choose the API key that will be used to authenticate requests to Anthropic services."
+                    },
                     "title": "API Key",
                 },
                 "base_url": {
@@ -114,6 +120,9 @@ def test_anthropic_model_schema(self, pydantic_version: float) -> None:
                     "description": "The base URL of the Anthropic API",
                     "format": "uri",
                     "maxLength": 2083,
+                    "metadata": {
+                        "tooltip_message": "The base URL that the LLM uses to interact with Anthropic services."
+                    },
                     "minLength": 1,
                     "title": "Base URL",
                     "type": "string",
@@ -129,6 +138,9 @@ def test_anthropic_model_schema(self, pydantic_version: float) -> None:
                 "temperature": {
                     "default": 0.8,
                     "description": "The temperature to use for the model, must be between 0 and 2",
+                    "metadata": {
+                        "tooltip_message": "Adjust the temperature to change the response style. Lower values lead to more consistent answers, while higher values make the responses more creative. The values must be between 0 and 2."
+                    },
                     "maximum": 2.0,
                     "minimum": 0.0,
                     "title": "Temperature",
diff --git a/tests/studio/models/llms/test_azure.py b/tests/studio/models/llms/test_azure.py
index c6f4ddd5..91c3699a 100644
--- a/tests/studio/models/llms/test_azure.py
+++ b/tests/studio/models/llms/test_azure.py
@@ -126,12 +126,18 @@ def test_azure_model_schema(self, pydantic_version: float) -> None:
                 "model": {
                     "default": "gpt-3.5-turbo",
                     "description": "The model to use for the Azure OpenAI API, e.g. 'gpt-3.5-turbo'",
+                    "metadata": {
+                        "tooltip_message": "The model that the LLM uses to interact with Azure OpenAI services."
+                    },
                     "title": "Model",
                     "type": "string",
                 },
                 "api_key": {
                     "$ref": "#/$defs/AzureOAIAPIKeyRef",
                     "description": "The API Key from Azure OpenAI",
+                    "metadata": {
+                        "tooltip_message": "Choose the API key that will be used to authenticate requests to Azure OpenAI services."
+                    },
                     "title": "API Key",
                 },
                 "base_url": {
@@ -139,6 +145,9 @@ def test_azure_model_schema(self, pydantic_version: float) -> None:
                     "description": "The base URL of the Azure OpenAI API",
                     "format": "uri",
                     "maxLength": 2083,
+                    "metadata": {
+                        "tooltip_message": "The base URL that the LLM uses to interact with Azure OpenAI services."
+                    },
                     "minLength": 1,
                     "title": "Base URL",
                     "type": "string",
@@ -164,6 +173,9 @@ def test_azure_model_schema(self, pydantic_version: float) -> None:
                         "2024-05-01-preview",
                         "2024-02-01",
                     ],
+                    "metadata": {
+                        "tooltip_message": "The version of the Azure OpenAI API that the LLM uses to interact with Azure OpenAI services."
+                    },
                     "title": "API Version",
                     "type": "string",
                 },
@@ -171,6 +183,9 @@ def test_azure_model_schema(self, pydantic_version: float) -> None:
                     "default": 0.8,
                     "description": "The temperature to use for the model, must be between 0 and 2",
                     "maximum": 2.0,
+                    "metadata": {
+                        "tooltip_message": "Adjust the temperature to change the response style. Lower values lead to more consistent answers, while higher values make the responses more creative. The values must be between 0 and 2."
+                    },
                     "minimum": 0.0,
                     "title": "Temperature",
                     "type": "number",
@@ -185,6 +200,7 @@ def test_azure_model_schema(self, pydantic_version: float) -> None:
         if pydantic_version < 2.9:
             # print(f"pydantic28_delta = '{jsondiff.diff(expected, schema, dump=True)}'")
             expected = jsondiff.patch(json.dumps(expected), pydantic28_delta, load=True)
+
         assert schema == expected
 
     @pytest.mark.asyncio
diff --git a/tests/studio/models/llms/test_openai.py b/tests/studio/models/llms/test_openai.py
index 29fec5ed..87ffb339 100644
--- a/tests/studio/models/llms/test_openai.py
+++ b/tests/studio/models/llms/test_openai.py
@@ -150,12 +150,18 @@ def test_openai_schema(self, pydantic_version: float) -> None:
                         "gpt-4o-2024-05-13",
                         "gpt-4-turbo",
                     ],
+                    "metadata": {
+                        "tooltip_message": "Choose the model that the LLM uses to interact with OpenAI services."
+                    },
                     "title": "Model",
                     "type": "string",
                 },
                 "api_key": {
                     "$ref": "#/$defs/OpenAIAPIKeyRef",
                     "description": "The API Key from OpenAI",
+                    "metadata": {
+                        "tooltip_message": "Choose the API key that will be used to authenticate requests to OpenAI services."
+                    },
                     "title": "API Key",
                 },
                 "base_url": {
@@ -163,6 +169,9 @@ def test_openai_schema(self, pydantic_version: float) -> None:
                     "description": "The base URL of the OpenAI API",
                     "format": "uri",
                     "maxLength": 2083,
+                    "metadata": {
+                        "tooltip_message": "The base URL that the LLM uses to interact with OpenAI services."
+                    },
                     "minLength": 1,
                     "title": "Base URL",
                     "type": "string",
@@ -180,6 +189,9 @@ def test_openai_schema(self, pydantic_version: float) -> None:
                     "description": "The temperature to use for the model, must be between 0 and 2",
                     "maximum": 2.0,
                     "minimum": 0.0,
+                    "metadata": {
+                        "tooltip_message": "Adjust the temperature to change the response style. Lower values lead to more consistent answers, while higher values make the responses more creative. The values must be between 0 and 2."
+                    },
                     "title": "Temperature",
                     "type": "number",
                 },
diff --git a/tests/studio/models/llms/test_together.py b/tests/studio/models/llms/test_together.py
index a051e3d3..d1ec3184 100644
--- a/tests/studio/models/llms/test_together.py
+++ b/tests/studio/models/llms/test_together.py
@@ -130,12 +130,18 @@ def test_togetherai_schema(self, pydantic_version: float) -> None:
                 "model": {
                     "default": "Meta Llama 3 70B Instruct Reference",
                     "description": "The model to use for the Together API",
+                    "metadata": {
+                        "tooltip_message": "Choose the model that the LLM uses to interact with Together AI services."
+                    },
                     "title": "Model",
                     "type": "string",
                 },
                 "api_key": {
                     "$ref": "#/$defs/TogetherAIAPIKeyRef",
                     "description": "The API Key from Together.ai",
+                    "metadata": {
+                        "tooltip_message": "Choose the API key that will be used to authenticate requests to Together AI services."
+                    },
                     "title": "API Key",
                 },
                 "base_url": {
@@ -144,6 +150,9 @@ def test_togetherai_schema(self, pydantic_version: float) -> None:
                     "format": "uri",
                     "maxLength": 2083,
                     "minLength": 1,
+                    "metadata": {
+                        "tooltip_message": "The base URL that the LLM uses to interact with Together AI services."
+                    },
                     "title": "Base URL",
                     "type": "string",
                 },
@@ -159,6 +168,9 @@ def test_togetherai_schema(self, pydantic_version: float) -> None:
                     "default": 0.8,
                     "description": "The temperature to use for the model, must be between 0 and 2",
                     "maximum": 2.0,
+                    "metadata": {
+                        "tooltip_message": "Adjust the temperature to change the response style. Lower values lead to more consistent answers, while higher values make the responses more creative. The values must be between 0 and 2."
+                    },
                     "minimum": 0.0,
                     "title": "Temperature",
                     "type": "number",
diff --git a/tests/studio/models/teams/test_two_agents_team.py b/tests/studio/models/teams/test_two_agents_team.py
index 5a8689a3..1a304223 100644
--- a/tests/studio/models/teams/test_two_agents_team.py
+++ b/tests/studio/models/teams/test_two_agents_team.py
@@ -148,6 +148,9 @@ def test_two_agents_team_schema(self) -> None:
                 "is_termination_msg_regex": {
                     "default": "TERMINATE",
                     "description": "Whether the message is a termination message or not. If it is a termination message, the chat will terminate.",
+                    "metadata": {
+                        "tooltip_message": "The termination message regular expression format. The LLM uses this pattern to decide when to end the chat if the message matches."
+                    },
                     "title": "Is Termination Msg Regex",
                     "type": "string",
                 },
@@ -155,6 +158,9 @@ def test_two_agents_team_schema(self) -> None:
                     "default": "ALWAYS",
                     "description": "Mode for human input",
                     "enum": ["ALWAYS", "TERMINATE", "NEVER"],
+                    "metadata": {
+                        "tooltip_message": "Select the human input mode to control the level of human involvement. Modes include NEVER (full autonomy), TERMINATE (human input requested upon termination), and ALWAYS (input required after every message)."
+                    },
                     "title": "Human Input Mode",
                     "type": "string",
                 },
@@ -165,6 +171,9 @@ def test_two_agents_team_schema(self) -> None:
                     "anyOf": [
                         {"$ref": "#/$defs/AssistantAgentRef"},
                         {"$ref": "#/$defs/WebSurferAgentRef"},
                     ],
                     "description": "Agent that starts the conversation",
+                    "metadata": {
+                        "tooltip_message": "Select the Initial Agent, the agent responsible for task orchestration. It interacts with users and assigns tasks to Secondary Agent, enhancing the efficiency of complex operations."
+                    },
                     "title": "Initial Agent",
                 },
                 "secondary_agent": {
@@ -174,6 +183,9 @@ def test_two_agents_team_schema(self) -> None:
                     "anyOf": [
                         {"$ref": "#/$defs/AssistantAgentRef"},
                         {"$ref": "#/$defs/WebSurferAgentRef"},
                     ],
                     "description": "Agent that continues the conversation",
+                    "metadata": {
+                        "tooltip_message": "Select the Secondary Agent, the agent responsible for collaborating with the Initial Agent in performing specialized tasks. Secondary Agents enhance efficiency by focusing on specific roles, such as data analysis or code execution."
+                    },
                     "title": "Secondary Agent",
                 },
             },
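The pattern used throughout this diff relies on Pydantic v2 merging `json_schema_extra` into the generated JSON schema, which is exactly what the updated tests assert. The following is a minimal sketch of that mechanism under the assumption of Pydantic v2; `ExampleAgent` and its field are hypothetical and are not part of the changed code.

```python
from typing import Annotated

from pydantic import BaseModel, Field


class ExampleAgent(BaseModel):
    # Hypothetical field mirroring the tooltip pattern added in this diff.
    name: Annotated[
        str,
        Field(
            description="The name of the agent",
            json_schema_extra={
                "metadata": {
                    "tooltip_message": "Shown as a tooltip next to this field in the UI."
                }
            },
        ),
    ] = "My Agent"


schema = ExampleAgent.model_json_schema()
# json_schema_extra is merged into the property's schema, so the metadata dict
# sits alongside "description", "title", and "type" for the "name" property.
print(schema["properties"]["name"]["metadata"]["tooltip_message"])
```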