From 69ef1d1c27ddd25a650fe4c6685198fd6fb478e4 Mon Sep 17 00:00:00 2001 From: Jeremiah Lowin <153965+jlowin@users.noreply.github.com> Date: Thu, 12 Sep 2024 08:51:38 -0400 Subject: [PATCH 1/3] Support automatic model configuration on agents --- docs/concepts/agents.mdx | 3 +- docs/examples/call-routing.mdx | 1 - docs/examples/features/multi-llm.mdx | 9 +- docs/examples/headline-categorization.mdx | 5 +- docs/examples/named-entity-recognition.mdx | 5 +- docs/examples/sentiment-classifier.mdx | 5 +- docs/guides/llms.mdx | 102 +++++----- docs/patterns/running-tasks.mdx | 5 +- docs/quickstart.mdx | 12 +- pyproject.toml | 2 + src/controlflow/agents/agent.py | 12 +- src/controlflow/defaults.py | 4 +- src/controlflow/llm/models.py | 18 +- src/controlflow/planning/auto_tasks.py | 216 --------------------- tests/agents/test_agents.py | 16 +- tests/llm/__init__.py | 0 tests/llm/test_models.py | 55 ++++++ 17 files changed, 158 insertions(+), 312 deletions(-) delete mode 100644 src/controlflow/planning/auto_tasks.py create mode 100644 tests/llm/__init__.py create mode 100644 tests/llm/test_models.py diff --git a/docs/concepts/agents.mdx b/docs/concepts/agents.mdx index 59288b9e..0576d716 100644 --- a/docs/concepts/agents.mdx +++ b/docs/concepts/agents.mdx @@ -31,7 +31,6 @@ A more complex agent can be created by providing additional configuration. This ```python import controlflow as cf -from langchain_openai import ChatOpenAI agent = cf.Agent( name="Data Analyst", @@ -41,7 +40,7 @@ agent = cf.Agent( "Browse the web for data and use Python to analyze it." ), tools=[cf.tools.web.get_url, cf.tools.code.python], - model=ChatpOpenAI('gpt-4o-mini'), + model="openai/gpt-4o", interactive=True, ) ``` diff --git a/docs/examples/call-routing.mdx b/docs/examples/call-routing.mdx index 09a6b989..6f019174 100644 --- a/docs/examples/call-routing.mdx +++ b/docs/examples/call-routing.mdx @@ -17,7 +17,6 @@ As you run this example, you'll see the conversation unfold in real-time, culmin ```python import random -from enum import Enum import controlflow as cf DEPARTMENTS = [ diff --git a/docs/examples/features/multi-llm.mdx b/docs/examples/features/multi-llm.mdx index de0a5c12..e349b479 100644 --- a/docs/examples/features/multi-llm.mdx +++ b/docs/examples/features/multi-llm.mdx @@ -16,17 +16,12 @@ In this scenario, we'll create a workflow that analyzes customer feedback for a ```python import controlflow as cf -from langchain_openai import ChatOpenAI from pydantic import BaseModel from typing import Literal -# Define our models -gpt4_mini = ChatOpenAI(model="gpt-4o-mini") -gpt4 = ChatOpenAI(model="gpt-4o") - # Create specialized agents -classifier = cf.Agent(name="Classifier", model=gpt4_mini) -summarizer = cf.Agent(name="Summarizer", model=gpt4) +classifier = cf.Agent(name="Classifier", model="openai/gpt-4o-mini") +summarizer = cf.Agent(name="Summarizer", model="openai/gpt-4o") # Define our data models class Feedback(BaseModel): diff --git a/docs/examples/headline-categorization.mdx b/docs/examples/headline-categorization.mdx index cb94b9ae..67d244bc 100644 --- a/docs/examples/headline-categorization.mdx +++ b/docs/examples/headline-categorization.mdx @@ -12,9 +12,8 @@ The following code creates a function that classifies a given news headline into ```python import controlflow as cf -from langchain_openai import ChatOpenAI -classifier = cf.Agent(model=ChatOpenAI(model="gpt-4o-mini")) +classifier = cf.Agent(model="openai/gpt-4o-mini") def classify_news(headline: str) -> str: return cf.run( @@ -57,7 +56,7 @@ This 
implementation showcases several important ControlFlow features that enable 1. **[Agents](/concepts/agents)**: We create an agent with a specific LLM model (GPT-4o mini) to perform the headline classification. ```python - classifier = cf.Agent(model=ChatOpenAI(model="gpt-4o-mini")) + classifier = cf.Agent(model="openai/gpt-4o-mini") ``` 2. **[Result types](/concepts/tasks/task-results)**: We use a list of strings as the `result_type` to constrain the output to one of the predefined categories. This ensures that the classification result is always one of the specified options. diff --git a/docs/examples/named-entity-recognition.mdx b/docs/examples/named-entity-recognition.mdx index 47a845fe..740b0c34 100644 --- a/docs/examples/named-entity-recognition.mdx +++ b/docs/examples/named-entity-recognition.mdx @@ -12,12 +12,11 @@ First, let's implement a function that extracts a simple list of entities: ```python import controlflow as cf -from langchain_openai import ChatOpenAI from typing import List extractor = cf.Agent( name="Named Entity Recognizer", - model=ChatOpenAI(model="gpt-4o-mini"), + model="openai/gpt-4o-mini", ) def extract_entities(text: str) -> List[str]: @@ -86,7 +85,7 @@ This implementation showcases several important ControlFlow features that enable ```python extractor = cf.Agent( name="Named Entity Recognizer", - model=ChatOpenAI(model="gpt-4o-mini"), + model="openai/gpt-4o-mini", ) ``` diff --git a/docs/examples/sentiment-classifier.mdx b/docs/examples/sentiment-classifier.mdx index 4a520066..284503e0 100644 --- a/docs/examples/sentiment-classifier.mdx +++ b/docs/examples/sentiment-classifier.mdx @@ -13,9 +13,8 @@ The following code creates a function that classifies the sentiment of a given t ```python import controlflow as cf from controlflow.tasks.validators import between -from langchain_openai import ChatOpenAI -optimist = cf.Agent(model=ChatOpenAI(model="gpt-4o-mini")) +optimist = cf.Agent(model="openai/gpt-4o-mini") def sentiment(text: str) -> float: return cf.run( @@ -58,7 +57,7 @@ This implementation showcases several important ControlFlow features that enable 1. **[Agents](/concepts/agents)**: We create an agent with a specific LLM model (GPT-4o mini) to perform the sentiment analysis. ```python - optimist = cf.Agent(model=ChatOpenAI(model="gpt-4o-mini")) + optimist = cf.Agent(model="openai/gpt-4o-mini") ``` 3. **[Result types](/concepts/tasks/task-results)**: We specify `result_type=float` to ensure the sentiment score is returned as a float value. diff --git a/docs/guides/llms.mdx b/docs/guides/llms.mdx index 6311f781..f40128c6 100644 --- a/docs/guides/llms.mdx +++ b/docs/guides/llms.mdx @@ -19,28 +19,61 @@ Every ControlFlow agent can be assigned a specific LLM. When instantiating an ag ControlFlow agents can use any LangChain LLM class that supports chat-based APIs and tool calling. For a complete list of available models, settings, and instructions, please see LangChain's [LLM provider documentation](https://python.langchain.com/docs/integrations/chat/). -ControlFlow includes OpenAI and Azure OpenAI models by default. To use other models, you'll need to first install the corresponding LangChain package and supply any required credentials. See the model's [documentation](https://python.langchain.com/docs/integrations/chat/) for more information. +ControlFlow includes the required packages for OpenAI, Azure OpenAI, and Anthropic models by default. 
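+
+For instance, because Azure OpenAI support ships by default, an Azure-hosted deployment works without any extra installs. A minimal sketch, using the string form described under [automatic configuration](#automatic-configuration) below (the deployment name is illustrative, and the usual Azure OpenAI credentials must be set in your environment):
+
+```python
+import controlflow as cf
+
+# resolves to a LangChain AzureChatOpenAI instance
+azure_agent = cf.Agent(model="azure-openai/gpt-4")
+```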
+To use other models, you'll need to first install the corresponding LangChain package and supply any required credentials. See the model's [documentation](https://python.langchain.com/docs/integrations/chat/) for more information.
+
+### Automatic configuration
+
+ControlFlow can automatically load LLMs from certain providers based on a model string. The string must have the form `{provider key}/{model name}`.
+
+For example:
+```python
+import controlflow as cf
+
+openai_agent = cf.Agent(model="openai/gpt-4o-mini")
+anthropic_agent = cf.Agent(model="anthropic/claude-3-haiku-20240307")
+groq_agent = cf.Agent(model="groq/mixtral-8x7b-32768")
+```
+
+Note that loading a model from a string is convenient, but does not allow you to configure all of the model's parameters. For full control, see the docs on [manual configuration](#manual-configuration).
+
+At this time, supported providers for automatic configuration include:
+
+| Provider     | Provider key   | Required dependencies    |
+| ------------ | -------------- | ------------------------ |
+| OpenAI       | `openai`       | (included)               |
+| Azure OpenAI | `azure-openai` | (included)               |
+| Anthropic    | `anthropic`    | (included)               |
+| Google       | `google`       | `langchain_google_genai` |
+| Groq         | `groq`         | `langchain_groq`         |
+
+If the required dependencies are not installed, ControlFlow will raise an error when it attempts to load the model.
+
+
+### Manual configuration
+
 
 To configure a different LLM, follow these steps:
 
-To use an LLM, first make sure you have installed the appropriate provider package. ControlFlow only includes `langchain_openai` by default. For example, to use an Anthropic model, first run:
-```
-pip install langchain_anthropic
+To use an LLM, first make sure you have installed the appropriate [provider package](https://python.langchain.com/docs/integrations/chat/). For example, to use a Google model, run:
+
+```bash
+pip install langchain_google_genai
 ```
 
-You must provide the correct API keys and configuration for the LLM you want to use. These can be provided as environment variables or when you create the model in your script. For example, to use an Anthropic model, set the `ANTHROPIC_API_KEY` environment variable:
+You must provide the correct API keys and configuration for the LLM you want to use. These can be provided as environment variables or when you create the model in your script. For example, to use an OpenAI model, you must set the `OPENAI_API_KEY` environment variable:
+```bash
+export OPENAI_API_KEY=
 ```
-export ANTHROPIC_API_KEY=
-```
 
-For model-specific instructions, please refer to the provider's documentation.
+For model-specific instructions, please refer to the provider's [documentation](https://python.langchain.com/docs/integrations/chat/).
+
 
-Begin by creating the LLM object in your script. For example, to use Claude 3 Opus:
+Create the LLM model in your script, including any additional parameters. 
For example, to use Claude 3 Opus:
 
 ```python
 from langchain_anthropic import ChatAnthropic
 
 # create the model
 model = ChatAnthropic(model='claude-3-opus-20240229')
 ```
+
-Next, create an agent with the specified model:
+Finally, configure an agent with the model:
 
 ```python
 import controlflow as cf
 
 # provide the model to an agent
 agent = cf.Agent(model=model)
 ```
 
-
-Finally, assign your agent to a task:
-
-```python
-# assign the agent to a task
-task = cf.Task('Write a short poem about LLMs', agents=[agent])
-
-# (optional) run the task
-task.run()
-```
-
-
-
-```python
-import controlflow as cf
-from langchain_anthropic import ChatAnthropic
-
-# create the model
-model = ChatAnthropic(model='claude-3-opus-20240229')
-
-# provide the model to an agent
-agent = cf.Agent(model=model)
-
-# assign the agent to a task
-task = cf.Task('Write a short poem about LLMs', agents=[agent])
-
-# (optional) run the task
-task.run()
-```
-
-
-### Model configuration
 
 In addition to choosing a specific model, you can also configure the model's parameters. For example, you can set the temperature for GPT-4o:
@@ -133,33 +135,25 @@ assert cf.Agent('Marvin').model.model_name == 'claude-3-opus-20240229'
 ```
 
 ### From a string setting
-You can also specify a default model using a string, which is convenient though it doesn't allow you to configure advanced model settings. The string must have the form `/`.
+You can also specify a default model using a string, which is convenient though it doesn't allow you to configure advanced model settings. This must be a string in the form `{provider key}/{model name}`, following the same guidelines as [automatic LLM configuration](#automatic-configuration).
 
-You can apply this setting either by using an environment variable before you import ControlFlow or in your script at runtime. For example, to use GPT 3.5 Turbo as the default model:
+You can apply this setting either by using an environment variable before you import ControlFlow or in your script at runtime. For example, to use GPT-4o mini as the default model:
 
 ```bash Set an environment variable
-export CONTROLFLOW_LLM_MODEL=openai/gpt-3.5-turbo
+export CONTROLFLOW_LLM_MODEL=openai/gpt-4o-mini
 ```
 
 ```python Set a runtime variable
 import controlflow as cf
 
 # set the default model as a string
-cf.defaults.model = "openai/gpt-3.5-turbo"
+cf.defaults.model = "openai/gpt-4o-mini"
 
 # check that the default model is loaded
-assert cf.Agent('Marvin').model.model_name == 'gpt-3.5-turbo'
+assert cf.Agent('Marvin').model.model_name == 'gpt-4o-mini'
 ```
 
 <Warning>
 The default model can only be set by environment variable before importing ControlFlow. Once ControlFlow is imported, it reads the `controlflow.settings.llm_model` value to create the default model object.
 </Warning>
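+
+An agent that specifies its own model always takes precedence over the default. The following sketch (model names are illustrative) combines the two mechanisms, using the string form from [automatic configuration](#automatic-configuration):
+
+```python
+import controlflow as cf
+
+# set a default model for all agents
+cf.defaults.model = "openai/gpt-4o-mini"
+
+# this agent uses the default model
+writer = cf.Agent('Writer')
+
+# this agent loads its own model from a provider/model string
+marvin = cf.Agent('Marvin', model="anthropic/claude-3-haiku-20240307")
+assert marvin.model.model == "claude-3-haiku-20240307"
+```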
- - -At this time, setting the default model via string is only supported for the following providers: -- `openai` -- `azure-openai` -- `anthropic` -- `google` -- `groq` \ No newline at end of file diff --git a/docs/patterns/running-tasks.mdx b/docs/patterns/running-tasks.mdx index 7a14da7e..6ac5344c 100644 --- a/docs/patterns/running-tasks.mdx +++ b/docs/patterns/running-tasks.mdx @@ -207,10 +207,9 @@ We can also use the `Moderated` strategy to have a more powerful model orchestra ```python Moderated import controlflow as cf -from langchain_openai import ChatOpenAI -optimist = cf.Agent(name="Optimist", model=ChatOpenAI(model="gpt-4o-mini")) -pessimist = cf.Agent(name="Pessimist", model=ChatOpenAI(model="gpt-4o-mini")) +optimist = cf.Agent(name="Optimist", model="gpt-4o-mini") +pessimist = cf.Agent(name="Pessimist", model="gpt-4o-mini") moderator = cf.Agent(name="Moderator") cf.run( diff --git a/docs/quickstart.mdx b/docs/quickstart.mdx index 5c5c5085..02af1b9d 100644 --- a/docs/quickstart.mdx +++ b/docs/quickstart.mdx @@ -114,15 +114,13 @@ This example uses an OpenAI model, but you can use any LangChain-compatible LLM -```python Code -from langchain_openai import ChatOpenAI -from enum import Enum +```python Codem # Create a specialized agent classifier = cf.Agent( name="Email Classifier", - model=ChatOpenAI(model="gpt-4o-mini"), + model="openai/gpt-4o-mini", instructions="You are an expert at quickly classifying emails.", ) @@ -166,21 +164,19 @@ A flow provides a shared context and history for all agents, even across multipl ```python Code import controlflow as cf -from langchain_openai import ChatOpenAI -from enum import Enum # Create agents classifier = cf.Agent( name="Email Classifier", - model=ChatOpenAI(model="gpt-4o-mini"), + model="openai/gpt-4o-mini", instructions="You are an expert at quickly classifying emails. Always " "respond with exactly one word: either 'important' or 'spam'." ) responder = cf.Agent( name="Email Responder", - model=ChatOpenAI(model="gpt-4o"), + model="openai/gpt-4o", instructions="You are an expert at crafting professional email responses. " "Your replies should be concise but friendly." ) diff --git a/pyproject.toml b/pyproject.toml index 32a8df2f..bff67093 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -52,6 +52,8 @@ tests = [ "pytest-timeout", "pytest-xdist", "langchain_community", + "langchain_google_genai", + "langchain_groq", "duckduckgo-search", ] dev = [ diff --git a/src/controlflow/agents/agent.py b/src/controlflow/agents/agent.py index a1b2165b..0b391950 100644 --- a/src/controlflow/agents/agent.py +++ b/src/controlflow/agents/agent.py @@ -9,6 +9,7 @@ AsyncGenerator, Generator, Optional, + Union, ) from langchain_core.language_models import BaseChatModel @@ -19,6 +20,7 @@ from controlflow.events.base import Event from controlflow.instructions import get_instructions from controlflow.llm.messages import AIMessage, BaseMessage +from controlflow.llm.models import get_model as get_model_from_string from controlflow.llm.rules import LLMRules from controlflow.tools.tools import ( Tool, @@ -78,9 +80,9 @@ class Agent(ControlFlowModel, abc.ABC): # note: `model` should be typed as Optional[BaseChatModel] but V2 models can't have # V1 attributes without erroring, so we have to use Any. - model: Optional[Any] = Field( + model: Optional[Union[str, Any]] = Field( None, - description="The LangChain BaseChatModel used by the agent. If not provided, the default model will be used.", + description="The LangChain BaseChatModel used by the agent. 
If not provided, the default model will be used. A compatible string can be passed to automatically retrieve the model.", exclude=True, ) @@ -130,6 +132,12 @@ def _generate_id(self): def _validate_tools(cls, tools: list[Tool]): return as_tools(tools or []) + @field_validator("model", mode="before") + def _validate_model(cls, model: Optional[Union[str, Any]]): + if isinstance(model, str): + return get_model_from_string(model) + return model + @field_serializer("tools") def _serialize_tools(self, tools: list[Tool]): tools = controlflow.tools.as_tools(tools) diff --git a/src/controlflow/defaults.py b/src/controlflow/defaults.py index 823fbbd5..53204294 100644 --- a/src/controlflow/defaults.py +++ b/src/controlflow/defaults.py @@ -10,7 +10,7 @@ from .agents import Agent from .events.history import History, InMemoryHistory -from .llm.models import _get_initial_default_model, model_from_string +from .llm.models import _get_initial_default_model, get_model __all__ = ["defaults"] @@ -43,7 +43,7 @@ def __repr__(self) -> str: @field_validator("model") def _model(cls, v): if isinstance(v, str): - v = model_from_string(v) + v = get_model(v) elif v is not None and not isinstance(v, BaseChatModel): raise ValueError("Input must be an instance of BaseChatModel") return v diff --git a/src/controlflow/llm/models.py b/src/controlflow/llm/models.py index 1e86c4ff..ea5428f2 100644 --- a/src/controlflow/llm/models.py +++ b/src/controlflow/llm/models.py @@ -12,16 +12,20 @@ def get_default_model() -> BaseChatModel: if getattr(controlflow.defaults, "model", None) is None: - return model_from_string(controlflow.settings.llm_model) + return get_model(controlflow.settings.llm_model) else: return controlflow.defaults.model -def model_from_string( +def get_model( model: str, temperature: Optional[float] = None, **kwargs: Any ) -> BaseChatModel: + """Get a model from a string.""" if "/" not in model: - provider, model = "openai", model + raise ValueError( + f"The model `{model}` is not valid. Please specify the provider " + "and model name, e.g. 'openai/gpt-4o-mini' or 'anthropic/claude-3-haiku-20240307'." + ) provider, model = model.split("/") if temperature is None: @@ -44,7 +48,7 @@ def model_from_string( from langchain_google_genai import ChatGoogleGenerativeAI except ImportError: raise ImportError( - "To use Google models, please install the `langchain_google_genai` package." + "To use Google as an LLM provider, please install the `langchain_google_genai` package." ) cls = ChatGoogleGenerativeAI elif provider == "groq": @@ -52,12 +56,12 @@ def model_from_string( from langchain_groq import ChatGroq except ImportError: raise ImportError( - "To use Groq models, please install the `langchain_groq` package." + "To use Groq as an LLM provider, please install the `langchain_groq` package." ) cls = ChatGroq else: raise ValueError( - f"Could not load provider automatically: {provider}. Please create your model manually." + f"Could not load provider `{provider}` automatically. Please provide the LLM class manually." 
) return cls(model=model, temperature=temperature, **kwargs) @@ -66,7 +70,7 @@ def model_from_string( def _get_initial_default_model() -> BaseChatModel: # special error messages for the initial attempt to create a model try: - return model_from_string(controlflow.settings.llm_model) + return get_model(controlflow.settings.llm_model) except Exception as exc: if isinstance(exc, ValidationError) and "Did not find openai_api_key" in str( exc diff --git a/src/controlflow/planning/auto_tasks.py b/src/controlflow/planning/auto_tasks.py deleted file mode 100644 index ce7607c5..00000000 --- a/src/controlflow/planning/auto_tasks.py +++ /dev/null @@ -1,216 +0,0 @@ -from enum import Enum -from typing import Any, Callable, Literal, Optional, TypeVar, Union - -from pydantic import Field - -from controlflow.agents import Agent -from controlflow.tasks.task import Task -from controlflow.utilities.general import ControlFlowModel - -ToolLiteral = TypeVar("ToolLiteral", bound=str) - - -class ResultType(Enum): - STRING = "STRING" - NONE = "NONE" - - -class TaskReference(ControlFlowModel): - """ - A reference to a task by its ID. Used for indicating task depenencies. - """ - - id: int - - -class AgentReference(ControlFlowModel): - """ - A reference to an agent by its name. Used for assigning agents to tasks. - """ - - name: str - - -class AgentTemplate(ControlFlowModel): - name: str - description: Optional[str] = Field( - None, - description="A brief description of the agent that will be visible to other agents.", - ) - instructions: Optional[str] = Field( - None, - description="Private instructions for the agent to follow when completing tasks.", - ) - interactive: bool = Field( - False, description="If True, the agent can interact with a human user." - ) - tools: list[str] = Field([], description="The tools that the agent has access to.") - - -class TaskTemplate(ControlFlowModel): - id: int - objective: str = Field(description="The task's objective.") - instructions: Optional[str] = Field( - None, description="Instructions for completing the task." - ) - result_type: Union[ResultType, list[str]] = Field( - ResultType.STRING, - description="The type of result expected from the task, defaults to a string output. " - "Can also be `NONE` if the task does not produce a result (but may have side effects) or " - "a list of choices if the task has a discrete set of possible outputs.", - ) - context: dict[str, Union[TaskReference, Any]] = Field( - default_factory=dict, - description="The task's context. Values may be constants, TaskReferences, or " - "collections of either. Any `TaskReferences` will create upstream dependencies, meaning " - "this task will receive the referenced task's output as input.", - ) - depends_on: list[TaskReference] = Field( - default_factory=list, - description="Tasks that must be completed before this task can be started, " - "though their outputs are not used.", - ) - parent: Optional[TaskReference] = Field( - None, - description="Indicate that this task is a subtask of a parent. Not required for top-level tasks.", - ) - agents: list[AgentReference] = Field( - default_factory=list, - description="Any agents assigned to the task. If not specified, the default agent will be used.", - ) - tools: list[str] = Field([], description="The tools available for this task.") - interactive: bool = Field( - False, description="If True, the task requires interaction with a human user." 
- ) - - -def create_tasks( - task_templates: list[TaskTemplate], - agent_templates: list[AgentTemplate] = None, - agents: list[Agent] = None, - tools: dict[str, Any] = None, -) -> list[Task]: - """ - Create tasks from task templates, agent templates, agents, and tools. - - Task templates and agent templates are JSON-serializable objects that define the tasks and agents to be created. - - Agents and tools represent pre-existing agents and tools that can be used to complete the task and agent templates. - """ - agents: dict[str, Agent] = {a.name: a for a in agents or []} - tasks: dict[int, Task] = {} - task_templates: dict[int, TaskTemplate] = {t.id: t for t in task_templates} - - # create agents from templates - for agent_template in agent_templates: - agents[agent_template.name] = Agent( - name=agent_template.name, - description=agent_template.description, - instructions=agent_template.instructions, - interactive=agent_template.interactive, - tools=[tools[tool] for tool in agent_template.tools], - ) - - # create tasks from templates - for task_template in task_templates.values(): - if task_template.result_type == ResultType.NONE: - result_type = None - elif result_type == ResultType.STRING: - result_type = str - else: - result_type = task_template.result_type - - tasks[task_template.id] = Task( - objective=task_template.objective, - instructions=task_template.instructions, - result_type=result_type, - tools=[tools[tool] for tool in task_template.tools], - use_access=task_template.interactive, - ) - - # resolve references - for template_id, task in tasks.items(): - task_template = task_templates[template_id] - if task_agents := [ - agents[agent_ref.name] for agent_ref in task_template.agents - ]: - task.agents = task_agents - task.depends_on = [tasks[d.id] for d in task_template.depends_on] - task.context = { - key: tasks[value.id] if isinstance(value, TaskReference) else value - for key, value in task_template.context.items() - } - - if parent := tasks[task_template.parent.id] if task_template.parent else None: - parent.add_subtask(task) - - return list(tasks.values()) - - -class Templates(ControlFlowModel): - task_templates: list[TaskTemplate] - agent_templates: list[AgentTemplate] - - -def auto_tasks( - description: str, - available_agents: list[Agent] = None, - available_tools: list[Callable] = None, -) -> list[Task]: - tool_names = [] - for tool in available_tools or []: - tool_names.append(tool.__name__) - - if tool_names: - literal_tool_names = Literal[*tool_names] # type: ignore - else: - literal_tool_names = None - - class TaskTemplate_Tools(TaskTemplate): - tools: list[literal_tool_names] = Field( - [], description="The tools available for this task." - ) - - class AgentTemplate_Tools(AgentTemplate): - tools: list[literal_tool_names] = Field( - [], description="The tools that the agent has access to." - ) - - class Templates_Tools(Templates): - task_templates: list[TaskTemplate_Tools] - agent_templates: list[AgentTemplate_Tools] - - task = Task( - objective=""" - Generate the minimal set of tasks required to complete the provided - `description` of an objective. Also reference any tools or agents (or - create new agents) that your tasks require. - """, - instructions=""" - Each task will be executed by an agent like you, working in a workflow - like this one. Your job is to define the workflow. Choose your tasks to - be achievable by agents with the tools and skills you deem necessary. - Create only as many tasks as you need. 
- - Each task should be well-defined, with a single objective and clear - instructions. The tasks should be independent of each other, but may - have dependencies on other tasks. If you do not choose - agents for your tasks, the default agent will be used. Do not post messages, just return your - result. - """, - result_type=Templates, - context=dict( - description=description, - available_agents=available_agents, - available_tools=available_tools, - ), - ) - - task.run() - - return create_tasks( - task_templates=task.result.task_templates, - agent_templates=task.result.agent_templates, - agents=available_agents, - tools=available_tools, - ) diff --git a/tests/agents/test_agents.py b/tests/agents/test_agents.py index 9a82b158..51a712f4 100644 --- a/tests/agents/test_agents.py +++ b/tests/agents/test_agents.py @@ -1,4 +1,5 @@ import pytest +from langchain_anthropic import ChatAnthropic from langchain_openai import ChatOpenAI import controlflow @@ -31,6 +32,19 @@ def test_agent_model(self): assert agent.model is model assert agent.get_model() is model + def test_agent_model_from_string(self): + agent1 = Agent(model="openai/gpt-4o-mini") + assert isinstance(agent1.model, ChatOpenAI) + assert agent1.model.model_name == "gpt-4o-mini" + + agent2 = Agent(model="anthropic/claude-3-haiku-20240307") + assert isinstance(agent2.model, ChatAnthropic) + assert agent2.model.model == "claude-3-haiku-20240307" + + def test_agent_model_from_unsupported_provider(self): + with pytest.raises(ValueError, match="Unsupported model provider: abc"): + Agent(model="abc/def") + def test_agent_loads_instructions_at_creation(self): with instructions("test instruction"): agent = Agent() @@ -73,7 +87,7 @@ def test_default_agent_can_be_assigned(self): assert Task("task").get_agents()[0].name == "New Agent" def test_updating_the_default_model_updates_the_default_agent_model(self): - new_model = ChatOpenAI(model="gpt-3.5-turbo") + new_model = ChatOpenAI(model="gpt-4o-mini") controlflow.defaults.model = new_model new_agent = controlflow.defaults.agent diff --git a/tests/llm/__init__.py b/tests/llm/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/llm/test_models.py b/tests/llm/test_models.py new file mode 100644 index 00000000..77e2e2c4 --- /dev/null +++ b/tests/llm/test_models.py @@ -0,0 +1,55 @@ +import pytest +from langchain_anthropic import ChatAnthropic +from langchain_google_genai import ChatGoogleGenerativeAI +from langchain_groq import ChatGroq +from langchain_openai import AzureChatOpenAI, ChatOpenAI + +from controlflow.llm.models import get_model + + +def test_get_model_from_openai(): + model = get_model("openai/gpt-4o-mini") + assert isinstance(model, ChatOpenAI) + assert model.model_name == "gpt-4o-mini" + + +def test_get_model_from_anthropic(): + model = get_model("anthropic/claude-3-haiku-20240307") + assert isinstance(model, ChatAnthropic) + assert model.model == "claude-3-haiku-20240307" + + +def test_get_azure_openai_model(): + model = get_model("azure-openai/gpt-4") + assert isinstance(model, AzureChatOpenAI) + assert model.deployment_name == "gpt-4" + + +def test_get_google_model(): + model = get_model("google/gemini-1.5-pro") + assert isinstance(model, ChatGoogleGenerativeAI) + assert model.model == "models/gemini-1.5-pro" + + +def test_get_groq_model(): + model = get_model("groq/mixtral-8x7b-32768") + assert isinstance(model, ChatGroq) + assert model.model_name == "mixtral-8x7b-32768" + + +def test_get_model_with_bad_format(): + with pytest.raises(ValueError, match="The model 
`xyz` is not valid."): + get_model("xyz") + + +def test_get_model_with_unsupported_provider(): + with pytest.raises( + ValueError, match="Could not load provider `unsupported` automatically." + ): + get_model("unsupported/model-name") + + +def test_get_model_with_temperature(): + model = get_model("anthropic/claude-3-haiku-20240307", temperature=0.7) + assert isinstance(model, ChatAnthropic) + assert model.temperature == 0.7 From 2764e154dd3aa8b428748088a4601d920af7678c Mon Sep 17 00:00:00 2001 From: Jeremiah Lowin <153965+jlowin@users.noreply.github.com> Date: Thu, 12 Sep 2024 08:55:41 -0400 Subject: [PATCH 2/3] Fix tests --- tests/agents/test_agents.py | 8 +++++++- tests/llm/test_models.py | 6 +++--- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/tests/agents/test_agents.py b/tests/agents/test_agents.py index 51a712f4..44010868 100644 --- a/tests/agents/test_agents.py +++ b/tests/agents/test_agents.py @@ -41,8 +41,14 @@ def test_agent_model_from_string(self): assert isinstance(agent2.model, ChatAnthropic) assert agent2.model.model == "claude-3-haiku-20240307" + def test_agent_model_with_invalid_format(self): + with pytest.raises(ValueError, match="The model `gpt-4o` is not valid."): + Agent(model="gpt-4o") + def test_agent_model_from_unsupported_provider(self): - with pytest.raises(ValueError, match="Unsupported model provider: abc"): + with pytest.raises( + ValueError, match="Could not load provider `abc` automatically" + ): Agent(model="abc/def") def test_agent_loads_instructions_at_creation(self): diff --git a/tests/llm/test_models.py b/tests/llm/test_models.py index 77e2e2c4..dc455d1c 100644 --- a/tests/llm/test_models.py +++ b/tests/llm/test_models.py @@ -37,9 +37,9 @@ def test_get_groq_model(): assert model.model_name == "mixtral-8x7b-32768" -def test_get_model_with_bad_format(): - with pytest.raises(ValueError, match="The model `xyz` is not valid."): - get_model("xyz") +def test_get_model_with_invalid_format(): + with pytest.raises(ValueError, match="The model `gpt-4o` is not valid."): + get_model("gpt-4o") def test_get_model_with_unsupported_provider(): From 8a507bcd7392cfc6ea8dcb19331a97c7655c5094 Mon Sep 17 00:00:00 2001 From: Jeremiah Lowin <153965+jlowin@users.noreply.github.com> Date: Thu, 12 Sep 2024 09:06:03 -0400 Subject: [PATCH 3/3] Set env vars for tests --- tests/llm/test_models.py | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/tests/llm/test_models.py b/tests/llm/test_models.py index dc455d1c..b46bb0d0 100644 --- a/tests/llm/test_models.py +++ b/tests/llm/test_models.py @@ -7,31 +7,40 @@ from controlflow.llm.models import get_model -def test_get_model_from_openai(): +def test_get_model_from_openai(monkeypatch): + monkeypatch.setenv("OPENAI_API_KEY", "fake_openai_api_key") model = get_model("openai/gpt-4o-mini") assert isinstance(model, ChatOpenAI) assert model.model_name == "gpt-4o-mini" -def test_get_model_from_anthropic(): +def test_get_model_from_anthropic(monkeypatch): + monkeypatch.setenv("ANTHROPIC_API_KEY", "fake_anthropic_api_key") model = get_model("anthropic/claude-3-haiku-20240307") assert isinstance(model, ChatAnthropic) assert model.model == "claude-3-haiku-20240307" -def test_get_azure_openai_model(): +def test_get_azure_openai_model(monkeypatch): + monkeypatch.setenv("AZURE_OPENAI_API_KEY", "fake_azure_openai_api_key") + monkeypatch.setenv( + "AZURE_OPENAI_ENDPOINT", "https://fake-endpoint.openai.azure.com" + ) + monkeypatch.setenv("OPENAI_API_VERSION", "2024-05-01-preview") model = 
get_model("azure-openai/gpt-4") assert isinstance(model, AzureChatOpenAI) - assert model.deployment_name == "gpt-4" + assert model.model_name == "gpt-4" -def test_get_google_model(): +def test_get_google_model(monkeypatch): + monkeypatch.setenv("GOOGLE_API_KEY", "fake_google_api_key") model = get_model("google/gemini-1.5-pro") assert isinstance(model, ChatGoogleGenerativeAI) assert model.model == "models/gemini-1.5-pro" -def test_get_groq_model(): +def test_get_groq_model(monkeypatch): + monkeypatch.setenv("GROQ_API_KEY", "fake_groq_api_key") model = get_model("groq/mixtral-8x7b-32768") assert isinstance(model, ChatGroq) assert model.model_name == "mixtral-8x7b-32768" @@ -49,7 +58,8 @@ def test_get_model_with_unsupported_provider(): get_model("unsupported/model-name") -def test_get_model_with_temperature(): +def test_get_model_with_temperature(monkeypatch): + monkeypatch.setenv("ANTHROPIC_API_KEY", "fake_anthropic_api_key") model = get_model("anthropic/claude-3-haiku-20240307", temperature=0.7) assert isinstance(model, ChatAnthropic) assert model.temperature == 0.7