From 63544365633556703bbb0bc69dbf0801ad820221 Mon Sep 17 00:00:00 2001 From: fernando-plank <121121695+fernando-plank@users.noreply.github.com> Date: Tue, 13 May 2025 20:26:46 +0000 Subject: [PATCH] GitHub Action - Update examples in docs from notebooks --- docs/v1/examples/camel.mdx | 2 +- docs/v1/examples/langchain.mdx | 2 +- docs/v1/examples/multi_agent.mdx | 20 +++------ docs/v1/examples/multion.mdx | 2 +- docs/v1/examples/ollama.mdx | 2 +- docs/v1/examples/openai_assistants.mdx | 56 ++++++++------------------ docs/v1/examples/recording_events.mdx | 7 +--- docs/v1/examples/simple_agent.mdx | 2 +- 8 files changed, 29 insertions(+), 64 deletions(-) diff --git a/docs/v1/examples/camel.mdx b/docs/v1/examples/camel.mdx index f70c28198..7ea0459c3 100644 --- a/docs/v1/examples/camel.mdx +++ b/docs/v1/examples/camel.mdx @@ -58,7 +58,7 @@ Now we will initialize our AgentOps client. ```python -agentops.init(tags=["camel", "multi-agent", "example"]) +agentops.init(default_tags=["camel", "multi-agent", "example"]) ``` Let's start with setting our task prompt and setting our tools. 
diff --git a/docs/v1/examples/langchain.mdx b/docs/v1/examples/langchain.mdx index bb0738180..00eb91620 100644 --- a/docs/v1/examples/langchain.mdx +++ b/docs/v1/examples/langchain.mdx @@ -65,7 +65,7 @@ Pass in your API key, and optionally any tags to describe this session for easie ```python agentops_handler = AgentOpsLangchainCallbackHandler( - api_key=AGENTOPS_API_KEY, tags=["Langchain Example"] + api_key=AGENTOPS_API_KEY, default_tags=["Langchain Example"] ) llm = ChatOpenAI( diff --git a/docs/v1/examples/multi_agent.mdx b/docs/v1/examples/multi_agent.mdx index 6fbdc4dc1..be023ee66 100644 --- a/docs/v1/examples/multi_agent.mdx +++ b/docs/v1/examples/multi_agent.mdx @@ -9,7 +9,7 @@ _View Notebook on " ```python # Initialize AgentOps with some default tags -agentops.init(AGENTOPS_API_KEY, tags=["ollama-example"]) +agentops.init(AGENTOPS_API_KEY, default_tags=["ollama-example"]) ``` Now let's make some basic calls to Ollama. Make sure you have pulled the model first, use the following or replace with whichever model you want to use. 
diff --git a/docs/v1/examples/openai_assistants.mdx b/docs/v1/examples/openai_assistants.mdx index bdf4aa713..f87f1dc52 100644 --- a/docs/v1/examples/openai_assistants.mdx +++ b/docs/v1/examples/openai_assistants.mdx @@ -49,6 +49,7 @@ We'll take a look at how these can be used to create powerful, stateful experien ```python import json + def show_json(obj): display(json.loads(obj.model_dump_json())) ``` @@ -105,7 +106,7 @@ OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") or "" ```python -agentops.init(api_key=AGENTOPS_API_KEY, tags=["openai", "beta-assistants"]) +agentops.init(api_key=AGENTOPS_API_KEY, default_tags=["openai", "beta-assistants"]) client = OpenAI(api_key=OPENAI_API_KEY) ``` @@ -186,6 +187,7 @@ To know when the Assistant has completed processing, we can poll the Run in a lo ```python import time + def wait_on_run(run, thread): while run.status == "queued" or run.status == "in_progress": run = client.beta.threads.runs.retrieve( @@ -223,9 +225,7 @@ Let's ask our Assistant to explain the result a bit further! ```python # Create a message to append to our thread -message = client.beta.threads.messages.create( - thread_id=thread.id, role="user", content="Could you explain this to me?" -) +message = client.beta.threads.messages.create(thread_id=thread.id, role="user", content="Could you explain this to me?") # Execute our run run = client.beta.threads.runs.create( @@ -237,9 +237,7 @@ run = client.beta.threads.runs.create( wait_on_run(run, thread) # Retrieve all the messages added after our last user message -messages = client.beta.threads.messages.list( - thread_id=thread.id, order="asc", after=message.id -) +messages = client.beta.threads.messages.list(thread_id=thread.id, order="asc", after=message.id) show_json(messages) ``` @@ -265,10 +263,9 @@ MATH_ASSISTANT_ID = assistant.id # or a hard-coded ID like "asst-..." 
client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY", "")) + def submit_message(assistant_id, thread, user_message): - client.beta.threads.messages.create( - thread_id=thread.id, role="user", content=user_message - ) + client.beta.threads.messages.create(thread_id=thread.id, role="user", content=user_message) return client.beta.threads.runs.create( thread_id=thread.id, assistant_id=assistant_id, @@ -293,9 +290,7 @@ def create_thread_and_run(user_input): # Emulating concurrent user requests -thread1, run1 = create_thread_and_run( - "I need to solve the equation `3x + 11 = 14`. Can you help me?" -) +thread1, run1 = create_thread_and_run("I need to solve the equation `3x + 11 = 14`. Can you help me?") thread2, run2 = create_thread_and_run("Could you explain linear algebra to me?") thread3, run3 = create_thread_and_run("I don't like math. What can I do?") @@ -307,8 +302,6 @@ Once all Runs are going, we can wait on each and get the responses. ```python -import time - # Pretty printing helper def pretty_print(messages): print("# Messages") @@ -380,9 +373,7 @@ Now, let's ask the Assistant to use its new tool. ```python -thread, run = create_thread_and_run( - "Generate the first 20 fibbonaci numbers with code." -) +thread, run = create_thread_and_run("Generate the first 20 fibonacci numbers with code.") run = wait_on_run(run, thread) pretty_print(get_response(thread)) ``` @@ -399,9 +390,7 @@ A Run is composed of one or more Steps. Like a Run, each Step has a `status` tha ```python -run_steps = client.beta.threads.runs.steps.list( - thread_id=thread.id, run_id=run.id, order="asc" -) +run_steps = client.beta.threads.runs.steps.list(thread_id=thread.id, run_id=run.id, order="asc") ``` Let's take a look at each Step's `step_details`. 
@@ -653,19 +642,17 @@ tool_calls = run.required_action.submit_tool_outputs.tool_calls for tool_call in tool_calls: arguments = json.loads(tool_call.function.arguments) responses = display_quiz(arguments["title"], arguments["questions"]) - tool_outputs.append({ - "tool_call_id": tool_call.id, - "output": json.dumps(responses), - }) + tool_outputs.append( + { + "tool_call_id": tool_call.id, + "output": json.dumps(responses), + } + ) ``` ```python -run = client.beta.threads.runs.submit_tool_outputs( - thread_id=thread.id, - run_id=run.id, - tool_outputs=tool_outputs -) +run = client.beta.threads.runs.submit_tool_outputs(thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs) show_json(run) ``` @@ -678,15 +665,6 @@ run = wait_on_run(run, thread) pretty_print(get_response(thread)) ``` -Now let's end the AgentOps session. By default, AgentOps will end the session in the "Intedeterminate" state. You can also end the session in the "Success" or "Failure" state. - -We will end the session in the "Success" state. - - -```python -agentops.end_session(end_state="Success") -``` - Woohoo 🎉 ## Conclusion diff --git a/docs/v1/examples/recording_events.mdx b/docs/v1/examples/recording_events.mdx index 5c28150f1..d075f9f08 100644 --- a/docs/v1/examples/recording_events.mdx +++ b/docs/v1/examples/recording_events.mdx @@ -11,7 +11,7 @@ _View Notebook on