Skip to content

Commit 9295194

Browse files
committed
adding notebooks
1 parent 2e3fc31 commit 9295194

File tree

17 files changed

+3844
-0
lines changed

17 files changed

+3844
-0
lines changed

Notebooks/CrewAI Flows & Langgraph/Coding Assistant/coding_assistant_eval.ipynb

Lines changed: 2192 additions & 0 deletions
Large diffs are not rendered by default.
Lines changed: 337 additions & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -0,0 +1,337 @@
1+
{
2+
"cells": [
3+
{
4+
"cell_type": "markdown",
5+
"metadata": {},
6+
"source": [
7+
"# Install dependencies"
8+
]
9+
},
10+
{
11+
"cell_type": "code",
12+
"execution_count": 1,
13+
"metadata": {},
14+
"outputs": [],
15+
"source": [
16+
"%%capture --no-stderr\n",
17+
"%pip install -U --quiet 'crewai[tools]' aisuite"
18+
]
19+
},
20+
{
21+
"cell_type": "markdown",
22+
"metadata": {},
23+
"source": [
24+
"# Set environment variables"
25+
]
26+
},
27+
{
28+
"cell_type": "code",
29+
"execution_count": 2,
30+
"metadata": {
31+
"jupyter": {
32+
"source_hidden": true
33+
}
34+
},
35+
"outputs": [],
36+
"source": [
37+
# Notebook setup: record the start time, collect the OpenAI key, and patch
# asyncio so nested event loops work inside Jupyter.
import getpass
import os
import time

import nest_asyncio

# Captured before any interactive prompt so the final runtime report
# measures the whole notebook session.
initial_time = time.time()

# Only prompt when the key is not already in the environment, so re-running
# the notebook (or Restart & Run All) does not force a re-entry.
if not os.environ.get("OPENAI_API_KEY"):
    os.environ["OPENAI_API_KEY"] = getpass.getpass("Enter your OpenAI API key: ")

# Apply a patch to allow nested asyncio loops in Jupyter
nest_asyncio.apply()
48+
]
49+
},
50+
{
51+
"cell_type": "markdown",
52+
"metadata": {},
53+
"source": [
54+
"# Create Crew"
55+
]
56+
},
57+
{
58+
"cell_type": "code",
59+
"execution_count": 3,
60+
"metadata": {},
61+
"outputs": [
62+
{
63+
"name": "stderr",
64+
"output_type": "stream",
65+
"text": [
66+
"/Users/joaomoura/.pyenv/versions/3.11.7/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
67+
" from .autonotebook import tqdm as notebook_tqdm\n",
68+
"Inserting batches in chromadb: 100%|██████████| 1/1 [00:01<00:00, 1.47s/it]\n",
69+
"Inserting batches in chromadb: 100%|██████████| 1/1 [00:00<00:00, 1.18it/s]\n",
70+
"Inserting batches in chromadb: 100%|██████████| 1/1 [00:01<00:00, 1.65s/it]\n"
71+
]
72+
}
73+
],
74+
"source": [
75+
# Crew components, CrewAI Flow primitives, and the website RAG search tool.
from crewai import Agent, Task, Crew
from crewai.flow.flow import Flow, listen, start
from crewai_tools import WebsiteSearchTool

# AI Suite for ad-hoc LLM calls; Pydantic for the flow state model.
from pydantic import BaseModel
import aisuite as ai

# Source articles the research agent is allowed to search.
urls = [
    "https://lilianweng.github.io/posts/2023-06-23-agent/",
    "https://lilianweng.github.io/posts/2023-03-15-prompt-engineering/",
    "https://lilianweng.github.io/posts/2023-10-25-adv-attack-llm/",
]

research_agent = Agent(
    role="You are a helpful assistant that can answer questions about the web.",
    goal="Answer the user's question.",
    backstory="You have access to a vast knowledge base of information from the web.",
    # One search tool per URL, built from the list so adding a URL requires no
    # further edits (previously three hand-copied WebsiteSearchTool calls).
    tools=[WebsiteSearchTool(website=url) for url in urls],
    llm="gpt-4o-mini",
)

# {question} is interpolated via crew.kickoff(inputs={...}) at run time.
task = Task(
    description="Answer the following question: {question}",
    expected_output="A detailed and accurate answer to the user's question.",
    agent=research_agent,
)

crew = Crew(
    agents=[research_agent],
    tasks=[task],
)
114+
]
115+
},
116+
{
117+
"cell_type": "markdown",
118+
"metadata": {},
119+
"source": [
120+
"# Creating State"
121+
]
122+
},
123+
{
124+
"cell_type": "code",
125+
"execution_count": 4,
126+
"metadata": {},
127+
"outputs": [],
128+
"source": [
129+
"class QAState(BaseModel):\n",
130+
" \"\"\"\n",
131+
" State for the documentation flow\n",
132+
" \"\"\"\n",
133+
" question: str = \"What does Lilian Weng say about the types of agent memory?\"\n",
134+
" improved_question: str = \"\"\n",
135+
" answer: str = \"\""
136+
]
137+
},
138+
{
139+
"cell_type": "markdown",
140+
"metadata": {},
141+
"source": [
142+
"# Creating Flow"
143+
]
144+
},
145+
{
146+
"cell_type": "code",
147+
"execution_count": 5,
148+
"metadata": {},
149+
"outputs": [],
150+
"source": [
151+
"class QAFlow(Flow[QAState]):\n",
152+
" @start()\n",
153+
" def rewrite_question(self):\n",
154+
" print(f\"# Rewriting question: {self.state.question}\")\n",
155+
" client = ai.Client()\n",
156+
" messages = [\n",
157+
" {\n",
158+
" \"role\": \"system\",\n",
159+
" \"content\": f\"\"\"Look at the input and try to reason about the underlying semantic intent / meaning.\n",
160+
" Here is the initial question:\n",
161+
" -------\n",
162+
" {self.state.question}\n",
163+
" -------\n",
164+
" Formulate an improved question:\"\"\"\n",
165+
" }\n",
166+
" ]\n",
167+
"\n",
168+
" response = client.chat.completions.create(\n",
169+
" model=\"openai:gpt-4o-mini\",\n",
170+
" messages=messages,\n",
171+
" temperature=0.3\n",
172+
" )\n",
173+
"\n",
174+
" print(response)\n",
175+
"\n",
176+
" improved_question = response.choices[0].message.content\n",
177+
" self.state.improved_question = improved_question\n",
178+
"\n",
179+
" @listen(rewrite_question)\n",
180+
" def answer_question(self):\n",
181+
" print(f\"# Answering question: {self.state.improved_question}\")\n",
182+
" result = crew.kickoff(inputs={'question': self.state.improved_question})\n",
183+
" self.state.answer = result.raw\n",
184+
" return result\n"
185+
]
186+
},
187+
{
188+
"cell_type": "markdown",
189+
"metadata": {},
190+
"source": [
191+
"# Plotting Flow"
192+
]
193+
},
194+
{
195+
"cell_type": "code",
196+
"execution_count": 12,
197+
"metadata": {},
198+
"outputs": [
199+
{
200+
"name": "stdout",
201+
"output_type": "stream",
202+
"text": [
203+
"Plot saved as crewai_flow.html\n"
204+
]
205+
},
206+
{
207+
"data": {
208+
"text/html": [
209+
"\n",
210+
" <iframe\n",
211+
" width=\"100%\"\n",
212+
" height=\"600\"\n",
213+
" src=\"crewai_flow.html\"\n",
214+
" frameborder=\"0\"\n",
215+
" allowfullscreen\n",
216+
" \n",
217+
" ></iframe>\n",
218+
" "
219+
],
220+
"text/plain": [
221+
"<IPython.lib.display.IFrame at 0x10350b310>"
222+
]
223+
},
224+
"execution_count": 12,
225+
"metadata": {},
226+
"output_type": "execute_result"
227+
}
228+
],
229+
"source": [
230+
"flow = QAFlow()\n",
231+
"flow.plot()\n",
232+
"\n",
233+
"# Display the flow visualization using HTML\n",
234+
"from IPython.display import IFrame\n",
235+
"IFrame(src='crewai_flow.html', width='100%', height=600)"
236+
]
237+
},
238+
{
239+
"cell_type": "markdown",
240+
"metadata": {},
241+
"source": [
242+
"# Kicking off Flow"
243+
]
244+
},
245+
{
246+
"cell_type": "code",
247+
"execution_count": 7,
248+
"metadata": {},
249+
"outputs": [
250+
{
251+
"name": "stdout",
252+
"output_type": "stream",
253+
"text": [
254+
"# Rewriting question: What does Lilian Weng say about the types of agent memory?\n",
255+
"ChatCompletion(id='chatcmpl-Aeo4gBp6YJNqtm6QW3RVqcSoIvcBo', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='What insights does Lilian Weng provide regarding the different types of agent memory in her work?', refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None))], created=1734288970, model='gpt-4o-mini-2024-07-18', object='chat.completion', service_tier=None, system_fingerprint='fp_6fc10e10eb', usage=CompletionUsage(completion_tokens=19, prompt_tokens=56, total_tokens=75, completion_tokens_details=CompletionTokensDetails(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0)))\n",
256+
"# Answering question: What insights does Lilian Weng provide regarding the different types of agent memory in her work?\n",
257+
"==========\n",
258+
"In her work, Lilian Weng provides insights into the different types of memory used in LLM-powered autonomous agents. She categorizes memory into several types, drawing parallels to the functioning of human memory:\n",
259+
"\n",
260+
"1. **Short-term Memory**: This is associated with in-context learning utilized by the model, which allows it to learn and process information temporarily.\n",
261+
"\n",
262+
"2. **Long-term Memory**: This type enables the agent to retain and recall information over extended periods. It often utilizes an external vector store for fast retrieval, thus facilitating the storage of infinite information.\n",
263+
"\n",
264+
"In relation to human memory, Weng elaborates on the following categories:\n",
265+
"\n",
266+
"- **Sensory Memory**: The initial stage of memory retaining sensory impressions after stimuli have ended, lasting only a few seconds. Subcategories include:\n",
267+
" - **Iconic Memory** (visual)\n",
268+
" - **Echoic Memory** (auditory)\n",
269+
" - **Haptic Memory** (touch)\n",
270+
"\n",
271+
"- **Short-Term Memory (STM) or Working Memory**: This type holds information currently in awareness, which is essential for cognitive tasks like learning and reasoning. STM is believed to hold about 7 items for approximately 20-30 seconds.\n",
272+
"\n",
273+
"- **Long-Term Memory (LTM)**: This can retain information for prolonged periods, ranging from days to decades, with virtually unlimited capacity. It is divided into:\n",
274+
" - **Explicit/Declarative Memory**: Conscious recollections of facts and experiences, including:\n",
275+
" - **Episodic Memory**: Events and experiences\n",
276+
" - **Semantic Memory**: Facts and concepts\n",
277+
" - **Implicit/Procedural Memory**: Unconscious memory for skills and routines performed automatically, such as riding a bike or typing.\n",
278+
"\n",
279+
"Weng suggests that sensory memory can be viewed as the process of learning embedding representations for raw inputs, while short-term and long-term memories serve to organize, retain, and retrieve complex information and experiences that enhance an agent's performance as it interacts with environments.\n"
280+
]
281+
}
282+
],
283+
"source": [
284+
"result = flow.kickoff()\n",
285+
"print(\"=\" * 10)\n",
286+
"print(result)"
287+
]
288+
},
289+
{
290+
"cell_type": "code",
291+
"execution_count": 8,
292+
"metadata": {},
293+
"outputs": [
294+
{
295+
"name": "stdout",
296+
"output_type": "stream",
297+
"text": [
298+
"Total execution time: 158.21 seconds\n"
299+
]
300+
}
301+
],
302+
"source": [
303+
"import time\n",
304+
"final_time = time.time()\n",
305+
"print(f\"Total execution time: {final_time - initial_time:.2f} seconds\")"
306+
]
307+
},
308+
{
309+
"cell_type": "code",
310+
"execution_count": null,
311+
"metadata": {},
312+
"outputs": [],
313+
"source": []
314+
}
315+
],
316+
"metadata": {
317+
"kernelspec": {
318+
"display_name": "Python 3 (ipykernel)",
319+
"language": "python",
320+
"name": "python3"
321+
},
322+
"language_info": {
323+
"codemirror_mode": {
324+
"name": "ipython",
325+
"version": 3
326+
},
327+
"file_extension": ".py",
328+
"mimetype": "text/x-python",
329+
"name": "python",
330+
"nbconvert_exporter": "python",
331+
"pygments_lexer": "ipython3",
332+
"version": "3.11.7"
333+
}
334+
},
335+
"nbformat": 4,
336+
"nbformat_minor": 4
337+
}

0 commit comments

Comments
 (0)