From 2dc237c5a1f20247969978a52a4a19d58cdd9371 Mon Sep 17 00:00:00 2001
From: Mark Whitby
Date: Fri, 3 May 2024 18:01:50 +0100
Subject: [PATCH] Sections 1 - 4 updated

---
 .../02-OpenAIPackages/openai.ipynb           |   2 +-
 .../03-Langchain/langchain.ipynb             | 105 ++++++++++++------
 .../04-SemanticKernel/semantickernel.ipynb   |  28 ++++-
 labs/03-orchestration/01-Tokens/tokens.ipynb |   2 +-
 .../02-Embeddings/embeddings.ipynb           |  16 +--
 labs/03-orchestration/03-Qdrant/qdrant.ipynb |  11 +-
 .../04-ACS/acs-lc-python.ipynb               |   2 +-
 .../04-ACS/acs-sk-csharp.ipynb               |  12 +-
 requirements.txt                             |  20 ++--
 9 files changed, 121 insertions(+), 77 deletions(-)

diff --git a/labs/02-integrating-ai/02-OpenAIPackages/openai.ipynb b/labs/02-integrating-ai/02-OpenAIPackages/openai.ipynb
index 69a2100..989803d 100644
--- a/labs/02-integrating-ai/02-OpenAIPackages/openai.ipynb
+++ b/labs/02-integrating-ai/02-OpenAIPackages/openai.ipynb
@@ -168,7 +168,7 @@
  "name": "python",
  "nbconvert_exporter": "python",
  "pygments_lexer": "ipython3",
- "version": "3.11.7"
+ "version": "3.11.9"
 },
 "orig_nbformat": 4
 },
diff --git a/labs/02-integrating-ai/03-Langchain/langchain.ipynb b/labs/02-integrating-ai/03-Langchain/langchain.ipynb
index 7906d25..39e732a 100644
--- a/labs/02-integrating-ai/03-Langchain/langchain.ipynb
+++ b/labs/02-integrating-ai/03-Langchain/langchain.ipynb
@@ -26,9 +26,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain.llms import AzureOpenAI\n",
- "from langchain_openai import AzureChatOpenAI\n",
- "from langchain.schema import HumanMessage"
+ "from langchain_openai import AzureChatOpenAI"
 ]
 },
 {
@@ -91,24 +89,29 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "# Define the prompt we want the AI to respond to - the message the Human user is asking\n",
- "msg = HumanMessage(content=\"Explain step by step. How old is the president of USA?\")\n",
- "\n",
- "# Call the API\n",
- "r = llm.invoke([msg])\n",
+ "r = llm.invoke(\"What things could I make with a Raspberry Pi?\")\n",
 "\n",
 "# Print the response\n",
 "print(r.content)"
 ]
 },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Compared to using the OpenAI Python library as we did in the previous lab, Langchain further simplifies the process of interacting with the LLM by reducing it to a single `llm.invoke` call."
+ ]
+ },
 {
 "attachments": {},
 "cell_type": "markdown",
 "metadata": {},
 "source": [
- "## Send a prompt to Azure OpenAI using Langchain Chaining\n",
+ "## Using templates and chains\n",
+ "\n",
+ "We've seen that we can use Langchain to interact with the LLM and it's a little easier to work with than the OpenAI Python library. However, that's just the start of how Langchain makes it easier to work with LLMs. Most OpenAI models are designed to be interacted with using a chat-style interface, where you provide a persona or system prompt which helps the LLM understand the context of the conversation. This will then be sent to the LLM along with the user's request.\n",
 "\n",
- "Now that we have seen Langchain in action, let's take a quick peek at chaining and adding variables to our prompt. To do this we will add `LLMChain` to the `llm` instance created above."
+ "\n",
+ "So that you don't have to set up the persona / system prompt every time you want to interact with the LLM, Langchain provides the concept of Templates. Templates are a way to define the persona and system prompt once and then reuse them across multiple interactions.\n",
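+ "\n",
+ "For comparison, here's a minimal sketch of what a single call looks like without a template, with the persona spelled out by hand (this assumes the `llm` instance created above):\n",
+ "\n",
+ "```python\n",
+ "from langchain_core.messages import SystemMessage, HumanMessage\n",
+ "\n",
+ "# Without a template, the persona has to be restated for every request\n",
+ "messages = [\n",
+ "    SystemMessage(content=\"You are a chatbot that helps people generate ideas for their next project.\"),\n",
+ "    HumanMessage(content=\"What things could I make with a Raspberry Pi?\"),\n",
+ "]\n",
+ "print(llm.invoke(messages).content)\n",
+ "```"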
 ]
 },
 {
@@ -117,18 +120,20 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain.prompts import PromptTemplate\n",
- "from langchain.chains import LLMChain"
+ "from langchain_core.prompts import ChatPromptTemplate\n",
+ "prompt = ChatPromptTemplate.from_messages([\n",
+ "    (\"system\", \"You are a chatbot that helps people generate ideas for their next project. You can help them brainstorm ideas, come up with a plan, or even help them with their project.\"),\n",
+ "    (\"user\", \"{input}\")\n",
+ "])"
 ]
 },
 {
- "attachments": {},
 "cell_type": "markdown",
 "metadata": {},
 "source": [
- "With the OpenAI API, we still had to pass the prompt in using the `Completion.create()` method. With Langchain, we can create a `PromptTemplate`. This way, we can define our prompt up front and leave placeholders for values that will be set later on. The placeholder could be values that are passed from an end user or application via an API. We don't know what they at this point.\n",
+ "Above, we've defined a \"system\" message which will tell the LLM how we're expecting it to respond, and an `{input}` placeholder for the user's prompt.\n",
 "\n",
- "In the below example, the `{input}` in curly brackets is the placeholder value that will be populated later on."
+ "Next, we define a chain. A chain allows us to define a sequence of operations that we want to perform. In this case, we're defining a simple chain that will take the prompt we've defined above and send it to the LLM."
 ]
 },
 {
@@ -137,19 +142,14 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "# Create a prompt template with variables, note the curly braces\n",
- "prompt = PromptTemplate(\n",
- "    input_variables=[\"input\"],\n",
- "    template=\"What interesting things can I make with a {input}?\",\n",
- ")"
+ "chain = prompt | llm"
 ]
 },
 {
- "attachments": {},
 "cell_type": "markdown",
 "metadata": {},
 "source": [
- "Next we define a chain. In this case, the chain has two components. One component is the prompt template. The other is the object that represents our AI model (`llm`)."
+ "Now, we can invoke the chain in a similar fashion to how we invoked the LLM earlier. This time, we're passing in the user's input as a parameter to the chain, which will replace the `{input}` placeholder in the prompt."
 ]
 },
 {
@@ -158,16 +158,16 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "# Create a chain\n",
- "chain = LLMChain(llm=llm, prompt=prompt)"
+ "chain.invoke({\"input\": \"I've just purchased a Raspberry Pi and I'm looking for a project to work on. Can you help me brainstorm some ideas?\"})"
 ]
 },
 {
- "attachments": {},
 "cell_type": "markdown",
 "metadata": {},
 "source": [
- "Finally, we initiate the chain. You can see that we pass in a value for the `input` placeholder."
+ "The result will be an `AIMessage` object, which contains the response from the LLM.\n",
+ "\n",
+ "Let's enhance the chain further to get it to parse the output from the LLM and extract the text from the response. First, we define an output parser."
 ]
 },
 {
@@ -176,13 +176,54 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "# Run the chain only specifying the input variable.\n",
- "response = chain.invoke({\"input\": \"raspberry pi\"})\n",
+ "from langchain_core.output_parsers import StrOutputParser\n",
+ "\n",
+ "output_parser = StrOutputParser()"
 ]
 },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Next, we redefine our chain to include the output parser. So now when we invoke the chain, it will:\n",
+ "\n",
+ "- Take the prompt template and add the user's input\n",
+ "- Send the prompt to the LLM\n",
+ "- Parse the response from the LLM and extract the text"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "chain = prompt | llm | output_parser"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Now let's invoke the chain again with the same prompt as before."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "chain.invoke({\"input\": \"I've just purchased a Raspberry Pi and I'm looking for a project to work on. Can you help me brainstorm some ideas?\"})"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "This time, you should only get a string containing the text from the response.\n",
 "\n",
- "# As we are using a single input variable, you could also run the string like this:\n",
- "# response = chain.run(\"raspberry pi\")\n",
 "\n",
- "print(response['text'])"
+ "We can do much more powerful things with chains than simply setting up and passing prompts to the LLM and parsing the results. We could augment the prompt with external data retrieved from a database, add conversation history to provide context for a chatbot, or even chain multiple LLMs together to create a more powerful model. We'll explore some of these ideas in future labs.\n",
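+ "\n",
+ "As a taste, here's a minimal sketch of the conversation history idea, reusing the `llm` and `output_parser` objects from above (the history shown is just made-up example data):\n",
+ "\n",
+ "```python\n",
+ "from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
+ "from langchain_core.messages import HumanMessage, AIMessage\n",
+ "\n",
+ "# A placeholder in the template lets us replay earlier turns on every call\n",
+ "chat_prompt = ChatPromptTemplate.from_messages([\n",
+ "    (\"system\", \"You are a chatbot that helps people generate ideas for their next project.\"),\n",
+ "    MessagesPlaceholder(variable_name=\"history\"),\n",
+ "    (\"user\", \"{input}\")\n",
+ "])\n",
+ "\n",
+ "chat_chain = chat_prompt | llm | output_parser\n",
+ "\n",
+ "print(chat_chain.invoke({\n",
+ "    \"history\": [\n",
+ "        HumanMessage(content=\"Can you help me brainstorm some Raspberry Pi projects?\"),\n",
+ "        AIMessage(content=\"Sure! How about a retro games console or a weather station?\"),\n",
+ "    ],\n",
+ "    \"input\": \"Tell me more about the weather station idea.\"\n",
+ "}))\n",
+ "```"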
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "vscode": { + "languageId": "polyglot-notebook" + } + }, + "outputs": [], + "source": [ + "Console.WriteLine(await kernel.InvokePromptAsync(\"What things could I make with a Raspberry Pi?\"));" + ] + }, { "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "Let's create a Semantic Function to perform a simple request to Azure OpenAI. In this case, the function contains a *prompt template*. The template allows us to define a prompt and add placeholders for values that we will provide later. These values could come from user input, or another function, for example." + "Let's take that simple prompt forward and create a function with a prompt template to perform a simple request to Azure OpenAI. The template allows us to define a prompt and add placeholders for values that we will provide later. These values could come from user input, or another function, for example." ] }, { diff --git a/labs/03-orchestration/01-Tokens/tokens.ipynb b/labs/03-orchestration/01-Tokens/tokens.ipynb index 6c195d2..4588f90 100644 --- a/labs/03-orchestration/01-Tokens/tokens.ipynb +++ b/labs/03-orchestration/01-Tokens/tokens.ipynb @@ -322,7 +322,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.7" + "version": "3.11.9" }, "orig_nbformat": 4 }, diff --git a/labs/03-orchestration/02-Embeddings/embeddings.ipynb b/labs/03-orchestration/02-Embeddings/embeddings.ipynb index e61de2f..881b31d 100644 --- a/labs/03-orchestration/02-Embeddings/embeddings.ipynb +++ b/labs/03-orchestration/02-Embeddings/embeddings.ipynb @@ -53,11 +53,7 @@ "metadata": {}, "outputs": [], "source": [ - "# Define the prompt we want the AI to respond to - the message the Human user is asking\n", - "msg = HumanMessage(content=\"Tell me about the latest Ant-Man movie. When was it released? What is it about?\")\n", - "\n", - "# Call the AI\n", - "r = llm.invoke([msg])\n", + "r = llm.invoke(\"Tell me about the latest Ant-Man movie. When was it released? What is it about?\")\n", "\n", "# Print the response\n", "print(r.content)" @@ -209,13 +205,7 @@ " azure_deployment = os.getenv(\"AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME\"),\n", " openai_api_version = os.getenv(\"OPENAI_EMBEDDING_API_VERSION\"),\n", " model= os.getenv(\"AZURE_OPENAI_EMBEDDING_MODEL\")\n", - ")\n", - "\n", - "\n", - "\n", - "\n", - "\n", - " " + ")" ] }, { @@ -470,7 +460,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.6" + "version": "3.11.9" }, "orig_nbformat": 4 }, diff --git a/labs/03-orchestration/03-Qdrant/qdrant.ipynb b/labs/03-orchestration/03-Qdrant/qdrant.ipynb index 5c8da42..c19c98c 100644 --- a/labs/03-orchestration/03-Qdrant/qdrant.ipynb +++ b/labs/03-orchestration/03-Qdrant/qdrant.ipynb @@ -145,13 +145,6 @@ ")" ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**NOTE:** After this has run, you can, if you wish, browse the structure of the persisted data. You should see a `qdrantstorage` folder in the same location as this lab." 
- ] - }, { "attachments": {}, "cell_type": "markdown", @@ -316,8 +309,6 @@ "\n", "📣 [Azure AI Search with Semantic Kernel and C#](../04-ACS/acs-sk-csharp.ipynb)\n", "\n", - "📣 [Azure AI Search with Semantic Kernel and Python](../04-ACS/acs-sk-python.ipynb)\n", - "\n", "📣 [Azure AI Search with Langchain and Python](../04-ACS/acs-lc-python.ipynb)" ] } @@ -338,7 +329,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.6" + "version": "3.11.9" }, "orig_nbformat": 4 }, diff --git a/labs/03-orchestration/04-ACS/acs-lc-python.ipynb b/labs/03-orchestration/04-ACS/acs-lc-python.ipynb index 17fe219..975184e 100644 --- a/labs/03-orchestration/04-ACS/acs-lc-python.ipynb +++ b/labs/03-orchestration/04-ACS/acs-lc-python.ipynb @@ -628,7 +628,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.6" + "version": "3.11.9" }, "orig_nbformat": 4 }, diff --git a/labs/03-orchestration/04-ACS/acs-sk-csharp.ipynb b/labs/03-orchestration/04-ACS/acs-sk-csharp.ipynb index 5d34d0c..30dc3d2 100644 --- a/labs/03-orchestration/04-ACS/acs-sk-csharp.ipynb +++ b/labs/03-orchestration/04-ACS/acs-sk-csharp.ipynb @@ -88,8 +88,8 @@ "source": [ "// Add the Packages\n", "#r \"nuget: dotenv.net, 3.1.2\"\n", - "#r \"nuget: Azure.AI.OpenAI, 1.0.0-beta.12\"\n", - "#r \"nuget: Azure.Identity, 1.10.4\"\n", + "#r \"nuget: Azure.AI.OpenAI, 1.0.0-beta.16\"\n", + "#r \"nuget: Azure.Identity, 1.11.2\"\n", "#r \"nuget: Azure.Search.Documents, 11.5.0-beta.5\"" ] }, @@ -780,10 +780,10 @@ "source": [ "// Add the Packages\n", "#r \"nuget: dotenv.net, 3.1.2\"\n", - "#r \"nuget: Microsoft.SemanticKernel, 1.0.1\"\n", - "#r \"nuget: Microsoft.SemanticKernel.Connectors.OpenAI, 1.0.1\"\n", - "#r \"nuget: Azure.AI.OpenAI, 1.0.0-beta.12\"\n", - "#r \"nuget: Azure.Identity, 1.10.4\"\n", + "#r \"nuget: Microsoft.SemanticKernel, 1.10.0\"\n", + "#r \"nuget: Microsoft.SemanticKernel.Connectors.OpenAI, 1.10.0\"\n", + "#r \"nuget: Azure.AI.OpenAI, 1.0.0-beta.16\"\n", + "#r \"nuget: Azure.Identity, 1.11.2\"\n", "#r \"nuget: Azure.Search.Documents, 11.5.0-beta.5\"\n", "#r \"nuget: Microsoft.Extensions.Logging, 7.0.0\"\n", "#r \"nuget: Microsoft.Extensions.Logging.Console, 7.0.0\"\n", diff --git a/requirements.txt b/requirements.txt index 51dda33..5b28b13 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,17 +1,17 @@ azure-core==1.30.1 -azure-identity==1.15.0 +azure-identity==1.16.0 azure-search-documents==11.4.0 -semantic-kernel==0.4.5.dev0 -openai==1.14.2 -langchain==0.1.13 -langchain-openai==0.1.1 +semantic-kernel==0.9.5b1 +openai==1.17.1 +langchain==0.1.16 +langchain-openai==0.1.3 tiktoken==0.6.0 -python-dotenv==1.0.0 +python-dotenv==1.0.1 requests==2.31.0 -unstructured==0.12.6 +unstructured==0.13.2 markdown==3.6 -qdrant-client==1.8.0 +qdrant-client==1.8.2 chromadb==0.4.24 yfinance==0.2.37 -langchain-community==0.0.29 -pymongo==4.6.2 \ No newline at end of file +langchain-community==0.0.32 +pymongo==4.6.3 \ No newline at end of file