From 80f0ac6f110ea775a197e3c317f50d2043adb0ee Mon Sep 17 00:00:00 2001
From: Eugene Yurtsev
Date: Mon, 12 Aug 2024 21:31:48 -0400
Subject: [PATCH] ollama[patch]: Update API Reference for ollama embeddings (#25315)

Update API reference for OllamaEmbeddings

Issue: https://github.com/langchain-ai/langchain/issues/24856
---
 .../ollama/langchain_ollama/embeddings.py | 105 +++++++++++++++++-
 1 file changed, 100 insertions(+), 5 deletions(-)

diff --git a/libs/partners/ollama/langchain_ollama/embeddings.py b/libs/partners/ollama/langchain_ollama/embeddings.py
index 357878d28103e..6f538b0635448 100644
--- a/libs/partners/ollama/langchain_ollama/embeddings.py
+++ b/libs/partners/ollama/langchain_ollama/embeddings.py
@@ -9,16 +9,111 @@
 
 
 class OllamaEmbeddings(BaseModel, Embeddings):
-    """OllamaEmbeddings embedding model.
+    """Ollama embedding model integration.
 
-    Example:
+    Set up a local Ollama instance:
+        Install the Ollama package and set up a local Ollama instance
+        using the instructions here: https://github.com/ollama/ollama .
+
+        You will need to choose a model to serve.
+
+        You can view a list of available models via the model library (https://ollama.com/library).
+
+        To fetch a model from the Ollama model library use ``ollama pull <name-of-model>``.
+
+        For example, to pull the llama3 model:
+
+        .. code-block:: bash
+
+            ollama pull llama3
+
+        This will download the default tagged version of the model.
+        Typically, the default points to the latest, smallest sized-parameter model.
+
+        * On Mac, the models will be downloaded to ~/.ollama/models
+        * On Linux (or WSL), the models will be stored at /usr/share/ollama/.ollama/models
+
+        You can specify the exact version of the model of interest
+        as such ``ollama pull vicuna:13b-v1.5-16k-q4_0``.
+
+        To view pulled models:
+
+        .. code-block:: bash
+
+            ollama list
+
+        To start serving:
+
+        .. code-block:: bash
+
+            ollama serve
+
+        View the Ollama documentation for more commands.
+
+        .. code-block:: bash
+
+            ollama help
+
+    Install the langchain-ollama integration package:
+        .. code-block:: bash
+
+            pip install -U langchain_ollama
+
+    Key init args — completion params:
+        model: str
+            Name of Ollama model to use.
+        base_url: Optional[str]
+            Base url the model is hosted under.
+
+    See full list of supported init args and their descriptions in the params section.
+
+    Instantiate:
         .. code-block:: python
 
             from langchain_ollama import OllamaEmbeddings
 
-            embedder = OllamaEmbeddings(model="llama3")
-            embedder.embed_query("what is the place that jonathan worked at?")
-    """
+            embed = OllamaEmbeddings(
+                model="llama3"
+            )
+
+    Embed single text:
+        .. code-block:: python
+
+            input_text = "The meaning of life is 42"
+            vector = embed.embed_query(input_text)
+            print(vector[:3])
+
+        .. code-block:: python
+
+            [-0.024603435769677162, -0.007543657906353474, 0.0039630369283258915]
+
+    Embed multiple texts:
+        .. code-block:: python
+
+            input_texts = ["Document 1...", "Document 2..."]
+            vectors = embed.embed_documents(input_texts)
+            print(len(vectors))
+            # The first 3 coordinates for the first vector
+            print(vectors[0][:3])
+
+        .. code-block:: python
+
+            2
+            [-0.024603435769677162, -0.007543657906353474, 0.0039630369283258915]
+
+    Async:
+        .. code-block:: python
+
+            vector = await embed.aembed_query(input_text)
+            print(vector[:3])
+
+            # multiple:
+            # await embed.aembed_documents(input_texts)
+
+        .. code-block:: python
+
+            [-0.009100092574954033, 0.005071679595857859, -0.0029193938244134188]
+    """  # noqa: E501
 
     model: str
    """Model name to use."""
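
As a quick sanity check of the docstring examples added above, here is a minimal sketch that exercises `embed_query` and `embed_documents` end to end. It assumes a local Ollama server is running (`ollama serve`) with the `llama3` model already pulled; the `cosine_similarity` helper is illustrative only and not part of `langchain_ollama`.

```python
# Sanity-check sketch for the new docstring examples.
# Assumes `ollama serve` is running locally with `llama3` pulled.
import math

from langchain_ollama import OllamaEmbeddings

embed = OllamaEmbeddings(model="llama3")


def cosine_similarity(a: list[float], b: list[float]) -> float:
    """Illustrative helper; not part of langchain_ollama."""
    dot = sum(x * y for x, y in zip(a, b))
    norm_a = math.sqrt(sum(x * x for x in a))
    norm_b = math.sqrt(sum(y * y for y in b))
    return dot / (norm_a * norm_b)


docs = ["Document 1...", "Document 2..."]

# Embed one query and several documents, as in the docstring.
query_vector = embed.embed_query("The meaning of life is 42")
doc_vectors = embed.embed_documents(docs)

# Rank the documents by similarity to the query.
scores = [cosine_similarity(query_vector, v) for v in doc_vectors]
print(sorted(zip(scores, docs), reverse=True))
```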
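
The docstring's Async section shows `await` in isolation; to run it as a standalone script, the calls need an `asyncio` entry point. A minimal sketch under the same assumptions (local server, `llama3` pulled):

```python
# Standalone version of the docstring's Async example.
import asyncio

from langchain_ollama import OllamaEmbeddings


async def main() -> None:
    embed = OllamaEmbeddings(model="llama3")

    # Single text.
    vector = await embed.aembed_query("The meaning of life is 42")
    print(vector[:3])

    # Multiple texts in one call.
    vectors = await embed.aembed_documents(["Document 1...", "Document 2..."])
    print(len(vectors), len(vectors[0]))


asyncio.run(main())
```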