From 6d3670c7d8d29d7eb971bfa336e32d416da11806 Mon Sep 17 00:00:00 2001
From: Jeffrey Morgan
Date: Fri, 15 Sep 2023 10:05:27 -0700
Subject: [PATCH] Use `OllamaEmbeddings` in ollama examples (#10616)

This changes the Ollama examples to use `OllamaEmbeddings` for generating embeddings.
---
 docs/extras/integrations/chat/ollama.ipynb | 28 +++++++---------------------
 docs/extras/integrations/llms/ollama.ipynb | 15 +++------------
 2 files changed, 10 insertions(+), 33 deletions(-)

diff --git a/docs/extras/integrations/chat/ollama.ipynb b/docs/extras/integrations/chat/ollama.ipynb
index 41a90405b78..d5569397bf2 100644
--- a/docs/extras/integrations/chat/ollama.ipynb
+++ b/docs/extras/integrations/chat/ollama.ipynb
@@ -132,13 +132,7 @@
     "ollama pull llama2:13b\n",
     "```\n",
     "\n",
-    "Or, the 13b-chat model:\n",
-    "\n",
-    "```\n",
-    "ollama pull llama2:13b-chat\n",
-    "```\n",
-    "\n",
-    "Let's also use local embeddings from `GPT4AllEmbeddings` and `Chroma`."
+    "Let's also use local embeddings from `OllamaEmbeddings` and `Chroma`."
    ]
   },
   {
@@ -147,7 +141,7 @@
    "cell_type": "code",
    "metadata": {},
    "outputs": [],
    "source": [
-    "! pip install gpt4all chromadb"
+    "! pip install chromadb"
    ]
   },
@@ -167,22 +161,14 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 6,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Found model file at /Users/rlm/.cache/gpt4all/ggml-all-MiniLM-L6-v2-f16.bin\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "from langchain.vectorstores import Chroma\n",
-    "from langchain.embeddings import GPT4AllEmbeddings\n",
+    "from langchain.embeddings import OllamaEmbeddings\n",
     "\n",
-    "vectorstore = Chroma.from_documents(documents=all_splits, embedding=GPT4AllEmbeddings())"
+    "vectorstore = Chroma.from_documents(documents=all_splits, embedding=OllamaEmbeddings())"
    ]
   },
   {
@@ -238,7 +224,7 @@
     "from langchain.chat_models import ChatOllama\n",
     "from langchain.callbacks.manager import CallbackManager\n",
     "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
-    "chat_model = ChatOllama(model=\"llama2:13b-chat\",\n",
+    "chat_model = ChatOllama(model=\"llama2:13b\",\n",
     "                        verbose=True,\n",
     "                        callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]))"
    ]
diff --git a/docs/extras/integrations/llms/ollama.ipynb b/docs/extras/integrations/llms/ollama.ipynb
index 55e77871c6b..49f8ae8f53b 100644
--- a/docs/extras/integrations/llms/ollama.ipynb
+++ b/docs/extras/integrations/llms/ollama.ipynb
@@ -137,7 +137,6 @@
     "\n",
     "```\n",
     "ollama pull llama2:13b\n",
-    "ollama run llama2:13b \n",
     "```\n",
     "\n",
     "Let's also use local embeddings from `OllamaEmbeddings` and `Chroma`."
@@ -149,7 +148,7 @@
    "cell_type": "code",
    "metadata": {},
    "outputs": [],
    "source": [
-    "! pip install gpt4all chromadb"
+    "! pip install chromadb"
    ]
   },
@@ -169,17 +168,9 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 61,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Found model file at /Users/rlm/.cache/gpt4all/ggml-all-MiniLM-L6-v2-f16.bin\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "from langchain.vectorstores import Chroma\n",
     "from langchain.embeddings import OllamaEmbeddings\n",
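
For reference, a minimal sketch of what the updated notebook cells do after this patch: embed document chunks with a local Ollama model via `OllamaEmbeddings` and index them in `Chroma`. The `all_splits` list below is a stand-in for the chunks produced by the notebooks' earlier loading/splitting cells, and the snippet assumes a local Ollama server with a pulled model is running.

from langchain.docstore.document import Document
from langchain.embeddings import OllamaEmbeddings
from langchain.vectorstores import Chroma

# Stand-in for the notebooks' `all_splits` (chunks from a text splitter).
all_splits = [
    Document(page_content="Ollama runs large language models locally."),
    Document(page_content="OllamaEmbeddings calls a local Ollama server to embed text."),
]

# Embed the chunks locally and index them in Chroma, mirroring the cells updated above.
vectorstore = Chroma.from_documents(documents=all_splits, embedding=OllamaEmbeddings())

# Query the local index.
print(vectorstore.similarity_search("How are embeddings generated?")[0].page_content)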