Use OllamaEmbeddings in ollama examples (#10616)

This changes the Ollama examples to use `OllamaEmbeddings` for generating embeddings.
Jeffrey Morgan 2023-09-15 10:05:27 -07:00 committed by GitHub
parent 6831a25675
commit 6d3670c7d8
2 changed files with 10 additions and 33 deletions
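
For context, the updated embedding cells in both notebooks boil down to the sketch below. It assumes a local Ollama server running on its default port (http://localhost:11434) with a model such as `llama2:13b` already pulled; `all_splits` refers to the document chunks produced earlier in each notebook.

```python
from langchain.vectorstores import Chroma
from langchain.embeddings import OllamaEmbeddings

# Embeddings now come from the local Ollama server rather than a
# downloaded GPT4All model file, so gpt4all no longer needs to be installed.
vectorstore = Chroma.from_documents(
    documents=all_splits,          # chunks created earlier in the notebook
    embedding=OllamaEmbeddings(),  # defaults to the local Ollama endpoint
)
```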

First changed file:

@@ -132,13 +132,7 @@
 "ollama pull llama2:13b\n",
 "```\n",
 "\n",
-"Or, the 13b-chat model:\n",
-"\n",
-"```\n",
-"ollama pull llama2:13b-chat\n",
-"```\n",
-"\n",
-"Let's also use local embeddings from `GPT4AllEmbeddings` and `Chroma`."
+"Let's also use local embeddings from `OllamaEmbeddings` and `Chroma`."
 ]
 },
 {
@@ -147,7 +141,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"! pip install gpt4all chromadb"
+"! pip install chromadb"
 ]
 },
 {
@@ -167,22 +161,14 @@
 },
 {
 "cell_type": "code",
-"execution_count": 6,
+"execution_count": null,
 "metadata": {},
-"outputs": [
-{
-"name": "stdout",
-"output_type": "stream",
-"text": [
-"Found model file at /Users/rlm/.cache/gpt4all/ggml-all-MiniLM-L6-v2-f16.bin\n"
-]
-}
-],
+"outputs": [],
 "source": [
 "from langchain.vectorstores import Chroma\n",
-"from langchain.embeddings import GPT4AllEmbeddings\n",
+"from langchain.embeddings import OllamaEmbeddings\n",
 "\n",
-"vectorstore = Chroma.from_documents(documents=all_splits, embedding=GPT4AllEmbeddings())"
+"vectorstore = Chroma.from_documents(documents=all_splits, embedding=OllamaEmbeddings())"
 ]
 },
 {
@@ -238,7 +224,7 @@
 "from langchain.chat_models import ChatOllama\n",
 "from langchain.callbacks.manager import CallbackManager\n",
 "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
-"chat_model = ChatOllama(model=\"llama2:13b-chat\",\n",
+"chat_model = ChatOllama(model=\"llama2:13b\",\n",
 " verbose=True,\n",
 " callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]))"
 ]
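
For readability, here is the updated chat-model cell from the first notebook with the JSON escaping removed:

```python
from langchain.chat_models import ChatOllama
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler

# The model tag now matches the one pulled above; the separate
# "llama2:13b-chat" tag is no longer referenced.
chat_model = ChatOllama(
    model="llama2:13b",
    verbose=True,
    callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
)
```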

Second changed file:

@@ -137,7 +137,6 @@
 "\n",
 "```\n",
 "ollama pull llama2:13b\n",
-"ollama run llama2:13b \n",
 "```\n",
 "\n",
 "Let's also use local embeddings from `OllamaEmbeddings` and `Chroma`."
@@ -149,7 +148,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"! pip install gpt4all chromadb"
+"! pip install chromadb"
 ]
 },
 {
@@ -169,17 +168,9 @@
 },
 {
 "cell_type": "code",
-"execution_count": 61,
+"execution_count": null,
 "metadata": {},
-"outputs": [
-{
-"name": "stdout",
-"output_type": "stream",
-"text": [
-"Found model file at /Users/rlm/.cache/gpt4all/ggml-all-MiniLM-L6-v2-f16.bin\n"
-]
-}
-],
+"outputs": [],
 "source": [
 "from langchain.vectorstores import Chroma\n",
 "from langchain.embeddings import OllamaEmbeddings\n",