Use OllamaEmbeddings in ollama examples (#10616)

This changes the Ollama examples to use `OllamaEmbeddings` for generating embeddings.

repository: https://github.com/hwchase17/langchain.git
parent: 6831a25675
commit: 6d3670c7d8
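For context, below is a minimal sketch of the pattern the updated notebooks follow: building a `Chroma` vector store whose embeddings come from a local Ollama server via `OllamaEmbeddings` instead of `GPT4AllEmbeddings`. The texts and sanity checks are illustrative assumptions, not lines from the notebooks; it presumes `pip install chromadb`, a running Ollama server on the default `http://localhost:11434`, and a pulled model such as `llama2`.

```python
# Minimal sketch of the embedding setup after this change.
# Assumes `ollama serve` is running locally and a model has been pulled,
# e.g. `ollama pull llama2`, plus `pip install chromadb`.
from langchain.vectorstores import Chroma
from langchain.embeddings import OllamaEmbeddings

# Stand-in for the `all_splits` document chunks produced earlier in the notebooks.
texts = [
    "Ollama runs large language models locally.",
    "Chroma is a local vector store for embeddings.",
]

# Embeddings are now generated by the local Ollama server instead of GPT4All.
embeddings = OllamaEmbeddings()  # model defaults to llama2 in this version
vectorstore = Chroma.from_texts(texts=texts, embedding=embeddings)

# Quick sanity check: one embedding vector and a similarity lookup.
print(len(embeddings.embed_query("What does Ollama do?")))
print(vectorstore.similarity_search("local vector store", k=1)[0].page_content)
```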
@@ -132,13 +132,7 @@
 "ollama pull llama2:13b\n",
 "```\n",
 "\n",
-"Or, the 13b-chat model:\n",
-"\n",
-"```\n",
-"ollama pull llama2:13b-chat\n",
-"```\n",
-"\n",
-"Let's also use local embeddings from `GPT4AllEmbeddings` and `Chroma`."
+"Let's also use local embeddings from `OllamaEmbeddings` and `Chroma`."
 ]
 },
 {
@@ -147,7 +141,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"! pip install gpt4all chromadb"
+"! pip install chromadb"
 ]
 },
 {
@@ -167,22 +161,14 @@
 },
 {
 "cell_type": "code",
-"execution_count": 6,
+"execution_count": null,
 "metadata": {},
-"outputs": [
-{
-"name": "stdout",
-"output_type": "stream",
-"text": [
-"Found model file at /Users/rlm/.cache/gpt4all/ggml-all-MiniLM-L6-v2-f16.bin\n"
-]
-}
-],
+"outputs": [],
 "source": [
 "from langchain.vectorstores import Chroma\n",
-"from langchain.embeddings import GPT4AllEmbeddings\n",
+"from langchain.embeddings import OllamaEmbeddings\n",
 "\n",
-"vectorstore = Chroma.from_documents(documents=all_splits, embedding=GPT4AllEmbeddings())"
+"vectorstore = Chroma.from_documents(documents=all_splits, embedding=OllamaEmbeddings())"
 ]
 },
 {
@@ -238,7 +224,7 @@
 "from langchain.chat_models import ChatOllama\n",
 "from langchain.callbacks.manager import CallbackManager\n",
 "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
-"chat_model = ChatOllama(model=\"llama2:13b-chat\",\n",
+"chat_model = ChatOllama(model=\"llama2:13b\",\n",
 " verbose=True,\n",
 " callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]))"
 ]

@@ -137,7 +137,6 @@
 "\n",
 "```\n",
 "ollama pull llama2:13b\n",
-"ollama run llama2:13b \n",
 "```\n",
 "\n",
 "Let's also use local embeddings from `OllamaEmbeddings` and `Chroma`."
@@ -149,7 +148,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"! pip install gpt4all chromadb"
+"! pip install chromadb"
 ]
 },
 {
@@ -169,17 +168,9 @@
 },
 {
 "cell_type": "code",
-"execution_count": 61,
+"execution_count": null,
 "metadata": {},
-"outputs": [
-{
-"name": "stdout",
-"output_type": "stream",
-"text": [
-"Found model file at /Users/rlm/.cache/gpt4all/ggml-all-MiniLM-L6-v2-f16.bin\n"
-]
-}
-],
+"outputs": [],
 "source": [
 "from langchain.vectorstores import Chroma\n",
 "from langchain.embeddings import OllamaEmbeddings\n",
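For completeness, here is a rough, self-contained sketch of the `ChatOllama` streaming setup that the last hunk of the first notebook switches from `llama2:13b-chat` to `llama2:13b`. The prompt and the call pattern are illustrative assumptions layered on the lines shown in the diff.

```python
# Sketch of the ChatOllama streaming setup shown in the diff above.
# Assumes `ollama pull llama2:13b` has been run and the Ollama server is up.
from langchain.chat_models import ChatOllama
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.schema import HumanMessage

chat_model = ChatOllama(
    model="llama2:13b",
    verbose=True,
    callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
)

# Tokens stream to stdout as they are generated by the local model.
response = chat_model([HumanMessage(content="Tell me about local LLMs")])
print(response.content)
```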