docs: ollama nits (#31714)
parent 7cdd53390d
commit 8878a7b143
@@ -258,7 +258,7 @@
    "source": [
     "## Tool calling\n",
     "\n",
-    "We can use [tool calling](https://blog.langchain.dev/improving-core-tool-interfaces-and-docs-in-langchain/) with an LLM [that has been fine-tuned for tool use](https://ollama.com/library/llama3.1):\n",
+    "We can use [tool calling](https://blog.langchain.dev/improving-core-tool-interfaces-and-docs-in-langchain/) with an LLM [that has been fine-tuned for tool use](https://ollama.com/search?&c=tools) such as `llama3.1`:\n",
     "\n",
     "```\n",
     "ollama pull llama3.1\n",
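For reference, a minimal sketch of the tool-calling flow this cell documents, assuming `llama3.1` has been pulled and `ollama serve` is running; the `multiply` tool is illustrative, not taken from the notebook:

```python
from langchain_ollama import ChatOllama


def multiply(a: int, b: int) -> int:
    """Multiply two integers."""
    return a * b


# Bind the Python function as a tool; the model emits structured tool calls.
llm = ChatOllama(model="llama3.1")
llm_with_tools = llm.bind_tools([multiply])

response = llm_with_tools.invoke("What is 6 times 7?")
print(response.tool_calls)  # e.g. [{"name": "multiply", "args": {"a": 6, "b": 7}, ...}]
```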
@@ -23,13 +23,15 @@ Ollama will start as a background service automatically, if this is disabled, run
 ollama serve
 ```

-After starting ollama, run `ollama pull <model_checkpoint>` to download a model
-from the [Ollama model library](https://ollama.ai/library).
+After starting ollama, run `ollama pull <name-of-model>` to download a model from the [Ollama model library](https://ollama.ai/library):

 ```bash
 ollama pull llama3.1
 ```

+- This will download the default tagged version of the model. Typically, the default points to the latest, smallest sized-parameter model.
+- To view all pulled (downloaded) models, use `ollama list`
+
 We're now ready to install the `langchain-ollama` partner package and run a model.

 ### Ollama LangChain partner package install
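A quick smoke test of the flow described above, assuming `pip install -U langchain-ollama` and `ollama pull llama3.1` have completed and the server is running:

```python
from langchain_ollama import ChatOllama

# First invocation after install; the model name must match a pulled model.
llm = ChatOllama(model="llama3.1")
response = llm.invoke("Say hello in one sentence.")
print(response.content)
```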
@@ -55,7 +55,9 @@
    "cell_type": "markdown",
    "id": "c84fb993",
    "metadata": {},
-   "source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
+   "source": [
+    "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
+   ]
   },
   {
    "cell_type": "code",
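For context, a sketch of what such a tracing setup cell typically contains, assuming the `LANGSMITH_TRACING` and `LANGSMITH_API_KEY` variable names used by current LangSmith clients:

```python
import getpass
import os

# Assumed variable names; check the LangSmith docs for your client version.
os.environ["LANGSMITH_TRACING"] = "true"
if "LANGSMITH_API_KEY" not in os.environ:
    os.environ["LANGSMITH_API_KEY"] = getpass.getpass("Enter your LangSmith API key: ")
```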
@@ -108,7 +110,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": 2,
    "id": "9ea7a09b",
    "metadata": {},
    "outputs": [],
@@ -127,7 +129,7 @@
    "source": [
     "## Indexing and Retrieval\n",
     "\n",
-    "Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/).\n",
+    "Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/rag/).\n",
     "\n",
     "Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document in the `InMemoryVectorStore`."
    ]
@@ -139,14 +141,11 @@
    "metadata": {},
    "outputs": [
     {
-     "data": {
-      "text/plain": [
-       "'LangChain is the framework for building context-aware reasoning applications'"
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "LangChain is the framework for building context-aware reasoning applications\n"
      ]
-     },
-     "execution_count": 4,
-     "metadata": {},
-     "output_type": "execute_result"
     }
    ],
    "source": [
@@ -166,8 +165,8 @@
     "# Retrieve the most similar text\n",
     "retrieved_documents = retriever.invoke(\"What is LangChain?\")\n",
     "\n",
-    "# show the retrieved document's content\n",
-    "retrieved_documents[0].page_content"
+    "# Show the retrieved document's content\n",
+    "print(retrieved_documents[0].page_content)"
    ]
   },
   {
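A self-contained sketch of the indexing-and-retrieval example these cells modify, assuming `llama3.1` is pulled and used for embeddings (the notebook's actual model choice may differ):

```python
from langchain_core.vectorstores import InMemoryVectorStore
from langchain_ollama import OllamaEmbeddings

# Assumed model; any pulled Ollama model that can produce embeddings works here.
embeddings = OllamaEmbeddings(model="llama3.1")

# Index a sample document, then retrieve it by similarity.
vector_store = InMemoryVectorStore.from_texts(
    ["LangChain is the framework for building context-aware reasoning applications"],
    embedding=embeddings,
)
retriever = vector_store.as_retriever()

# Retrieve the most similar text
retrieved_documents = retriever.invoke("What is LangChain?")

# Show the retrieved document's content
print(retrieved_documents[0].page_content)
```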
@@ -252,7 +251,7 @@
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "Python 3 (ipykernel)",
+   "display_name": ".venv",
    "language": "python",
    "name": "python3"
   },
@@ -266,7 +265,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.9.6"
+   "version": "3.13.5"
   }
  },
  "nbformat": 4,
@@ -8,8 +8,21 @@ This package contains the LangChain integration with Ollama
 pip install -U langchain-ollama
 ```

-You will also need to run the Ollama server locally.
-You can download it [here](https://ollama.com/download).
+For the package to work, you will need to install and run the Ollama server locally ([download](https://ollama.com/download)).
+
+To run integration tests (`make integration_tests`), you will need the following models installed in your Ollama server:
+
+- `llama3`
+- `llama3:latest`
+- `llama3.1`
+- `gemma3:4b`
+- `deepseek-r1:1.5b`
+
+Install these models by running:
+
+```bash
+ollama pull <name-of-model>
+```

 ## Chat Models
@@ -34,6 +47,7 @@ embeddings.embed_query("What is the meaning of life?")
 ```
+
 ## LLMs

 `OllamaLLM` class exposes LLMs from Ollama.

 ```python
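The `embed_query` call shown in the hunk header above looks like the following in full; a sketch assuming `llama3` is pulled and supports embeddings:

```python
from langchain_ollama import OllamaEmbeddings

embeddings = OllamaEmbeddings(model="llama3")
vector = embeddings.embed_query("What is the meaning of life?")
print(len(vector))  # dimensionality of the returned embedding
```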
@@ -28,7 +28,7 @@ class OllamaLLM(BaseLLM):
         from langchain_ollama import OllamaLLM

         model = OllamaLLM(model="llama3")
-        model.invoke("Come up with 10 names for a song about parrots")
+        print(model.invoke("Come up with 10 names for a song about parrots"))
     """

     model: str
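Beyond the docstring's `invoke` example, `OllamaLLM` also supports token streaming through the standard Runnable interface; a minimal sketch, assuming `llama3` is pulled:

```python
from langchain_ollama import OllamaLLM

model = OllamaLLM(model="llama3")

# stream() yields string chunks as the model generates them.
for chunk in model.stream("Come up with 10 names for a song about parrots"):
    print(chunk, end="", flush=True)
```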
File diff suppressed because it is too large