mirror of
https://github.com/hwchase17/langchain.git
synced 2026-01-29 21:30:18 +00:00
ollama: update tests, docs (#31736)
- docs: for the Ollama notebooks, improve the specificity of some links, add `homebrew` install info, update some wording - tests: reduce number of local models needed to run in half from 4 → 2 (shedding 8gb of required installs) - bump deps (non-breaking) in anticipation of upcoming "thinking" PR
This commit is contained in:
@@ -12,10 +12,7 @@ For the package to work, you will need to install and run the Ollama server loca
|
||||
|
||||
To run integration tests (`make integration_tests`), you will need the following models installed in your Ollama server:
|
||||
|
||||
- `llama3`
|
||||
- `llama3:latest`
|
||||
- `lamma3.1`
|
||||
- `gemma3:4b`
|
||||
- `llama3.1`
|
||||
- `deepseek-r1:1.5b`
|
||||
|
||||
Install these models by running:
|
||||
@@ -24,35 +21,35 @@ Install these models by running:
|
||||
ollama pull <name-of-model>
|
||||
```
|
||||
|
||||
## Chat Models
|
||||
## [Chat Models](https://python.langchain.com/api_reference/ollama/chat_models/langchain_ollama.chat_models.ChatOllama.html#chatollama)
|
||||
|
||||
`ChatOllama` class exposes chat models from Ollama.
|
||||
|
||||
```python
|
||||
from langchain_ollama import ChatOllama
|
||||
|
||||
llm = ChatOllama(model="llama3-groq-tool-use")
|
||||
llm = ChatOllama(model="llama3.1")
|
||||
llm.invoke("Sing a ballad of LangChain.")
|
||||
```
|
||||
|
||||
## Embeddings
|
||||
## [Embeddings](https://python.langchain.com/api_reference/ollama/embeddings/langchain_ollama.embeddings.OllamaEmbeddings.html#ollamaembeddings)
|
||||
|
||||
`OllamaEmbeddings` class exposes embeddings from Ollama.
|
||||
|
||||
```python
|
||||
from langchain_ollama import OllamaEmbeddings
|
||||
|
||||
embeddings = OllamaEmbeddings(model="llama3")
|
||||
embeddings = OllamaEmbeddings(model="llama3.1")
|
||||
embeddings.embed_query("What is the meaning of life?")
|
||||
```
|
||||
|
||||
## LLMs
|
||||
## [LLMs](https://python.langchain.com/api_reference/ollama/llms/langchain_ollama.llms.OllamaLLM.html#ollamallm)
|
||||
|
||||
`OllamaLLM` class exposes LLMs from Ollama.
|
||||
`OllamaLLM` class exposes traditional LLMs from Ollama.
|
||||
|
||||
```python
|
||||
from langchain_ollama import OllamaLLM
|
||||
|
||||
llm = OllamaLLM(model="llama3")
|
||||
llm = OllamaLLM(model="llama3.1")
|
||||
llm.invoke("The meaning of life is")
|
||||
```
|
||||
|
||||
@@ -7,8 +7,8 @@ authors = []
|
||||
license = { text = "MIT" }
|
||||
requires-python = ">=3.9"
|
||||
dependencies = [
|
||||
"ollama>=0.4.8,<1.0.0",
|
||||
"langchain-core<1.0.0,>=0.3.60",
|
||||
"ollama>=0.5.1,<1.0.0",
|
||||
"langchain-core<1.0.0,>=0.3.66",
|
||||
]
|
||||
name = "langchain-ollama"
|
||||
version = "0.3.3"
|
||||
|
||||
@@ -8,6 +8,8 @@ from typing_extensions import TypedDict
|
||||
|
||||
from langchain_ollama import ChatOllama
|
||||
|
||||
DEFAULT_MODEL_NAME = "llama3.1"
|
||||
|
||||
|
||||
@pytest.mark.parametrize(("method"), [("function_calling"), ("json_schema")])
|
||||
def test_structured_output(method: str) -> None:
|
||||
@@ -19,7 +21,7 @@ def test_structured_output(method: str) -> None:
|
||||
setup: str = Field(description="question to set up a joke")
|
||||
punchline: str = Field(description="answer to resolve the joke")
|
||||
|
||||
llm = ChatOllama(model="llama3.1", temperature=0)
|
||||
llm = ChatOllama(model=DEFAULT_MODEL_NAME, temperature=0)
|
||||
query = "Tell me a joke about cats."
|
||||
|
||||
# Pydantic
|
||||
@@ -38,7 +40,7 @@ def test_structured_output(method: str) -> None:
|
||||
|
||||
for chunk in structured_llm.stream(query):
|
||||
assert isinstance(chunk, dict)
|
||||
assert isinstance(chunk, dict) # for mypy
|
||||
assert isinstance(chunk, dict)
|
||||
assert set(chunk.keys()) == {"setup", "punchline"}
|
||||
|
||||
# Typed Dict
|
||||
@@ -55,11 +57,11 @@ def test_structured_output(method: str) -> None:
|
||||
|
||||
for chunk in structured_llm.stream(query):
|
||||
assert isinstance(chunk, dict)
|
||||
assert isinstance(chunk, dict) # for mypy
|
||||
assert isinstance(chunk, dict)
|
||||
assert set(chunk.keys()) == {"setup", "punchline"}
|
||||
|
||||
|
||||
@pytest.mark.parametrize(("model"), [("llama3.1")])
|
||||
@pytest.mark.parametrize(("model"), [(DEFAULT_MODEL_NAME)])
|
||||
def test_structured_output_deeply_nested(model: str) -> None:
|
||||
"""Test to verify structured output with a nested objects."""
|
||||
llm = ChatOllama(model=model, temperature=0)
|
||||
@@ -80,7 +82,7 @@ def test_structured_output_deeply_nested(model: str) -> None:
|
||||
|
||||
people: list[Person]
|
||||
|
||||
chat = llm.with_structured_output(Data) # type: ignore[arg-type]
|
||||
chat = llm.with_structured_output(Data)
|
||||
text = (
|
||||
"Alan Smith is 6 feet tall and has blond hair."
|
||||
"Alan Poe is 3 feet tall and has grey hair."
|
||||
|
||||
@@ -1,9 +1,13 @@
|
||||
"""Test chat model integration using standard integration tests."""
|
||||
|
||||
import pytest
|
||||
from langchain_core.language_models import BaseChatModel
|
||||
from langchain_tests.integration_tests import ChatModelIntegrationTests
|
||||
|
||||
from langchain_ollama.chat_models import ChatOllama
|
||||
|
||||
DEFAULT_MODEL_NAME = "llama3.1"
|
||||
|
||||
|
||||
class TestChatOllama(ChatModelIntegrationTests):
|
||||
@property
|
||||
@@ -12,7 +16,7 @@ class TestChatOllama(ChatModelIntegrationTests):
|
||||
|
||||
@property
|
||||
def chat_model_params(self) -> dict:
|
||||
return {"model": "llama3.1"}
|
||||
return {"model": DEFAULT_MODEL_NAME}
|
||||
|
||||
@property
|
||||
def supports_json_mode(self) -> bool:
|
||||
@@ -20,23 +24,26 @@ class TestChatOllama(ChatModelIntegrationTests):
|
||||
|
||||
@property
|
||||
def has_tool_choice(self) -> bool:
|
||||
return False
|
||||
return False # TODO: update after Ollama implements
|
||||
|
||||
@property
|
||||
def supports_image_inputs(self) -> bool:
|
||||
return True
|
||||
|
||||
def test_image_model() -> None:
|
||||
class ImageModelTests(ChatModelIntegrationTests):
|
||||
@property
|
||||
def chat_model_class(self) -> type[ChatOllama]:
|
||||
return ChatOllama
|
||||
@pytest.mark.xfail(
|
||||
reason=(
|
||||
"Will sometime encounter AssertionErrors where tool responses are "
|
||||
"`'3'` instead of `3`"
|
||||
)
|
||||
)
|
||||
def test_tool_calling(self, model: BaseChatModel) -> None:
|
||||
super().test_tool_calling(model)
|
||||
|
||||
@property
|
||||
def chat_model_params(self) -> dict:
|
||||
return {"model": "gemma3:4b"}
|
||||
|
||||
@property
|
||||
def supports_image_inputs(self) -> bool:
|
||||
return True
|
||||
|
||||
test_instance = ImageModelTests()
|
||||
model = test_instance.chat_model_class(**test_instance.chat_model_params)
|
||||
ImageModelTests().test_image_inputs(model)
|
||||
@pytest.mark.xfail(
|
||||
reason=(
|
||||
"Will sometime encounter AssertionErrors where tool responses are "
|
||||
"`'3'` instead of `3`"
|
||||
)
|
||||
)
|
||||
async def test_tool_calling_async(self, model: BaseChatModel) -> None:
|
||||
await super().test_tool_calling_async(model)
|
||||
|
||||
@@ -4,6 +4,8 @@ from langchain_tests.integration_tests import EmbeddingsIntegrationTests
|
||||
|
||||
from langchain_ollama.embeddings import OllamaEmbeddings
|
||||
|
||||
MODEL_NAME = "llama3.1"
|
||||
|
||||
|
||||
class TestOllamaEmbeddings(EmbeddingsIntegrationTests):
|
||||
@property
|
||||
@@ -12,4 +14,4 @@ class TestOllamaEmbeddings(EmbeddingsIntegrationTests):
|
||||
|
||||
@property
|
||||
def embedding_model_params(self) -> dict:
|
||||
return {"model": "llama3:latest"}
|
||||
return {"model": MODEL_NAME}
|
||||
|
||||
@@ -1,8 +1,10 @@
|
||||
"""Test OllamaLLM llm."""
|
||||
|
||||
from langchain_core.runnables import RunnableConfig
|
||||
|
||||
from langchain_ollama.llms import OllamaLLM
|
||||
|
||||
MODEL_NAME = "llama3"
|
||||
MODEL_NAME = "llama3.1"
|
||||
|
||||
|
||||
def test_stream() -> None:
|
||||
@@ -54,13 +56,12 @@ async def test_ainvoke() -> None:
|
||||
"""Test invoke tokens from OllamaLLM."""
|
||||
llm = OllamaLLM(model=MODEL_NAME)
|
||||
|
||||
result = await llm.ainvoke("I'm Pickle Rick", config={"tags": ["foo"]})
|
||||
result = await llm.ainvoke("I'm Pickle Rick", config=RunnableConfig(tags=["foo"]))
|
||||
assert isinstance(result, str)
|
||||
|
||||
|
||||
def test_invoke() -> None:
|
||||
"""Test invoke tokens from OllamaLLM."""
|
||||
llm = OllamaLLM(model=MODEL_NAME)
|
||||
|
||||
result = llm.invoke("I'm Pickle Rick", config=dict(tags=["foo"]))
|
||||
result = llm.invoke("I'm Pickle Rick", config=RunnableConfig(tags=["foo"]))
|
||||
assert isinstance(result, str)
|
||||
|
||||
10
libs/partners/ollama/uv.lock
generated
10
libs/partners/ollama/uv.lock
generated
@@ -363,7 +363,7 @@ typing = [
|
||||
|
||||
[[package]]
|
||||
name = "langchain-ollama"
|
||||
version = "0.3.3"
|
||||
version = "0.3.4"
|
||||
source = { editable = "." }
|
||||
dependencies = [
|
||||
{ name = "langchain-core" },
|
||||
@@ -397,7 +397,7 @@ typing = [
|
||||
[package.metadata]
|
||||
requires-dist = [
|
||||
{ name = "langchain-core", editable = "../../core" },
|
||||
{ name = "ollama", specifier = ">=0.4.8,<1.0.0" },
|
||||
{ name = "ollama", specifier = ">=0.5.1,<1.0.0" },
|
||||
]
|
||||
|
||||
[package.metadata.requires-dev]
|
||||
@@ -795,15 +795,15 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "ollama"
|
||||
version = "0.4.8"
|
||||
version = "0.5.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "httpx" },
|
||||
{ name = "pydantic" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/e2/64/709dc99030f8f46ec552f0a7da73bbdcc2da58666abfec4742ccdb2e800e/ollama-0.4.8.tar.gz", hash = "sha256:1121439d49b96fa8339842965d0616eba5deb9f8c790786cdf4c0b3df4833802", size = 12972, upload-time = "2025-04-16T21:55:14.101Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/8d/96/c7fe0d2d1b3053be614822a7b722c7465161b3672ce90df71515137580a0/ollama-0.5.1.tar.gz", hash = "sha256:5a799e4dc4e7af638b11e3ae588ab17623ee019e496caaf4323efbaa8feeff93", size = 41112, upload-time = "2025-05-30T21:32:48.679Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/33/3f/164de150e983b3a16e8bf3d4355625e51a357e7b3b1deebe9cc1f7cb9af8/ollama-0.4.8-py3-none-any.whl", hash = "sha256:04312af2c5e72449aaebac4a2776f52ef010877c554103419d3f36066fe8af4c", size = 13325, upload-time = "2025-04-16T21:55:12.779Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d6/76/3f96c8cdbf3955d7a73ee94ce3e0db0755d6de1e0098a70275940d1aff2f/ollama-0.5.1-py3-none-any.whl", hash = "sha256:4c8839f35bc173c7057b1eb2cbe7f498c1a7e134eafc9192824c8aecb3617506", size = 13369, upload-time = "2025-05-30T21:32:47.429Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
||||
Reference in New Issue
Block a user