Mirror of https://github.com/hwchase17/langchain.git (synced 2025-08-15 15:46:47 +00:00)

Commit 589ee059f2 (parent 80971b69d0): updates
@@ -10,6 +10,7 @@ service.
 exist locally. This is useful for ensuring that the model is available before
 attempting to use it, especially in environments where models may not be
 pre-downloaded.
 """

 from importlib import metadata
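The docstring above describes validating that a model exists locally before use. A minimal sketch of how a caller opts into that check, assuming the `validate_model_on_init` flag this package exposes (the model name is illustrative):

```python
from langchain_ollama import ChatOllama

# Fail fast if the model has not been pulled into the local Ollama
# service yet, instead of erroring on the first invoke call.
llm = ChatOllama(model="llama3.1", validate_model_on_init=True)
```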
@@ -20,6 +21,8 @@ from langchain_ollama.embeddings import OllamaEmbeddings
 from langchain_ollama.llms import OllamaLLM

 try:
+    if __package__ is None:
+        raise metadata.PackageNotFoundError
     __version__ = metadata.version(__package__)
 except metadata.PackageNotFoundError:
     # Case where package metadata is not available.
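The added guard is a common pattern for resolving a package's installed version. A standalone sketch of the same pattern (the fallback value is an assumption, since the diff cuts off before the `except` body):

```python
from importlib import metadata

try:
    if __package__ is None:
        # Module is being run outside an installed package.
        raise metadata.PackageNotFoundError
    __version__ = metadata.version(__package__)
except metadata.PackageNotFoundError:
    # Package metadata unavailable (e.g. running from a source tree).
    __version__ = ""  # assumed fallback; not shown in the diff
```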
@@ -53,8 +53,8 @@ def _convert_content_blocks_to_ollama_format(
             text_content += text_block["text"]
         elif block_type == "image":
             image_block = cast(ImageContentBlock, block)
-            if image_block.get("source_type") == "base64":
-                images.append(image_block.get("data", ""))
+            if image_block.get("base64"):
+                images.append(image_block.get("base64", ""))
             else:
                 msg = "Only base64 image data is supported by Ollama"
                 raise ValueError(msg)
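In effect, image blocks now carry their payload under a single `base64` key instead of the `source_type`/`data` pair. A small sketch of the shape the converter now expects (field values are placeholders):

```python
# Image block shape after this change (values illustrative).
image_block = {
    "type": "image",
    "mime_type": "image/jpeg",
    "base64": "iVBORw0KGgo...",  # raw base64 payload, truncated here
}

images: list[str] = []
if image_block.get("base64"):
    images.append(image_block.get("base64", ""))
else:
    raise ValueError("Only base64 image data is supported by Ollama")
```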
@@ -2,6 +2,8 @@

 This implementation provides native support for v1 messages with structured
 content blocks and always returns AIMessageV1 format responses.

+.. versionadded:: 1.0.0
 """

 from __future__ import annotations
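A hypothetical end-to-end sketch of the v1 chat model this docstring describes, using the class names exercised elsewhere in this diff. Only the `ChatOllamaV1` import path is confirmed by the diff; the message-type import paths are assumptions:

```python
from langchain_ollama.chat_models_v1 import ChatOllamaV1

# Assumed import paths for the v1 message types; adjust to your
# installed langchain-core layout.
from langchain_core.messages.content_blocks import TextContentBlock
from langchain_core.v1.messages import HumanMessageV1

llm = ChatOllamaV1(model="llama3.1", temperature=0.7)
message = HumanMessageV1(content=[TextContentBlock(type="text", text="Hello")])

result = llm.invoke([message])  # always an AIMessageV1 with block content
for block in result.content:
    if block["type"] == "text":
        print(block["text"])
```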
@@ -35,12 +35,10 @@ def test_stream_no_reasoning(model: str) -> None:
         result += chunk
     assert isinstance(result, AIMessageChunk)
     assert result.content
-    assert "reasoning_content" not in result.additional_kwargs
     assert "<think>" not in result.content and "</think>" not in result.content
-    # Only check additional_kwargs for v0 format (content as string)
-    if not isinstance(result.content, list):
-        assert "<think>" not in result.additional_kwargs.get("reasoning_content", "")
-        assert "</think>" not in result.additional_kwargs.get("reasoning_content", "")
+    if hasattr(result, "additional_kwargs"):
+        # v0 format
+        assert "reasoning_content" not in result.additional_kwargs


 @pytest.mark.parametrize(("model"), [("deepseek-r1:1.5b")])
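The pattern repeated throughout these tests distinguishes v0 messages (string content, with an `additional_kwargs` dict) from v1 messages (list of content blocks). A condensed sketch of the check, factored into a helper for clarity (the helper name is ours, not the test suite's):

```python
def assert_no_reasoning_leaked(result) -> None:
    """Hypothetical helper mirroring the assertions in these tests."""
    # Think tags must never leak into user-visible content.
    assert "<think>" not in result.content and "</think>" not in result.content
    # Only v0-style messages carry additional_kwargs.
    if hasattr(result, "additional_kwargs"):
        assert "reasoning_content" not in result.additional_kwargs
```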
@@ -62,12 +60,10 @@ async def test_astream_no_reasoning(model: str) -> None:
         result += chunk
     assert isinstance(result, AIMessageChunk)
     assert result.content
-    assert "reasoning_content" not in result.additional_kwargs
     assert "<think>" not in result.content and "</think>" not in result.content
-    # Only check additional_kwargs for v0 format (content as string)
-    if not isinstance(result.content, list):
-        assert "<think>" not in result.additional_kwargs.get("reasoning_content", "")
-        assert "</think>" not in result.additional_kwargs.get("reasoning_content", "")
+    if hasattr(result, "additional_kwargs"):
+        # v0 format
+        assert "reasoning_content" not in result.additional_kwargs


 @pytest.mark.parametrize(("model"), [("deepseek-r1:1.5b")])
@@ -91,8 +87,8 @@ def test_stream_reasoning_none(model: str) -> None:
     assert result.content
     assert "reasoning_content" not in result.additional_kwargs
     assert "<think>" in result.content and "</think>" in result.content
-    # Only check additional_kwargs for v0 format (content as string)
-    if not isinstance(result.content, list):
+    # v0 format (content as string)
     assert "<think>" not in result.additional_kwargs.get("reasoning_content", "")
     assert "</think>" not in result.additional_kwargs.get("reasoning_content", "")
@@ -118,8 +114,8 @@ async def test_astream_reasoning_none(model: str) -> None:
     assert result.content
     assert "reasoning_content" not in result.additional_kwargs
     assert "<think>" in result.content and "</think>" in result.content
-    # Only check additional_kwargs for v0 format (content as string)
-    if not isinstance(result.content, list):
+    # v0 format (content as string)
     assert "<think>" not in result.additional_kwargs.get("reasoning_content", "")
     assert "</think>" not in result.additional_kwargs.get("reasoning_content", "")
@@ -146,8 +142,8 @@ def test_reasoning_stream(model: str) -> None:
     assert "reasoning_content" in result.additional_kwargs
     assert len(result.additional_kwargs["reasoning_content"]) > 0
     assert "<think>" not in result.content and "</think>" not in result.content
-    # Only check additional_kwargs for v0 format (content as string)
-    if not isinstance(result.content, list):
+    # v0 format (content as string)
     assert "<think>" not in result.additional_kwargs["reasoning_content"]
     assert "</think>" not in result.additional_kwargs["reasoning_content"]
@@ -174,8 +170,8 @@ async def test_reasoning_astream(model: str) -> None:
     assert "reasoning_content" in result.additional_kwargs
     assert len(result.additional_kwargs["reasoning_content"]) > 0
     assert "<think>" not in result.content and "</think>" not in result.content
-    # Only check additional_kwargs for v0 format (content as string)
-    if not isinstance(result.content, list):
+    # v0 format (content as string)
     assert "<think>" not in result.additional_kwargs["reasoning_content"]
     assert "</think>" not in result.additional_kwargs["reasoning_content"]
@@ -188,10 +184,9 @@ def test_invoke_no_reasoning(model: str) -> None:
     result = llm.invoke([message])
     assert result.content
     assert "<think>" not in result.content and "</think>" not in result.content
-    # Only check additional_kwargs for v0 format (content as string)
-    if not isinstance(result.content, list):
-        assert "<think>" not in result.additional_kwargs.get("reasoning_content", "")
-        assert "</think>" not in result.additional_kwargs.get("reasoning_content", "")
+    if hasattr(result, "additional_kwargs"):
+        # v0 format
+        assert "reasoning_content" not in result.additional_kwargs


 @pytest.mark.parametrize(("model"), [("deepseek-r1:1.5b")])
@@ -202,10 +197,9 @@ async def test_ainvoke_no_reasoning(model: str) -> None:
     result = await llm.ainvoke([message])
     assert result.content
     assert "<think>" not in result.content and "</think>" not in result.content
-    # Only check additional_kwargs for v0 format (content as string)
-    if not isinstance(result.content, list):
-        assert "<think>" not in result.additional_kwargs.get("reasoning_content", "")
-        assert "</think>" not in result.additional_kwargs.get("reasoning_content", "")
+    if hasattr(result, "additional_kwargs"):
+        # v0 format
+        assert "reasoning_content" not in result.additional_kwargs


 @pytest.mark.parametrize(("model"), [("deepseek-r1:1.5b")])
@@ -217,8 +211,8 @@ def test_invoke_reasoning_none(model: str) -> None:
     assert result.content
     assert "reasoning_content" not in result.additional_kwargs
     assert "<think>" in result.content and "</think>" in result.content
-    # Only check additional_kwargs for v0 format (content as string)
-    if not isinstance(result.content, list):
+    # v0 format (content as string)
     assert "<think>" not in result.additional_kwargs.get("reasoning_content", "")
     assert "</think>" not in result.additional_kwargs.get("reasoning_content", "")
@@ -232,8 +226,8 @@ async def test_ainvoke_reasoning_none(model: str) -> None:
     assert result.content
     assert "reasoning_content" not in result.additional_kwargs
     assert "<think>" in result.content and "</think>" in result.content
-    # Only check additional_kwargs for v0 format (content as string)
-    if not isinstance(result.content, list):
+    # v0 format (content as string)
     assert "<think>" not in result.additional_kwargs.get("reasoning_content", "")
     assert "</think>" not in result.additional_kwargs.get("reasoning_content", "")
@@ -248,8 +242,8 @@ def test_reasoning_invoke(model: str) -> None:
     assert "reasoning_content" in result.additional_kwargs
     assert len(result.additional_kwargs["reasoning_content"]) > 0
     assert "<think>" not in result.content and "</think>" not in result.content
-    # Only check additional_kwargs for v0 format (content as string)
-    if not isinstance(result.content, list):
+    # v0 format (content as string)
     assert "<think>" not in result.additional_kwargs["reasoning_content"]
     assert "</think>" not in result.additional_kwargs["reasoning_content"]
@@ -13,6 +13,8 @@ from langchain_ollama._compat import (
 )
 from langchain_ollama.chat_models_v1 import ChatOllamaV1

+MODEL_NAME = "llama3.1"
+

 class TestMessageConversion:
     """Test v1 message conversion utilities."""
@@ -34,11 +36,10 @@ class TestMessageConversion:
         message = HumanMessageV1(
             content=[
                 TextContentBlock(type="text", text="Describe this image:"),
-                ImageContentBlock(  # type: ignore[typeddict-unknown-key]
+                ImageContentBlock(
                     type="image",
                     mime_type="image/jpeg",
-                    data="base64imagedata",
-                    source_type="base64",
+                    base64="base64imagedata",
                 ),
             ]
         )
@@ -74,7 +75,7 @@ class TestMessageConversion:
     def test_convert_from_ollama_format(self) -> None:
         """Test converting Ollama response to AIMessageV1."""
         ollama_response = {
-            "model": "llama3",
+            "model": MODEL_NAME,
             "created_at": "2024-01-01T00:00:00Z",
             "message": {
                 "role": "assistant",
@@ -93,13 +94,13 @@ class TestMessageConversion:
         assert len(result.content) == 1
         assert result.content[0]["type"] == "text"
         assert result.content[0]["text"] == "Hello! How can I help you today?"
-        assert result.response_metadata["model_name"] == "llama3"
-        assert result.response_metadata.get("done") is True  # type: ignore[typeddict-item]
+        assert result.response_metadata["model_name"] == MODEL_NAME  # type: ignore[typeddict-not-required-key]
+        assert result.response_metadata.get("done") is True

     def test_convert_chunk_to_v1(self) -> None:
         """Test converting Ollama streaming chunk to AIMessageChunkV1."""
         chunk = {
-            "model": "llama3",
+            "model": MODEL_NAME,
             "created_at": "2024-01-01T00:00:00Z",
             "message": {"role": "assistant", "content": "Hello"},
             "done": False,
@@ -127,14 +128,14 @@ class TestChatOllamaV1:

     def test_initialization(self) -> None:
         """Test ChatOllamaV1 initialization."""
-        llm = ChatOllamaV1(model="llama3")
+        llm = ChatOllamaV1(model=MODEL_NAME)

-        assert llm.model == "llama3"
+        assert llm.model == MODEL_NAME
         assert llm._llm_type == "chat-ollama-v1"

     def test_chat_params(self) -> None:
         """Test _chat_params method."""
-        llm = ChatOllamaV1(model="llama3", temperature=0.7)
+        llm = ChatOllamaV1(model=MODEL_NAME, temperature=0.7)

         messages: list[MessageV1] = [
             HumanMessageV1(content=[TextContentBlock(type="text", text="Hello")])
@@ -142,26 +143,28 @@ class TestChatOllamaV1:

         params = llm._chat_params(messages)

-        assert params["model"] == "llama3"
+        assert params["model"] == MODEL_NAME
         assert len(params["messages"]) == 1
         assert params["messages"][0]["role"] == "user"
         assert params["messages"][0]["content"] == "Hello"

+        # Ensure options carry thru
+        assert params["options"].temperature == 0.7

     def test_ls_params(self) -> None:
         """Test LangSmith parameters."""
-        llm = ChatOllamaV1(model="llama3", temperature=0.5)
+        llm = ChatOllamaV1(model=MODEL_NAME, temperature=0.5)

         ls_params = llm._get_ls_params()

-        assert ls_params["ls_provider"] == "ollama"
-        assert ls_params["ls_model_name"] == "llama3"
-        assert ls_params["ls_model_type"] == "chat"
-        assert ls_params["ls_temperature"] == 0.5
+        assert ls_params["ls_provider"] == "ollama"  # type: ignore[typeddict-not-required-key]
+        assert ls_params["ls_model_name"] == MODEL_NAME  # type: ignore[typeddict-not-required-key]
+        assert ls_params["ls_model_type"] == "chat"  # type: ignore[typeddict-not-required-key]
+        assert ls_params["ls_temperature"] == 0.5  # type: ignore[typeddict-not-required-key]

     def test_bind_tools_basic(self) -> None:
         """Test basic tool binding functionality."""
-        llm = ChatOllamaV1(model="llama3")
+        llm = ChatOllamaV1(model=MODEL_NAME)

         def test_tool(query: str) -> str:
             """A test tool."""
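The truncated test above exercises tool binding. A hedged sketch of what such a test drives, assuming ChatOllamaV1 follows the standard LangChain `bind_tools` contract (the tool itself is illustrative):

```python
from langchain_ollama.chat_models_v1 import ChatOllamaV1

def get_word_length(word: str) -> int:
    """Count characters in a word."""
    return len(word)

llm = ChatOllamaV1(model="llama3.1")
# bind_tools turns plain functions into tool schemas passed to Ollama.
llm_with_tools = llm.bind_tools([get_word_length])
```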