mirror of
https://github.com/hwchase17/langchain.git
synced 2025-09-08 14:31:55 +00:00
langchain-ollama (partners) / langchain-core: allow passing ChatMessages to Ollama (including arbitrary roles) (#30411)
Replacement for PR #30191 (@ccurme) **Description**: currently, ChatOllama [will raise a value error if a ChatMessage is passed to it](https://github.com/langchain-ai/langchain/blob/master/libs/partners/ollama/langchain_ollama/chat_models.py#L514), as described https://github.com/langchain-ai/langchain/pull/30147#issuecomment-2708932481. Furthermore, ollama-python is removing the limitations on valid roles that can be passed through chat messages to a model in ollama - https://github.com/ollama/ollama-python/pull/462#event-16917810634. This PR removes the role limitations imposed by langchain and enables passing langchain ChatMessages with arbitrary 'role' values through the langchain ChatOllama class to the underlying ollama-python Client. As this PR relies on [merged but unreleased functionality in ollama-python]( https://github.com/ollama/ollama-python/pull/462#event-16917810634), I have temporarily pointed the ollama package source to the main branch of the ollama-python github repo. Format, lint, and tests of new functionality passing. Need to resolve issue with recently added ChatOllama tests. (Now resolved) **Issue**: resolves #30122 (related to ollama issue https://github.com/ollama/ollama/issues/8955) **Dependencies**: no new dependencies [x] PR title [x] PR message [x] Lint and test: format, lint, and test all running successfully and passing --------- Co-authored-by: Ryan Stewart <ryanstewart@Ryans-MacBook-Pro.local> Co-authored-by: Chester Curme <chester.curme@gmail.com>
This commit is contained in:
@@ -1,7 +1,13 @@
|
||||
"""Test chat model integration."""
|
||||
|
||||
import json
|
||||
from collections.abc import Generator
|
||||
from contextlib import contextmanager
|
||||
from typing import Any
|
||||
|
||||
import pytest
|
||||
from httpx import Client, Request, Response
|
||||
from langchain_core.messages import ChatMessage
|
||||
from langchain_tests.unit_tests import ChatModelUnitTests
|
||||
|
||||
from langchain_ollama.chat_models import ChatOllama, _parse_arguments_from_tool_call
|
||||
@@ -23,3 +29,38 @@ def test__parse_arguments_from_tool_call() -> None:
|
||||
response = _parse_arguments_from_tool_call(raw_tool_calls[0])
|
||||
assert response is not None
|
||||
assert isinstance(response["arg_1"], str)
|
||||
|
||||
|
||||
@contextmanager
def _mock_httpx_client_stream(
    *args: Any, **kwargs: Any
) -> Generator[Response, Any, Any]:
    """Stand-in for ``httpx.Client.stream`` yielding one canned 200 response.

    Accepts and ignores any positional/keyword arguments so it can be
    monkeypatched over the real streaming method regardless of call shape.
    """
    # Pre-build the fake request/body so the yield line stays readable.
    fake_request = Request(method="POST", url="http://whocares:11434")
    canned_body = '{"message": {"role": "assistant", "content": "The meaning ..."}}'
    yield Response(status_code=200, content=canned_body, request=fake_request)
|
||||
|
||||
|
||||
def test_arbitrary_roles_accepted_in_chatmessages(
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """ChatOllama must forward ChatMessages with non-standard roles without raising.

    The HTTP layer is monkeypatched so no real Ollama server is contacted;
    the assertion here is simply that ``invoke`` completes cleanly.
    """
    # Replace the streaming transport with the canned-response stub.
    monkeypatch.setattr(Client, "stream", _mock_httpx_client_stream)

    llm = ChatOllama(
        base_url="http://whocares:11434",
        model="granite3.2",
        verbose=True,
        format=None,
    )

    # Mix of arbitrary, model-specific, and standard roles.
    role_content_pairs = [
        ("somerandomrole", "I'm ok with you adding any role message now!"),
        ("control", "thinking"),
        ("user", "What is the meaning of life?"),
    ]
    conversation = [
        ChatMessage(role=role, content=content)
        for role, content in role_content_pairs
    ]

    llm.invoke(conversation)
|
||||
|
Reference in New Issue
Block a user