Mirror of https://github.com/hwchase17/langchain.git, synced 2025-09-16 15:04:13 +00:00
ollama[patch]: fix model validation, ensure per-call reasoning can be set, tests (#31927)
* update model validation due to a change in the [Ollama client](https://github.com/ollama/ollama) - ensure you are running the latest version (0.9.6) to use `validate_model_on_init`
* add code example and fix formatting for ChatOllama reasoning
* ensure that setting `reasoning` in invocation kwargs overrides the class-level setting
* tests
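A minimal sketch of the resulting behavior (the model name and prompt are illustrative, and this assumes `langchain-ollama` with this patch plus Ollama 0.9.6+; the exact key holding the reasoning trace may vary):

```python
from langchain_ollama import ChatOllama

# validate_model_on_init asks the Ollama client to confirm the model is
# available locally at construction time (requires Ollama 0.9.6+).
llm = ChatOllama(
    model="qwen3:8b",  # illustrative model name
    reasoning=False,  # class-level default: no reasoning trace
    validate_model_on_init=True,
)

# Per-call kwargs take precedence over the class-level setting, so this
# single call emits a reasoning trace even though the instance default
# is reasoning=False.
response = llm.invoke("Why is the sky blue?", reasoning=True)
print(response.additional_kwargs.get("reasoning_content"))
```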
```diff
@@ -48,3 +48,24 @@ def test_validate_model_on_init(mock_validate_model: Any) -> None:
     # Test that validate_model is NOT called by default
     OllamaLLM(model=MODEL_NAME)
     mock_validate_model.assert_not_called()
+
+
+def test_reasoning_aggregation() -> None:
+    """Test that reasoning chunks are aggregated into final response."""
+    llm = OllamaLLM(model=MODEL_NAME, reasoning=True)
+    prompts = ["some prompt"]
+    mock_stream = [
+        {"thinking": "I am thinking.", "done": False},
+        {"thinking": " Still thinking.", "done": False},
+        {"response": "Final Answer.", "done": True},
+    ]
+
+    with patch.object(llm, "_create_generate_stream") as mock_stream_method:
+        mock_stream_method.return_value = iter(mock_stream)
+        result = llm.generate(prompts)
+
+    assert result.generations[0][0].generation_info is not None
+    assert (
+        result.generations[0][0].generation_info["thinking"]
+        == "I am thinking. Still thinking."
+    )
```
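For reference, a hedged sketch of what this test pins down from the caller's side (model name is illustrative):

```python
from langchain_ollama import OllamaLLM

# With reasoning=True, the thinking chunks streamed by Ollama are
# concatenated and surfaced on generation_info["thinking"], as the
# test above asserts.
llm = OllamaLLM(model="qwen3:8b", reasoning=True)
result = llm.generate(["some prompt"])
print(result.generations[0][0].generation_info["thinking"])
```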