mirror of
https://github.com/hwchase17/langchain.git
synced 2025-08-16 16:11:02 +00:00
* update model validation due to change in [Ollama client](https://github.com/ollama/ollama) - ensure you are running the latest version (0.9.6) to use `validate_model_on_init` * add code example and fix formatting for ChatOllama reasoning * ensure that setting `reasoning` in invocation kwargs overrides class-level setting * tests
40 lines
1.4 KiB
Python
"""Utility functions for validating Ollama models."""
from httpx import ConnectError

from ollama import Client, ResponseError

def validate_model(client: Client, model_name: str) -> None:
    """Validate that a model exists in the Ollama instance.

    A model counts as present if its name equals ``model_name`` exactly or
    begins with ``model_name`` followed by a tag separator (so ``llama3``
    matches ``llama3:latest``).

    Args:
        client: The Ollama client.
        model_name: The name of the model to validate.

    Raises:
        ValueError: If the model is not found or if there's a connection issue.
    """
    # Keep the try body minimal: only the network call can raise the
    # client-level errors translated below.
    try:
        response = client.list()
    except ConnectError as e:
        msg = "Failed to connect to Ollama. Please check that Ollama is downloaded, running and accessible. https://ollama.com/download"  # noqa: E501
        raise ValueError(msg) from e
    except ResponseError as e:
        msg = (
            "Received an error from the Ollama API. "
            "Please check your Ollama server logs."
        )
        raise ValueError(msg) from e

    # The client may report an entry whose name is missing/None; drop falsy
    # names so the prefix check and the join below cannot raise
    # AttributeError/TypeError.
    model_names: list[str] = [
        name for name in (model["model"] for model in response["models"]) if name
    ]

    if not any(
        model_name == m or m.startswith(f"{model_name}:") for m in model_names
    ):
        msg = (
            f"Model `{model_name}` not found in Ollama. Please pull the "
            f"model (using `ollama pull {model_name}`) or specify a valid "
            f"model name. Available local models: {', '.join(model_names)}"
        )
        raise ValueError(msg)