diff --git a/libs/partners/ollama/langchain_ollama/chat_models.py b/libs/partners/ollama/langchain_ollama/chat_models.py
index 143761cf5f0..5adb13e300c 100644
--- a/libs/partners/ollama/langchain_ollama/chat_models.py
+++ b/libs/partners/ollama/langchain_ollama/chat_models.py
@@ -35,11 +35,12 @@ from langchain_core.messages import (
 from langchain_core.messages.ai import UsageMetadata
 from langchain_core.messages.tool import tool_call
 from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
-from langchain_core.pydantic_v1 import Field, root_validator
 from langchain_core.runnables import Runnable
 from langchain_core.tools import BaseTool
 from langchain_core.utils.function_calling import convert_to_openai_tool
 from ollama import AsyncClient, Client, Message, Options
+from pydantic import PrivateAttr, model_validator
+from typing_extensions import Self
 
 
 def _get_usage_metadata_from_generation_info(
@@ -328,12 +329,12 @@ class ChatOllama(BaseChatModel):
     For a full list of the params, see [this link](https://pydoc.dev/httpx/latest/httpx.Client.html)
     """
 
-    _client: Client = Field(default=None)
+    _client: Client = PrivateAttr(default=None)
     """
     The client to use for making requests.
     """
 
-    _async_client: AsyncClient = Field(default=None)
+    _async_client: AsyncClient = PrivateAttr(default=None)
     """
     The async client to use for making requests.
     """
@@ -364,14 +365,13 @@ class ChatOllama(BaseChatModel):
             "keep_alive": self.keep_alive,
         }
 
-    @root_validator(pre=False, skip_on_failure=True)
-    def _set_clients(cls, values: dict) -> dict:
+    @model_validator(mode="after")
+    def _set_clients(self) -> Self:
         """Set clients to use for ollama."""
-        values["_client"] = Client(host=values["base_url"], **values["client_kwargs"])
-        values["_async_client"] = AsyncClient(
-            host=values["base_url"], **values["client_kwargs"]
-        )
-        return values
+        client_kwargs = self.client_kwargs or {}
+        self._client = Client(host=self.base_url, **client_kwargs)
+        self._async_client = AsyncClient(host=self.base_url, **client_kwargs)
+        return self
 
     def _convert_messages_to_ollama_messages(
         self, messages: List[BaseMessage]
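The same `root_validator` → `model_validator` rewrite recurs in embeddings.py and llms.py below. As a minimal, self-contained sketch of the pydantic v2 pattern being adopted (hypothetical `FakeClient` standing in for `ollama.Client`; not the library's code): `PrivateAttr` keeps the client objects out of the model schema, and a `mode="after"` validator runs on the fully validated instance, so it can assign private attributes directly instead of mutating a `values` dict as the v1 `root_validator` did.

```python
# Minimal sketch of the v1 -> v2 validator pattern used in this patch.
# FakeClient is a hypothetical stand-in for ollama.Client.
from typing import Any, Optional

from pydantic import BaseModel, PrivateAttr, model_validator
from typing_extensions import Self


class FakeClient:
    def __init__(self, host: Optional[str] = None, **kwargs: Any) -> None:
        self.host = host
        self.kwargs = kwargs


class Example(BaseModel):
    base_url: Optional[str] = None
    """Base URL the client should talk to."""

    client_kwargs: Optional[dict] = {}
    """Extra kwargs forwarded to the client constructor."""

    # PrivateAttr (not Field): excluded from validation, serialization,
    # and the JSON schema; populated by the validator below.
    _client: Optional[FakeClient] = PrivateAttr(default=None)

    @model_validator(mode="after")
    def _set_client(self) -> Self:
        # mode="after" receives the validated instance, so public fields
        # are plain attributes here rather than a raw `values` dict.
        client_kwargs = self.client_kwargs or {}
        self._client = FakeClient(host=self.base_url, **client_kwargs)
        return self


m = Example(base_url="http://localhost:11434", client_kwargs={"timeout": 5})
assert m._client is not None and m._client.host == "http://localhost:11434"
```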
""" - class Config: - """Configuration for this pydantic object.""" + model_config = ConfigDict( + extra="forbid", + ) - extra = "forbid" - - @root_validator(pre=False, skip_on_failure=True) - def _set_clients(cls, values: dict) -> dict: + @model_validator(mode="after") + def _set_clients(self) -> Self: """Set clients to use for ollama.""" - values["_client"] = Client(host=values["base_url"], **values["client_kwargs"]) - values["_async_client"] = AsyncClient( - host=values["base_url"], **values["client_kwargs"] - ) - return values + client_kwargs = self.client_kwargs or {} + self._client = Client(host=self.base_url, **client_kwargs) + self._async_client = AsyncClient(host=self.base_url, **client_kwargs) + return self def embed_documents(self, texts: List[str]) -> List[List[float]]: """Embed search docs.""" diff --git a/libs/partners/ollama/langchain_ollama/llms.py b/libs/partners/ollama/langchain_ollama/llms.py index 8097aa2db86..783d20104ef 100644 --- a/libs/partners/ollama/langchain_ollama/llms.py +++ b/libs/partners/ollama/langchain_ollama/llms.py @@ -18,8 +18,9 @@ from langchain_core.callbacks import ( ) from langchain_core.language_models import BaseLLM, LangSmithParams from langchain_core.outputs import GenerationChunk, LLMResult -from langchain_core.pydantic_v1 import Field, root_validator from ollama import AsyncClient, Client, Options +from pydantic import PrivateAttr, model_validator +from typing_extensions import Self class OllamaLLM(BaseLLM): @@ -115,12 +116,12 @@ class OllamaLLM(BaseLLM): For a full list of the params, see [this link](https://pydoc.dev/httpx/latest/httpx.Client.html) """ - _client: Client = Field(default=None) + _client: Client = PrivateAttr(default=None) """ The client to use for making requests. """ - _async_client: AsyncClient = Field(default=None) + _async_client: AsyncClient = PrivateAttr(default=None) """ The async client to use for making requests. """ @@ -164,14 +165,13 @@ class OllamaLLM(BaseLLM): params["ls_max_tokens"] = max_tokens return params - @root_validator(pre=False, skip_on_failure=True) - def _set_clients(cls, values: dict) -> dict: + @model_validator(mode="after") + def _set_clients(self) -> Self: """Set clients to use for ollama.""" - values["_client"] = Client(host=values["base_url"], **values["client_kwargs"]) - values["_async_client"] = AsyncClient( - host=values["base_url"], **values["client_kwargs"] - ) - return values + client_kwargs = self.client_kwargs or {} + self._client = Client(host=self.base_url, **client_kwargs) + self._async_client = AsyncClient(host=self.base_url, **client_kwargs) + return self async def _acreate_generate_stream( self, diff --git a/libs/partners/ollama/poetry.lock b/libs/partners/ollama/poetry.lock index 0e54aad2ab1..d1af5b71388 100644 --- a/libs/partners/ollama/poetry.lock +++ b/libs/partners/ollama/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. 
diff --git a/libs/partners/ollama/poetry.lock b/libs/partners/ollama/poetry.lock
index 0e54aad2ab1..d1af5b71388 100644
--- a/libs/partners/ollama/poetry.lock
+++ b/libs/partners/ollama/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
 
 [[package]]
 name = "annotated-types"
@@ -11,9 +11,6 @@ files = [
     {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"},
 ]
 
-[package.dependencies]
-typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""}
-
 [[package]]
 name = "anyio"
 version = "4.4.0"
@@ -294,10 +291,10 @@ files = [
 
 [[package]]
 name = "langchain-core"
-version = "0.2.37"
+version = "0.2.38"
 description = "Building applications with LLMs through composability"
 optional = false
-python-versions = ">=3.8.1,<4.0"
+python-versions = ">=3.9,<4.0"
 files = []
 develop = true
 
@@ -886,5 +883,5 @@ zstd = ["zstandard (>=0.18.0)"]
 
 [metadata]
 lock-version = "2.0"
-python-versions = ">=3.8.1,<4.0"
-content-hash = "0de4df1fb448399e93be5924717c1cb7ec3a0c8255951878020844c28bfb7b5e"
+python-versions = ">=3.9,<4.0"
+content-hash = "27bee0c6422c0b758e3742b1fef5a3d6d577d3d8c6be98cb8158c92f18afdc7b"
diff --git a/libs/partners/ollama/pyproject.toml b/libs/partners/ollama/pyproject.toml
index 27efbdeb093..89accf92081 100644
--- a/libs/partners/ollama/pyproject.toml
+++ b/libs/partners/ollama/pyproject.toml
@@ -19,9 +19,10 @@ disallow_untyped_defs = "True"
 "Release Notes" = "https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain-ollama%3D%3D0%22&expanded=true"
 
 [tool.poetry.dependencies]
-python = ">=3.8.1,<4.0"
+python = ">=3.9,<4.0"
 ollama = ">=0.3.0,<1"
 langchain-core = "^0.2.36"
+pydantic = ">=2,<3"
 
 [tool.ruff.lint]
 select = [ "E", "F", "I", "T201",]
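Call sites should be unaffected by the migration: the public fields (`model`, `base_url`, `client_kwargs`) keep their names, and the new `_set_clients` validator still forwards `client_kwargs` to the underlying `ollama.Client`/`AsyncClient`. A quick smoke test, assuming a locally running Ollama server and illustrative parameter values:

```python
# Illustrative smoke test; requires `ollama serve` running locally.
from langchain_ollama import ChatOllama

llm = ChatOllama(
    model="llama3",
    base_url="http://localhost:11434",
    client_kwargs={"timeout": 30},  # forwarded by _set_clients to ollama.Client
)
print(llm.invoke("Say hello in one word.").content)
```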