ollama[major]: upgrade pydantic (#26044)

commit be7cd0756f (parent 51c6899850)
Author: ccurme (committed by GitHub)
Date:   2024-09-04 13:54:52 -04:00

5 changed files with 45 additions and 43 deletions

View File

@@ -35,11 +35,12 @@ from langchain_core.messages import (
 from langchain_core.messages.ai import UsageMetadata
 from langchain_core.messages.tool import tool_call
 from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
-from langchain_core.pydantic_v1 import Field, root_validator
 from langchain_core.runnables import Runnable
 from langchain_core.tools import BaseTool
 from langchain_core.utils.function_calling import convert_to_openai_tool
 from ollama import AsyncClient, Client, Message, Options
+from pydantic import PrivateAttr, model_validator
+from typing_extensions import Self

 def _get_usage_metadata_from_generation_info(
@@ -328,12 +329,12 @@ class ChatOllama(BaseChatModel):
     For a full list of the params, see [this link](https://pydoc.dev/httpx/latest/httpx.Client.html)
     """
-    _client: Client = Field(default=None)
+    _client: Client = PrivateAttr(default=None)
     """
     The client to use for making requests.
     """
-    _async_client: AsyncClient = Field(default=None)
+    _async_client: AsyncClient = PrivateAttr(default=None)
     """
     The async client to use for making requests.
     """
@@ -364,14 +365,13 @@ class ChatOllama(BaseChatModel):
             "keep_alive": self.keep_alive,
         }

-    @root_validator(pre=False, skip_on_failure=True)
-    def _set_clients(cls, values: dict) -> dict:
+    @model_validator(mode="after")
+    def _set_clients(self) -> Self:
         """Set clients to use for ollama."""
-        values["_client"] = Client(host=values["base_url"], **values["client_kwargs"])
-        values["_async_client"] = AsyncClient(
-            host=values["base_url"], **values["client_kwargs"]
-        )
-        return values
+        client_kwargs = self.client_kwargs or {}
+        self._client = Client(host=self.base_url, **client_kwargs)
+        self._async_client = AsyncClient(host=self.base_url, **client_kwargs)
+        return self

     def _convert_messages_to_ollama_messages(
         self, messages: List[BaseMessage]
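The hunk above is the standard pydantic v1 -> v2 migration for derived attributes: a class-level root_validator that mutated a `values` dict becomes a model_validator(mode="after") instance method that assigns PrivateAttr fields and returns self. A minimal self-contained sketch of the pattern, with a hypothetical FakeClient standing in for ollama.Client:

    from typing import Any, Dict, Optional

    from pydantic import BaseModel, PrivateAttr, model_validator
    from typing_extensions import Self

    class FakeClient:
        """Hypothetical stand-in for ollama.Client, only to keep the sketch runnable."""

        def __init__(self, host: Optional[str] = None, **kwargs: Any) -> None:
            self.host = host
            self.kwargs = kwargs

    class Example(BaseModel):
        base_url: Optional[str] = None
        client_kwargs: Optional[Dict[str, Any]] = {}

        # PrivateAttr replaces Field for non-field state in v2; private
        # attributes are excluded from validation and serialization.
        _client: FakeClient = PrivateAttr(default=None)

        @model_validator(mode="after")
        def _set_client(self) -> Self:
            # mode="after" runs on the constructed instance, so fields are
            # read as attributes instead of through a `values` dict.
            client_kwargs = self.client_kwargs or {}
            self._client = FakeClient(host=self.base_url, **client_kwargs)
            return self

    m = Example(base_url="http://localhost:11434", client_kwargs={"timeout": 30})
    assert m._client.host == "http://localhost:11434"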

View File

@@ -4,8 +4,14 @@ from typing import (
 )

 from langchain_core.embeddings import Embeddings
-from langchain_core.pydantic_v1 import BaseModel, Field, root_validator
 from ollama import AsyncClient, Client
+from pydantic import (
+    BaseModel,
+    ConfigDict,
+    PrivateAttr,
+    model_validator,
+)
+from typing_extensions import Self

 class OllamaEmbeddings(BaseModel, Embeddings):
@@ -126,29 +132,27 @@ class OllamaEmbeddings(BaseModel, Embeddings):
     For a full list of the params, see [this link](https://pydoc.dev/httpx/latest/httpx.Client.html)
     """
-    _client: Client = Field(default=None)
+    _client: Client = PrivateAttr(default=None)
     """
     The client to use for making requests.
     """
-    _async_client: AsyncClient = Field(default=None)
+    _async_client: AsyncClient = PrivateAttr(default=None)
     """
     The async client to use for making requests.
     """
-    class Config:
-        """Configuration for this pydantic object."""
-
-        extra = "forbid"
+    model_config = ConfigDict(
+        extra="forbid",
+    )

-    @root_validator(pre=False, skip_on_failure=True)
-    def _set_clients(cls, values: dict) -> dict:
+    @model_validator(mode="after")
+    def _set_clients(self) -> Self:
         """Set clients to use for ollama."""
-        values["_client"] = Client(host=values["base_url"], **values["client_kwargs"])
-        values["_async_client"] = AsyncClient(
-            host=values["base_url"], **values["client_kwargs"]
-        )
-        return values
+        client_kwargs = self.client_kwargs or {}
+        self._client = Client(host=self.base_url, **client_kwargs)
+        self._async_client = AsyncClient(host=self.base_url, **client_kwargs)
+        return self

     def embed_documents(self, texts: List[str]) -> List[List[float]]:
         """Embed search docs."""

View File

@@ -18,8 +18,9 @@ from langchain_core.callbacks import (
 )
 from langchain_core.language_models import BaseLLM, LangSmithParams
 from langchain_core.outputs import GenerationChunk, LLMResult
-from langchain_core.pydantic_v1 import Field, root_validator
 from ollama import AsyncClient, Client, Options
+from pydantic import PrivateAttr, model_validator
+from typing_extensions import Self

 class OllamaLLM(BaseLLM):
@@ -115,12 +116,12 @@ class OllamaLLM(BaseLLM):
     For a full list of the params, see [this link](https://pydoc.dev/httpx/latest/httpx.Client.html)
     """
-    _client: Client = Field(default=None)
+    _client: Client = PrivateAttr(default=None)
     """
     The client to use for making requests.
     """
-    _async_client: AsyncClient = Field(default=None)
+    _async_client: AsyncClient = PrivateAttr(default=None)
     """
     The async client to use for making requests.
     """
@@ -164,14 +165,13 @@ class OllamaLLM(BaseLLM):
         params["ls_max_tokens"] = max_tokens
         return params

-    @root_validator(pre=False, skip_on_failure=True)
-    def _set_clients(cls, values: dict) -> dict:
+    @model_validator(mode="after")
+    def _set_clients(self) -> Self:
         """Set clients to use for ollama."""
-        values["_client"] = Client(host=values["base_url"], **values["client_kwargs"])
-        values["_async_client"] = AsyncClient(
-            host=values["base_url"], **values["client_kwargs"]
-        )
-        return values
+        client_kwargs = self.client_kwargs or {}
+        self._client = Client(host=self.base_url, **client_kwargs)
+        self._async_client = AsyncClient(host=self.base_url, **client_kwargs)
+        return self

     async def _acreate_generate_stream(
         self,
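From the caller's side the migration is invisible: client_kwargs are still forwarded to the ollama Client and AsyncClient, which in turn pass them through to httpx.Client. A usage sketch, assuming the post-upgrade package and an Ollama server on the default port:

    from langchain_ollama import OllamaLLM

    # client_kwargs flow into ollama.Client/AsyncClient and from there into
    # httpx.Client, so httpx options such as timeout or headers work here.
    llm = OllamaLLM(
        model="llama3",
        base_url="http://localhost:11434",
        client_kwargs={"timeout": 60},
    )
    print(llm.invoke("Why is the sky blue?"))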

View File

@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.

 [[package]]
 name = "annotated-types"
@@ -11,9 +11,6 @@
     {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"},
 ]

-[package.dependencies]
-typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""}
-
 [[package]]
 name = "anyio"
 version = "4.4.0"
@@ -294,10 +291,10 @@
 [[package]]
 name = "langchain-core"
-version = "0.2.37"
+version = "0.2.38"
 description = "Building applications with LLMs through composability"
 optional = false
-python-versions = ">=3.8.1,<4.0"
+python-versions = ">=3.9,<4.0"
 files = []
 develop = true
@@ -886,5 +883,5 @@ zstd = ["zstandard (>=0.18.0)"]
 [metadata]
 lock-version = "2.0"
-python-versions = ">=3.8.1,<4.0"
-content-hash = "0de4df1fb448399e93be5924717c1cb7ec3a0c8255951878020844c28bfb7b5e"
+python-versions = ">=3.9,<4.0"
+content-hash = "27bee0c6422c0b758e3742b1fef5a3d6d577d3d8c6be98cb8158c92f18afdc7b"

View File

@@ -19,9 +19,10 @@ disallow_untyped_defs = "True"
 "Release Notes" = "https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain-ollama%3D%3D0%22&expanded=true"

 [tool.poetry.dependencies]
-python = ">=3.8.1,<4.0"
+python = ">=3.9,<4.0"
 ollama = ">=0.3.0,<1"
 langchain-core = "^0.2.36"
+pydantic = ">=2,<3"

 [tool.ruff.lint]
 select = [ "E", "F", "I", "T201",]
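The new explicit pydantic pin makes the v2-only code above an installable invariant rather than a side effect of langchain-core's own requirements. A quick sanity check of a resolved environment, as a sketch:

    import pydantic

    # pyproject now requires pydantic >=2,<3, so v2 APIs such as
    # model_validator and PrivateAttr are guaranteed to be available.
    assert pydantic.VERSION.startswith("2.")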