multiple: pydantic 2 compatibility, v0.3 (#26443)

Signed-off-by: ChengZi <chen.zhang@zilliz.com>
Co-authored-by: Eugene Yurtsev <eyurtsev@gmail.com>
Co-authored-by: Bagatur <22008038+baskaryan@users.noreply.github.com>
Co-authored-by: Dan O'Donovan <dan.odonovan@gmail.com>
Co-authored-by: Tom Daniel Grande <tomdgrande@gmail.com>
Co-authored-by: Grande <Tom.Daniel.Grande@statsbygg.no>
Co-authored-by: Bagatur <baskaryan@gmail.com>
Co-authored-by: ccurme <chester.curme@gmail.com>
Co-authored-by: Harrison Chase <hw.chase.17@gmail.com>
Co-authored-by: Tomaz Bratanic <bratanic.tomaz@gmail.com>
Co-authored-by: ZhangShenao <15201440436@163.com>
Co-authored-by: Friso H. Kingma <fhkingma@gmail.com>
Co-authored-by: ChengZi <chen.zhang@zilliz.com>
Co-authored-by: Nuno Campos <nuno@langchain.dev>
Co-authored-by: Morgante Pell <morgantep@google.com>
This commit is contained in:
Erick Friis
2024-09-13 14:38:45 -07:00
committed by GitHub
parent d9813bdbbc
commit c2a3021bb0
1402 changed files with 38318 additions and 30410 deletions

View File

@@ -1,7 +1,8 @@
-from typing import Any, Dict, List, Optional
+from typing import Any, List, Optional
 from langchain_core.embeddings import Embeddings
-from langchain_core.pydantic_v1 import BaseModel, Field, root_validator
+from pydantic import BaseModel, ConfigDict, Field, model_validator
+from typing_extensions import Self
 class LlamaCppEmbeddings(BaseModel, Embeddings):
@@ -18,7 +19,7 @@ class LlamaCppEmbeddings(BaseModel, Embeddings):
         llama = LlamaCppEmbeddings(model_path="/path/to/model.bin")
     """
-    client: Any  #: :meta private:
+    client: Any = None  #: :meta private:
     model_path: str
     n_ctx: int = Field(512, alias="n_ctx")
@@ -60,13 +61,14 @@ class LlamaCppEmbeddings(BaseModel, Embeddings):
     device: Optional[str] = Field(None, alias="device")
     """Device type to use and pass to the model"""
-    class Config:
-        extra = "forbid"
+    model_config = ConfigDict(
+        extra="forbid",
+    )
-    @root_validator(pre=False, skip_on_failure=True)
-    def validate_environment(cls, values: Dict) -> Dict:
+    @model_validator(mode="after")
+    def validate_environment(self) -> Self:
         """Validate that llama-cpp-python library is installed."""
-        model_path = values["model_path"]
+        model_path = self.model_path
         model_param_names = [
             "n_ctx",
             "n_parts",
@@ -80,15 +82,15 @@ class LlamaCppEmbeddings(BaseModel, Embeddings):
             "verbose",
             "device",
         ]
-        model_params = {k: values[k] for k in model_param_names}
+        model_params = {k: getattr(self, k) for k in model_param_names}
         # For backwards compatibility, only include if non-null.
-        if values["n_gpu_layers"] is not None:
-            model_params["n_gpu_layers"] = values["n_gpu_layers"]
+        if self.n_gpu_layers is not None:
+            model_params["n_gpu_layers"] = self.n_gpu_layers
         try:
             from llama_cpp import Llama
-            values["client"] = Llama(model_path, embedding=True, **model_params)
+            self.client = Llama(model_path, embedding=True, **model_params)
         except ImportError:
             raise ImportError(
                 "Could not import llama-cpp-python library. "
@@ -101,7 +103,7 @@ class LlamaCppEmbeddings(BaseModel, Embeddings):
                 f"Received error {e}"
             )
-        return values
+        return self
     def embed_documents(self, texts: List[str]) -> List[List[float]]:
         """Embed a list of documents using the Llama model.