mirror of
https://github.com/hwchase17/langchain.git
synced 2025-05-12 10:37:32 +00:00
Upgrade to using a literal for specifying the extra which is the recommended approach in pydantic 2. This works correctly also in pydantic v1. ```python from pydantic.v1 import BaseModel class Foo(BaseModel, extra="forbid"): x: int Foo(x=5, y=1) ``` And ```python from pydantic.v1 import BaseModel class Foo(BaseModel): x: int class Config: extra = "forbid" Foo(x=5, y=1) ``` ## Enum -> literal using grit pattern: ``` engine marzano(0.1) language python or { `extra=Extra.allow` => `extra="allow"`, `extra=Extra.forbid` => `extra="forbid"`, `extra=Extra.ignore` => `extra="ignore"` } ``` Resorted attributes in config and removed doc-string in case we will need to deal with going back and forth between pydantic v1 and v2 during the 0.3 release. (This will reduce merge conflicts.) ## Sort attributes in Config: ``` engine marzano(0.1) language python function sort($values) js { return $values.text.split(',').sort().join("\n"); } class_definition($name, $body) as $C where { $name <: `Config`, $body <: block($statements), $values = [], $statements <: some bubble($values) assignment() as $A where { $values += $A }, $body => sort($values), } ```
74 lines
2.3 KiB
Python
74 lines
2.3 KiB
Python
from typing import Any, List
|
|
|
|
from langchain_core.embeddings import Embeddings
|
|
from langchain_core.pydantic_v1 import BaseModel
|
|
|
|
# Default TF Hub model: multilingual Universal Sentence Encoder v3.
DEFAULT_MODEL_URL = "https://tfhub.dev/google/universal-sentence-encoder-multilingual/3"
|
|
|
|
|
|
class TensorflowHubEmbeddings(BaseModel, Embeddings):
    """TensorflowHub embedding models.

    To use, you should have the ``tensorflow_text`` python package installed.

    Example:
        .. code-block:: python

            from langchain_community.embeddings import TensorflowHubEmbeddings

            url = "https://tfhub.dev/google/universal-sentence-encoder-multilingual/3"
            tf = TensorflowHubEmbeddings(model_url=url)
    """

    # Loaded TF Hub module; populated in __init__, not by the caller.
    embed: Any  #: :meta private:
    model_url: str = DEFAULT_MODEL_URL
    """Model name to use."""

    def __init__(self, **kwargs: Any):
        """Initialize the tensorflow_hub and tensorflow_text.

        Raises:
            ImportError: If ``tensorflow-hub`` or ``tensorflow_text`` is
                not installed.
        """
        super().__init__(**kwargs)
        try:
            import tensorflow_hub
        except ImportError as e:
            # Chain the cause so the original import failure stays visible.
            raise ImportError(
                "Could not import tensorflow-hub python package. "
                "Please install it with `pip install tensorflow-hub`."
            ) from e
        try:
            # Imported only for its side effect: it registers the custom ops
            # required by the multilingual universal-sentence-encoder models.
            import tensorflow_text  # noqa: F401
        except ImportError as e:
            raise ImportError(
                "Could not import tensorflow_text python package. "
                "Please install it with `pip install tensorflow_text`."
            ) from e

        self.embed = tensorflow_hub.load(self.model_url)

    class Config:
        extra = "forbid"

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Compute doc embeddings using a TensorflowHub embedding model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        # Newlines are replaced to match the single-sentence input the
        # encoder expects.
        texts = [text.replace("\n", " ") for text in texts]
        embeddings = self.embed(texts).numpy()
        return embeddings.tolist()

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a TensorflowHub embedding model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        text = text.replace("\n", " ")
        embedding = self.embed([text]).numpy()[0]
        return embedding.tolist()