from typing import Any, Dict, List, Optional

from pydantic import BaseModel, Extra, Field

from langchain.embeddings.base import Embeddings

DEFAULT_MODEL_NAME = "sentence-transformers/all-mpnet-base-v2"
DEFAULT_INSTRUCT_MODEL = "hkunlp/instructor-large"
DEFAULT_BGE_MODEL = "BAAI/bge-large-en"
DEFAULT_EMBED_INSTRUCTION = "Represent the document for retrieval: "
DEFAULT_QUERY_INSTRUCTION = (
    "Represent the question for retrieving supporting documents: "
)
DEFAULT_QUERY_BGE_INSTRUCTION_EN = (
    "Represent this question for searching relevant passages: "
)
DEFAULT_QUERY_BGE_INSTRUCTION_ZH = "为这个句子生成表示以用于检索相关文章:"
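
# How these instruction strings are used (see the classes below):
# - ``HuggingFaceInstructEmbeddings`` pairs each text with an instruction at
#   encode time: documents as ``[embed_instruction, text]`` and queries as
#   ``[query_instruction, text]``.
# - ``HuggingFaceBgeEmbeddings`` prepends ``query_instruction`` to the query
#   string only; documents are encoded without an instruction, and the Chinese
#   instruction is selected automatically when "-zh" appears in the model name.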


class HuggingFaceEmbeddings(BaseModel, Embeddings):
    """HuggingFace sentence_transformers embedding models.

    To use, you should have the ``sentence_transformers`` python package installed.

    Example:
        .. code-block:: python

            from langchain.embeddings import HuggingFaceEmbeddings

            model_name = "sentence-transformers/all-mpnet-base-v2"
            model_kwargs = {'device': 'cpu'}
            encode_kwargs = {'normalize_embeddings': False}
            hf = HuggingFaceEmbeddings(
                model_name=model_name,
                model_kwargs=model_kwargs,
                encode_kwargs=encode_kwargs
            )
    """

    client: Any  #: :meta private:
    model_name: str = DEFAULT_MODEL_NAME
    """Model name to use."""
    cache_folder: Optional[str] = None
    """Path to store models.
    Can also be set by the SENTENCE_TRANSFORMERS_HOME environment variable."""
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Keyword arguments to pass to the model."""
    encode_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Keyword arguments to pass when calling the `encode` method of the model."""

    def __init__(self, **kwargs: Any):
        """Initialize the sentence_transformer."""
        super().__init__(**kwargs)
        try:
            import sentence_transformers

        except ImportError as exc:
            raise ImportError(
                "Could not import sentence_transformers python package. "
                "Please install it with `pip install sentence_transformers`."
            ) from exc

        self.client = sentence_transformers.SentenceTransformer(
            self.model_name, cache_folder=self.cache_folder, **self.model_kwargs
        )

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Compute doc embeddings using a HuggingFace transformer model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        texts = list(map(lambda x: x.replace("\n", " "), texts))
        embeddings = self.client.encode(texts, **self.encode_kwargs)
        return embeddings.tolist()

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a HuggingFace transformer model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        text = text.replace("\n", " ")
        embedding = self.client.encode(text, **self.encode_kwargs)
        return embedding.tolist()
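
# A minimal usage sketch for ``HuggingFaceEmbeddings`` (assumes the
# ``sentence_transformers`` package is installed and the default model can be
# downloaded; the texts below are illustrative only):
#
#     hf = HuggingFaceEmbeddings()
#     doc_vectors = hf.embed_documents(["hello world", "foo bar"])
#     query_vector = hf.embed_query("what is foo?")
#     # Both methods return plain Python lists of floats, ready to be stored
#     # in a vector store.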


class HuggingFaceInstructEmbeddings(BaseModel, Embeddings):
    """HuggingFace instruct embedding models.

    To use, you should have the ``sentence_transformers``
    and ``InstructorEmbedding`` python packages installed.

    Example:
        .. code-block:: python

            from langchain.embeddings import HuggingFaceInstructEmbeddings

            model_name = "hkunlp/instructor-large"
            model_kwargs = {'device': 'cpu'}
            encode_kwargs = {'normalize_embeddings': True}
            hf = HuggingFaceInstructEmbeddings(
                model_name=model_name,
                model_kwargs=model_kwargs,
                encode_kwargs=encode_kwargs
            )
    """

    client: Any  #: :meta private:
    model_name: str = DEFAULT_INSTRUCT_MODEL
    """Model name to use."""
    cache_folder: Optional[str] = None
    """Path to store models.
    Can also be set by the SENTENCE_TRANSFORMERS_HOME environment variable."""
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Keyword arguments to pass to the model."""
    encode_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Keyword arguments to pass when calling the `encode` method of the model."""
    embed_instruction: str = DEFAULT_EMBED_INSTRUCTION
    """Instruction to use for embedding documents."""
    query_instruction: str = DEFAULT_QUERY_INSTRUCTION
    """Instruction to use for embedding queries."""

    def __init__(self, **kwargs: Any):
        """Initialize the sentence_transformer."""
        super().__init__(**kwargs)
        try:
            from InstructorEmbedding import INSTRUCTOR
        except ImportError as e:
            raise ImportError(
                "Could not import InstructorEmbedding python package. "
                "Please install it with `pip install InstructorEmbedding`."
            ) from e

        self.client = INSTRUCTOR(
            self.model_name, cache_folder=self.cache_folder, **self.model_kwargs
        )

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Compute doc embeddings using a HuggingFace instruct model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        instruction_pairs = [[self.embed_instruction, text] for text in texts]
        embeddings = self.client.encode(instruction_pairs, **self.encode_kwargs)
        return embeddings.tolist()

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a HuggingFace instruct model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        instruction_pair = [self.query_instruction, text]
        embedding = self.client.encode([instruction_pair], **self.encode_kwargs)[0]
        return embedding.tolist()
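
# A minimal usage sketch for ``HuggingFaceInstructEmbeddings`` (assumes the
# ``InstructorEmbedding`` and ``sentence_transformers`` packages are installed;
# the texts below are illustrative only):
#
#     hf = HuggingFaceInstructEmbeddings()
#     # Internally, documents are encoded as [embed_instruction, text] pairs
#     # and queries as [query_instruction, text] pairs.
#     doc_vectors = hf.embed_documents(["Instructor models take task instructions."])
#     query_vector = hf.embed_query("How are instruction pairs encoded?")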


class HuggingFaceBgeEmbeddings(BaseModel, Embeddings):
    """HuggingFace BGE sentence_transformers embedding models.

    To use, you should have the ``sentence_transformers`` python package installed.

    Example:
        .. code-block:: python

            from langchain.embeddings import HuggingFaceBgeEmbeddings

            model_name = "BAAI/bge-large-en"
            model_kwargs = {'device': 'cpu'}
            encode_kwargs = {'normalize_embeddings': True}
            hf = HuggingFaceBgeEmbeddings(
                model_name=model_name,
                model_kwargs=model_kwargs,
                encode_kwargs=encode_kwargs
            )
    """

    client: Any  #: :meta private:
    model_name: str = DEFAULT_BGE_MODEL
    """Model name to use."""
    cache_folder: Optional[str] = None
    """Path to store models.
    Can also be set by the SENTENCE_TRANSFORMERS_HOME environment variable."""
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Keyword arguments to pass to the model."""
    encode_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Keyword arguments to pass when calling the `encode` method of the model."""
    query_instruction: str = DEFAULT_QUERY_BGE_INSTRUCTION_EN
    """Instruction to use for embedding queries."""

    def __init__(self, **kwargs: Any):
        """Initialize the sentence_transformer."""
        super().__init__(**kwargs)
        try:
            import sentence_transformers

        except ImportError as exc:
            raise ImportError(
                "Could not import sentence_transformers python package. "
                "Please install it with `pip install sentence_transformers`."
            ) from exc

        self.client = sentence_transformers.SentenceTransformer(
            self.model_name, cache_folder=self.cache_folder, **self.model_kwargs
        )
        # Chinese BGE models expect the Chinese query instruction.
        if "-zh" in self.model_name:
            self.query_instruction = DEFAULT_QUERY_BGE_INSTRUCTION_ZH

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Compute doc embeddings using a HuggingFace transformer model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        texts = [t.replace("\n", " ") for t in texts]
        embeddings = self.client.encode(texts, **self.encode_kwargs)
        return embeddings.tolist()

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a HuggingFace transformer model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        text = text.replace("\n", " ")
        embedding = self.client.encode(
            self.query_instruction + text, **self.encode_kwargs
        )
        return embedding.tolist()
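
# A minimal usage sketch for ``HuggingFaceBgeEmbeddings`` (assumes the
# ``sentence_transformers`` package is installed; normalized embeddings are
# the setting shown in the class docstring example, not a requirement):
#
#     hf = HuggingFaceBgeEmbeddings(encode_kwargs={"normalize_embeddings": True})
#     doc_vectors = hf.embed_documents(["BGE is tuned for retrieval."])
#     # The query instruction is prepended to the query string before encoding;
#     # documents are encoded as-is.
#     query_vector = hf.embed_query("Which instruction is prepended to queries?")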