Upgrade to using a literal for specifying `extra`, which is the
recommended approach in pydantic 2.
This also works correctly in pydantic v1:
```python
from pydantic.v1 import BaseModel
class Foo(BaseModel, extra="forbid"):
    x: int

Foo(x=5, y=1)  # raises a ValidationError: extra fields not permitted
```
And the equivalent behavior with the legacy `Config` class:
```python
from pydantic.v1 import BaseModel
class Foo(BaseModel):
    x: int

    class Config:
        extra = "forbid"

Foo(x=5, y=1)  # raises a ValidationError: extra fields not permitted
```
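For reference, the same literal spelling is the class-keyword form pydantic 2 itself accepts; a minimal sketch (not part of this diff):
```python
from pydantic import BaseModel  # pydantic v2

class Foo(BaseModel, extra="forbid"):
    x: int

Foo(x=5, y=1)  # raises a ValidationError: extra inputs are not permitted
```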
## Enum -> literal using grit pattern:
```
engine marzano(0.1)
language python
or {
`extra=Extra.allow` => `extra="allow"`,
`extra=Extra.forbid` => `extra="forbid"`,
`extra=Extra.ignore` => `extra="ignore"`
}
```
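Applied to a model, the rewrite is a one-token change; a sketch with a hypothetical `Foo`:
```python
# Before the migration
from pydantic.v1 import BaseModel, Extra

class Foo(BaseModel, extra=Extra.forbid):
    x: int

# After the migration (the Extra import becomes unnecessary)
class Foo(BaseModel, extra="forbid"):
    x: int
```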
Re-sorted the attributes in `Config` and removed the doc-string, in case we
need to go back and forth between pydantic v1 and v2 during
the 0.3 release. (This will reduce merge conflicts.)
## Sort attributes in Config:
```
engine marzano(0.1)
language python
function sort($values) js {
return $values.text.split(',').sort().join("\n");
}
class_definition($name, $body) as $C where {
$name <: `Config`,
$body <: block($statements),
$values = [],
$statements <: some bubble($values) assignment() as $A where {
$values += $A
},
$body => sort($values),
}
```
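For example, a `Config` with unsorted assignments (hypothetical fields shown) is rewritten alphabetically:
```python
# Before
class Config:
    extra = "forbid"
    arbitrary_types_allowed = True

# After
class Config:
    arbitrary_types_allowed = True
    extra = "forbid"
```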
```python
from typing import Any, Dict, List, Tuple

from langchain_core.pydantic_v1 import BaseModel, Field

from langchain_community.cross_encoders.base import BaseCrossEncoder

DEFAULT_MODEL_NAME = "BAAI/bge-reranker-base"


class HuggingFaceCrossEncoder(BaseModel, BaseCrossEncoder):
    """HuggingFace cross encoder models.

    Example:
        .. code-block:: python

            from langchain_community.cross_encoders import HuggingFaceCrossEncoder

            model_name = "BAAI/bge-reranker-base"
            model_kwargs = {'device': 'cpu'}
            hf = HuggingFaceCrossEncoder(
                model_name=model_name,
                model_kwargs=model_kwargs
            )
    """

    client: Any  #: :meta private:
    model_name: str = DEFAULT_MODEL_NAME
    """Model name to use."""
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Keyword arguments to pass to the model."""

    def __init__(self, **kwargs: Any):
        """Initialize the sentence_transformer."""
        super().__init__(**kwargs)
        try:
            import sentence_transformers

        except ImportError as exc:
            raise ImportError(
                "Could not import sentence_transformers python package. "
                "Please install it with `pip install sentence-transformers`."
            ) from exc

        self.client = sentence_transformers.CrossEncoder(
            self.model_name, **self.model_kwargs
        )

    class Config:
        extra = "forbid"

    def score(self, text_pairs: List[Tuple[str, str]]) -> List[float]:
        """Compute similarity scores using a HuggingFace transformer model.

        Args:
            text_pairs: The list of text pairs to score for similarity.

        Returns:
            List of scores, one for each pair.
        """
        scores = self.client.predict(text_pairs)
        # Some models, e.g. bert-multilingual-passage-reranking-msmarco, return two
        # scores (not_relevant, relevant) for each pair rather than a single score.
        if len(scores.shape) > 1:
            # Keep only the relevant score and return a concrete list.
            scores = list(map(lambda x: x[1], scores))
        return scores
```
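A minimal usage sketch of the class above, assuming `sentence-transformers` is installed and the default `BAAI/bge-reranker-base` weights are available locally or downloadable:
```python
from langchain_community.cross_encoders import HuggingFaceCrossEncoder

encoder = HuggingFaceCrossEncoder(model_kwargs={"device": "cpu"})
scores = encoder.score(
    [
        ("What is the capital of France?", "Paris is the capital of France."),
        ("What is the capital of France?", "The Nile is a river in Africa."),
    ]
)
print(scores)  # the first, relevant pair should score higher
```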