langchain: add RankLLM Reranker (#21171)
Integrate the RankLLM reranker (https://github.com/castorini/rank_llm) into LangChain. An example notebook is provided in `docs/docs/integrations/retrievers/rankllm-reranker.ipynb`.

---------

Co-authored-by: Harrison Chase <hw.chase.17@gmail.com>
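For context, a minimal usage sketch in the spirit of that notebook. This is a hedged example, not part of the commit: `retriever` is a placeholder for any existing LangChain retriever, and rank_llm must be installed.

    from langchain.retrievers import ContextualCompressionRetriever
    from langchain_community.document_compressors.rankllm_rerank import RankLLMRerank

    # `retriever` stands in for any existing retriever, e.g. the result of
    # FAISS.from_documents(...).as_retriever(); requires `pip install rank_llm`.
    compressor = RankLLMRerank(top_n=3, model="zephyr")
    compression_retriever = ContextualCompressionRetriever(
        base_compressor=compressor, base_retriever=retriever
    )
    compressed_docs = compression_retriever.invoke("What does the bill say about housing?")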
langchain_community/document_compressors/__init__.py:

@@ -14,12 +14,16 @@ if TYPE_CHECKING:
     from langchain_community.document_compressors.openvino_rerank import (
         OpenVINOReranker,
     )
+    from langchain_community.document_compressors.rankllm_rerank import (
+        RankLLMRerank,
+    )
 
 _module_lookup = {
     "LLMLinguaCompressor": "langchain_community.document_compressors.llmlingua_filter",
     "OpenVINOReranker": "langchain_community.document_compressors.openvino_rerank",
     "JinaRerank": "langchain_community.document_compressors.jina_rerank",
+    "RankLLMRerank": "langchain_community.document_compressors.rankllm_rerank",
     "FlashrankRerank": "langchain_community.document_compressors.flashrank_rerank",
 }
@@ -31,4 +35,10 @@ def __getattr__(name: str) -> Any:
     raise AttributeError(f"module {__name__} has no attribute {name}")
 
 
-__all__ = ["LLMLinguaCompressor", "OpenVINOReranker", "FlashrankRerank", "JinaRerank"]
+__all__ = [
+    "LLMLinguaCompressor",
+    "OpenVINOReranker",
+    "FlashrankRerank",
+    "JinaRerank",
+    "RankLLMRerank",
+]
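The `_module_lookup` entries above feed the module's `__getattr__`, so rank_llm is only imported when `RankLLMRerank` is actually accessed. A condensed sketch of that PEP 562 lazy-import pattern, simplified from the module rather than copied verbatim:

    import importlib
    from typing import Any

    _module_lookup = {
        "RankLLMRerank": "langchain_community.document_compressors.rankllm_rerank",
    }


    def __getattr__(name: str) -> Any:
        # PEP 562: invoked only when `name` is missing from the module namespace,
        # so the heavy rank_llm import is deferred until first attribute access.
        if name in _module_lookup:
            module = importlib.import_module(_module_lookup[name])
            return getattr(module, name)
        raise AttributeError(f"module {__name__} has no attribute {name}")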
langchain_community/document_compressors/rankllm_rerank.py (new file):

@@ -0,0 +1,124 @@
from __future__ import annotations

from copy import deepcopy
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional, Sequence

from langchain.retrievers.document_compressors.base import BaseDocumentCompressor
from langchain_core.callbacks.manager import Callbacks
from langchain_core.documents import Document
from langchain_core.pydantic_v1 import Extra, Field, PrivateAttr, root_validator
from langchain_core.utils import get_from_dict_or_env

if TYPE_CHECKING:
    from rank_llm.data import Candidate, Query, Request
else:
    # Avoid pydantic annotation issues when actually instantiating
    # while keeping this import optional
    try:
        from rank_llm.data import Candidate, Query, Request
    except ImportError:
        pass


class RankLLMRerank(BaseDocumentCompressor):
    """Document compressor that uses RankLLM to rerank documents."""

    client: Any = None
    """RankLLM client to use for compressing documents"""
    top_n: int = Field(default=3)
    """Top N documents to return."""
    model: str = Field(default="zephyr")
    """Name of model to use for reranking."""
    step_size: int = Field(default=10)
    """Step size for moving sliding window."""
    gpt_model: str = Field(default="gpt-3.5-turbo")
    """OpenAI model name."""
    _retriever: Any = PrivateAttr()

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @root_validator(pre=True)
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate python package exists in environment."""

        if not values.get("client"):
            client_name = values.get("model", "zephyr")

            try:
                model_enum = ModelType(client_name.lower())
            except ValueError:
                raise ValueError(
                    "Unsupported model type. Please use 'vicuna', 'zephyr', or 'gpt'."
                )

            try:
                if model_enum == ModelType.VICUNA:
                    from rank_llm.rerank.vicuna_reranker import VicunaReranker

                    values["client"] = VicunaReranker()
                elif model_enum == ModelType.ZEPHYR:
                    from rank_llm.rerank.zephyr_reranker import ZephyrReranker

                    values["client"] = ZephyrReranker()
                elif model_enum == ModelType.GPT:
                    from rank_llm.rerank.rank_gpt import SafeOpenai
                    from rank_llm.rerank.reranker import Reranker

                    openai_api_key = get_from_dict_or_env(
                        values, "openai_api_key", "OPENAI_API_KEY"
                    )

                    agent = SafeOpenai(
                        model=values["gpt_model"],
                        context_size=4096,
                        keys=openai_api_key,
                    )
                    values["client"] = Reranker(agent)
            except ImportError:
                raise ImportError(
                    "Could not import rank_llm python package. "
                    "Please install it with `pip install rank_llm`."
                )

        return values

    def compress_documents(
        self,
        documents: Sequence[Document],
        query: str,
        callbacks: Optional[Callbacks] = None,
    ) -> Sequence[Document]:
        """Rerank documents with RankLLM and return the top_n results."""
        request = Request(
            query=Query(text=query, qid=1),
            candidates=[
                Candidate(doc={"text": doc.page_content}, docid=index, score=1)
                for index, doc in enumerate(documents)
            ],
        )

        rerank_results = self.client.rerank(
            request,
            rank_end=len(documents),
            window_size=min(20, len(documents)),
            step=self.step_size,
        )

        final_results = []
        for res in rerank_results.candidates:
            doc = documents[int(res.docid)]
            doc_copy = Document(doc.page_content, metadata=deepcopy(doc.metadata))
            final_results.append(doc_copy)

        return final_results[: self.top_n]


# Defined after RankLLMRerank; resolved when the validator runs at
# instantiation time, not at import time.
class ModelType(Enum):
    VICUNA = "vicuna"
    ZEPHYR = "zephyr"
    GPT = "gpt"
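To illustrate the three client paths selected by `validate_environment`, a hedged sketch (assumes rank_llm is installed; the sample documents are invented for this example):

    from langchain_core.documents import Document
    from langchain_community.document_compressors.rankllm_rerank import RankLLMRerank

    docs = [
        Document(page_content="RankLLM provides listwise LLM-based rerankers."),
        Document(page_content="Zephyr and Vicuna are open-weight chat models."),
        Document(page_content="An unrelated passage about sourdough baking."),
    ]

    # model="zephyr" (default) and model="vicuna" load local rank_llm rerankers;
    # model="gpt" wraps SafeOpenai and expects OPENAI_API_KEY plus `gpt_model`.
    reranker = RankLLMRerank(model="zephyr", top_n=2)
    top_docs = reranker.compress_documents(docs, query="What is RankLLM?")
    for doc in top_docs:
        print(doc.page_content)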
tests/…/document_compressors/__init__.py (new file):

@@ -0,0 +1 @@
"""Test document compressor integrations."""
tests/…/document_compressors/test_rankllm_rerank.py (new file):

@@ -0,0 +1,8 @@
"""Test rankllm reranker."""

from langchain_community.document_compressors.rankllm_rerank import RankLLMRerank


def test_rankllm_reranker_init() -> None:
    """Test the RankLLM reranker initializes correctly."""
    RankLLMRerank()
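Because `RankLLMRerank()` loads a real reranker model, a behavior-level test can inject a stub client instead. A hypothetical sketch, not part of this commit: `FakeClient` is illustrative, and rank_llm must still be installed for its data classes.

    from types import SimpleNamespace

    from langchain_core.documents import Document
    from langchain_community.document_compressors.rankllm_rerank import RankLLMRerank


    class FakeClient:
        """Stub client that simply reverses the candidate order."""

        def rerank(self, request, rank_end, window_size, step):
            return SimpleNamespace(candidates=list(reversed(request.candidates)))


    def test_compress_documents_with_stub_client() -> None:
        docs = [Document(page_content=f"doc {i}") for i in range(3)]
        # Passing an explicit client skips model setup in validate_environment.
        reranker = RankLLMRerank(client=FakeClient(), top_n=2)
        result = reranker.compress_documents(docs, query="q")
        assert [d.page_content for d in result] == ["doc 2", "doc 1"]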
tests/…/document_compressors/test_imports.py:

@@ -4,6 +4,7 @@ EXPECTED_ALL = [
     "LLMLinguaCompressor",
     "OpenVINOReranker",
     "JinaRerank",
+    "RankLLMRerank",
     "FlashrankRerank",
 ]