Mirror of https://github.com/hwchase17/langchain.git (synced 2025-11-20).
```python
"""python scripts/update_mypy_ruff.py"""
import glob
import tomllib
from pathlib import Path
import toml
import subprocess
import re
ROOT_DIR = Path(__file__).parents[1]
def main():
for path in glob.glob(str(ROOT_DIR / "libs/**/pyproject.toml"), recursive=True):
print(path)
with open(path, "rb") as f:
pyproject = tomllib.load(f)
try:
pyproject["tool"]["poetry"]["group"]["typing"]["dependencies"]["mypy"] = (
"^1.10"
)
pyproject["tool"]["poetry"]["group"]["lint"]["dependencies"]["ruff"] = (
"^0.5"
)
except KeyError:
continue
with open(path, "w") as f:
toml.dump(pyproject, f)
cwd = "/".join(path.split("/")[:-1])
completed = subprocess.run(
"poetry lock --no-update; poetry install --with typing; poetry run mypy . --no-color",
cwd=cwd,
shell=True,
capture_output=True,
text=True,
)
logs = completed.stdout.split("\n")
to_ignore = {}
for l in logs:
if re.match("^(.*)\:(\d+)\: error:.*\[(.*)\]", l):
path, line_no, error_type = re.match(
"^(.*)\:(\d+)\: error:.*\[(.*)\]", l
).groups()
if (path, line_no) in to_ignore:
to_ignore[(path, line_no)].append(error_type)
else:
to_ignore[(path, line_no)] = [error_type]
print(len(to_ignore))
for (error_path, line_no), error_types in to_ignore.items():
all_errors = ", ".join(error_types)
full_path = f"{cwd}/{error_path}"
try:
with open(full_path, "r") as f:
file_lines = f.readlines()
except FileNotFoundError:
continue
file_lines[int(line_no) - 1] = (
file_lines[int(line_no) - 1][:-1] + f" # type: ignore[{all_errors}]\n"
)
with open(full_path, "w") as f:
f.write("".join(file_lines))
subprocess.run(
"poetry run ruff format .; poetry run ruff --select I --fix .",
cwd=cwd,
shell=True,
capture_output=True,
text=True,
)
if __name__ == "__main__":
main()
```
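To make the log-scraping step concrete, here is a standalone sketch that runs the same regex over a few invented mypy lines (the file names and error codes are made up for illustration) and prints the (file, line) → error-codes grouping the script builds before writing the `# type: ignore[...]` comments:

```python
import re

# Same pattern the script compiles to scrape mypy output.
MYPY_ERROR_RE = re.compile(r"^(.*):(\d+): error:.*\[(.*)\]")

# Invented mypy output, shaped like real `mypy --no-color` lines.
fake_logs = [
    'foo/bar.py:10: error: Argument 1 has incompatible type "str"  [arg-type]',
    'foo/bar.py:10: error: Unexpected keyword argument "x"  [call-arg]',
    'foo/baz.py:3: error: Name "missing" is not defined  [name-defined]',
    "Found 3 errors in 2 files (checked 20 source files)",  # summary: no match
]

to_ignore: dict = {}
for line in fake_logs:
    match = MYPY_ERROR_RE.match(line)
    if match:
        path, line_no, error_type = match.groups()
        to_ignore.setdefault((path, line_no), []).append(error_type)

print(to_ignore)
# {('foo/bar.py', '10'): ['arg-type', 'call-arg'],
#  ('foo/baz.py', '3'): ['name-defined']}
```

Because errors are grouped per (file, line), a line with two errors gets one combined comment, `# type: ignore[arg-type, call-arg]`, rather than two separate ones.

The second file is the `KNNRetriever` implementation (shipped as `langchain_community.retrievers.KNNRetriever` in released packages):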
```python
"""KNN Retriever.
|
|
Largely based on
|
|
https://github.com/karpathy/randomfun/blob/master/knn_vs_svm.ipynb"""
|
|
|
|
from __future__ import annotations
|
|
|
|
import concurrent.futures
|
|
from typing import Any, Iterable, List, Optional
|
|
|
|
import numpy as np
|
|
from langchain_core.callbacks import CallbackManagerForRetrieverRun
|
|
from langchain_core.documents import Document
|
|
from langchain_core.embeddings import Embeddings
|
|
from langchain_core.retrievers import BaseRetriever
|
|
|
|
|
|
def create_index(contexts: List[str], embeddings: Embeddings) -> np.ndarray:
|
|
"""
|
|
Create an index of embeddings for a list of contexts.
|
|
|
|
Args:
|
|
contexts: List of contexts to embed.
|
|
embeddings: Embeddings model to use.
|
|
|
|
Returns:
|
|
Index of embeddings.
|
|
"""
|
|
with concurrent.futures.ThreadPoolExecutor() as executor:
|
|
return np.array(list(executor.map(embeddings.embed_query, contexts)))
|
|
|
|
|
|
class KNNRetriever(BaseRetriever):
|
|
"""`KNN` retriever."""
|
|
|
|
embeddings: Embeddings
|
|
"""Embeddings model to use."""
|
|
index: Any
|
|
"""Index of embeddings."""
|
|
texts: List[str]
|
|
"""List of texts to index."""
|
|
metadatas: Optional[List[dict]] = None
|
|
"""List of metadatas corresponding with each text."""
|
|
k: int = 4
|
|
"""Number of results to return."""
|
|
relevancy_threshold: Optional[float] = None
|
|
"""Threshold for relevancy."""
|
|
|
|
class Config:
|
|
"""Configuration for this pydantic object."""
|
|
|
|
arbitrary_types_allowed = True
|
|
|
|
@classmethod
|
|
def from_texts(
|
|
cls,
|
|
texts: List[str],
|
|
embeddings: Embeddings,
|
|
metadatas: Optional[List[dict]] = None,
|
|
**kwargs: Any,
|
|
) -> KNNRetriever:
|
|
index = create_index(texts, embeddings)
|
|
return cls(
|
|
embeddings=embeddings,
|
|
index=index,
|
|
texts=texts,
|
|
metadatas=metadatas,
|
|
**kwargs,
|
|
)
|
|
|
|
@classmethod
|
|
def from_documents(
|
|
cls,
|
|
documents: Iterable[Document],
|
|
embeddings: Embeddings,
|
|
**kwargs: Any,
|
|
) -> KNNRetriever:
|
|
texts, metadatas = zip(*((d.page_content, d.metadata) for d in documents))
|
|
return cls.from_texts(
|
|
texts=texts, embeddings=embeddings, metadatas=metadatas, **kwargs
|
|
)
|
|
|
|
def _get_relevant_documents(
|
|
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
|
|
) -> List[Document]:
|
|
query_embeds = np.array(self.embeddings.embed_query(query))
|
|
# calc L2 norm
|
|
index_embeds = self.index / np.sqrt((self.index**2).sum(1, keepdims=True))
|
|
query_embeds = query_embeds / np.sqrt((query_embeds**2).sum())
|
|
|
|
similarities = index_embeds.dot(query_embeds)
|
|
sorted_ix = np.argsort(-similarities)
|
|
|
|
denominator = np.max(similarities) - np.min(similarities) + 1e-6
|
|
normalized_similarities = (similarities - np.min(similarities)) / denominator
|
|
|
|
top_k_results = [
|
|
Document(
|
|
page_content=self.texts[row],
|
|
metadata=self.metadatas[row] if self.metadatas else {},
|
|
)
|
|
for row in sorted_ix[0 : self.k]
|
|
if (
|
|
self.relevancy_threshold is None
|
|
or normalized_similarities[row] >= self.relevancy_threshold
|
|
)
|
|
]
|
|
return top_k_results
|
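To round this out, a minimal usage sketch. `TinyCountEmbeddings` is a made-up toy `Embeddings` implementation (it just counts a few keywords, so the example runs offline); in real use you would pass a proper embeddings model instead:

```python
from typing import List

from langchain_core.embeddings import Embeddings


class TinyCountEmbeddings(Embeddings):
    """Hypothetical toy embedding: counts keyword occurrences.

    Stands in for a real model so the sketch runs offline; useless for
    real retrieval quality.
    """

    vocab = ["cat", "dog", "fish"]

    def embed_query(self, text: str) -> List[float]:
        lowered = text.lower()
        # The +1e-3 keeps vectors non-zero, since KNNRetriever L2-normalizes.
        return [lowered.count(word) + 1e-3 for word in self.vocab]

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        return [self.embed_query(t) for t in texts]


retriever = KNNRetriever.from_texts(
    texts=["the cat sat", "a dog barked", "fish swim", "cats and dogs"],
    embeddings=TinyCountEmbeddings(),
    k=2,
    relevancy_threshold=0.5,  # drop hits in the lower half of the min-max scale
)

docs = retriever.get_relevant_documents("is that a cat?")
print([d.page_content for d in docs])
# ['the cat sat', 'cats and dogs']
```

With newer langchain-core, `retriever.invoke("is that a cat?")` is the equivalent call, since `BaseRetriever` is a Runnable. Note that the threshold applies to the min-max-normalized scores, not raw cosine similarity: for every query the best match scores about 1.0 and the worst about 0.0, so `relevancy_threshold=0.5` means "within the top half of this query's score range" rather than an absolute similarity cutoff.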