mirror of
https://github.com/hwchase17/langchain.git
synced 2025-08-10 05:20:39 +00:00
This PR adds scaffolding for the langchain 1.0 entry package. Most contents have been removed. Currently remaining entrypoints for: * chat models * embedding models * memory -> trimming messages, filtering messages and counting tokens [we may remove this] * prompts -> we may remove some prompts * storage: primarily to support cache-backed embeddings; may remove the kv store * tools -> export tool primitives Things to be added: * Selected agent implementations * Selected workflows * Common primitives: messages, Document * Primitives for type hinting: BaseChatModel, BaseEmbeddings * Selected retrievers * Selected text splitters Things to be removed: * Globals need to be removed (needs an update in langchain core) Todos: * TBD indexing api (requires sqlalchemy, which we don't want as a dependency) * Be explicit about public/private interfaces (e.g., likely rename chat_models.base.py to something more internal) * Remove dockerfiles * Update module doc-strings and README.md
82 lines
2.8 KiB
Python
"""Fake Embedding class for testing purposes."""
|
|
|
|
import math
|
|
|
|
from langchain_core.embeddings import Embeddings
|
|
|
|
# Sample texts for fake-embedding tests; presumably consumed by callers
# elsewhere — no references visible in this chunk.
fake_texts = ["foo", "bar", "baz"]
|
|
|
|
|
|
class FakeEmbeddings(Embeddings):
    """Fake embeddings functionality for testing."""

    def embed_documents(self, texts: list[str]) -> list[list[float]]:
        """Return simple embeddings.

        Each text is encoded as nine 1.0s followed by its position in
        ``texts`` as a float, so embeddings differ only in the last slot.
        """
        vectors = []
        for position, _ in enumerate(texts):
            vectors.append([1.0] * 9 + [float(position)])
        return vectors

    async def aembed_documents(self, texts: list[str]) -> list[list[float]]:
        """Async variant; delegates to the synchronous implementation."""
        return self.embed_documents(texts)

    def embed_query(self, text: str) -> list[float]:
        """Return a constant query embedding.

        Identical to ``embed_documents(texts)[0]``: the distance to each
        document embedding is that document's index as passed to
        ``embed_documents``.
        """
        return [1.0] * 9 + [0.0]

    async def aembed_query(self, text: str) -> list[float]:
        """Async variant; delegates to the synchronous implementation."""
        return self.embed_query(text)
|
|
|
|
|
|
class ConsistentFakeEmbeddings(FakeEmbeddings):
    """Fake embeddings that return the same vector for the same text.

    Remembers every text it has embedded so far; a text's embedding encodes
    the order in which that text was first seen.
    """

    def __init__(self, dimensionality: int = 10) -> None:
        """Start with no texts seen.

        Args:
            dimensionality: Length of each produced embedding vector.
        """
        self.known_texts: list[str] = []
        self.dimensionality = dimensionality

    def embed_documents(self, texts: list[str]) -> list[list[float]]:
        """Return consistent embeddings for each text seen so far."""
        results: list[list[float]] = []
        for item in texts:
            # First sighting: remember the text so its index is stable.
            if item not in self.known_texts:
                self.known_texts.append(item)
            slot = float(self.known_texts.index(item))
            results.append([1.0] * (self.dimensionality - 1) + [slot])
        return results

    def embed_query(self, text: str) -> list[float]:
        """Return the consistent embedding for a single query text.

        The query text is recorded like any document, so repeated queries
        for the same text yield identical vectors.
        """
        return self.embed_documents([text])[0]
|
|
|
|
|
|
class AngularTwoDimensionalEmbeddings(Embeddings):
    """Map angle strings (in units of pi) to unit embedding vectors on a circle."""

    def embed_documents(self, texts: list[str]) -> list[list[float]]:
        """Embed every text via :meth:`embed_query`."""
        embedded = []
        for entry in texts:
            embedded.append(self.embed_query(entry))
        return embedded

    def embed_query(self, text: str) -> list[float]:
        """Convert input text to a 2-d 'vector' (list of floats).

        If the text parses as a number, it is interpreted as an angle in
        units of pi and mapped to the corresponding point on the unit
        circle. Any other input becomes the singular result ``[0.0, 0.0]``.
        """
        try:
            # Both the parse and the trig calls stay inside the try: for
            # inputs like "inf", float() succeeds but math.cos raises
            # ValueError, and the original falls back to [0.0, 0.0] there too.
            theta = float(text) * math.pi
            return [math.cos(theta), math.sin(theta)]
        except ValueError:
            # Assume: just a test string; its value carries no meaning.
            return [0.0, 0.0]
|