mongodb[patch]: Added scoring threshold to caching (#19286)

## Description
The semantic cache can return noisy matches when low-similarity results are accepted as cache hits. This PR adds a `score_threshold` parameter at cache construction so that `lookup` only returns results whose similarity score meets the threshold.
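
A minimal usage sketch under assumptions: the import paths, the `embedding` keyword, the connection string, and the database/collection names below are placeholders or guesses based on the constructor shown in the diff, not values from this PR.

```python
from langchain.globals import set_llm_cache
from langchain_mongodb.cache import MongoDBAtlasSemanticCache
from langchain_openai import OpenAIEmbeddings

# Register the semantic cache globally. With score_threshold set, only
# vector-search hits scoring >= 0.95 are treated as cache hits; anything
# noisier falls through to the LLM.
set_llm_cache(
    MongoDBAtlasSemanticCache(
        connection_string="mongodb+srv://<user>:<password>@<cluster>/",
        embedding=OpenAIEmbeddings(),
        collection_name="llm_semantic_cache",
        database_name="langchain_db",
        index_name="default",
        score_threshold=0.95,
    )
)
```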


- [x] **Add tests and docs**
  1. Added tests that confirm the `score_threshold` query is valid; a hypothetical test sketch follows this checklist.


- [x] **Lint and test**: Run `make format`, `make lint` and `make test`
from the root of the package(s) you've modified. See contribution
guidelines for more: https://python.langchain.com/docs/contributing/
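
A hypothetical sketch of the kind of test described above (the placeholder URI, the embedding model, and the exact assertions are assumptions for illustration, not the contents of the test file added in this PR):

```python
from langchain_core.outputs import Generation
from langchain_mongodb.cache import MongoDBAtlasSemanticCache
from langchain_openai import OpenAIEmbeddings


def test_semantic_cache_score_threshold() -> None:
    # Use a high threshold so only near-identical prompts count as cache hits.
    cache = MongoDBAtlasSemanticCache(
        connection_string="mongodb+srv://<user>:<password>@<cluster>/",
        embedding=OpenAIEmbeddings(),
        collection_name="llm_semantic_cache",
        database_name="langchain_db",
        wait_until_ready=True,  # block until the new document is searchable
        score_threshold=0.95,
    )
    cache.update(
        "What is MongoDB?", "fake-llm-config", [Generation(text="A document database.")]
    )

    # The same prompt scores near 1.0, above the threshold: cache hit.
    assert cache.lookup("What is MongoDB?", "fake-llm-config") is not None
    # An unrelated prompt scores below the threshold, so the $match
    # post-filter drops it and the lookup misses.
    assert cache.lookup("How do I bake sourdough bread?", "fake-llm-config") is None
```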

---------

Co-authored-by: Erick Friis <erick@langchain.dev>
Author: Jib
Date: 2024-03-19 14:30:02 -04:00
Committed by: GitHub
Parent: 30e4a35d7a
Commit: f8078e41e5
4 changed files with 23 additions and 2 deletions


@@ -217,6 +217,7 @@ class MongoDBAtlasSemanticCache(BaseCache, MongoDBAtlasVectorSearch):
         database_name: str = "default",
         index_name: str = "default",
         wait_until_ready: bool = False,
+        score_threshold: Optional[float] = None,
         **kwargs: Dict[str, Any],
     ):
         """
@@ -237,6 +238,7 @@ class MongoDBAtlasSemanticCache(BaseCache, MongoDBAtlasVectorSearch):
         """
         client = _generate_mongo_client(connection_string)
         self.collection = client[database_name][collection_name]
+        self.score_threshold = score_threshold
         self._wait_until_ready = wait_until_ready
         super().__init__(
             collection=self.collection,
@@ -247,8 +249,17 @@ class MongoDBAtlasSemanticCache(BaseCache, MongoDBAtlasVectorSearch):
     def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
         """Look up based on prompt and llm_string."""
+        post_filter_pipeline = (
+            [{"$match": {"score": {"$gte": self.score_threshold}}}]
+            if self.score_threshold
+            else None
+        )
         search_response = self.similarity_search_with_score(
-            prompt, 1, pre_filter={self.LLM: {"$eq": llm_string}}
+            prompt,
+            1,
+            pre_filter={self.LLM: {"$eq": llm_string}},
+            post_filter_pipeline=post_filter_pipeline,
         )
         if search_response:
             return_val = search_response[0][0].metadata.get(self.RETURN_VAL)
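
For context, the `post_filter_pipeline` stages are appended after the vector search, so the `$gte` match runs against the similarity score attached to each candidate document. A rough standalone sketch of that shape (the `$vectorSearch` and `$set` stages here are assumptions about the surrounding pipeline and index configuration, not part of this diff; only the final `$match` stage corresponds to the new code):

```python
from pymongo import MongoClient

client = MongoClient("mongodb+srv://<user>:<password>@<cluster>/")  # placeholder URI
collection = client["langchain_db"]["llm_semantic_cache"]

score_threshold = 0.95
pipeline = [
    {
        "$vectorSearch": {
            "index": "default",           # assumed Atlas vector index name
            "path": "embedding",          # assumed field holding the vectors
            "queryVector": [0.1] * 1536,  # placeholder query embedding
            "numCandidates": 10,
            "limit": 1,
        }
    },
    # Surface the vector-search score so later stages can filter on it.
    {"$set": {"score": {"$meta": "vectorSearchScore"}}},
    # The stage the new code appends when score_threshold is set:
    {"$match": {"score": {"$gte": score_threshold}}},
]
results = list(collection.aggregate(pipeline))
```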