From 9187d2f3a97abc6d89daea9b5abfa652a425e1de Mon Sep 17 00:00:00 2001 From: 0xcrusher <113572970+0xcrusher@users.noreply.github.com> Date: Mon, 26 Jun 2023 15:14:32 +0700 Subject: [PATCH] Fixed caching bug for multiple caching types by correctly checking types (#6746) - Fixed an issue where some caching types checked the wrong type, hence preventing caching from working Maintainer responsibilities: - DataLoaders / VectorStores / Retrievers: @rlancemartin, @eyurtsev --- langchain/cache.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/langchain/cache.py b/langchain/cache.py index 2cfb7ff3720..db1718e6ffb 100644 --- a/langchain/cache.py +++ b/langchain/cache.py @@ -226,7 +226,7 @@ class RedisCache(BaseCache): def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None: """Update cache based on prompt and llm_string.""" for gen in return_val: - if not isinstance(return_val, Generation): + if not isinstance(gen, Generation): raise ValueError( "RedisCache only supports caching of normal LLM generations, " f"got {type(gen)}" @@ -337,7 +337,7 @@ class RedisSemanticCache(BaseCache): def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None: """Update cache based on prompt and llm_string.""" for gen in return_val: - if not isinstance(return_val, Generation): + if not isinstance(gen, Generation): raise ValueError( "RedisSemanticCache only supports caching of " f"normal LLM generations, got {type(gen)}" @@ -455,7 +455,7 @@ class GPTCache(BaseCache): and then store the `prompt` and `return_val` in the cache object. 
""" for gen in return_val: - if not isinstance(return_val, Generation): + if not isinstance(gen, Generation): raise ValueError( "GPTCache only supports caching of normal LLM generations, " f"got {type(gen)}" @@ -628,7 +628,7 @@ class MomentoCache(BaseCache): Exception: Unexpected response """ for gen in return_val: - if not isinstance(return_val, Generation): + if not isinstance(gen, Generation): raise ValueError( "Momento only supports caching of normal LLM generations, " f"got {type(gen)}"