mirror of
https://github.com/hwchase17/langchain.git
synced 2025-07-04 04:07:54 +00:00
Fixed caching bug for multiple caching types by correctly checking types (#6746)
- Fixed an issue where several cache implementations checked the wrong variable's type (`return_val` instead of each generation `gen`), which prevented caching from ever succeeding. Maintainer responsibilities: DataLoaders / VectorStores / Retrievers: @rlancemartin, @eyurtsev
This commit is contained in:
parent
e9877ea8b1
commit
9187d2f3a9
@ -226,7 +226,7 @@ class RedisCache(BaseCache):
|
|||||||
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
|
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
|
||||||
"""Update cache based on prompt and llm_string."""
|
"""Update cache based on prompt and llm_string."""
|
||||||
for gen in return_val:
|
for gen in return_val:
|
||||||
if not isinstance(return_val, Generation):
|
if not isinstance(gen, Generation):
|
||||||
raise ValueError(
|
raise ValueError(
|
||||||
"RedisCache only supports caching of normal LLM generations, "
|
"RedisCache only supports caching of normal LLM generations, "
|
||||||
f"got {type(gen)}"
|
f"got {type(gen)}"
|
||||||
@ -337,7 +337,7 @@ class RedisSemanticCache(BaseCache):
|
|||||||
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
|
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
|
||||||
"""Update cache based on prompt and llm_string."""
|
"""Update cache based on prompt and llm_string."""
|
||||||
for gen in return_val:
|
for gen in return_val:
|
||||||
if not isinstance(return_val, Generation):
|
if not isinstance(gen, Generation):
|
||||||
raise ValueError(
|
raise ValueError(
|
||||||
"RedisSemanticCache only supports caching of "
|
"RedisSemanticCache only supports caching of "
|
||||||
f"normal LLM generations, got {type(gen)}"
|
f"normal LLM generations, got {type(gen)}"
|
||||||
@ -455,7 +455,7 @@ class GPTCache(BaseCache):
|
|||||||
and then store the `prompt` and `return_val` in the cache object.
|
and then store the `prompt` and `return_val` in the cache object.
|
||||||
"""
|
"""
|
||||||
for gen in return_val:
|
for gen in return_val:
|
||||||
if not isinstance(return_val, Generation):
|
if not isinstance(gen, Generation):
|
||||||
raise ValueError(
|
raise ValueError(
|
||||||
"GPTCache only supports caching of normal LLM generations, "
|
"GPTCache only supports caching of normal LLM generations, "
|
||||||
f"got {type(gen)}"
|
f"got {type(gen)}"
|
||||||
@ -628,7 +628,7 @@ class MomentoCache(BaseCache):
|
|||||||
Exception: Unexpected response
|
Exception: Unexpected response
|
||||||
"""
|
"""
|
||||||
for gen in return_val:
|
for gen in return_val:
|
||||||
if not isinstance(return_val, Generation):
|
if not isinstance(gen, Generation):
|
||||||
raise ValueError(
|
raise ValueError(
|
||||||
"Momento only supports caching of normal LLM generations, "
|
"Momento only supports caching of normal LLM generations, "
|
||||||
f"got {type(gen)}"
|
f"got {type(gen)}"
|
||||||
|
Loading…
Reference in New Issue
Block a user