community[patch],core[minor]: Move in memory cache implementation to core (#20753)
This PR moves the InMemoryCache implementation from community to core.
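With the move, the class is importable straight from langchain_core.caches. A minimal usage sketch (the prompt and llm_string values below are made up for illustration; per the implementation, entries are keyed on the (prompt, llm_string) pair and values are sequences of Generation objects):

    from langchain_core.caches import InMemoryCache
    from langchain_core.outputs import Generation

    cache = InMemoryCache()

    # A miss returns None; entries are keyed on (prompt, llm_string).
    assert cache.lookup("What is 2 + 2?", "fake-llm-config") is None

    cache.update("What is 2 + 2?", "fake-llm-config", [Generation(text="4")])
    assert cache.lookup("What is 2 + 2?", "fake-llm-config") == [Generation(text="4")]

To install it as the global LLM cache, the updated tests pair it with set_llm_cache from langchain.globals, e.g. set_llm_cache(InMemoryCache()).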
parent 4f67ce485a
commit ad6b5f84e5
@@ -22,7 +22,7 @@ Cache directly competes with Memory. See documentation for Pros and Cons.
 from __future__ import annotations
 
 from abc import ABC, abstractmethod
-from typing import Any, Optional, Sequence
+from typing import Any, Dict, Optional, Sequence, Tuple
 
 from langchain_core.outputs import Generation
 from langchain_core.runnables import run_in_executor
@@ -105,3 +105,37 @@ class BaseCache(ABC):
     async def aclear(self, **kwargs: Any) -> None:
         """Clear cache that can take additional keyword arguments."""
        return await run_in_executor(None, self.clear, **kwargs)
+
+
+class InMemoryCache(BaseCache):
+    """Cache that stores things in memory."""
+
+    def __init__(self) -> None:
+        """Initialize with empty cache."""
+        self._cache: Dict[Tuple[str, str], RETURN_VAL_TYPE] = {}
+
+    def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
+        """Look up based on prompt and llm_string."""
+        return self._cache.get((prompt, llm_string), None)
+
+    def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
+        """Update cache based on prompt and llm_string."""
+        self._cache[(prompt, llm_string)] = return_val
+
+    def clear(self, **kwargs: Any) -> None:
+        """Clear cache."""
+        self._cache = {}
+
+    async def alookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
+        """Look up based on prompt and llm_string."""
+        return self.lookup(prompt, llm_string)
+
+    async def aupdate(
+        self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE
+    ) -> None:
+        """Update cache based on prompt and llm_string."""
+        self.update(prompt, llm_string, return_val)
+
+    async def aclear(self, **kwargs: Any) -> None:
+        """Clear cache."""
+        self.clear()
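Note the asymmetry in this hunk: BaseCache's default aclear (the context lines at the top) hops to a thread via run_in_executor, while InMemoryCache overrides the async methods to call the sync ones directly, since a plain dict operation cannot block the event loop. A quick sketch of the async surface, assuming only what the diff shows:

    import asyncio

    from langchain_core.caches import InMemoryCache
    from langchain_core.outputs import Generation

    async def main() -> None:
        cache = InMemoryCache()
        # aupdate/alookup delegate straight to update/lookup: no executor
        # round-trip is needed for in-process dict access.
        await cache.aupdate("prompt", "llm-string", [Generation(text="cached")])
        print(await cache.alookup("prompt", "llm-string"))  # the stored generations
        await cache.aclear()
        print(await cache.alookup("prompt", "llm-string"))  # None

    asyncio.run(main())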
@@ -18,7 +18,6 @@ from langchain_community.cache import (
 )
 
 __all__ = [
-    "InMemoryCache",
     "FullLLMCache",
     "SQLAlchemyCache",
     "SQLiteCache",
@@ -27,6 +26,7 @@ __all__ = [
     "RedisSemanticCache",
     "GPTCache",
     "MomentoCache",
+    "InMemoryCache",
    "CassandraCache",
     "CassandraSemanticCache",
     "FullMd5LLMCache",
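Since "InMemoryCache" stays in langchain.cache's __all__ (only its position changes above), the old import path should keep resolving; presumably the module now re-exports the core implementation rather than the community one, though the re-export line itself falls outside this hunk. A hypothetical compatibility check, not taken from the diff:

    from langchain.cache import InMemoryCache as LegacyInMemoryCache
    from langchain_core.caches import InMemoryCache

    # Expected to hold if langchain.cache re-exports the core class
    # (an assumption about code outside the visible hunks).
    assert LegacyInMemoryCache is InMemoryCache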
@@ -6,9 +6,10 @@ try:
 except ImportError:
     from sqlalchemy.ext.declarative import declarative_base
 
+from langchain_core.caches import InMemoryCache
 from langchain_core.outputs import Generation, LLMResult
 
-from langchain.cache import InMemoryCache, SQLAlchemyCache
+from langchain.cache import SQLAlchemyCache
 from langchain.globals import get_llm_cache, set_llm_cache
 from langchain.llms.base import __all__
 from tests.unit_tests.llms.fake_llm import FakeLLM
@@ -6,6 +6,7 @@ import pytest
 from _pytest.fixtures import FixtureRequest
 from langchain_community.chat_models import FakeListChatModel
 from langchain_community.llms import FakeListLLM
+from langchain_core.caches import InMemoryCache
 from langchain_core.language_models.chat_models import BaseChatModel
 from langchain_core.language_models.llms import BaseLLM
 from langchain_core.load import dumps
@@ -14,7 +15,7 @@ from langchain_core.outputs import ChatGeneration, Generation
 from sqlalchemy import create_engine
 from sqlalchemy.orm import Session
 
-from langchain.cache import InMemoryCache, SQLAlchemyCache
+from langchain.cache import SQLAlchemyCache
 from langchain.globals import get_llm_cache, set_llm_cache