mirror of
https://github.com/hwchase17/langchain.git
synced 2025-07-07 05:30:39 +00:00
community[patch],core[minor]: Move in memory cache implementation to core (#20753)
This PR moves the InMemoryCache implementation from community to core.
This commit is contained in:
parent
4f67ce485a
commit
ad6b5f84e5
@ -22,7 +22,7 @@ Cache directly competes with Memory. See documentation for Pros and Cons.
|
|||||||
from __future__ import annotations
|
from __future__ import annotations
|
||||||
|
|
||||||
from abc import ABC, abstractmethod
|
from abc import ABC, abstractmethod
|
||||||
from typing import Any, Optional, Sequence
|
from typing import Any, Dict, Optional, Sequence, Tuple
|
||||||
|
|
||||||
from langchain_core.outputs import Generation
|
from langchain_core.outputs import Generation
|
||||||
from langchain_core.runnables import run_in_executor
|
from langchain_core.runnables import run_in_executor
|
||||||
@ -105,3 +105,37 @@ class BaseCache(ABC):
|
|||||||
async def aclear(self, **kwargs: Any) -> None:
    """Asynchronously clear the cache.

    Default implementation for subclasses that only provide a synchronous
    ``clear``: the call is delegated to ``clear`` on an executor thread so
    the event loop is never blocked. Any keyword arguments are forwarded
    unchanged to the synchronous implementation.
    """
    return await run_in_executor(None, self.clear, **kwargs)
||||||
|
|
||||||
|
|
||||||
|
class InMemoryCache(BaseCache):
    """Cache that stores things in memory.

    Entries are keyed by ``(prompt, llm_string)`` pairs. The cache is
    process-local and is lost when the process exits.
    """

    def __init__(self, *, maxsize: Optional[int] = None) -> None:
        """Initialize with empty cache.

        Args:
            maxsize: Optional maximum number of entries to retain. When the
                cache is full, the oldest entry (by insertion order) is
                evicted on the next insert. ``None`` (the default) means
                unbounded, preserving the original behavior.

        Raises:
            ValueError: If ``maxsize`` is zero or negative.
        """
        # Dicts preserve insertion order, which FIFO eviction relies on.
        self._cache: Dict[Tuple[str, str], RETURN_VAL_TYPE] = {}
        if maxsize is not None and maxsize < 1:
            raise ValueError("maxsize must be greater than 0")
        self._maxsize = maxsize

    def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
        """Look up based on prompt and llm_string.

        Returns:
            The cached value, or ``None`` on a cache miss.
        """
        return self._cache.get((prompt, llm_string), None)

    def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
        """Update cache based on prompt and llm_string."""
        if (
            self._maxsize is not None
            and (prompt, llm_string) not in self._cache
            and len(self._cache) >= self._maxsize
        ):
            # Full and inserting a new key: evict the oldest entry (FIFO).
            # Overwrites of an existing key never trigger eviction.
            del self._cache[next(iter(self._cache))]
        self._cache[(prompt, llm_string)] = return_val

    def clear(self, **kwargs: Any) -> None:
        """Clear cache."""
        self._cache = {}

    async def alookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
        """Async look up based on prompt and llm_string.

        The in-memory store never blocks, so this delegates directly to the
        synchronous ``lookup`` without an executor hop.
        """
        return self.lookup(prompt, llm_string)

    async def aupdate(
        self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE
    ) -> None:
        """Async update cache based on prompt and llm_string."""
        self.update(prompt, llm_string, return_val)

    async def aclear(self, **kwargs: Any) -> None:
        """Async clear cache; delegates to the synchronous ``clear``."""
        self.clear()
|
@ -18,7 +18,6 @@ from langchain_community.cache import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
__all__ = [
|
__all__ = [
|
||||||
"InMemoryCache",
|
|
||||||
"FullLLMCache",
|
"FullLLMCache",
|
||||||
"SQLAlchemyCache",
|
"SQLAlchemyCache",
|
||||||
"SQLiteCache",
|
"SQLiteCache",
|
||||||
@ -27,6 +26,7 @@ __all__ = [
|
|||||||
"RedisSemanticCache",
|
"RedisSemanticCache",
|
||||||
"GPTCache",
|
"GPTCache",
|
||||||
"MomentoCache",
|
"MomentoCache",
|
||||||
|
"InMemoryCache",
|
||||||
"CassandraCache",
|
"CassandraCache",
|
||||||
"CassandraSemanticCache",
|
"CassandraSemanticCache",
|
||||||
"FullMd5LLMCache",
|
"FullMd5LLMCache",
|
||||||
|
@ -6,9 +6,10 @@ try:
|
|||||||
except ImportError:
|
except ImportError:
|
||||||
from sqlalchemy.ext.declarative import declarative_base
|
from sqlalchemy.ext.declarative import declarative_base
|
||||||
|
|
||||||
|
from langchain_core.caches import InMemoryCache
|
||||||
from langchain_core.outputs import Generation, LLMResult
|
from langchain_core.outputs import Generation, LLMResult
|
||||||
|
|
||||||
from langchain.cache import InMemoryCache, SQLAlchemyCache
|
from langchain.cache import SQLAlchemyCache
|
||||||
from langchain.globals import get_llm_cache, set_llm_cache
|
from langchain.globals import get_llm_cache, set_llm_cache
|
||||||
from langchain.llms.base import __all__
|
from langchain.llms.base import __all__
|
||||||
from tests.unit_tests.llms.fake_llm import FakeLLM
|
from tests.unit_tests.llms.fake_llm import FakeLLM
|
||||||
|
@ -6,6 +6,7 @@ import pytest
|
|||||||
from _pytest.fixtures import FixtureRequest
|
from _pytest.fixtures import FixtureRequest
|
||||||
from langchain_community.chat_models import FakeListChatModel
|
from langchain_community.chat_models import FakeListChatModel
|
||||||
from langchain_community.llms import FakeListLLM
|
from langchain_community.llms import FakeListLLM
|
||||||
|
from langchain_core.caches import InMemoryCache
|
||||||
from langchain_core.language_models.chat_models import BaseChatModel
|
from langchain_core.language_models.chat_models import BaseChatModel
|
||||||
from langchain_core.language_models.llms import BaseLLM
|
from langchain_core.language_models.llms import BaseLLM
|
||||||
from langchain_core.load import dumps
|
from langchain_core.load import dumps
|
||||||
@ -14,7 +15,7 @@ from langchain_core.outputs import ChatGeneration, Generation
|
|||||||
from sqlalchemy import create_engine
|
from sqlalchemy import create_engine
|
||||||
from sqlalchemy.orm import Session
|
from sqlalchemy.orm import Session
|
||||||
|
|
||||||
from langchain.cache import InMemoryCache, SQLAlchemyCache
|
from langchain.cache import SQLAlchemyCache
|
||||||
from langchain.globals import get_llm_cache, set_llm_cache
|
from langchain.globals import get_llm_cache, set_llm_cache
|
||||||
|
|
||||||
|
|
||||||
|
Loading…
Reference in New Issue
Block a user