langchain[patch]: deprecate ConversationChain (#23504)

Would like some feedback on how to best incorporate legacy memory
objects into `RunnableWithMessageHistory`.
This commit is contained in:
ccurme 2024-06-27 16:32:44 -04:00 committed by GitHub
parent c6f700b7cb
commit d04f657424
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

View File

@@ -1,6 +1,7 @@
"""Chain that carries on a conversation and calls an LLM."""
from typing import Dict, List
from langchain_core._api import deprecated
from langchain_core.memory import BaseMemory
from langchain_core.prompts import BasePromptTemplate
from langchain_core.pydantic_v1 import Extra, Field, root_validator
@@ -10,9 +11,87 @@ from langchain.chains.llm import LLMChain
from langchain.memory.buffer import ConversationBufferMemory
@deprecated(
since="0.2.7",
alternative=(
"RunnableWithMessageHistory: "
"https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html" # noqa: E501
),
removal="1.0",
)
class ConversationChain(LLMChain):
"""Chain to have a conversation and load context from memory.
This class is deprecated in favor of ``RunnableWithMessageHistory``. Please refer
to this tutorial for more detail: https://python.langchain.com/v0.2/docs/tutorials/chatbot/
``RunnableWithMessageHistory`` offers several benefits, including:
- Stream, batch, and async support;
- More flexible memory handling, including the ability to manage memory
outside the chain;
- Support for multiple threads.
Below is a minimal implementation, analogous to using ``ConversationChain`` with
the default ``ConversationBufferMemory``:
.. code-block:: python
from langchain_core.chat_history import InMemoryChatMessageHistory
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_openai import ChatOpenAI
store = {} # memory is maintained outside the chain
def get_session_history(session_id: str) -> InMemoryChatMessageHistory:
if session_id not in store:
store[session_id] = InMemoryChatMessageHistory()
return store[session_id]
llm = ChatOpenAI(model="gpt-3.5-turbo-0125")
chain = RunnableWithMessageHistory(llm, get_session_history)
chain.invoke(
"Hi I'm Bob.",
config={"configurable": {"session_id": "1"}},
) # session_id determines thread
Memory objects can also be incorporated into the ``get_session_history`` callable:
.. code-block:: python
from langchain.memory import ConversationBufferWindowMemory
from langchain_core.chat_history import InMemoryChatMessageHistory
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_openai import ChatOpenAI
store = {} # memory is maintained outside the chain
def get_session_history(session_id: str) -> InMemoryChatMessageHistory:
if session_id not in store:
store[session_id] = InMemoryChatMessageHistory()
return store[session_id]
memory = ConversationBufferWindowMemory(
chat_memory=store[session_id],
k=3,
return_messages=True,
)
assert len(memory.memory_variables) == 1
key = memory.memory_variables[0]
messages = memory.load_memory_variables({})[key]
store[session_id] = InMemoryChatMessageHistory(messages=messages)
return store[session_id]
llm = ChatOpenAI(model="gpt-3.5-turbo-0125")
chain = RunnableWithMessageHistory(llm, get_session_history)
chain.invoke(
"Hi I'm Bob.",
config={"configurable": {"session_id": "1"}},
) # session_id determines thread
Example:
.. code-block:: python