mirror of
https://github.com/hwchase17/langchain.git
synced 2025-07-04 04:07:54 +00:00
Adding LangChain v0.2 support for nvidia ai endpoint, langchain-nvidia-ai-endpoints. Removed deprecated classes from nvidia_ai_endpoints.ipynb (#24411)
Description: added support for LangChain v0.2 for nvidia ai endpoint. Implemented in-memory storage for chains using RunnableWithMessageHistory, which is analogous to using `ConversationChain` as was done in v0.1 with the default `ConversationBufferMemory`. That class is deprecated in favor of `RunnableWithMessageHistory` in LangChain v0.2. Issue: None Dependencies: None. --------- Co-authored-by: Chester Curme <chester.curme@gmail.com>
This commit is contained in:
parent
334fc1ed1c
commit
4c651ba13a
@ -540,7 +540,7 @@
|
|||||||
"id": "137662a6"
|
"id": "137662a6"
|
||||||
},
|
},
|
||||||
"source": [
|
"source": [
|
||||||
"## Example usage within a Conversation Chains"
|
"## Example usage within RunnableWithMessageHistory "
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@ -550,7 +550,7 @@
|
|||||||
"id": "79efa62d"
|
"id": "79efa62d"
|
||||||
},
|
},
|
||||||
"source": [
|
"source": [
|
||||||
"Like any other integration, ChatNVIDIA is fine to support chat utilities like conversation buffers by default. Below, we show the [LangChain ConversationBufferMemory](https://python.langchain.com/docs/modules/memory/types/buffer) example applied to the `mistralai/mixtral-8x22b-instruct-v0.1` model."
|
"Like any other integration, ChatNVIDIA is fine to support chat utilities like RunnableWithMessageHistory which is analogous to using `ConversationChain`. Below, we show the [LangChain RunnableWithMessageHistory](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html) example applied to the `mistralai/mixtral-8x22b-instruct-v0.1` model."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@ -572,8 +572,19 @@
|
|||||||
},
|
},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"from langchain.chains import ConversationChain\n",
|
"from langchain_core.chat_history import InMemoryChatMessageHistory\n",
|
||||||
"from langchain.memory import ConversationBufferMemory\n",
|
"from langchain_core.runnables.history import RunnableWithMessageHistory\n",
|
||||||
|
"\n",
|
||||||
|
"# store is a dictionary that maps session IDs to their corresponding chat histories.\n",
|
||||||
|
"store = {} # memory is maintained outside the chain\n",
|
||||||
|
"\n",
|
||||||
|
"\n",
|
||||||
|
"# A function that returns the chat history for a given session ID.\n",
|
||||||
|
"def get_session_history(session_id: str) -> InMemoryChatMessageHistory:\n",
|
||||||
|
" if session_id not in store:\n",
|
||||||
|
" store[session_id] = InMemoryChatMessageHistory()\n",
|
||||||
|
" return store[session_id]\n",
|
||||||
|
"\n",
|
||||||
"\n",
|
"\n",
|
||||||
"chat = ChatNVIDIA(\n",
|
"chat = ChatNVIDIA(\n",
|
||||||
" model=\"mistralai/mixtral-8x22b-instruct-v0.1\",\n",
|
" model=\"mistralai/mixtral-8x22b-instruct-v0.1\",\n",
|
||||||
@ -582,24 +593,18 @@
|
|||||||
" top_p=1.0,\n",
|
" top_p=1.0,\n",
|
||||||
")\n",
|
")\n",
|
||||||
"\n",
|
"\n",
|
||||||
"conversation = ConversationChain(llm=chat, memory=ConversationBufferMemory())"
|
"# Define a RunnableConfig object, with a `configurable` key. session_id determines thread\n",
|
||||||
]
|
"config = {\"configurable\": {\"session_id\": \"1\"}}\n",
|
||||||
},
|
"\n",
|
||||||
{
|
"conversation = RunnableWithMessageHistory(\n",
|
||||||
"cell_type": "code",
|
" chat,\n",
|
||||||
"execution_count": null,
|
" get_session_history,\n",
|
||||||
"id": "f644ff28",
|
")\n",
|
||||||
"metadata": {
|
"\n",
|
||||||
"colab": {
|
"conversation.invoke(\n",
|
||||||
"base_uri": "https://localhost:8080/",
|
" \"Hi I'm Srijan Dubey.\", # input or query\n",
|
||||||
"height": 268
|
" config=config,\n",
|
||||||
},
|
")"
|
||||||
"id": "f644ff28",
|
|
||||||
"outputId": "bae354cc-2118-4e01-ce20-a717ac94d27d"
|
|
||||||
},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"conversation.invoke(\"Hi there!\")[\"response\"]"
|
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@ -616,26 +621,30 @@
|
|||||||
},
|
},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"conversation.invoke(\"I'm doing well! Just having a conversation with an AI.\")[\n",
|
"conversation.invoke(\n",
|
||||||
" \"response\"\n",
|
" \"I'm doing well! Just having a conversation with an AI.\",\n",
|
||||||
"]"
|
" config=config,\n",
|
||||||
|
")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"execution_count": null,
|
"execution_count": null,
|
||||||
"id": "LyD1xVKmVSs4",
|
"id": "uHIMZxVSVNBC",
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"colab": {
|
"colab": {
|
||||||
"base_uri": "https://localhost:8080/",
|
"base_uri": "https://localhost:8080/",
|
||||||
"height": 350
|
"height": 284
|
||||||
},
|
},
|
||||||
"id": "LyD1xVKmVSs4",
|
"id": "uHIMZxVSVNBC",
|
||||||
"outputId": "a1714513-a8fd-4d14-f974-233e39d5c4f5"
|
"outputId": "79acc89d-a820-4f2c-bac2-afe99da95580"
|
||||||
},
|
},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"conversation.invoke(\"Tell me about yourself.\")[\"response\"]"
|
"conversation.invoke(\n",
|
||||||
|
" \"Tell me about yourself.\",\n",
|
||||||
|
" config=config,\n",
|
||||||
|
")"
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
|
Loading…
Reference in New Issue
Block a user