From 2a0d9d05fb5f0d770639753d39217efc26c9ba56 Mon Sep 17 00:00:00 2001 From: Eugene Yurtsev Date: Thu, 26 Sep 2024 13:02:30 -0400 Subject: [PATCH] docs: Fix trim_messages invocations in the memory migration guide (#26902) Should only be start_on="human", not start_on=("human", "ai") --- .../conversation_buffer_window_memory.ipynb | 64 +++++++++++-------- 1 file changed, 36 insertions(+), 28 deletions(-) diff --git a/docs/docs/versions/migrating_memory/conversation_buffer_window_memory.ipynb b/docs/docs/versions/migrating_memory/conversation_buffer_window_memory.ipynb index 0e84de6001a..f031e1dbb70 100644 --- a/docs/docs/versions/migrating_memory/conversation_buffer_window_memory.ipynb +++ b/docs/docs/versions/migrating_memory/conversation_buffer_window_memory.ipynb @@ -222,13 +222,15 @@ " token_counter=len, # <-- len will simply count the number of messages rather than tokens\n", " max_tokens=5, # <-- allow up to 5 messages.\n", " strategy=\"last\",\n", - " # The start_on is specified\n", - " # to make sure we do not generate a sequence where\n", - " # a ToolMessage that contains the result of a tool invocation\n", - " # appears before the AIMessage that requested a tool invocation\n", - " # as this will cause some chat models to raise an error.\n", - " start_on=(\"human\", \"ai\"),\n", - " include_system=True, # <-- Keep the system message\n", + " # Most chat models expect that chat history starts with either:\n", + " # (1) a HumanMessage or\n", + " # (2) a SystemMessage followed by a HumanMessage\n", + " # start_on=\"human\" makes sure we produce a valid chat history\n", + " start_on=\"human\",\n", + " # Usually, we want to keep the SystemMessage\n", + " # if it's present in the original history.\n", + " # The SystemMessage has special instructions for the model.\n", + " include_system=True,\n", " allow_partial=False,\n", ")\n", "\n", @@ -280,13 +282,15 @@ " token_counter=ChatOpenAI(model=\"gpt-4o\"),\n", " max_tokens=80, # <-- token limit\n", - " # The start_on is 
specified\n", - " # to make sure we do not generate a sequence where\n", - " # a ToolMessage that contains the result of a tool invocation\n", - " # appears before the AIMessage that requested a tool invocation\n", - " # as this will cause some chat models to raise an error.\n", - " start_on=(\"human\", \"ai\"),\n", + " # Most chat models expect that chat history starts with either:\n", + " # (1) a HumanMessage or\n", + " # (2) a SystemMessage followed by a HumanMessage\n", + " # start_on=\"human\" makes sure we produce a valid chat history\n", + " start_on=\"human\",\n", + " # Usually, we want to keep the SystemMessage\n", + " # if it's present in the original history.\n", + " # The SystemMessage has special instructions for the model.\n", + " include_system=True,\n", " strategy=\"last\",\n", - " include_system=True, # <-- Keep the system message\n", ")\n", "\n", "for msg in selected_messages:\n", @@ -361,13 +366,15 @@ " token_counter=len, # <-- len will simply count the number of messages rather than tokens\n", " max_tokens=5, # <-- allow up to 5 messages.\n", " strategy=\"last\",\n", - " # The start_on is specified\n", - " # to make sure we do not generate a sequence where\n", - " # a ToolMessage that contains the result of a tool invocation\n", - " # appears before the AIMessage that requested a tool invocation\n", - " # as this will cause some chat models to raise an error.\n", - " start_on=(\"human\", \"ai\"),\n", - " include_system=True, # <-- Keep the system message\n", + " # Most chat models expect that chat history starts with either:\n", + " # (1) a HumanMessage or\n", + " # (2) a SystemMessage followed by a HumanMessage\n", + " # start_on=\"human\" makes sure we produce a valid chat history\n", + " start_on=\"human\",\n", + " # Usually, we want to keep the SystemMessage\n", + " # if it's present in the original history.\n", + " # The SystemMessage has special instructions for the model.\n", + " include_system=True,\n", " allow_partial=False,\n", " )\n", 
"\n", @@ -501,13 +508,15 @@ " token_counter=len, # <-- len will simply count the number of messages rather than tokens\n", " max_tokens=5, # <-- allow up to 5 messages.\n", " strategy=\"last\",\n", - " # The start_on is specified\n", - " # to make sure we do not generate a sequence where\n", - " # a ToolMessage that contains the result of a tool invocation\n", - " # appears before the AIMessage that requested a tool invocation\n", - " # as this will cause some chat models to raise an error.\n", - " start_on=(\"human\", \"ai\"),\n", - " include_system=True, # <-- Keep the system message\n", + " # Most chat models expect that chat history starts with either:\n", + " # (1) a HumanMessage or\n", + " # (2) a SystemMessage followed by a HumanMessage\n", + " # start_on=\"human\" makes sure we produce a valid chat history\n", + " start_on=\"human\",\n", + " # Usually, we want to keep the SystemMessage\n", + " # if it's present in the original history.\n", + " # The SystemMessage has special instructions for the model.\n", + " include_system=True,\n", " allow_partial=False,\n", " )\n", "\n",