mirror of
https://github.com/hwchase17/langchain.git
synced 2025-09-08 22:42:05 +00:00
core: Add optional max_messages to MessagesPlaceholder (#16098)
- **Description:** Add an optional max_messages limit to MessagesPlaceholder.
- **Issue:** [16096](https://github.com/langchain-ai/langchain/issues/16096)
- **Dependencies:** None
- **Twitter handle:** @davedecaprio

Sometimes it's better to limit the history in the prompt itself rather than in the memory. This is needed if you want different prompts in the chain to have different history lengths.

---------

Co-authored-by: Harrison Chase <hw.chase.17@gmail.com>
This commit is contained in:
@@ -655,6 +655,21 @@ def test_messages_placeholder() -> None:
|
||||
]
|
||||
|
||||
|
||||
def test_messages_placeholder_with_max() -> None:
    """Verify MessagesPlaceholder keeps only the most recent ``n_messages``."""
    conversation = [AIMessage(content=str(i)) for i in range(1, 4)]

    # Without a limit, every message passes through unchanged.
    unlimited = MessagesPlaceholder("history")
    assert unlimited.format_messages(history=conversation) == conversation

    # With n_messages=2, only the last two messages are retained.
    limited = MessagesPlaceholder("history", n_messages=2)
    assert limited.format_messages(history=conversation) == conversation[-2:]
|
||||
|
||||
|
||||
def test_chat_prompt_message_placeholder_partial() -> None:
|
||||
prompt = ChatPromptTemplate.from_messages([MessagesPlaceholder("history")])
|
||||
prompt = prompt.partial(history=[("system", "foo")])
|
||||
|
Reference in New Issue
Block a user