docs: fix trim_messages code blocks (#23271)
Commit 9eda8f2fe8 (parent 86326269a1)
@@ -585,163 +585,137 @@ def trim_messages(
                return count

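    The hunk opens at the tail of the docstring's ``dummy_token_counter`` example, so only its final ``return count`` is visible above. The examples that follow assume a counter roughly like the sketch below, reconstructed to match the "4 token text / 10 token message" figures quoted in the outputs; it is not necessarily the docstring's exact code.

        .. code-block:: python

            from langchain_core.messages import BaseMessage

            def dummy_token_counter(messages: list[BaseMessage]) -> int:
                # Pretend each message has a 3-token prefix, 4 tokens per content
                # block, and a 3-token suffix, so a plain-text message is 10 tokens.
                default_content_len = 4
                default_msg_prefix_len = 3
                default_msg_suffix_len = 3

                count = 0
                for msg in messages:
                    if isinstance(msg.content, str):
                        count += (
                            default_msg_prefix_len
                            + default_content_len
                            + default_msg_suffix_len
                        )
                    if isinstance(msg.content, list):
                        count += (
                            default_msg_prefix_len
                            + len(msg.content) * default_content_len
                            + default_msg_suffix_len
                        )
                return count
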
    First 30 tokens, not allowing partial messages:

        .. code-block:: python

            trim_messages(messages, max_tokens=30, token_counter=dummy_token_counter, strategy="first")

        .. code-block:: python

            [
                SystemMessage("This is a 4 token text. The full message is 10 tokens."),
                HumanMessage("This is a 4 token text. The full message is 10 tokens.", id="first"),
            ]

    First 30 tokens, allowing partial messages:

        .. code-block:: python

            trim_messages(
                messages,
                max_tokens=30,
                token_counter=dummy_token_counter,
                strategy="first",
                allow_partial=True,
            )

        .. code-block:: python

            [
                SystemMessage("This is a 4 token text. The full message is 10 tokens."),
                HumanMessage("This is a 4 token text. The full message is 10 tokens.", id="first"),
                AIMessage([{"type": "text", "text": "This is the FIRST 4 token block."}], id="second"),
            ]

    First 30 tokens, allowing partial messages, have to end on HumanMessage:

        .. code-block:: python

            trim_messages(
                messages,
                max_tokens=30,
                token_counter=dummy_token_counter,
                strategy="first",
                allow_partial=True,
                end_on="human",
            )

        .. code-block:: python

            [
                SystemMessage("This is a 4 token text. The full message is 10 tokens."),
                HumanMessage("This is a 4 token text. The full message is 10 tokens.", id="first"),
            ]

    Last 30 tokens, including system message, not allowing partial messages:

        .. code-block:: python

            trim_messages(messages, max_tokens=30, include_system=True, token_counter=dummy_token_counter, strategy="last")

        .. code-block:: python

            [
                SystemMessage("This is a 4 token text. The full message is 10 tokens."),
                HumanMessage("This is a 4 token text. The full message is 10 tokens.", id="third"),
                AIMessage("This is a 4 token text. The full message is 10 tokens.", id="fourth"),
            ]

    Last 40 tokens, including system message, allowing partial messages:

        .. code-block:: python

            trim_messages(
                messages,
                max_tokens=40,
                token_counter=dummy_token_counter,
                strategy="last",
                allow_partial=True,
                include_system=True,
            )

        .. code-block:: python

            [
                SystemMessage("This is a 4 token text. The full message is 10 tokens."),
                AIMessage(
                    [{"type": "text", "text": "This is the FIRST 4 token block."}],
                    id="second",
                ),
                HumanMessage("This is a 4 token text. The full message is 10 tokens.", id="third"),
                AIMessage("This is a 4 token text. The full message is 10 tokens.", id="fourth"),
            ]

    Last 30 tokens, including system message, allowing partial messages, end on HumanMessage:

        .. code-block:: python

            trim_messages(
                messages,
                max_tokens=30,
                token_counter=dummy_token_counter,
                strategy="last",
                end_on="human",
                include_system=True,
                allow_partial=True,
            )

        .. code-block:: python

            [
                SystemMessage("This is a 4 token text. The full message is 10 tokens."),
                AIMessage(
                    [{"type": "text", "text": "This is the FIRST 4 token block."}],
                    id="second",
                ),
                HumanMessage("This is a 4 token text. The full message is 10 tokens.", id="third"),
            ]

    Last 40 tokens, including system message, allowing partial messages, start on HumanMessage:

        .. code-block:: python

            trim_messages(
                messages,
                max_tokens=40,
                token_counter=dummy_token_counter,
                strategy="last",
                include_system=True,
                allow_partial=True,
                start_on="human",
            )

        .. code-block:: python

            [
                SystemMessage("This is a 4 token text. The full message is 10 tokens."),
                HumanMessage("This is a 4 token text. The full message is 10 tokens.", id="third"),
                AIMessage("This is a 4 token text. The full message is 10 tokens.", id="fourth"),
            ]

    Using a TextSplitter for splitting parting messages:

        .. code-block:: python

            ...
            trim_messages(
                messages,
                max_tokens=40,
                token_counter=dummy_token_counter,
                strategy="last",
                include_system=True,
                allow_partial=True,
                start_on="human",
            )

        .. code-block:: python

            ...

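    The diff elides part of this example, so the ``...`` placeholders above are left as-is. For orientation only, a partial-message split with a text splitter might look roughly like the sketch below; it assumes the ``text_splitter`` parameter of ``trim_messages`` accepts a TextSplitter instance (or a ``Callable[[str], list[str]]``), and the ``RecursiveCharacterTextSplitter`` settings are illustrative rather than taken from the docstring.

        .. code-block:: python

            from langchain_text_splitters import RecursiveCharacterTextSplitter

            # Split partial message text into character chunks instead of keeping
            # whole content blocks; chunk_size counts characters and is chosen
            # only for illustration.
            splitter = RecursiveCharacterTextSplitter(chunk_size=10, chunk_overlap=0)

            trim_messages(
                messages,
                max_tokens=40,
                token_counter=dummy_token_counter,
                strategy="last",
                allow_partial=True,
                text_splitter=splitter,
            )
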
    Using a model for token counting:

        .. code-block:: python

            ...

        .. code-block:: python

            ...

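    The bodies of these blocks are likewise elided in the diff. A model-based counter would look something like the sketch below: passing a chat model as ``token_counter`` lets ``trim_messages`` count tokens with the model's own tokenizer (the ``BaseLanguageModel`` import at the end of the hunk backs this case). ``ChatOpenAI`` and the 45-token budget are illustrative choices, not the docstring's exact example.

        .. code-block:: python

            from langchain_openai import ChatOpenAI

            trim_messages(
                messages,
                max_tokens=45,
                # The model counts tokens via its own tokenizer instead of
                # dummy_token_counter.
                token_counter=ChatOpenAI(model="gpt-4o"),
                strategy="last",
            )
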
    Chaining:

        .. code-block:: python

            ...

        .. code-block:: python

            [
                SystemMessage("This is a 4 token text. The full message is 10 tokens."),
                HumanMessage("This is a 4 token text. The full message is 10 tokens.", id="third"),
                AIMessage("This is a 4 token text. The full message is 10 tokens.", id="fourth"),
            ]

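    The chaining call itself is also elided. As a sketch of the pattern, and under the assumption that calling ``trim_messages`` without the ``messages`` argument returns a runnable that can be piped into a model (``llm`` here stands for any chat model instance and is not defined in this hunk):

        .. code-block:: python

            # llm is assumed to be any chat model instance, e.g. ChatOpenAI().
            trimmer = trim_messages(
                max_tokens=45,
                token_counter=llm,
                strategy="last",
            )

            chain = trimmer | llm
            chain.invoke(messages)
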
""" # noqa: E501
|
||||
from langchain_core.language_models import BaseLanguageModel
|
||||
|
||||
|