langchain: Add ruff rule W (#31876)

All auto-fixes
See https://docs.astral.sh/ruff/rules/#warning-w

Co-authored-by: Chester Curme <chester.curme@gmail.com>
Christophe Bornet 2025-07-05 23:57:30 +02:00 committed by GitHub
parent 3f4b355eef
commit bf05229029
27 changed files with 125 additions and 125 deletions

View File

@ -23,22 +23,22 @@ class AgentType(str, Enum):
REACT_DOCSTORE = "react-docstore"
"""A zero shot agent that does a reasoning step before acting.
This agent has access to a document store that allows it to look up
relevant information for answering the question.
"""
SELF_ASK_WITH_SEARCH = "self-ask-with-search"
"""An agent that breaks down a complex question into a series of simpler questions.
This agent uses a search tool to look up answers to the simpler questions
in order to answer the original complex question.
"""
CONVERSATIONAL_REACT_DESCRIPTION = "conversational-react-description"
CHAT_ZERO_SHOT_REACT_DESCRIPTION = "chat-zero-shot-react-description"
"""A zero shot agent that does a reasoning step before acting.
This agent is designed to be used in conjunction
"""
CHAT_CONVERSATIONAL_REACT_DESCRIPTION = "chat-conversational-react-description"
@ -47,7 +47,7 @@ class AgentType(str, Enum):
"structured-chat-zero-shot-react-description"
)
"""An zero-shot react agent optimized for chat models.
This agent is capable of invoking tools that have multiple inputs.
"""

View File

@ -27,10 +27,10 @@ def create_json_chat_agent(
tools: Tools this agent has access to.
prompt: The prompt to use. See Prompt section below for more.
stop_sequence: bool or list of str.
If True, adds a stop token of "Observation:" to avoid hallucinations.
If False, does not add a stop token.
If a list of str, uses the provided list as the stop tokens.
Default is True. You may want to set this to False if the LLM you are using
does not support stop sequences.
tools_renderer: This controls how the tools are converted into a string and
@ -43,7 +43,7 @@ def create_json_chat_agent(
A Runnable sequence representing an agent. It takes as input all the same input
variables as the prompt passed in does. It returns as output either an
AgentAction or AgentFinish.
Raises:
ValueError: If the prompt is missing required variables.
ValueError: If the template_tool_response is missing
@ -79,18 +79,18 @@ def create_json_chat_agent(
)
Prompt:
The prompt must have input keys:
* `tools`: contains descriptions and arguments for each tool.
* `tool_names`: contains all tool names.
* `agent_scratchpad`: must be a MessagesPlaceholder. Contains previous agent actions and tool outputs as messages.
Here's an example:
.. code-block:: python
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
system = '''Assistant is a large language model trained by OpenAI.
Assistant is designed to be able to assist with a wide range of tasks, from answering \
@ -110,7 +110,7 @@ def create_json_chat_agent(
and provide valuable insights and information on a wide range of topics. Whether \
you need help with a specific question or just want to have a conversation about \
a particular topic, Assistant is here to assist.'''
human = '''TOOLS
------
Assistant can ask the user to use tools to look up information that may be helpful in \
@ -151,7 +151,7 @@ def create_json_chat_agent(
blob with a single action, and NOTHING else):
{input}'''
prompt = ChatPromptTemplate.from_messages(
[
("system", system),

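A minimal end-to-end sketch of this constructor, assuming the hub prompt id below provides the required input keys; the tool and model are illustrative, not part of this commit:

```python
# Hedged sketch: wiring create_json_chat_agent into an AgentExecutor.
from langchain import hub
from langchain.agents import AgentExecutor, create_json_chat_agent
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI

@tool
def multiply(a: int, b: int) -> int:
    """Multiply two integers."""
    return a * b

prompt = hub.pull("hwchase17/react-chat-json")  # assumed prompt id with tools/tool_names/agent_scratchpad
llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
agent = create_json_chat_agent(llm, [multiply], prompt)
executor = AgentExecutor(agent=agent, tools=[multiply], handle_parsing_errors=True)
executor.invoke({"input": "What is 6 times 7?"})
```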
View File

@ -201,10 +201,10 @@ try:
output_key: str = "output" #: :meta private:
limit_to_domains: Optional[Sequence[str]] = Field(default_factory=list) # type: ignore[arg-type]
"""Use to limit the domains that can be accessed by the API chain.
* For example, to limit to just the domain `https://www.example.com`, set
`limit_to_domains=["https://www.example.com"]`.
* The default value is an empty tuple, which means that no domains are
allowed by default. By design this will raise an error on instantiation.
* Use a None if you want to allow all domains by default -- this is not

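A hedged usage sketch for limit_to_domains; the API description string is an illustrative assumption:

```python
# Hedged sketch: restrict APIChain to a single allowed domain.
from langchain.chains import APIChain
from langchain_openai import ChatOpenAI

api_docs = """BASE URL: https://www.example.com
The /status endpoint returns the current service status as JSON."""  # assumed minimal API description

chain = APIChain.from_llm_and_api_docs(
    llm=ChatOpenAI(temperature=0),
    api_docs=api_docs,
    limit_to_domains=["https://www.example.com"],
)
```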
View File

@ -93,7 +93,7 @@ class BaseConversationalRetrievalChain(Chain):
"""An optional function to get a string of the chat history.
If None is provided, will use a default."""
response_if_no_docs_found: Optional[str] = None
"""If specified, the chain will return a fixed response if no docs
"""If specified, the chain will return a fixed response if no docs
are found for the question. """
model_config = ConfigDict(

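A hedged sketch of how response_if_no_docs_found is typically set; the in-memory vector store and model are illustrative assumptions:

```python
# Hedged sketch: return a canned reply when retrieval finds nothing.
from langchain.chains import ConversationalRetrievalChain
from langchain_core.vectorstores import InMemoryVectorStore
from langchain_openai import ChatOpenAI, OpenAIEmbeddings

vectorstore = InMemoryVectorStore.from_texts(
    ["Chapter 2 covers retrieval and vector stores."],
    embedding=OpenAIEmbeddings(),
)
chain = ConversationalRetrievalChain.from_llm(
    llm=ChatOpenAI(temperature=0),
    retriever=vectorstore.as_retriever(),
    response_if_no_docs_found="I could not find anything relevant in the documents.",
)
chain.invoke({"question": "What does chapter 2 cover?", "chat_history": []})
```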
View File

@ -88,7 +88,7 @@ class LLMChain(Chain):
output_key: str = "text" #: :meta private:
output_parser: BaseLLMOutputParser = Field(default_factory=StrOutputParser)
"""Output parser to use.
Defaults to one that takes the most likely string but does not change it
otherwise."""
return_final_only: bool = True
"""Whether to return only the final parsed result. Defaults to True.

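A minimal sketch of these fields in use; LLMChain is legacy, and the model choice below is an assumption:

```python
# Hedged sketch: an LLMChain with an explicit output parser.
from langchain.chains import LLMChain
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_openai import ChatOpenAI

chain = LLMChain(
    llm=ChatOpenAI(temperature=0),
    prompt=PromptTemplate.from_template("Summarize in one sentence: {text}"),
    output_parser=StrOutputParser(),
    return_final_only=True,
)
chain.invoke({"text": "LangChain chains compose prompts, models and output parsers."})
```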
View File

@ -63,18 +63,18 @@ Passage:
"""
from pydantic import BaseModel, Field
from langchain_anthropic import ChatAnthropic
class Joke(BaseModel):
setup: str = Field(description="The setup of the joke")
punchline: str = Field(description="The punchline to the joke")
# Or any other chat model that supports tools.
# Please refer to the documentation of structured_output
# to see an up to date list of which models support
# with_structured_output.
model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
structured_llm = model.with_structured_output(Joke)
structured_llm.invoke("Tell me a joke about cats.
Make sure to call the Joke function.")
"""
),
@ -133,18 +133,18 @@ def create_extraction_chain(
"""
from pydantic import BaseModel, Field
from langchain_anthropic import ChatAnthropic
class Joke(BaseModel):
setup: str = Field(description="The setup of the joke")
punchline: str = Field(description="The punchline to the joke")
# Or any other chat model that supports tools.
# Please refer to the documentation of structured_output
# to see an up to date list of which models support
# with_structured_output.
model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
structured_llm = model.with_structured_output(Joke)
structured_llm.invoke("Tell me a joke about cats.
Make sure to call the Joke function.")
"""
),

View File

@ -34,18 +34,18 @@ If a property is not present and is not required in the function parameters, do
"""
from pydantic import BaseModel, Field
from langchain_anthropic import ChatAnthropic
class Joke(BaseModel):
setup: str = Field(description="The setup of the joke")
punchline: str = Field(description="The punchline to the joke")
# Or any other chat model that supports tools.
# Please refer to the documentation of structured_output
# to see an up to date list of which models support
# with_structured_output.
model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
structured_llm = model.with_structured_output(Joke)
structured_llm.invoke("Tell me a joke about cats.
Make sure to call the Joke function.")
"""
),

View File

@ -59,7 +59,7 @@ class MultiRouteChain(Chain):
default_chain: Chain
"""Default chain to use when none of the destination chains are suitable."""
silent_errors: bool = False
"""If True, use default_chain when an invalid destination name is provided.
"""If True, use default_chain when an invalid destination name is provided.
Defaults to False."""
model_config = ConfigDict(

View File

@ -47,18 +47,18 @@ from pydantic import BaseModel
"""
from pydantic import BaseModel, Field
from langchain_anthropic import ChatAnthropic
class Joke(BaseModel):
setup: str = Field(description="The setup of the joke")
punchline: str = Field(description="The punchline to the joke")
# Or any other chat model that supports tools.
# Please refer to the documentation of structured_output
# to see an up to date list of which models support
# with_structured_output.
model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
structured_llm = model.with_structured_output(Joke)
structured_llm.invoke("Tell me a joke about cats.
Make sure to call the Joke function.")
"""
),
@ -168,15 +168,15 @@ def create_openai_fn_runnable(
class Joke(BaseModel):
setup: str = Field(description="The setup of the joke")
punchline: str = Field(description="The punchline to the joke")
# Or any other chat model that supports tools.
# Please refer to the documentation of structured_output
# to see an up to date list of which models support
# with_structured_output.
model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
structured_llm = model.with_structured_output(Joke)
structured_llm.invoke("Tell me a joke about cats.
Make sure to call the Joke function.")
"""
),
@ -201,39 +201,39 @@ def create_structured_output_runnable(
is passed in, it's assumed to already be a valid JsonSchema.
For best results, pydantic.BaseModels should have docstrings describing what
the schema represents and descriptions for the parameters.
llm: Language model to use. Assumed to support the OpenAI function-calling API
if mode is 'openai-function'. Assumed to support OpenAI response_format
parameter if mode is 'openai-json'.
prompt: BasePromptTemplate to pass to the model. If mode is 'openai-json' and
prompt has input variable 'output_schema' then the given output_schema
will be converted to a JsonSchema and inserted in the prompt.
output_parser: Output parser to use for parsing model outputs. By default
will be inferred from the function types. If pydantic.BaseModel is passed
in, then the OutputParser will try to parse outputs using the pydantic
class. Otherwise model outputs will be parsed as JSON.
mode: How structured outputs are extracted from the model. If 'openai-functions'
then OpenAI function calling is used with the deprecated 'functions',
'function_call' schema. If 'openai-tools' then OpenAI function
calling with the latest 'tools', 'tool_choice' schema is used. This is
recommended over 'openai-functions'. If 'openai-json' then OpenAI model
with response_format set to JSON is used.
enforce_function_usage: Only applies when mode is 'openai-tools' or
'openai-functions'. If True, then the model will be forced to use the given
output schema. If False, then the model can elect whether to use the output
schema.
return_single: Only applies when mode is 'openai-tools'. Whether to return a list of
structured outputs or a single one. If True and model does not return any
structured outputs then chain output is None. If False and model does not
return any structured outputs then chain output is an empty list.
kwargs: Additional named arguments.
Returns:
A runnable sequence that will return a structured output(s) matching the given
output_schema.
OpenAI tools example with Pydantic schema (mode='openai-tools'):
.. code-block:: python
from typing import Optional
from langchain.chains import create_structured_output_runnable
@ -251,23 +251,23 @@ def create_structured_output_runnable(
llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
prompt = ChatPromptTemplate.from_messages(
[
("system", "You are an extraction algorithm. Please extract every possible instance"),
("system", "You are an extraction algorithm. Please extract every possible instance"),
('human', '{input}')
]
)
structured_llm = create_structured_output_runnable(
RecordDog,
llm,
mode="openai-tools",
enforce_function_usage=True,
return_single=True
)
structured_llm.invoke({"input": "Harry was a chubby brown beagle who loved chicken"})
# -> RecordDog(name="Harry", color="brown", fav_food="chicken")
OpenAI tools example with dict schema (mode="openai-tools"):
.. code-block:: python
from typing import Optional
from langchain.chains import create_structured_output_runnable
@ -303,15 +303,15 @@ def create_structured_output_runnable(
llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
structured_llm = create_structured_output_runnable(
dog_schema,
llm,
mode="openai-tools",
enforce_function_usage=True,
return_single=True
)
structured_llm.invoke("Harry was a chubby brown beagle who loved chicken")
# -> {'name': 'Harry', 'color': 'brown', 'fav_food': 'chicken'}
OpenAI functions example (mode="openai-functions"):
.. code-block:: python
@ -332,7 +332,7 @@ def create_structured_output_runnable(
structured_llm = create_structured_output_runnable(Dog, llm, mode="openai-functions")
structured_llm.invoke("Harry was a chubby brown beagle who loved chicken")
# -> Dog(name="Harry", color="brown", fav_food="chicken")
OpenAI functions with prompt example:
.. code-block:: python
@ -361,7 +361,7 @@ def create_structured_output_runnable(
# -> Dog(name="Harry", color="brown", fav_food="chicken")
OpenAI json response format example (mode="openai-json"):
.. code-block:: python
from typing import Optional
from langchain.chains import create_structured_output_runnable
@ -379,9 +379,9 @@ def create_structured_output_runnable(
llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
structured_llm = create_structured_output_runnable(Dog, llm, mode="openai-json")
system = '''You are a world class assistant for extracting information in structured JSON formats. \
Extract a valid JSON blob from the user input that matches the following JSON Schema:
{output_schema}'''
prompt = ChatPromptTemplate.from_messages(
[("system", system), ("human", "{input}"),]

View File

@ -34,7 +34,7 @@ class EvaluatorType(str, Enum):
"""The pairwise string evaluator, which predicts the preferred prediction from
between two models."""
SCORE_STRING = "score_string"
"""The scored string evaluator, which gives a score between 1 and 10
"""The scored string evaluator, which gives a score between 1 and 10
to a prediction."""
LABELED_PAIRWISE_STRING = "labeled_pairwise_string"
"""The labeled pairwise string evaluator, which predicts the preferred prediction

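A hedged sketch of loading the scored-string evaluator named above; the model choice is an assumption:

```python
# Hedged sketch: grade a prediction with the "score_string" evaluator.
from langchain.evaluation import load_evaluator
from langchain_openai import ChatOpenAI

evaluator = load_evaluator("score_string", llm=ChatOpenAI(temperature=0))
result = evaluator.evaluate_strings(
    prediction="You can reset it from the account settings page.",
    input="How do I reset my password?",
)
result["score"]  # an integer from 1 to 10, per the docstring above
```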
View File

@ -23,7 +23,7 @@ from langchain.text_splitter import RecursiveCharacterTextSplitter
DEFAULT_HISTORY_TEMPLATE = """
Current date and time: {current_time}.
Potentially relevant timestamped excerpts of previous conversations (you
do not need to use these if irrelevant):
{previous_history}

View File

@ -10,7 +10,7 @@ class DatetimeOutputParser(BaseOutputParser[datetime]):
format: str = "%Y-%m-%dT%H:%M:%S.%fZ"
"""The string value that is used as the datetime format.
Update this to match the desired datetime format for your application.
"""

View File

@ -20,7 +20,7 @@ class YamlOutputParser(BaseOutputParser[T]):
pattern: re.Pattern = re.compile(
r"^```(?:ya?ml)?(?P<yaml>[^`]*)", re.MULTILINE | re.DOTALL
)
"""Regex pattern to match yaml code blocks
"""Regex pattern to match yaml code blocks
within triple backticks with optional yaml or yml prefix."""
def parse(self, text: str) -> T:

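A hedged sketch of the parser this pattern belongs to; the pydantic model is an illustrative assumption:

```python
# Hedged sketch: parse model output into a pydantic object.
# The regex above also accepts output wrapped in ```yaml fences; plain yaml works too.
from pydantic import BaseModel
from langchain.output_parsers import YamlOutputParser

class Item(BaseModel):
    name: str
    count: int

parser = YamlOutputParser(pydantic_object=Item)
parser.parse("name: widget\ncount: 3")  # -> Item(name='widget', count=3)
```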
View File

@ -36,7 +36,7 @@ class LLMChainFilter(BaseDocumentCompressor):
"""Filter that drops documents that aren't relevant to the query."""
llm_chain: Runnable
"""LLM wrapper to use for filtering documents.
"""LLM wrapper to use for filtering documents.
The chain prompt is expected to have a BooleanOutputParser."""
get_input: Callable[[str, Document], dict] = default_get_input

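A hedged sketch of the usual pairing with a compression retriever; the vector store contents and model are assumptions:

```python
# Hedged sketch: drop irrelevant documents with LLMChainFilter before they reach the LLM.
from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import LLMChainFilter
from langchain_core.vectorstores import InMemoryVectorStore
from langchain_openai import ChatOpenAI, OpenAIEmbeddings

vectorstore = InMemoryVectorStore.from_texts(
    ["Caching cut average latency by 40%.", "The office moved to a new building."],
    embedding=OpenAIEmbeddings(),
)
retriever = ContextualCompressionRetriever(
    base_compressor=LLMChainFilter.from_llm(ChatOpenAI(temperature=0)),
    base_retriever=vectorstore.as_retriever(),
)
retriever.invoke("What happened to latency after caching was added?")
```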
View File

@ -24,7 +24,7 @@ class CohereRerank(BaseDocumentCompressor):
model: str = "rerank-english-v2.0"
"""Model to use for reranking."""
cohere_api_key: Optional[str] = None
"""Cohere API key. Must be specified directly or via environment variable
"""Cohere API key. Must be specified directly or via environment variable
COHERE_API_KEY."""
user_agent: str = "langchain"
"""Identifier for the application making the request."""

View File

@ -73,8 +73,8 @@ class LLMListwiseRerank(BaseDocumentCompressor):
"""
reranker: Runnable[dict, list[Document]]
"""LLM-based reranker to use for filtering documents. Expected to take in a dict
with 'documents: Sequence[Document]' and 'query: str' keys and output a
"""LLM-based reranker to use for filtering documents. Expected to take in a dict
with 'documents: Sequence[Document]' and 'query: str' keys and output a
List[Document]."""
top_n: int = 3

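A hedged construction sketch, assuming from_llm forwards top_n to the compressor; documents and model are illustrative:

```python
# Hedged sketch: keep the most relevant documents via listwise reranking.
from langchain.retrievers.document_compressors import LLMListwiseRerank
from langchain_core.documents import Document
from langchain_openai import ChatOpenAI

reranker = LLMListwiseRerank.from_llm(llm=ChatOpenAI(model="gpt-4o-mini"), top_n=2)
docs = [
    Document(page_content="Caching cut average latency by 40%."),
    Document(page_content="The cafeteria menu changed in March."),
    Document(page_content="Latency spikes correlated with cache misses."),
]
reranker.compress_documents(docs, "What did the report conclude about latency?")
```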
View File

@ -31,12 +31,12 @@ class LineListOutputParser(BaseOutputParser[list[str]]):
# Default prompt
DEFAULT_QUERY_PROMPT = PromptTemplate(
input_variables=["question"],
template="""You are an AI language model assistant. Your task is
to generate 3 different versions of the given user
question to retrieve relevant documents from a vector database.
By generating multiple perspectives on the user question,
your goal is to help the user overcome some of the limitations
of distance-based similarity search. Provide these alternative
template="""You are an AI language model assistant. Your task is
to generate 3 different versions of the given user
question to retrieve relevant documents from a vector database.
By generating multiple perspectives on the user question,
your goal is to help the user overcome some of the limitations
of distance-based similarity search. Provide these alternative
questions separated by newlines. Original question: {question}""",
)

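A hedged sketch of the retriever this prompt feeds; the vector store contents and model are assumptions:

```python
# Hedged sketch: MultiQueryRetriever rewrites the question several ways using a prompt
# like DEFAULT_QUERY_PROMPT above, then merges the unique retrieved documents.
from langchain.retrievers.multi_query import MultiQueryRetriever
from langchain_core.vectorstores import InMemoryVectorStore
from langchain_openai import ChatOpenAI, OpenAIEmbeddings

vectorstore = InMemoryVectorStore.from_texts(
    ["Distance-based search can miss paraphrases of the query."],
    embedding=OpenAIEmbeddings(),
)
retriever = MultiQueryRetriever.from_llm(
    retriever=vectorstore.as_retriever(),
    llm=ChatOpenAI(temperature=0),
)
retriever.invoke("How does similarity search fall short?")
```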
View File

@ -66,7 +66,7 @@ class ParentDocumentRetriever(MultiVectorRetriever):
If none, then the parent documents will be the raw documents passed in."""
child_metadata_fields: Optional[Sequence[str]] = None
"""Metadata fields to leave in child documents. If None, leave all parent document
"""Metadata fields to leave in child documents. If None, leave all parent document
metadata.
"""

View File

@ -238,7 +238,7 @@ class SelfQueryRetriever(BaseRetriever):
"""The underlying vector store from which documents will be retrieved."""
query_constructor: Runnable[dict, StructuredQuery] = Field(alias="llm_chain")
"""The query constructor chain for generating the vector store queries.
llm_chain is legacy name kept for backwards compatibility."""
search_type: str = "similarity"
"""The search type to perform on the vector store."""

View File

@ -133,7 +133,7 @@ class RunEvalConfig(BaseModel):
:class:`EvaluatorType <langchain.evaluation.schema.EvaluatorType>`, such
as `EvaluatorType.QA`, the evaluator type string ("qa"), or a configuration for a
given evaluator
(e.g.,
:class:`RunEvalConfig.QA <langchain.smith.evaluation.config.RunEvalConfig.QA>`)."""
custom_evaluators: Optional[list[CUSTOM_EVALUATOR_TYPE]] = None
"""Custom evaluators to apply to the dataset run."""

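A hedged sketch of the three accepted forms for an evaluator entry; the specific criteria value is an assumption:

```python
# Hedged sketch: mixing an EvaluatorType, a type string, and a configured evaluator.
from langchain.evaluation import EvaluatorType
from langchain.smith import RunEvalConfig

eval_config = RunEvalConfig(
    evaluators=[
        EvaluatorType.QA,                                # enum member
        "criteria",                                      # evaluator type string
        RunEvalConfig.Criteria(criteria="conciseness"),  # configured evaluator
    ],
)
```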
View File

@ -143,7 +143,7 @@ ignore-regex = ".*(Stati Uniti|Tense=Pres).*"
ignore-words-list = "momento,collison,ned,foor,reworkd,parth,whats,aapply,mysogyny,unsecure,damon,crate,aadd,symbl,precesses,accademia,nin"
[tool.ruff.lint]
select = ["E", "F", "I", "PGH003", "T201", "D", "UP", "S"]
select = ["E", "F", "I", "PGH003", "T201", "D", "UP", "S", "W"]
pydocstyle.convention = "google"
pyupgrade.keep-runtime-typing = true

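Adding "W" to the select list enables the pycodestyle warning rules; nearly all of the fixes in this commit are W291/W293 trailing-whitespace removals. A rough illustration of what the rules flag (the exact command used for the auto-fixes is an assumption):

```python
# Hedged illustration of ruff's W rules; not part of the diff.
# W291/W293: trailing whitespace on code lines and blank lines (the bulk of this commit).
# W605: invalid escape sequence in a plain string literal.
# Auto-fix, roughly: ruff check --fix  (with "W" in the select list above)
import re

pattern = re.compile("\d+")  # W605: prefer the raw string r"\d+"
```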
View File

@ -71,7 +71,7 @@ def test_final_answer_after_parsable_action() -> None:
Observation: I can use the `foo` tool to achieve the goal.
Action: foo
Action Input: bar
Final Answer: The best pizza to eat is margaritta
"""
with pytest.raises(OutputParserException) as exception_info:
mrkl_output_parser.parse(llm_output)

View File

@ -63,7 +63,7 @@ def test_parse_with_language_and_spaces() -> None:
llm_output = """I can use the `foo` tool to achieve the goal.
Action:
```json
{
"action": "foo",
@ -139,24 +139,24 @@ class TestCreatePrompt:
expected = dedent(
"""
Respond to the human as helpfully and accurately as possible. You have access to the following tools:
foo: Test tool FOO, args: {'tool_input': {'type': 'string'}}
Use a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input).
Valid "action" values: "Final Answer" or foo
Provide only ONE action per $JSON_BLOB, as shown:
```
{
"action": $TOOL_NAME,
"action_input": $INPUT
}
```
Follow this format:
Question: input question to answer
Thought: consider previous and subsequent steps
Action:
@ -173,7 +173,7 @@ class TestCreatePrompt:
"action_input": "Final response to human"
}
```
Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation:.
Thought:
""" # noqa: E501
@ -205,25 +205,25 @@ class TestCreatePrompt:
expected = dedent(
"""
Respond to the human as helpfully and accurately as possible. You have access to the following tools:
foo: Test tool FOO, args: {'tool_input': {'type': 'string'}}
bar: Test tool BAR, args: {'tool_input': {'type': 'string'}}
Use a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input).
Valid "action" values: "Final Answer" or foo, bar
Provide only ONE action per $JSON_BLOB, as shown:
```
{
"action": $TOOL_NAME,
"action_input": $INPUT
}
```
Follow this format:
Question: input question to answer
Thought: consider previous and subsequent steps
Action:
@ -240,7 +240,7 @@ class TestCreatePrompt:
"action_input": "Final response to human"
}
```
Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation:.
Thought:
""" # noqa: E501

View File

@ -105,11 +105,11 @@ GRADE:""", # noqa: E501
1. The question asks who founded the Roanoke settlement.
2. The context states that the grade incorrect answer is Walter Raleigh.
3. The student's answer is "Sir Walter Raleigh".
4. The student's answer matches the context, which states the answer is Walter Raleigh.
5. The addition of "Sir" in the student's answer does not contradict the context. It provides extra detail about Walter Raleigh's title, but the core answer of Walter Raleigh is still correct.
@ -125,7 +125,7 @@ GRADE: CORRECT""", # noqa: E501
""" CORRECT
QUESTION: who was the first president of the united states?
STUDENT ANSWER: George Washington
TRUE ANSWER: George Washington was the first president of the United States.
GRADE:""",
{

View File

@ -14,7 +14,7 @@ from tests.unit_tests.llms.fake_llm import FakeLLM
def test_PairwiseStringResultOutputParser_parse() -> None:
output_parser = ScoreStringResultOutputParser()
text = """This answer is really good.
text = """This answer is really good.
Rating: [[10]]"""
got = output_parser.parse(text)
want = {
@ -24,12 +24,12 @@ Rating: [[10]]"""
assert got.get("reasoning") == want["reasoning"]
assert got.get("score") == want["score"]
text = """This answer is really good.
text = """This answer is really good.
Rating: 10"""
with pytest.raises(ValueError):
output_parser.parse(text)
text = """This answer is really good.
text = """This answer is really good.
Rating: [[0]]"""
# Not in range [1, 10]
with pytest.raises(ValueError):

View File

@ -63,12 +63,12 @@ class GenericFakeChatModel(BaseChatModel):
messages: Iterator[AIMessage]
"""Get an iterator over messages.
This can be expanded to accept other types like Callables / dicts / strings
to make the interface more generic if needed.
Note: if you want to pass a list, you can use `iter` to convert it to an iterator.
Please note that streaming is not implemented yet. We should try to implement it
in the future by delegating to invoke and then breaking the resulting output
into message chunks.

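A hedged sketch of the iter() tip from the docstring; the import location is an assumption (an equivalent class ships in langchain_core.language_models in recent versions):

```python
# Hedged sketch: feed a fixed list of replies through an iterator.
from langchain_core.language_models import GenericFakeChatModel  # assumed import location
from langchain_core.messages import AIMessage

model = GenericFakeChatModel(messages=iter([AIMessage(content="hello"), AIMessage(content="bye")]))
model.invoke("hi")  # -> AIMessage(content="hello")
model.invoke("hi")  # -> AIMessage(content="bye")
```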
View File

@ -38,7 +38,7 @@ action_input: The yamlOutputParser class is powerful
additional_fields: null
for_new_lines: |
not_escape_newline:
escape_newline:
```"""
DEF_RESULT_NO_BACKTICKS = """
@ -47,7 +47,7 @@ action_input: The yamlOutputParser class is powerful
additional_fields: null
for_new_lines: |
not_escape_newline:
escape_newline:
"""
@ -62,7 +62,7 @@ DEF_EXPECTED_RESULT = TestModel(
action=Actions.UPDATE,
action_input="The yamlOutputParser class is powerful",
additional_fields=None,
for_new_lines="not_escape_newline:\n escape_newline: \n",
for_new_lines="not_escape_newline:\n escape_newline:\n",
)