Mirror of https://github.com/hwchase17/langchain.git (synced 2025-07-13 16:36:06 +00:00)
add agent output parsers (#10790)

commit 386ef1e654 (parent 67c5950df3)
Changes to the existing OpenAI functions agent module (the module-level _parse_ai_message helper moves into the new OpenAIFunctionsAgentOutputParser):

@@ -1,9 +1,11 @@
 """Module implements an agent that uses OpenAI's APIs function enabled API."""
 import json
-from json import JSONDecodeError
 from typing import Any, List, Optional, Sequence, Tuple, Union
 
 from langchain.agents import BaseSingleActionAgent
+from langchain.agents.output_parsers.openai_functions import (
+    OpenAIFunctionsAgentOutputParser,
+)
 from langchain.callbacks.base import BaseCallbackManager
 from langchain.callbacks.manager import Callbacks
 from langchain.chat_models.openai import ChatOpenAI
@@ -18,7 +20,6 @@ from langchain.schema import (
     AgentAction,
     AgentFinish,
     BasePromptTemplate,
-    OutputParserException,
 )
 from langchain.schema.agent import AgentActionMessageLog
 from langchain.schema.language_model import BaseLanguageModel
@@ -97,46 +98,6 @@ def _format_intermediate_steps(
     return messages
 
 
-def _parse_ai_message(message: BaseMessage) -> Union[AgentAction, AgentFinish]:
-    """Parse an AI message."""
-    if not isinstance(message, AIMessage):
-        raise TypeError(f"Expected an AI message got {type(message)}")
-
-    function_call = message.additional_kwargs.get("function_call", {})
-
-    if function_call:
-        function_name = function_call["name"]
-        try:
-            _tool_input = json.loads(function_call["arguments"])
-        except JSONDecodeError:
-            raise OutputParserException(
-                f"Could not parse tool input: {function_call} because "
-                f"the `arguments` is not valid JSON."
-            )
-
-        # HACK HACK HACK:
-        # The code that encodes tool input into Open AI uses a special variable
-        # name called `__arg1` to handle old style tools that do not expose a
-        # schema and expect a single string argument as an input.
-        # We unpack the argument here if it exists.
-        # Open AI does not support passing in a JSON array as an argument.
-        if "__arg1" in _tool_input:
-            tool_input = _tool_input["__arg1"]
-        else:
-            tool_input = _tool_input
-
-        content_msg = f"responded: {message.content}\n" if message.content else "\n"
-
-        return _FunctionsAgentAction(
-            tool=function_name,
-            tool_input=tool_input,
-            log=f"\nInvoking: `{function_name}` with `{tool_input}`\n{content_msg}\n",
-            message_log=[message],
-        )
-
-    return AgentFinish(return_values={"output": message.content}, log=message.content)
-
-
 class OpenAIFunctionsAgent(BaseSingleActionAgent):
     """An Agent driven by OpenAIs function powered API.
 
@@ -216,7 +177,9 @@ class OpenAIFunctionsAgent(BaseSingleActionAgent):
             messages,
             callbacks=callbacks,
         )
-        agent_decision = _parse_ai_message(predicted_message)
+        agent_decision = OpenAIFunctionsAgentOutputParser._parse_ai_message(
+            predicted_message
+        )
         return agent_decision
 
     async def aplan(
@@ -245,7 +208,9 @@ class OpenAIFunctionsAgent(BaseSingleActionAgent):
         predicted_message = await self.llm.apredict_messages(
             messages, functions=self.functions, callbacks=callbacks
         )
-        agent_decision = _parse_ai_message(predicted_message)
+        agent_decision = OpenAIFunctionsAgentOutputParser._parse_ai_message(
+            predicted_message
+        )
         return agent_decision
 
     def return_stopped_response(
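The net effect of that refactor, as a minimal runnable sketch (the AIMessage here is a stand-in for whatever the agent's LLM call returns; it is not part of the diff):

from langchain.agents.output_parsers.openai_functions import (
    OpenAIFunctionsAgentOutputParser,
)
from langchain.schema.messages import AIMessage

# Stand-in for the message predicted by the model.
predicted_message = AIMessage(content="All done.")

# Previously base.py called its own module-level _parse_ai_message helper;
# the same logic is now reached through the parser class.
agent_decision = OpenAIFunctionsAgentOutputParser._parse_ai_message(predicted_message)
# With no function_call attached, this is an AgentFinish with output "All done."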
libs/langchain/langchain/agents/output_parsers/__init__.py (new file)
@@ -0,0 +1,32 @@
"""Parsing utils to go from string to AgentAction or Agent Finish.
|
||||||
|
|
||||||
|
AgentAction means that an action should be taken.
|
||||||
|
This contains the name of the tool to use, the input to pass to that tool,
|
||||||
|
and a `log` variable (which contains a log of the agent's thinking).
|
||||||
|
|
||||||
|
AgentFinish means that a response should be given.
|
||||||
|
This contains a `return_values` dictionary. This usually contains a
|
||||||
|
single `output` key, but can be extended to contain more.
|
||||||
|
This also contains a `log` variable (which contains a log of the agent's thinking).
|
||||||
|
"""
|
||||||
|
from langchain.agents.output_parsers.json import JSONAgentOutputParser
|
||||||
|
from langchain.agents.output_parsers.openai_functions import (
|
||||||
|
OpenAIFunctionsAgentOutputParser,
|
||||||
|
)
|
||||||
|
from langchain.agents.output_parsers.react_json_single_input import (
|
||||||
|
ReActJsonSingleInputOutputParser,
|
||||||
|
)
|
||||||
|
from langchain.agents.output_parsers.react_single_input import (
|
||||||
|
ReActSingleInputOutputParser,
|
||||||
|
)
|
||||||
|
from langchain.agents.output_parsers.self_ask import SelfAskOutputParser
|
||||||
|
from langchain.agents.output_parsers.xml import XMLAgentOutputParser
|
||||||
|
|
||||||
|
__all__ = [
|
||||||
|
"ReActSingleInputOutputParser",
|
||||||
|
"SelfAskOutputParser",
|
||||||
|
"ReActJsonSingleInputOutputParser",
|
||||||
|
"OpenAIFunctionsAgentOutputParser",
|
||||||
|
"XMLAgentOutputParser",
|
||||||
|
"JSONAgentOutputParser",
|
||||||
|
]
|
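Every class re-exported here subclasses AgentOutputParser and maps raw model output to either an AgentAction or an AgentFinish. A minimal sketch of that shared contract (the choice of ReActSingleInputOutputParser is arbitrary, not something the package prescribes):

from typing import Union

from langchain.agents.output_parsers import ReActSingleInputOutputParser
from langchain.schema import AgentAction, AgentFinish

parser = ReActSingleInputOutputParser()
result: Union[AgentAction, AgentFinish] = parser.parse(
    "Thought: agent thought here\n"
    "Action: search\n"
    "Action Input: what is the temperature in SF?"
)
assert isinstance(result, AgentAction)
assert result.tool == "search"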
libs/langchain/langchain/agents/output_parsers/json.py (new file)
@@ -0,0 +1,66 @@
from __future__ import annotations

import json
import logging
import re
from typing import Union

from langchain.agents.agent import AgentOutputParser
from langchain.schema import AgentAction, AgentFinish, OutputParserException

logger = logging.getLogger(__name__)


class JSONAgentOutputParser(AgentOutputParser):
    """Parses tool invocations and final answers in JSON format.

    Expects output to be in one of two formats.

    If the output signals that an action should be taken,
    should be in the below format. This will result in an AgentAction
    being returned.

    ```
    {
        "action": "search",
        "action_input": "2+2"
    }
    ```

    If the output signals that a final answer should be given,
    should be in the below format. This will result in an AgentFinish
    being returned.

    ```
    {
        "action": "Final Answer",
        "action_input": "4"
    }
    ```
    """

    pattern = re.compile(r"```(?:json)?\n(.*?)```", re.DOTALL)

    def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
        try:
            action_match = self.pattern.search(text)
            if action_match is not None:
                response = json.loads(action_match.group(1).strip(), strict=False)
                if isinstance(response, list):
                    # gpt turbo frequently ignores the directive to emit a single action
                    logger.warning("Got multiple action responses: %s", response)
                    response = response[0]
                if response["action"] == "Final Answer":
                    return AgentFinish({"output": response["action_input"]}, text)
                else:
                    return AgentAction(
                        response["action"], response.get("action_input", {}), text
                    )
            else:
                return AgentFinish({"output": text}, text)
        except Exception as e:
            raise OutputParserException(f"Could not parse LLM output: {text}") from e

    @property
    def _type(self) -> str:
        return "json-agent"
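A minimal usage sketch of the behaviour the docstring describes (the unit tests added further down in this commit exercise the same two paths):

from langchain.agents.output_parsers.json import JSONAgentOutputParser

parser = JSONAgentOutputParser()

# A fenced JSON blob naming a tool becomes an AgentAction.
action = parser.parse('```json\n{"action": "search", "action_input": "2+2"}\n```')
assert action.tool == "search" and action.tool_input == "2+2"

# "Final Answer" becomes an AgentFinish.
finish = parser.parse('```\n{"action": "Final Answer", "action_input": "4"}\n```')
assert finish.return_values == {"output": "4"}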
libs/langchain/langchain/agents/output_parsers/openai_functions.py (new file)
@@ -0,0 +1,84 @@
import json
from json import JSONDecodeError
from typing import List, Union

from langchain.agents.agent import AgentOutputParser
from langchain.schema import (
    AgentAction,
    AgentFinish,
    OutputParserException,
)
from langchain.schema.agent import AgentActionMessageLog
from langchain.schema.messages import (
    AIMessage,
    BaseMessage,
)
from langchain.schema.output import ChatGeneration, Generation


class OpenAIFunctionsAgentOutputParser(AgentOutputParser):
    """Parses a message into agent action/finish.

    Is meant to be used with OpenAI models, as it relies on the specific
    function_call parameter from OpenAI to convey what tools to use.

    If a function_call parameter is passed, then that is used to get
    the tool and tool input.

    If one is not passed, then the AIMessage is assumed to be the final output.
    """

    @property
    def _type(self) -> str:
        return "openai-functions-agent"

    @staticmethod
    def _parse_ai_message(message: BaseMessage) -> Union[AgentAction, AgentFinish]:
        """Parse an AI message."""
        if not isinstance(message, AIMessage):
            raise TypeError(f"Expected an AI message got {type(message)}")

        function_call = message.additional_kwargs.get("function_call", {})

        if function_call:
            function_name = function_call["name"]
            try:
                _tool_input = json.loads(function_call["arguments"])
            except JSONDecodeError:
                raise OutputParserException(
                    f"Could not parse tool input: {function_call} because "
                    f"the `arguments` is not valid JSON."
                )

            # HACK HACK HACK:
            # The code that encodes tool input into Open AI uses a special variable
            # name called `__arg1` to handle old style tools that do not expose a
            # schema and expect a single string argument as an input.
            # We unpack the argument here if it exists.
            # Open AI does not support passing in a JSON array as an argument.
            if "__arg1" in _tool_input:
                tool_input = _tool_input["__arg1"]
            else:
                tool_input = _tool_input

            content_msg = f"responded: {message.content}\n" if message.content else "\n"
            log = f"\nInvoking: `{function_name}` with `{tool_input}`\n{content_msg}\n"
            return AgentActionMessageLog(
                tool=function_name,
                tool_input=tool_input,
                log=log,
                message_log=[message],
            )

        return AgentFinish(
            return_values={"output": message.content}, log=message.content
        )

    def parse_result(self, result: List[Generation]) -> Union[AgentAction, AgentFinish]:
        if not isinstance(result[0], ChatGeneration):
            raise ValueError("This output parser only works on ChatGeneration output")
        message = result[0].message
        return self._parse_ai_message(message)

    def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
        raise ValueError("Can only parse messages")
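A minimal usage sketch, mirroring what the new unit tests below check: a function_call in additional_kwargs yields an AgentActionMessageLog, while a plain message yields an AgentFinish.

from langchain.agents.output_parsers.openai_functions import (
    OpenAIFunctionsAgentOutputParser,
)
from langchain.schema.messages import AIMessage

parser = OpenAIFunctionsAgentOutputParser()

msg = AIMessage(
    content="LLM thoughts.",
    additional_kwargs={
        "function_call": {"name": "foo", "arguments": '{"param": 42}'}
    },
)
action = parser.invoke(msg)
assert action.tool == "foo" and action.tool_input == {"param": 42}

finish = parser.invoke(AIMessage(content="Final response."))
assert finish.return_values == {"output": "Final response."}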
libs/langchain/langchain/agents/output_parsers/react_json_single_input.py (new file)
@@ -0,0 +1,76 @@
import json
import re
from typing import Union

from langchain.agents.agent import AgentOutputParser
from langchain.agents.chat.prompt import FORMAT_INSTRUCTIONS
from langchain.schema import AgentAction, AgentFinish, OutputParserException

FINAL_ANSWER_ACTION = "Final Answer:"


class ReActJsonSingleInputOutputParser(AgentOutputParser):
    """Parses ReAct-style LLM calls that have a single tool input in json format.

    Expects output to be in one of two formats.

    If the output signals that an action should be taken,
    should be in the below format. This will result in an AgentAction
    being returned.

    ```
    Thought: agent thought here
    Action:
    ```
    {
        "action": "search",
        "action_input": "what is the temperature in SF"
    }
    ```
    ```

    If the output signals that a final answer should be given,
    should be in the below format. This will result in an AgentFinish
    being returned.

    ```
    Thought: agent thought here
    Final Answer: The temperature is 100 degrees
    ```

    """

    pattern = re.compile(r"^.*?`{3}(?:json)?\n(.*?)`{3}.*?$", re.DOTALL)
    """Regex pattern to parse the output."""

    def get_format_instructions(self) -> str:
        return FORMAT_INSTRUCTIONS

    def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
        includes_answer = FINAL_ANSWER_ACTION in text
        try:
            found = self.pattern.search(text)
            if not found:
                # Fast fail to parse Final Answer.
                raise ValueError("action not found")
            action = found.group(1)
            response = json.loads(action.strip())
            includes_action = "action" in response
            if includes_answer and includes_action:
                raise OutputParserException(
                    "Parsing LLM output produced a final answer "
                    f"and a parse-able action: {text}"
                )
            return AgentAction(
                response["action"], response.get("action_input", {}), text
            )

        except Exception:
            if not includes_answer:
                raise OutputParserException(f"Could not parse LLM output: {text}")
            output = text.split(FINAL_ANSWER_ACTION)[-1].strip()
            return AgentFinish({"output": output}, text)

    @property
    def _type(self) -> str:
        return "react-json-single-input"
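A minimal usage sketch under the format the docstring spells out: the action arrives as a fenced JSON blob after "Action:", and a "Final Answer:" line short-circuits to an AgentFinish.

from langchain.agents.output_parsers.react_json_single_input import (
    ReActJsonSingleInputOutputParser,
)

parser = ReActJsonSingleInputOutputParser()

text = (
    "Thought: agent thought here\n"
    "Action:\n"
    '```\n{"action": "search", "action_input": "what is the temperature in SF"}\n```'
)
action = parser.parse(text)
assert action.tool == "search"

finish = parser.parse(
    "Thought: agent thought here\nFinal Answer: The temperature is 100 degrees"
)
assert finish.return_values == {"output": "The temperature is 100 degrees"}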
libs/langchain/langchain/agents/output_parsers/react_single_input.py (new file)
@@ -0,0 +1,92 @@
import re
from typing import Union

from langchain.agents.agent import AgentOutputParser
from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS
from langchain.schema import AgentAction, AgentFinish, OutputParserException

FINAL_ANSWER_ACTION = "Final Answer:"
MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE = (
    "Invalid Format: Missing 'Action:' after 'Thought:"
)
MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE = (
    "Invalid Format: Missing 'Action Input:' after 'Action:'"
)
FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE = (
    "Parsing LLM output produced both a final answer and a parse-able action:"
)


class ReActSingleInputOutputParser(AgentOutputParser):
    """Parses ReAct-style LLM calls that have a single tool input.

    Expects output to be in one of two formats.

    If the output signals that an action should be taken,
    should be in the below format. This will result in an AgentAction
    being returned.

    ```
    Thought: agent thought here
    Action: search
    Action Input: what is the temperature in SF?
    ```

    If the output signals that a final answer should be given,
    should be in the below format. This will result in an AgentFinish
    being returned.

    ```
    Thought: agent thought here
    Final Answer: The temperature is 100 degrees
    ```

    """

    def get_format_instructions(self) -> str:
        return FORMAT_INSTRUCTIONS

    def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
        includes_answer = FINAL_ANSWER_ACTION in text
        regex = (
            r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
        )
        action_match = re.search(regex, text, re.DOTALL)
        if action_match:
            if includes_answer:
                raise OutputParserException(
                    f"{FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE}: {text}"
                )
            action = action_match.group(1).strip()
            action_input = action_match.group(2)
            tool_input = action_input.strip(" ")

            return AgentAction(action, tool_input, text)

        elif includes_answer:
            return AgentFinish(
                {"output": text.split(FINAL_ANSWER_ACTION)[-1].strip()}, text
            )

        if not re.search(r"Action\s*\d*\s*:[\s]*(.*?)", text, re.DOTALL):
            raise OutputParserException(
                f"Could not parse LLM output: `{text}`",
                observation=MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE,
                llm_output=text,
                send_to_llm=True,
            )
        elif not re.search(
            r"[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)", text, re.DOTALL
        ):
            raise OutputParserException(
                f"Could not parse LLM output: `{text}`",
                observation=MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE,
                llm_output=text,
                send_to_llm=True,
            )
        else:
            raise OutputParserException(f"Could not parse LLM output: `{text}`")

    @property
    def _type(self) -> str:
        return "react-single-input"
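Beyond the two formats above, the parser raises OutputParserException with an observation and send_to_llm=True when the output is close to the expected format but incomplete, so an agent can feed the format error back to the model. A minimal sketch of that path:

from langchain.agents.output_parsers.react_single_input import (
    ReActSingleInputOutputParser,
)
from langchain.schema import OutputParserException

parser = ReActSingleInputOutputParser()

# An "Action:" line with no "Action Input:" line is rejected with a
# structured, retryable error rather than a bare failure.
try:
    parser.parse("Thought: agent thought here\nAction: search")
except OutputParserException as exc:
    print(exc)  # Could not parse LLM output: `...`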
libs/langchain/langchain/agents/output_parsers/self_ask.py (new file)
@@ -0,0 +1,47 @@
from typing import Sequence, Union

from langchain.agents.agent import AgentOutputParser
from langchain.schema import AgentAction, AgentFinish, OutputParserException


class SelfAskOutputParser(AgentOutputParser):
    """Parses self-ask style LLM calls.

    Expects output to be in one of two formats.

    If the output signals that an action should be taken,
    should be in the below format. This will result in an AgentAction
    being returned.

    ```
    Thoughts go here...
    Follow up: what is the temperature in SF?
    ```

    If the output signals that a final answer should be given,
    should be in the below format. This will result in an AgentFinish
    being returned.

    ```
    Thoughts go here...
    So the final answer is: The temperature is 100 degrees
    ```

    """

    followups: Sequence[str] = ("Follow up:", "Followup:")
    finish_string: str = "So the final answer is: "

    def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
        last_line = text.split("\n")[-1]
        if not any([follow in last_line for follow in self.followups]):
            if self.finish_string not in last_line:
                raise OutputParserException(f"Could not parse output: {text}")
            return AgentFinish({"output": last_line[len(self.finish_string) :]}, text)

        after_colon = text.split(":")[-1].strip()
        return AgentAction("Intermediate Answer", after_colon, text)

    @property
    def _type(self) -> str:
        return "self_ask"
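A minimal usage sketch of the two formats the docstring describes (follow-up question versus final answer):

from langchain.agents.output_parsers.self_ask import SelfAskOutputParser

parser = SelfAskOutputParser()

action = parser.parse("Thoughts go here...\nFollow up: what is the temperature in SF?")
assert action.tool == "Intermediate Answer"
assert action.tool_input == "what is the temperature in SF?"

finish = parser.parse("Thoughts go here...\nSo the final answer is: 4")
assert finish.return_values == {"output": "4"}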
libs/langchain/langchain/agents/output_parsers/xml.py (new file)
@@ -0,0 +1,51 @@
from typing import Union

from langchain.agents import AgentOutputParser
from langchain.schema import AgentAction, AgentFinish


class XMLAgentOutputParser(AgentOutputParser):
    """Parses tool invocations and final answers in XML format.

    Expects output to be in one of two formats.

    If the output signals that an action should be taken,
    should be in the below format. This will result in an AgentAction
    being returned.

    ```
    <tool>search</tool>
    <tool_input>what is 2 + 2</tool_input>
    ```

    If the output signals that a final answer should be given,
    should be in the below format. This will result in an AgentFinish
    being returned.

    ```
    <final_answer>Foo</final_answer>
    ```
    """

    def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
        if "</tool>" in text:
            tool, tool_input = text.split("</tool>")
            _tool = tool.split("<tool>")[1]
            _tool_input = tool_input.split("<tool_input>")[1]
            if "</tool_input>" in _tool_input:
                _tool_input = _tool_input.split("</tool_input>")[0]
            return AgentAction(tool=_tool, tool_input=_tool_input, log=text)
        elif "<final_answer>" in text:
            _, answer = text.split("<final_answer>")
            if "</final_answer>" in answer:
                answer = answer.split("</final_answer>")[0]
            return AgentFinish(return_values={"output": answer}, log=text)
        else:
            raise ValueError

    def get_format_instructions(self) -> str:
        raise NotImplementedError

    @property
    def _type(self) -> str:
        return "xml-agent"
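A minimal usage sketch of the tag format the docstring shows; the closing tag is optional because it may have been consumed as a stop token (the unit tests below cover both cases):

from langchain.agents.output_parsers.xml import XMLAgentOutputParser

parser = XMLAgentOutputParser()

action = parser.parse("<tool>search</tool><tool_input>what is 2 + 2</tool_input>")
assert action.tool == "search" and action.tool_input == "what is 2 + 2"

finish = parser.parse("<final_answer>Foo</final_answer>")
assert finish.return_values == {"output": "Foo"}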
The old self-ask output parser module is reduced to a backwards-compatible re-export:

@@ -1,25 +1,4 @@
-from typing import Sequence, Union
+from langchain.agents.output_parsers.self_ask import SelfAskOutputParser
 
-from langchain.agents.agent import AgentOutputParser
-from langchain.schema import AgentAction, AgentFinish, OutputParserException
-
-
-class SelfAskOutputParser(AgentOutputParser):
-    """Output parser for the self-ask agent."""
-
-    followups: Sequence[str] = ("Follow up:", "Followup:")
-    finish_string: str = "So the final answer is: "
-
-    def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
-        last_line = text.split("\n")[-1]
-        if not any([follow in last_line for follow in self.followups]):
-            if self.finish_string not in last_line:
-                raise OutputParserException(f"Could not parse output: {text}")
-            return AgentFinish({"output": last_line[len(self.finish_string) :]}, text)
-
-        after_colon = text.split(":")[-1].strip()
-        return AgentAction("Intermediate Answer", after_colon, text)
-
-    @property
-    def _type(self) -> str:
-        return "self_ask"
+# For backwards compatibility
+__all__ = ["SelfAskOutputParser"]
The XML agent module now imports XMLAgentOutputParser from the new package instead of defining it inline:

@@ -1,6 +1,7 @@
 from typing import Any, List, Tuple, Union
 
-from langchain.agents.agent import AgentOutputParser, BaseSingleActionAgent
+from langchain.agents.agent import BaseSingleActionAgent
+from langchain.agents.output_parsers.xml import XMLAgentOutputParser
 from langchain.agents.xml.prompt import agent_instructions
 from langchain.callbacks.base import Callbacks
 from langchain.chains.llm import LLMChain
@@ -9,29 +10,6 @@ from langchain.schema import AgentAction, AgentFinish
 from langchain.tools.base import BaseTool
 
 
-class XMLAgentOutputParser(AgentOutputParser):
-    """Output parser for XMLAgent."""
-
-    def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
-        if "</tool>" in text:
-            tool, tool_input = text.split("</tool>")
-            _tool = tool.split("<tool>")[1]
-            _tool_input = tool_input.split("<tool_input>")[1]
-            return AgentAction(tool=_tool, tool_input=_tool_input, log=text)
-        elif "<final_answer>" in text:
-            _, answer = text.split("<final_answer>")
-            return AgentFinish(return_values={"output": answer}, log=text)
-        else:
-            raise ValueError
-
-    def get_format_instructions(self) -> str:
-        raise NotImplementedError
-
-    @property
-    def _type(self) -> str:
-        return "xml-agent"
-
-
 class XMLAgent(BaseSingleActionAgent):
     """Agent that uses XML tags.
 
New file: unit tests for JSONAgentOutputParser
@@ -0,0 +1,28 @@
from langchain.agents.output_parsers.json import JSONAgentOutputParser
from langchain.schema.agent import AgentAction, AgentFinish


def test_tool_usage() -> None:
    parser = JSONAgentOutputParser()
    _input = """ ```
{
  "action": "search",
  "action_input": "2+2"
}
```"""
    output = parser.invoke(_input)
    expected_output = AgentAction(tool="search", tool_input="2+2", log=_input)
    assert output == expected_output


def test_finish() -> None:
    parser = JSONAgentOutputParser()
    _input = """```
{
  "action": "Final Answer",
  "action_input": "4"
}
```"""
    output = parser.invoke(_input)
    expected_output = AgentFinish(return_values={"output": "4"}, log=_input)
    assert output == expected_output
New file: unit tests for OpenAIFunctionsAgentOutputParser
@@ -0,0 +1,80 @@
import pytest

from langchain.agents.output_parsers.openai_functions import (
    OpenAIFunctionsAgentOutputParser,
)
from langchain.schema import AgentFinish, OutputParserException
from langchain.schema.agent import AgentActionMessageLog
from langchain.schema.messages import AIMessage, SystemMessage


def test_not_an_ai() -> None:
    parser = OpenAIFunctionsAgentOutputParser()
    err = f"Expected an AI message got {str(SystemMessage)}"
    with pytest.raises(TypeError, match=err):
        parser.invoke(SystemMessage(content="x"))


# Test: Model response (not a function call).
def test_model_response() -> None:
    parser = OpenAIFunctionsAgentOutputParser()
    msg = AIMessage(content="Model response.")
    result = parser.invoke(msg)

    assert isinstance(result, AgentFinish)
    assert result.return_values == {"output": "Model response."}
    assert result.log == "Model response."


# Test: Model response with a function call.
def test_func_call() -> None:
    parser = OpenAIFunctionsAgentOutputParser()
    msg = AIMessage(
        content="LLM thoughts.",
        additional_kwargs={
            "function_call": {"name": "foo", "arguments": '{"param": 42}'}
        },
    )
    result = parser.invoke(msg)

    assert isinstance(result, AgentActionMessageLog)
    assert result.tool == "foo"
    assert result.tool_input == {"param": 42}
    assert result.log == (
        "\nInvoking: `foo` with `{'param': 42}`\nresponded: LLM thoughts.\n\n"
    )
    assert result.message_log == [msg]


# Test: Model response with a function call (old style tools).
def test_func_call_oldstyle() -> None:
    parser = OpenAIFunctionsAgentOutputParser()
    msg = AIMessage(
        content="LLM thoughts.",
        additional_kwargs={
            "function_call": {"name": "foo", "arguments": '{"__arg1": "42"}'}
        },
    )
    result = parser.invoke(msg)

    assert isinstance(result, AgentActionMessageLog)
    assert result.tool == "foo"
    assert result.tool_input == "42"
    assert result.log == "\nInvoking: `foo` with `42`\nresponded: LLM thoughts.\n\n"
    assert result.message_log == [msg]


# Test: Invalid function call args.
def test_func_call_invalid() -> None:
    parser = OpenAIFunctionsAgentOutputParser()
    msg = AIMessage(
        content="LLM thoughts.",
        additional_kwargs={"function_call": {"name": "foo", "arguments": "{42]"}},
    )

    err = (
        "Could not parse tool input: {'name': 'foo', 'arguments': '{42]'} "
        "because the `arguments` is not valid JSON."
    )
    with pytest.raises(OutputParserException, match=err):
        parser.invoke(msg)
New file: unit tests for ReActJsonSingleInputOutputParser
@@ -0,0 +1,34 @@
from langchain.agents.output_parsers.react_json_single_input import (
    ReActJsonSingleInputOutputParser,
)
from langchain.schema.agent import AgentAction, AgentFinish


def test_action() -> None:
    """Test standard parsing of action/action input."""
    parser = ReActJsonSingleInputOutputParser()
    _input = """Thought: agent thought here
```
{
  "action": "search",
  "action_input": "what is the temperature in SF?"
}
```
"""
    output = parser.invoke(_input)
    expected_output = AgentAction(
        tool="search", tool_input="what is the temperature in SF?", log=_input
    )
    assert output == expected_output


def test_finish() -> None:
    """Test standard parsing of agent finish."""
    parser = ReActJsonSingleInputOutputParser()
    _input = """Thought: agent thought here
Final Answer: The temperature is 100"""
    output = parser.invoke(_input)
    expected_output = AgentFinish(
        return_values={"output": "The temperature is 100"}, log=_input
    )
    assert output == expected_output
New file: unit tests for ReActSingleInputOutputParser
@@ -0,0 +1,42 @@
import pytest

from langchain.agents.output_parsers.react_single_input import (
    ReActSingleInputOutputParser,
)
from langchain.schema.agent import AgentAction, AgentFinish
from langchain.schema.output_parser import OutputParserException


def test_action() -> None:
    """Test standard parsing of action/action input."""
    parser = ReActSingleInputOutputParser()
    _input = """Thought: agent thought here
Action: search
Action Input: what is the temperature in SF?"""
    output = parser.invoke(_input)
    expected_output = AgentAction(
        tool="search", tool_input="what is the temperature in SF?", log=_input
    )
    assert output == expected_output


def test_finish() -> None:
    """Test standard parsing of agent finish."""
    parser = ReActSingleInputOutputParser()
    _input = """Thought: agent thought here
Final Answer: The temperature is 100"""
    output = parser.invoke(_input)
    expected_output = AgentFinish(
        return_values={"output": "The temperature is 100"}, log=_input
    )
    assert output == expected_output


def test_action_with_finish() -> None:
    """Test that if final thought is in action/action input, error is raised."""
    parser = ReActSingleInputOutputParser()
    _input = """Thought: agent thought here
Action: search Final Answer:
Action Input: what is the temperature in SF?"""
    with pytest.raises(OutputParserException):
        parser.invoke(_input)
New file: unit tests for SelfAskOutputParser
@@ -0,0 +1,49 @@
from langchain.agents.output_parsers.self_ask import SelfAskOutputParser
from langchain.schema.agent import AgentAction, AgentFinish


def test_follow_up() -> None:
    """Test follow up parsing."""
    parser = SelfAskOutputParser()
    _input = "Follow up: what is two + 2"
    output = parser.invoke(_input)
    expected_output = AgentAction(
        tool="Intermediate Answer", tool_input="what is two + 2", log=_input
    )
    assert output == expected_output
    # Test that also handles one word by default
    _input = "Followup: what is two + 2"
    output = parser.invoke(_input)
    expected_output = AgentAction(
        tool="Intermediate Answer", tool_input="what is two + 2", log=_input
    )
    assert output == expected_output


def test_follow_up_custom() -> None:
    """Test follow up parsing for custom followups."""
    parser = SelfAskOutputParser(followups=("Now:",))
    _input = "Now: what is two + 2"
    output = parser.invoke(_input)
    expected_output = AgentAction(
        tool="Intermediate Answer", tool_input="what is two + 2", log=_input
    )
    assert output == expected_output


def test_finish() -> None:
    """Test standard finish."""
    parser = SelfAskOutputParser()
    _input = "So the final answer is: 4"
    output = parser.invoke(_input)
    expected_output = AgentFinish(return_values={"output": "4"}, log=_input)
    assert output == expected_output


def test_finish_custom() -> None:
    """Test custom finish."""
    parser = SelfAskOutputParser(finish_string="Finally: ")
    _input = "Finally: 4"
    output = parser.invoke(_input)
    expected_output = AgentFinish(return_values={"output": "4"}, log=_input)
    assert output == expected_output
New file: unit tests for XMLAgentOutputParser
@@ -0,0 +1,33 @@
from langchain.agents.output_parsers.xml import XMLAgentOutputParser
from langchain.schema.agent import AgentAction, AgentFinish


def test_tool_usage() -> None:
    parser = XMLAgentOutputParser()
    # Test when final closing </tool_input> is included
    _input = """<tool>search</tool><tool_input>foo</tool_input>"""
    output = parser.invoke(_input)
    expected_output = AgentAction(tool="search", tool_input="foo", log=_input)
    assert output == expected_output
    # Test when final closing </tool_input> is NOT included
    # This happens when it's used as a stop token
    _input = """<tool>search</tool><tool_input>foo"""
    output = parser.invoke(_input)
    expected_output = AgentAction(tool="search", tool_input="foo", log=_input)
    assert output == expected_output


def test_finish() -> None:
    parser = XMLAgentOutputParser()
    # Test when final closing <final_answer> is included
    _input = """<final_answer>bar</final_answer>"""
    output = parser.invoke(_input)
    expected_output = AgentFinish(return_values={"output": "bar"}, log=_input)
    assert output == expected_output

    # Test when final closing <final_answer> is NOT included
    # This happens when it's used as a stop token
    _input = """<final_answer>bar"""
    output = parser.invoke(_input)
    expected_output = AgentFinish(return_values={"output": "bar"}, log=_input)
    assert output == expected_output
Deleted file: the old unit tests for the module-level _parse_ai_message helper, superseded by the OpenAIFunctionsAgentOutputParser tests above
@@ -1,76 +0,0 @@
import pytest

from langchain.agents.openai_functions_agent.base import (
    _FunctionsAgentAction,
    _parse_ai_message,
)
from langchain.schema import AgentFinish, OutputParserException
from langchain.schema.messages import AIMessage, SystemMessage


# Test: _parse_ai_message() function.
class TestParseAIMessage:
    # Test: Pass Non-AIMessage.
    def test_not_an_ai(self) -> None:
        err = f"Expected an AI message got {str(SystemMessage)}"
        with pytest.raises(TypeError, match=err):
            _parse_ai_message(SystemMessage(content="x"))

    # Test: Model response (not a function call).
    def test_model_response(self) -> None:
        msg = AIMessage(content="Model response.")
        result = _parse_ai_message(msg)

        assert isinstance(result, AgentFinish)
        assert result.return_values == {"output": "Model response."}
        assert result.log == "Model response."

    # Test: Model response with a function call.
    def test_func_call(self) -> None:
        msg = AIMessage(
            content="LLM thoughts.",
            additional_kwargs={
                "function_call": {"name": "foo", "arguments": '{"param": 42}'}
            },
        )
        result = _parse_ai_message(msg)

        assert isinstance(result, _FunctionsAgentAction)
        assert result.tool == "foo"
        assert result.tool_input == {"param": 42}
        assert result.log == (
            "\nInvoking: `foo` with `{'param': 42}`\nresponded: LLM thoughts.\n\n"
        )
        assert result.message_log == [msg]

    # Test: Model response with a function call (old style tools).
    def test_func_call_oldstyle(self) -> None:
        msg = AIMessage(
            content="LLM thoughts.",
            additional_kwargs={
                "function_call": {"name": "foo", "arguments": '{"__arg1": "42"}'}
            },
        )
        result = _parse_ai_message(msg)

        assert isinstance(result, _FunctionsAgentAction)
        assert result.tool == "foo"
        assert result.tool_input == "42"
        assert result.log == (
            "\nInvoking: `foo` with `42`\nresponded: LLM thoughts.\n\n"
        )
        assert result.message_log == [msg]

    # Test: Invalid function call args.
    def test_func_call_invalid(self) -> None:
        msg = AIMessage(
            content="LLM thoughts.",
            additional_kwargs={"function_call": {"name": "foo", "arguments": "{42]"}},
        )

        err = (
            "Could not parse tool input: {'name': 'foo', 'arguments': '{42]'} "
            "because the `arguments` is not valid JSON."
        )
        with pytest.raises(OutputParserException, match=err):
            _parse_ai_message(msg)