Mirror of https://github.com/hwchase17/langchain.git, synced 2025-06-23 07:09:31 +00:00
parent 5bb2952860
commit 7198a1cb22
@@ -48,6 +48,29 @@ class Agent(BaseModel):
     def _stop(self) -> List[str]:
         return [f"\n{self.observation_prefix}"]
 
+    def _construct_scratchpad(
+        self, intermediate_steps: List[Tuple[AgentAction, str]]
+    ) -> str:
+        """Construct the scratchpad that lets the agent continue its thought process."""
+        thoughts = ""
+        for action, observation in intermediate_steps:
+            thoughts += action.log
+            thoughts += f"\n{self.observation_prefix}{observation}\n{self.llm_prefix}"
+        return thoughts
+
+    def _get_next_action(self, full_inputs: Dict[str, str]) -> AgentAction:
+        full_output = self.llm_chain.predict(**full_inputs)
+        parsed_output = self._extract_tool_and_input(full_output)
+        while parsed_output is None:
+            full_output = self._fix_text(full_output)
+            full_inputs["agent_scratchpad"] += full_output
+            output = self.llm_chain.predict(**full_inputs)
+            full_output += output
+            parsed_output = self._extract_tool_and_input(full_output)
+        return AgentAction(
+            tool=parsed_output[0], tool_input=parsed_output[1], log=full_output
+        )
+
     def plan(
         self, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any
     ) -> Union[AgentAction, AgentFinish]:
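
For context, here is a minimal standalone sketch of the scratchpad format that the new _construct_scratchpad helper produces. The AgentAction stand-in class and the prefix values below are assumptions for illustration only; they are not the actual attributes defined on Agent.

from typing import List, NamedTuple, Tuple


class AgentAction(NamedTuple):
    # Local stand-in for langchain's AgentAction (tool, tool_input, log).
    tool: str
    tool_input: str
    log: str


def construct_scratchpad(
    intermediate_steps: List[Tuple[AgentAction, str]],
    observation_prefix: str = "Observation: ",  # assumed prefix value
    llm_prefix: str = "Thought:",  # assumed prefix value
) -> str:
    # Concatenate each action's raw LLM output (its log) with the tool's
    # observation, so the next prompt contains the full reasoning trace so far.
    thoughts = ""
    for action, observation in intermediate_steps:
        thoughts += action.log
        thoughts += f"\n{observation_prefix}{observation}\n{llm_prefix}"
    return thoughts


steps = [
    (
        AgentAction(
            tool="Search",
            tool_input="weather in SF",
            log="I should look this up.\nAction: Search\nAction Input: weather in SF",
        ),
        "Sunny, 64 degrees",
    )
]
print(construct_scratchpad(steps))
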
@@ -61,24 +84,14 @@ class Agent(BaseModel):
         Returns:
             Action specifying what tool to use.
         """
-        thoughts = ""
-        for action, observation in intermediate_steps:
-            thoughts += action.log
-            thoughts += f"\n{self.observation_prefix}{observation}\n{self.llm_prefix}"
+        thoughts = self._construct_scratchpad(intermediate_steps)
         new_inputs = {"agent_scratchpad": thoughts, "stop": self._stop}
         full_inputs = {**kwargs, **new_inputs}
-        full_output = self.llm_chain.predict(**full_inputs)
-        parsed_output = self._extract_tool_and_input(full_output)
-        while parsed_output is None:
-            full_output = self._fix_text(full_output)
-            full_inputs["agent_scratchpad"] += full_output
-            output = self.llm_chain.predict(**full_inputs)
-            full_output += output
-            parsed_output = self._extract_tool_and_input(full_output)
-        tool, tool_input = parsed_output
-        if tool == self.finish_tool_name:
-            return AgentFinish({"output": tool_input}, full_output)
-        return AgentAction(tool, tool_input, full_output)
+        action = self._get_next_action(full_inputs)
+        if action.tool == self.finish_tool_name:
+            return AgentFinish({"output": action.tool_input}, action.log)
+        return action
 
     def prepare_for_new_call(self) -> None:
         """Prepare the agent for new call, if needed."""
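
After the refactor, plan() reduces to the small decision shown in this rough, self-contained sketch: one helper picks the next action, and plan() only checks whether that action means the agent is finished. AgentAction, AgentFinish, and the "Final Answer" finish-tool name below are local stand-ins assumed for illustration, and the LLM call is stubbed out.

from typing import Any, Dict, NamedTuple, Union


class AgentAction(NamedTuple):
    # Local stand-in for langchain's AgentAction.
    tool: str
    tool_input: str
    log: str


class AgentFinish(NamedTuple):
    # Local stand-in for langchain's AgentFinish.
    return_values: Dict[str, Any]
    log: str


FINISH_TOOL_NAME = "Final Answer"  # assumed finish_tool_name for this sketch


def get_next_action(full_inputs: Dict[str, Any]) -> AgentAction:
    # Stub for the LLM call and parsing loop in _get_next_action:
    # pretend the model immediately chose the finish tool.
    return AgentAction(
        tool=FINISH_TOOL_NAME,
        tool_input="Sunny, 64 degrees",
        log="I now know the final answer.",
    )


def plan(full_inputs: Dict[str, Any]) -> Union[AgentAction, AgentFinish]:
    # Mirrors the refactored control flow of Agent.plan().
    action = get_next_action(full_inputs)
    if action.tool == FINISH_TOOL_NAME:
        # The finish tool's input becomes the agent's final output.
        return AgentFinish({"output": action.tool_input}, action.log)
    return action


print(plan({"agent_scratchpad": "", "stop": ["\nObservation: "]}))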