Mirror of https://github.com/hwchase17/langchain.git (synced 2025-07-18 02:33:19 +00:00)
add handling on error (#541)
This commit is contained in:
parent 1631981f84
commit 5aefc2b7ce
@@ -272,9 +272,14 @@ class AgentExecutor(Chain, BaseModel):
                 self.callback_manager.on_tool_start(
                     {"name": str(chain)[:60] + "..."}, output, color="green"
                 )
-            # We then call the tool on the tool input to get an observation
-            observation = chain(output.tool_input)
-            color = color_mapping[output.tool]
+            try:
+                # We then call the tool on the tool input to get an observation
+                observation = chain(output.tool_input)
+                color = color_mapping[output.tool]
+            except Exception as e:
+                if self.verbose:
+                    self.callback_manager.on_tool_error(e)
+                raise e
         else:
             if self.verbose:
                 self.callback_manager.on_tool_start(
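The hunk above wraps the tool invocation in AgentExecutor in try/except: a failing tool now triggers the callback manager's on_tool_error hook (only when verbose is set) before the exception is re-raised to the caller. Below is a minimal, self-contained sketch of that pattern; LoggingHandler, run_tool, and broken_tool are hypothetical stand-ins, not LangChain's actual classes.

# Illustrative stand-ins only; not LangChain's real CallbackManager or tool classes.
class LoggingHandler:
    """Hypothetical handler that just prints what the new on_tool_error hook receives."""

    def on_tool_error(self, error: Exception) -> None:
        print(f"[tool error] {error!r}")


def run_tool(tool, tool_input, handler, verbose=True):
    """Mirrors the pattern in the hunk above: notify the handler, then re-raise."""
    try:
        return tool(tool_input)
    except Exception as e:
        if verbose:
            handler.on_tool_error(e)
        raise e


def broken_tool(_query):
    raise ValueError("search backend unavailable")


try:
    run_tool(broken_tool, "some query", LoggingHandler())
except ValueError:
    pass  # the original exception still propagates to the caller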
@@ -138,7 +138,12 @@ class Chain(BaseModel, ABC):
             self.callback_manager.on_chain_start(
                 {"name": self.__class__.__name__}, inputs
             )
-        outputs = self._call(inputs)
+        try:
+            outputs = self._call(inputs)
+        except Exception as e:
+            if self.verbose:
+                self.callback_manager.on_chain_error(e)
+            raise e
         if self.verbose:
             self.callback_manager.on_chain_end(outputs)
         self._validate_outputs(outputs)
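Chain gets the same treatment: self._call(inputs) now runs inside try/except, on_chain_error fires when verbose is on, and the original exception still propagates, which means on_chain_end is skipped on failure. A sketch of that ordering follows; RecordingHandler, call_chain, and failing_chain are hypothetical stand-ins, and only the hook names come from the diff.

# Hypothetical stand-ins; only the hook names (on_chain_start / on_chain_error /
# on_chain_end) and the verbose gating mirror the diff above.
class RecordingHandler:
    def __init__(self):
        self.events = []

    def on_chain_start(self, serialized, inputs):
        self.events.append("chain_start")

    def on_chain_error(self, error):
        self.events.append(f"chain_error:{type(error).__name__}")

    def on_chain_end(self, outputs):
        self.events.append("chain_end")


def call_chain(inner, inputs, handler, verbose=True):
    """Same ordering as the patched call path: start, attempt the call, error or end."""
    if verbose:
        handler.on_chain_start({"name": "FakeChain"}, inputs)
    try:
        outputs = inner(inputs)
    except Exception as e:
        if verbose:
            handler.on_chain_error(e)
        raise e
    if verbose:
        handler.on_chain_end(outputs)
    return outputs


def failing_chain(inputs):
    raise RuntimeError("boom")


handler = RecordingHandler()
try:
    call_chain(failing_chain, {"question": "hi"}, handler)
except RuntimeError:
    pass
assert handler.events == ["chain_start", "chain_error:RuntimeError"]  # on_chain_end never fires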
@@ -73,7 +73,12 @@ class BaseLLM(BaseModel, ABC):
             self.callback_manager.on_llm_start(
                 {"name": self.__class__.__name__}, prompts
             )
-        output = self._generate(prompts, stop=stop)
+        try:
+            output = self._generate(prompts, stop=stop)
+        except Exception as e:
+            if self.verbose:
+                self.callback_manager.on_llm_error(e)
+            raise e
         if self.verbose:
             self.callback_manager.on_llm_end(output)
         return output
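BaseLLM is patched the same way around self._generate. Taken together, the commit gives callback handlers three error hooks, each receiving the raised exception. Below is a sketch of a handler that consumes them; the ErrorLoggingHandler class and its use of the logging module are assumptions, only the method names and the single exception argument come from the diff.

import logging

logger = logging.getLogger("callback_errors")


class ErrorLoggingHandler:
    """Hypothetical handler covering the three new hooks; names and the
    single-exception argument mirror the calls made in this commit."""

    def on_llm_error(self, error: Exception) -> None:
        logger.error("LLM call failed: %r", error)

    def on_chain_error(self, error: Exception) -> None:
        logger.error("Chain run failed: %r", error)

    def on_tool_error(self, error: Exception) -> None:
        logger.error("Tool call failed: %r", error)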
@@ -90,11 +95,18 @@ class BaseLLM(BaseModel, ABC):
             else:
                 missing_prompts.append(prompt)
                 missing_prompt_idxs.append(i)
-        self.callback_manager.on_llm_start(
-            {"name": self.__class__.__name__}, missing_prompts
-        )
-        new_results = self._generate(missing_prompts, stop=stop)
-        self.callback_manager.on_llm_end(new_results)
+        if self.verbose:
+            self.callback_manager.on_llm_start(
+                {"name": self.__class__.__name__}, missing_prompts
+            )
+        try:
+            new_results = self._generate(missing_prompts, stop=stop)
+        except Exception as e:
+            if self.verbose:
+                self.callback_manager.on_llm_error(e)
+            raise e
+        if self.verbose:
+            self.callback_manager.on_llm_end(new_results)
         for i, result in enumerate(new_results.generations):
             existing_prompts[missing_prompt_idxs[i]] = result
             prompt = prompts[i]
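The final hunk covers the cached path: only prompts missing from the cache are regenerated, the surrounding on_llm_start and on_llm_end calls become verbose-gated, and the regeneration gets the same on_llm_error reporting before the exception is re-raised. The sketch below illustrates that flow under assumed names (generate_with_cache, a plain dict cache, a generate callable); it is not the library's implementation.

# Sketch of the cached path the last hunk touches, under assumed names.
from typing import Callable, Dict, List


def generate_with_cache(
    prompts: List[str],
    cache: Dict[str, str],
    generate: Callable[[List[str]], List[str]],
    handler,  # anything exposing on_llm_error, e.g. the ErrorLoggingHandler sketch above
    verbose: bool = True,
) -> List[str]:
    existing: Dict[int, str] = {}
    missing_prompts: List[str] = []
    missing_prompt_idxs: List[int] = []
    for i, prompt in enumerate(prompts):
        if prompt in cache:
            existing[i] = cache[prompt]
        else:
            missing_prompts.append(prompt)
            missing_prompt_idxs.append(i)
    try:
        # Only the prompts absent from the cache are regenerated.
        new_results = generate(missing_prompts)
    except Exception as e:
        if verbose:
            handler.on_llm_error(e)
        raise e
    for i, result in enumerate(new_results):
        existing[missing_prompt_idxs[i]] = result
        cache[missing_prompts[i]] = result
    return [existing[i] for i in range(len(prompts))]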