diff --git a/libs/core/langchain_core/callbacks/manager.py b/libs/core/langchain_core/callbacks/manager.py
index 1d06f4e6aa3..f675b230c87 100644
--- a/libs/core/langchain_core/callbacks/manager.py
+++ b/libs/core/langchain_core/callbacks/manager.py
@@ -127,7 +127,7 @@ def trace_as_chain_group(
     except Exception as e:
         if not group_cm.ended:
             run_manager.on_chain_error(e)
-        raise e
+        raise
     else:
         if not group_cm.ended:
             run_manager.on_chain_end({})
@@ -207,7 +207,7 @@ async def atrace_as_chain_group(
     except Exception as e:
         if not group_cm.ended:
             await run_manager.on_chain_error(e)
-        raise e
+        raise
     else:
         if not group_cm.ended:
             await run_manager.on_chain_end({})
@@ -289,7 +289,7 @@ def handle_event(
                     f" {repr(e)}"
                 )
                 if handler.raise_error:
-                    raise e
+                    raise
     finally:
         if coros:
             try:
@@ -388,7 +388,7 @@ async def _ahandle_event_for_handler(
             f"Error in {handler.__class__.__name__}.{event_name} callback: {repr(e)}"
         )
         if handler.raise_error:
-            raise e
+            raise


 async def ahandle_event(
diff --git a/libs/core/langchain_core/language_models/chat_models.py b/libs/core/langchain_core/language_models/chat_models.py
index f3d94e49f01..6aaaf7d4ca8 100644
--- a/libs/core/langchain_core/language_models/chat_models.py
+++ b/libs/core/langchain_core/language_models/chat_models.py
@@ -268,7 +268,7 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
                 f"Invalid input type {type(input)}. "
                 "Must be a PromptValue, str, or list of BaseMessages."
             )
-            raise ValueError(msg)
+            raise ValueError(msg)  # noqa: TRY004

     def invoke(
         self,
@@ -407,9 +407,6 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
                     generation = chunk
                 else:
                     generation += chunk
-            if generation is None:
-                msg = "No generation chunks were returned"
-                raise ValueError(msg)
         except BaseException as e:
             run_manager.on_llm_error(
                 e,
@@ -417,9 +414,14 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
                     generations=[[generation]] if generation else []
                 ),
             )
-            raise e
-        else:
-            run_manager.on_llm_end(LLMResult(generations=[[generation]]))
+            raise
+
+        if generation is None:
+            err = ValueError("No generation chunks were returned")
+            run_manager.on_llm_error(err, response=LLMResult(generations=[]))
+            raise err
+
+        run_manager.on_llm_end(LLMResult(generations=[[generation]]))

     async def astream(
         self,
@@ -485,19 +487,21 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
                     generation = chunk
                 else:
                     generation += chunk
-            if generation is None:
-                msg = "No generation chunks were returned"
-                raise ValueError(msg)
         except BaseException as e:
             await run_manager.on_llm_error(
                 e,
                 response=LLMResult(generations=[[generation]] if generation else []),
             )
-            raise e
-        else:
-            await run_manager.on_llm_end(
-                LLMResult(generations=[[generation]]),
-            )
+            raise
+
+        if generation is None:
+            err = ValueError("No generation chunks were returned")
+            await run_manager.on_llm_error(err, response=LLMResult(generations=[]))
+            raise err
+
+        await run_manager.on_llm_end(
+            LLMResult(generations=[[generation]]),
+        )

     # --- Custom methods ---

@@ -641,7 +645,7 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
         except BaseException as e:
             if run_managers:
                 run_managers[i].on_llm_error(e, response=LLMResult(generations=[]))
-            raise e
+            raise
         flattened_outputs = [
             LLMResult(generations=[res.generations], llm_output=res.llm_output)  # type: ignore[list-item]
             for res in results
@@ -1022,7 +1026,7 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
             return generation.message
         else:
             msg = "Unexpected generation type"
-            raise ValueError(msg)
+            raise ValueError(msg)  # noqa: TRY004

     async def _call_async(
         self,
@@ -1039,7 +1043,7 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
             return generation.message
         else:
             msg = "Unexpected generation type"
-            raise ValueError(msg)
+            raise ValueError(msg)  # noqa: TRY004

     @deprecated("0.1.7", alternative="invoke", removal="1.0")
     def call_as_llm(
@@ -1057,7 +1061,7 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
             return result.content
         else:
             msg = "Cannot use predict when output is not a string."
-            raise ValueError(msg)
+            raise ValueError(msg)  # noqa: TRY004

     @deprecated("0.1.7", alternative="invoke", removal="1.0")
     def predict_messages(
@@ -1082,7 +1086,7 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
             return result.content
         else:
             msg = "Cannot use predict when output is not a string."
-            raise ValueError(msg)
+            raise ValueError(msg)  # noqa: TRY004

     @deprecated("0.1.7", alternative="ainvoke", removal="1.0")
     async def apredict_messages(
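Note: the `raise e` → bare `raise` edits in the two files above (and throughout this patch) correspond to Ruff's TRY201 rule. Inside an `except` block, a bare `raise` re-raises the exception currently being handled with its traceback intact, while `raise e` re-raises from the current line and adds noise to the traceback. A minimal sketch of the callback pattern, with hypothetical names (`emit`, `handler`) that are not part of this diff:

    import logging

    logger = logging.getLogger(__name__)

    def emit(handler, event) -> None:
        """Invoke one callback handler; log failures without swallowing them."""
        try:
            handler(event)
        except Exception:
            logger.warning("handler %r failed", handler)
            raise  # bare raise: the traceback still points at the real failure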
diff --git a/libs/core/langchain_core/language_models/fake_chat_models.py b/libs/core/langchain_core/language_models/fake_chat_models.py
index a77a12d9d10..9bd62f1267e 100644
--- a/libs/core/langchain_core/language_models/fake_chat_models.py
+++ b/libs/core/langchain_core/language_models/fake_chat_models.py
@@ -242,7 +242,7 @@ class GenericFakeChatModel(BaseChatModel):
                 f"Expected generate to return a ChatResult, "
                 f"but got {type(chat_result)} instead."
             )
-            raise ValueError(msg)
+            raise ValueError(msg)  # noqa: TRY004

         message = chat_result.generations[0].message

@@ -251,7 +251,7 @@ class GenericFakeChatModel(BaseChatModel):
                 f"Expected invoke to return an AIMessage, "
                 f"but got {type(message)} instead."
             )
-            raise ValueError(msg)
+            raise ValueError(msg)  # noqa: TRY004

         content = message.content
diff --git a/libs/core/langchain_core/language_models/llms.py b/libs/core/langchain_core/language_models/llms.py
index c4867c6cbb9..4ba16f51696 100644
--- a/libs/core/langchain_core/language_models/llms.py
+++ b/libs/core/langchain_core/language_models/llms.py
@@ -337,7 +337,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
                 f"Invalid input type {type(input)}. "
                 "Must be a PromptValue, str, or list of BaseMessages."
             )
-            raise ValueError(msg)
+            raise ValueError(msg)  # noqa: TRY004

     def _get_ls_params(
         self,
@@ -448,7 +448,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
                 if return_exceptions:
                     return cast(list[str], [e for _ in inputs])
                 else:
-                    raise e
+                    raise
         else:
             batches = [
                 inputs[i : i + max_concurrency]
@@ -494,7 +494,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
                 if return_exceptions:
                     return cast(list[str], [e for _ in inputs])
                 else:
-                    raise e
+                    raise
         else:
             batches = [
                 inputs[i : i + max_concurrency]
@@ -562,9 +562,6 @@ class BaseLLM(BaseLanguageModel[str], ABC):
                     generation = chunk
                 else:
                     generation += chunk
-            if generation is None:
-                msg = "No generation chunks were returned"
-                raise ValueError(msg)
         except BaseException as e:
             run_manager.on_llm_error(
                 e,
@@ -572,9 +569,14 @@ class BaseLLM(BaseLanguageModel[str], ABC):
                     generations=[[generation]] if generation else []
                 ),
             )
-            raise e
-        else:
-            run_manager.on_llm_end(LLMResult(generations=[[generation]]))
+            raise
+
+        if generation is None:
+            err = ValueError("No generation chunks were returned")
+            run_manager.on_llm_error(err, response=LLMResult(generations=[]))
+            raise err
+
+        run_manager.on_llm_end(LLMResult(generations=[[generation]]))

     async def astream(
         self,
@@ -632,17 +634,19 @@ class BaseLLM(BaseLanguageModel[str], ABC):
                     generation = chunk
                 else:
                     generation += chunk
-            if generation is None:
-                msg = "No generation chunks were returned"
-                raise ValueError(msg)
         except BaseException as e:
             await run_manager.on_llm_error(
                 e,
                 response=LLMResult(generations=[[generation]] if generation else []),
             )
-            raise e
-        else:
-            await run_manager.on_llm_end(LLMResult(generations=[[generation]]))
+            raise
+
+        if generation is None:
+            err = ValueError("No generation chunks were returned")
+            await run_manager.on_llm_error(err, response=LLMResult(generations=[]))
+            raise err
+
+        await run_manager.on_llm_end(LLMResult(generations=[[generation]]))

     # --- Custom methods ---

@@ -790,7 +794,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
         except BaseException as e:
             for run_manager in run_managers:
                 run_manager.on_llm_error(e, response=LLMResult(generations=[]))
-            raise e
+            raise
         flattened_outputs = output.flatten()
         for manager, flattened_output in zip(run_managers, flattened_outputs):
             manager.on_llm_end(flattened_output)
@@ -850,7 +854,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
                 "Argument 'prompts' is expected to be of type List[str], received"
                 f" argument of type {type(prompts)}."
             )
-            raise ValueError(msg)
+            raise ValueError(msg)  # noqa: TRY004
         # Create callback managers
         if isinstance(metadata, list):
             metadata = [
@@ -1036,7 +1040,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
                     for run_manager in run_managers
                 ]
             )
-            raise e
+            raise
         flattened_outputs = output.flatten()
         await asyncio.gather(
             *[
@@ -1289,7 +1293,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
                 f"{type(prompt)}. If you want to run the LLM on multiple prompts, use "
                 "`generate` instead."
             )
-            raise ValueError(msg)
+            raise ValueError(msg)  # noqa: TRY004
         return (
             self.generate(
                 [prompt],
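Note: in the four `stream`/`astream` hunks above (chat_models.py and llms.py), the `generation is None` check moves out of the `try` block. Previously an empty stream raised `ValueError` inside the `try`, so it was reported through the same path as a mid-stream failure, with an odd partial-response payload; now the empty-stream case is detected after the `except` and reported to the run manager explicitly before raising. A condensed sketch of the control flow, assuming hypothetical `on_error`/`on_end` callables:

    def consume(chunks, on_error, on_end):
        """Accumulate a stream; treat an empty stream as its own explicit error."""
        total = None
        try:
            for chunk in chunks:
                total = chunk if total is None else total + chunk
        except BaseException:
            on_error()  # a genuine mid-stream failure
            raise
        if total is None:
            # Checked outside the try: not a stream failure, but callbacks
            # still hear about it before the raise.
            err = ValueError("No generation chunks were returned")
            on_error()
            raise err
        on_end(total)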
diff --git a/libs/core/langchain_core/messages/ai.py b/libs/core/langchain_core/messages/ai.py
index 074bbf9a568..c317bf099c7 100644
--- a/libs/core/langchain_core/messages/ai.py
+++ b/libs/core/langchain_core/messages/ai.py
@@ -363,6 +363,17 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):
             return self
         tool_calls = []
         invalid_tool_calls = []
+
+        def add_chunk_to_invalid_tool_calls(chunk: ToolCallChunk) -> None:
+            invalid_tool_calls.append(
+                create_invalid_tool_call(
+                    name=chunk["name"],
+                    args=chunk["args"],
+                    id=chunk["id"],
+                    error=None,
+                )
+            )
+
         for chunk in self.tool_call_chunks:
             try:
                 args_ = parse_partial_json(chunk["args"]) if chunk["args"] != "" else {}  # type: ignore[arg-type]
@@ -375,17 +386,9 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):
                         )
                     )
                 else:
-                    msg = "Malformed args."
-                    raise ValueError(msg)
+                    add_chunk_to_invalid_tool_calls(chunk)
             except Exception:
-                invalid_tool_calls.append(
-                    create_invalid_tool_call(
-                        name=chunk["name"],
-                        args=chunk["args"],
-                        id=chunk["id"],
-                        error=None,
-                    )
-                )
+                add_chunk_to_invalid_tool_calls(chunk)
         self.tool_calls = tool_calls
         self.invalid_tool_calls = invalid_tool_calls
         return self
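Note: the `add_chunk_to_invalid_tool_calls` helper above removes a raise-as-goto: the old code raised `ValueError("Malformed args.")` in the `else` branch purely so the `except` handler would record the chunk as invalid (the pattern Ruff flags as TRY301). Both paths now call the same helper directly. The shape of the fix, reduced to a self-contained example (`classify` and its inputs are illustrative, not from this diff):

    import json

    def classify(chunks: list[str]) -> tuple[list[dict], list[str]]:
        """Split raw chunks into parsed dicts and invalid leftovers."""
        valid: list[dict] = []
        invalid: list[str] = []

        def mark_invalid(chunk: str) -> None:
            invalid.append(chunk)

        for chunk in chunks:
            try:
                args = json.loads(chunk)
            except ValueError:
                mark_invalid(chunk)
                continue
            if isinstance(args, dict):
                valid.append(args)
            else:
                mark_invalid(chunk)  # was: raise, just to reach the except
        return valid, invalid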
diff --git a/libs/core/langchain_core/messages/utils.py b/libs/core/langchain_core/messages/utils.py
index 2559cb12d83..f7d823bf80a 100644
--- a/libs/core/langchain_core/messages/utils.py
+++ b/libs/core/langchain_core/messages/utils.py
@@ -124,7 +124,7 @@ def get_buffer_string(
             role = m.role
         else:
             msg = f"Got unsupported message type: {m}"
-            raise ValueError(msg)
+            raise ValueError(msg)  # noqa: TRY004
         message = f"{role}: {m.content}"
         if isinstance(m, AIMessage) and "function_call" in m.additional_kwargs:
             message += f"{m.additional_kwargs['function_call']}"
@@ -1400,7 +1400,7 @@ def _get_message_openai_role(message: BaseMessage) -> str:
         return message.role
     else:
         msg = f"Unknown BaseMessage type {message.__class__}."
-        raise ValueError(msg)
+        raise ValueError(msg)  # noqa: TRY004


 def _convert_to_openai_tool_calls(tool_calls: list[ToolCall]) -> list[dict]:
diff --git a/libs/core/langchain_core/output_parsers/openai_tools.py b/libs/core/langchain_core/output_parsers/openai_tools.py
index 9b05ed813fb..da1d8609fe5 100644
--- a/libs/core/langchain_core/output_parsers/openai_tools.py
+++ b/libs/core/langchain_core/output_parsers/openai_tools.py
@@ -282,19 +282,20 @@ class PydanticToolsParser(JsonOutputToolsParser):
         name_dict = {tool.__name__: tool for tool in self.tools}
         pydantic_objects = []
         for res in json_results:
-            try:
-                if not isinstance(res["args"], dict):
-                    msg = (
-                        f"Tool arguments must be specified as a dict, received: "
-                        f"{res['args']}"
-                    )
-                    raise ValueError(msg)
-                pydantic_objects.append(name_dict[res["type"]](**res["args"]))
-            except (ValidationError, ValueError) as e:
+            if not isinstance(res["args"], dict):
                 if partial:
                     continue
-                else:
-                    raise e
+                msg = (
+                    f"Tool arguments must be specified as a dict, received: "
+                    f"{res['args']}"
+                )
+                raise ValueError(msg)
+            try:
+                pydantic_objects.append(name_dict[res["type"]](**res["args"]))
+            except (ValidationError, ValueError):
+                if partial:
+                    continue
+                raise
         if self.first_tool_only:
             return pydantic_objects[0] if pydantic_objects else None
         else:
diff --git a/libs/core/langchain_core/output_parsers/pydantic.py b/libs/core/langchain_core/output_parsers/pydantic.py
index 844fc9a7038..a23bdc24aa1 100644
--- a/libs/core/langchain_core/output_parsers/pydantic.py
+++ b/libs/core/langchain_core/output_parsers/pydantic.py
@@ -66,10 +66,10 @@ class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
         try:
             json_object = super().parse_result(result)
             return self._parse_obj(json_object)
-        except OutputParserException as e:
+        except OutputParserException:
             if partial:
                 return None
-            raise e
+            raise

     def parse(self, text: str) -> TBaseModel:
         """Parse the output of an LLM call to a pydantic object.
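Note: the recurring `# noqa: TRY004` suppressions mark places where a failed `isinstance` check raises `ValueError`. Ruff's TRY004 prefers `TypeError` for wrong-type inputs, but changing the exception class would alter the contract for callers that already catch `ValueError`, so the patch keeps the historical type and silences the rule (a reasonable reading; the diff itself only adds the suppressions). What TRY004 would want on fresh code, as a hypothetical example:

    def role_of(message: object) -> str:
        """TRY004-conformant: a wrong *type* raises TypeError, not ValueError."""
        if isinstance(message, str):
            return "human"
        msg = f"Got unsupported message type: {message!r}"
        raise TypeError(msg)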
diff --git a/libs/core/langchain_core/prompts/chat.py b/libs/core/langchain_core/prompts/chat.py
index 8133d3b3687..11924e3328d 100644
--- a/libs/core/langchain_core/prompts/chat.py
+++ b/libs/core/langchain_core/prompts/chat.py
@@ -244,7 +244,7 @@ class MessagesPlaceholder(BaseMessagePromptTemplate):
                 f"variable {self.variable_name} should be a list of base messages, "
                 f"got {value} of type {type(value)}"
             )
-            raise ValueError(msg)
+            raise ValueError(msg)  # noqa: TRY004
         value = convert_to_messages(value)
         if self.n_messages:
             value = value[-self.n_messages :]
@@ -577,7 +577,7 @@ class _StringImageMessagePromptTemplate(BaseMessagePromptTemplate):
             return cls(prompt=prompt, **kwargs)
         else:
             msg = f"Invalid template: {template}"
-            raise ValueError(msg)
+            raise ValueError(msg)  # noqa: TRY004

     @classmethod
     def from_template_file(
@@ -1225,7 +1225,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
                 result.extend(message)
             else:
                 msg = f"Unexpected input: {message_template}"
-                raise ValueError(msg)
+                raise ValueError(msg)  # noqa: TRY004
         return result

     async def aformat_messages(self, **kwargs: Any) -> list[BaseMessage]:
@@ -1253,7 +1253,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
                 result.extend(message)
             else:
                 msg = f"Unexpected input: {message_template}"
-                raise ValueError(msg)
+                raise ValueError(msg)  # noqa: TRY004
         return result

     def partial(self, **kwargs: Any) -> ChatPromptTemplate:
@@ -1399,7 +1399,7 @@ def _create_template_from_message_type(
         var_name_wrapped, is_optional = template
         if not isinstance(var_name_wrapped, str):
             msg = f"Expected variable name to be a string. Got: {var_name_wrapped}"
-            raise ValueError(msg)
+            raise ValueError(msg)  # noqa: TRY004
         if var_name_wrapped[0] != "{" or var_name_wrapped[-1] != "}":
             msg = (
                 f"Invalid placeholder template: {var_name_wrapped}."
diff --git a/libs/core/langchain_core/prompts/loading.py b/libs/core/langchain_core/prompts/loading.py
index d6928c72717..da89972e757 100644
--- a/libs/core/langchain_core/prompts/loading.py
+++ b/libs/core/langchain_core/prompts/loading.py
@@ -78,7 +78,7 @@ def _load_examples(config: dict) -> dict:
         config["examples"] = examples
     else:
         msg = "Invalid examples format. Only list or string are supported."
-        raise ValueError(msg)
+        raise ValueError(msg)  # noqa: TRY004
     return config
diff --git a/libs/core/langchain_core/retrievers.py b/libs/core/langchain_core/retrievers.py
index 0f70c6e4927..d1a5cb8cf93 100644
--- a/libs/core/langchain_core/retrievers.py
+++ b/libs/core/langchain_core/retrievers.py
@@ -262,7 +262,7 @@ class BaseRetriever(RunnableSerializable[RetrieverInput, RetrieverOutput], ABC):
             result = self._get_relevant_documents(input, **_kwargs)
         except Exception as e:
             run_manager.on_retriever_error(e)
-            raise e
+            raise
         else:
             run_manager.on_retriever_end(
                 result,
@@ -325,7 +325,7 @@ class BaseRetriever(RunnableSerializable[RetrieverInput, RetrieverOutput], ABC):
             result = await self._aget_relevant_documents(input, **_kwargs)
         except Exception as e:
             await run_manager.on_retriever_error(e)
-            raise e
+            raise
         else:
             await run_manager.on_retriever_end(
                 result,
diff --git a/libs/core/langchain_core/runnables/fallbacks.py b/libs/core/langchain_core/runnables/fallbacks.py
index 97d450015ea..f932ce3589e 100644
--- a/libs/core/langchain_core/runnables/fallbacks.py
+++ b/libs/core/langchain_core/runnables/fallbacks.py
@@ -188,7 +188,7 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
                 last_error = e
             except BaseException as e:
                 run_manager.on_chain_error(e)
-                raise e
+                raise
             else:
                 run_manager.on_chain_end(output)
                 return output
@@ -241,7 +241,7 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
                 last_error = e
             except BaseException as e:
                 await run_manager.on_chain_error(e)
-                raise e
+                raise
             else:
                 await run_manager.on_chain_end(output)
                 return output
@@ -488,7 +488,7 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
                 last_error = e
             except BaseException as e:
                 run_manager.on_chain_error(e)
-                raise e
+                raise
             else:
                 first_error = None
                 break
@@ -507,7 +507,7 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
                 output = None
         except BaseException as e:
             run_manager.on_chain_error(e)
-            raise e
+            raise
         run_manager.on_chain_end(output)

     async def astream(
@@ -558,7 +558,7 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
                 last_error = e
             except BaseException as e:
                 await run_manager.on_chain_error(e)
-                raise e
+                raise
             else:
                 first_error = None
                 break
@@ -577,7 +577,7 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
                 output = None
         except BaseException as e:
             await run_manager.on_chain_error(e)
-            raise e
+            raise
         await run_manager.on_chain_end(output)

     def __getattr__(self, name: str) -> Any:
diff --git a/libs/core/langchain_core/runnables/graph.py b/libs/core/langchain_core/runnables/graph.py
index 53399a52fc6..84b86994dbf 100644
--- a/libs/core/langchain_core/runnables/graph.py
+++ b/libs/core/langchain_core/runnables/graph.py
@@ -50,9 +50,9 @@ def is_uuid(value: str) -> bool:
     """
     try:
         UUID(value)
-        return True
     except ValueError:
         return False
+    return True


 class Edge(NamedTuple):
diff --git a/libs/core/langchain_core/runnables/history.py b/libs/core/langchain_core/runnables/history.py
index e6a88e4d224..d2040a3f3fb 100644
--- a/libs/core/langchain_core/runnables/history.py
+++ b/libs/core/langchain_core/runnables/history.py
@@ -481,7 +481,7 @@ class RunnableWithMessageHistory(RunnableBindingBase):
             f"Expected str, BaseMessage, List[BaseMessage], or Tuple[BaseMessage]. "
            f"Got {input_val}."
         )
-        raise ValueError(msg)
+        raise ValueError(msg)  # noqa: TRY004

     def _get_output_messages(
         self, output_val: Union[str, BaseMessage, Sequence[BaseMessage], dict]
@@ -517,7 +517,7 @@ class RunnableWithMessageHistory(RunnableBindingBase):
             f"Expected str, BaseMessage, List[BaseMessage], or Tuple[BaseMessage]. "
             f"Got {output_val}."
         )
-        raise ValueError(msg)
+        raise ValueError(msg)  # noqa: TRY004

     def _enter_history(self, input: Any, config: RunnableConfig) -> list[BaseMessage]:
         hist: BaseChatMessageHistory = config["configurable"]["message_history"]
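Note: `is_uuid` in graph.py above shows the TRY300-style restructuring used in several files: only the statement that can actually raise stays inside the `try`, and the success-path `return` moves after the `except`. The rewritten function, verbatim apart from the added import:

    from uuid import UUID

    def is_uuid(value: str) -> bool:
        """True if `value` parses as a UUID."""
        try:
            UUID(value)
        except ValueError:
            return False
        return True  # success path lives after the try/except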
diff --git a/libs/core/langchain_core/runnables/passthrough.py b/libs/core/langchain_core/runnables/passthrough.py
index 9881def4669..b0da175ae32 100644
--- a/libs/core/langchain_core/runnables/passthrough.py
+++ b/libs/core/langchain_core/runnables/passthrough.py
@@ -474,7 +474,7 @@ class RunnableAssign(RunnableSerializable[dict[str, Any], dict[str, Any]]):
     ) -> dict[str, Any]:
         if not isinstance(input, dict):
             msg = "The input to RunnablePassthrough.assign() must be a dict."
-            raise ValueError(msg)
+            raise ValueError(msg)  # noqa: TRY004

         return {
             **input,
@@ -502,7 +502,7 @@ class RunnableAssign(RunnableSerializable[dict[str, Any], dict[str, Any]]):
     ) -> dict[str, Any]:
         if not isinstance(input, dict):
             msg = "The input to RunnablePassthrough.assign() must be a dict."
-            raise ValueError(msg)
+            raise ValueError(msg)  # noqa: TRY004

         return {
             **input,
@@ -555,7 +555,7 @@ class RunnableAssign(RunnableSerializable[dict[str, Any], dict[str, Any]]):
         for chunk in for_passthrough:
             if not isinstance(chunk, dict):
                 msg = "The input to RunnablePassthrough.assign() must be a dict."
-                raise ValueError(msg)
+                raise ValueError(msg)  # noqa: TRY004
             # remove mapper keys from passthrough chunk, to be overwritten by map
             filtered = AddableDict(
                 {k: v for k, v in chunk.items() if k not in mapper_keys}
@@ -605,7 +605,7 @@ class RunnableAssign(RunnableSerializable[dict[str, Any], dict[str, Any]]):
         async for chunk in for_passthrough:
             if not isinstance(chunk, dict):
                 msg = "The input to RunnablePassthrough.assign() must be a dict."
-                raise ValueError(msg)
+                raise ValueError(msg)  # noqa: TRY004
             # remove mapper keys from passthrough chunk, to be overwritten by map
             # output
             filtered = AddableDict(
@@ -708,7 +708,7 @@ class RunnablePick(RunnableSerializable[dict[str, Any], dict[str, Any]]):
     def _pick(self, input: dict[str, Any]) -> Any:
         if not isinstance(input, dict):
             msg = "The input to RunnablePassthrough.assign() must be a dict."
-            raise ValueError(msg)
+            raise ValueError(msg)  # noqa: TRY004

         if isinstance(self.keys, str):
             return input.get(self.keys)
diff --git a/libs/core/langchain_core/runnables/utils.py b/libs/core/langchain_core/runnables/utils.py
index 9c7afea107c..75063c7db58 100644
--- a/libs/core/langchain_core/runnables/utils.py
+++ b/libs/core/langchain_core/runnables/utils.py
@@ -397,9 +397,9 @@ def get_lambda_source(func: Callable) -> Optional[str]:
         tree = ast.parse(textwrap.dedent(code))
         visitor = GetLambdaSource()
         visitor.visit(tree)
-        return visitor.source if visitor.count == 1 else name
     except (SyntaxError, TypeError, OSError, SystemError):
         return name
+    return visitor.source if visitor.count == 1 else name


 @lru_cache(maxsize=256)
@@ -440,10 +440,11 @@ def get_function_nonlocals(func: Callable) -> list[Any]:
                     break
             else:
                 values.append(vv)
-        return values
     except (SyntaxError, TypeError, OSError, SystemError):
         return []

+    return values
+

 def indent_lines_after_first(text: str, prefix: str) -> str:
     """Indent all lines of text after the first line.
diff --git a/libs/core/langchain_core/tools/base.py b/libs/core/langchain_core/tools/base.py
index 625d532da32..f0833cdc24a 100644
--- a/libs/core/langchain_core/tools/base.py
+++ b/libs/core/langchain_core/tools/base.py
@@ -680,6 +680,7 @@ class ChildTool(BaseTool):

         content = None
         artifact = None
+        status = "success"
         error_to_raise: Union[Exception, KeyboardInterrupt, None] = None
         try:
             child_config = patch_config(config, callbacks=run_manager.get_child())
@@ -699,26 +700,25 @@ class ChildTool(BaseTool):
                         f"expected. Instead generated response of type: "
                         f"{type(response)}."
                     )
-                    raise ValueError(msg)
-                content, artifact = response
+                    error_to_raise = ValueError(msg)
+                else:
+                    content, artifact = response
             else:
                 content = response
-            status = "success"
         except (ValidationError, ValidationErrorV1) as e:
             if not self.handle_validation_error:
                 error_to_raise = e
             else:
                 content = _handle_validation_error(e, flag=self.handle_validation_error)
-            status = "error"
+                status = "error"
         except ToolException as e:
             if not self.handle_tool_error:
                 error_to_raise = e
             else:
                 content = _handle_tool_error(e, flag=self.handle_tool_error)
-            status = "error"
+                status = "error"
         except (Exception, KeyboardInterrupt) as e:
             error_to_raise = e
-            status = "error"

         if error_to_raise:
             run_manager.on_tool_error(error_to_raise)
@@ -789,6 +789,7 @@ class ChildTool(BaseTool):
         )
         content = None
         artifact = None
+        status = "success"
         error_to_raise: Optional[Union[Exception, KeyboardInterrupt]] = None
         try:
             tool_args, tool_kwargs = self._to_args_and_kwargs(tool_input, tool_call_id)
@@ -816,26 +817,25 @@ class ChildTool(BaseTool):
                         f"expected. Instead generated response of type: "
                         f"{type(response)}."
                     )
-                    raise ValueError(msg)
-                content, artifact = response
+                    error_to_raise = ValueError(msg)
+                else:
+                    content, artifact = response
             else:
                 content = response
-            status = "success"
         except ValidationError as e:
             if not self.handle_validation_error:
                 error_to_raise = e
             else:
                 content = _handle_validation_error(e, flag=self.handle_validation_error)
-            status = "error"
+                status = "error"
         except ToolException as e:
             if not self.handle_tool_error:
                 error_to_raise = e
             else:
                 content = _handle_tool_error(e, flag=self.handle_tool_error)
-            status = "error"
+                status = "error"
         except (Exception, KeyboardInterrupt) as e:
             error_to_raise = e
-            status = "error"

         if error_to_raise:
             await run_manager.on_tool_error(error_to_raise)
@@ -873,7 +873,7 @@ def _handle_validation_error(
             f"Got unexpected type of `handle_validation_error`. Expected bool, "
             f"str or callable. Received: {flag}"
         )
-        raise ValueError(msg)
+        raise ValueError(msg)  # noqa: TRY004
     return content


@@ -893,7 +893,7 @@ def _handle_tool_error(
             f"Got unexpected type of `handle_tool_error`. Expected bool, str "
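Note: in the two large tools/base.py hunks, `status` now starts as "success" and flips to "error" only when a handled exception actually produces content, while exceptions that will be re-raised are parked in `error_to_raise` instead of being raised mid-`try`. That keeps a single exit point where `on_tool_error` fires exactly once. A stripped-down sketch of that shape (`run_tool` and its callbacks are illustrative, not this module's API):

    def run_tool(call, on_error, on_end):
        """Single-exit error handling: defer the raise until callbacks fire."""
        status = "success"      # assume success; handled errors flip it
        error_to_raise = None   # raised errors are parked, not raised in-try
        content = None
        try:
            content = call()
        except Exception as exc:
            error_to_raise = exc
        if error_to_raise is not None:
            on_error(error_to_raise)
            raise error_to_raise
        on_end(content, status)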
Received: {flag}" ) - raise ValueError(msg) + raise ValueError(msg) # noqa: TRY004 return content diff --git a/libs/core/langchain_core/tracers/evaluation.py b/libs/core/langchain_core/tracers/evaluation.py index b7bd4508a14..425c1c01222 100644 --- a/libs/core/langchain_core/tracers/evaluation.py +++ b/libs/core/langchain_core/tracers/evaluation.py @@ -139,7 +139,7 @@ class EvaluatorCallbackHandler(BaseTracer): f"{evaluator.__class__.__name__}: {repr(e)}", exc_info=True, ) - raise e + raise example_id = str(run.reference_example_id) with self.lock: for res in eval_results: diff --git a/libs/core/langchain_core/utils/html.py b/libs/core/langchain_core/utils/html.py index b3ff3e5b30c..f54e52024cc 100644 --- a/libs/core/langchain_core/utils/html.py +++ b/libs/core/langchain_core/utils/html.py @@ -97,8 +97,7 @@ def extract_sub_links( if continue_on_failure: logger.warning(f"Unable to load link {link}. Raised exception:\n\n{e}") continue - else: - raise e + raise results = [] for path in absolute_paths: diff --git a/libs/core/langchain_core/utils/mustache.py b/libs/core/langchain_core/utils/mustache.py index aa54e4ede6c..ee2ed8f2528 100644 --- a/libs/core/langchain_core/utils/mustache.py +++ b/libs/core/langchain_core/utils/mustache.py @@ -53,13 +53,14 @@ def grab_literal(template: str, l_del: str) -> tuple[str, str]: # Look for the next tag and move the template to it literal, template = template.split(l_del, 1) _CURRENT_LINE += literal.count("\n") - return (literal, template) # There are no more tags in the template? except ValueError: # Then the rest of the template is a literal return (template, "") + return (literal, template) + def l_sa_check(template: str, literal: str, is_standalone: bool) -> bool: """Do a preliminary check to see if a tag could be a standalone. diff --git a/libs/core/langchain_core/utils/usage.py b/libs/core/langchain_core/utils/usage.py index ce198e268ba..38c38209a56 100644 --- a/libs/core/langchain_core/utils/usage.py +++ b/libs/core/langchain_core/utils/usage.py @@ -33,5 +33,5 @@ def _dict_int_op( msg = ( f"Unknown value types: {types}. Only dict and int values are supported." ) - raise ValueError(msg) + raise ValueError(msg) # noqa: TRY004 return combined diff --git a/libs/core/langchain_core/vectorstores/utils.py b/libs/core/langchain_core/vectorstores/utils.py index add5e2ef4ab..6e407d44920 100644 --- a/libs/core/langchain_core/vectorstores/utils.py +++ b/libs/core/langchain_core/vectorstores/utils.py @@ -54,11 +54,6 @@ def _cosine_similarity(x: Matrix, y: Matrix) -> np.ndarray: raise ValueError(msg) try: import simsimd as simd # type: ignore - - x = np.array(x, dtype=np.float32) - y = np.array(y, dtype=np.float32) - z = 1 - np.array(simd.cdist(x, y, metric="cosine")) - return z except ImportError: logger.debug( "Unable to import simsimd, defaulting to NumPy implementation. 
If you want " @@ -72,6 +67,10 @@ def _cosine_similarity(x: Matrix, y: Matrix) -> np.ndarray: similarity[np.isnan(similarity) | np.isinf(similarity)] = 0.0 return similarity + x = np.array(x, dtype=np.float32) + y = np.array(y, dtype=np.float32) + return 1 - np.array(simd.cdist(x, y, metric="cosine")) + def maximal_marginal_relevance( query_embedding: np.ndarray, diff --git a/libs/core/pyproject.toml b/libs/core/pyproject.toml index a14282b8476..f9b731c7301 100644 --- a/libs/core/pyproject.toml +++ b/libs/core/pyproject.toml @@ -44,7 +44,7 @@ python = ">=3.12.4" [tool.poetry.extras] [tool.ruff.lint] -select = [ "ASYNC", "B", "C4", "COM", "DJ", "E", "EM", "EXE", "F", "FLY", "FURB", "I", "ICN", "INT", "LOG", "N", "NPY", "PD", "PIE", "Q", "RSE", "S", "SIM", "SLOT", "T10", "T201", "TID", "UP", "W", "YTT",] +select = [ "ASYNC", "B", "C4", "COM", "DJ", "E", "EM", "EXE", "F", "FLY", "FURB", "I", "ICN", "INT", "LOG", "N", "NPY", "PD", "PIE", "Q", "RSE", "S", "SIM", "SLOT", "T10", "T201", "TID", "TRY", "UP", "W", "YTT",] ignore = [ "COM812", "UP007", "S110", "S112",] [tool.coverage.run] diff --git a/libs/core/tests/unit_tests/runnables/test_fallbacks.py b/libs/core/tests/unit_tests/runnables/test_fallbacks.py index ec1d516587d..1826f883e7b 100644 --- a/libs/core/tests/unit_tests/runnables/test_fallbacks.py +++ b/libs/core/tests/unit_tests/runnables/test_fallbacks.py @@ -103,7 +103,7 @@ def _runnable(inputs: dict) -> str: if inputs["text"] == "bar": return "second" if isinstance(inputs["exception"], ValueError): - raise RuntimeError + raise RuntimeError # noqa: TRY004 return "third" diff --git a/libs/core/tests/unit_tests/runnables/test_runnable_events_v1.py b/libs/core/tests/unit_tests/runnables/test_runnable_events_v1.py index 6168793247c..59c5e765e23 100644 --- a/libs/core/tests/unit_tests/runnables/test_runnable_events_v1.py +++ b/libs/core/tests/unit_tests/runnables/test_runnable_events_v1.py @@ -1600,7 +1600,7 @@ async def test_event_stream_with_retry() -> None: def fail(inputs: str) -> None: """Simple func.""" msg = "fail" - raise Exception(msg) + raise ValueError(msg) chain = RunnableLambda(success) | RunnableLambda(fail).with_retry( stop_after_attempt=1, diff --git a/libs/core/tests/unit_tests/runnables/test_runnable_events_v2.py b/libs/core/tests/unit_tests/runnables/test_runnable_events_v2.py index 8e88eb11a65..698a4c4ddab 100644 --- a/libs/core/tests/unit_tests/runnables/test_runnable_events_v2.py +++ b/libs/core/tests/unit_tests/runnables/test_runnable_events_v2.py @@ -1556,7 +1556,7 @@ async def test_event_stream_with_retry() -> None: def fail(inputs: str) -> None: """Simple func.""" msg = "fail" - raise Exception(msg) + raise ValueError(msg) chain = RunnableLambda(success) | RunnableLambda(fail).with_retry( stop_after_attempt=1, @@ -1906,7 +1906,7 @@ async def test_runnable_with_message_history() -> None: return fn(*args, **kwargs) except Exception as e: raised_errors.append(e) - raise e + raise return _get_output_messages @@ -2097,7 +2097,7 @@ class StreamingRunnable(Runnable[Input, Output]): final_output = None for element in self.iterable: if isinstance(element, BaseException): - raise element + raise element # noqa: TRY301 yield element if final_output is None: @@ -2409,10 +2409,10 @@ async def test_break_astream_events() -> None: self.started = True try: await asyncio.sleep(0.5) - return input except asyncio.CancelledError: self.cancelled = True raise + return input def reset(self) -> None: self.started = False @@ -2474,10 +2474,10 @@ async def test_cancel_astream_events() -> 
diff --git a/libs/core/tests/unit_tests/runnables/test_runnable_events_v2.py b/libs/core/tests/unit_tests/runnables/test_runnable_events_v2.py
index 8e88eb11a65..698a4c4ddab 100644
--- a/libs/core/tests/unit_tests/runnables/test_runnable_events_v2.py
+++ b/libs/core/tests/unit_tests/runnables/test_runnable_events_v2.py
@@ -1556,7 +1556,7 @@ async def test_event_stream_with_retry() -> None:
     def fail(inputs: str) -> None:
         """Simple func."""
         msg = "fail"
-        raise Exception(msg)
+        raise ValueError(msg)

     chain = RunnableLambda(success) | RunnableLambda(fail).with_retry(
         stop_after_attempt=1,
@@ -1906,7 +1906,7 @@ async def test_runnable_with_message_history() -> None:
             return fn(*args, **kwargs)
         except Exception as e:
             raised_errors.append(e)
-            raise e
+            raise

     return _get_output_messages
@@ -2097,7 +2097,7 @@ class StreamingRunnable(Runnable[Input, Output]):
         final_output = None
         for element in self.iterable:
             if isinstance(element, BaseException):
-                raise element
+                raise element  # noqa: TRY301
             yield element

         if final_output is None:
@@ -2409,10 +2409,10 @@ async def test_break_astream_events() -> None:
             self.started = True
             try:
                 await asyncio.sleep(0.5)
-                return input
             except asyncio.CancelledError:
                 self.cancelled = True
                 raise
+            return input

         def reset(self) -> None:
             self.started = False
@@ -2474,10 +2474,10 @@ async def test_cancel_astream_events() -> None:
             self.started = True
             try:
                 await asyncio.sleep(0.5)
-                return input
             except asyncio.CancelledError:
                 self.cancelled = True
                 raise
+            return input

         def reset(self) -> None:
             self.started = False