core: Add ruff rules TRY (tryceratops) (#29388)
Pre-existing TRY004 violations ("prefer TypeError over ValueError" for failed type checks) are marked with `# noqa` to preserve backward compatibility. Let me know if you would prefer some of them fixed instead.

Co-authored-by: Erick Friis <erick@langchain.dev>
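For context, a minimal sketch of what TRY004 flags and of the suppression pattern this PR applies. The functions and names below are illustrative, not taken from the diff:

    def normalize(value):
        if not isinstance(value, str):
            # What TRY004 prefers: a failed *type* check raises TypeError.
            raise TypeError(f"Expected str, got {type(value)}")
        return value.strip()

    def normalize_compat(value):
        if not isinstance(value, str):
            msg = f"Expected str, got {type(value)}"
            # What this PR does for existing call sites: keep ValueError
            # (callers may already catch it) and silence the rule inline.
            raise ValueError(msg)  # noqa: TRY004
        return value.strip()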
This commit is contained in:
Parent: 723b603f52
Commit: dbb6b7b103
@@ -127,7 +127,7 @@ def trace_as_chain_group(
     except Exception as e:
         if not group_cm.ended:
             run_manager.on_chain_error(e)
-        raise e
+        raise
     else:
         if not group_cm.ended:
             run_manager.on_chain_end({})
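Most of the hunks below are this same mechanical TRY201 fix: inside an `except` block, a bare `raise` re-raises the active exception with its traceback intact, so naming it again (`raise e`) is redundant. A minimal illustration with a hypothetical function:

    def parse_port(raw: str) -> int:
        try:
            return int(raw)
        except ValueError as e:
            print(f"invalid port value: {e!r}")
            raise  # TRY201: equivalent to `raise e`, traceback preserved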
@@ -207,7 +207,7 @@ async def atrace_as_chain_group(
     except Exception as e:
         if not group_cm.ended:
             await run_manager.on_chain_error(e)
-        raise e
+        raise
     else:
         if not group_cm.ended:
             await run_manager.on_chain_end({})

@@ -289,7 +289,7 @@ def handle_event(
                     f" {repr(e)}"
                 )
                 if handler.raise_error:
-                    raise e
+                    raise
     finally:
         if coros:
             try:

@@ -388,7 +388,7 @@ async def _ahandle_event_for_handler(
                 f"Error in {handler.__class__.__name__}.{event_name} callback: {repr(e)}"
             )
             if handler.raise_error:
-                raise e
+                raise


 async def ahandle_event(
@@ -268,7 +268,7 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
             f"Invalid input type {type(input)}. "
             "Must be a PromptValue, str, or list of BaseMessages."
         )
-        raise ValueError(msg)
+        raise ValueError(msg)  # noqa: TRY004

     def invoke(
         self,

@@ -407,9 +407,6 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
                     generation = chunk
                 else:
                     generation += chunk
-            if generation is None:
-                msg = "No generation chunks were returned"
-                raise ValueError(msg)
         except BaseException as e:
             run_manager.on_llm_error(
                 e,

@@ -417,9 +414,14 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
                     generations=[[generation]] if generation else []
                 ),
             )
-            raise e
-        else:
-            run_manager.on_llm_end(LLMResult(generations=[[generation]]))
+            raise
+
+        if generation is None:
+            err = ValueError("No generation chunks were returned")
+            run_manager.on_llm_error(err, response=LLMResult(generations=[]))
+            raise err
+
+        run_manager.on_llm_end(LLMResult(generations=[[generation]]))

     async def astream(
         self,

@@ -485,19 +487,21 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
                     generation = chunk
                 else:
                     generation += chunk
-            if generation is None:
-                msg = "No generation chunks were returned"
-                raise ValueError(msg)
         except BaseException as e:
             await run_manager.on_llm_error(
                 e,
                 response=LLMResult(generations=[[generation]] if generation else []),
             )
-            raise e
-        else:
-            await run_manager.on_llm_end(
-                LLMResult(generations=[[generation]]),
-            )
+            raise
+
+        if generation is None:
+            err = ValueError("No generation chunks were returned")
+            await run_manager.on_llm_error(err, response=LLMResult(generations=[]))
+            raise err
+
+        await run_manager.on_llm_end(
+            LLMResult(generations=[[generation]]),
+        )

     # --- Custom methods ---

@@ -641,7 +645,7 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
         except BaseException as e:
             if run_managers:
                 run_managers[i].on_llm_error(e, response=LLMResult(generations=[]))
-            raise e
+            raise
         flattened_outputs = [
             LLMResult(generations=[res.generations], llm_output=res.llm_output)  # type: ignore[list-item]
             for res in results

@@ -1022,7 +1026,7 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
             return generation.message
         else:
             msg = "Unexpected generation type"
-            raise ValueError(msg)
+            raise ValueError(msg)  # noqa: TRY004

     async def _call_async(
         self,

@@ -1039,7 +1043,7 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
             return generation.message
         else:
             msg = "Unexpected generation type"
-            raise ValueError(msg)
+            raise ValueError(msg)  # noqa: TRY004

     @deprecated("0.1.7", alternative="invoke", removal="1.0")
     def call_as_llm(

@@ -1057,7 +1061,7 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
             return result.content
         else:
             msg = "Cannot use predict when output is not a string."
-            raise ValueError(msg)
+            raise ValueError(msg)  # noqa: TRY004

     @deprecated("0.1.7", alternative="invoke", removal="1.0")
     def predict_messages(

@@ -1082,7 +1086,7 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
             return result.content
         else:
             msg = "Cannot use predict when output is not a string."
-            raise ValueError(msg)
+            raise ValueError(msg)  # noqa: TRY004

     @deprecated("0.1.7", alternative="ainvoke", removal="1.0")
     async def apredict_messages(
@@ -242,7 +242,7 @@ class GenericFakeChatModel(BaseChatModel):
                 f"Expected generate to return a ChatResult, "
                 f"but got {type(chat_result)} instead."
             )
-            raise ValueError(msg)
+            raise ValueError(msg)  # noqa: TRY004

         message = chat_result.generations[0].message

@@ -251,7 +251,7 @@ class GenericFakeChatModel(BaseChatModel):
                 f"Expected invoke to return an AIMessage, "
                 f"but got {type(message)} instead."
             )
-            raise ValueError(msg)
+            raise ValueError(msg)  # noqa: TRY004

         content = message.content

@@ -337,7 +337,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
             f"Invalid input type {type(input)}. "
             "Must be a PromptValue, str, or list of BaseMessages."
         )
-        raise ValueError(msg)
+        raise ValueError(msg)  # noqa: TRY004

     def _get_ls_params(
         self,

@@ -448,7 +448,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
                 if return_exceptions:
                     return cast(list[str], [e for _ in inputs])
                 else:
-                    raise e
+                    raise
         else:
             batches = [
                 inputs[i : i + max_concurrency]

@@ -494,7 +494,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
                 if return_exceptions:
                     return cast(list[str], [e for _ in inputs])
                 else:
-                    raise e
+                    raise
         else:
             batches = [
                 inputs[i : i + max_concurrency]

@@ -562,9 +562,6 @@ class BaseLLM(BaseLanguageModel[str], ABC):
                     generation = chunk
                 else:
                     generation += chunk
-            if generation is None:
-                msg = "No generation chunks were returned"
-                raise ValueError(msg)
         except BaseException as e:
             run_manager.on_llm_error(
                 e,

@@ -572,9 +569,14 @@ class BaseLLM(BaseLanguageModel[str], ABC):
                     generations=[[generation]] if generation else []
                 ),
             )
-            raise e
-        else:
-            run_manager.on_llm_end(LLMResult(generations=[[generation]]))
+            raise
+
+        if generation is None:
+            err = ValueError("No generation chunks were returned")
+            run_manager.on_llm_error(err, response=LLMResult(generations=[]))
+            raise err
+
+        run_manager.on_llm_end(LLMResult(generations=[[generation]]))

     async def astream(
         self,

@@ -632,17 +634,19 @@ class BaseLLM(BaseLanguageModel[str], ABC):
                     generation = chunk
                 else:
                     generation += chunk
-            if generation is None:
-                msg = "No generation chunks were returned"
-                raise ValueError(msg)
         except BaseException as e:
             await run_manager.on_llm_error(
                 e,
                 response=LLMResult(generations=[[generation]] if generation else []),
             )
-            raise e
-        else:
-            await run_manager.on_llm_end(LLMResult(generations=[[generation]]))
+            raise
+
+        if generation is None:
+            err = ValueError("No generation chunks were returned")
+            await run_manager.on_llm_error(err, response=LLMResult(generations=[]))
+            raise err
+
+        await run_manager.on_llm_end(LLMResult(generations=[[generation]]))

     # --- Custom methods ---

@@ -790,7 +794,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
         except BaseException as e:
             for run_manager in run_managers:
                 run_manager.on_llm_error(e, response=LLMResult(generations=[]))
-            raise e
+            raise
         flattened_outputs = output.flatten()
         for manager, flattened_output in zip(run_managers, flattened_outputs):
             manager.on_llm_end(flattened_output)

@@ -850,7 +854,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
                 "Argument 'prompts' is expected to be of type List[str], received"
                 f" argument of type {type(prompts)}."
             )
-            raise ValueError(msg)
+            raise ValueError(msg)  # noqa: TRY004
         # Create callback managers
         if isinstance(metadata, list):
             metadata = [

@@ -1036,7 +1040,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
                     for run_manager in run_managers
                 ]
             )
-            raise e
+            raise
         flattened_outputs = output.flatten()
         await asyncio.gather(
             *[

@@ -1289,7 +1293,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
                 f"{type(prompt)}. If you want to run the LLM on multiple prompts, use "
                 "`generate` instead."
             )
-            raise ValueError(msg)
+            raise ValueError(msg)  # noqa: TRY004
         return (
             self.generate(
                 [prompt],
@@ -363,6 +363,17 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):
             return self
         tool_calls = []
         invalid_tool_calls = []
+
+        def add_chunk_to_invalid_tool_calls(chunk: ToolCallChunk) -> None:
+            invalid_tool_calls.append(
+                create_invalid_tool_call(
+                    name=chunk["name"],
+                    args=chunk["args"],
+                    id=chunk["id"],
+                    error=None,
+                )
+            )
+
         for chunk in self.tool_call_chunks:
             try:
                 args_ = parse_partial_json(chunk["args"]) if chunk["args"] != "" else {}  # type: ignore[arg-type]

@@ -375,17 +386,9 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):
                         )
                     )
                 else:
-                    msg = "Malformed args."
-                    raise ValueError(msg)
+                    add_chunk_to_invalid_tool_calls(chunk)
             except Exception:
-                invalid_tool_calls.append(
-                    create_invalid_tool_call(
-                        name=chunk["name"],
-                        args=chunk["args"],
-                        id=chunk["id"],
-                        error=None,
-                    )
-                )
+                add_chunk_to_invalid_tool_calls(chunk)
         self.tool_calls = tool_calls
         self.invalid_tool_calls = invalid_tool_calls
         return self
@@ -124,7 +124,7 @@ def get_buffer_string(
             role = m.role
         else:
             msg = f"Got unsupported message type: {m}"
-            raise ValueError(msg)
+            raise ValueError(msg)  # noqa: TRY004
         message = f"{role}: {m.content}"
         if isinstance(m, AIMessage) and "function_call" in m.additional_kwargs:
             message += f"{m.additional_kwargs['function_call']}"

@@ -1400,7 +1400,7 @@ def _get_message_openai_role(message: BaseMessage) -> str:
         return message.role
     else:
         msg = f"Unknown BaseMessage type {message.__class__}."
-        raise ValueError(msg)
+        raise ValueError(msg)  # noqa: TRY004


 def _convert_to_openai_tool_calls(tool_calls: list[ToolCall]) -> list[dict]:
@@ -282,19 +282,20 @@ class PydanticToolsParser(JsonOutputToolsParser):
         name_dict = {tool.__name__: tool for tool in self.tools}
         pydantic_objects = []
         for res in json_results:
-            try:
-                if not isinstance(res["args"], dict):
-                    msg = (
-                        f"Tool arguments must be specified as a dict, received: "
-                        f"{res['args']}"
-                    )
-                    raise ValueError(msg)
-                pydantic_objects.append(name_dict[res["type"]](**res["args"]))
-            except (ValidationError, ValueError) as e:
+            if not isinstance(res["args"], dict):
                 if partial:
                     continue
-                else:
-                    raise e
+                msg = (
+                    f"Tool arguments must be specified as a dict, received: "
+                    f"{res['args']}"
+                )
+                raise ValueError(msg)
+            try:
+                pydantic_objects.append(name_dict[res["type"]](**res["args"]))
+            except (ValidationError, ValueError):
+                if partial:
+                    continue
+                raise
         if self.first_tool_only:
             return pydantic_objects[0] if pydantic_objects else None
         else:
@@ -66,10 +66,10 @@ class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
         try:
             json_object = super().parse_result(result)
             return self._parse_obj(json_object)
-        except OutputParserException as e:
+        except OutputParserException:
             if partial:
                 return None
-            raise e
+            raise

     def parse(self, text: str) -> TBaseModel:
         """Parse the output of an LLM call to a pydantic object.
@@ -244,7 +244,7 @@ class MessagesPlaceholder(BaseMessagePromptTemplate):
                 f"variable {self.variable_name} should be a list of base messages, "
                 f"got {value} of type {type(value)}"
             )
-            raise ValueError(msg)
+            raise ValueError(msg)  # noqa: TRY004
         value = convert_to_messages(value)
         if self.n_messages:
             value = value[-self.n_messages :]

@@ -577,7 +577,7 @@ class _StringImageMessagePromptTemplate(BaseMessagePromptTemplate):
             return cls(prompt=prompt, **kwargs)
         else:
             msg = f"Invalid template: {template}"
-            raise ValueError(msg)
+            raise ValueError(msg)  # noqa: TRY004

     @classmethod
     def from_template_file(

@@ -1225,7 +1225,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
                 result.extend(message)
             else:
                 msg = f"Unexpected input: {message_template}"
-                raise ValueError(msg)
+                raise ValueError(msg)  # noqa: TRY004
         return result

     async def aformat_messages(self, **kwargs: Any) -> list[BaseMessage]:

@@ -1253,7 +1253,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
                 result.extend(message)
             else:
                 msg = f"Unexpected input: {message_template}"
-                raise ValueError(msg)
+                raise ValueError(msg)  # noqa: TRY004
         return result

     def partial(self, **kwargs: Any) -> ChatPromptTemplate:

@@ -1399,7 +1399,7 @@ def _create_template_from_message_type(
         var_name_wrapped, is_optional = template
         if not isinstance(var_name_wrapped, str):
             msg = f"Expected variable name to be a string. Got: {var_name_wrapped}"
-            raise ValueError(msg)
+            raise ValueError(msg)  # noqa: TRY004
         if var_name_wrapped[0] != "{" or var_name_wrapped[-1] != "}":
             msg = (
                 f"Invalid placeholder template: {var_name_wrapped}."
@@ -78,7 +78,7 @@ def _load_examples(config: dict) -> dict:
         config["examples"] = examples
     else:
         msg = "Invalid examples format. Only list or string are supported."
-        raise ValueError(msg)
+        raise ValueError(msg)  # noqa: TRY004
     return config

@@ -262,7 +262,7 @@ class BaseRetriever(RunnableSerializable[RetrieverInput, RetrieverOutput], ABC):
             result = self._get_relevant_documents(input, **_kwargs)
         except Exception as e:
             run_manager.on_retriever_error(e)
-            raise e
+            raise
         else:
             run_manager.on_retriever_end(
                 result,

@@ -325,7 +325,7 @@ class BaseRetriever(RunnableSerializable[RetrieverInput, RetrieverOutput], ABC):
             result = await self._aget_relevant_documents(input, **_kwargs)
         except Exception as e:
             await run_manager.on_retriever_error(e)
-            raise e
+            raise
         else:
             await run_manager.on_retriever_end(
                 result,
@@ -188,7 +188,7 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
                 last_error = e
             except BaseException as e:
                 run_manager.on_chain_error(e)
-                raise e
+                raise
             else:
                 run_manager.on_chain_end(output)
                 return output

@@ -241,7 +241,7 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
                 last_error = e
             except BaseException as e:
                 await run_manager.on_chain_error(e)
-                raise e
+                raise
             else:
                 await run_manager.on_chain_end(output)
                 return output

@@ -488,7 +488,7 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
                 last_error = e
             except BaseException as e:
                 run_manager.on_chain_error(e)
-                raise e
+                raise
             else:
                 first_error = None
                 break

@@ -507,7 +507,7 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
             output = None
         except BaseException as e:
             run_manager.on_chain_error(e)
-            raise e
+            raise
         run_manager.on_chain_end(output)

     async def astream(

@@ -558,7 +558,7 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
                 last_error = e
             except BaseException as e:
                 await run_manager.on_chain_error(e)
-                raise e
+                raise
             else:
                 first_error = None
                 break

@@ -577,7 +577,7 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
             output = None
         except BaseException as e:
             await run_manager.on_chain_error(e)
-            raise e
+            raise
         await run_manager.on_chain_end(output)

     def __getattr__(self, name: str) -> Any:
@@ -50,9 +50,9 @@ def is_uuid(value: str) -> bool:
     """
     try:
         UUID(value)
-        return True
     except ValueError:
         return False
+    return True


 class Edge(NamedTuple):
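This hunk is the TRY300 shape: the success-path `return` moves out of the `try` body so that the `except ValueError` clause only ever guards the `UUID()` call it is meant to protect. The resulting helper, read in full from the diff:

    from uuid import UUID

    def is_uuid(value: str) -> bool:
        # True if `value` parses as a UUID; the except clause no longer
        # wraps the success-path return.
        try:
            UUID(value)
        except ValueError:
            return False
        return True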
@@ -481,7 +481,7 @@ class RunnableWithMessageHistory(RunnableBindingBase):
             f"Expected str, BaseMessage, List[BaseMessage], or Tuple[BaseMessage]. "
             f"Got {input_val}."
         )
-        raise ValueError(msg)
+        raise ValueError(msg)  # noqa: TRY004

     def _get_output_messages(
         self, output_val: Union[str, BaseMessage, Sequence[BaseMessage], dict]

@@ -517,7 +517,7 @@ class RunnableWithMessageHistory(RunnableBindingBase):
             f"Expected str, BaseMessage, List[BaseMessage], or Tuple[BaseMessage]. "
             f"Got {output_val}."
         )
-        raise ValueError(msg)
+        raise ValueError(msg)  # noqa: TRY004

     def _enter_history(self, input: Any, config: RunnableConfig) -> list[BaseMessage]:
         hist: BaseChatMessageHistory = config["configurable"]["message_history"]
@@ -474,7 +474,7 @@ class RunnableAssign(RunnableSerializable[dict[str, Any], dict[str, Any]]):
     ) -> dict[str, Any]:
         if not isinstance(input, dict):
             msg = "The input to RunnablePassthrough.assign() must be a dict."
-            raise ValueError(msg)
+            raise ValueError(msg)  # noqa: TRY004

         return {
             **input,

@@ -502,7 +502,7 @@ class RunnableAssign(RunnableSerializable[dict[str, Any], dict[str, Any]]):
     ) -> dict[str, Any]:
         if not isinstance(input, dict):
             msg = "The input to RunnablePassthrough.assign() must be a dict."
-            raise ValueError(msg)
+            raise ValueError(msg)  # noqa: TRY004

         return {
             **input,

@@ -555,7 +555,7 @@ class RunnableAssign(RunnableSerializable[dict[str, Any], dict[str, Any]]):
         for chunk in for_passthrough:
             if not isinstance(chunk, dict):
                 msg = "The input to RunnablePassthrough.assign() must be a dict."
-                raise ValueError(msg)
+                raise ValueError(msg)  # noqa: TRY004
             # remove mapper keys from passthrough chunk, to be overwritten by map
             filtered = AddableDict(
                 {k: v for k, v in chunk.items() if k not in mapper_keys}

@@ -605,7 +605,7 @@ class RunnableAssign(RunnableSerializable[dict[str, Any], dict[str, Any]]):
         async for chunk in for_passthrough:
             if not isinstance(chunk, dict):
                 msg = "The input to RunnablePassthrough.assign() must be a dict."
-                raise ValueError(msg)
+                raise ValueError(msg)  # noqa: TRY004

             # remove mapper keys from passthrough chunk, to be overwritten by map output
             filtered = AddableDict(

@@ -708,7 +708,7 @@ class RunnablePick(RunnableSerializable[dict[str, Any], dict[str, Any]]):
     def _pick(self, input: dict[str, Any]) -> Any:
         if not isinstance(input, dict):
             msg = "The input to RunnablePassthrough.assign() must be a dict."
-            raise ValueError(msg)
+            raise ValueError(msg)  # noqa: TRY004

         if isinstance(self.keys, str):
             return input.get(self.keys)
@@ -397,9 +397,9 @@ def get_lambda_source(func: Callable) -> Optional[str]:
         tree = ast.parse(textwrap.dedent(code))
         visitor = GetLambdaSource()
         visitor.visit(tree)
-        return visitor.source if visitor.count == 1 else name
     except (SyntaxError, TypeError, OSError, SystemError):
         return name
+    return visitor.source if visitor.count == 1 else name


 @lru_cache(maxsize=256)

@@ -440,10 +440,11 @@ def get_function_nonlocals(func: Callable) -> list[Any]:
                         break
                 else:
                     values.append(vv)
-        return values
     except (SyntaxError, TypeError, OSError, SystemError):
         return []

+    return values
+

 def indent_lines_after_first(text: str, prefix: str) -> str:
     """Indent all lines of text after the first line.
@@ -680,6 +680,7 @@ class ChildTool(BaseTool):

         content = None
         artifact = None
+        status = "success"
         error_to_raise: Union[Exception, KeyboardInterrupt, None] = None
         try:
             child_config = patch_config(config, callbacks=run_manager.get_child())

@@ -699,26 +700,25 @@ class ChildTool(BaseTool):
                         f"expected. Instead generated response of type: "
                         f"{type(response)}."
                     )
-                    raise ValueError(msg)
-                content, artifact = response
+                    error_to_raise = ValueError(msg)
+                else:
+                    content, artifact = response
             else:
                 content = response
-            status = "success"
         except (ValidationError, ValidationErrorV1) as e:
             if not self.handle_validation_error:
                 error_to_raise = e
             else:
                 content = _handle_validation_error(e, flag=self.handle_validation_error)
                 status = "error"
         except ToolException as e:
             if not self.handle_tool_error:
                 error_to_raise = e
             else:
                 content = _handle_tool_error(e, flag=self.handle_tool_error)
                 status = "error"
         except (Exception, KeyboardInterrupt) as e:
             error_to_raise = e
-            status = "error"

         if error_to_raise:
             run_manager.on_tool_error(error_to_raise)
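Both tool-invocation hunks share one refactor: instead of raising inside the `try` block, the failure is stashed in `error_to_raise` and raised only after the callback bookkeeping runs, and `status` now starts as "success" rather than being assigned on the happy path. A condensed, hypothetical sketch of that control flow (not the library's API):

    def run_tool(tool, payload):
        content = None
        status = "success"
        error_to_raise = None
        try:
            response = tool(payload)
            if not isinstance(response, str):
                # Defer the raise so the error hook below still fires.
                error_to_raise = ValueError(f"unexpected response type: {type(response)}")
            else:
                content = response
        except Exception as e:
            error_to_raise = e
            status = "error"
        if error_to_raise:
            print(f"on_tool_error: {error_to_raise!r}")  # stand-in for the run manager hook
            raise error_to_raise
        print(f"on_tool_end: status={status}")
        return content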
@@ -789,6 +789,7 @@ class ChildTool(BaseTool):
         )
         content = None
         artifact = None
+        status = "success"
         error_to_raise: Optional[Union[Exception, KeyboardInterrupt]] = None
         try:
             tool_args, tool_kwargs = self._to_args_and_kwargs(tool_input, tool_call_id)

@@ -816,26 +817,25 @@ class ChildTool(BaseTool):
                         f"expected. Instead generated response of type: "
                         f"{type(response)}."
                     )
-                    raise ValueError(msg)
-                content, artifact = response
+                    error_to_raise = ValueError(msg)
+                else:
+                    content, artifact = response
             else:
                 content = response
-            status = "success"
         except ValidationError as e:
             if not self.handle_validation_error:
                 error_to_raise = e
             else:
                 content = _handle_validation_error(e, flag=self.handle_validation_error)
                 status = "error"
         except ToolException as e:
             if not self.handle_tool_error:
                 error_to_raise = e
             else:
                 content = _handle_tool_error(e, flag=self.handle_tool_error)
                 status = "error"
         except (Exception, KeyboardInterrupt) as e:
             error_to_raise = e
-            status = "error"

         if error_to_raise:
             await run_manager.on_tool_error(error_to_raise)

@@ -873,7 +873,7 @@ def _handle_validation_error(
             f"Got unexpected type of `handle_validation_error`. Expected bool, "
             f"str or callable. Received: {flag}"
         )
-        raise ValueError(msg)
+        raise ValueError(msg)  # noqa: TRY004
     return content

@@ -893,7 +893,7 @@ def _handle_tool_error(
             f"Got unexpected type of `handle_tool_error`. Expected bool, str "
             f"or callable. Received: {flag}"
         )
-        raise ValueError(msg)
+        raise ValueError(msg)  # noqa: TRY004
     return content

@@ -139,7 +139,7 @@ class EvaluatorCallbackHandler(BaseTracer):
                 f"{evaluator.__class__.__name__}: {repr(e)}",
                 exc_info=True,
             )
-            raise e
+            raise
         example_id = str(run.reference_example_id)
         with self.lock:
             for res in eval_results:
@@ -97,8 +97,7 @@ def extract_sub_links(
             if continue_on_failure:
                 logger.warning(f"Unable to load link {link}. Raised exception:\n\n{e}")
                 continue
-            else:
-                raise e
+            raise

     results = []
     for path in absolute_paths:
@@ -53,13 +53,14 @@ def grab_literal(template: str, l_del: str) -> tuple[str, str]:
         # Look for the next tag and move the template to it
         literal, template = template.split(l_del, 1)
         _CURRENT_LINE += literal.count("\n")
-        return (literal, template)

     # There are no more tags in the template?
     except ValueError:
         # Then the rest of the template is a literal
         return (template, "")

+    return (literal, template)
+

 def l_sa_check(template: str, literal: str, is_standalone: bool) -> bool:
     """Do a preliminary check to see if a tag could be a standalone.
@@ -33,5 +33,5 @@ def _dict_int_op(
         msg = (
             f"Unknown value types: {types}. Only dict and int values are supported."
         )
-        raise ValueError(msg)
+        raise ValueError(msg)  # noqa: TRY004
     return combined
@@ -54,11 +54,6 @@ def _cosine_similarity(x: Matrix, y: Matrix) -> np.ndarray:
         raise ValueError(msg)
     try:
         import simsimd as simd  # type: ignore
-
-        x = np.array(x, dtype=np.float32)
-        y = np.array(y, dtype=np.float32)
-        z = 1 - np.array(simd.cdist(x, y, metric="cosine"))
-        return z
     except ImportError:
         logger.debug(
             "Unable to import simsimd, defaulting to NumPy implementation. If you want "

@@ -72,6 +67,10 @@ def _cosine_similarity(x: Matrix, y: Matrix) -> np.ndarray:
         similarity[np.isnan(similarity) | np.isinf(similarity)] = 0.0
         return similarity

+    x = np.array(x, dtype=np.float32)
+    y = np.array(y, dtype=np.float32)
+    return 1 - np.array(simd.cdist(x, y, metric="cosine"))
+

 def maximal_marginal_relevance(
     query_embedding: np.ndarray,
@@ -44,7 +44,7 @@ python = ">=3.12.4"
 [tool.poetry.extras]

 [tool.ruff.lint]
-select = [ "ASYNC", "B", "C4", "COM", "DJ", "E", "EM", "EXE", "F", "FLY", "FURB", "I", "ICN", "INT", "LOG", "N", "NPY", "PD", "PIE", "Q", "RSE", "S", "SIM", "SLOT", "T10", "T201", "TID", "UP", "W", "YTT",]
+select = [ "ASYNC", "B", "C4", "COM", "DJ", "E", "EM", "EXE", "F", "FLY", "FURB", "I", "ICN", "INT", "LOG", "N", "NPY", "PD", "PIE", "Q", "RSE", "S", "SIM", "SLOT", "T10", "T201", "TID", "TRY", "UP", "W", "YTT",]
 ignore = [ "COM812", "UP007", "S110", "S112",]

 [tool.coverage.run]
@@ -103,7 +103,7 @@ def _runnable(inputs: dict) -> str:
     if inputs["text"] == "bar":
         return "second"
     if isinstance(inputs["exception"], ValueError):
-        raise RuntimeError
+        raise RuntimeError  # noqa: TRY004
     return "third"

@@ -1600,7 +1600,7 @@ async def test_event_stream_with_retry() -> None:
     def fail(inputs: str) -> None:
         """Simple func."""
         msg = "fail"
-        raise Exception(msg)
+        raise ValueError(msg)

     chain = RunnableLambda(success) | RunnableLambda(fail).with_retry(
         stop_after_attempt=1,
@@ -1556,7 +1556,7 @@ async def test_event_stream_with_retry() -> None:
     def fail(inputs: str) -> None:
         """Simple func."""
         msg = "fail"
-        raise Exception(msg)
+        raise ValueError(msg)

     chain = RunnableLambda(success) | RunnableLambda(fail).with_retry(
         stop_after_attempt=1,
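The two test fixes above correspond to ruff's TRY002, which discourages raising the bare `Exception` class: callers can then only catch the failure with an equally broad handler. Switching to a specific built-in keeps the tests' intent while staying precisely catchable, e.g. (illustrative):

    def fail() -> None:
        msg = "fail"
        raise ValueError(msg)  # specific type: callers can catch exactly this

    try:
        fail()
    except ValueError as e:
        print(f"caught: {e}")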
@@ -1906,7 +1906,7 @@ async def test_runnable_with_message_history() -> None:
             return fn(*args, **kwargs)
         except Exception as e:
             raised_errors.append(e)
-            raise e
+            raise

     return _get_output_messages

@@ -2097,7 +2097,7 @@ class StreamingRunnable(Runnable[Input, Output]):
         final_output = None
         for element in self.iterable:
             if isinstance(element, BaseException):
-                raise element
+                raise element  # noqa: TRY301
             yield element

         if final_output is None:
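The `# noqa: TRY301` is needed because TRY301 flags a `raise` inside a `try` block and suggests hoisting it into a helper; for a test double that deliberately re-raises injected exceptions, an inline suppression is the lighter fix. The rule's usual refactor, sketched with hypothetical names:

    def _require_nonempty(text: str) -> str:
        if not text:
            raise ValueError("empty config")
        return text

    def load_config(path: str) -> str:
        try:
            # TRY301 would flag an inline `raise ValueError(...)` here;
            # the raise lives in the helper above instead.
            return _require_nonempty(open(path).read())
        except ValueError:
            return "{}"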
@@ -2409,10 +2409,10 @@ async def test_break_astream_events() -> None:
             self.started = True
             try:
                 await asyncio.sleep(0.5)
-                return input
             except asyncio.CancelledError:
                 self.cancelled = True
                 raise
+            return input

         def reset(self) -> None:
             self.started = False

@@ -2474,10 +2474,10 @@ async def test_cancel_astream_events() -> None:
             self.started = True
             try:
                 await asyncio.sleep(0.5)
-                return input
             except asyncio.CancelledError:
                 self.cancelled = True
                 raise
+            return input

         def reset(self) -> None:
             self.started = False