diff --git a/libs/core/langchain_core/_api/beta_decorator.py b/libs/core/langchain_core/_api/beta_decorator.py
index c3113b68777..0eab1d439eb 100644
--- a/libs/core/langchain_core/_api/beta_decorator.py
+++ b/libs/core/langchain_core/_api/beta_decorator.py
@@ -120,7 +120,7 @@ def beta(
         if isinstance(obj, type):
             if not _obj_type:
                 _obj_type = "class"
-            wrapped = obj.__init__  # type: ignore
+            wrapped = obj.__init__  # type: ignore[misc]
             _name = _name or obj.__qualname__
             old_doc = obj.__doc__
 
diff --git a/libs/core/langchain_core/_api/deprecation.py b/libs/core/langchain_core/_api/deprecation.py
index 78897a02d9f..7964471c220 100644
--- a/libs/core/langchain_core/_api/deprecation.py
+++ b/libs/core/langchain_core/_api/deprecation.py
@@ -200,7 +200,7 @@ def deprecated(
         if isinstance(obj, type):
             if not _obj_type:
                 _obj_type = "class"
-            wrapped = obj.__init__  # type: ignore
+            wrapped = obj.__init__  # type: ignore[misc]
             _name = _name or obj.__qualname__
             old_doc = obj.__doc__
 
diff --git a/libs/core/langchain_core/agents.py b/libs/core/langchain_core/agents.py
index a142db3940c..74bfcdd4012 100644
--- a/libs/core/langchain_core/agents.py
+++ b/libs/core/langchain_core/agents.py
@@ -113,7 +113,7 @@ class AgentActionMessageLog(AgentAction):
     # Ignoring type because we're overriding the type from AgentAction.
     # And this is the correct thing to do in this case.
     # The type literal is used for serialization purposes.
-    type: Literal["AgentActionMessageLog"] = "AgentActionMessageLog"  # type: ignore
+    type: Literal["AgentActionMessageLog"] = "AgentActionMessageLog"  # type: ignore[assignment]
 
 
 class AgentStep(Serializable):
diff --git a/libs/core/langchain_core/messages/ai.py b/libs/core/langchain_core/messages/ai.py
index 3b9192577fe..40adbde7356 100644
--- a/libs/core/langchain_core/messages/ai.py
+++ b/libs/core/langchain_core/messages/ai.py
@@ -202,7 +202,7 @@ class AIMessage(BaseMessage):
             raw_tool_calls := values.get("additional_kwargs", {}).get("tool_calls")
         ):
             try:
-                if issubclass(cls, AIMessageChunk):  # type: ignore
+                if issubclass(cls, AIMessageChunk):
                     values["tool_call_chunks"] = default_tool_chunk_parser(
                         raw_tool_calls
                     )
@@ -284,7 +284,7 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):
     # Ignoring mypy re-assignment here since we're overriding the value
     # to make sure that the chunk variant can be discriminated from the
     # non-chunk variant.
-    type: Literal["AIMessageChunk"] = "AIMessageChunk"  # type: ignore
+    type: Literal["AIMessageChunk"] = "AIMessageChunk"  # type: ignore[assignment]
     """The type of the message (used for deserialization).
Defaults to "AIMessageChunk".""" @@ -369,7 +369,7 @@ class AIMessageChunk(AIMessage, BaseMessageChunk): return self @override - def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore + def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore[override] if isinstance(other, AIMessageChunk): return add_ai_message_chunks(self, other) if isinstance(other, (list, tuple)) and all( diff --git a/libs/core/langchain_core/messages/base.py b/libs/core/langchain_core/messages/base.py index 079d90f329b..11b32e77f35 100644 --- a/libs/core/langchain_core/messages/base.py +++ b/libs/core/langchain_core/messages/base.py @@ -165,10 +165,10 @@ def merge_content( merged = cast("str", merged) + content # If the next chunk is a list, add the current to the start of the list else: - merged = [merged] + content # type: ignore + merged = [merged] + content # type: ignore[assignment,operator] elif isinstance(content, list): # If both are lists - merged = merge_lists(cast("list", merged), content) # type: ignore + merged = merge_lists(cast("list", merged), content) # type: ignore[assignment] # If the first content is a list, and the second content is a string # If the last element of the first content is a string # Add the second content to the last element @@ -186,7 +186,7 @@ def merge_content( class BaseMessageChunk(BaseMessage): """Message chunk, which can be concatenated with other Message chunks.""" - def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore + def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore[override] """Message chunks support concatenation with other message chunks. This functionality is useful to combine message chunks yielded from diff --git a/libs/core/langchain_core/messages/chat.py b/libs/core/langchain_core/messages/chat.py index 05f1916070c..c9f11f976b6 100644 --- a/libs/core/langchain_core/messages/chat.py +++ b/libs/core/langchain_core/messages/chat.py @@ -31,12 +31,12 @@ class ChatMessageChunk(ChatMessage, BaseMessageChunk): # Ignoring mypy re-assignment here since we're overriding the value # to make sure that the chunk variant can be discriminated from the # non-chunk variant. - type: Literal["ChatMessageChunk"] = "ChatMessageChunk" # type: ignore + type: Literal["ChatMessageChunk"] = "ChatMessageChunk" # type: ignore[assignment] """The type of the message (used during serialization). Defaults to "ChatMessageChunk".""" @override - def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore + def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore[override] if isinstance(other, ChatMessageChunk): if self.role != other.role: msg = "Cannot concatenate ChatMessageChunks with different roles." diff --git a/libs/core/langchain_core/messages/function.py b/libs/core/langchain_core/messages/function.py index c08555dfddf..c79d54ae5b2 100644 --- a/libs/core/langchain_core/messages/function.py +++ b/libs/core/langchain_core/messages/function.py @@ -44,7 +44,7 @@ class FunctionMessageChunk(FunctionMessage, BaseMessageChunk): Defaults to "FunctionMessageChunk".""" @override - def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore + def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore[override] if isinstance(other, FunctionMessageChunk): if self.name != other.name: msg = "Cannot concatenate FunctionMessageChunks with different names." 
diff --git a/libs/core/langchain_core/messages/tool.py b/libs/core/langchain_core/messages/tool.py
index f307d3dc044..3a1408809c9 100644
--- a/libs/core/langchain_core/messages/tool.py
+++ b/libs/core/langchain_core/messages/tool.py
@@ -158,7 +158,7 @@ class ToolMessageChunk(ToolMessage, BaseMessageChunk):
     type: Literal["ToolMessageChunk"] = "ToolMessageChunk"  # type: ignore[assignment]
 
     @override
-    def __add__(self, other: Any) -> BaseMessageChunk:  # type: ignore
+    def __add__(self, other: Any) -> BaseMessageChunk:  # type: ignore[override]
         if isinstance(other, ToolMessageChunk):
             if self.tool_call_id != other.tool_call_id:
                 msg = "Cannot concatenate ToolMessageChunks with different names."
diff --git a/libs/core/langchain_core/output_parsers/json.py b/libs/core/langchain_core/output_parsers/json.py
index 0b64c594a31..70fda7aac89 100644
--- a/libs/core/langchain_core/output_parsers/json.py
+++ b/libs/core/langchain_core/output_parsers/json.py
@@ -28,7 +28,7 @@ else:
     from pydantic.v1 import BaseModel
 
     # Union type needs to be last assignment to PydanticBaseModel to make mypy happy.
-    PydanticBaseModel = Union[BaseModel, pydantic.BaseModel]  # type: ignore
+    PydanticBaseModel = Union[BaseModel, pydantic.BaseModel]  # type: ignore[assignment,misc]
 
 
 TBaseModel = TypeVar("TBaseModel", bound=PydanticBaseModel)
@@ -43,7 +43,7 @@ class JsonOutputParser(BaseCumulativeTransformOutputParser[Any]):
     describing the difference between the previous and the current object.
     """
 
-    pydantic_object: Annotated[Optional[type[TBaseModel]], SkipValidation()] = None  # type: ignore
+    pydantic_object: Annotated[Optional[type[TBaseModel]], SkipValidation()] = None  # type: ignore[valid-type]
     """The Pydantic object to use for validation.
     If None, no validation is performed."""
 
diff --git a/libs/core/langchain_core/output_parsers/openai_functions.py b/libs/core/langchain_core/output_parsers/openai_functions.py
index 53d97e04c65..1135ccf0df6 100644
--- a/libs/core/langchain_core/output_parsers/openai_functions.py
+++ b/libs/core/langchain_core/output_parsers/openai_functions.py
@@ -266,7 +266,7 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser):
             if hasattr(self.pydantic_schema, "model_validate_json"):
                 pydantic_args = self.pydantic_schema.model_validate_json(_result)
             else:
-                pydantic_args = self.pydantic_schema.parse_raw(_result)  # type: ignore
+                pydantic_args = self.pydantic_schema.parse_raw(_result)  # type: ignore[attr-defined]
         else:
             fn_name = _result["name"]
             _args = _result["arguments"]
@@ -275,9 +275,9 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser):
             else:
                 pydantic_schema = self.pydantic_schema
             if hasattr(pydantic_schema, "model_validate_json"):
-                pydantic_args = pydantic_schema.model_validate_json(_args)  # type: ignore
+                pydantic_args = pydantic_schema.model_validate_json(_args)
             else:
-                pydantic_args = pydantic_schema.parse_raw(_args)  # type: ignore
+                pydantic_args = pydantic_schema.parse_raw(_args)
         return pydantic_args
 
 
diff --git a/libs/core/langchain_core/output_parsers/openai_tools.py b/libs/core/langchain_core/output_parsers/openai_tools.py
index 9ab28937677..254d11eca1f 100644
--- a/libs/core/langchain_core/output_parsers/openai_tools.py
+++ b/libs/core/langchain_core/output_parsers/openai_tools.py
@@ -66,7 +66,7 @@ def parse_tool_call(
     }
     if return_id:
         parsed["id"] = raw_tool_call.get("id")
-        parsed = create_tool_call(**parsed)  # type: ignore
+        parsed = create_tool_call(**parsed)  # type: ignore[assignment,arg-type]
     return parsed
 
 
diff --git a/libs/core/langchain_core/output_parsers/pydantic.py b/libs/core/langchain_core/output_parsers/pydantic.py
index b1ee96e0fa0..f41a8fe181e 100644
--- a/libs/core/langchain_core/output_parsers/pydantic.py
+++ b/libs/core/langchain_core/output_parsers/pydantic.py
@@ -20,7 +20,7 @@ from langchain_core.utils.pydantic import (
 class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
     """Parse an output using a pydantic model."""
 
-    pydantic_object: Annotated[type[TBaseModel], SkipValidation()]  # type: ignore
+    pydantic_object: Annotated[type[TBaseModel], SkipValidation()]
     """The pydantic model to parse."""
 
     def _parse_obj(self, obj: dict) -> TBaseModel:
diff --git a/libs/core/langchain_core/output_parsers/xml.py b/libs/core/langchain_core/output_parsers/xml.py
index ba7575c59d6..e7b8278c47b 100644
--- a/libs/core/langchain_core/output_parsers/xml.py
+++ b/libs/core/langchain_core/output_parsers/xml.py
@@ -49,7 +49,7 @@ class _StreamingParser:
         """
         if parser == "defusedxml":
             try:
-                import defusedxml  # type: ignore
+                import defusedxml  # type: ignore[import-untyped]
             except ImportError as e:
                 msg = (
                     "defusedxml is not installed. "
@@ -205,7 +205,7 @@ class XMLOutputParser(BaseTransformOutputParser):
         # likely if you're reading this you can move them to the top of the file
         if self.parser == "defusedxml":
             try:
-                from defusedxml import ElementTree  # type: ignore
+                from defusedxml import ElementTree
             except ImportError as e:
                 msg = (
                     "defusedxml is not installed. "
diff --git a/libs/core/langchain_core/prompts/chat.py b/libs/core/langchain_core/prompts/chat.py
index 5f75df829f6..d649ec2fbc3 100644
--- a/libs/core/langchain_core/prompts/chat.py
+++ b/libs/core/langchain_core/prompts/chat.py
@@ -229,7 +229,7 @@ class MessagesPlaceholder(BaseMessagePromptTemplate):
         """
         # mypy can't detect the init which is defined in the parent class
         # b/c these are BaseModel classes.
-        super().__init__(  # type: ignore
+        super().__init__(  # type: ignore[call-arg]
             variable_name=variable_name, optional=optional, **kwargs
         )
 
diff --git a/libs/core/langchain_core/pydantic_v1/__init__.py b/libs/core/langchain_core/pydantic_v1/__init__.py
index 5422482effd..c11a835dae7 100644
--- a/libs/core/langchain_core/pydantic_v1/__init__.py
+++ b/libs/core/langchain_core/pydantic_v1/__init__.py
@@ -18,7 +18,7 @@ from langchain_core._api.deprecation import warn_deprecated
 try:
     from pydantic.v1 import *  # noqa: F403
 except ImportError:
-    from pydantic import *  # type: ignore # noqa: F403
+    from pydantic import *  # noqa: F403
 
 
 try:
diff --git a/libs/core/langchain_core/pydantic_v1/dataclasses.py b/libs/core/langchain_core/pydantic_v1/dataclasses.py
index 903bd865b98..7d795991f18 100644
--- a/libs/core/langchain_core/pydantic_v1/dataclasses.py
+++ b/libs/core/langchain_core/pydantic_v1/dataclasses.py
@@ -5,7 +5,7 @@ from langchain_core._api import warn_deprecated
 try:
     from pydantic.v1.dataclasses import *  # noqa: F403
 except ImportError:
-    from pydantic.dataclasses import *  # type: ignore # noqa: F403
+    from pydantic.dataclasses import *  # noqa: F403
 
 warn_deprecated(
     "0.3.0",
diff --git a/libs/core/langchain_core/pydantic_v1/main.py b/libs/core/langchain_core/pydantic_v1/main.py
index 51e57df96e2..7f89181bf1b 100644
--- a/libs/core/langchain_core/pydantic_v1/main.py
+++ b/libs/core/langchain_core/pydantic_v1/main.py
@@ -5,7 +5,7 @@ from langchain_core._api import warn_deprecated
 try:
     from pydantic.v1.main import *  # noqa: F403
 except ImportError:
-    from pydantic.main import *  # type: ignore # noqa: F403
+    from pydantic.main import *  # noqa: F403
 
 warn_deprecated(
     "0.3.0",
diff --git a/libs/core/langchain_core/retrievers.py b/libs/core/langchain_core/retrievers.py
index e82ff04032d..593438b8853 100644
--- a/libs/core/langchain_core/retrievers.py
+++ b/libs/core/langchain_core/retrievers.py
@@ -190,7 +190,7 @@ class BaseRetriever(RunnableSerializable[RetrieverInput, RetrieverOutput], ABC):
             async def _aget_relevant_documents(
                 self: Self, query: str
             ) -> list[Document]:
-                return await run_in_executor(None, self._get_relevant_documents, query)  # type: ignore
+                return await run_in_executor(None, self._get_relevant_documents, query)  # type: ignore[call-arg]
 
             cls._aget_relevant_documents = _aget_relevant_documents  # type: ignore[assignment]
 
diff --git a/libs/core/langchain_core/runnables/base.py b/libs/core/langchain_core/runnables/base.py
index 68c7e14f1b4..3295b14c0f6 100644
--- a/libs/core/langchain_core/runnables/base.py
+++ b/libs/core/langchain_core/runnables/base.py
@@ -70,7 +70,7 @@ from langchain_core.runnables.utils import (
     Output,
     accepts_config,
     accepts_run_manager,
-    asyncio_accepts_context,
+    coro_with_context,
     gated_coro,
     gather_with_concurrency,
     get_function_first_arg_dict_keys,
@@ -1119,7 +1119,7 @@ class Runnable(Generic[Input, Output], ABC):
         # Mypy isn't resolving the overloads here
         # Likely an issue b/c `self` is being passed through
         # and it's can't map it to Runnable[Input,Output]?
-        async for item in _astream_log_implementation(  # type: ignore
+        async for item in _astream_log_implementation(  # type: ignore[call-overload]
             self,
             input,
             config,
@@ -1980,10 +1980,7 @@ class Runnable(Generic[Input, Output], ABC):
                 coro = acall_func_with_variable_args(
                     func, input, config, run_manager, **kwargs
                 )
-                if asyncio_accepts_context():
-                    output: Output = await asyncio.create_task(coro, context=context)  # type: ignore
-                else:
-                    output = await coro
+                output: Output = await coro_with_context(coro, context)
             except BaseException as e:
                 await run_manager.on_chain_error(e)
                 raise
@@ -2207,14 +2204,14 @@ class Runnable(Generic[Input, Output], ABC):
         )
         try:
             while True:
-                chunk: Output = context.run(next, iterator)  # type: ignore
+                chunk: Output = context.run(next, iterator)
                 yield chunk
                 if final_output_supported:
                     if final_output is None:
                         final_output = chunk
                     else:
                         try:
-                            final_output = final_output + chunk  # type: ignore
+                            final_output = final_output + chunk  # type: ignore[operator]
                         except TypeError:
                             final_output = chunk
                             final_output_supported = False
@@ -2228,7 +2225,7 @@ class Runnable(Generic[Input, Output], ABC):
                     final_input = ichunk
                 else:
                     try:
-                        final_input = final_input + ichunk  # type: ignore
+                        final_input = final_input + ichunk  # type: ignore[operator]
                     except TypeError:
                         final_input = ichunk
                         final_input_supported = False
@@ -2314,20 +2311,14 @@ class Runnable(Generic[Input, Output], ABC):
             iterator = iterator_
         try:
             while True:
-                if asyncio_accepts_context():
-                    chunk: Output = await asyncio.create_task(  # type: ignore[call-arg]
-                        py_anext(iterator),  # type: ignore[arg-type]
-                        context=context,
-                    )
-                else:
-                    chunk = cast("Output", await py_anext(iterator))
+                chunk = await coro_with_context(py_anext(iterator), context)
                 yield chunk
                 if final_output_supported:
                     if final_output is None:
                         final_output = chunk
                     else:
                         try:
-                            final_output = final_output + chunk  # type: ignore
+                            final_output = final_output + chunk  # type: ignore[operator]
                         except TypeError:
                             final_output = chunk
                             final_output_supported = False
@@ -3084,10 +3075,7 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
                         part = functools.partial(step.ainvoke, input, config, **kwargs)
                     else:
                         part = functools.partial(step.ainvoke, input, config)
-                    if asyncio_accepts_context():
-                        input = await asyncio.create_task(part(), context=context)  # type: ignore
-                    else:
-                        input = await asyncio.create_task(part())
+                    input = await coro_with_context(part(), context, create_task=True)
             # finish the root run
         except BaseException as e:
             await run_manager.on_chain_error(e)
@@ -3807,11 +3795,9 @@ class RunnableParallel(RunnableSerializable[Input, dict[str, Any]]):
                     callbacks=run_manager.get_child(f"map:key:{key}"),
                 )
                 with set_config_context(child_config) as context:
-                    if asyncio_accepts_context():
-                        return await asyncio.create_task(  # type: ignore
-                            step.ainvoke(input, child_config), context=context
-                        )
-                    return await asyncio.create_task(step.ainvoke(input, child_config))
+                    return await coro_with_context(
+                        step.ainvoke(input, child_config), context, create_task=True
+                    )
 
         # gather results from all steps
         try:
@@ -5050,7 +5036,7 @@ class RunnableEachBase(RunnableSerializable[list[Input], list[Output]]):
         return create_model_v2(
             self.get_name("Input"),
             root=(
-                list[self.bound.get_input_schema(config)],  # type: ignore
+                list[self.bound.get_input_schema(config)],  # type: ignore[misc]
                 None,
             ),
             # create model needs access to appropriate type annotations to be
@@ -5285,7 +5271,7 @@ class RunnableBindingBase(RunnableSerializable[Input, Output]):
     kwargs.
""" - config: RunnableConfig = Field(default_factory=RunnableConfig) # type: ignore + config: RunnableConfig = Field(default_factory=RunnableConfig) # type: ignore[arg-type] """The config to bind to the underlying Runnable.""" config_factories: list[Callable[[RunnableConfig], RunnableConfig]] = Field( diff --git a/libs/core/langchain_core/runnables/branch.py b/libs/core/langchain_core/runnables/branch.py index 26a14edf164..fad0480aef6 100644 --- a/libs/core/langchain_core/runnables/branch.py +++ b/libs/core/langchain_core/runnables/branch.py @@ -359,7 +359,7 @@ class RunnableBranch(RunnableSerializable[Input, Output]): final_output = chunk else: try: - final_output = final_output + chunk # type: ignore + final_output = final_output + chunk # type: ignore[operator] except TypeError: final_output = None final_output_supported = False @@ -379,7 +379,7 @@ class RunnableBranch(RunnableSerializable[Input, Output]): final_output = chunk else: try: - final_output = final_output + chunk # type: ignore + final_output = final_output + chunk # type: ignore[operator] except TypeError: final_output = None final_output_supported = False @@ -445,7 +445,7 @@ class RunnableBranch(RunnableSerializable[Input, Output]): final_output = chunk else: try: - final_output = final_output + chunk # type: ignore + final_output = final_output + chunk # type: ignore[operator] except TypeError: final_output = None final_output_supported = False @@ -465,7 +465,7 @@ class RunnableBranch(RunnableSerializable[Input, Output]): final_output = chunk else: try: - final_output = final_output + chunk # type: ignore + final_output = final_output + chunk # type: ignore[operator] except TypeError: final_output = None final_output_supported = False diff --git a/libs/core/langchain_core/runnables/config.py b/libs/core/langchain_core/runnables/config.py index aea0c6050c0..a2adca3be51 100644 --- a/libs/core/langchain_core/runnables/config.py +++ b/libs/core/langchain_core/runnables/config.py @@ -343,18 +343,18 @@ def merge_configs(*configs: Optional[RunnableConfig]) -> RunnableConfig: for config in (ensure_config(c) for c in configs if c is not None): for key in config: if key == "metadata": - base[key] = { # type: ignore - **base.get(key, {}), # type: ignore - **(config.get(key) or {}), # type: ignore + base["metadata"] = { + **base.get("metadata", {}), + **(config.get("metadata") or {}), } elif key == "tags": - base[key] = sorted( # type: ignore - set(base.get(key, []) + (config.get(key) or [])), # type: ignore + base["tags"] = sorted( + set(base.get("tags", []) + (config.get("tags") or [])), ) elif key == "configurable": - base[key] = { # type: ignore - **base.get(key, {}), # type: ignore - **(config.get(key) or {}), # type: ignore + base["configurable"] = { + **base.get("configurable", {}), + **(config.get("configurable") or {}), } elif key == "callbacks": base_callbacks = base.get("callbacks") @@ -390,7 +390,7 @@ def merge_configs(*configs: Optional[RunnableConfig]) -> RunnableConfig: elif key in COPIABLE_KEYS and config[key] is not None: # type: ignore[literal-required] base[key] = config[key].copy() # type: ignore[literal-required] else: - base[key] = config[key] or base.get(key) # type: ignore + base[key] = config[key] or base.get(key) # type: ignore[literal-required] return base diff --git a/libs/core/langchain_core/runnables/fallbacks.py b/libs/core/langchain_core/runnables/fallbacks.py index 2b57229fce1..e3ef9c057c2 100644 --- a/libs/core/langchain_core/runnables/fallbacks.py +++ 
+++ b/libs/core/langchain_core/runnables/fallbacks.py
@@ -30,7 +30,7 @@ from langchain_core.runnables.utils import (
     ConfigurableFieldSpec,
     Input,
     Output,
-    asyncio_accepts_context,
+    coro_with_context,
     get_unique_config_specs,
 )
 from langchain_core.utils.aiter import py_anext
@@ -241,10 +241,7 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
                 child_config = patch_config(config, callbacks=run_manager.get_child())
                 with set_config_context(child_config) as context:
                     coro = context.run(runnable.ainvoke, input, config, **kwargs)
-                    if asyncio_accepts_context():
-                        output = await asyncio.create_task(coro, context=context)  # type: ignore
-                    else:
-                        output = await coro
+                    output = await coro_with_context(coro, context)
             except self.exceptions_to_handle as e:
                 if first_error is None:
                     first_error = e
@@ -335,7 +332,7 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
                     run_again.pop(i)
                 elif isinstance(output, self.exceptions_to_handle):
                     if self.exception_key:
-                        input[self.exception_key] = output  # type: ignore
+                        input[self.exception_key] = output  # type: ignore[index]
                     handled_exceptions[i] = cast("BaseException", output)
                 else:
                     run_managers[i].on_chain_end(output)
@@ -432,7 +429,7 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
                     run_again.pop(i)
                 elif isinstance(output, self.exceptions_to_handle):
                     if self.exception_key:
-                        input[self.exception_key] = output  # type: ignore
+                        input[self.exception_key] = output  # type: ignore[index]
                     handled_exceptions[i] = cast("BaseException", output)
                 else:
                     to_return[i] = output
@@ -455,7 +452,7 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
         if not return_exceptions and sorted_handled_exceptions:
             raise sorted_handled_exceptions[0][1]
         to_return.update(handled_exceptions)
-        return [output for _, output in sorted(to_return.items())]  # type: ignore
+        return [output for _, output in sorted(to_return.items())]  # type: ignore[misc]
 
     @override
     def stream(
@@ -493,7 +490,7 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
                     input,
                     **kwargs,
                 )
-                chunk: Output = context.run(next, stream)  # type: ignore
+                chunk: Output = context.run(next, stream)
             except self.exceptions_to_handle as e:
                 first_error = e if first_error is None else first_error
                 last_error = e
@@ -513,7 +510,7 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
             for chunk in stream:
                 yield chunk
                 try:
-                    output = output + chunk  # type: ignore
+                    output = output + chunk  # type: ignore[operator]
                 except TypeError:
                     output = None
         except BaseException as e:
@@ -557,13 +554,7 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
                     child_config,
                     **kwargs,
                 )
-                if asyncio_accepts_context():
-                    chunk: Output = await asyncio.create_task(  # type: ignore[call-arg]
-                        py_anext(stream),  # type: ignore[arg-type]
-                        context=context,
-                    )
-                else:
-                    chunk = cast("Output", await py_anext(stream))
+                chunk = await coro_with_context(py_anext(stream), context)
             except self.exceptions_to_handle as e:
                 first_error = e if first_error is None else first_error
                 last_error = e
@@ -583,7 +574,7 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
             async for chunk in stream:
                 yield chunk
                 try:
-                    output = output + chunk  # type: ignore
+                    output = output + chunk  # type: ignore[operator]
                 except TypeError:
                     output = None
         except BaseException as e:
diff --git a/libs/core/langchain_core/runnables/passthrough.py b/libs/core/langchain_core/runnables/passthrough.py
index 428e98f5cf6..c9e853f7cf5 100644
--- a/libs/core/langchain_core/runnables/passthrough.py
+++ b/libs/core/langchain_core/runnables/passthrough.py
@@ -569,7 +569,7 @@ class RunnableAssign(RunnableSerializable[dict[str, Any], dict[str, Any]]):
                 # start map output stream
                 first_map_chunk_future = executor.submit(
                     next,
-                    map_output,  # type: ignore
+                    map_output,
                     None,
                 )
                 # consume passthrough stream
diff --git a/libs/core/langchain_core/runnables/utils.py b/libs/core/langchain_core/runnables/utils.py
index 9b3530f00ab..4482bd622b4 100644
--- a/libs/core/langchain_core/runnables/utils.py
+++ b/libs/core/langchain_core/runnables/utils.py
@@ -6,6 +6,7 @@ import ast
 import asyncio
 import inspect
 import textwrap
+from contextvars import Context
 from functools import lru_cache
 from inspect import signature
 from itertools import groupby
@@ -126,6 +127,26 @@ def asyncio_accepts_context() -> bool:
     return accepts_context(asyncio.create_task)
 
 
+def coro_with_context(
+    coro: Awaitable[Any], context: Context, *, create_task: bool = False
+) -> Awaitable[Any]:
+    """Await a coroutine with a context.
+
+    Args:
+        coro: The coroutine to await.
+        context: The context to use.
+        create_task: Whether to create a task. Defaults to False.
+
+    Returns:
+        The coroutine with the context.
+    """
+    if asyncio_accepts_context():
+        return asyncio.create_task(coro, context=context)  # type: ignore[arg-type,call-arg]
+    if create_task:
+        return asyncio.create_task(coro)  # type: ignore[arg-type]
+    return coro
+
+
 class IsLocalDict(ast.NodeVisitor):
     """Check if a name is a local dict."""
 
diff --git a/libs/core/langchain_core/tools/base.py b/libs/core/langchain_core/tools/base.py
index dec85291e51..daee8604307 100644
--- a/libs/core/langchain_core/tools/base.py
+++ b/libs/core/langchain_core/tools/base.py
@@ -2,7 +2,6 @@
 
 from __future__ import annotations
 
-import asyncio
 import functools
 import inspect
 import json
@@ -55,7 +54,7 @@ from langchain_core.runnables import (
     run_in_executor,
 )
 from langchain_core.runnables.config import set_config_context
-from langchain_core.runnables.utils import asyncio_accepts_context
+from langchain_core.runnables.utils import coro_with_context
 from langchain_core.utils.function_calling import (
     _parse_google_docstring,
     _py_38_safe_origin,
@@ -146,7 +145,7 @@ def _infer_arg_descriptions(
     """Infer argument descriptions from a function's docstring."""
     if hasattr(inspect, "get_annotations"):
         # This is for python < 3.10
-        annotations = inspect.get_annotations(fn)  # type: ignore
+        annotations = inspect.get_annotations(fn)
     else:
         annotations = getattr(fn, "__annotations__", {})
     if parse_docstring:
@@ -243,7 +242,7 @@ def create_schema_from_function(
     sig = inspect.signature(func)
 
     if _function_annotations_are_pydantic_v1(sig, func):
-        validated = validate_arguments_v1(func, config=_SchemaConfig)  # type: ignore
+        validated = validate_arguments_v1(func, config=_SchemaConfig)  # type: ignore[call-overload]
     else:
         # https://docs.pydantic.dev/latest/usage/validation_decorator/
         with warnings.catch_warnings():
@@ -251,7 +250,7 @@ def create_schema_from_function(
             # This code should be re-written to simply construct a pydantic model
             # using inspect.signature and create_model.
warnings.simplefilter("ignore", category=PydanticDeprecationWarning) - validated = validate_arguments(func, config=_SchemaConfig) # type: ignore + validated = validate_arguments(func, config=_SchemaConfig) # type: ignore[operator] # Let's ignore `self` and `cls` arguments for class and instance methods # If qualified name has a ".", then it likely belongs in a class namespace @@ -266,7 +265,7 @@ def create_schema_from_function( elif param.kind == param.VAR_KEYWORD: has_kwargs = True - inferred_model = validated.model # type: ignore + inferred_model = validated.model if filter_args: filter_args_ = filter_args @@ -854,10 +853,7 @@ class ChildTool(BaseTool): tool_kwargs[config_param] = config coro = self._arun(*tool_args, **tool_kwargs) - if asyncio_accepts_context(): - response = await asyncio.create_task(coro, context=context) # type: ignore - else: - response = await coro + response = await coro_with_context(coro, context) if self.response_format == "content_and_artifact": if not isinstance(response, tuple) or len(response) != 2: msg = ( diff --git a/libs/core/langchain_core/tools/convert.py b/libs/core/langchain_core/tools/convert.py index ee84facfaf5..19172970682 100644 --- a/libs/core/langchain_core/tools/convert.py +++ b/libs/core/langchain_core/tools/convert.py @@ -371,7 +371,7 @@ def _get_schema_from_runnable_and_arg_types( ) raise TypeError(msg) from e fields = {key: (key_type, Field(...)) for key, key_type in arg_types.items()} - return create_model(name, **fields) # type: ignore + return create_model(name, **fields) # type: ignore[call-overload] def convert_runnable_to_tool( diff --git a/libs/core/langchain_core/tracers/evaluation.py b/libs/core/langchain_core/tracers/evaluation.py index 111f94433a4..7fd1288d1af 100644 --- a/libs/core/langchain_core/tracers/evaluation.py +++ b/libs/core/langchain_core/tracers/evaluation.py @@ -132,7 +132,7 @@ class EvaluatorCallbackHandler(BaseTracer): ) evaluation_result = evaluator.evaluate_run( # This is subclass, but getting errors for some reason - run, # type: ignore + run, # type: ignore[arg-type] example=reference_example, ) eval_results = self._log_evaluation_feedback( diff --git a/libs/core/langchain_core/tracers/event_stream.py b/libs/core/langchain_core/tracers/event_stream.py index be87d663b51..54621f96ccc 100644 --- a/libs/core/langchain_core/tracers/event_stream.py +++ b/libs/core/langchain_core/tracers/event_stream.py @@ -789,7 +789,7 @@ async def _astream_events_implementation_v1( # Ignoring mypy complaint about too many different union combinations # This arises because many of the argument types are unions - async for log in _astream_log_implementation( # type: ignore[misc] + async for log in _astream_log_implementation( runnable, input, config=config, diff --git a/libs/core/langchain_core/tracers/langchain.py b/libs/core/langchain_core/tracers/langchain.py index 9694d04602c..81029726429 100644 --- a/libs/core/langchain_core/tracers/langchain.py +++ b/libs/core/langchain_core/tracers/langchain.py @@ -124,7 +124,7 @@ class LangChainTracer(BaseTracer): super()._start_trace(run) if run._client is None: - run._client = self.client # type: ignore + run._client = self.client # type: ignore[misc] def on_chat_model_start( self, diff --git a/libs/core/langchain_core/tracers/log_stream.py b/libs/core/langchain_core/tracers/log_stream.py index 56389a43b4a..6a146615f52 100644 --- a/libs/core/langchain_core/tracers/log_stream.py +++ b/libs/core/langchain_core/tracers/log_stream.py @@ -657,7 +657,7 @@ async def _astream_log_implementation( 
                     final_output = chunk
                 else:
                     try:
-                        final_output = final_output + chunk  # type: ignore
+                        final_output = final_output + chunk  # type: ignore[operator]
                     except TypeError:
                         prev_final_output = None
                         final_output = chunk
diff --git a/libs/core/langchain_core/utils/function_calling.py b/libs/core/langchain_core/utils/function_calling.py
index 3d79a3a9773..c94c552086c 100644
--- a/libs/core/langchain_core/utils/function_calling.py
+++ b/libs/core/langchain_core/utils/function_calling.py
@@ -249,7 +249,7 @@ def _convert_typed_dict_to_openai_function(typed_dict: type) -> FunctionDescript
         "type[BaseModel]",
         _convert_any_typed_dicts_to_pydantic(typed_dict, visited=visited),
     )
-    return _convert_pydantic_to_openai_function(model)  # type: ignore
+    return _convert_pydantic_to_openai_function(model)
 
 
 _MAX_TYPED_DICT_RECURSION = 25
@@ -691,7 +691,7 @@ def tool_example_to_messages(
         openai_tool_calls
     )
     for output, tool_call_dict in zip(tool_outputs, openai_tool_calls):
-        messages.append(ToolMessage(content=output, tool_call_id=tool_call_dict["id"]))  # type: ignore
+        messages.append(ToolMessage(content=output, tool_call_id=tool_call_dict["id"]))
 
     if ai_response:
         messages.append(AIMessage(content=ai_response))
diff --git a/libs/core/langchain_core/utils/mustache.py b/libs/core/langchain_core/utils/mustache.py
index 402bde36ac5..963723c7d71 100644
--- a/libs/core/langchain_core/utils/mustache.py
+++ b/libs/core/langchain_core/utils/mustache.py
@@ -370,12 +370,12 @@ def _get_key(
                 resolved_scope = getattr(resolved_scope, child)
             except (TypeError, AttributeError):
                 # Try as a list
-                resolved_scope = resolved_scope[int(child)]  # type: ignore
+                resolved_scope = resolved_scope[int(child)]  # type: ignore[index]
 
     try:
         # This allows for custom falsy data types
         # https://github.com/noahmorrison/chevron/issues/35
-        if resolved_scope._CHEVRON_return_scope_when_falsy:  # type: ignore
+        if resolved_scope._CHEVRON_return_scope_when_falsy:  # type: ignore[union-attr]
             return resolved_scope
     except AttributeError:
         if resolved_scope in (0, False):
diff --git a/libs/core/langchain_core/utils/pydantic.py b/libs/core/langchain_core/utils/pydantic.py
index 2c296429569..a1608064986 100644
--- a/libs/core/langchain_core/utils/pydantic.py
+++ b/libs/core/langchain_core/utils/pydantic.py
@@ -218,7 +218,7 @@ def pre_init(func: Callable) -> Any:
                     name not in values or values[name] is None
                 ) and not field_info.is_required():
                     if field_info.default_factory is not None:
-                        values[name] = field_info.default_factory()  # type: ignore
+                        values[name] = field_info.default_factory()  # type: ignore[call-arg]
                     else:
                         values[name] = field_info.default
 
@@ -253,7 +253,7 @@ def _create_subset_model_v1(
     if IS_PYDANTIC_V1:
        from pydantic import create_model
    elif IS_PYDANTIC_V2:
-        from pydantic.v1 import create_model  # type: ignore
+        from pydantic.v1 import create_model  # type: ignore[no-redef]
     else:
         msg = f"Unsupported pydantic version: {PYDANTIC_VERSION.major}"
         raise NotImplementedError(msg)
@@ -262,7 +262,7 @@ def _create_subset_model_v1(
 
     for field_name in field_names:
         # Using pydantic v1 so can access __fields__ as a dict.
-        field = model.__fields__[field_name]  # type: ignore
+        field = model.__fields__[field_name]  # type: ignore[index]
         t = (
             # this isn't perfect but should work for most functions
             field.outer_type_
@@ -273,7 +273,7 @@ def _create_subset_model_v1(
             field.field_info.description = descriptions[field_name]
         fields[field_name] = (t, field.field_info)
 
-    rtn = create_model(name, **fields)  # type: ignore
+    rtn = create_model(name, **fields)  # type: ignore[call-overload]
     rtn.__doc__ = textwrap.dedent(fn_description or model.__doc__ or "")
     return rtn
 
@@ -293,14 +293,14 @@ def _create_subset_model_v2(
     descriptions_ = descriptions or {}
     fields = {}
     for field_name in field_names:
-        field = model.model_fields[field_name]  # type: ignore
+        field = model.model_fields[field_name]
         description = descriptions_.get(field_name, field.description)
         field_info = FieldInfo(description=description, default=field.default)
         if field.metadata:
             field_info.metadata = field.metadata
         fields[field_name] = (field.annotation, field_info)
 
-    rtn = create_model(  # type: ignore
+    rtn = create_model(  # type: ignore[call-overload]
         name, **fields, __config__=ConfigDict(arbitrary_types_allowed=True)
     )
 
@@ -383,10 +383,10 @@ if IS_PYDANTIC_V2:
     ) -> Union[dict[str, FieldInfoV2], dict[str, FieldInfoV1]]:
         """Get the field names of a Pydantic model."""
         if hasattr(model, "model_fields"):
-            return model.model_fields  # type: ignore
+            return model.model_fields  # type: ignore[call-overload,arg-type]
 
         if hasattr(model, "__fields__"):
-            return model.__fields__  # type: ignore
+            return model.__fields__  # type: ignore[return-value]
 
         msg = f"Expected a Pydantic model. Got {type(model)}"
         raise TypeError(msg)
@@ -397,7 +397,7 @@ elif IS_PYDANTIC_V1:
         model: Union[type[BaseModelV1_], BaseModelV1_],
     ) -> dict[str, FieldInfoV1]:
         """Get the field names of a Pydantic model."""
-        return model.__fields__  # type: ignore
+        return model.__fields__  # type: ignore[return-value]
 
 else:
     msg = f"Unsupported Pydantic version: {PYDANTIC_VERSION.major}"
diff --git a/libs/core/langchain_core/vectorstores/utils.py b/libs/core/langchain_core/vectorstores/utils.py
index 6e407d44920..5a72e44cd9f 100644
--- a/libs/core/langchain_core/vectorstores/utils.py
+++ b/libs/core/langchain_core/vectorstores/utils.py
@@ -53,7 +53,7 @@ def _cosine_similarity(x: Matrix, y: Matrix) -> np.ndarray:
         )
         raise ValueError(msg)
     try:
-        import simsimd as simd  # type: ignore
+        import simsimd as simd  # type: ignore[import-not-found]
     except ImportError:
         logger.debug(
             "Unable to import simsimd, defaulting to NumPy implementation. If you want "
If you want " diff --git a/libs/core/pyproject.toml b/libs/core/pyproject.toml index 98914361bf9..c7f6f6a72ad 100644 --- a/libs/core/pyproject.toml +++ b/libs/core/pyproject.toml @@ -102,7 +102,6 @@ ignore = [ "ERA", "FBT001", "FBT002", - "PGH003", "PLR2004", "RUF", "SLF", diff --git a/libs/core/tests/benchmarks/test_imports.py b/libs/core/tests/benchmarks/test_imports.py index 2bd85a7ea0e..9fdc743cc82 100644 --- a/libs/core/tests/benchmarks/test_imports.py +++ b/libs/core/tests/benchmarks/test_imports.py @@ -2,7 +2,7 @@ import subprocess import sys import pytest -from pytest_benchmark.fixture import BenchmarkFixture # type: ignore +from pytest_benchmark.fixture import BenchmarkFixture # type: ignore[import-untyped] @pytest.mark.parametrize( diff --git a/libs/core/tests/unit_tests/fake/callbacks.py b/libs/core/tests/unit_tests/fake/callbacks.py index ca1eed68f37..b8ec1778b42 100644 --- a/libs/core/tests/unit_tests/fake/callbacks.py +++ b/libs/core/tests/unit_tests/fake/callbacks.py @@ -276,7 +276,7 @@ class FakeCallbackHandler(BaseCallbackHandler, BaseFakeCallbackHandlerMixin): self.on_retriever_error_common() # Overriding since BaseModel has __deepcopy__ method as well - def __deepcopy__(self, memo: dict) -> "FakeCallbackHandler": # type: ignore + def __deepcopy__(self, memo: dict) -> "FakeCallbackHandler": # type: ignore[override] return self @@ -426,5 +426,5 @@ class FakeAsyncCallbackHandler(AsyncCallbackHandler, BaseFakeCallbackHandlerMixi self.on_text_common() # Overriding since BaseModel has __deepcopy__ method as well - def __deepcopy__(self, memo: dict) -> "FakeAsyncCallbackHandler": # type: ignore + def __deepcopy__(self, memo: dict) -> "FakeAsyncCallbackHandler": # type: ignore[override] return self diff --git a/libs/core/tests/unit_tests/indexing/test_hashed_document.py b/libs/core/tests/unit_tests/indexing/test_hashed_document.py index 34378732852..a756e2d93a4 100644 --- a/libs/core/tests/unit_tests/indexing/test_hashed_document.py +++ b/libs/core/tests/unit_tests/indexing/test_hashed_document.py @@ -16,7 +16,7 @@ def test_hashing_with_missing_content() -> None: with pytest.raises(TypeError): _HashedDocument( metadata={"key": "value"}, - ) # type: ignore + ) # type: ignore[call-arg] def test_uid_auto_assigned_to_hash() -> None: diff --git a/libs/core/tests/unit_tests/indexing/test_in_memory_indexer.py b/libs/core/tests/unit_tests/indexing/test_in_memory_indexer.py index 517aee40b73..ffa39ab1dcc 100644 --- a/libs/core/tests/unit_tests/indexing/test_in_memory_indexer.py +++ b/libs/core/tests/unit_tests/indexing/test_in_memory_indexer.py @@ -24,7 +24,7 @@ class TestDocumentIndexerTestSuite(DocumentIndexerTestSuite): class TestAsyncDocumentIndexerTestSuite(AsyncDocumentIndexTestSuite): # Something funky is going on with mypy and async pytest fixture @pytest.fixture - async def index(self) -> AsyncGenerator[DocumentIndex, None]: # type: ignore + async def index(self) -> AsyncGenerator[DocumentIndex, None]: # type: ignore[override] yield InMemoryDocumentIndex() # noqa: PT022 diff --git a/libs/core/tests/unit_tests/indexing/test_indexing.py b/libs/core/tests/unit_tests/indexing/test_indexing.py index 683d25d2cf4..f3a9632eef0 100644 --- a/libs/core/tests/unit_tests/indexing/test_indexing.py +++ b/libs/core/tests/unit_tests/indexing/test_indexing.py @@ -45,7 +45,7 @@ def record_manager() -> InMemoryRecordManager: return record_manager -@pytest_asyncio.fixture # type: ignore +@pytest_asyncio.fixture async def arecord_manager() -> InMemoryRecordManager: """Timestamped set fixture.""" 
     record_manager = InMemoryRecordManager(namespace="hello")
@@ -194,7 +194,7 @@ def test_index_simple_delete_full(
 
     doc_texts = {
         # Ignoring type since doc should be in the store and not a None
-        vector_store.get_by_ids([uid])[0].page_content  # type: ignore
+        vector_store.get_by_ids([uid])[0].page_content
         for uid in vector_store.store
     }
     assert doc_texts == {"mutated document 1", "This is another document."}
@@ -284,7 +284,7 @@ async def test_aindex_simple_delete_full(
 
     doc_texts = {
         # Ignoring type since doc should be in the store and not a None
-        vector_store.get_by_ids([uid])[0].page_content  # type: ignore
+        vector_store.get_by_ids([uid])[0].page_content
         for uid in vector_store.store
     }
     assert doc_texts == {"mutated document 1", "This is another document."}
@@ -356,7 +356,7 @@ def test_index_delete_full_recovery_after_deletion_failure(
     # and the vector store
     doc_texts = {
         # Ignoring type since doc should be in the store and not a None
-        vector_store.get_by_ids([uid])[0].page_content  # type: ignore
+        vector_store.get_by_ids([uid])[0].page_content
         for uid in vector_store.store
     }
     assert doc_texts == {
@@ -373,7 +373,7 @@ def test_index_delete_full_recovery_after_deletion_failure(
     indexing_result = index(loader, record_manager, vector_store, cleanup="full")
     doc_texts = {
         # Ignoring type since doc should be in the store and not a None
-        vector_store.get_by_ids([uid])[0].page_content  # type: ignore
+        vector_store.get_by_ids([uid])[0].page_content
         for uid in vector_store.store
     }
     assert doc_texts == {"mutated document 1", "This is another document."}
@@ -441,7 +441,7 @@ async def test_aindex_delete_full_recovery_after_deletion_failure(
     # and the vector store
     doc_texts = {
         # Ignoring type since doc should be in the store and not a None
-        vector_store.get_by_ids([uid])[0].page_content  # type: ignore
+        vector_store.get_by_ids([uid])[0].page_content
         for uid in vector_store.store
     }
     assert doc_texts == {
@@ -460,7 +460,7 @@ async def test_aindex_delete_full_recovery_after_deletion_failure(
     )
     doc_texts = {
         # Ignoring type since doc should be in the store and not a None
-        vector_store.get_by_ids([uid])[0].page_content  # type: ignore
+        vector_store.get_by_ids([uid])[0].page_content
         for uid in vector_store.store
     }
     assert doc_texts == {"mutated document 1", "This is another document."}
@@ -659,7 +659,7 @@ def test_index_simple_delete_scoped_full(
     }
     doc_texts = {
         # Ignoring type since doc should be in the store and not a None
-        vector_store.get_by_ids([uid])[0].page_content  # type: ignore
+        vector_store.get_by_ids([uid])[0].page_content
         for uid in vector_store.store
     }
     assert doc_texts == {
@@ -781,7 +781,7 @@ async def test_aindex_simple_delete_scoped_full(
     }
     doc_texts = {
         # Ignoring type since doc should be in the store and not a None
-        vector_store.get_by_ids([uid])[0].page_content  # type: ignore
+        vector_store.get_by_ids([uid])[0].page_content
         for uid in vector_store.store
     }
     assert doc_texts == {
@@ -1273,7 +1273,7 @@ def test_incremental_delete(
 
     doc_texts = {
         # Ignoring type since doc should be in the store and not a None
-        vector_store.get_by_ids([uid])[0].page_content  # type: ignore
+        vector_store.get_by_ids([uid])[0].page_content
         for uid in vector_store.store
     }
     assert doc_texts == {"This is another document.", "This is a test document."}
@@ -1336,7 +1336,7 @@ def test_incremental_delete(
 
     doc_texts = {
         # Ignoring type since doc should be in the store and not a None
-        vector_store.get_by_ids([uid])[0].page_content  # type: ignore
+        vector_store.get_by_ids([uid])[0].page_content
         for uid in vector_store.store
     }
     assert doc_texts == {
@@ -1383,7 +1383,7 @@ def test_incremental_delete_with_same_source(
 
     doc_texts = {
         # Ignoring type since doc should be in the store and not a None
-        vector_store.get_by_ids([uid])[0].page_content  # type: ignore
+        vector_store.get_by_ids([uid])[0].page_content
         for uid in vector_store.store
     }
     assert doc_texts == {"This is another document.", "This is a test document."}
@@ -1418,7 +1418,7 @@ def test_incremental_delete_with_same_source(
 
     doc_texts = {
         # Ignoring type since doc should be in the store and not a None
-        vector_store.get_by_ids([uid])[0].page_content  # type: ignore
+        vector_store.get_by_ids([uid])[0].page_content
         for uid in vector_store.store
     }
     assert doc_texts == {
@@ -1472,7 +1472,7 @@ def test_incremental_indexing_with_batch_size(
 
     doc_texts = {
         # Ignoring type since doc should be in the store and not a None
-        vector_store.get_by_ids([uid])[0].page_content  # type: ignore
+        vector_store.get_by_ids([uid])[0].page_content
         for uid in vector_store.store
     }
     assert doc_texts == {"1", "2", "3", "4"}
@@ -1498,7 +1498,7 @@ def test_incremental_indexing_with_batch_size(
 
     doc_texts = {
         # Ignoring type since doc should be in the store and not a None
-        vector_store.get_by_ids([uid])[0].page_content  # type: ignore
+        vector_store.get_by_ids([uid])[0].page_content
         for uid in vector_store.store
     }
     assert doc_texts == {"1", "2", "3", "4"}
@@ -1550,7 +1550,7 @@ def test_incremental_delete_with_batch_size(
 
     doc_texts = {
         # Ignoring type since doc should be in the store and not a None
-        vector_store.get_by_ids([uid])[0].page_content  # type: ignore
+        vector_store.get_by_ids([uid])[0].page_content
         for uid in vector_store.store
     }
     assert doc_texts == {"1", "2", "3", "4"}
@@ -1577,7 +1577,7 @@ def test_incremental_delete_with_batch_size(
 
     doc_texts = {
         # Ignoring type since doc should be in the store and not a None
-        vector_store.get_by_ids([uid])[0].page_content  # type: ignore
+        vector_store.get_by_ids([uid])[0].page_content
         for uid in vector_store.store
     }
     assert doc_texts == {"1", "2", "3", "4"}
@@ -1615,7 +1615,7 @@ def test_incremental_delete_with_batch_size(
 
     doc_texts = {
         # Ignoring type since doc should be in the store and not a None
-        vector_store.get_by_ids([uid])[0].page_content  # type: ignore
+        vector_store.get_by_ids([uid])[0].page_content
         for uid in vector_store.store
     }
     assert doc_texts == {"1", "2", "3", "4"}
@@ -1653,7 +1653,7 @@ def test_incremental_delete_with_batch_size(
 
     doc_texts = {
         # Ignoring type since doc should be in the store and not a None
-        vector_store.get_by_ids([uid])[0].page_content  # type: ignore
+        vector_store.get_by_ids([uid])[0].page_content
         for uid in vector_store.store
     }
     assert doc_texts == {"1", "2", "3", "4"}
@@ -1690,7 +1690,7 @@ def test_incremental_delete_with_batch_size(
 
     doc_texts = {
         # Ignoring type since doc should be in the store and not a None
-        vector_store.get_by_ids([uid])[0].page_content  # type: ignore
+        vector_store.get_by_ids([uid])[0].page_content
         for uid in vector_store.store
     }
     assert doc_texts == {"changed 1", "changed 2", "3", "4"}
@@ -1733,7 +1733,7 @@ async def test_aincremental_delete(
 
     doc_texts = {
         # Ignoring type since doc should be in the store and not a None
-        vector_store.get_by_ids([uid])[0].page_content  # type: ignore
+        vector_store.get_by_ids([uid])[0].page_content
         for uid in vector_store.store
     }
     assert doc_texts == {"This is another document.", "This is a test document."}
@@ -1796,7 +1796,7 @@ async def test_aincremental_delete(
 
     doc_texts = {
         # Ignoring type since doc should be in the store and not a None
-        vector_store.get_by_ids([uid])[0].page_content  # type: ignore
+        vector_store.get_by_ids([uid])[0].page_content
         for uid in vector_store.store
     }
     assert doc_texts == {
@@ -2133,7 +2133,7 @@ def test_indexing_custom_batch_size(
 
     try:
         mock_add_documents = MagicMock()
-        vector_store.add_documents = mock_add_documents  # type: ignore
+        vector_store.add_documents = mock_add_documents  # type: ignore[method-assign]
 
         index(docs, record_manager, vector_store, batch_size=batch_size)
         args, kwargs = mock_add_documents.call_args
@@ -2143,7 +2143,7 @@ def test_indexing_custom_batch_size(
         assert args == ([doc_with_id],)
         assert kwargs == {"ids": ids, "batch_size": batch_size}
     finally:
-        vector_store.add_documents = original  # type: ignore
+        vector_store.add_documents = original  # type: ignore[method-assign]
 
 
 async def test_aindexing_custom_batch_size(
@@ -2163,7 +2163,7 @@ async def test_aindexing_custom_batch_size(
     doc_with_id = Document(
         id=ids[0], page_content="This is a test document.", metadata={"source": "1"}
     )
-    vector_store.aadd_documents = mock_add_documents  # type: ignore
+    vector_store.aadd_documents = mock_add_documents  # type: ignore[method-assign]
     await aindex(docs, arecord_manager, vector_store, batch_size=batch_size)
     args, kwargs = mock_add_documents.call_args
     assert args == ([doc_with_id],)
diff --git a/libs/core/tests/unit_tests/language_models/chat_models/test_base.py b/libs/core/tests/unit_tests/language_models/chat_models/test_base.py
index 328e745f7a6..04fd163726e 100644
--- a/libs/core/tests/unit_tests/language_models/chat_models/test_base.py
+++ b/libs/core/tests/unit_tests/language_models/chat_models/test_base.py
@@ -225,11 +225,11 @@ async def test_astream_implementation_uses_astream() -> None:
             raise NotImplementedError
 
         @override
-        async def _astream(  # type: ignore
+        async def _astream(
             self,
             messages: list[BaseMessage],
             stop: Optional[list[str]] = None,
-            run_manager: Optional[CallbackManagerForLLMRun] = None,
+            run_manager: Optional[CallbackManagerForLLMRun] = None,  # type: ignore[override]
             **kwargs: Any,
         ) -> AsyncIterator[ChatGenerationChunk]:
             """Stream the output of the model."""
diff --git a/libs/core/tests/unit_tests/output_parsers/test_base_parsers.py b/libs/core/tests/unit_tests/output_parsers/test_base_parsers.py
index d5c7efe3983..94afd7afb49 100644
--- a/libs/core/tests/unit_tests/output_parsers/test_base_parsers.py
+++ b/libs/core/tests/unit_tests/output_parsers/test_base_parsers.py
@@ -43,7 +43,7 @@ def test_base_generation_parser() -> None:
 
             content = generation.message.content
             assert isinstance(content, str)
-            return content.swapcase()  # type: ignore
+            return content.swapcase()
 
     StrInvertCase.model_rebuild()
 
@@ -86,7 +86,7 @@ def test_base_transform_output_parser() -> None:
                 raise OutputParserException(msg)
             content = generation.message.content
             assert isinstance(content, str)
-            return content.swapcase()  # type: ignore
+            return content.swapcase()
 
     model = GenericFakeChatModel(messages=iter([AIMessage(content="hello world")]))
     chain = model | StrInvertCase()
diff --git a/libs/core/tests/unit_tests/output_parsers/test_pydantic_parser.py b/libs/core/tests/unit_tests/output_parsers/test_pydantic_parser.py
index 027254f5233..c8ee28c4853 100644
--- a/libs/core/tests/unit_tests/output_parsers/test_pydantic_parser.py
+++ b/libs/core/tests/unit_tests/output_parsers/test_pydantic_parser.py
@@ -43,7 +43,7 @@ def test_pydantic_parser_chaining(
 
     model = ParrotFakeChatModel()
 
-    parser = PydanticOutputParser(pydantic_object=pydantic_object)  # type: ignore
+    parser = PydanticOutputParser(pydantic_object=pydantic_object)  # type: ignore[arg-type,var-annotated]
     chain = prompt | model | parser
 
     res = chain.invoke({})
@@ -66,7 +66,7 @@ def test_pydantic_parser_validation(pydantic_object: TBaseModel) -> None:
 
     model = ParrotFakeChatModel()
 
-    parser = PydanticOutputParser(pydantic_object=pydantic_object)  # type: ignore
+    parser = PydanticOutputParser(pydantic_object=pydantic_object)  # type: ignore[arg-type,var-annotated]
     chain = bad_prompt | model | parser
     with pytest.raises(OutputParserException):
         chain.invoke({})
@@ -88,7 +88,7 @@ def test_json_parser_chaining(
 
     model = ParrotFakeChatModel()
 
-    parser = JsonOutputParser(pydantic_object=pydantic_object)  # type: ignore
+    parser = JsonOutputParser(pydantic_object=pydantic_object)  # type: ignore[arg-type]
     chain = prompt | model | parser
 
     res = chain.invoke({})
@@ -172,7 +172,7 @@ def test_pydantic_output_parser_type_inference() -> None:
 
     # Ignoring mypy error that appears in python 3.8, but not 3.11.
     # This seems to be functionally correct, so we'll ignore the error.
-    pydantic_parser = PydanticOutputParser(pydantic_object=SampleModel)  # type: ignore
+    pydantic_parser = PydanticOutputParser(pydantic_object=SampleModel)
     schema = pydantic_parser.get_output_schema().model_json_schema()
 
     assert schema == {
@@ -203,5 +203,5 @@ def test_format_instructions_preserves_language() -> None:
         )
     )
 
-    parser = PydanticOutputParser(pydantic_object=Foo)  # type: ignore
+    parser = PydanticOutputParser(pydantic_object=Foo)
     assert description in parser.get_format_instructions()
diff --git a/libs/core/tests/unit_tests/prompts/test_chat.py b/libs/core/tests/unit_tests/prompts/test_chat.py
index 84a83c6ae2d..6b4552b078f 100644
--- a/libs/core/tests/unit_tests/prompts/test_chat.py
+++ b/libs/core/tests/unit_tests/prompts/test_chat.py
@@ -895,10 +895,10 @@ def test_chat_prompt_message_dict() -> None:
 
 async def test_messages_prompt_accepts_list() -> None:
     prompt = ChatPromptTemplate([MessagesPlaceholder("history")])
-    value = prompt.invoke([("user", "Hi there")])  # type: ignore
+    value = prompt.invoke([("user", "Hi there")])  # type: ignore[arg-type]
     assert value.to_messages() == [HumanMessage(content="Hi there")]
 
-    value = await prompt.ainvoke([("user", "Hi there")])  # type: ignore
+    value = await prompt.ainvoke([("user", "Hi there")])  # type: ignore[arg-type]
     assert value.to_messages() == [HumanMessage(content="Hi there")]
 
     # Assert still raises a nice error
@@ -909,10 +909,10 @@ async def test_messages_prompt_accepts_list() -> None:
         ]
     )
     with pytest.raises(TypeError):
-        prompt.invoke([("user", "Hi there")])  # type: ignore
+        prompt.invoke([("user", "Hi there")])  # type: ignore[arg-type]
 
     with pytest.raises(TypeError):
-        await prompt.ainvoke([("user", "Hi there")])  # type: ignore
+        await prompt.ainvoke([("user", "Hi there")])  # type: ignore[arg-type]
 
 
 def test_chat_input_schema(snapshot: SnapshotAssertion) -> None:
diff --git a/libs/core/tests/unit_tests/prompts/test_prompt.py b/libs/core/tests/unit_tests/prompts/test_prompt.py
index 03941ad6ff4..82c567c93a7 100644
--- a/libs/core/tests/unit_tests/prompts/test_prompt.py
+++ b/libs/core/tests/unit_tests/prompts/test_prompt.py
@@ -570,8 +570,8 @@ def test_prompt_invoke_with_metadata() -> None:
     )
     assert result.to_string() == "This is a bar test."
     assert len(tracer.traced_runs) == 1
-    assert tracer.traced_runs[0].extra["metadata"] == {"version": "1", "foo": "bar"}  # type: ignore
-    assert tracer.traced_runs[0].tags == ["tag1", "tag2"]  # type: ignore
+    assert tracer.traced_runs[0].extra["metadata"] == {"version": "1", "foo": "bar"}
+    assert tracer.traced_runs[0].tags == ["tag1", "tag2"]
 
 
 async def test_prompt_ainvoke_with_metadata() -> None:
@@ -589,8 +589,8 @@ async def test_prompt_ainvoke_with_metadata() -> None:
     )
     assert result.to_string() == "This is a bar test."
     assert len(tracer.traced_runs) == 1
-    assert tracer.traced_runs[0].extra["metadata"] == {"version": "1", "foo": "bar"}  # type: ignore
-    assert tracer.traced_runs[0].tags == ["tag1", "tag2"]  # type: ignore
+    assert tracer.traced_runs[0].extra["metadata"] == {"version": "1", "foo": "bar"}
+    assert tracer.traced_runs[0].tags == ["tag1", "tag2"]
 
 
 @pytest.mark.parametrize(
diff --git a/libs/core/tests/unit_tests/runnables/test_configurable.py b/libs/core/tests/unit_tests/runnables/test_configurable.py
index 0c1b3b6b206..be4bf4759e5 100644
--- a/libs/core/tests/unit_tests/runnables/test_configurable.py
+++ b/libs/core/tests/unit_tests/runnables/test_configurable.py
@@ -73,7 +73,7 @@ class MyOtherRunnable(RunnableSerializable[str, str]):
 
 def test_doubly_set_configurable() -> None:
     """Test that setting a configurable field with a default value works."""
-    runnable = MyRunnable(my_property="a")  # type: ignore
+    runnable = MyRunnable(my_property="a")  # type: ignore[call-arg]
     configurable_runnable = runnable.configurable_fields(
         my_property=ConfigurableField(
             id="my_property",
@@ -86,7 +86,7 @@ def test_doubly_set_configurable() -> None:
 
 
 def test_alias_set_configurable() -> None:
-    runnable = MyRunnable(my_property="a")  # type: ignore
+    runnable = MyRunnable(my_property="a")  # type: ignore[call-arg]
     configurable_runnable = runnable.configurable_fields(
         my_property=ConfigurableField(
             id="my_property_alias",
@@ -122,7 +122,7 @@ def test_field_alias_set_configurable() -> None:
 
 
 def test_config_passthrough() -> None:
-    runnable = MyRunnable(my_property="a")  # type: ignore
+    runnable = MyRunnable(my_property="a")  # type: ignore[call-arg]
     configurable_runnable = runnable.configurable_fields(
         my_property=ConfigurableField(
             id="my_property",
@@ -158,7 +158,7 @@ def test_config_passthrough() -> None:
 
 
 def test_config_passthrough_nested() -> None:
-    runnable = MyRunnable(my_property="a")  # type: ignore
+    runnable = MyRunnable(my_property="a")  # type: ignore[call-arg]
     configurable_runnable = runnable.configurable_fields(
         my_property=ConfigurableField(
             id="my_property",
diff --git a/libs/core/tests/unit_tests/runnables/test_history.py b/libs/core/tests/unit_tests/runnables/test_history.py
index 2a5f2a0a84f..f9ae0bf05aa 100644
--- a/libs/core/tests/unit_tests/runnables/test_history.py
+++ b/libs/core/tests/unit_tests/runnables/test_history.py
@@ -318,7 +318,7 @@ def test_output_messages() -> None:
     )
     get_session_history = _get_get_session_history()
     with_history = RunnableWithMessageHistory(
-        runnable,  # type: ignore
+        runnable,
         get_session_history,
         input_messages_key="input",
         history_messages_key="history",
@@ -348,7 +348,7 @@ async def test_output_messages_async() -> None:
     )
     get_session_history = _get_get_session_history()
     with_history = RunnableWithMessageHistory(
-        runnable,  # type: ignore
+        runnable,
         get_session_history,
         input_messages_key="input",
         history_messages_key="history",
@@ -553,7 +553,7 @@ def test_using_custom_config_specs() -> None:
         return store[(user_id, conversation_id)]
 

     with_message_history = RunnableWithMessageHistory(
-        runnable,  # type: ignore
+        runnable,  # type: ignore[arg-type]
         get_session_history=get_session_history,
         input_messages_key="messages",
         history_messages_key="history",
@@ -666,7 +666,7 @@ async def test_using_custom_config_specs_async() -> None:
         return store[(user_id, conversation_id)]

     with_message_history = RunnableWithMessageHistory(
-        runnable,  # type: ignore
+        runnable,  # type: ignore[arg-type]
         get_session_history=get_session_history,
         input_messages_key="messages",
         history_messages_key="history",
@@ -769,7 +769,7 @@ def test_ignore_session_id() -> None:
     runnable = RunnableLambda(_fake_llm)
     history = InMemoryChatMessageHistory()
-    with_message_history = RunnableWithMessageHistory(runnable, lambda: history)  # type: ignore
+    with_message_history = RunnableWithMessageHistory(runnable, lambda: history)  # type: ignore[arg-type]
     _ = with_message_history.invoke("hello")
     _ = with_message_history.invoke("hello again")
     assert len(history.messages) == 4
diff --git a/libs/core/tests/unit_tests/runnables/test_runnable.py b/libs/core/tests/unit_tests/runnables/test_runnable.py
index 9a63d432678..b2fa8e292d5 100644
--- a/libs/core/tests/unit_tests/runnables/test_runnable.py
+++ b/libs/core/tests/unit_tests/runnables/test_runnable.py
@@ -647,7 +647,7 @@ def test_lambda_schemas(snapshot: SnapshotAssertion) -> None:

     if PYDANTIC_VERSION_AT_LEAST_29:
         assert _normalize_schema(
-            RunnableLambda(aget_values_typed).get_output_jsonschema()  # type: ignore
+            RunnableLambda(aget_values_typed).get_output_jsonschema()  # type: ignore[arg-type]
         ) == snapshot(name="schema8")


@@ -5359,7 +5359,7 @@ async def test_astream_log_deep_copies() -> None:

     def _get_run_log(run_log_patches: Sequence[RunLogPatch]) -> RunLog:
         """Get run log."""
-        run_log = RunLog(state=None)  # type: ignore
+        run_log = RunLog(state=None)  # type: ignore[arg-type]
         for log_patch in run_log_patches:
             run_log = run_log + log_patch
         return run_log
@@ -5379,7 +5379,7 @@ async def test_astream_log_deep_copies() -> None:
     state = run_log.state.copy()
     # Ignoring type here since we know that the state is a dict
     # so we can delete `id` for testing purposes
-    state.pop("id")  # type: ignore
+    state.pop("id")  # type: ignore[misc]
     assert state == {
         "final_output": 2,
         "logs": {},
@@ -5438,7 +5438,7 @@ def test_default_transform_with_dicts() -> None:
         def invoke(
             self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any
         ) -> Output:
-            return cast("Output", input)  # type: ignore
+            return cast("Output", input)

     runnable = CustomRunnable[dict[str, str], dict[str, str]]()
     chunks = iter(
@@ -5608,7 +5608,7 @@ def test_closing_iterator_doesnt_raise_error() -> None:
     st = chain_.stream("hello")
     next(st)
     # This is a generator so close is defined on it.
-    st.close()  # type: ignore
+    st.close()  # type: ignore[attr-defined]
     # Wait for a bit to make sure that the callback is called.
     time.sleep(0.05)
     assert on_chain_error_triggered is False
diff --git a/libs/core/tests/unit_tests/runnables/test_runnable_events_v1.py b/libs/core/tests/unit_tests/runnables/test_runnable_events_v1.py
index d099d35c978..bffe5ef6a96 100644
--- a/libs/core/tests/unit_tests/runnables/test_runnable_events_v1.py
+++ b/libs/core/tests/unit_tests/runnables/test_runnable_events_v1.py
@@ -1038,7 +1038,7 @@ async def test_event_streaming_with_tools() -> None:

     # type ignores below because the tools don't appear to be runnables to type checkers
     # we can remove as soon as that's fixed
-    events = await _collect_events(parameterless.astream_events({}, version="v1"))  # type: ignore
+    events = await _collect_events(parameterless.astream_events({}, version="v1"))
     _assert_events_equal_allow_superset_metadata(
         events,
         [
@@ -1072,7 +1072,7 @@ async def test_event_streaming_with_tools() -> None:
         ],
     )

-    events = await _collect_events(with_callbacks.astream_events({}, version="v1"))  # type: ignore
+    events = await _collect_events(with_callbacks.astream_events({}, version="v1"))
     _assert_events_equal_allow_superset_metadata(
         events,
         [
@@ -1106,7 +1106,7 @@ async def test_event_streaming_with_tools() -> None:
         ],
     )
     events = await _collect_events(
-        with_parameters.astream_events({"x": 1, "y": "2"}, version="v1")  # type: ignore
+        with_parameters.astream_events({"x": 1, "y": "2"}, version="v1")
     )
     _assert_events_equal_allow_superset_metadata(
         events,
@@ -1142,7 +1142,7 @@ async def test_event_streaming_with_tools() -> None:
     )

     events = await _collect_events(
-        with_parameters_and_callbacks.astream_events({"x": 1, "y": "2"}, version="v1")  # type: ignore
+        with_parameters_and_callbacks.astream_events({"x": 1, "y": "2"}, version="v1")
     )
     _assert_events_equal_allow_superset_metadata(
         events,
@@ -1393,7 +1393,7 @@ async def test_event_stream_on_chain_with_tool() -> None:

     # For whatever reason type annotations fail here because reverse
     # does not appear to be a runnable
-    chain = concat | reverse  # type: ignore
+    chain = concat | reverse

     events = await _collect_events(
         chain.astream_events({"a": "hello", "b": "world"}, version="v1")
@@ -1819,7 +1819,7 @@ async def test_runnable_each() -> None:
     async def add_one(x: int) -> int:
         return x + 1

-    add_one_map = RunnableLambda(add_one).map()  # type: ignore
+    add_one_map = RunnableLambda(add_one).map()  # type: ignore[arg-type,var-annotated]
     assert await add_one_map.ainvoke([1, 2, 3]) == [2, 3, 4]

     with pytest.raises(NotImplementedError):
@@ -2048,7 +2048,7 @@ async def test_sync_in_async_stream_lambdas() -> None:
         results = list(streaming)
         return results[0]

-    add_one_proxy = RunnableLambda(add_one_proxy_)  # type: ignore
+    add_one_proxy = RunnableLambda(add_one_proxy_)  # type: ignore[arg-type,var-annotated]
     events = await _collect_events(add_one_proxy.astream_events(1, version="v1"))
     _assert_events_equal_allow_superset_metadata(events, EXPECTED_EVENTS)
@@ -2060,7 +2060,7 @@ async def test_async_in_async_stream_lambdas() -> None:
     async def add_one(x: int) -> int:
         return x + 1

-    add_one_ = RunnableLambda(add_one)  # type: ignore
+    add_one_ = RunnableLambda(add_one)  # type: ignore[arg-type,var-annotated]

     async def add_one_proxy(x: int, config: RunnableConfig) -> int:
         # Use sync streaming
@@ -2068,7 +2068,7 @@ async def test_async_in_async_stream_lambdas() -> None:
         results = [result async for result in streaming]
         return results[0]

-    add_one_proxy_ = RunnableLambda(add_one_proxy)  # type: ignore
+    add_one_proxy_ = RunnableLambda(add_one_proxy)  # type: ignore[arg-type,var-annotated]
     events = await _collect_events(add_one_proxy_.astream_events(1, version="v1"))
     _assert_events_equal_allow_superset_metadata(events, EXPECTED_EVENTS)
diff --git a/libs/core/tests/unit_tests/runnables/test_runnable_events_v2.py b/libs/core/tests/unit_tests/runnables/test_runnable_events_v2.py
index f20a06b932c..5f1b113341c 100644
--- a/libs/core/tests/unit_tests/runnables/test_runnable_events_v2.py
+++ b/libs/core/tests/unit_tests/runnables/test_runnable_events_v2.py
@@ -1060,7 +1060,7 @@ async def test_event_streaming_with_tools() -> None:

     # type ignores below because the tools don't appear to be runnables to type checkers
     # we can remove as soon as that's fixed
-    events = await _collect_events(parameterless.astream_events({}, version="v2"))  # type: ignore
+    events = await _collect_events(parameterless.astream_events({}, version="v2"))
     _assert_events_equal_allow_superset_metadata(
         events,
         [
@@ -1084,7 +1084,7 @@ async def test_event_streaming_with_tools() -> None:
             },
         ],
     )
-    events = await _collect_events(with_callbacks.astream_events({}, version="v2"))  # type: ignore
+    events = await _collect_events(with_callbacks.astream_events({}, version="v2"))
     _assert_events_equal_allow_superset_metadata(
         events,
         [
@@ -1109,7 +1109,7 @@ async def test_event_streaming_with_tools() -> None:
         ],
     )
     events = await _collect_events(
-        with_parameters.astream_events({"x": 1, "y": "2"}, version="v2")  # type: ignore
+        with_parameters.astream_events({"x": 1, "y": "2"}, version="v2")
     )
     _assert_events_equal_allow_superset_metadata(
         events,
@@ -1136,7 +1136,7 @@ async def test_event_streaming_with_tools() -> None:
     )

     events = await _collect_events(
-        with_parameters_and_callbacks.astream_events({"x": 1, "y": "2"}, version="v2")  # type: ignore
+        with_parameters_and_callbacks.astream_events({"x": 1, "y": "2"}, version="v2")
     )
     _assert_events_equal_allow_superset_metadata(
         events,
@@ -1354,7 +1354,7 @@ async def test_event_stream_on_chain_with_tool() -> None:

     # For whatever reason type annotations fail here because reverse
     # does not appear to be a runnable
-    chain = concat | reverse  # type: ignore
+    chain = concat | reverse

     events = await _collect_events(
         chain.astream_events({"a": "hello", "b": "world"}, version="v2")
@@ -1769,7 +1769,7 @@ async def test_runnable_each() -> None:
     async def add_one(x: int) -> int:
         return x + 1

-    add_one_map = RunnableLambda(add_one).map()  # type: ignore
+    add_one_map = RunnableLambda(add_one).map()  # type: ignore[arg-type,var-annotated]
     assert await add_one_map.ainvoke([1, 2, 3]) == [2, 3, 4]

     with pytest.raises(NotImplementedError):
@@ -2016,7 +2016,7 @@ async def test_sync_in_async_stream_lambdas(blockbuster: BlockBuster) -> None:
         results = list(streaming)
         return results[0]

-    add_one_proxy_ = RunnableLambda(add_one_proxy)  # type: ignore
+    add_one_proxy_ = RunnableLambda(add_one_proxy)  # type: ignore[arg-type,var-annotated]
     events = await _collect_events(add_one_proxy_.astream_events(1, version="v2"))
     _assert_events_equal_allow_superset_metadata(events, EXPECTED_EVENTS)
@@ -2028,7 +2028,7 @@ async def test_async_in_async_stream_lambdas() -> None:
     async def add_one(x: int) -> int:
         return x + 1

-    add_one_ = RunnableLambda(add_one)  # type: ignore
+    add_one_ = RunnableLambda(add_one)  # type: ignore[arg-type,var-annotated]

     async def add_one_proxy(x: int, config: RunnableConfig) -> int:
         # Use sync streaming
@@ -2036,7 +2036,7 @@ async def test_async_in_async_stream_lambdas() -> None:
         results = [result async for result in streaming]
         return results[0]

-    add_one_proxy_ = RunnableLambda(add_one_proxy)  # type: ignore
+    add_one_proxy_ = RunnableLambda(add_one_proxy)  # type: ignore[arg-type,var-annotated]
     events = await _collect_events(add_one_proxy_.astream_events(1, version="v2"))
     _assert_events_equal_allow_superset_metadata(events, EXPECTED_EVENTS)
@@ -2190,19 +2190,19 @@ async def test_parent_run_id_assignment() -> None:
     # Type ignores in the code below need to be investigated.
     # Looks like a typing issue when using RunnableLambda as a decorator
     # with async functions.
-    @RunnableLambda  # type: ignore
+    @RunnableLambda  # type: ignore[arg-type]
     async def grandchild(x: str) -> str:
         return x

-    @RunnableLambda  # type: ignore
+    @RunnableLambda  # type: ignore[arg-type]
     async def child(x: str, config: RunnableConfig) -> str:
         config["run_id"] = uuid.UUID(int=9)
-        return await grandchild.ainvoke(x, config)  # type: ignore
+        return await grandchild.ainvoke(x, config)  # type: ignore[arg-type]

-    @RunnableLambda  # type: ignore
+    @RunnableLambda  # type: ignore[arg-type]
     async def parent(x: str, config: RunnableConfig) -> str:
         config["run_id"] = uuid.UUID(int=8)
-        return await child.ainvoke(x, config)  # type: ignore
+        return await child.ainvoke(x, config)  # type: ignore[arg-type]

     bond = uuid.UUID(int=7)
     events = await _collect_events(
@@ -2291,14 +2291,14 @@ async def test_bad_parent_ids() -> None:
     # Type ignores in the code below need to be investigated.
     # Looks like a typing issue when using RunnableLambda as a decorator
     # with async functions.
-    @RunnableLambda  # type: ignore
+    @RunnableLambda  # type: ignore[arg-type]
     async def child(x: str) -> str:
         return x

-    @RunnableLambda  # type: ignore
+    @RunnableLambda  # type: ignore[arg-type]
     async def parent(x: str, config: RunnableConfig) -> str:
         config["run_id"] = uuid.UUID(int=7)
-        return await child.ainvoke(x, config)  # type: ignore
+        return await child.ainvoke(x, config)  # type: ignore[arg-type]

     bond = uuid.UUID(int=7)
     events = await _collect_events(
@@ -2399,9 +2399,7 @@ async def test_with_explicit_config() -> None:

         return await chain.ainvoke(query)

-    events = await _collect_events(
-        say_hello.astream_events("meow", version="v2")  # type: ignore
-    )
+    events = await _collect_events(say_hello.astream_events("meow", version="v2"))

     assert [
         event["data"]["chunk"].content
diff --git a/libs/core/tests/unit_tests/runnables/test_tracing_interops.py b/libs/core/tests/unit_tests/runnables/test_tracing_interops.py
index a681fd8a12d..67cacadaad4 100644
--- a/libs/core/tests/unit_tests/runnables/test_tracing_interops.py
+++ b/libs/core/tests/unit_tests/runnables/test_tracing_interops.py
@@ -21,7 +21,7 @@ from langchain_core.tracers.langchain import LangChainTracer


 def _get_posts(client: Client) -> list:
-    mock_calls = client.session.request.mock_calls  # type: ignore
+    mock_calls = client.session.request.mock_calls  # type: ignore[attr-defined]
     posts = []
     for call in mock_calls:
         if call.args:
@@ -163,13 +163,13 @@ async def test_config_traceable_async_handoff() -> None:
     def my_great_grandchild_function(a: int) -> int:
         return my_great_great_grandchild_function(a)

-    @RunnableLambda  # type: ignore
+    @RunnableLambda  # type: ignore[arg-type,attr-defined]
     async def my_grandchild_function(a: int) -> int:
         return my_great_grandchild_function.invoke(a)

     @traceable
     async def my_child_function(a: int) -> int:
-        return await my_grandchild_function.ainvoke(a) * 3  # type: ignore
+        return await my_grandchild_function.ainvoke(a) * 3  # type: ignore[arg-type,attr-defined]

     @traceable()
     async def my_function(a: int) -> int:
@@ -178,7 +178,7 @@ async def test_config_traceable_async_handoff() -> None:
     async def my_parent_function(a: int) -> int:
         return await my_function(a)

-    my_parent_runnable = RunnableLambda(my_parent_function)  # type: ignore
+    my_parent_runnable = RunnableLambda(my_parent_function)  # type: ignore[arg-type,var-annotated]
     result = await my_parent_runnable.ainvoke(1, {"callbacks": [tracer]})
     assert result == 6
     posts = _get_posts(tracer.client)
@@ -280,7 +280,7 @@ class TestRunnableSequenceParallelTraceNesting:
         sequence = before | parallel | after

         if isasyncgenfunction(other_thing):

-            @RunnableLambda  # type: ignore
+            @RunnableLambda  # type: ignore[arg-type]
             async def parent(a: int) -> int:
                 return await sequence.ainvoke(a)
@@ -375,7 +375,7 @@ class TestRunnableSequenceParallelTraceNesting:
     def test_sync(
         self, method: Callable[[RunnableLambda, list[BaseCallbackHandler]], int]
     ) -> None:
-        def other_thing(_: int) -> Generator[int, None, None]:  # type: ignore
+        def other_thing(_: int) -> Generator[int, None, None]:
            yield 1

         parent = self._create_parent(other_thing)
@@ -467,9 +467,9 @@ def test_tree_is_constructed(parent_type: Literal["ls", "lc"]) -> None:
         return child.invoke("foo")

     tracer = LangChainTracer()
-    tracer._persist_run = collect_run  # type: ignore
+    tracer._persist_run = collect_run  # type: ignore[method-assign]

-    assert parent.invoke(..., {"run_id": rid, "callbacks": [tracer]}) == "foo"  # type: ignore
+    assert parent.invoke(..., {"run_id": rid, "callbacks": [tracer]}) == "foo"  # type: ignore[attr-defined]

     run = collected.get(str(rid))
     assert run is not None
@@ -482,9 +482,9 @@ def test_tree_is_constructed(parent_type: Literal["ls", "lc"]) -> None:
     assert grandchild_run.name == "grandchild"
     assert grandchild_run.child_runs
     assert grandchild_run.metadata.get("some_foo") == "some_bar"
-    assert "afoo" in grandchild_run.tags  # type: ignore
+    assert "afoo" in grandchild_run.tags  # type: ignore[operator]
     kitten_run = grandchild_run.child_runs[0]
     assert kitten_run.name == "kitten"
     assert not kitten_run.child_runs
     assert kitten_run.metadata.get("some_foo") == "some_bar"
-    assert "afoo" in kitten_run.tags  # type: ignore
+    assert "afoo" in kitten_run.tags  # type: ignore[operator]
diff --git a/libs/core/tests/unit_tests/stores/test_in_memory.py b/libs/core/tests/unit_tests/stores/test_in_memory.py
index 1ae91db4a38..60a0b307224 100644
--- a/libs/core/tests/unit_tests/stores/test_in_memory.py
+++ b/libs/core/tests/unit_tests/stores/test_in_memory.py
@@ -14,7 +14,7 @@ class TestSyncInMemoryStore(BaseStoreSyncTests):
         return InMemoryStore()

     @pytest.fixture
-    def three_values(self) -> tuple[str, str, str]:  # type: ignore
+    def three_values(self) -> tuple[str, str, str]:
         return "value1", "value2", "value3"


@@ -24,7 +24,7 @@ class TestAsyncInMemoryStore(BaseStoreAsyncTests):
         return InMemoryStore()

     @pytest.fixture
-    def three_values(self) -> tuple[str, str, str]:  # type: ignore
+    def three_values(self) -> tuple[str, str, str]:  # type: ignore[override]
         return "value1", "value2", "value3"


diff --git a/libs/core/tests/unit_tests/test_tools.py b/libs/core/tests/unit_tests/test_tools.py
index f21bf705872..7d08adf42cc 100644
--- a/libs/core/tests/unit_tests/test_tools.py
+++ b/libs/core/tests/unit_tests/test_tools.py
@@ -143,7 +143,7 @@ def test_misannotated_base_tool_raises_error() -> None:
     class _MisAnnotatedTool(BaseTool):
         name: str = "structured_api"
         # This would silently be ignored without the custom metaclass
-        args_schema: BaseModel = _MockSchema  # type: ignore
+        args_schema: BaseModel = _MockSchema  # type: ignore[assignment]
         description: str = "A Structured Tool"

         def _run(self, arg1: int, arg2: bool, arg3: Optional[dict] = None) -> str:
@@ -499,7 +499,7 @@ def test_structured_tool_lambda_multi_args_schema() -> None:
     tool = StructuredTool.from_function(
         name="tool",
         description="A tool",
-        func=lambda tool_input, other_arg: f"{tool_input}{other_arg}",  # type: ignore
+        func=lambda tool_input, other_arg: f"{tool_input}{other_arg}",
     )
     assert tool.args_schema is not None
     expected_args = {
@@ -1015,10 +1015,10 @@ def test_tool_invoke_optional_args(inputs: dict, expected: Optional[dict]) -> No
     }

     if expected is not None:
-        assert foo.invoke(inputs) == expected  # type: ignore
+        assert foo.invoke(inputs) == expected
     else:
         with pytest.raises(ValidationError):
-            foo.invoke(inputs)  # type: ignore
+            foo.invoke(inputs)


 def test_tool_pass_context() -> None:
@@ -1030,7 +1030,7 @@ def test_tool_pass_context() -> None:
         assert bar == "baz"
         return bar

-    assert foo.invoke({"bar": "baz"}, {"configurable": {"foo": "not-bar"}}) == "baz"  # type: ignore
+    assert foo.invoke({"bar": "baz"}, {"configurable": {"foo": "not-bar"}}) == "baz"


 @pytest.mark.skipif(
@@ -1047,7 +1047,7 @@ async def test_async_tool_pass_context() -> None:
         return bar

     assert (
-        await foo.ainvoke({"bar": "baz"}, {"configurable": {"foo": "not-bar"}}) == "baz"  # type: ignore
+        await foo.ainvoke({"bar": "baz"}, {"configurable": {"foo": "not-bar"}}) == "baz"
     )
@@ -1147,7 +1147,7 @@ def test_tool_description() -> None:
         return bar

     foo1 = tool(foo)
-    assert foo1.description == "The foo."  # type: ignore
+    assert foo1.description == "The foo."

     foo2 = StructuredTool.from_function(foo)
     assert foo2.description == "The foo."
@@ -1164,7 +1164,7 @@ def test_tool_arg_descriptions() -> None:
         return bar

     foo1 = tool(foo)
-    args_schema = _schema(foo1.args_schema)  # type: ignore
+    args_schema = _schema(foo1.args_schema)
     assert args_schema == {
         "title": "foo",
         "type": "object",
@@ -1178,7 +1178,7 @@ def test_tool_arg_descriptions() -> None:

     # Test parses docstring
     foo2 = tool(foo, parse_docstring=True)
-    args_schema = _schema(foo2.args_schema)  # type: ignore
+    args_schema = _schema(foo2.args_schema)
     expected = {
         "title": "foo",
         "description": "The foo.",
@@ -1204,7 +1204,7 @@ def test_tool_arg_descriptions() -> None:
         return bar

     as_tool = tool(foo3, parse_docstring=True)
-    args_schema = _schema(as_tool.args_schema)  # type: ignore
+    args_schema = _schema(as_tool.args_schema)
     assert args_schema["description"] == expected["description"]
     assert args_schema["properties"] == expected["properties"]
@@ -1215,7 +1215,7 @@ def test_tool_arg_descriptions() -> None:
         return "bar"

     as_tool = tool(foo4, parse_docstring=True)
-    args_schema = _schema(as_tool.args_schema)  # type: ignore
+    args_schema = _schema(as_tool.args_schema)
     assert args_schema["description"] == expected["description"]

     def foo5(run_manager: Optional[CallbackManagerForToolRun] = None) -> str:
@@ -1223,7 +1223,7 @@ def test_tool_arg_descriptions() -> None:
         return "bar"

     as_tool = tool(foo5, parse_docstring=True)
-    args_schema = _schema(as_tool.args_schema)  # type: ignore
+    args_schema = _schema(as_tool.args_schema)
     assert args_schema["description"] == expected["description"]
@@ -1250,7 +1250,7 @@ def test_docstring_parsing() -> None:
         return bar

     as_tool = tool(foo, parse_docstring=True)
-    args_schema = _schema(as_tool.args_schema)  # type: ignore
+    args_schema = _schema(as_tool.args_schema)
     assert args_schema["description"] == "The foo."
assert args_schema["properties"] == expected["properties"] @@ -1267,7 +1267,7 @@ def test_docstring_parsing() -> None: return bar as_tool = tool(foo2, parse_docstring=True) - args_schema2 = _schema(as_tool.args_schema) # type: ignore + args_schema2 = _schema(as_tool.args_schema) assert args_schema2["description"] == "The foo. Additional description here." assert args_schema2["properties"] == expected["properties"] @@ -1287,7 +1287,7 @@ def test_docstring_parsing() -> None: return bar as_tool = tool(foo3, parse_docstring=True) - args_schema3 = _schema(as_tool.args_schema) # type: ignore + args_schema3 = _schema(as_tool.args_schema) args_schema3["title"] = "foo2" assert args_schema2 == args_schema3 @@ -1301,7 +1301,7 @@ def test_docstring_parsing() -> None: return bar as_tool = tool(foo4, parse_docstring=True) - args_schema4 = _schema(as_tool.args_schema) # type: ignore + args_schema4 = _schema(as_tool.args_schema) assert args_schema4["description"] == "The foo." assert args_schema4["properties"] == { "bar": {"description": "The bar.", "title": "Bar", "type": "string"} @@ -1354,7 +1354,7 @@ def test_tool_annotated_descriptions() -> None: return bar foo1 = tool(foo) - args_schema = _schema(foo1.args_schema) # type: ignore + args_schema = _schema(foo1.args_schema) assert args_schema == { "title": "foo", "type": "object", @@ -2305,14 +2305,14 @@ def test_injected_arg_with_complex_type() -> None: """Tool that has an injected tool arg.""" return foo.value - assert injected_tool.invoke({"x": 5, "foo": Foo()}) == "bar" # type: ignore + assert injected_tool.invoke({"x": 5, "foo": Foo()}) == "bar" def test_tool_injected_tool_call_id() -> None: @tool def foo(x: int, tool_call_id: Annotated[str, InjectedToolCallId]) -> ToolMessage: """Foo.""" - return ToolMessage(x, tool_call_id=tool_call_id) # type: ignore + return ToolMessage(x, tool_call_id=tool_call_id) # type: ignore[arg-type] assert foo.invoke( { @@ -2321,7 +2321,7 @@ def test_tool_injected_tool_call_id() -> None: "name": "foo", "id": "bar", } - ) == ToolMessage(0, tool_call_id="bar") # type: ignore + ) == ToolMessage(0, tool_call_id="bar") # type: ignore[arg-type] with pytest.raises( ValueError, @@ -2333,7 +2333,7 @@ def test_tool_injected_tool_call_id() -> None: @tool def foo2(x: int, tool_call_id: Annotated[str, InjectedToolCallId()]) -> ToolMessage: """Foo.""" - return ToolMessage(x, tool_call_id=tool_call_id) # type: ignore + return ToolMessage(x, tool_call_id=tool_call_id) # type: ignore[arg-type] assert foo2.invoke( { @@ -2342,14 +2342,14 @@ def test_tool_injected_tool_call_id() -> None: "name": "foo", "id": "bar", } - ) == ToolMessage(0, tool_call_id="bar") # type: ignore + ) == ToolMessage(0, tool_call_id="bar") # type: ignore[arg-type] def test_tool_uninjected_tool_call_id() -> None: @tool def foo(x: int, tool_call_id: str) -> ToolMessage: """Foo.""" - return ToolMessage(x, tool_call_id=tool_call_id) # type: ignore + return ToolMessage(x, tool_call_id=tool_call_id) # type: ignore[arg-type] with pytest.raises(ValueError, match="1 validation error for foo"): foo.invoke({"type": "tool_call", "args": {"x": 0}, "name": "foo", "id": "bar"}) @@ -2361,7 +2361,7 @@ def test_tool_uninjected_tool_call_id() -> None: "name": "foo", "id": "bar", } - ) == ToolMessage(0, tool_call_id="zap") # type: ignore + ) == ToolMessage(0, tool_call_id="zap") # type: ignore[arg-type] def test_tool_return_output_mixin() -> None: diff --git a/libs/core/tests/unit_tests/tracers/test_langchain.py b/libs/core/tests/unit_tests/tracers/test_langchain.py index 
71fabecc948..a1690b5833c 100644 --- a/libs/core/tests/unit_tests/tracers/test_langchain.py +++ b/libs/core/tests/unit_tests/tracers/test_langchain.py @@ -65,11 +65,11 @@ def test_example_id_assignment_threadsafe() -> None: def test_tracer_with_run_tree_parent() -> None: mock_session = unittest.mock.MagicMock() client = Client(session=mock_session, api_key="test") - parent = RunTree(name="parent", inputs={"input": "foo"}, _client=client) # type: ignore + parent = RunTree(name="parent", inputs={"input": "foo"}, ls_client=client) run_id = uuid.uuid4() tracer = LangChainTracer(client=client) tracer.order_map[parent.id] = (parent.trace_id, parent.dotted_order) - tracer.run_map[str(parent.id)] = parent # type: ignore + tracer.run_map[str(parent.id)] = parent tracer.on_chain_start( {"name": "child"}, {"input": "bar"}, run_id=run_id, parent_run_id=parent.id ) diff --git a/libs/core/tests/unit_tests/utils/test_function_calling.py b/libs/core/tests/unit_tests/utils/test_function_calling.py index 7e6f41992cb..1d2e264153b 100644 --- a/libs/core/tests/unit_tests/utils/test_function_calling.py +++ b/libs/core/tests/unit_tests/utils/test_function_calling.py @@ -366,7 +366,7 @@ def test_convert_to_openai_function( dummy_extensions_typed_dict, dummy_extensions_typed_dict_docstring, ): - actual = convert_to_openai_function(fn) # type: ignore + actual = convert_to_openai_function(fn) assert actual == expected # Test runnables @@ -749,7 +749,7 @@ def test__convert_typed_dict_to_openai_function( class SubTool(typed_dict): """Subtool docstring.""" - args: annotated[dict[str, Any], {}, "this does bar"] # noqa: F722 # type: ignore + args: annotated[dict[str, Any], {}, "this does bar"] # noqa: F722 class Tool(typed_dict): """Docstring. @@ -774,7 +774,7 @@ def test__convert_typed_dict_to_openai_function( arg12: annotated[dict[str, SubTool], ...] arg13: annotated[Mapping[str, SubTool], ...] arg14: annotated[MutableMapping[str, SubTool], ...] 
-        arg15: annotated[bool, False, "flag"]  # noqa: F821 # type: ignore
+        arg15: annotated[bool, False, "flag"]  # noqa: F821

     expected = {
         "name": "Tool",
@@ -1061,5 +1061,5 @@ def test_convert_to_json_schema(
         dummy_extensions_typed_dict,
         dummy_extensions_typed_dict_docstring,
     ):
-        actual = convert_to_json_schema(fn)  # type: ignore
+        actual = convert_to_json_schema(fn)
         assert actual == expected
diff --git a/libs/core/tests/unit_tests/utils/test_pydantic.py b/libs/core/tests/unit_tests/utils/test_pydantic.py
index 070c4e01e12..8cea721c616 100644
--- a/libs/core/tests/unit_tests/utils/test_pydantic.py
+++ b/libs/core/tests/unit_tests/utils/test_pydantic.py
@@ -32,9 +32,9 @@ def test_pre_init_decorator() -> None:
         return v

     # Type ignore initialization b/c y is marked as required
-    foo = Foo()  # type: ignore
+    foo = Foo()  # type: ignore[call-arg]
     assert foo.y == 6
-    foo = Foo(x=10)  # type: ignore
+    foo = Foo(x=10)  # type: ignore[call-arg]
     assert foo.y == 11
@@ -57,7 +57,7 @@ def test_pre_init_decorator_with_more_defaults() -> None:

     # Try to create an instance of Foo
     # nothing is required, but mypy can't track the default for `c`
-    Foo()  # type: ignore
+    Foo()


 def test_with_aliases() -> None:
@@ -78,19 +78,19 @@ def test_with_aliases() -> None:

     # Based on defaults
     # z is required
-    foo = Foo()  # type: ignore
+    foo = Foo()  # type: ignore[call-arg]
     assert foo.x == 1
     assert foo.z == 1

     # Based on field name
     # z is required
-    foo = Foo(x=2)  # type: ignore
+    foo = Foo(x=2)  # type: ignore[call-arg]
     assert foo.x == 2
     assert foo.z == 2

     # Based on alias
     # z is required
-    foo = Foo(y=2)  # type: ignore
+    foo = Foo(y=2)  # type: ignore[call-arg]
     assert foo.x == 2
     assert foo.z == 2

diff --git a/libs/core/tests/unit_tests/utils/test_utils.py b/libs/core/tests/unit_tests/utils/test_utils.py
index e0d08913f9a..258b447409b 100644
--- a/libs/core/tests/unit_tests/utils/test_utils.py
+++ b/libs/core/tests/unit_tests/utils/test_utils.py
@@ -386,7 +386,7 @@ def test_using_secret_from_env_as_default_factory(
     )

     # We know it will be SecretStr rather than Optional[SecretStr]
-    assert Buzz().secret.get_secret_value() == "hello"  # type: ignore
+    assert Buzz().secret.get_secret_value() == "hello"  # type: ignore[union-attr]

     class OhMy(BaseModel):
         secret: Optional[SecretStr] = Field(
diff --git a/libs/core/tests/unit_tests/vectorstores/test_vectorstore.py b/libs/core/tests/unit_tests/vectorstores/test_vectorstore.py
index c8e6df87ac9..a0269331124 100644
--- a/libs/core/tests/unit_tests/vectorstores/test_vectorstore.py
+++ b/libs/core/tests/unit_tests/vectorstores/test_vectorstore.py
@@ -54,7 +54,7 @@ class CustomAddTextsVectorstore(VectorStore):

     @classmethod
     @override
-    def from_texts(  # type: ignore
+    def from_texts(
         cls,
         texts: list[str],
         embedding: Embeddings,
@@ -100,7 +100,7 @@ class CustomAddDocumentsVectorstore(VectorStore):

     @classmethod
     @override
-    def from_texts(  # type: ignore
+    def from_texts(
         cls,
         texts: list[str],
         embedding: Embeddings,