diff --git a/libs/cli/langchain_cli/integration_template/integration_template/document_loaders.py b/libs/cli/langchain_cli/integration_template/integration_template/document_loaders.py
index 7dacdd6faeb..cb4c1aa78fd 100644
--- a/libs/cli/langchain_cli/integration_template/integration_template/document_loaders.py
+++ b/libs/cli/langchain_cli/integration_template/integration_template/document_loaders.py
@@ -61,6 +61,7 @@ class __ModuleName__Loader(BaseLoader):
         .. code-block:: python

             TODO: Example output
+
     """  # noqa: E501

     # TODO: This method must be implemented to load documents.
diff --git a/libs/cli/langchain_cli/integration_template/integration_template/tools.py b/libs/cli/langchain_cli/integration_template/integration_template/tools.py
index 1904c9865b7..156f9c719cf 100644
--- a/libs/cli/langchain_cli/integration_template/integration_template/tools.py
+++ b/libs/cli/langchain_cli/integration_template/integration_template/tools.py
@@ -61,6 +61,7 @@ class __ModuleName__Tool(BaseTool):  # type: ignore[override]
         .. code-block:: python

             # TODO: output of invocation
+
     """  # noqa: E501

     # TODO: Set tool name and description
diff --git a/libs/core/langchain_core/_api/beta_decorator.py b/libs/core/langchain_core/_api/beta_decorator.py
index 8fe8d2b81b8..4849a195f1d 100644
--- a/libs/core/langchain_core/_api/beta_decorator.py
+++ b/libs/core/langchain_core/_api/beta_decorator.py
@@ -70,6 +70,7 @@ def beta(
             @beta
             def the_function_to_annotate():
                 pass
+
     """

     def beta(
diff --git a/libs/core/langchain_core/_api/deprecation.py b/libs/core/langchain_core/_api/deprecation.py
index 63d04cd720b..762c76f7845 100644
--- a/libs/core/langchain_core/_api/deprecation.py
+++ b/libs/core/langchain_core/_api/deprecation.py
@@ -136,6 +136,7 @@ def deprecated(
             @deprecated('1.4.0')
             def the_function_to_deprecate():
                 pass
+
     """
     _validate_deprecation_params(
         removal, alternative, alternative_import, pending=pending
@@ -549,6 +550,7 @@ def rename_parameter(

             @_api.rename_parameter("3.1", "bad_name", "good_name")
             def func(good_name): ...
+
     """

     def decorator(f: Callable[_P, _R]) -> Callable[_P, _R]:
diff --git a/libs/core/langchain_core/beta/runnables/context.py b/libs/core/langchain_core/beta/runnables/context.py
index 3aa76f34ed9..9901913ab31 100644
--- a/libs/core/langchain_core/beta/runnables/context.py
+++ b/libs/core/langchain_core/beta/runnables/context.py
@@ -363,6 +363,7 @@ class Context:
             print(output["result"])  # Output: "hello"
             print(output["context"])  # Output: "What's your name?"
             print(output["input"])  # Output: "What's your name?
+
     """

     @staticmethod
diff --git a/libs/core/langchain_core/callbacks/file.py b/libs/core/langchain_core/callbacks/file.py
index ec6d173457d..7e948dc29ac 100644
--- a/libs/core/langchain_core/callbacks/file.py
+++ b/libs/core/langchain_core/callbacks/file.py
@@ -53,6 +53,7 @@ class FileCallbackHandler(BaseCallbackHandler):
     When not used as a context manager, a deprecation warning will be issued
     on first use. The file will be opened immediately in ``__init__`` and
     closed in ``__del__`` or when ``close()`` is called explicitly.
+ """ def __init__( diff --git a/libs/core/langchain_core/callbacks/manager.py b/libs/core/langchain_core/callbacks/manager.py index 3b4f1f6e30e..56fc1bb67ba 100644 --- a/libs/core/langchain_core/callbacks/manager.py +++ b/libs/core/langchain_core/callbacks/manager.py @@ -105,6 +105,7 @@ def trace_as_chain_group( # Use the callback manager for the chain group res = llm.invoke(llm_input, {"callbacks": manager}) manager.on_chain_end({"output": res}) + """ # noqa: E501 from langchain_core.tracers.context import _get_trace_callbacks @@ -186,6 +187,7 @@ async def atrace_as_chain_group( # Use the async callback manager for the chain group res = await llm.ainvoke(llm_input, {"callbacks": manager}) await manager.on_chain_end({"output": res}) + """ # noqa: E501 from langchain_core.tracers.context import _get_trace_callbacks @@ -2575,6 +2577,7 @@ async def adispatch_custom_event( behalf. .. versionadded:: 0.2.15 + """ from langchain_core.runnables.config import ( ensure_config, @@ -2645,6 +2648,7 @@ def dispatch_custom_event( foo_.invoke({"a": "1"}, {"callbacks": [CustomCallbackManager()]}) .. versionadded:: 0.2.15 + """ from langchain_core.runnables.config import ( ensure_config, diff --git a/libs/core/langchain_core/callbacks/usage.py b/libs/core/langchain_core/callbacks/usage.py index e30d77ba2ce..0249cadec1f 100644 --- a/libs/core/langchain_core/callbacks/usage.py +++ b/libs/core/langchain_core/callbacks/usage.py @@ -44,6 +44,7 @@ class UsageMetadataCallbackHandler(BaseCallbackHandler): 'input_token_details': {'cache_read': 0, 'cache_creation': 0}}} .. versionadded:: 0.3.49 + """ def __init__(self) -> None: @@ -127,6 +128,7 @@ def get_usage_metadata_callback( 'input_token_details': {'cache_read': 0, 'cache_creation': 0}}} .. versionadded:: 0.3.49 + """ from langchain_core.tracers.context import register_configure_hook diff --git a/libs/core/langchain_core/chat_history.py b/libs/core/langchain_core/chat_history.py index f985f25ad01..1ddee584f77 100644 --- a/libs/core/langchain_core/chat_history.py +++ b/libs/core/langchain_core/chat_history.py @@ -91,6 +91,7 @@ class BaseChatMessageHistory(ABC): def clear(self): with open(os.path.join(storage_path, session_id), "w") as f: f.write("[]") + """ messages: list[BaseMessage] diff --git a/libs/core/langchain_core/document_loaders/langsmith.py b/libs/core/langchain_core/document_loaders/langsmith.py index b9a5de4fac3..67089f520a3 100644 --- a/libs/core/langchain_core/document_loaders/langsmith.py +++ b/libs/core/langchain_core/document_loaders/langsmith.py @@ -36,6 +36,7 @@ class LangSmithLoader(BaseLoader): # -> [Document("...", metadata={"inputs": {...}, "outputs": {...}, ...}), ...] .. 
versionadded:: 0.2.34 + """ # noqa: E501 def __init__( diff --git a/libs/core/langchain_core/documents/base.py b/libs/core/langchain_core/documents/base.py index 3b7f9637627..b22ee910bc0 100644 --- a/libs/core/langchain_core/documents/base.py +++ b/libs/core/langchain_core/documents/base.py @@ -102,6 +102,7 @@ class Blob(BaseMedia): # Read the blob as a byte stream with blob.as_bytes_io() as f: print(f.read()) + """ data: Union[bytes, str, None] = None @@ -265,6 +266,7 @@ class Document(BaseMedia): page_content="Hello, world!", metadata={"source": "https://example.com"} ) + """ page_content: str diff --git a/libs/core/langchain_core/embeddings/fake.py b/libs/core/langchain_core/embeddings/fake.py index d788416d9c2..99069d08fb3 100644 --- a/libs/core/langchain_core/embeddings/fake.py +++ b/libs/core/langchain_core/embeddings/fake.py @@ -46,6 +46,7 @@ class FakeEmbeddings(Embeddings, BaseModel): 2 [-0.5670477847544458, -0.31403828652395727, -0.5840547508955257] + """ size: int @@ -103,6 +104,7 @@ class DeterministicFakeEmbedding(Embeddings, BaseModel): 2 [-0.5670477847544458, -0.31403828652395727, -0.5840547508955257] + """ size: int diff --git a/libs/core/langchain_core/language_models/_utils.py b/libs/core/langchain_core/language_models/_utils.py index 3b7c0c5debe..883f8c855ea 100644 --- a/libs/core/langchain_core/language_models/_utils.py +++ b/libs/core/langchain_core/language_models/_utils.py @@ -51,6 +51,7 @@ def _parse_data_uri(uri: str) -> Optional[dict]: "mime_type": "image/jpeg", "data": "/9j/4AAQSkZJRg...", } + """ regex = r"^data:(?P[^;]+);base64,(?P.+)$" match = re.match(regex, uri) diff --git a/libs/core/langchain_core/language_models/chat_models.py b/libs/core/langchain_core/language_models/chat_models.py index dcd3809bee5..d37ca232052 100644 --- a/libs/core/langchain_core/language_models/chat_models.py +++ b/libs/core/langchain_core/language_models/chat_models.py @@ -1467,6 +1467,7 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC): .. versionchanged:: 0.2.26 Added support for TypedDict class. + """ # noqa: E501 _ = kwargs.pop("method", None) _ = kwargs.pop("strict", None) diff --git a/libs/core/langchain_core/language_models/llms.py b/libs/core/langchain_core/language_models/llms.py index c106e0698e0..5c24d516e7d 100644 --- a/libs/core/langchain_core/language_models/llms.py +++ b/libs/core/langchain_core/language_models/llms.py @@ -1418,6 +1418,7 @@ class BaseLLM(BaseLanguageModel[str], ABC): .. code-block:: python llm.save(file_path="path/llm.yaml") + """ # Convert file to Path object. save_path = Path(file_path) diff --git a/libs/core/langchain_core/memory.py b/libs/core/langchain_core/memory.py index 2b8d4bdd2b7..b9249567b1c 100644 --- a/libs/core/langchain_core/memory.py +++ b/libs/core/langchain_core/memory.py @@ -53,6 +53,7 @@ class BaseMemory(Serializable, ABC): def clear(self) -> None: pass + """ # noqa: E501 model_config = ConfigDict( diff --git a/libs/core/langchain_core/messages/ai.py b/libs/core/langchain_core/messages/ai.py index c62a03345d2..c81187dc3f6 100644 --- a/libs/core/langchain_core/messages/ai.py +++ b/libs/core/langchain_core/messages/ai.py @@ -57,6 +57,7 @@ class InputTokenDetails(TypedDict, total=False): .. versionadded:: 0.3.9 May also hold extra provider-specific keys. + """ audio: int @@ -89,6 +90,7 @@ class OutputTokenDetails(TypedDict, total=False): } .. versionadded:: 0.3.9 + """ audio: int @@ -128,6 +130,7 @@ class UsageMetadata(TypedDict): .. versionchanged:: 0.3.9 Added ``input_token_details`` and ``output_token_details``. 
+ """ input_tokens: int diff --git a/libs/core/langchain_core/messages/human.py b/libs/core/langchain_core/messages/human.py index 4e19904ab33..1be4cbfa9d3 100644 --- a/libs/core/langchain_core/messages/human.py +++ b/libs/core/langchain_core/messages/human.py @@ -28,6 +28,7 @@ class HumanMessage(BaseMessage): # Instantiate a chat model and invoke it with the messages model = ... print(model.invoke(messages)) + """ example: bool = False diff --git a/libs/core/langchain_core/messages/tool.py b/libs/core/langchain_core/messages/tool.py index 09c9eb04f24..1f8a519a7dc 100644 --- a/libs/core/langchain_core/messages/tool.py +++ b/libs/core/langchain_core/messages/tool.py @@ -59,6 +59,7 @@ class ToolMessage(BaseMessage, ToolOutputMixin): The tool_call_id field is used to associate the tool call request with the tool call response. This is useful in situations where a chat model is able to request multiple tool calls in parallel. + """ # noqa: E501 tool_call_id: str @@ -191,6 +192,7 @@ class ToolCall(TypedDict): This represents a request to call the tool named "foo" with arguments {"a": 1} and an identifier of "123". + """ name: str @@ -240,6 +242,7 @@ class ToolCallChunk(TypedDict): AIMessageChunk(content="", tool_call_chunks=left_chunks) + AIMessageChunk(content="", tool_call_chunks=right_chunks) ).tool_call_chunks == [ToolCallChunk(name='foo', args='{"a":1}', index=0)] + """ name: Optional[str] diff --git a/libs/core/langchain_core/messages/utils.py b/libs/core/langchain_core/messages/utils.py index 77f323293dd..11c044eb438 100644 --- a/libs/core/langchain_core/messages/utils.py +++ b/libs/core/langchain_core/messages/utils.py @@ -111,6 +111,7 @@ def get_buffer_string( ] get_buffer_string(messages) # -> "Human: Hi, how are you?\nAI: Good, how are you?" + """ string_messages = [] for m in messages: @@ -463,6 +464,7 @@ def filter_messages( SystemMessage("you're a good assistant."), HumanMessage("what's your name", id="foo", name="example_user"), ] + """ # noqa: E501 messages = convert_to_messages(messages) filtered: list[BaseMessage] = [] @@ -869,6 +871,7 @@ def trim_messages( HumanMessage("This is a 4 token text. 
The full message is 10 tokens.", id="first"), AIMessage( [{"type": "text", "text": "This is the FIRST 4 token block."}], id="second"), ] + """ # noqa: E501 # Validate arguments if start_on and strategy == "first": diff --git a/libs/core/langchain_core/output_parsers/base.py b/libs/core/langchain_core/output_parsers/base.py index 11737b74700..a187efb4b23 100644 --- a/libs/core/langchain_core/output_parsers/base.py +++ b/libs/core/langchain_core/output_parsers/base.py @@ -155,6 +155,7 @@ class BaseOutputParser( @property def _type(self) -> str: return "boolean_output_parser" + """ # noqa: E501 @property diff --git a/libs/core/langchain_core/output_parsers/openai_functions.py b/libs/core/langchain_core/output_parsers/openai_functions.py index 708eb5bd81b..129c9855061 100644 --- a/libs/core/langchain_core/output_parsers/openai_functions.py +++ b/libs/core/langchain_core/output_parsers/openai_functions.py @@ -214,6 +214,7 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser): pydantic_schema={"cookie": Cookie, "dog": Dog} ) result = parser.parse_result([chat_generation]) + """ pydantic_schema: Union[type[BaseModel], dict[str, type[BaseModel]]] diff --git a/libs/core/langchain_core/prompts/base.py b/libs/core/langchain_core/prompts/base.py index e8d05bb1a6a..f36c69be8e6 100644 --- a/libs/core/langchain_core/prompts/base.py +++ b/libs/core/langchain_core/prompts/base.py @@ -307,6 +307,7 @@ class BasePromptTemplate( .. code-block:: python prompt.format(variable1="foo") + """ async def aformat(self, **kwargs: Any) -> FormatOutputType: @@ -323,6 +324,7 @@ class BasePromptTemplate( .. code-block:: python await prompt.aformat(variable1="foo") + """ return self.format(**kwargs) @@ -363,6 +365,7 @@ class BasePromptTemplate( .. code-block:: python prompt.save(file_path="path/prompt.yaml") + """ if self.partial_variables: msg = "Cannot save prompt with partial variables." @@ -442,6 +445,7 @@ def format_document(doc: Document, prompt: BasePromptTemplate[str]) -> str: prompt = PromptTemplate.from_template("Page {page}: {page_content}") format_document(doc, prompt) >>> "Page 1: This is a joke" + """ return prompt.format(**_get_document_info(doc, prompt)) diff --git a/libs/core/langchain_core/prompts/chat.py b/libs/core/langchain_core/prompts/chat.py index 924762457f3..cfc0a0b38df 100644 --- a/libs/core/langchain_core/prompts/chat.py +++ b/libs/core/langchain_core/prompts/chat.py @@ -126,6 +126,7 @@ class MessagesPlaceholder(BaseMessagePromptTemplate): # -> [ # HumanMessage(content="Hello!"), # ] + """ variable_name: str @@ -1164,6 +1165,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate): Returns: a chat prompt template. 
+ """ return cls(messages, template_format=template_format) @@ -1248,6 +1250,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate): template2 = template.partial(user="Lucy", name="R2D2") template2.format_messages(input="hello") + """ prompt_dict = self.__dict__.copy() prompt_dict["input_variables"] = list( diff --git a/libs/core/langchain_core/prompts/few_shot.py b/libs/core/langchain_core/prompts/few_shot.py index 49d6bdf3b3c..1b42dcdc9c4 100644 --- a/libs/core/langchain_core/prompts/few_shot.py +++ b/libs/core/langchain_core/prompts/few_shot.py @@ -357,6 +357,7 @@ class FewShotChatMessagePromptTemplate( from langchain_core.chat_models import ChatAnthropic chain = final_prompt | ChatAnthropic(model="claude-3-haiku-20240307") chain.invoke({"input": "What's 3+3?"}) + """ input_variables: list[str] = Field(default_factory=list) diff --git a/libs/core/langchain_core/prompts/few_shot_with_templates.py b/libs/core/langchain_core/prompts/few_shot_with_templates.py index 7a32146f4bd..eaf8ab0fd15 100644 --- a/libs/core/langchain_core/prompts/few_shot_with_templates.py +++ b/libs/core/langchain_core/prompts/few_shot_with_templates.py @@ -122,6 +122,7 @@ class FewShotPromptWithTemplates(StringPromptTemplate): .. code-block:: python prompt.format(variable1="foo") + """ kwargs = self._merge_partial_and_user_variables(**kwargs) # Get the examples to use. diff --git a/libs/core/langchain_core/prompts/image.py b/libs/core/langchain_core/prompts/image.py index 525d2941a92..4240b6bdec0 100644 --- a/libs/core/langchain_core/prompts/image.py +++ b/libs/core/langchain_core/prompts/image.py @@ -90,6 +90,7 @@ class ImagePromptTemplate(BasePromptTemplate[ImageURL]): .. code-block:: python prompt.format(variable1="foo") + """ formatted = {} for k, v in self.template.items(): diff --git a/libs/core/langchain_core/prompts/pipeline.py b/libs/core/langchain_core/prompts/pipeline.py index 3b0160201d6..bb11fda48fd 100644 --- a/libs/core/langchain_core/prompts/pipeline.py +++ b/libs/core/langchain_core/prompts/pipeline.py @@ -45,6 +45,7 @@ class PipelinePromptTemplate(BasePromptTemplate): Each PromptTemplate will be formatted and then passed to future prompt templates as a variable with the same name as `name` + """ final_prompt: BasePromptTemplate diff --git a/libs/core/langchain_core/prompts/prompt.py b/libs/core/langchain_core/prompts/prompt.py index 6b77a4e71cb..0066445a050 100644 --- a/libs/core/langchain_core/prompts/prompt.py +++ b/libs/core/langchain_core/prompts/prompt.py @@ -56,6 +56,7 @@ class PromptTemplate(StringPromptTemplate): # Instantiation using initializer prompt = PromptTemplate(template="Say {foo}") + """ @property diff --git a/libs/core/langchain_core/prompts/structured.py b/libs/core/langchain_core/prompts/structured.py index 203c738681d..96dcb79124a 100644 --- a/libs/core/langchain_core/prompts/structured.py +++ b/libs/core/langchain_core/prompts/structured.py @@ -115,6 +115,7 @@ class StructuredPrompt(ChatPromptTemplate): Returns: a structured prompt template + """ return cls(messages, schema, **kwargs) diff --git a/libs/core/langchain_core/rate_limiters.py b/libs/core/langchain_core/rate_limiters.py index cf5c61d3ea7..dffdb580fa0 100644 --- a/libs/core/langchain_core/rate_limiters.py +++ b/libs/core/langchain_core/rate_limiters.py @@ -123,6 +123,7 @@ class InMemoryRateLimiter(BaseRateLimiter): .. 
versionadded:: 0.2.24 + """ # noqa: E501 def __init__( diff --git a/libs/core/langchain_core/retrievers.py b/libs/core/langchain_core/retrievers.py index a532d6368ff..efc9fc40a7b 100644 --- a/libs/core/langchain_core/retrievers.py +++ b/libs/core/langchain_core/retrievers.py @@ -124,6 +124,7 @@ class BaseRetriever(RunnableSerializable[RetrieverInput, RetrieverOutput], ABC): # Op -- (n_docs,1) -- Cosine Sim with each doc results = cosine_similarity(self.tfidf_array, query_vec).reshape((-1,)) return [self.docs[i] for i in results.argsort()[-self.k :][::-1]] + """ # noqa: E501 model_config = ConfigDict( @@ -230,6 +231,7 @@ class BaseRetriever(RunnableSerializable[RetrieverInput, RetrieverOutput], ABC): .. code-block:: python retriever.invoke("query") + """ from langchain_core.callbacks.manager import CallbackManager @@ -294,6 +296,7 @@ class BaseRetriever(RunnableSerializable[RetrieverInput, RetrieverOutput], ABC): .. code-block:: python await retriever.ainvoke("query") + """ from langchain_core.callbacks.manager import AsyncCallbackManager diff --git a/libs/core/langchain_core/runnables/base.py b/libs/core/langchain_core/runnables/base.py index aa87dc0edd6..5061cd3c5d0 100644 --- a/libs/core/langchain_core/runnables/base.py +++ b/libs/core/langchain_core/runnables/base.py @@ -236,6 +236,7 @@ class Runnable(ABC, Generic[Input, Output]): ) For a UI (and much more) checkout LangSmith: https://docs.smith.langchain.com/ + """ # noqa: E501 name: Optional[str] @@ -391,6 +392,7 @@ class Runnable(ABC, Generic[Input, Output]): print(runnable.get_input_jsonschema()) .. versionadded:: 0.3.0 + """ return self.get_input_schema(config).model_json_schema() @@ -464,6 +466,7 @@ class Runnable(ABC, Generic[Input, Output]): print(runnable.get_output_jsonschema()) .. versionadded:: 0.3.0 + """ return self.get_output_schema(config).model_json_schema() @@ -620,6 +623,7 @@ class Runnable(ABC, Generic[Input, Output]): sequence.batch([1, 2, 3]) await sequence.abatch([1, 2, 3]) # -> [4, 6, 8] + """ return RunnableSequence(self, *others, name=name) @@ -1361,6 +1365,7 @@ class Runnable(ABC, Generic[Input, Output]): Raises: NotImplementedError: If the version is not `v1` or `v2`. + """ # noqa: E501 from langchain_core.tracers.event_stream import ( _astream_events_implementation_v1, @@ -1607,6 +1612,7 @@ class Runnable(ABC, Generic[Input, Output]): on_end=fn_end ) chain.invoke(2) + """ from langchain_core.tracers.root_listeners import RootListenersTracer @@ -1825,6 +1831,7 @@ class Runnable(ABC, Generic[Input, Output]): runnable = RunnableLambda(_lambda) print(runnable.map().invoke([1, 2, 3])) # [2, 3, 4] + """ return RunnableEach(bound=self) @@ -2446,6 +2453,7 @@ class Runnable(ABC, Generic[Input, Output]): as_tool.invoke("b") .. 
versionadded:: 0.2.14 + """ # Avoid circular import from langchain_core.tools import convert_runnable_to_tool @@ -2517,6 +2525,7 @@ class RunnableSerializable(Serializable, Runnable[Input, Output]): configurable={"output_token_number": 200} ).invoke("tell me something about chess").content ) + """ from langchain_core.runnables.configurable import RunnableConfigurableFields @@ -2577,6 +2586,7 @@ class RunnableSerializable(Serializable, Runnable[Input, Output]): configurable={"llm": "openai"} ).invoke("which organization created you?").content ) + """ from langchain_core.runnables.configurable import ( RunnableConfigurableAlternatives, @@ -2741,6 +2751,7 @@ class RunnableSequence(RunnableSerializable[Input, Output]): async for chunk in chain.astream({'topic': 'colors'}): print('-') # noqa: T201 print(chunk, sep='', flush=True) # noqa: T201 + """ # The steps are broken into first, middle and last, solely for type checking @@ -3539,6 +3550,7 @@ class RunnableParallel(RunnableSerializable[Input, dict[str, Any]]): for key in chunk: output[key] = output[key] + chunk[key].content print(output) # noqa: T201 + """ steps__: Mapping[str, Runnable[Input, Any]] @@ -4061,6 +4073,7 @@ class RunnableGenerator(Runnable[Input, Output]): runnable = chant_chain | RunnableLambda(reverse_generator) "".join(runnable.stream({"topic": "waste"})) # ".elcycer ,esuer ,ecudeR" + """ def __init__( @@ -4321,6 +4334,7 @@ class RunnableLambda(Runnable[Input, Output]): runnable = RunnableLambda(add_one, afunc=add_one_async) runnable.invoke(1) # Uses add_one await runnable.ainvoke(1) # Uses add_one_async + """ def __init__( @@ -5175,6 +5189,7 @@ class RunnableEach(RunnableEachBase[Input, Output]): {'topic':'Art'}, {'topic':'Biology'}]) print(output) # noqa: T201 + """ @override @@ -5709,6 +5724,7 @@ class RunnableBinding(RunnableBindingBase[Input, Output]): kwargs={'stop': ['-']} # <-- Note the additional kwargs ) runnable_binding.invoke('Say "Parrot-MAGIC"') # Should return `Parrot` + """ @override @@ -5989,5 +6005,6 @@ def chain( for chunk in llm.stream(formatted): yield chunk + """ return RunnableLambda(func) diff --git a/libs/core/langchain_core/runnables/branch.py b/libs/core/langchain_core/runnables/branch.py index b6f9d5f4432..dffbc79310a 100644 --- a/libs/core/langchain_core/runnables/branch.py +++ b/libs/core/langchain_core/runnables/branch.py @@ -63,6 +63,7 @@ class RunnableBranch(RunnableSerializable[Input, Output]): branch.invoke("hello") # "HELLO" branch.invoke(None) # "goodbye" + """ branches: Sequence[tuple[Runnable[Input, bool], Runnable[Input, Output]]] diff --git a/libs/core/langchain_core/runnables/configurable.py b/libs/core/langchain_core/runnables/configurable.py index f79acb433d9..eaf1b3d8c9a 100644 --- a/libs/core/langchain_core/runnables/configurable.py +++ b/libs/core/langchain_core/runnables/configurable.py @@ -378,6 +378,7 @@ class RunnableConfigurableFields(DynamicRunnable[Input, Output]): {"question": "foo", "context": "bar"}, config={"configurable": {"hub_commit": "rlm/rag-prompt-llama"}}, ) + """ fields: dict[str, AnyConfigurableField] diff --git a/libs/core/langchain_core/runnables/fallbacks.py b/libs/core/langchain_core/runnables/fallbacks.py index 5ea6bcc40ce..a17552f23d0 100644 --- a/libs/core/langchain_core/runnables/fallbacks.py +++ b/libs/core/langchain_core/runnables/fallbacks.py @@ -85,6 +85,7 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]): | model | StrOutputParser() ).with_fallbacks([RunnableLambda(when_all_is_lost)]) + """ runnable: Runnable[Input, Output] diff 
--git a/libs/core/langchain_core/runnables/graph.py b/libs/core/langchain_core/runnables/graph.py index 713fbfdb133..3e22494bad7 100644 --- a/libs/core/langchain_core/runnables/graph.py +++ b/libs/core/langchain_core/runnables/graph.py @@ -611,6 +611,7 @@ class Graph: Returns: The Mermaid syntax string. + """ from langchain_core.runnables.graph_mermaid import draw_mermaid @@ -681,6 +682,7 @@ class Graph: Returns: The PNG image as bytes. + """ from langchain_core.runnables.graph_mermaid import draw_mermaid_png diff --git a/libs/core/langchain_core/runnables/graph_ascii.py b/libs/core/langchain_core/runnables/graph_ascii.py index 162fcd40bd6..c33353148d8 100644 --- a/libs/core/langchain_core/runnables/graph_ascii.py +++ b/libs/core/langchain_core/runnables/graph_ascii.py @@ -263,6 +263,7 @@ def draw_ascii(vertices: Mapping[str, str], edges: Sequence[LangEdge]) -> str: +---+ +---+ | 3 | | 4 | +---+ +---+ + """ # NOTE: coordinates might me negative, so we need to shift # everything to the positive plane before we actually draw it. diff --git a/libs/core/langchain_core/runnables/graph_mermaid.py b/libs/core/langchain_core/runnables/graph_mermaid.py index 410c6c56652..a7655072d69 100644 --- a/libs/core/langchain_core/runnables/graph_mermaid.py +++ b/libs/core/langchain_core/runnables/graph_mermaid.py @@ -70,6 +70,7 @@ def draw_mermaid( Returns: str: Mermaid graph syntax. + """ # Initialize Mermaid graph configuration original_frontmatter_config = frontmatter_config or {} diff --git a/libs/core/langchain_core/runnables/history.py b/libs/core/langchain_core/runnables/history.py index 659a83f9cc1..7ae97bcd711 100644 --- a/libs/core/langchain_core/runnables/history.py +++ b/libs/core/langchain_core/runnables/history.py @@ -311,6 +311,7 @@ class RunnableWithMessageHistory(RunnableBindingBase): into the get_session_history factory. **kwargs: Arbitrary additional kwargs to pass to parent class ``RunnableBindingBase`` init. + """ history_chain: Runnable = RunnableLambda( self._enter_history, self._aenter_history diff --git a/libs/core/langchain_core/runnables/passthrough.py b/libs/core/langchain_core/runnables/passthrough.py index 530a80e2c64..bed4ff8a820 100644 --- a/libs/core/langchain_core/runnables/passthrough.py +++ b/libs/core/langchain_core/runnables/passthrough.py @@ -132,6 +132,7 @@ class RunnablePassthrough(RunnableSerializable[Other, Other]): runnable.invoke('hello') # {'llm1': 'completion', 'llm2': 'completion', 'total_chars': 20} + """ input_type: Optional[type[Other]] = None @@ -393,6 +394,7 @@ class RunnableAssign(RunnableSerializable[dict[str, Any], dict[str, Any]]): # Asynchronous example await runnable_assign.ainvoke({"input": 5}) # returns {'input': 5, 'add_step': {'added': 15}} + """ mapper: RunnableParallel @@ -697,6 +699,7 @@ class RunnablePick(RunnableSerializable[dict[str, Any], dict[str, Any]]): output_data = runnable.invoke(input_data) print(output_data) # Output: {'name': 'John', 'age': 30} + """ keys: Union[str, list[str]] diff --git a/libs/core/langchain_core/runnables/retry.py b/libs/core/langchain_core/runnables/retry.py index 9a72f749d72..d495c59e6e9 100644 --- a/libs/core/langchain_core/runnables/retry.py +++ b/libs/core/langchain_core/runnables/retry.py @@ -110,6 +110,7 @@ class RunnableRetry(RunnableBindingBase[Input, Output]): # Bad chain = template | model retryable_chain = chain.with_retry() + """ # noqa: E501 retry_exception_types: tuple[type[BaseException], ...] 
= (Exception,) diff --git a/libs/core/langchain_core/runnables/router.py b/libs/core/langchain_core/runnables/router.py index 4de5896b030..c6af3168731 100644 --- a/libs/core/langchain_core/runnables/router.py +++ b/libs/core/langchain_core/runnables/router.py @@ -66,6 +66,7 @@ class RouterRunnable(RunnableSerializable[RouterInput, Output]): router = RouterRunnable(runnables={"add": add, "square": square}) router.invoke({"key": "square", "input": 3}) + """ runnables: Mapping[str, Runnable[Any, Output]] diff --git a/libs/core/langchain_core/runnables/schema.py b/libs/core/langchain_core/runnables/schema.py index 20ad580070a..1ec2c58a48f 100644 --- a/libs/core/langchain_core/runnables/schema.py +++ b/libs/core/langchain_core/runnables/schema.py @@ -83,6 +83,7 @@ class BaseStreamEvent(TypedDict): "tags": [], }, ] + """ event: str diff --git a/libs/core/langchain_core/stores.py b/libs/core/langchain_core/stores.py index 860fa1edc34..24ab0ea74d6 100644 --- a/libs/core/langchain_core/stores.py +++ b/libs/core/langchain_core/stores.py @@ -76,6 +76,7 @@ class BaseStore(ABC, Generic[K, V]): for key in self.store.keys(): if key.startswith(prefix): yield key + """ @abstractmethod @@ -302,6 +303,7 @@ class InMemoryStore(InMemoryBaseStore[Any]): # ['key2'] list(store.yield_keys(prefix='k')) # ['key2'] + """ @@ -327,6 +329,7 @@ class InMemoryByteStore(InMemoryBaseStore[bytes]): # ['key2'] list(store.yield_keys(prefix='k')) # ['key2'] + """ diff --git a/libs/core/langchain_core/tools/base.py b/libs/core/langchain_core/tools/base.py index 8fcd1e42218..e54a09709d6 100644 --- a/libs/core/langchain_core/tools/base.py +++ b/libs/core/langchain_core/tools/base.py @@ -1273,6 +1273,7 @@ class InjectedToolCallId(InjectedToolArg): name="foo", tool_call_id=tool_call_id ) + """ diff --git a/libs/core/langchain_core/tools/convert.py b/libs/core/langchain_core/tools/convert.py index d045179ee46..8b103fd54d6 100644 --- a/libs/core/langchain_core/tools/convert.py +++ b/libs/core/langchain_core/tools/convert.py @@ -215,6 +215,7 @@ def tool( monkey: The baz. \"\"\" return bar + """ # noqa: D214, D410, D411 def _create_tool_factory( diff --git a/libs/core/langchain_core/tools/structured.py b/libs/core/langchain_core/tools/structured.py index b3856ea4843..a419a1ede62 100644 --- a/libs/core/langchain_core/tools/structured.py +++ b/libs/core/langchain_core/tools/structured.py @@ -174,6 +174,7 @@ class StructuredTool(BaseTool): return a + b tool = StructuredTool.from_function(add) tool.run(1, 2) # 3 + """ if func is not None: source_function = func diff --git a/libs/core/langchain_core/utils/aiter.py b/libs/core/langchain_core/utils/aiter.py index 02a6d84da54..701a2321726 100644 --- a/libs/core/langchain_core/utils/aiter.py +++ b/libs/core/langchain_core/utils/aiter.py @@ -189,6 +189,7 @@ class Tee(Generic[T]): To enforce sequential use of ``anext``, provide a ``lock`` - e.g. an :py:class:`asyncio.Lock` instance in an :py:mod:`asyncio` application - and access is automatically synchronised. 
+ """ def __init__( @@ -280,6 +281,7 @@ class aclosing(AbstractAsyncContextManager): # noqa: N801 finally: await agen.aclose() + """ def __init__( diff --git a/libs/core/langchain_core/utils/function_calling.py b/libs/core/langchain_core/utils/function_calling.py index 2d2fa6e4088..d7059fded47 100644 --- a/libs/core/langchain_core/utils/function_calling.py +++ b/libs/core/langchain_core/utils/function_calling.py @@ -687,6 +687,7 @@ def tool_example_to_messages( messages.extend( tool_example_to_messages(txt, [tool_call]) ) + """ messages: list[BaseMessage] = [HumanMessage(content=input)] openai_tool_calls = [ diff --git a/libs/core/langchain_core/utils/iter.py b/libs/core/langchain_core/utils/iter.py index 208a8a6a07b..ecd4ba60976 100644 --- a/libs/core/langchain_core/utils/iter.py +++ b/libs/core/langchain_core/utils/iter.py @@ -126,6 +126,7 @@ class Tee(Generic[T]): To enforce sequential use of ``anext``, provide a ``lock`` - e.g. an :py:class:`asyncio.Lock` instance in an :py:mod:`asyncio` application - and access is automatically synchronised. + """ def __init__( diff --git a/libs/core/langchain_core/vectorstores/base.py b/libs/core/langchain_core/vectorstores/base.py index 1b0c3e6a299..c6bfc467260 100644 --- a/libs/core/langchain_core/vectorstores/base.py +++ b/libs/core/langchain_core/vectorstores/base.py @@ -994,6 +994,7 @@ class VectorStore(ABC): docsearch.as_retriever( search_kwargs={'filter': {'paper_title':'GPT-4 Technical Report'}} ) + """ tags = kwargs.pop("tags", None) or [*self._get_retriever_tags()] return VectorStoreRetriever(vectorstore=self, tags=tags, **kwargs) diff --git a/libs/core/tests/unit_tests/conftest.py b/libs/core/tests/unit_tests/conftest.py index fb88b15fffd..aceca3156b8 100644 --- a/libs/core/tests/unit_tests/conftest.py +++ b/libs/core/tests/unit_tests/conftest.py @@ -66,6 +66,7 @@ def pytest_collection_modifyitems( @pytest.mark.requires("package1", "package2") def test_something(): ... + """ # Mapping from the name of a package to whether it is installed or not. # Used to avoid repeated calls to `util.find_spec` diff --git a/libs/langchain/langchain/agents/agent.py b/libs/langchain/langchain/agents/agent.py index b2a24b9ebda..941f4bc494f 100644 --- a/libs/langchain/langchain/agents/agent.py +++ b/libs/langchain/langchain/agents/agent.py @@ -196,6 +196,7 @@ class BaseSingleActionAgent(BaseModel): # If working with agent executor agent.agent.save(file_path="path/agent.yaml") + """ # Convert file to Path object. save_path = Path(file_path) if isinstance(file_path, str) else file_path @@ -339,6 +340,7 @@ class BaseMultiActionAgent(BaseModel): # If working with agent executor agent.agent.save(file_path="path/agent.yaml") + """ # Convert file to Path object. save_path = Path(file_path) if isinstance(file_path, str) else file_path diff --git a/libs/langchain/langchain/agents/agent_toolkits/vectorstore/base.py b/libs/langchain/langchain/agents/agent_toolkits/vectorstore/base.py index bc94a06468d..75f642bc082 100644 --- a/libs/langchain/langchain/agents/agent_toolkits/vectorstore/base.py +++ b/libs/langchain/langchain/agents/agent_toolkits/vectorstore/base.py @@ -90,6 +90,7 @@ def create_vectorstore_agent( Returns: AgentExecutor: Returns a callable AgentExecutor object. 
             Either you can call it or use run method with the query to get the response
+
     """  # noqa: E501
     tools = toolkit.get_tools()
     prompt = ZeroShotAgent.create_prompt(tools, prefix=prefix)
@@ -198,6 +199,7 @@ def create_vectorstore_router_agent(
     Returns:
         AgentExecutor: Returns a callable AgentExecutor object.
             Either you can call it or use run method with the query to get the response.
+
     """  # noqa: E501
     tools = toolkit.get_tools()
     prompt = ZeroShotAgent.create_prompt(tools, prefix=prefix)
diff --git a/libs/langchain/langchain/agents/json_chat/base.py b/libs/langchain/langchain/agents/json_chat/base.py
index 67ded18ab21..29f929952c3 100644
--- a/libs/langchain/langchain/agents/json_chat/base.py
+++ b/libs/langchain/langchain/agents/json_chat/base.py
@@ -160,6 +160,7 @@ def create_json_chat_agent(
                 MessagesPlaceholder("agent_scratchpad"),
             ]
         )
+
     """  # noqa: E501
     missing_vars = {"tools", "tool_names", "agent_scratchpad"}.difference(
         prompt.input_variables + list(prompt.partial_variables),
diff --git a/libs/langchain/langchain/agents/openai_functions_agent/base.py b/libs/langchain/langchain/agents/openai_functions_agent/base.py
index 04d31dd614a..458d34967d9 100644
--- a/libs/langchain/langchain/agents/openai_functions_agent/base.py
+++ b/libs/langchain/langchain/agents/openai_functions_agent/base.py
@@ -359,6 +359,7 @@ def create_openai_functions_agent(
                 MessagesPlaceholder("agent_scratchpad"),
             ]
         )
+
     """
     if "agent_scratchpad" not in (
         prompt.input_variables + list(prompt.partial_variables)
diff --git a/libs/langchain/langchain/agents/openai_tools/base.py b/libs/langchain/langchain/agents/openai_tools/base.py
index fb3a767c4e9..89ff92b7333 100644
--- a/libs/langchain/langchain/agents/openai_tools/base.py
+++ b/libs/langchain/langchain/agents/openai_tools/base.py
@@ -84,6 +84,7 @@ def create_openai_tools_agent(
                 MessagesPlaceholder("agent_scratchpad"),
             ]
         )
+
     """
     missing_vars = {"agent_scratchpad"}.difference(
         prompt.input_variables + list(prompt.partial_variables),
diff --git a/libs/langchain/langchain/agents/react/agent.py b/libs/langchain/langchain/agents/react/agent.py
index 622a03855a5..59bda46d68e 100644
--- a/libs/langchain/langchain/agents/react/agent.py
+++ b/libs/langchain/langchain/agents/react/agent.py
@@ -116,6 +116,7 @@ def create_react_agent(
         Thought:{agent_scratchpad}'''

         prompt = PromptTemplate.from_template(template)
+
     """  # noqa: E501
     missing_vars = {"tools", "tool_names", "agent_scratchpad"}.difference(
         prompt.input_variables + list(prompt.partial_variables),
diff --git a/libs/langchain/langchain/agents/self_ask_with_search/base.py b/libs/langchain/langchain/agents/self_ask_with_search/base.py
index aa69f443f87..0af5b1f8a41 100644
--- a/libs/langchain/langchain/agents/self_ask_with_search/base.py
+++ b/libs/langchain/langchain/agents/self_ask_with_search/base.py
@@ -185,6 +185,7 @@ def create_self_ask_with_search_agent(
         Are followup questions needed here:{agent_scratchpad}'''

         prompt = PromptTemplate.from_template(template)
+
     """  # noqa: E501
     missing_vars = {"agent_scratchpad"}.difference(
         prompt.input_variables + list(prompt.partial_variables),
diff --git a/libs/langchain/langchain/agents/structured_chat/base.py b/libs/langchain/langchain/agents/structured_chat/base.py
index a00828b7d20..ff2eacd2c97 100644
--- a/libs/langchain/langchain/agents/structured_chat/base.py
+++ b/libs/langchain/langchain/agents/structured_chat/base.py
@@ -280,6 +280,7 @@ def create_structured_chat_agent(
                 ("human", human),
             ]
         )
+
     """  # noqa: E501
     missing_vars = {"tools", "tool_names", "agent_scratchpad"}.difference(
         prompt.input_variables + list(prompt.partial_variables),
diff --git a/libs/langchain/langchain/agents/tool_calling_agent/base.py b/libs/langchain/langchain/agents/tool_calling_agent/base.py
index 4e22099c4b0..ae438d059cc 100644
--- a/libs/langchain/langchain/agents/tool_calling_agent/base.py
+++ b/libs/langchain/langchain/agents/tool_calling_agent/base.py
@@ -85,6 +85,7 @@ def create_tool_calling_agent(
         The agent prompt must have an `agent_scratchpad` key that is a
             ``MessagesPlaceholder``. Intermediate agent actions and tool
             output messages will be passed in here.
+
     """
     missing_vars = {"agent_scratchpad"}.difference(
         prompt.input_variables + list(prompt.partial_variables),
diff --git a/libs/langchain/langchain/agents/xml/base.py b/libs/langchain/langchain/agents/xml/base.py
index 13bc7a66bce..9ecf167bf34 100644
--- a/libs/langchain/langchain/agents/xml/base.py
+++ b/libs/langchain/langchain/agents/xml/base.py
@@ -37,7 +37,6 @@ class XMLAgent(BaseSingleActionAgent):
             tools = ...
             model =
-
     """

     tools: list[BaseTool]
@@ -209,6 +208,7 @@ def create_xml_agent(
         Question: {input}
         {agent_scratchpad}'''

         prompt = PromptTemplate.from_template(template)
+
     """  # noqa: E501
     missing_vars = {"tools", "agent_scratchpad"}.difference(
         prompt.input_variables + list(prompt.partial_variables),
diff --git a/libs/langchain/langchain/chains/api/base.py b/libs/langchain/langchain/chains/api/base.py
index 6be2c29a096..b70beeabfc9 100644
--- a/libs/langchain/langchain/chains/api/base.py
+++ b/libs/langchain/langchain/chains/api/base.py
@@ -191,6 +191,7 @@ try:
             )
             async for event in events:
                 event["messages"][-1].pretty_print()
+
     """  # noqa: E501

     api_request_chain: LLMChain
diff --git a/libs/langchain/langchain/chains/base.py b/libs/langchain/langchain/chains/base.py
index 6d187bc43d9..50bc65bcbab 100644
--- a/libs/langchain/langchain/chains/base.py
+++ b/libs/langchain/langchain/chains/base.py
@@ -618,6 +618,7 @@ class Chain(RunnableSerializable[dict[str, Any], dict[str, Any]], ABC):
                 context = "Weather report for Boise, Idaho on 07/03/23..."
                 chain.run(question=question, context=context)
                 # -> "The temperature in Boise is..."
+
         """
         # Run at start to make sure this is possible/defined
         _output_key = self._run_output_key
@@ -692,6 +693,7 @@ class Chain(RunnableSerializable[dict[str, Any], dict[str, Any]], ABC):
                 context = "Weather report for Boise, Idaho on 07/03/23..."
                 await chain.arun(question=question, context=context)
                 # -> "The temperature in Boise is..."
+
         """
         if len(self.output_keys) != 1:
             msg = (
@@ -746,6 +748,7 @@ class Chain(RunnableSerializable[dict[str, Any], dict[str, Any]], ABC):
             chain.dict(exclude_unset=True)
             # -> {"_type": "foo", "verbose": False, ...}
+
         """
         _dict = super().dict(**kwargs)
         with contextlib.suppress(NotImplementedError):
@@ -765,6 +768,7 @@ class Chain(RunnableSerializable[dict[str, Any], dict[str, Any]], ABC):
         .. code-block:: python

             chain.save(file_path="path/chain.yaml")
+
         """
         if self.memory is not None:
             msg = "Saving of memory is not yet supported."
diff --git a/libs/langchain/langchain/chains/combine_documents/base.py b/libs/langchain/langchain/chains/combine_documents/base.py
index 74fd6135f53..00ec94f064c 100644
--- a/libs/langchain/langchain/chains/combine_documents/base.py
+++ b/libs/langchain/langchain/chains/combine_documents/base.py
@@ -234,6 +234,7 @@ class AnalyzeDocumentChain(Chain):
                 input_documents=itemgetter("input_document") | split_text,
             )
             | chain.pick("output_text")
         )
+
     """

     input_key: str = "input_document"  #: :meta private:
diff --git a/libs/langchain/langchain/chains/combine_documents/map_reduce.py b/libs/langchain/langchain/chains/combine_documents/map_reduce.py
index 0b1b4c14501..1e273a6af8f 100644
--- a/libs/langchain/langchain/chains/combine_documents/map_reduce.py
+++ b/libs/langchain/langchain/chains/combine_documents/map_reduce.py
@@ -99,6 +99,7 @@ class MapReduceDocumentsChain(BaseCombineDocumentsChain):
             llm_chain=llm_chain,
             reduce_documents_chain=reduce_documents_chain,
         )
+
     """

     llm_chain: LLMChain
diff --git a/libs/langchain/langchain/chains/combine_documents/map_rerank.py b/libs/langchain/langchain/chains/combine_documents/map_rerank.py
index 406bbebb3bb..d0d5aba469d 100644
--- a/libs/langchain/langchain/chains/combine_documents/map_rerank.py
+++ b/libs/langchain/langchain/chains/combine_documents/map_rerank.py
@@ -69,6 +69,7 @@ class MapRerankDocumentsChain(BaseCombineDocumentsChain):
             rank_key="score",
             answer_key="answer",
         )
+
     """

     llm_chain: LLMChain
diff --git a/libs/langchain/langchain/chains/combine_documents/reduce.py b/libs/langchain/langchain/chains/combine_documents/reduce.py
index 18c2cba7b84..3923e83e287 100644
--- a/libs/langchain/langchain/chains/combine_documents/reduce.py
+++ b/libs/langchain/langchain/chains/combine_documents/reduce.py
@@ -201,6 +201,7 @@ class ReduceDocumentsChain(BaseCombineDocumentsChain):
             combine_documents_chain=combine_documents_chain,
             collapse_documents_chain=collapse_documents_chain,
         )
+
     """

     combine_documents_chain: BaseCombineDocumentsChain
diff --git a/libs/langchain/langchain/chains/combine_documents/refine.py b/libs/langchain/langchain/chains/combine_documents/refine.py
index 5e5e3e74d05..337ce2b6ef8 100644
--- a/libs/langchain/langchain/chains/combine_documents/refine.py
+++ b/libs/langchain/langchain/chains/combine_documents/refine.py
@@ -79,6 +79,7 @@ class RefineDocumentsChain(BaseCombineDocumentsChain):
             document_variable_name=document_variable_name,
             initial_response_name=initial_response_name,
         )
+
     """

     initial_llm_chain: LLMChain
diff --git a/libs/langchain/langchain/chains/combine_documents/stuff.py b/libs/langchain/langchain/chains/combine_documents/stuff.py
index 89b06743858..03e94c3a4b4 100644
--- a/libs/langchain/langchain/chains/combine_documents/stuff.py
+++ b/libs/langchain/langchain/chains/combine_documents/stuff.py
@@ -75,6 +75,7 @@ def create_stuff_documents_chain(
         ]

         chain.invoke({"context": docs})
+
     """  # noqa: E501
     _validate_prompt(prompt, document_variable_name)

@@ -142,6 +143,7 @@ class StuffDocumentsChain(BaseCombineDocumentsChain):
             document_prompt=document_prompt,
             document_variable_name=document_variable_name
         )
+
     """

     llm_chain: LLMChain
diff --git a/libs/langchain/langchain/chains/constitutional_ai/base.py b/libs/langchain/langchain/chains/constitutional_ai/base.py
index cfe29606dcf..2113c70738f 100644
--- a/libs/langchain/langchain/chains/constitutional_ai/base.py
+++ b/libs/langchain/langchain/chains/constitutional_ai/base.py
@@ -187,6 +187,7 @@ class ConstitutionalChain(Chain):
         )

         constitutional_chain.run(question="What is the meaning of life?")
+
     """  # noqa: E501

     chain: LLMChain
diff --git a/libs/langchain/langchain/chains/conversation/base.py b/libs/langchain/langchain/chains/conversation/base.py
index f81a47940a1..fb64f9a45ca 100644
--- a/libs/langchain/langchain/chains/conversation/base.py
+++ b/libs/langchain/langchain/chains/conversation/base.py
@@ -97,6 +97,7 @@ class ConversationChain(LLMChain):
         from langchain_community.llms import OpenAI

         conversation = ConversationChain(llm=OpenAI())
+
     """

     memory: BaseMemory = Field(default_factory=ConversationBufferMemory)
diff --git a/libs/langchain/langchain/chains/conversational_retrieval/base.py b/libs/langchain/langchain/chains/conversational_retrieval/base.py
index 3b4d417bb0c..71d572fd956 100644
--- a/libs/langchain/langchain/chains/conversational_retrieval/base.py
+++ b/libs/langchain/langchain/chains/conversational_retrieval/base.py
@@ -374,6 +374,7 @@ class ConversationalRetrievalChain(BaseConversationalRetrievalChain):
             retriever=retriever,
             question_generator=question_generator_chain,
         )
+
     """

     retriever: BaseRetriever
diff --git a/libs/langchain/langchain/chains/elasticsearch_database/base.py b/libs/langchain/langchain/chains/elasticsearch_database/base.py
index 632d5d2fbc4..6463883294b 100644
--- a/libs/langchain/langchain/chains/elasticsearch_database/base.py
+++ b/libs/langchain/langchain/chains/elasticsearch_database/base.py
@@ -34,6 +34,7 @@ class ElasticsearchDatabaseChain(Chain):
         database = Elasticsearch("http://localhost:9200")
         db_chain = ElasticsearchDatabaseChain.from_llm(OpenAI(), database)
+
     """

     query_chain: Runnable
diff --git a/libs/langchain/langchain/chains/llm.py b/libs/langchain/langchain/chains/llm.py
index de03197b91f..cb7b7dc9416 100644
--- a/libs/langchain/langchain/chains/llm.py
+++ b/libs/langchain/langchain/chains/llm.py
@@ -74,6 +74,7 @@ class LLMChain(Chain):
             input_variables=["adjective"], template=prompt_template
         )
         llm = LLMChain(llm=OpenAI(), prompt=prompt)
+
     """

     @classmethod
@@ -323,6 +324,7 @@ class LLMChain(Chain):
         .. code-block:: python

             completion = llm.predict(adjective="funny")
+
         """
         return self(kwargs, callbacks=callbacks)[self.output_key]

@@ -340,6 +342,7 @@ class LLMChain(Chain):
         .. code-block:: python

             completion = llm.predict(adjective="funny")
+
         """
         return (await self.acall(kwargs, callbacks=callbacks))[self.output_key]
diff --git a/libs/langchain/langchain/chains/llm_checker/base.py b/libs/langchain/langchain/chains/llm_checker/base.py
index 66db6394b85..970666e5578 100644
--- a/libs/langchain/langchain/chains/llm_checker/base.py
+++ b/libs/langchain/langchain/chains/llm_checker/base.py
@@ -82,6 +82,7 @@ class LLMCheckerChain(Chain):
         from langchain.chains import LLMCheckerChain

         llm = OpenAI(temperature=0.7)
         checker_chain = LLMCheckerChain.from_llm(llm)
+
     """

     question_to_checked_assertions_chain: SequentialChain
diff --git a/libs/langchain/langchain/chains/llm_math/base.py b/libs/langchain/langchain/chains/llm_math/base.py
index d55d52c71af..084c7719dda 100644
--- a/libs/langchain/langchain/chains/llm_math/base.py
+++ b/libs/langchain/langchain/chains/llm_math/base.py
@@ -146,6 +146,7 @@ class LLMMathChain(Chain):
         from langchain.chains import LLMMathChain
         from langchain_community.llms import OpenAI

         llm_math = LLMMathChain.from_llm(OpenAI())
+
     """  # noqa: E501

     llm_chain: LLMChain
diff --git a/libs/langchain/langchain/chains/llm_summarization_checker/base.py b/libs/langchain/langchain/chains/llm_summarization_checker/base.py
index a4885b77017..1bee18ba147 100644
--- a/libs/langchain/langchain/chains/llm_summarization_checker/base.py
+++ b/libs/langchain/langchain/chains/llm_summarization_checker/base.py
@@ -85,6 +85,7 @@ class LLMSummarizationCheckerChain(Chain):
         from langchain.chains import LLMSummarizationCheckerChain

         llm = OpenAI(temperature=0.0)
         checker_chain = LLMSummarizationCheckerChain.from_llm(llm)
+
     """

     sequential_chain: SequentialChain
diff --git a/libs/langchain/langchain/chains/moderation.py b/libs/langchain/langchain/chains/moderation.py
index 2b687d9e9bd..21d52c07220 100644
--- a/libs/langchain/langchain/chains/moderation.py
+++ b/libs/langchain/langchain/chains/moderation.py
@@ -27,6 +27,7 @@ class OpenAIModerationChain(Chain):
         from langchain.chains import OpenAIModerationChain

         moderation = OpenAIModerationChain()
+
     """

     client: Any = None  #: :meta private:
diff --git a/libs/langchain/langchain/chains/natbot/base.py b/libs/langchain/langchain/chains/natbot/base.py
index 3d3303d286d..351e23c6b02 100644
--- a/libs/langchain/langchain/chains/natbot/base.py
+++ b/libs/langchain/langchain/chains/natbot/base.py
@@ -47,6 +47,7 @@ class NatBotChain(Chain):
         from langchain.chains import NatBotChain

         natbot = NatBotChain.from_default("Buy me a new hat.")
+
     """

     llm_chain: Runnable
@@ -151,6 +152,7 @@ class NatBotChain(Chain):
             browser_content = "...."
             llm_command = natbot.run("www.google.com", browser_content)
+
         """
         _inputs = {
             self.input_url_key: url,
diff --git a/libs/langchain/langchain/chains/openai_functions/base.py b/libs/langchain/langchain/chains/openai_functions/base.py
index 53b4df74e04..d7bd76b3a9f 100644
--- a/libs/langchain/langchain/chains/openai_functions/base.py
+++ b/libs/langchain/langchain/chains/openai_functions/base.py
@@ -121,6 +121,7 @@ def create_openai_fn_chain(
         chain = create_openai_fn_chain([RecordPerson, RecordDog], llm, prompt)
         chain.run("Harry was a chubby brown beagle who loved chicken")
         # -> RecordDog(name="Harry", color="brown", fav_food="chicken")
+
     """  # noqa: E501
     if not functions:
         msg = "Need to pass in at least one function. Received zero."
@@ -203,6 +204,7 @@ def create_structured_output_chain(
         chain = create_structured_output_chain(Dog, llm, prompt)
         chain.run("Harry was a chubby brown beagle who loved chicken")
         # -> Dog(name="Harry", color="brown", fav_food="chicken")
+
     """  # noqa: E501
     if isinstance(output_schema, dict):
         function: Any = {
diff --git a/libs/langchain/langchain/chains/openai_functions/citation_fuzzy_match.py b/libs/langchain/langchain/chains/openai_functions/citation_fuzzy_match.py
index 30412dcc611..f5a7704957a 100644
--- a/libs/langchain/langchain/chains/openai_functions/citation_fuzzy_match.py
+++ b/libs/langchain/langchain/chains/openai_functions/citation_fuzzy_match.py
@@ -94,6 +94,7 @@ def create_citation_fuzzy_match_runnable(llm: BaseChatModel) -> Runnable:

     Returns:
         Runnable that can be used to answer questions with citations.
+
     """
     if llm.bind_tools is BaseChatModel.bind_tools:
         msg = "Language model must implement bind_tools to use this function."
diff --git a/libs/langchain/langchain/chains/openai_functions/openapi.py b/libs/langchain/langchain/chains/openai_functions/openapi.py
index c42c30e6f87..64b863af1c1 100644
--- a/libs/langchain/langchain/chains/openai_functions/openapi.py
+++ b/libs/langchain/langchain/chains/openai_functions/openapi.py
@@ -345,6 +345,7 @@ def get_openapi_chain(
             `ChatOpenAI(model="gpt-3.5-turbo-0613")`.
         prompt: Main prompt template to use.
         request_chain: Chain for taking the functions output and executing the request.
+
     """  # noqa: E501
     try:
         from langchain_community.utilities.openapi import OpenAPISpec
diff --git a/libs/langchain/langchain/chains/openai_functions/tagging.py b/libs/langchain/langchain/chains/openai_functions/tagging.py
index 74a115502ae..721bcddbaf4 100644
--- a/libs/langchain/langchain/chains/openai_functions/tagging.py
+++ b/libs/langchain/langchain/chains/openai_functions/tagging.py
@@ -86,6 +86,7 @@ def create_tagging_chain(

     Returns:
         Chain (LLMChain) that can be used to extract information from a passage.
+
     """
     function = _get_tagging_function(schema)
     prompt = prompt or ChatPromptTemplate.from_template(_TAGGING_TEMPLATE)
@@ -154,6 +155,7 @@ def create_tagging_chain_pydantic(

     Returns:
         Chain (LLMChain) that can be used to extract information from a passage.
+ """ if hasattr(pydantic_schema, "model_json_schema"): openai_schema = pydantic_schema.model_json_schema() diff --git a/libs/langchain/langchain/chains/qa_generation/base.py b/libs/langchain/langchain/chains/qa_generation/base.py index f203bd49c4e..e906702f439 100644 --- a/libs/langchain/langchain/chains/qa_generation/base.py +++ b/libs/langchain/langchain/chains/qa_generation/base.py @@ -62,6 +62,7 @@ class QAGenerationChain(Chain): split_text | RunnableEach(bound=prompt | llm | JsonOutputParser()) ) ) + """ llm_chain: LLMChain diff --git a/libs/langchain/langchain/chains/retrieval_qa/base.py b/libs/langchain/langchain/chains/retrieval_qa/base.py index 4c6cf5de307..b2b26e20ae2 100644 --- a/libs/langchain/langchain/chains/retrieval_qa/base.py +++ b/libs/langchain/langchain/chains/retrieval_qa/base.py @@ -147,6 +147,7 @@ class BaseRetrievalQA(Chain): res = indexqa({'query': 'This is my query'}) answer, docs = res['result'], res['source_documents'] + """ _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() question = inputs[self.input_key] @@ -191,6 +192,7 @@ class BaseRetrievalQA(Chain): res = indexqa({'query': 'This is my query'}) answer, docs = res['result'], res['source_documents'] + """ _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager() question = inputs[self.input_key] diff --git a/libs/langchain/langchain/chains/router/llm_router.py b/libs/langchain/langchain/chains/router/llm_router.py index 1abbdbe79a3..2691383754d 100644 --- a/libs/langchain/langchain/chains/router/llm_router.py +++ b/libs/langchain/langchain/chains/router/llm_router.py @@ -96,6 +96,7 @@ class LLMRouterChain(RouterChain): ) chain.invoke({"query": "what color are carrots"}) + """ # noqa: E501 llm_chain: LLMChain diff --git a/libs/langchain/langchain/chains/router/multi_prompt.py b/libs/langchain/langchain/chains/router/multi_prompt.py index 2bfe1c42bd3..ecc73b0e1b1 100644 --- a/libs/langchain/langchain/chains/router/multi_prompt.py +++ b/libs/langchain/langchain/chains/router/multi_prompt.py @@ -140,6 +140,7 @@ class MultiPromptChain(MultiRouteChain): result = await app.ainvoke({"query": "what color are carrots"}) print(result["destination"]) print(result["answer"]) + """ # noqa: E501 @property diff --git a/libs/langchain/langchain/chains/sql_database/query.py b/libs/langchain/langchain/chains/sql_database/query.py index ef3ccce7155..e2bcf5f74a1 100644 --- a/libs/langchain/langchain/chains/sql_database/query.py +++ b/libs/langchain/langchain/chains/sql_database/query.py @@ -113,6 +113,7 @@ def create_sql_query_chain( Question: {input}''' prompt = PromptTemplate.from_template(template) + """ # noqa: E501 if prompt is not None: prompt_to_use = prompt diff --git a/libs/langchain/langchain/chains/structured_output/base.py b/libs/langchain/langchain/chains/structured_output/base.py index 88cccc5fea8..aa22ef2948c 100644 --- a/libs/langchain/langchain/chains/structured_output/base.py +++ b/libs/langchain/langchain/chains/structured_output/base.py @@ -132,6 +132,7 @@ def create_openai_fn_runnable( structured_llm = create_openai_fn_runnable([RecordPerson, RecordDog], llm) structured_llm.invoke("Harry was a chubby brown beagle who loved chicken) # -> RecordDog(name="Harry", color="brown", fav_food="chicken") + """ # noqa: E501 if not functions: msg = "Need to pass in at least one function. Received zero." 
@@ -390,6 +391,7 @@ def create_structured_output_runnable( ) chain = prompt | structured_llm chain.invoke({"input": "Harry was a chubby brown beagle who loved chicken"}) + """ # noqa: E501 # for backwards compatibility force_function_usage = kwargs.get( diff --git a/libs/langchain/langchain/chains/transform.py b/libs/langchain/langchain/chains/transform.py index 4f1ac06733c..9e5f210c349 100644 --- a/libs/langchain/langchain/chains/transform.py +++ b/libs/langchain/langchain/chains/transform.py @@ -26,6 +26,7 @@ class TransformChain(Chain): from langchain.chains import TransformChain transform_chain = TransformChain(input_variables=["text"], output_variables["entities"], transform=func()) + """ input_variables: list[str] diff --git a/libs/langchain/langchain/embeddings/base.py b/libs/langchain/langchain/embeddings/base.py index 6eb47a5f2d2..4142550363d 100644 --- a/libs/langchain/langchain/embeddings/base.py +++ b/libs/langchain/langchain/embeddings/base.py @@ -47,6 +47,7 @@ def _parse_model_string(model_name: str) -> tuple[str, str]: Raises: ValueError: If the model string is not in the correct format or the provider is unsupported + """ if ":" not in model_name: providers = _SUPPORTED_PROVIDERS @@ -177,6 +178,7 @@ def init_embeddings( ) .. versionadded:: 0.3.9 + """ if not model: providers = _SUPPORTED_PROVIDERS.keys() diff --git a/libs/langchain/langchain/evaluation/agents/trajectory_eval_chain.py b/libs/langchain/langchain/evaluation/agents/trajectory_eval_chain.py index 3b043f97f2c..c11c5dfa7bf 100644 --- a/libs/langchain/langchain/evaluation/agents/trajectory_eval_chain.py +++ b/libs/langchain/langchain/evaluation/agents/trajectory_eval_chain.py @@ -140,6 +140,7 @@ class TrajectoryEvalChain(AgentTrajectoryEvaluator, LLMEvalChain): ) print(result["score"]) # noqa: T201 # 0 + """ agent_tools: Optional[list[BaseTool]] = None diff --git a/libs/langchain/langchain/evaluation/loading.py b/libs/langchain/langchain/evaluation/loading.py index 082d4df2b9d..06f63048169 100644 --- a/libs/langchain/langchain/evaluation/loading.py +++ b/libs/langchain/langchain/evaluation/loading.py @@ -58,6 +58,7 @@ def load_dataset(uri: str) -> list[dict]: from langchain.evaluation import load_dataset ds = load_dataset("llm-math") + """ try: from datasets import load_dataset diff --git a/libs/langchain/langchain/retrievers/document_compressors/listwise_rerank.py b/libs/langchain/langchain/retrievers/document_compressors/listwise_rerank.py index 61da3a741aa..5f0dadd6f63 100644 --- a/libs/langchain/langchain/retrievers/document_compressors/listwise_rerank.py +++ b/libs/langchain/langchain/retrievers/document_compressors/listwise_rerank.py @@ -70,6 +70,7 @@ class LLMListwiseRerank(BaseDocumentCompressor): compressed_docs = reranker.compress_documents(documents, "Who is steve") assert len(compressed_docs) == 3 assert "Steve" in compressed_docs[0].page_content + """ reranker: Runnable[dict, list[Document]] diff --git a/libs/langchain/langchain/retrievers/parent_document_retriever.py b/libs/langchain/langchain/retrievers/parent_document_retriever.py index b1a70c423fc..0974f9327f6 100644 --- a/libs/langchain/langchain/retrievers/parent_document_retriever.py +++ b/libs/langchain/langchain/retrievers/parent_document_retriever.py @@ -54,6 +54,7 @@ class ParentDocumentRetriever(MultiVectorRetriever): child_splitter=child_splitter, parent_splitter=parent_splitter, ) + """ # noqa: E501 child_splitter: TextSplitter diff --git a/libs/langchain/langchain/smith/__init__.py b/libs/langchain/langchain/smith/__init__.py index 
diff --git a/libs/langchain/langchain/smith/__init__.py b/libs/langchain/langchain/smith/__init__.py
index ffdf2dd4d19..b8e9e37fee7 100644
--- a/libs/langchain/langchain/smith/__init__.py
+++ b/libs/langchain/langchain/smith/__init__.py
@@ -87,6 +87,7 @@ or LangSmith's `RunEvaluator` classes.
 - :func:`arun_on_dataset <langchain.smith.evaluation.runner_utils.arun_on_dataset>`: Asynchronous function to evaluate a chain, agent, or other LangChain component over a dataset.
 - :func:`run_on_dataset <langchain.smith.evaluation.runner_utils.run_on_dataset>`: Function to evaluate a chain, agent, or other LangChain component over a dataset.
 - :class:`RunEvalConfig <langchain.smith.evaluation.config.RunEvalConfig>`: Class representing the configuration for running evaluation. You can select evaluators by :class:`EvaluatorType <langchain.evaluation.schema.EvaluatorType>` or config, or you can pass in `custom_evaluators`
+
 """  # noqa: E501

 from langchain.smith.evaluation import (
diff --git a/libs/langchain/langchain/smith/evaluation/runner_utils.py b/libs/langchain/langchain/smith/evaluation/runner_utils.py
index ffbdcbf9111..987805b5355 100644
--- a/libs/langchain/langchain/smith/evaluation/runner_utils.py
+++ b/libs/langchain/langchain/smith/evaluation/runner_utils.py
@@ -1451,6 +1451,7 @@ async def arun_on_dataset(
            llm_or_chain_factory=construct_chain,
            evaluation=evaluation_config,
        )
+
    """  # noqa: E501
    input_mapper = kwargs.pop("input_mapper", None)
    if input_mapper:
@@ -1623,6 +1624,7 @@ def run_on_dataset(
            llm_or_chain_factory=construct_chain,
            evaluation=evaluation_config,
        )
+
    """  # noqa: E501
    input_mapper = kwargs.pop("input_mapper", None)
    if input_mapper:
diff --git a/libs/langchain/langchain/storage/encoder_backed.py b/libs/langchain/langchain/storage/encoder_backed.py
index 956d3f6b395..1bb9fe25fad 100644
--- a/libs/langchain/langchain/storage/encoder_backed.py
+++ b/libs/langchain/langchain/storage/encoder_backed.py
@@ -46,6 +46,7 @@ class EncoderBackedStore(BaseStore[K, V]):
        store.mset([(1, 3.14), (2, 2.718)])
        values = store.mget([1, 2])  # Retrieves [3.14, 2.718]
        store.mdelete([1, 2])  # Deletes the keys 1 and 2
+
    """

    def __init__(
diff --git a/libs/langchain/tests/unit_tests/conftest.py b/libs/langchain/tests/unit_tests/conftest.py
index 3a620b411fe..69ca8245faf 100644
--- a/libs/langchain/tests/unit_tests/conftest.py
+++ b/libs/langchain/tests/unit_tests/conftest.py
@@ -78,6 +78,7 @@ def pytest_collection_modifyitems(
        @pytest.mark.requires("package1", "package2")
        def test_something():
            ...
+
    """
    # Mapping from the name of a package to whether it is installed or not.
    # Used to avoid repeated calls to `util.find_spec`
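The ``EncoderBackedStore`` docstring touched above shows integer keys and float values round-tripping through the store. A construction sketch consistent with that example; the choice of ``str`` key encoding and JSON value codecs here is illustrative, not part of the diff:

.. code-block:: python

    import json

    from langchain.storage import EncoderBackedStore, InMemoryStore

    store = EncoderBackedStore(
        store=InMemoryStore(),          # any string-keyed backing store
        key_encoder=str,                # encode int keys to strings
        value_serializer=json.dumps,    # serialize values on write
        value_deserializer=json.loads,  # deserialize values on read
    )
    store.mset([(1, 3.14), (2, 2.718)])
    assert store.mget([1, 2]) == [3.14, 2.718]
    store.mdelete([1, 2])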
diff --git a/libs/langchain_v1/langchain/embeddings/base.py b/libs/langchain_v1/langchain/embeddings/base.py
index 6eb47a5f2d2..4142550363d 100644
--- a/libs/langchain_v1/langchain/embeddings/base.py
+++ b/libs/langchain_v1/langchain/embeddings/base.py
@@ -47,6 +47,7 @@ def _parse_model_string(model_name: str) -> tuple[str, str]:
    Raises:
        ValueError: If the model string is not in the correct format or
            the provider is unsupported
+
    """
    if ":" not in model_name:
        providers = _SUPPORTED_PROVIDERS
@@ -177,6 +178,7 @@ def init_embeddings(
            )

    .. versionadded:: 0.3.9
+
    """
    if not model:
        providers = _SUPPORTED_PROVIDERS.keys()
diff --git a/libs/langchain_v1/langchain/storage/encoder_backed.py b/libs/langchain_v1/langchain/storage/encoder_backed.py
index 956d3f6b395..1bb9fe25fad 100644
--- a/libs/langchain_v1/langchain/storage/encoder_backed.py
+++ b/libs/langchain_v1/langchain/storage/encoder_backed.py
@@ -46,6 +46,7 @@ class EncoderBackedStore(BaseStore[K, V]):
        store.mset([(1, 3.14), (2, 2.718)])
        values = store.mget([1, 2])  # Retrieves [3.14, 2.718]
        store.mdelete([1, 2])  # Deletes the keys 1 and 2
+
    """

    def __init__(
diff --git a/libs/langchain_v1/tests/unit_tests/conftest.py b/libs/langchain_v1/tests/unit_tests/conftest.py
index 4a0056c262f..880c3156d29 100644
--- a/libs/langchain_v1/tests/unit_tests/conftest.py
+++ b/libs/langchain_v1/tests/unit_tests/conftest.py
@@ -70,6 +70,7 @@ def pytest_collection_modifyitems(
        @pytest.mark.requires("package1", "package2")
        def test_something():
            ...
+
    """
    # Mapping from the name of a package to whether it is installed or not.
    # Used to avoid repeated calls to `util.find_spec`
diff --git a/libs/partners/fireworks/langchain_fireworks/embeddings.py b/libs/partners/fireworks/langchain_fireworks/embeddings.py
index a1933961900..2291c859fe0 100644
--- a/libs/partners/fireworks/langchain_fireworks/embeddings.py
+++ b/libs/partners/fireworks/langchain_fireworks/embeddings.py
@@ -65,6 +65,7 @@ class FireworksEmbeddings(BaseModel, Embeddings):
        .. code-block:: python

            [-0.024603435769677162, -0.007543657906353474, 0.0039630369283258915]
+
    """

    client: OpenAI = Field(default=None, exclude=True)  # type: ignore[assignment] # :meta private:
diff --git a/libs/partners/groq/langchain_groq/chat_models.py b/libs/partners/groq/langchain_groq/chat_models.py
index 6936499cecc..c56da31d6aa 100644
--- a/libs/partners/groq/langchain_groq/chat_models.py
+++ b/libs/partners/groq/langchain_groq/chat_models.py
@@ -304,6 +304,7 @@ class ChatGroq(BaseChatModel):
            'system_fingerprint': 'fp_c5f20b5bb1',
            'finish_reason': 'stop',
            'logprobs': None}
+
    """  # noqa: E501

    client: Any = Field(default=None, exclude=True)  #: :meta private:
diff --git a/libs/partners/huggingface/langchain_huggingface/llms/huggingface_pipeline.py b/libs/partners/huggingface/langchain_huggingface/llms/huggingface_pipeline.py
index 50071040881..740132b7a19 100644
--- a/libs/partners/huggingface/langchain_huggingface/llms/huggingface_pipeline.py
+++ b/libs/partners/huggingface/langchain_huggingface/llms/huggingface_pipeline.py
@@ -64,6 +64,7 @@ class HuggingFacePipeline(BaseLLM):
                "text-generation", model=model, tokenizer=tokenizer, max_new_tokens=10
            )
            hf = HuggingFacePipeline(pipeline=pipe)
+
    """

    pipeline: Any = None  #: :meta private:
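Both copies of ``init_embeddings`` above (``libs/langchain`` and ``libs/langchain_v1``) parse a single ``provider:model`` string, as the ``_parse_model_string`` docstring describes. A minimal usage sketch; the provider and model name are examples only and assume the matching partner package and API key are available:

.. code-block:: python

    from langchain.embeddings import init_embeddings

    # "openai:text-embedding-3-small" splits on the first ":" into
    # provider="openai" and model="text-embedding-3-small".
    embeddings = init_embeddings("openai:text-embedding-3-small")
    vector = embeddings.embed_query("hello")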
diff --git a/libs/partners/mistralai/langchain_mistralai/embeddings.py b/libs/partners/mistralai/langchain_mistralai/embeddings.py
index 6eff302fa5b..2632a4dbe3e 100644
--- a/libs/partners/mistralai/langchain_mistralai/embeddings.py
+++ b/libs/partners/mistralai/langchain_mistralai/embeddings.py
@@ -121,6 +121,7 @@ class MistralAIEmbeddings(BaseModel, Embeddings):
        .. code-block:: python

            [-0.009100092574954033, 0.005071679595857859, -0.0029193938244134188]
+
    """

    # The type for client and async_client is ignored because the type is not
diff --git a/libs/partners/ollama/langchain_ollama/chat_models.py b/libs/partners/ollama/langchain_ollama/chat_models.py
index fbc36ddd7b6..ae836ed5d08 100644
--- a/libs/partners/ollama/langchain_ollama/chat_models.py
+++ b/libs/partners/ollama/langchain_ollama/chat_models.py
@@ -418,7 +418,6 @@ class ChatOllama(BaseChatModel):
        AIMessage(content='The word "strawberry" contains **three \'r\' letters**. Here\'s a breakdown for clarity:\n\n- The spelling of "strawberry" has two parts ... be 3.\n\nTo be thorough, let\'s confirm with an online source or common knowledge.\n\nI can recall that "strawberry" has: s-t-r-a-w-b-e-r-r-y — yes, three r\'s.\n\nPerhaps it\'s misspelled by some, but standard is correct.\n\nSo I think the response should be 3.\n'}, response_metadata={'model': 'deepseek-r1:8b', 'created_at': '2025-07-08T19:33:55.891269Z', 'done': True, 'done_reason': 'stop', 'total_duration': 98232561292, 'load_duration': 28036792, 'prompt_eval_count': 10, 'prompt_eval_duration': 40171834, 'eval_count': 3615, 'eval_duration': 98163832416, 'model_name': 'deepseek-r1:8b'}, id='run--18f8269f-6a35-4a7c-826d-b89d52c753b3-0', usage_metadata={'input_tokens': 10, 'output_tokens': 3615, 'total_tokens': 3625})
-
    """  # noqa: E501, pylint: disable=line-too-long

    model: str
@@ -1282,6 +1281,7 @@ class ChatOllama(BaseChatModel):
        #     'parsed': AnswerWithJustification(answer='They are both the same weight.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The difference lies in the volume and density of the materials, not the weight.'),
        #     'parsing_error': None
        # }
+
        """  # noqa: E501, D301
        _ = kwargs.pop("strict", None)
        if kwargs:
diff --git a/libs/partners/ollama/langchain_ollama/embeddings.py b/libs/partners/ollama/langchain_ollama/embeddings.py
index c95e2f39b43..ac5619a3b06 100644
--- a/libs/partners/ollama/langchain_ollama/embeddings.py
+++ b/libs/partners/ollama/langchain_ollama/embeddings.py
@@ -122,6 +122,7 @@ class OllamaEmbeddings(BaseModel, Embeddings):
        .. code-block:: python

            [-0.009100092574954033, 0.005071679595857859, -0.0029193938244134188]
+
    """  # noqa: E501

    model: str
diff --git a/libs/partners/ollama/langchain_ollama/llms.py b/libs/partners/ollama/langchain_ollama/llms.py
index f7978ed7c7b..b433606340d 100644
--- a/libs/partners/ollama/langchain_ollama/llms.py
+++ b/libs/partners/ollama/langchain_ollama/llms.py
@@ -33,6 +33,7 @@ class OllamaLLM(BaseLLM):
            model = OllamaLLM(model="llama3")
            print(model.invoke("Come up with 10 names for a song about parrots"))
+
    """

    model: str
+ """ # noqa: E501 import json diff --git a/libs/partners/openai/langchain_openai/chat_models/azure.py b/libs/partners/openai/langchain_openai/chat_models/azure.py index 6327220a73c..ab67b8917d5 100644 --- a/libs/partners/openai/langchain_openai/chat_models/azure.py +++ b/libs/partners/openai/langchain_openai/chat_models/azure.py @@ -466,6 +466,7 @@ class AzureChatOpenAI(BaseChatOpenAI): "violence": {"filtered": False, "severity": "safe"}, }, } + """ # noqa: E501 azure_endpoint: Optional[str] = Field( @@ -1139,6 +1140,7 @@ class AzureChatOpenAI(BaseChatOpenAI): # }, # 'parsing_error': None # } + """ # noqa: E501 return super().with_structured_output( schema, method=method, include_raw=include_raw, strict=strict, **kwargs diff --git a/libs/partners/openai/langchain_openai/chat_models/base.py b/libs/partners/openai/langchain_openai/chat_models/base.py index 10903b68da4..1bc9b66d880 100644 --- a/libs/partners/openai/langchain_openai/chat_models/base.py +++ b/libs/partners/openai/langchain_openai/chat_models/base.py @@ -526,6 +526,7 @@ class BaseChatOpenAI(BaseChatModel): } .. versionadded:: 0.3.24 + """ tiktoken_model_name: Optional[str] = None """The model name to pass to tiktoken when using this class. @@ -657,6 +658,7 @@ class BaseChatOpenAI(BaseChatModel): llm.invoke([HumanMessage("How are you?")], previous_response_id="resp_123") .. versionadded:: 0.3.26 + """ use_responses_api: Optional[bool] = None @@ -1841,6 +1843,7 @@ class BaseChatOpenAI(BaseChatModel): .. versionchanged:: 0.3.21 Pass ``kwargs`` through to the model. + """ # noqa: E501 if strict is not None and method == "json_mode": raise ValueError( @@ -3162,6 +3165,7 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override] # }, # 'parsing_error': None # } + """ # noqa: E501 return super().with_structured_output( schema, method=method, include_raw=include_raw, strict=strict, **kwargs @@ -3903,6 +3907,7 @@ def _convert_responses_chunk_to_generation_chunk( This function just identifies updates in output or sub-indexes and increments the current index accordingly. + """ nonlocal current_index, current_output_index, current_sub_index if sub_idx is None: diff --git a/libs/partners/openai/langchain_openai/embeddings/azure.py b/libs/partners/openai/langchain_openai/embeddings/azure.py index 5f4db948793..7fd567e03d5 100644 --- a/libs/partners/openai/langchain_openai/embeddings/azure.py +++ b/libs/partners/openai/langchain_openai/embeddings/azure.py @@ -99,6 +99,7 @@ class AzureOpenAIEmbeddings(OpenAIEmbeddings): # type: ignore[override] .. code-block:: python [-0.009100092574954033, 0.005071679595857859, -0.0029193938244134188] + """ # noqa: E501 azure_endpoint: Optional[str] = Field( diff --git a/libs/partners/openai/langchain_openai/embeddings/base.py b/libs/partners/openai/langchain_openai/embeddings/base.py index 212c6385b04..8f3b1d605bb 100644 --- a/libs/partners/openai/langchain_openai/embeddings/base.py +++ b/libs/partners/openai/langchain_openai/embeddings/base.py @@ -157,6 +157,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings): .. 
diff --git a/libs/partners/openai/langchain_openai/embeddings/base.py b/libs/partners/openai/langchain_openai/embeddings/base.py
index 212c6385b04..8f3b1d605bb 100644
--- a/libs/partners/openai/langchain_openai/embeddings/base.py
+++ b/libs/partners/openai/langchain_openai/embeddings/base.py
@@ -157,6 +157,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
        .. code-block:: python

            [-0.009100092574954033, 0.005071679595857859, -0.0029193938244134188]
+
    """

    client: Any = Field(default=None, exclude=True)  #: :meta private:
diff --git a/libs/partners/openai/langchain_openai/llms/azure.py b/libs/partners/openai/langchain_openai/llms/azure.py
index 6e99c815ea2..02e3ab9e1e1 100644
--- a/libs/partners/openai/langchain_openai/llms/azure.py
+++ b/libs/partners/openai/langchain_openai/llms/azure.py
@@ -30,6 +30,7 @@ class AzureOpenAI(BaseOpenAI):
            from langchain_openai import AzureOpenAI

            openai = AzureOpenAI(model_name="gpt-3.5-turbo-instruct")
+
    """

    azure_endpoint: Optional[str] = Field(
diff --git a/libs/partners/openai/langchain_openai/llms/base.py b/libs/partners/openai/langchain_openai/llms/base.py
index d33649c237d..1e3c97fd4d7 100644
--- a/libs/partners/openai/langchain_openai/llms/base.py
+++ b/libs/partners/openai/langchain_openai/llms/base.py
@@ -289,6 +289,7 @@ class BaseOpenAI(BaseLLM):
            .. code-block:: python

                response = openai.generate(["Tell me a joke."])
+
        """
        # TODO: write a unit test for this
        params = self._invocation_params
@@ -508,6 +509,7 @@ class BaseOpenAI(BaseLLM):
            .. code-block:: python

                max_tokens = openai.modelname_to_contextsize("gpt-3.5-turbo-instruct")
+
        """
        model_token_mapping = {
            "gpt-4o-mini": 128_000,
@@ -572,6 +574,7 @@ class BaseOpenAI(BaseLLM):
            .. code-block:: python

                max_tokens = openai.max_tokens_for_prompt("Tell me a joke.")
+
        """
        num_tokens = self.get_num_tokens(prompt)
        return self.max_context_size - num_tokens
diff --git a/libs/partners/qdrant/langchain_qdrant/qdrant.py b/libs/partners/qdrant/langchain_qdrant/qdrant.py
index b5cfb7075ee..d45f4083e16 100644
--- a/libs/partners/qdrant/langchain_qdrant/qdrant.py
+++ b/libs/partners/qdrant/langchain_qdrant/qdrant.py
@@ -199,6 +199,7 @@ class QdrantVectorStore(VectorStore):
                retrieval_mode=RetrievalMode.HYBRID,
                sparse_embedding=FastEmbedSparse(),
            )
+
        """
        if validate_embeddings:
            self._validate_embeddings(retrieval_mode, embedding, sparse_embedding)
@@ -318,6 +319,7 @@ class QdrantVectorStore(VectorStore):
                from langchain_openai import OpenAIEmbeddings
                embeddings = OpenAIEmbeddings()
                qdrant = Qdrant.from_texts(texts, embeddings, url="http://localhost:6333")
+
        """
        client_options = {
            "location": location,
diff --git a/libs/partners/qdrant/langchain_qdrant/vectorstores.py b/libs/partners/qdrant/langchain_qdrant/vectorstores.py
index 9744368cb29..b7bf3422741 100644
--- a/libs/partners/qdrant/langchain_qdrant/vectorstores.py
+++ b/libs/partners/qdrant/langchain_qdrant/vectorstores.py
@@ -72,6 +72,7 @@ class Qdrant(VectorStore):
            client = QdrantClient()
            collection_name = "MyCollection"
            qdrant = Qdrant(client, collection_name, embedding_function)
+
    """

    CONTENT_KEY: str = "page_content"
@@ -1306,6 +1307,7 @@ class Qdrant(VectorStore):
                from langchain_openai import OpenAIEmbeddings
                embeddings = OpenAIEmbeddings()
                qdrant = Qdrant.from_texts(texts, embeddings, "localhost")
+
        """
        qdrant = cls.construct_instance(
            texts,
@@ -1540,6 +1542,7 @@ class Qdrant(VectorStore):
                from langchain_openai import OpenAIEmbeddings
                embeddings = OpenAIEmbeddings()
                qdrant = await Qdrant.afrom_texts(texts, embeddings, "localhost")
+
        """
        qdrant = await cls.aconstruct_instance(
            texts,
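The ``BaseOpenAI`` hunks above close the docstrings for ``modelname_to_contextsize`` and ``max_tokens_for_prompt``; the final context lines show the latter is simply the model's context size minus the prompt's token count. A worked example, assuming the 4,097-token context that ``model_token_mapping`` lists for ``gpt-3.5-turbo-instruct`` and a hypothetical 5-token prompt:

.. code-block:: python

    # max_tokens_for_prompt(prompt) == max_context_size - get_num_tokens(prompt)
    max_context_size = 4097  # modelname_to_contextsize("gpt-3.5-turbo-instruct")
    num_tokens = 5           # get_num_tokens("Tell me a joke.") -- illustrative
    max_tokens = max_context_size - num_tokens  # 4092 tokens left for completion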
diff --git a/libs/standard-tests/langchain_tests/integration_tests/chat_models.py b/libs/standard-tests/langchain_tests/integration_tests/chat_models.py
index 694b5196fac..090b1f6dc73 100644
--- a/libs/standard-tests/langchain_tests/integration_tests/chat_models.py
+++ b/libs/standard-tests/langchain_tests/integration_tests/chat_models.py
@@ -673,6 +673,7 @@ class ChatModelIntegrationTests(ChatModelTests):
    You can then commit the cassette to your repository. Subsequent test runs
    will use the cassette instead of making HTTP calls.
+
    """  # noqa: E501

    @property
@@ -698,6 +699,7 @@ class ChatModelIntegrationTests(ChatModelTests):
                    message=AIMessage(content="Output text")
                )]
            )
+
        """
        result = model.invoke("Hello")
        assert result is not None
@@ -730,6 +732,7 @@ class ChatModelIntegrationTests(ChatModelTests):
                    message=AIMessage(content="Output text")
                )]
            )
+
        """
        result = await model.ainvoke("Hello")
        assert result is not None
@@ -761,6 +764,7 @@ class ChatModelIntegrationTests(ChatModelTests):
                yield ChatGenerationChunk(
                    message=AIMessageChunk(content="chunk text")
                )
+
        """
        num_chunks = 0
        for chunk in model.stream("Hello"):
@@ -796,6 +800,7 @@ class ChatModelIntegrationTests(ChatModelTests):
                yield ChatGenerationChunk(
                    message=AIMessageChunk(content="chunk text")
                )
+
        """
        num_chunks = 0
        async for chunk in model.astream("Hello"):
@@ -1011,6 +1016,7 @@ class ChatModelIntegrationTests(ChatModelTests):
        Check also that the response includes a ``'model_name'`` key in its
        ``usage_metadata``.
+
        """
        if not self.returns_usage_metadata:
            pytest.skip("Not implemented.")
@@ -1188,6 +1194,7 @@ class ChatModelIntegrationTests(ChatModelTests):
        Check also that the aggregated response includes a ``'model_name'`` key
        in its ``usage_metadata``.
+
        """
        if not self.returns_usage_metadata:
            pytest.skip("Not implemented.")
@@ -1273,6 +1280,7 @@ class ChatModelIntegrationTests(ChatModelTests):
                run_manager: Optional[CallbackManagerForLLMRun] = None,
                **kwargs: Any,
            ) -> ChatResult:
+
        """
        result = model.invoke("hi", stop=["you"])
        assert isinstance(result, AIMessage)
@@ -1324,6 +1332,7 @@ class ChatModelIntegrationTests(ChatModelTests):
        Otherwise, in the case that only one tool is bound, ensure that
        ``tool_choice`` supports the string ``'any'`` to force calling that tool.
+
        """
        if not self.has_tool_calling:
            pytest.skip("Test requires tool calling.")
@@ -1397,6 +1406,7 @@ class ChatModelIntegrationTests(ChatModelTests):
        Otherwise, in the case that only one tool is bound, ensure that
        ``tool_choice`` supports the string ``'any'`` to force calling that tool.
+
        """
        if not self.has_tool_calling:
            pytest.skip("Test requires tool calling.")
@@ -1456,6 +1466,7 @@ class ChatModelIntegrationTests(ChatModelTests):
        Otherwise, ensure that the ``tool_choice_value`` property is correctly
        specified on the test class.
+
        """
        if not self.has_tool_calling:
            pytest.skip("Test requires tool calling.")
@@ -1520,6 +1531,7 @@ class ChatModelIntegrationTests(ChatModelTests):
            @pytest.mark.xfail(reason=("Not implemented."))
            def test_tool_message_histories_string_content(self, *args: Any) -> None:
                super().test_tool_message_histories_string_content(*args)
+
        """  # noqa: E501
        if not self.has_tool_calling:
            pytest.skip("Test requires tool calling.")
@@ -1604,6 +1616,7 @@ class ChatModelIntegrationTests(ChatModelTests):
            @pytest.mark.xfail(reason=("Not implemented."))
            def test_tool_message_histories_list_content(self, *args: Any) -> None:
                super().test_tool_message_histories_list_content(*args)
+
        """  # noqa: E501
        if not self.has_tool_calling:
            pytest.skip("Test requires tool calling.")
@@ -1733,6 +1746,7 @@ class ChatModelIntegrationTests(ChatModelTests):
        Otherwise, in the case that only one tool is bound, ensure that
        ``tool_choice`` supports the string ``'any'`` to force calling that tool.
+ """ # noqa: E501 if not self.has_tool_calling: pytest.skip("Test requires tool calling.") @@ -1788,6 +1802,7 @@ class ChatModelIntegrationTests(ChatModelTests): If this test fails, check that the ``status`` field on ``ToolMessage`` objects is either ignored or passed to the model appropriately. + """ if not self.has_tool_calling: pytest.skip("Test requires tool calling.") @@ -1859,6 +1874,7 @@ class ChatModelIntegrationTests(ChatModelTests): @pytest.mark.xfail(reason=("Not implemented.")) def test_structured_few_shot_examples(self, *args: Any) -> None: super().test_structured_few_shot_examples(*args) + """ if not self.has_tool_calling: pytest.skip("Test requires tool calling.") @@ -1909,6 +1925,7 @@ class ChatModelIntegrationTests(ChatModelTests): most formats: https://python.langchain.com/api_reference/core/utils/langchain_core.utils.function_calling.convert_to_openai_tool.html See example implementation of ``with_structured_output`` here: https://python.langchain.com/api_reference/_modules/langchain_openai/chat_models/base.html#BaseChatOpenAI.with_structured_output + """ if not self.has_structured_output: pytest.skip("Test requires structured output.") @@ -1987,6 +2004,7 @@ class ChatModelIntegrationTests(ChatModelTests): most formats: https://python.langchain.com/api_reference/core/utils/langchain_core.utils.function_calling.convert_to_openai_tool.html See example implementation of ``with_structured_output`` here: https://python.langchain.com/api_reference/_modules/langchain_openai/chat_models/base.html#BaseChatOpenAI.with_structured_output + """ if not self.has_structured_output: pytest.skip("Test requires structured output.") @@ -2065,6 +2083,7 @@ class ChatModelIntegrationTests(ChatModelTests): most formats: https://python.langchain.com/api_reference/core/utils/langchain_core.utils.function_calling.convert_to_openai_tool.html See example implementation of ``with_structured_output`` here: https://python.langchain.com/api_reference/_modules/langchain_openai/chat_models/base.html#BaseChatOpenAI.with_structured_output + """ if not self.has_structured_output: pytest.skip("Test requires structured output.") @@ -2126,6 +2145,7 @@ class ChatModelIntegrationTests(ChatModelTests): most formats: https://python.langchain.com/api_reference/core/utils/langchain_core.utils.function_calling.convert_to_openai_tool.html See example implementation of ``with_structured_output`` here: https://python.langchain.com/api_reference/_modules/langchain_openai/chat_models/base.html#BaseChatOpenAI.with_structured_output + """ if not self.has_structured_output: pytest.skip("Test requires structured output.") @@ -2187,6 +2207,7 @@ class ChatModelIntegrationTests(ChatModelTests): .. dropdown:: Troubleshooting See example implementation of ``with_structured_output`` here: https://python.langchain.com/api_reference/_modules/langchain_openai/chat_models/base.html#BaseChatOpenAI.with_structured_output + """ if not self.supports_json_mode: pytest.skip("Test requires json mode support.") @@ -2263,6 +2284,7 @@ class ChatModelIntegrationTests(ChatModelTests): If this test fails, check that the model can correctly handle messages with pdf content blocks, including base64-encoded files. Otherwise, set the ``supports_pdf_inputs`` property to False. 
+ """ if not self.supports_pdf_inputs: pytest.skip("Model does not support PDF inputs.") @@ -2338,6 +2360,7 @@ class ChatModelIntegrationTests(ChatModelTests): If this test fails, check that the model can correctly handle messages with audio content blocks, specifically base64-encoded files. Otherwise, set the ``supports_audio_inputs`` property to False. + """ if not self.supports_audio_inputs: pytest.skip("Model does not support audio inputs.") @@ -2438,6 +2461,7 @@ class ChatModelIntegrationTests(ChatModelTests): If this test fails, check that the model can correctly handle messages with image content blocks, including base64-encoded images. Otherwise, set the ``supports_image_inputs`` property to False. + """ if not self.supports_image_inputs: pytest.skip("Model does not support image message.") @@ -2544,6 +2568,7 @@ class ChatModelIntegrationTests(ChatModelTests): with image content blocks in ToolMessages, including base64-encoded images. Otherwise, set the ``supports_image_tool_message`` property to False. + """ if not self.supports_image_tool_message: pytest.skip("Model does not support image tool message.") @@ -2666,6 +2691,7 @@ class ChatModelIntegrationTests(ChatModelTests): Otherwise, if Anthropic tool call and result formats are not supported, set the ``supports_anthropic_inputs`` property to False. + """ # noqa: E501 if not self.supports_anthropic_inputs: pytest.skip("Model does not explicitly support Anthropic inputs.") @@ -2782,6 +2808,7 @@ class ChatModelIntegrationTests(ChatModelTests): If this test fails, check that the ``name`` field on ``HumanMessage`` objects is either ignored or passed to the model appropriately. + """ result = model.invoke([HumanMessage("hello", name="example_user")]) assert result is not None diff --git a/libs/standard-tests/langchain_tests/integration_tests/embeddings.py b/libs/standard-tests/langchain_tests/integration_tests/embeddings.py index ac70f38ffbb..eb68a42eccc 100644 --- a/libs/standard-tests/langchain_tests/integration_tests/embeddings.py +++ b/libs/standard-tests/langchain_tests/integration_tests/embeddings.py @@ -32,7 +32,8 @@ class EmbeddingsIntegrationTests(EmbeddingsTests): return {"model": "model-001"} .. note:: - API references for individual test methods include troubleshooting tips. + API references for individual test methods include troubleshooting tips. + """ def test_embed_query(self, model: Embeddings) -> None: diff --git a/libs/standard-tests/langchain_tests/integration_tests/retrievers.py b/libs/standard-tests/langchain_tests/integration_tests/retrievers.py index 10687efc399..5c8f0f0188a 100644 --- a/libs/standard-tests/langchain_tests/integration_tests/retrievers.py +++ b/libs/standard-tests/langchain_tests/integration_tests/retrievers.py @@ -49,6 +49,7 @@ class RetrieversIntegrationTests(BaseStandardTests): MyRetriever(k=3).invoke("query") should return 3 documents when invoked with a query. + """ params = { k: v for k, v in self.retriever_constructor_params.items() if k != "k" @@ -82,6 +83,7 @@ class RetrieversIntegrationTests(BaseStandardTests): MyRetriever().invoke("query", k=3) should return 3 documents when invoked with a query. 
+ """ result_1 = retriever.invoke(self.retriever_query_example, k=1) assert len(result_1) == 1 diff --git a/libs/standard-tests/langchain_tests/integration_tests/vectorstores.py b/libs/standard-tests/langchain_tests/integration_tests/vectorstores.py index 84f4e57a37f..acb6960d5c0 100644 --- a/libs/standard-tests/langchain_tests/integration_tests/vectorstores.py +++ b/libs/standard-tests/langchain_tests/integration_tests/vectorstores.py @@ -93,6 +93,7 @@ class VectorStoreIntegrationTests(BaseStandardTests): .. note:: API references for individual test methods include troubleshooting tips. + """ # noqa: E501 @abstractmethod @@ -331,6 +332,7 @@ class VectorStoreIntegrationTests(BaseStandardTests): @pytest.mark.xfail(reason=("get_by_ids not implemented.")) def test_get_by_ids(self, vectorstore: VectorStore) -> None: super().test_get_by_ids(vectorstore) + """ if not self.has_sync: pytest.skip("Sync tests not supported.") @@ -364,6 +366,7 @@ class VectorStoreIntegrationTests(BaseStandardTests): @pytest.mark.xfail(reason=("get_by_ids not implemented.")) def test_get_by_ids_missing(self, vectorstore: VectorStore) -> None: super().test_get_by_ids_missing(vectorstore) + """ if not self.has_sync: pytest.skip("Sync tests not supported.") @@ -393,6 +396,7 @@ class VectorStoreIntegrationTests(BaseStandardTests): @pytest.mark.xfail(reason=("get_by_ids not implemented.")) def test_add_documents_documents(self, vectorstore: VectorStore) -> None: super().test_add_documents_documents(vectorstore) + """ # noqa: E501 if not self.has_sync: pytest.skip("Sync tests not supported.") @@ -430,6 +434,7 @@ class VectorStoreIntegrationTests(BaseStandardTests): @pytest.mark.xfail(reason=("get_by_ids not implemented.")) def test_add_documents_with_existing_ids(self, vectorstore: VectorStore) -> None: super().test_add_documents_with_existing_ids(vectorstore) + """ # noqa: E501 if not self.has_sync: pytest.skip("Sync tests not supported.") @@ -657,6 +662,7 @@ class VectorStoreIntegrationTests(BaseStandardTests): @pytest.mark.xfail(reason=("get_by_ids not implemented.")) async def test_get_by_ids(self, vectorstore: VectorStore) -> None: await super().test_get_by_ids(vectorstore) + """ if not self.has_async: pytest.skip("Async tests not supported.") @@ -690,6 +696,7 @@ class VectorStoreIntegrationTests(BaseStandardTests): @pytest.mark.xfail(reason=("get_by_ids not implemented.")) async def test_get_by_ids_missing(self, vectorstore: VectorStore) -> None: await super().test_get_by_ids_missing(vectorstore) + """ # noqa: E501 if not self.has_async: pytest.skip("Async tests not supported.") @@ -720,6 +727,7 @@ class VectorStoreIntegrationTests(BaseStandardTests): @pytest.mark.xfail(reason=("get_by_ids not implemented.")) async def test_add_documents_documents(self, vectorstore: VectorStore) -> None: await super().test_add_documents_documents(vectorstore) + """ # noqa: E501 if not self.has_async: pytest.skip("Async tests not supported.") @@ -759,6 +767,7 @@ class VectorStoreIntegrationTests(BaseStandardTests): @pytest.mark.xfail(reason=("get_by_ids not implemented.")) async def test_add_documents_with_existing_ids(self, vectorstore: VectorStore) -> None: await super().test_add_documents_with_existing_ids(vectorstore) + """ # noqa: E501 if not self.has_async: pytest.skip("Async tests not supported.") diff --git a/libs/standard-tests/langchain_tests/unit_tests/chat_models.py b/libs/standard-tests/langchain_tests/unit_tests/chat_models.py index d4fa85f4a85..320d2b491f1 100644 --- 
diff --git a/libs/standard-tests/langchain_tests/unit_tests/chat_models.py b/libs/standard-tests/langchain_tests/unit_tests/chat_models.py
index d4fa85f4a85..320d2b491f1 100644
--- a/libs/standard-tests/langchain_tests/unit_tests/chat_models.py
+++ b/libs/standard-tests/langchain_tests/unit_tests/chat_models.py
@@ -798,6 +798,7 @@ class ChatModelUnitTests(ChatModelTests):
                    "my_api_key": "api_key",
                },
            )
+
    """  # noqa: E501

    @property
diff --git a/libs/standard-tests/langchain_tests/unit_tests/embeddings.py b/libs/standard-tests/langchain_tests/unit_tests/embeddings.py
index 3428fd8041f..0df698ee8bd 100644
--- a/libs/standard-tests/langchain_tests/unit_tests/embeddings.py
+++ b/libs/standard-tests/langchain_tests/unit_tests/embeddings.py
@@ -86,6 +86,7 @@ class EmbeddingsUnitTests(EmbeddingsTests):
                    "my_api_key": "api_key",
                },
            )
+
    """

    def test_init(self) -> None:
diff --git a/libs/standard-tests/tests/unit_tests/custom_chat_model.py b/libs/standard-tests/tests/unit_tests/custom_chat_model.py
index 0529480ebf1..cc9be763989 100644
--- a/libs/standard-tests/tests/unit_tests/custom_chat_model.py
+++ b/libs/standard-tests/tests/unit_tests/custom_chat_model.py
@@ -32,6 +32,7 @@ class ChatParrotLink(BaseChatModel):
        result = model.invoke([HumanMessage(content="hello")])
        result = model.batch([[HumanMessage(content="hello")], [HumanMessage(content="world")]])
+
    """

    model_name: str = Field(alias="model")
diff --git a/libs/text-splitters/langchain_text_splitters/html.py b/libs/text-splitters/langchain_text_splitters/html.py
index 1f7c30bc132..37358b0d3fa 100644
--- a/libs/text-splitters/langchain_text_splitters/html.py
+++ b/libs/text-splitters/langchain_text_splitters/html.py
@@ -107,6 +107,7 @@ class HTMLHeaderTextSplitter:
        #   content="Conclusion"
        # - Document with metadata={"Main Topic": "Conclusion"} and
        #   content="Final thoughts."
+
    """

    def __init__(
@@ -562,6 +563,7 @@ class HTMLSemanticPreservingSplitter(BaseDocumentTransformer):
            preserve_images=True,
            custom_handlers={"iframe": custom_iframe_extractor}
        )
+
    """  # noqa: E501, D214

    def __init__(
diff --git a/libs/text-splitters/tests/unit_tests/conftest.py b/libs/text-splitters/tests/unit_tests/conftest.py
index 83c9f53b64d..ad5ea6273cb 100644
--- a/libs/text-splitters/tests/unit_tests/conftest.py
+++ b/libs/text-splitters/tests/unit_tests/conftest.py
@@ -36,6 +36,7 @@ def pytest_collection_modifyitems(config: Config, items: Sequence[Function]) ->
        @pytest.mark.requires("package1", "package2")
        def test_something():
            ...
+
    """
    # Mapping from the name of a package to whether it is installed or not.
    # Used to avoid repeated calls to `util.find_spec`
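The three ``conftest.py`` hunks (langchain, langchain_v1, and text-splitters) all close the docstring for the ``requires`` marker machinery. Minimal usage per that docstring; the package name here is illustrative:

.. code-block:: python

    import pytest


    @pytest.mark.requires("numpy")
    def test_needs_numpy() -> None:
        # The conftest hooks above skip this test when numpy is not
        # installed, so the import below is safe when the test runs.
        import numpy as np

        assert int(np.int64(1)) == 1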