From 5840dad40ba0d1c1be7513a59ac381327ff3e7c9 Mon Sep 17 00:00:00 2001 From: Christophe Bornet Date: Mon, 8 Sep 2025 17:13:50 +0200 Subject: [PATCH] chore(core): enable ruff docstring-code-format (#32834) See https://docs.astral.sh/ruff/settings/#format_docstring-code-format --------- Co-authored-by: Mason Daugherty --- libs/core/langchain_core/_api/deprecation.py | 2 +- libs/core/langchain_core/agents.py | 8 +- .../langchain_core/beta/runnables/context.py | 3 +- libs/core/langchain_core/callbacks/base.py | 13 +- libs/core/langchain_core/callbacks/manager.py | 52 +++- libs/core/langchain_core/documents/base.py | 5 +- .../langchain_core/documents/transformers.py | 6 +- libs/core/langchain_core/embeddings/fake.py | 2 + libs/core/langchain_core/indexing/base.py | 2 +- .../language_models/__init__.py | 5 +- .../language_models/chat_models.py | 27 +- libs/core/langchain_core/memory.py | 10 +- libs/core/langchain_core/messages/ai.py | 14 +- libs/core/langchain_core/messages/human.py | 8 +- libs/core/langchain_core/messages/system.py | 8 +- libs/core/langchain_core/messages/tool.py | 19 +- libs/core/langchain_core/messages/utils.py | 182 ++++++++--- .../langchain_core/output_parsers/base.py | 7 +- libs/core/langchain_core/prompts/chat.py | 111 ++++--- libs/core/langchain_core/prompts/few_shot.py | 14 +- .../core/langchain_core/prompts/structured.py | 2 + libs/core/langchain_core/rate_limiters.py | 4 +- libs/core/langchain_core/retrievers.py | 7 +- libs/core/langchain_core/runnables/base.py | 291 +++++++++++------- libs/core/langchain_core/runnables/branch.py | 4 +- .../langchain_core/runnables/configurable.py | 33 +- .../langchain_core/runnables/fallbacks.py | 18 +- libs/core/langchain_core/runnables/history.py | 58 ++-- .../langchain_core/runnables/passthrough.py | 45 +-- libs/core/langchain_core/runnables/retry.py | 14 +- libs/core/langchain_core/runnables/schema.py | 2 + libs/core/langchain_core/stores.py | 18 +- libs/core/langchain_core/sys_info.py | 6 +- libs/core/langchain_core/tools/convert.py | 13 +- .../langchain_core/utils/function_calling.py | 7 +- libs/core/langchain_core/vectorstores/base.py | 12 +- .../langchain_core/vectorstores/in_memory.py | 9 +- libs/core/pyproject.toml | 2 + libs/core/tests/unit_tests/conftest.py | 3 +- .../prompts/__snapshots__/test_chat.ambr | 70 ++--- .../runnables/__snapshots__/test_graph.ambr | 35 +-- .../__snapshots__/test_runnable.ambr | 280 ++++++----------- .../unit_tests/runnables/test_runnable.py | 9 +- libs/core/uv.lock | 6 +- libs/langchain_v1/langchain/__init__.py | 2 +- 45 files changed, 797 insertions(+), 651 deletions(-) diff --git a/libs/core/langchain_core/_api/deprecation.py b/libs/core/langchain_core/_api/deprecation.py index 195812236c6..dc83afa2d17 100644 --- a/libs/core/langchain_core/_api/deprecation.py +++ b/libs/core/langchain_core/_api/deprecation.py @@ -133,7 +133,7 @@ def deprecated( .. code-block:: python - @deprecated('1.4.0') + @deprecated("1.4.0") def the_function_to_deprecate(): pass diff --git a/libs/core/langchain_core/agents.py b/libs/core/langchain_core/agents.py index 74bfcdd4012..8fa3fde3888 100644 --- a/libs/core/langchain_core/agents.py +++ b/libs/core/langchain_core/agents.py @@ -14,13 +14,15 @@ Agents use language models to choose a sequence of actions to take. A basic agent works in the following manner: -1. Given a prompt an agent uses an LLM to request an action to take (e.g., a tool to run). +1. Given a prompt an agent uses an LLM to request an action to take + (e.g., a tool to run). 2. 
The agent executes the action (e.g., runs the tool), and receives an observation. -3. The agent returns the observation to the LLM, which can then be used to generate the next action. +3. The agent returns the observation to the LLM, which can then be used to generate + the next action. 4. When the agent reaches a stopping condition, it returns a final return value. The schemas for the agents themselves are defined in langchain.agents.agent. -""" # noqa: E501 +""" from __future__ import annotations diff --git a/libs/core/langchain_core/beta/runnables/context.py b/libs/core/langchain_core/beta/runnables/context.py index 993b4665b2b..00d5e936bba 100644 --- a/libs/core/langchain_core/beta/runnables/context.py +++ b/libs/core/langchain_core/beta/runnables/context.py @@ -344,8 +344,7 @@ class Context: chain = ( Context.setter("input") | { - "context": RunnablePassthrough() - | Context.setter("context"), + "context": RunnablePassthrough() | Context.setter("context"), "question": RunnablePassthrough(), } | PromptTemplate.from_template("{context} {question}") diff --git a/libs/core/langchain_core/callbacks/base.py b/libs/core/langchain_core/callbacks/base.py index 5365fcb9ef1..68c4e4a254b 100644 --- a/libs/core/langchain_core/callbacks/base.py +++ b/libs/core/langchain_core/callbacks/base.py @@ -947,11 +947,18 @@ class BaseCallbackManager(CallbackManagerMixin): .. code-block:: python - from langchain_core.callbacks.manager import CallbackManager, trace_as_chain_group + from langchain_core.callbacks.manager import ( + CallbackManager, + trace_as_chain_group, + ) from langchain_core.callbacks.stdout import StdOutCallbackHandler - manager = CallbackManager(handlers=[StdOutCallbackHandler()], tags=["tag2"]) - with trace_as_chain_group("My Group Name", tags=["tag1"]) as group_manager: + manager = CallbackManager( + handlers=[StdOutCallbackHandler()], tags=["tag2"] + ) + with trace_as_chain_group( + "My Group Name", tags=["tag1"] + ) as group_manager: merged_manager = group_manager.merge(manager) print(merged_manager.handlers) # [ diff --git a/libs/core/langchain_core/callbacks/manager.py b/libs/core/langchain_core/callbacks/manager.py index 90009fd5a0f..8498995f86d 100644 --- a/libs/core/langchain_core/callbacks/manager.py +++ b/libs/core/langchain_core/callbacks/manager.py @@ -85,7 +85,8 @@ def trace_as_chain_group( Defaults to None. .. note: - Must have ``LANGCHAIN_TRACING_V2`` env var set to true to see the trace in LangSmith. + Must have ``LANGCHAIN_TRACING_V2`` env var set to true to see the trace in + LangSmith. Returns: CallbackManagerForChainGroup: The callback manager for the chain group. @@ -94,12 +95,14 @@ def trace_as_chain_group( .. code-block:: python llm_input = "Foo" - with trace_as_chain_group("group_name", inputs={"input": llm_input}) as manager: + with trace_as_chain_group( + "group_name", inputs={"input": llm_input} + ) as manager: # Use the callback manager for the chain group res = llm.invoke(llm_input, {"callbacks": manager}) manager.on_chain_end({"output": res}) - """ # noqa: E501 + """ from langchain_core.tracers.context import _get_trace_callbacks cb = _get_trace_callbacks( @@ -153,8 +156,8 @@ async def atrace_as_chain_group( Args: group_name (str): The name of the chain group. - callback_manager (AsyncCallbackManager, optional): The async callback manager to use, - which manages tracing and other callback behavior. Defaults to None. + callback_manager (AsyncCallbackManager, optional): The async callback manager + to use, which manages tracing and other callback behavior. 
Defaults to None. inputs (dict[str, Any], optional): The inputs to the chain group. Defaults to None. project_name (str, optional): The name of the project. @@ -171,18 +174,21 @@ async def atrace_as_chain_group( AsyncCallbackManager: The async callback manager for the chain group. .. note: - Must have ``LANGCHAIN_TRACING_V2`` env var set to true to see the trace in LangSmith. + Must have ``LANGCHAIN_TRACING_V2`` env var set to true to see the trace in + LangSmith. Example: .. code-block:: python llm_input = "Foo" - async with atrace_as_chain_group("group_name", inputs={"input": llm_input}) as manager: + async with atrace_as_chain_group( + "group_name", inputs={"input": llm_input} + ) as manager: # Use the async callback manager for the chain group res = await llm.ainvoke(llm_input, {"callbacks": manager}) await manager.on_chain_end({"output": res}) - """ # noqa: E501 + """ from langchain_core.tracers.context import _get_trace_callbacks cb = _get_trace_callbacks( @@ -1734,11 +1740,18 @@ class CallbackManagerForChainGroup(CallbackManager): .. code-block:: python - from langchain_core.callbacks.manager import CallbackManager, trace_as_chain_group + from langchain_core.callbacks.manager import ( + CallbackManager, + trace_as_chain_group, + ) from langchain_core.callbacks.stdout import StdOutCallbackHandler - manager = CallbackManager(handlers=[StdOutCallbackHandler()], tags=["tag2"]) - with trace_as_chain_group("My Group Name", tags=["tag1"]) as group_manager: + manager = CallbackManager( + handlers=[StdOutCallbackHandler()], tags=["tag2"] + ) + with trace_as_chain_group( + "My Group Name", tags=["tag1"] + ) as group_manager: merged_manager = group_manager.merge(manager) print(type(merged_manager)) # @@ -2258,18 +2271,25 @@ class AsyncCallbackManagerForChainGroup(AsyncCallbackManager): from the current object. Returns: - AsyncCallbackManagerForChainGroup: A copy of the current AsyncCallbackManagerForChainGroup - with the handlers, tags, etc. of the other callback manager merged in. + A copy of the current AsyncCallbackManagerForChainGroup + with the handlers, tags, etc. of the other callback manager merged in. Example: Merging two callback managers. .. 
code-block:: python - from langchain_core.callbacks.manager import CallbackManager, atrace_as_chain_group + from langchain_core.callbacks.manager import ( + CallbackManager, + atrace_as_chain_group, + ) from langchain_core.callbacks.stdout import StdOutCallbackHandler - manager = CallbackManager(handlers=[StdOutCallbackHandler()], tags=["tag2"]) - async with atrace_as_chain_group("My Group Name", tags=["tag1"]) as group_manager: + manager = CallbackManager( + handlers=[StdOutCallbackHandler()], tags=["tag2"] + ) + async with atrace_as_chain_group( + "My Group Name", tags=["tag1"] + ) as group_manager: merged_manager = group_manager.merge(manager) print(type(merged_manager)) # diff --git a/libs/core/langchain_core/documents/base.py b/libs/core/langchain_core/documents/base.py index 601e831a959..99ae9b62892 100644 --- a/libs/core/langchain_core/documents/base.py +++ b/libs/core/langchain_core/documents/base.py @@ -82,7 +82,7 @@ class Blob(BaseMedia): blob = Blob.from_data( data="Hello, world!", mime_type="text/plain", - metadata={"source": "https://example.com"} + metadata={"source": "https://example.com"}, ) Example: Load the blob from a file @@ -263,8 +263,7 @@ class Document(BaseMedia): from langchain_core.documents import Document document = Document( - page_content="Hello, world!", - metadata={"source": "https://example.com"} + page_content="Hello, world!", metadata={"source": "https://example.com"} ) """ diff --git a/libs/core/langchain_core/documents/transformers.py b/libs/core/langchain_core/documents/transformers.py index 171a98b458e..f70fa0d491e 100644 --- a/libs/core/langchain_core/documents/transformers.py +++ b/libs/core/langchain_core/documents/transformers.py @@ -38,7 +38,9 @@ class BaseDocumentTransformer(ABC): self.embeddings, stateful_documents ) included_idxs = _filter_similar_embeddings( - embedded_documents, self.similarity_fn, self.similarity_threshold + embedded_documents, + self.similarity_fn, + self.similarity_threshold, ) return [stateful_documents[i] for i in sorted(included_idxs)] @@ -47,7 +49,7 @@ class BaseDocumentTransformer(ABC): ) -> Sequence[Document]: raise NotImplementedError - """ # noqa: E501 + """ @abstractmethod def transform_documents( diff --git a/libs/core/langchain_core/embeddings/fake.py b/libs/core/langchain_core/embeddings/fake.py index 99069d08fb3..73b605dfa4b 100644 --- a/libs/core/langchain_core/embeddings/fake.py +++ b/libs/core/langchain_core/embeddings/fake.py @@ -20,6 +20,7 @@ class FakeEmbeddings(Embeddings, BaseModel): .. code-block:: python from langchain_core.embeddings import FakeEmbeddings + embed = FakeEmbeddings(size=100) Embed single text: @@ -78,6 +79,7 @@ class DeterministicFakeEmbedding(Embeddings, BaseModel): .. 
code-block:: python from langchain_core.embeddings import DeterministicFakeEmbedding + embed = DeterministicFakeEmbedding(size=100) Embed single text: diff --git a/libs/core/langchain_core/indexing/base.py b/libs/core/langchain_core/indexing/base.py index 5110e8c3630..0de0b8cdee2 100644 --- a/libs/core/langchain_core/indexing/base.py +++ b/libs/core/langchain_core/indexing/base.py @@ -254,7 +254,7 @@ class InMemoryRecordManager(RecordManager): """In-memory schema creation is simply ensuring the structure is initialized.""" async def acreate_schema(self) -> None: - """Async in-memory schema creation is simply ensuring the structure is initialized.""" # noqa: E501 + """In-memory schema creation is simply ensuring the structure is initialized.""" def get_time(self) -> float: """Get the current server time as a high resolution timestamp!""" diff --git a/libs/core/langchain_core/language_models/__init__.py b/libs/core/langchain_core/language_models/__init__.py index 4369d597d2e..7dc5791a16b 100644 --- a/libs/core/langchain_core/language_models/__init__.py +++ b/libs/core/langchain_core/language_models/__init__.py @@ -26,7 +26,8 @@ https://python.langchain.com/docs/how_to/custom_chat_model/ **LLMs** Language models that takes a string as input and returns a string. -These are traditionally older models (newer models generally are Chat Models, see below). +These are traditionally older models (newer models generally are Chat Models, +see below). Although the underlying models are string in, string out, the LangChain wrappers also allow these models to take messages as input. This gives them the same interface @@ -39,7 +40,7 @@ Please see the following guide for more information on how to implement a custom https://python.langchain.com/docs/how_to/custom_llm/ -""" # noqa: E501 +""" from typing import TYPE_CHECKING diff --git a/libs/core/langchain_core/language_models/chat_models.py b/libs/core/langchain_core/language_models/chat_models.py index 23872752da5..2ab0d6bb759 100644 --- a/libs/core/langchain_core/language_models/chat_models.py +++ b/libs/core/langchain_core/language_models/chat_models.py @@ -1453,15 +1453,20 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC): from pydantic import BaseModel + class AnswerWithJustification(BaseModel): '''An answer to the user question along with justification for the answer.''' + answer: str justification: str + llm = ChatModel(model="model-name", temperature=0) structured_llm = llm.with_structured_output(AnswerWithJustification) - structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers") + structured_llm.invoke( + "What weighs more a pound of bricks or a pound of feathers" + ) # -> AnswerWithJustification( # answer='They weigh the same', @@ -1473,15 +1478,22 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC): from pydantic import BaseModel + class AnswerWithJustification(BaseModel): '''An answer to the user question along with justification for the answer.''' + answer: str justification: str - llm = ChatModel(model="model-name", temperature=0) - structured_llm = llm.with_structured_output(AnswerWithJustification, include_raw=True) - structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers") + llm = ChatModel(model="model-name", temperature=0) + structured_llm = llm.with_structured_output( + AnswerWithJustification, include_raw=True + ) + + structured_llm.invoke( + "What weighs more a pound of bricks or a pound of feathers" + ) # -> { # 'raw': AIMessage(content='', 
additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}), # 'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'), @@ -1494,16 +1506,21 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC): from pydantic import BaseModel from langchain_core.utils.function_calling import convert_to_openai_tool + class AnswerWithJustification(BaseModel): '''An answer to the user question along with justification for the answer.''' + answer: str justification: str + dict_schema = convert_to_openai_tool(AnswerWithJustification) llm = ChatModel(model="model-name", temperature=0) structured_llm = llm.with_structured_output(dict_schema) - structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers") + structured_llm.invoke( + "What weighs more a pound of bricks or a pound of feathers" + ) # -> { # 'answer': 'They weigh the same', # 'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.' diff --git a/libs/core/langchain_core/memory.py b/libs/core/langchain_core/memory.py index b9249567b1c..bd6ac278808 100644 --- a/libs/core/langchain_core/memory.py +++ b/libs/core/langchain_core/memory.py @@ -45,16 +45,20 @@ class BaseMemory(Serializable, ABC): def memory_variables(self) -> list[str]: return list(self.memories.keys()) - def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, str]: + def load_memory_variables( + self, inputs: dict[str, Any] + ) -> dict[str, str]: return self.memories - def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None: + def save_context( + self, inputs: dict[str, Any], outputs: dict[str, str] + ) -> None: pass def clear(self) -> None: pass - """ # noqa: E501 + """ model_config = ConfigDict( arbitrary_types_allowed=True, diff --git a/libs/core/langchain_core/messages/ai.py b/libs/core/langchain_core/messages/ai.py index 27ae49dca9f..86d48ec3254 100644 --- a/libs/core/langchain_core/messages/ai.py +++ b/libs/core/langchain_core/messages/ai.py @@ -124,7 +124,7 @@ class UsageMetadata(TypedDict): "output_token_details": { "audio": 10, "reasoning": 200, - } + }, } .. 
versionchanged:: 0.3.9 @@ -468,13 +468,13 @@ def add_usage( input_tokens=5, output_tokens=0, total_tokens=5, - input_token_details=InputTokenDetails(cache_read=3) + input_token_details=InputTokenDetails(cache_read=3), ) right = UsageMetadata( input_tokens=0, output_tokens=10, total_tokens=10, - output_token_details=OutputTokenDetails(reasoning=4) + output_token_details=OutputTokenDetails(reasoning=4), ) add_usage(left, right) @@ -488,7 +488,7 @@ def add_usage( output_tokens=10, total_tokens=15, input_token_details=InputTokenDetails(cache_read=3), - output_token_details=OutputTokenDetails(reasoning=4) + output_token_details=OutputTokenDetails(reasoning=4), ) """ @@ -525,13 +525,13 @@ def subtract_usage( input_tokens=5, output_tokens=10, total_tokens=15, - input_token_details=InputTokenDetails(cache_read=4) + input_token_details=InputTokenDetails(cache_read=4), ) right = UsageMetadata( input_tokens=3, output_tokens=8, total_tokens=11, - output_token_details=OutputTokenDetails(reasoning=4) + output_token_details=OutputTokenDetails(reasoning=4), ) subtract_usage(left, right) @@ -545,7 +545,7 @@ def subtract_usage( output_tokens=2, total_tokens=4, input_token_details=InputTokenDetails(cache_read=4), - output_token_details=OutputTokenDetails(reasoning=0) + output_token_details=OutputTokenDetails(reasoning=0), ) """ diff --git a/libs/core/langchain_core/messages/human.py b/libs/core/langchain_core/messages/human.py index 1be4cbfa9d3..d6260cd14bd 100644 --- a/libs/core/langchain_core/messages/human.py +++ b/libs/core/langchain_core/messages/human.py @@ -17,12 +17,8 @@ class HumanMessage(BaseMessage): from langchain_core.messages import HumanMessage, SystemMessage messages = [ - SystemMessage( - content="You are a helpful assistant! Your name is Bob." - ), - HumanMessage( - content="What is your name?" - ) + SystemMessage(content="You are a helpful assistant! Your name is Bob."), + HumanMessage(content="What is your name?"), ] # Instantiate a chat model and invoke it with the messages diff --git a/libs/core/langchain_core/messages/system.py b/libs/core/langchain_core/messages/system.py index d63bd53a0fe..491bea204ea 100644 --- a/libs/core/langchain_core/messages/system.py +++ b/libs/core/langchain_core/messages/system.py @@ -18,12 +18,8 @@ class SystemMessage(BaseMessage): from langchain_core.messages import HumanMessage, SystemMessage messages = [ - SystemMessage( - content="You are a helpful assistant! Your name is Bob." - ), - HumanMessage( - content="What is your name?" - ) + SystemMessage(content="You are a helpful assistant! 
Your name is Bob."), + HumanMessage(content="What is your name?"), ] # Define a chat model and invoke it with the messages diff --git a/libs/core/langchain_core/messages/tool.py b/libs/core/langchain_core/messages/tool.py index 10e2d18917a..4d847c86d97 100644 --- a/libs/core/langchain_core/messages/tool.py +++ b/libs/core/langchain_core/messages/tool.py @@ -32,7 +32,7 @@ class ToolMessage(BaseMessage, ToolOutputMixin): from langchain_core.messages import ToolMessage - ToolMessage(content='42', tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL') + ToolMessage(content="42", tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL") Example: A ToolMessage where only part of the tool output is sent to the model @@ -45,7 +45,8 @@ class ToolMessage(BaseMessage, ToolOutputMixin): from langchain_core.messages import ToolMessage tool_output = { - "stdout": "From the graph we can see that the correlation between x and y is ...", + "stdout": "From the graph we can see that the correlation between " + "x and y is ...", "stderr": None, "artifacts": {"type": "image", "base64_data": "/9j/4gIcSU..."}, } @@ -53,14 +54,14 @@ class ToolMessage(BaseMessage, ToolOutputMixin): ToolMessage( content=tool_output["stdout"], artifact=tool_output, - tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL', + tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL", ) The tool_call_id field is used to associate the tool call request with the tool call response. This is useful in situations where a chat model is able to request multiple tool calls in parallel. - """ # noqa: E501 + """ tool_call_id: str """Tool call that this message is responding to.""" @@ -184,11 +185,7 @@ class ToolCall(TypedDict): .. code-block:: python - { - "name": "foo", - "args": {"a": 1}, - "id": "123" - } + {"name": "foo", "args": {"a": 1}, "id": "123"} This represents a request to call the tool named "foo" with arguments {"a": 1} and an identifier of "123". @@ -236,12 +233,12 @@ class ToolCallChunk(TypedDict): .. code-block:: python left_chunks = [ToolCallChunk(name="foo", args='{"a":', index=0)] - right_chunks = [ToolCallChunk(name=None, args='1}', index=0)] + right_chunks = [ToolCallChunk(name=None, args="1}", index=0)] ( AIMessageChunk(content="", tool_call_chunks=left_chunks) + AIMessageChunk(content="", tool_call_chunks=right_chunks) - ).tool_call_chunks == [ToolCallChunk(name='foo', args='{"a":1}', index=0)] + ).tool_call_chunks == [ToolCallChunk(name="foo", args='{"a":1}', index=0)] """ diff --git a/libs/core/langchain_core/messages/utils.py b/libs/core/langchain_core/messages/utils.py index 2796707aa24..16285eaefb2 100644 --- a/libs/core/langchain_core/messages/utils.py +++ b/libs/core/langchain_core/messages/utils.py @@ -424,11 +424,16 @@ def filter_messages( exclude_ids: Message IDs to exclude. Default is None. exclude_tool_calls: Tool call IDs to exclude. Default is None. Can be one of the following: - - `True`: all AIMessages with tool calls and all ToolMessages will be excluded. + + - ``True``: all AIMessages with tool calls and all ToolMessages will be + excluded. - a sequence of tool call IDs to exclude: + - ToolMessages with the corresponding tool call ID will be excluded. - - The `tool_calls` in the AIMessage will be updated to exclude matching tool calls. - If all tool_calls are filtered from an AIMessage, the whole message is excluded. + - The ``tool_calls`` in the AIMessage will be updated to exclude matching + tool calls. + If all tool_calls are filtered from an AIMessage, + the whole message is excluded. 
Returns: A list of Messages that meets at least one of the incl_* conditions and none @@ -441,14 +446,25 @@ def filter_messages( Example: .. code-block:: python - from langchain_core.messages import filter_messages, AIMessage, HumanMessage, SystemMessage + from langchain_core.messages import ( + filter_messages, + AIMessage, + HumanMessage, + SystemMessage, + ) messages = [ SystemMessage("you're a good assistant."), HumanMessage("what's your name", id="foo", name="example_user"), AIMessage("steve-o", id="bar", name="example_assistant"), - HumanMessage("what's your favorite color", id="baz",), - AIMessage("silicon blue", id="blah",), + HumanMessage( + "what's your favorite color", + id="baz", + ), + AIMessage( + "silicon blue", + id="blah", + ), ] filter_messages( @@ -465,7 +481,7 @@ def filter_messages( HumanMessage("what's your name", id="foo", name="example_user"), ] - """ # noqa: E501 + """ messages = convert_to_messages(messages) filtered: list[BaseMessage] = [] for msg in messages: @@ -544,12 +560,14 @@ def merge_message_runs( Returns: list of BaseMessages with consecutive runs of message types merged into single messages. By default, if two messages being merged both have string contents, - the merged content is a concatenation of the two strings with a new-line separator. + the merged content is a concatenation of the two strings with a new-line + separator. The separator inserted between message chunks can be controlled by specifying - any string with ``chunk_separator``. If at least one of the messages has a list of - content blocks, the merged content is a list of content blocks. + any string with ``chunk_separator``. If at least one of the messages has a list + of content blocks, the merged content is a list of content blocks. Example: + .. 
code-block:: python from langchain_core.messages import ( @@ -562,16 +580,33 @@ def merge_message_runs( messages = [ SystemMessage("you're a good assistant."), - HumanMessage("what's your favorite color", id="foo",), - HumanMessage("wait your favorite food", id="bar",), + HumanMessage( + "what's your favorite color", + id="foo", + ), + HumanMessage( + "wait your favorite food", + id="bar", + ), AIMessage( "my favorite colo", - tool_calls=[ToolCall(name="blah_tool", args={"x": 2}, id="123", type="tool_call")], + tool_calls=[ + ToolCall( + name="blah_tool", args={"x": 2}, id="123", type="tool_call" + ) + ], id="baz", ), AIMessage( [{"type": "text", "text": "my favorite dish is lasagna"}], - tool_calls=[ToolCall(name="blah_tool", args={"x": -10}, id="456", type="tool_call")], + tool_calls=[ + ToolCall( + name="blah_tool", + args={"x": -10}, + id="456", + type="tool_call", + ) + ], id="blur", ), ] @@ -582,21 +617,34 @@ def merge_message_runs( [ SystemMessage("you're a good assistant."), - HumanMessage("what's your favorite color\\nwait your favorite food", id="foo",), + HumanMessage( + "what's your favorite color\\n" + "wait your favorite food", id="foo", + ), AIMessage( [ "my favorite colo", {"type": "text", "text": "my favorite dish is lasagna"} ], tool_calls=[ - ToolCall({"name": "blah_tool", "args": {"x": 2}, "id": "123", "type": "tool_call"}), - ToolCall({"name": "blah_tool", "args": {"x": -10}, "id": "456", "type": "tool_call"}) + ToolCall({ + "name": "blah_tool", + "args": {"x": 2}, + "id": "123", + "type": "tool_call" + }), + ToolCall({ + "name": "blah_tool", + "args": {"x": -10}, + "id": "456", + "type": "tool_call" + }) ] id="baz" ), ] - """ # noqa: E501 + """ if not messages: return [] messages = convert_to_messages(messages) @@ -656,8 +704,8 @@ def trim_messages( properties: 1. The resulting chat history should be valid. Most chat models expect that chat - history starts with either (1) a ``HumanMessage`` or (2) a ``SystemMessage`` followed - by a ``HumanMessage``. To achieve this, set ``start_on="human"``. + history starts with either (1) a ``HumanMessage`` or (2) a ``SystemMessage`` + followed by a ``HumanMessage``. To achieve this, set ``start_on="human"``. In addition, generally a ``ToolMessage`` can only appear after an ``AIMessage`` that involved a tool call. Please see the following link for more information about messages: @@ -748,14 +796,18 @@ def trim_messages( ) messages = [ - SystemMessage("you're a good assistant, you always respond with a joke."), + SystemMessage( + "you're a good assistant, you always respond with a joke." + ), HumanMessage("i wonder why it's called langchain"), AIMessage( - 'Well, I guess they thought "WordRope" and "SentenceString" just didn\'t have the same ring to it!' + 'Well, I guess they thought "WordRope" and "SentenceString" just ' + "didn't have the same ring to it!" ), HumanMessage("and who is harrison chasing anyways"), AIMessage( - "Hmmm let me think.\n\nWhy, he's probably chasing after the last cup of coffee in the office!" + "Hmmm let me think.\n\nWhy, he's probably chasing after the last " + "cup of coffee in the office!" ), HumanMessage("what do you call a speechless parrot"), ] @@ -780,8 +832,10 @@ def trim_messages( .. code-block:: python [ - SystemMessage(content="you're a good assistant, you always respond with a joke."), - HumanMessage(content='what do you call a speechless parrot'), + SystemMessage( + content="you're a good assistant, you always respond with a joke." 
+ ), + HumanMessage(content="what do you call a speechless parrot"), ] Trim chat history based on the message count, keeping the SystemMessage if @@ -811,10 +865,15 @@ def trim_messages( .. code-block:: python [ - SystemMessage(content="you're a good assistant, you always respond with a joke."), - HumanMessage(content='and who is harrison chasing anyways'), - AIMessage(content="Hmmm let me think.\n\nWhy, he's probably chasing after the last cup of coffee in the office!"), - HumanMessage(content='what do you call a speechless parrot'), + SystemMessage( + content="you're a good assistant, you always respond with a joke." + ), + HumanMessage(content="and who is harrison chasing anyways"), + AIMessage( + content="Hmmm let me think.\n\nWhy, he's probably chasing after " + "the last cup of coffee in the office!" + ), + HumanMessage(content="what do you call a speechless parrot"), ] @@ -825,7 +884,9 @@ def trim_messages( messages = [ SystemMessage("This is a 4 token text. The full message is 10 tokens."), - HumanMessage("This is a 4 token text. The full message is 10 tokens.", id="first"), + HumanMessage( + "This is a 4 token text. The full message is 10 tokens.", id="first" + ), AIMessage( [ {"type": "text", "text": "This is the FIRST 4 token block."}, @@ -833,10 +894,16 @@ def trim_messages( ], id="second", ), - HumanMessage("This is a 4 token text. The full message is 10 tokens.", id="third"), - AIMessage("This is a 4 token text. The full message is 10 tokens.", id="fourth"), + HumanMessage( + "This is a 4 token text. The full message is 10 tokens.", id="third" + ), + AIMessage( + "This is a 4 token text. The full message is 10 tokens.", + id="fourth", + ), ] + def dummy_token_counter(messages: list[BaseMessage]) -> int: # treat each message like it adds 3 default tokens at the beginning # of the message and at the end of the message. 3 + 4 + 3 = 10 tokens @@ -849,9 +916,17 @@ def trim_messages( count = 0 for msg in messages: if isinstance(msg.content, str): - count += default_msg_prefix_len + default_content_len + default_msg_suffix_len + count += ( + default_msg_prefix_len + + default_content_len + + default_msg_suffix_len + ) if isinstance(msg.content, list): - count += default_msg_prefix_len + len(msg.content) * default_content_len + default_msg_suffix_len + count += ( + default_msg_prefix_len + + len(msg.content) * default_content_len + + default_msg_suffix_len + ) return count First 30 tokens, allowing partial messages: @@ -868,12 +943,20 @@ def trim_messages( .. code-block:: python [ - SystemMessage("This is a 4 token text. The full message is 10 tokens."), - HumanMessage("This is a 4 token text. The full message is 10 tokens.", id="first"), - AIMessage( [{"type": "text", "text": "This is the FIRST 4 token block."}], id="second"), + SystemMessage( + "This is a 4 token text. The full message is 10 tokens." + ), + HumanMessage( + "This is a 4 token text. 
The full message is 10 tokens.", + id="first", + ), + AIMessage( + [{"type": "text", "text": "This is the FIRST 4 token block."}], + id="second", + ), ] - """ # noqa: E501 + """ # Validate arguments if start_on and strategy == "first": msg = "start_on parameter is only valid with strategy='last'" @@ -985,8 +1068,27 @@ def convert_to_openai_messages( messages = [ SystemMessage([{"type": "text", "text": "foo"}]), - {"role": "user", "content": [{"type": "text", "text": "whats in this"}, {"type": "image_url", "image_url": {"url": "data:image/png;base64,'/9j/4AAQSk'"}}]}, - AIMessage("", tool_calls=[{"name": "analyze", "args": {"baz": "buz"}, "id": "1", "type": "tool_call"}]), + { + "role": "user", + "content": [ + {"type": "text", "text": "whats in this"}, + { + "type": "image_url", + "image_url": {"url": "data:image/png;base64,'/9j/4AAQSk'"}, + }, + ], + }, + AIMessage( + "", + tool_calls=[ + { + "name": "analyze", + "args": {"baz": "buz"}, + "id": "1", + "type": "tool_call", + } + ], + ), ToolMessage("foobar", tool_call_id="1", name="bar"), {"role": "assistant", "content": "thats nice"}, ] diff --git a/libs/core/langchain_core/output_parsers/base.py b/libs/core/langchain_core/output_parsers/base.py index a187efb4b23..6cefd3ef62f 100644 --- a/libs/core/langchain_core/output_parsers/base.py +++ b/libs/core/langchain_core/output_parsers/base.py @@ -144,7 +144,10 @@ class BaseOutputParser( def parse(self, text: str) -> bool: cleaned_text = text.strip().upper() - if cleaned_text not in (self.true_val.upper(), self.false_val.upper()): + if cleaned_text not in ( + self.true_val.upper(), + self.false_val.upper(), + ): raise OutputParserException( f"BooleanOutputParser expected output value to either be " f"{self.true_val} or {self.false_val} (case-insensitive). " @@ -156,7 +159,7 @@ class BaseOutputParser( def _type(self) -> str: return "boolean_output_parser" - """ # noqa: E501 + """ @property @override diff --git a/libs/core/langchain_core/prompts/chat.py b/libs/core/langchain_core/prompts/chat.py index 489cbe7a703..12728ef76e8 100644 --- a/libs/core/langchain_core/prompts/chat.py +++ b/libs/core/langchain_core/prompts/chat.py @@ -67,10 +67,10 @@ class MessagesPlaceholder(BaseMessagePromptTemplate): from langchain_core.prompts import MessagesPlaceholder prompt = MessagesPlaceholder("history") - prompt.format_messages() # raises KeyError + prompt.format_messages() # raises KeyError prompt = MessagesPlaceholder("history", optional=True) - prompt.format_messages() # returns empty list [] + prompt.format_messages() # returns empty list [] prompt.format_messages( history=[ @@ -93,14 +93,14 @@ class MessagesPlaceholder(BaseMessagePromptTemplate): [ ("system", "You are a helpful assistant."), MessagesPlaceholder("history"), - ("human", "{question}") + ("human", "{question}"), ] ) prompt.invoke( - { - "history": [("human", "what's 5 + 2"), ("ai", "5 + 2 is 7")], - "question": "now multiply that by 4" - } + { + "history": [("human", "what's 5 + 2"), ("ai", "5 + 2 is 7")], + "question": "now multiply that by 4", + } ) # -> ChatPromptValue(messages=[ # SystemMessage(content="You are a helpful assistant."), @@ -795,18 +795,17 @@ class ChatPromptTemplate(BaseChatPromptTemplate): from langchain_core.prompts import ChatPromptTemplate - template = ChatPromptTemplate([ - ("system", "You are a helpful AI bot. 
Your name is {name}."), - ("human", "Hello, how are you doing?"), - ("ai", "I'm doing well, thanks!"), - ("human", "{user_input}"), - ]) + template = ChatPromptTemplate( + [ + ("system", "You are a helpful AI bot. Your name is {name}."), + ("human", "Hello, how are you doing?"), + ("ai", "I'm doing well, thanks!"), + ("human", "{user_input}"), + ] + ) prompt_value = template.invoke( - { - "name": "Bob", - "user_input": "What is your name?" - } + {"name": "Bob", "user_input": "What is your name?"} ) # Output: # ChatPromptValue( @@ -816,7 +815,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate): # AIMessage(content="I'm doing well, thanks!"), # HumanMessage(content='What is your name?') # ] - #) + # ) Messages Placeholder: @@ -826,14 +825,16 @@ class ChatPromptTemplate(BaseChatPromptTemplate): # you can initialize the template with a MessagesPlaceholder # either using the class directly or with the shorthand tuple syntax: - template = ChatPromptTemplate([ - ("system", "You are a helpful AI bot."), - # Means the template will receive an optional list of messages under - # the "conversation" key - ("placeholder", "{conversation}") - # Equivalently: - # MessagesPlaceholder(variable_name="conversation", optional=True) - ]) + template = ChatPromptTemplate( + [ + ("system", "You are a helpful AI bot."), + # Means the template will receive an optional list of messages under + # the "conversation" key + ("placeholder", "{conversation}"), + # Equivalently: + # MessagesPlaceholder(variable_name="conversation", optional=True) + ] + ) prompt_value = template.invoke( { @@ -841,7 +842,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate): ("human", "Hi!"), ("ai", "How can I assist you today?"), ("human", "Can you make me an ice cream sundae?"), - ("ai", "No.") + ("ai", "No."), ] } ) @@ -855,7 +856,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate): # HumanMessage(content='Can you make me an ice cream sundae?'), # AIMessage(content='No.'), # ] - #) + # ) Single-variable template: @@ -868,10 +869,12 @@ class ChatPromptTemplate(BaseChatPromptTemplate): from langchain_core.prompts import ChatPromptTemplate - template = ChatPromptTemplate([ - ("system", "You are a helpful AI bot. Your name is Carl."), - ("human", "{user_input}"), - ]) + template = ChatPromptTemplate( + [ + ("system", "You are a helpful AI bot. Your name is Carl."), + ("human", "{user_input}"), + ] + ) prompt_value = template.invoke("Hello, there!") # Equivalent to @@ -930,20 +933,24 @@ class ChatPromptTemplate(BaseChatPromptTemplate): .. code-block:: python - template = ChatPromptTemplate([ - ("human", "Hello, how are you?"), - ("ai", "I'm doing well, thanks!"), - ("human", "That's good to hear."), - ]) + template = ChatPromptTemplate( + [ + ("human", "Hello, how are you?"), + ("ai", "I'm doing well, thanks!"), + ("human", "That's good to hear."), + ] + ) Instantiation from mixed message formats: .. code-block:: python - template = ChatPromptTemplate([ - SystemMessage(content="hello"), - ("human", "Hello, how are you?"), - ]) + template = ChatPromptTemplate( + [ + SystemMessage(content="hello"), + ("human", "Hello, how are you?"), + ] + ) """ messages_ = [ @@ -1137,20 +1144,24 @@ class ChatPromptTemplate(BaseChatPromptTemplate): .. 
code-block:: python - template = ChatPromptTemplate.from_messages([ - ("human", "Hello, how are you?"), - ("ai", "I'm doing well, thanks!"), - ("human", "That's good to hear."), - ]) + template = ChatPromptTemplate.from_messages( + [ + ("human", "Hello, how are you?"), + ("ai", "I'm doing well, thanks!"), + ("human", "That's good to hear."), + ] + ) Instantiation from mixed message formats: .. code-block:: python - template = ChatPromptTemplate.from_messages([ - SystemMessage(content="hello"), - ("human", "Hello, how are you?"), - ]) + template = ChatPromptTemplate.from_messages( + [ + SystemMessage(content="hello"), + ("human", "Hello, how are you?"), + ] + ) Args: messages: sequence of message representations. diff --git a/libs/core/langchain_core/prompts/few_shot.py b/libs/core/langchain_core/prompts/few_shot.py index a1d237e66cf..ddb09c2e320 100644 --- a/libs/core/langchain_core/prompts/few_shot.py +++ b/libs/core/langchain_core/prompts/few_shot.py @@ -272,7 +272,7 @@ class FewShotChatMessagePromptTemplate( from langchain_core.prompts import ( FewShotChatMessagePromptTemplate, - ChatPromptTemplate + ChatPromptTemplate, ) examples = [ @@ -281,7 +281,7 @@ class FewShotChatMessagePromptTemplate( ] example_prompt = ChatPromptTemplate.from_messages( - [('human', 'What is {input}?'), ('ai', '{output}')] + [("human", "What is {input}?"), ("ai", "{output}")] ) few_shot_prompt = FewShotChatMessagePromptTemplate( @@ -292,9 +292,9 @@ class FewShotChatMessagePromptTemplate( final_prompt = ChatPromptTemplate.from_messages( [ - ('system', 'You are a helpful AI Assistant'), + ("system", "You are a helpful AI Assistant"), few_shot_prompt, - ('human', '{input}'), + ("human", "{input}"), ] ) final_prompt.format(input="What is 4+4?") @@ -314,10 +314,7 @@ class FewShotChatMessagePromptTemplate( # ... 
] - to_vectorize = [ - " ".join(example.values()) - for example in examples - ] + to_vectorize = [" ".join(example.values()) for example in examples] embeddings = OpenAIEmbeddings() vectorstore = Chroma.from_texts( to_vectorize, embeddings, metadatas=examples @@ -355,6 +352,7 @@ class FewShotChatMessagePromptTemplate( # Use within an LLM from langchain_core.chat_models import ChatAnthropic + chain = final_prompt | ChatAnthropic(model="claude-3-haiku-20240307") chain.invoke({"input": "What's 3+3?"}) diff --git a/libs/core/langchain_core/prompts/structured.py b/libs/core/langchain_core/prompts/structured.py index 96dcb79124a..d05ace21ccf 100644 --- a/libs/core/langchain_core/prompts/structured.py +++ b/libs/core/langchain_core/prompts/structured.py @@ -89,10 +89,12 @@ class StructuredPrompt(ChatPromptTemplate): from langchain_core.prompts import StructuredPrompt + class OutputSchema(BaseModel): name: str value: int + template = StructuredPrompt( [ ("human", "Hello, how are you?"), diff --git a/libs/core/langchain_core/rate_limiters.py b/libs/core/langchain_core/rate_limiters.py index dffdb580fa0..9df065a8007 100644 --- a/libs/core/langchain_core/rate_limiters.py +++ b/libs/core/langchain_core/rate_limiters.py @@ -110,9 +110,9 @@ class InMemoryRateLimiter(BaseRateLimiter): ) from langchain_anthropic import ChatAnthropic + model = ChatAnthropic( - model_name="claude-3-opus-20240229", - rate_limiter=rate_limiter + model_name="claude-3-opus-20240229", rate_limiter=rate_limiter ) for _ in range(5): diff --git a/libs/core/langchain_core/retrievers.py b/libs/core/langchain_core/retrievers.py index efc9fc40a7b..2a7cb681ff8 100644 --- a/libs/core/langchain_core/retrievers.py +++ b/libs/core/langchain_core/retrievers.py @@ -109,6 +109,7 @@ class BaseRetriever(RunnableSerializable[RetrieverInput, RetrieverOutput], ABC): from sklearn.metrics.pairwise import cosine_similarity + class TFIDFRetriever(BaseRetriever, BaseModel): vectorizer: Any docs: list[Document] @@ -122,10 +123,12 @@ class BaseRetriever(RunnableSerializable[RetrieverInput, RetrieverOutput], ABC): # Ip -- (n_docs,x), Op -- (n_docs,n_Feats) query_vec = self.vectorizer.transform([query]) # Op -- (n_docs,1) -- Cosine Sim with each doc - results = cosine_similarity(self.tfidf_array, query_vec).reshape((-1,)) + results = cosine_similarity(self.tfidf_array, query_vec).reshape( + (-1,) + ) return [self.docs[i] for i in results.argsort()[-self.k :][::-1]] - """ # noqa: E501 + """ model_config = ConfigDict( arbitrary_types_allowed=True, diff --git a/libs/core/langchain_core/runnables/base.py b/libs/core/langchain_core/runnables/base.py index 4da0a484415..c065cc7afbf 100644 --- a/libs/core/langchain_core/runnables/base.py +++ b/libs/core/langchain_core/runnables/base.py @@ -114,12 +114,13 @@ class Runnable(ABC, Generic[Input, Output]): - **``invoke``/``ainvoke``**: Transforms a single input into an output. - **``batch``/``abatch``**: Efficiently transforms multiple inputs into outputs. - **``stream``/``astream``**: Streams output from a single input as it's produced. - - **``astream_log``**: Streams output and selected intermediate results from an input. + - **``astream_log``**: Streams output and selected intermediate results from an + input. Built-in optimizations: - - **Batch**: By default, batch runs invoke() in parallel using a thread pool executor. - Override to optimize batching. + - **Batch**: By default, batch runs invoke() in parallel using a thread pool + executor. Override to optimize batching. 
- **Async**: Methods with ``'a'`` suffix are asynchronous. By default, they execute the sync counterpart using asyncio's thread pool. @@ -129,14 +130,16 @@ class Runnable(ABC, Generic[Input, Output]): execution, add tags and metadata for tracing and debugging etc. Runnables expose schematic information about their input, output and config via - the ``input_schema`` property, the ``output_schema`` property and ``config_schema`` method. + the ``input_schema`` property, the ``output_schema`` property and ``config_schema`` + method. LCEL and Composition ==================== - The LangChain Expression Language (LCEL) is a declarative way to compose ``Runnables`` - into chains. Any chain constructed this way will automatically have sync, async, - batch, and streaming support. + The LangChain Expression Language (LCEL) is a declarative way to compose + ``Runnables``into chains. + Any chain constructed this way will automatically have sync, async, batch, and + streaming support. The main composition primitives are ``RunnableSequence`` and ``RunnableParallel``. @@ -157,25 +160,27 @@ class Runnable(ABC, Generic[Input, Output]): # A RunnableSequence constructed using the `|` operator sequence = RunnableLambda(lambda x: x + 1) | RunnableLambda(lambda x: x * 2) - sequence.invoke(1) # 4 - sequence.batch([1, 2, 3]) # [4, 6, 8] + sequence.invoke(1) # 4 + sequence.batch([1, 2, 3]) # [4, 6, 8] # A sequence that contains a RunnableParallel constructed using a dict literal sequence = RunnableLambda(lambda x: x + 1) | { - 'mul_2': RunnableLambda(lambda x: x * 2), - 'mul_5': RunnableLambda(lambda x: x * 5) + "mul_2": RunnableLambda(lambda x: x * 2), + "mul_5": RunnableLambda(lambda x: x * 5), } - sequence.invoke(1) # {'mul_2': 4, 'mul_5': 10} + sequence.invoke(1) # {'mul_2': 4, 'mul_5': 10} Standard Methods ================ - All ``Runnable``s expose additional methods that can be used to modify their behavior - (e.g., add a retry policy, add lifecycle listeners, make them configurable, etc.). + All ``Runnable``s expose additional methods that can be used to modify their + behavior (e.g., add a retry policy, add lifecycle listeners, make them + configurable, etc.). - These methods will work on any ``Runnable``, including ``Runnable`` chains constructed - by composing other ``Runnable``s. See the individual methods for details. + These methods will work on any ``Runnable``, including ``Runnable`` chains + constructed by composing other ``Runnable``s. + See the individual methods for details. For example, @@ -219,6 +224,7 @@ class Runnable(ABC, Generic[Input, Output]): .. code-block:: python from langchain_core.globals import set_debug + set_debug(True) Alternatively, you can pass existing or custom callbacks to any given chain: @@ -227,14 +233,11 @@ class Runnable(ABC, Generic[Input, Output]): from langchain_core.tracers import ConsoleCallbackHandler - chain.invoke( - ..., - config={'callbacks': [ConsoleCallbackHandler()]} - ) + chain.invoke(..., config={"callbacks": [ConsoleCallbackHandler()]}) For a UI (and much more) checkout `LangSmith `__. - """ # noqa: E501 + """ name: Optional[str] """The name of the ``Runnable``. 
Used for debugging and tracing.""" @@ -381,9 +384,11 @@ class Runnable(ABC, Generic[Input, Output]): from langchain_core.runnables import RunnableLambda + def add_one(x: int) -> int: return x + 1 + runnable = RunnableLambda(add_one) print(runnable.get_input_jsonschema()) @@ -395,7 +400,10 @@ class Runnable(ABC, Generic[Input, Output]): @property def output_schema(self) -> type[BaseModel]: - """The type of output this ``Runnable`` produces specified as a pydantic model.""" # noqa: E501 + """Output schema. + + The type of output this ``Runnable`` produces specified as a pydantic model. + """ return self.get_output_schema() def get_output_schema( @@ -455,9 +463,11 @@ class Runnable(ABC, Generic[Input, Output]): from langchain_core.runnables import RunnableLambda + def add_one(x: int) -> int: return x + 1 + runnable = RunnableLambda(add_one) print(runnable.get_output_jsonschema()) @@ -603,12 +613,15 @@ class Runnable(ABC, Generic[Input, Output]): from langchain_core.runnables import RunnableLambda + def add_one(x: int) -> int: return x + 1 + def mul_two(x: int) -> int: return x * 2 + runnable_1 = RunnableLambda(add_one) runnable_2 = RunnableLambda(mul_two) sequence = runnable_1.pipe(runnable_2) @@ -658,13 +671,14 @@ class Runnable(ABC, Generic[Input, Output]): as_str = RunnableLambda(str) as_json = RunnableLambda(json.loads) + + def as_bytes(x: Any) -> bytes: return bytes(x, "utf-8") + chain = RunnableMap( - str=as_str, - json=as_json, - bytes=RunnableLambda(as_bytes) + str=as_str, json=as_json, bytes=RunnableLambda(as_bytes) ) chain.invoke("[1, 2, 3]") @@ -1254,6 +1268,7 @@ class Runnable(ABC, Generic[Input, Output]): '''Format the docs.''' return ", ".join([doc.page_content for doc in docs]) + format_docs = RunnableLambda(format_docs) ``some_tool``: @@ -1280,9 +1295,11 @@ class Runnable(ABC, Generic[Input, Output]): from langchain_core.runnables import RunnableLambda + async def reverse(s: str) -> str: return s[::-1] + chain = RunnableLambda(func=reverse) events = [ @@ -1422,7 +1439,9 @@ class Runnable(ABC, Generic[Input, Output]): config: Optional[RunnableConfig] = None, **kwargs: Optional[Any], ) -> Iterator[Output]: - """Default implementation of transform, which buffers input and calls ``astream``. + """Transform inputs to outputs. + + Default implementation of transform, which buffers input and calls ``astream``. Subclasses should override this method if they can start producing output while input is still being generated. @@ -1435,7 +1454,7 @@ class Runnable(ABC, Generic[Input, Output]): Yields: The output of the ``Runnable``. - """ # noqa: E501 + """ final: Input got_first_val = False @@ -1465,7 +1484,9 @@ class Runnable(ABC, Generic[Input, Output]): config: Optional[RunnableConfig] = None, **kwargs: Optional[Any], ) -> AsyncIterator[Output]: - """Default implementation of atransform, which buffers input and calls ``astream``. + """Transform inputs to outputs. + + Default implementation of atransform, which buffers input and calls ``astream``. Subclasses should override this method if they can start producing output while input is still being generated. @@ -1478,7 +1499,7 @@ class Runnable(ABC, Generic[Input, Output]): Yields: The output of the ``Runnable``. - """ # noqa: E501 + """ final: Input got_first_val = False @@ -1522,22 +1543,16 @@ class Runnable(ABC, Generic[Input, Output]): from langchain_ollama import ChatOllama from langchain_core.output_parsers import StrOutputParser - llm = ChatOllama(model='llama2') + llm = ChatOllama(model="llama2") # Without bind. 
- chain = ( - llm - | StrOutputParser() - ) + chain = llm | StrOutputParser() chain.invoke("Repeat quoted words exactly: 'One two three four five.'") # Output is 'One two three four five.' # With bind. - chain = ( - llm.bind(stop=["three"]) - | StrOutputParser() - ) + chain = llm.bind(stop=["three"]) | StrOutputParser() chain.invoke("Repeat quoted words exactly: 'One two three four five.'") # Output is 'One two' @@ -1609,18 +1624,21 @@ class Runnable(ABC, Generic[Input, Output]): import time - def test_runnable(time_to_sleep : int): + + def test_runnable(time_to_sleep: int): time.sleep(time_to_sleep) + def fn_start(run_obj: Run): print("start_time:", run_obj.start_time) + def fn_end(run_obj: Run): print("end_time:", run_obj.end_time) + chain = RunnableLambda(test_runnable).with_listeners( - on_start=fn_start, - on_end=fn_end + on_start=fn_start, on_end=fn_end ) chain.invoke(2) @@ -1650,7 +1668,9 @@ class Runnable(ABC, Generic[Input, Output]): on_end: Optional[AsyncListener] = None, on_error: Optional[AsyncListener] = None, ) -> Runnable[Input, Output]: - """Bind async lifecycle listeners to a ``Runnable``, returning a new ``Runnable``. + """Bind async lifecycle listeners to a ``Runnable``. + + Returns a new ``Runnable``. The Run object contains information about the run, including its ``id``, ``type``, ``input``, ``output``, ``error``, ``start_time``, ``end_time``, and @@ -1716,7 +1736,7 @@ class Runnable(ABC, Generic[Input, Output]): on end callback ends at 2025-03-01T07:05:29.883893+00:00 on end callback ends at 2025-03-01T07:05:30.884831+00:00 - """ # noqa: E501 + """ from langchain_core.tracers.root_listeners import AsyncRootListenersTracer return RunnableBinding( @@ -1796,7 +1816,7 @@ class Runnable(ABC, Generic[Input, Output]): if x == 1: raise ValueError("x is 1") else: - pass + pass runnable = RunnableLambda(_lambda) @@ -1808,7 +1828,7 @@ class Runnable(ABC, Generic[Input, Output]): except ValueError: pass - assert (count == 2) + assert count == 2 """ from langchain_core.runnables.retry import RunnableRetry @@ -1837,11 +1857,13 @@ class Runnable(ABC, Generic[Input, Output]): from langchain_core.runnables import RunnableLambda + def _lambda(x: int) -> int: return x + 1 + runnable = RunnableLambda(_lambda) - print(runnable.map().invoke([1, 2, 3])) # [2, 3, 4] + print(runnable.map().invoke([1, 2, 3])) # [2, 3, 4] """ return RunnableEach(bound=self) @@ -1859,13 +1881,15 @@ class Runnable(ABC, Generic[Input, Output]): in order, upon failures. Args: - fallbacks: A sequence of runnables to try if the original ``Runnable`` fails. + fallbacks: A sequence of runnables to try if the original ``Runnable`` + fails. exceptions_to_handle: A tuple of exception types to handle. Defaults to ``(Exception,)``. exception_key: If string is specified then handled exceptions will be passed - to fallbacks as part of the input under the specified key. If None, - exceptions will not be passed to fallbacks. If used, the base ``Runnable`` - and its fallbacks must accept a dictionary as input. Defaults to None. + to fallbacks as part of the input under the specified key. + If None, exceptions will not be passed to fallbacks. + If used, the base ``Runnable`` and its fallbacks must accept a + dictionary as input. Defaults to None. 
Returns: A new ``Runnable`` that will try the original ``Runnable``, and then each @@ -1891,22 +1915,24 @@ class Runnable(ABC, Generic[Input, Output]): runnable = RunnableGenerator(_generate_immediate_error).with_fallbacks( [RunnableGenerator(_generate)] - ) - print(''.join(runnable.stream({}))) #foo bar + ) + print("".join(runnable.stream({}))) # foo bar Args: - fallbacks: A sequence of runnables to try if the original ``Runnable`` fails. + fallbacks: A sequence of runnables to try if the original ``Runnable`` + fails. exceptions_to_handle: A tuple of exception types to handle. exception_key: If string is specified then handled exceptions will be passed - to fallbacks as part of the input under the specified key. If None, - exceptions will not be passed to fallbacks. If used, the base ``Runnable`` - and its fallbacks must accept a dictionary as input. + to fallbacks as part of the input under the specified key. + If None, exceptions will not be passed to fallbacks. + If used, the base ``Runnable`` and its fallbacks must accept a + dictionary as input. Returns: A new ``Runnable`` that will try the original ``Runnable``, and then each fallback in order, upon failures. - """ # noqa: E501 + """ from langchain_core.runnables.fallbacks import RunnableWithFallbacks return RunnableWithFallbacks( @@ -1931,11 +1957,14 @@ class Runnable(ABC, Generic[Input, Output]): serialized: Optional[dict[str, Any]] = None, **kwargs: Optional[Any], ) -> Output: - """Helper method to transform an ``Input`` value to an ``Output`` value, with callbacks. + """Call with config. + + Helper method to transform an ``Input`` value to an ``Output`` value, + with callbacks. Use this method to implement ``invoke`` in subclasses. - """ # noqa: E501 + """ config = ensure_config(config) callback_manager = get_callback_manager_for_config(config) run_manager = callback_manager.on_chain_start( @@ -1982,10 +2011,13 @@ class Runnable(ABC, Generic[Input, Output]): serialized: Optional[dict[str, Any]] = None, **kwargs: Optional[Any], ) -> Output: - """Helper method to transform an ``Input`` value to an ``Output`` value, with callbacks. + """Async call with config. + + Helper method to transform an ``Input`` value to an ``Output`` value, + with callbacks. Use this method to implement ``ainvoke`` in subclasses. 
- """ # noqa: E501 + """ config = ensure_config(config) callback_manager = get_async_callback_manager_for_config(config) run_manager = await callback_manager.on_chain_start( @@ -2411,13 +2443,16 @@ class Runnable(ABC, Generic[Input, Output]): from typing_extensions import TypedDict from langchain_core.runnables import RunnableLambda + class Args(TypedDict): a: int b: list[int] + def f(x: Args) -> str: return str(x["a"] * max(x["b"])) + runnable = RunnableLambda(f) as_tool = runnable.as_tool() as_tool.invoke({"a": 3, "b": [1, 2]}) @@ -2450,9 +2485,11 @@ class Runnable(ABC, Generic[Input, Output]): from typing import Any from langchain_core.runnables import RunnableLambda + def f(x: dict[str, Any]) -> str: return str(x["a"] * max(x["b"])) + runnable = RunnableLambda(f) as_tool = runnable.as_tool(arg_types={"a": int, "b": list[int]}) as_tool.invoke({"a": 3, "b": [1, 2]}) @@ -2463,12 +2500,15 @@ class Runnable(ABC, Generic[Input, Output]): from langchain_core.runnables import RunnableLambda + def f(x: str) -> str: return x + "a" + def g(x: str) -> str: return x + "z" + runnable = RunnableLambda(f) | g as_tool = runnable.as_tool() as_tool.invoke("b") @@ -2538,14 +2578,15 @@ class RunnableSerializable(Serializable, Runnable[Input, Output]): # max_tokens = 20 print( - "max_tokens_20: ", - model.invoke("tell me something about chess").content + "max_tokens_20: ", model.invoke("tell me something about chess").content ) # max_tokens = 200 - print("max_tokens_200: ", model.with_config( - configurable={"output_token_number": 200} - ).invoke("tell me something about chess").content + print( + "max_tokens_200: ", + model.with_config(configurable={"output_token_number": 200}) + .invoke("tell me something about chess") + .content, ) """ @@ -2596,7 +2637,7 @@ class RunnableSerializable(Serializable, Runnable[Input, Output]): ).configurable_alternatives( ConfigurableField(id="llm"), default_key="anthropic", - openai=ChatOpenAI() + openai=ChatOpenAI(), ) # uses the default model ChatAnthropic @@ -2604,9 +2645,9 @@ class RunnableSerializable(Serializable, Runnable[Input, Output]): # uses ChatOpenAI print( - model.with_config( - configurable={"llm": "openai"} - ).invoke("which organization created you?").content + model.with_config(configurable={"llm": "openai"}) + .invoke("which organization created you?") + .content ) """ @@ -2739,12 +2780,15 @@ class RunnableSequence(RunnableSerializable[Input, Output]): from langchain_core.runnables import RunnableLambda + def add_one(x: int) -> int: return x + 1 + def mul_two(x: int) -> int: return x * 2 + runnable_1 = RunnableLambda(add_one) runnable_2 = RunnableLambda(mul_two) sequence = runnable_1 | runnable_2 @@ -2764,17 +2808,17 @@ class RunnableSequence(RunnableSerializable[Input, Output]): from langchain_openai import ChatOpenAI prompt = PromptTemplate.from_template( - 'In JSON format, give me a list of {topic} and their ' - 'corresponding names in French, Spanish and in a ' - 'Cat Language.' + "In JSON format, give me a list of {topic} and their " + "corresponding names in French, Spanish and in a " + "Cat Language." 
) model = ChatOpenAI() chain = prompt | model | SimpleJsonOutputParser() - async for chunk in chain.astream({'topic': 'colors'}): - print('-') # noqa: T201 - print(chunk, sep='', flush=True) # noqa: T201 + async for chunk in chain.astream({"topic": "colors"}): + print("-") # noqa: T201 + print(chunk, sep="", flush=True) # noqa: T201 """ @@ -3520,15 +3564,19 @@ class RunnableParallel(RunnableSerializable[Input, dict[str, Any]]): from langchain_core.runnables import RunnableLambda + def add_one(x: int) -> int: return x + 1 + def mul_two(x: int) -> int: return x * 2 + def mul_three(x: int) -> int: return x * 3 + runnable_1 = RunnableLambda(add_one) runnable_2 = RunnableLambda(mul_two) runnable_3 = RunnableLambda(mul_three) @@ -3564,8 +3612,7 @@ class RunnableParallel(RunnableSerializable[Input, dict[str, Any]]): model = ChatOpenAI() joke_chain = ( - ChatPromptTemplate.from_template("tell me a joke about {topic}") - | model + ChatPromptTemplate.from_template("tell me a joke about {topic}") | model ) poem_chain = ( ChatPromptTemplate.from_template("write a 2-line poem about {topic}") @@ -4068,9 +4115,10 @@ class RunnableGenerator(Runnable[Input, Output]): for token in ["Have", " a", " nice", " day"]: yield token + runnable = RunnableGenerator(agen) await runnable.ainvoke(None) # "Have a nice day" - [p async for p in runnable.astream(None)] # ["Have", " a", " nice", " day"] + [p async for p in runnable.astream(None)] # ["Have", " a", " nice", " day"] ``RunnableGenerator`` makes it easy to implement custom behavior within a streaming context. Below we show an example: @@ -4090,6 +4138,7 @@ class RunnableGenerator(Runnable[Input, Output]): | StrOutputParser() ) + def character_generator(input: Iterator[str]) -> Iterator[str]: for token in input: if "," in token or "." in token: @@ -4100,7 +4149,10 @@ class RunnableGenerator(Runnable[Input, Output]): runnable = chant_chain | character_generator assert type(runnable.last) is RunnableGenerator - "".join(runnable.stream({"topic": "waste"})) # Reduce👏, Reuse👏, Recycle👏. + "".join( + runnable.stream({"topic": "waste"}) + ) # Reduce👏, Reuse👏, Recycle👏. 
+ # Note that RunnableLambda can be used to delay streaming of one step in a # sequence until the previous step is finished: @@ -4109,6 +4161,7 @@ class RunnableGenerator(Runnable[Input, Output]): for character in input[::-1]: yield character + runnable = chant_chain | RunnableLambda(reverse_generator) "".join(runnable.stream({"topic": "waste"})) # ".elcycer ,esuer ,ecudeR" @@ -4353,26 +4406,29 @@ class RunnableLambda(Runnable[Input, Output]): # This is a RunnableLambda from langchain_core.runnables import RunnableLambda + def add_one(x: int) -> int: return x + 1 + runnable = RunnableLambda(add_one) - runnable.invoke(1) # returns 2 - runnable.batch([1, 2, 3]) # returns [2, 3, 4] + runnable.invoke(1) # returns 2 + runnable.batch([1, 2, 3]) # returns [2, 3, 4] # Async is supported by default by delegating to the sync implementation - await runnable.ainvoke(1) # returns 2 - await runnable.abatch([1, 2, 3]) # returns [2, 3, 4] + await runnable.ainvoke(1) # returns 2 + await runnable.abatch([1, 2, 3]) # returns [2, 3, 4] # Alternatively, can provide both synd and sync implementations async def add_one_async(x: int) -> int: return x + 1 + runnable = RunnableLambda(add_one, afunc=add_one_async) - runnable.invoke(1) # Uses add_one - await runnable.ainvoke(1) # Uses add_one_async + runnable.invoke(1) # Uses add_one + await runnable.ainvoke(1) # Uses add_one_async """ @@ -5080,13 +5136,16 @@ class RunnableLambda(Runnable[Input, Output]): class RunnableEachBase(RunnableSerializable[list[Input], list[Output]]): - """``Runnable`` that calls another ``Runnable`` for each element of the input sequence. + """RunnableEachBase class. - Use only if creating a new ``RunnableEach`` subclass with different ``__init__`` args. + ``Runnable`` that calls another ``Runnable`` for each element of the input sequence. + + Use only if creating a new ``RunnableEach`` subclass with different ``__init__`` + args. See documentation for ``RunnableEach`` for more details. - """ # noqa: E501 + """ bound: Runnable[Input, Output] @@ -5214,7 +5273,9 @@ class RunnableEachBase(RunnableSerializable[list[Input], list[Output]]): class RunnableEach(RunnableEachBase[Input, Output]): - """``Runnable`` that calls another ``Runnable`` for each element of the input sequence. + """RunnableEach class. + + ``Runnable`` that calls another ``Runnable`` for each element of the input sequence. It allows you to call multiple inputs with the bounded ``Runnable``. @@ -5239,7 +5300,7 @@ class RunnableEach(RunnableEachBase[Input, Output]): {'topic':'Biology'}]) print(output) # noqa: T201 - """ # noqa: E501 + """ @override def get_name( @@ -5303,7 +5364,9 @@ class RunnableEach(RunnableEachBase[Input, Output]): on_end: Optional[AsyncListener] = None, on_error: Optional[AsyncListener] = None, ) -> RunnableEach[Input, Output]: - """Bind async lifecycle listeners to a ``Runnable``, returning a new ``Runnable``. + """Bind async lifecycle listeners to a ``Runnable``. + + Returns a new ``Runnable``. The ``Run`` object contains information about the run, including its ``id``, ``type``, ``input``, ``output``, ``error``, ``start_time``, ``end_time``, and @@ -5320,7 +5383,7 @@ class RunnableEach(RunnableEachBase[Input, Output]): Returns: A new ``Runnable`` with the listeners bound. 
- """ # noqa: E501 + """ return RunnableEach( bound=self.bound.with_alisteners( on_start=on_start, on_end=on_end, on_error=on_error @@ -5391,22 +5454,23 @@ class RunnableBindingBase(RunnableSerializable[Input, Output]): # type: ignore[ """Create a ``RunnableBinding`` from a ``Runnable`` and kwargs. Args: - bound: The underlying ``Runnable`` that this ``Runnable`` delegates calls to. + bound: The underlying ``Runnable`` that this ``Runnable`` delegates calls + to. kwargs: optional kwargs to pass to the underlying ``Runnable``, when running - the underlying ``Runnable`` (e.g., via ``invoke``, ``batch``, - ``transform``, or ``stream`` or async variants) - Defaults to None. + the underlying ``Runnable`` (e.g., via ``invoke``, ``batch``, + ``transform``, or ``stream`` or async variants) + Defaults to None. config: optional config to bind to the underlying ``Runnable``. - Defaults to None. + Defaults to None. config_factories: optional list of config factories to apply to the - config before binding to the underlying ``Runnable``. - Defaults to None. + config before binding to the underlying ``Runnable``. + Defaults to None. custom_input_type: Specify to override the input type of the underlying - ``Runnable`` with a custom type. Defaults to None. + ``Runnable`` with a custom type. Defaults to None. custom_output_type: Specify to override the output type of the underlying - ``Runnable`` with a custom type. Defaults to None. + ``Runnable`` with a custom type. Defaults to None. **other_kwargs: Unpacked into the base class. - """ # noqa: E501 + """ super().__init__( bound=bound, kwargs=kwargs or {}, @@ -5747,9 +5811,11 @@ class RunnableBinding(RunnableBindingBase[Input, Output]): # type: ignore[no-re These methods include: - ``bind``: Bind kwargs to pass to the underlying ``Runnable`` when running it. - - ``with_config``: Bind config to pass to the underlying ``Runnable`` when running it. + - ``with_config``: Bind config to pass to the underlying ``Runnable`` when running + it. - ``with_listeners``: Bind lifecycle listeners to the underlying ``Runnable``. - - ``with_types``: Override the input and output types of the underlying ``Runnable``. + - ``with_types``: Override the input and output types of the underlying + ``Runnable``. - ``with_retry``: Bind a retry policy to the underlying ``Runnable``. - ``with_fallbacks``: Bind a fallback policy to the underlying ``Runnable``. @@ -5761,12 +5827,13 @@ class RunnableBinding(RunnableBindingBase[Input, Output]): # type: ignore[no-re # Create a Runnable binding that invokes the ChatModel with the # additional kwarg `stop=['-']` when running it. from langchain_community.chat_models import ChatOpenAI + model = ChatOpenAI() - model.invoke('Say "Parrot-MAGIC"', stop=['-']) # Should return `Parrot` + model.invoke('Say "Parrot-MAGIC"', stop=["-"]) # Should return `Parrot` # Using it the easy way via `bind` method which returns a new # RunnableBinding - runnable_binding = model.bind(stop=['-']) - runnable_binding.invoke('Say "Parrot-MAGIC"') # Should return `Parrot` + runnable_binding = model.bind(stop=["-"]) + runnable_binding.invoke('Say "Parrot-MAGIC"') # Should return `Parrot` Can also be done by instantiating a ``RunnableBinding`` directly (not recommended): @@ -5774,13 +5841,14 @@ class RunnableBinding(RunnableBindingBase[Input, Output]): # type: ignore[no-re .. 
code-block:: python from langchain_core.runnables import RunnableBinding + runnable_binding = RunnableBinding( bound=model, - kwargs={'stop': ['-']} # <-- Note the additional kwargs + kwargs={"stop": ["-"]}, # <-- Note the additional kwargs ) - runnable_binding.invoke('Say "Parrot-MAGIC"') # Should return `Parrot` + runnable_binding.invoke('Say "Parrot-MAGIC"') # Should return `Parrot` - """ # noqa: E501 + """ @override def bind(self, **kwargs: Any) -> Runnable[Input, Output]: @@ -6053,6 +6121,7 @@ def chain( from langchain_core.prompts import PromptTemplate from langchain_openai import OpenAI + @chain def my_func(fields): prompt = PromptTemplate("Hello, {name}!") diff --git a/libs/core/langchain_core/runnables/branch.py b/libs/core/langchain_core/runnables/branch.py index f0975523e15..cecc8f34b5f 100644 --- a/libs/core/langchain_core/runnables/branch.py +++ b/libs/core/langchain_core/runnables/branch.py @@ -61,8 +61,8 @@ class RunnableBranch(RunnableSerializable[Input, Output]): lambda x: "goodbye", ) - branch.invoke("hello") # "HELLO" - branch.invoke(None) # "goodbye" + branch.invoke("hello") # "HELLO" + branch.invoke(None) # "goodbye" """ diff --git a/libs/core/langchain_core/runnables/configurable.py b/libs/core/langchain_core/runnables/configurable.py index eaf1b3d8c9a..a0f9f53a06d 100644 --- a/libs/core/langchain_core/runnables/configurable.py +++ b/libs/core/langchain_core/runnables/configurable.py @@ -501,7 +501,7 @@ class RunnableConfigurableAlternatives(DynamicRunnable[Input, Output]): ).configurable_alternatives( ConfigurableField(id="prompt"), default_key="joke", - poem=PromptTemplate.from_template("Write a short poem about {topic}") + poem=PromptTemplate.from_template("Write a short poem about {topic}"), ) # When invoking the created RunnableSequence, you can pass in the @@ -511,7 +511,9 @@ class RunnableConfigurableAlternatives(DynamicRunnable[Input, Output]): # The `with_config` method brings in the desired Prompt Runnable in your # Runnable Sequence. - chain.with_config(configurable={"prompt": "poem"}).invoke({"topic": "bears"}) + chain.with_config(configurable={"prompt": "poem"}).invoke( + {"topic": "bears"} + ) Equivalently, you can initialize RunnableConfigurableAlternatives directly @@ -520,20 +522,28 @@ class RunnableConfigurableAlternatives(DynamicRunnable[Input, Output]): .. 
code-block:: python from langchain_core.runnables import ConfigurableField - from langchain_core.runnables.configurable import RunnableConfigurableAlternatives + from langchain_core.runnables.configurable import ( + RunnableConfigurableAlternatives, + ) from langchain_openai import ChatOpenAI prompt = RunnableConfigurableAlternatives( - which=ConfigurableField(id='prompt'), + which=ConfigurableField(id="prompt"), default=PromptTemplate.from_template("Tell me a joke about {topic}"), - default_key='joke', + default_key="joke", prefix_keys=False, - alternatives={"poem":PromptTemplate.from_template("Write a short poem about {topic}")} + alternatives={ + "poem": PromptTemplate.from_template( + "Write a short poem about {topic}" + ) + }, ) chain = prompt | ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0) - chain.with_config(configurable={"prompt": "poem"}).invoke({"topic": "bears"}) + chain.with_config(configurable={"prompt": "poem"}).invoke( + {"topic": "bears"} + ) - """ # noqa: E501 + """ which: ConfigurableField """The ConfigurableField to use to choose between alternatives.""" @@ -680,7 +690,10 @@ def make_options_spec( spec: Union[ConfigurableFieldSingleOption, ConfigurableFieldMultiOption], description: Optional[str], ) -> ConfigurableFieldSpec: - """Make a ConfigurableFieldSpec for a ConfigurableFieldSingleOption or ConfigurableFieldMultiOption. + """Make options spec. + + Make a ConfigurableFieldSpec for a ConfigurableFieldSingleOption or + ConfigurableFieldMultiOption. Args: spec: The ConfigurableFieldSingleOption or ConfigurableFieldMultiOption. @@ -688,7 +701,7 @@ def make_options_spec( Returns: The ConfigurableFieldSpec. - """ # noqa: E501 + """ with _enums_for_spec_lock: if enum := _enums_for_spec.get(spec): pass diff --git a/libs/core/langchain_core/runnables/fallbacks.py b/libs/core/langchain_core/runnables/fallbacks.py index ab280790c8e..7b0b845c206 100644 --- a/libs/core/langchain_core/runnables/fallbacks.py +++ b/libs/core/langchain_core/runnables/fallbacks.py @@ -56,12 +56,12 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]): from langchain_core.chat_models.openai import ChatOpenAI from langchain_core.chat_models.anthropic import ChatAnthropic - model = ChatAnthropic( - model="claude-3-haiku-20240307" - ).with_fallbacks([ChatOpenAI(model="gpt-3.5-turbo-0125")]) + model = ChatAnthropic(model="claude-3-haiku-20240307").with_fallbacks( + [ChatOpenAI(model="gpt-3.5-turbo-0125")] + ) # Will usually use ChatAnthropic, but fallback to ChatOpenAI # if ChatAnthropic fails. - model.invoke('hello') + model.invoke("hello") # And you can also use fallbacks at the level of a chain. # Here if both LLM providers fail, we'll fallback to a good hardcoded @@ -71,12 +71,16 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]): from langchain_core.output_parser import StrOutputParser from langchain_core.runnables import RunnableLambda + def when_all_is_lost(inputs): - return ("Looks like our LLM providers are down. " - "Here's a nice 🦜️ emoji for you instead.") + return ( + "Looks like our LLM providers are down. " + "Here's a nice 🦜️ emoji for you instead." 
+ ) + chain_with_fallback = ( - PromptTemplate.from_template('Tell me a joke about {topic}') + PromptTemplate.from_template("Tell me a joke about {topic}") | model | StrOutputParser() ).with_fallbacks([RunnableLambda(when_all_is_lost)]) diff --git a/libs/core/langchain_core/runnables/history.py b/libs/core/langchain_core/runnables/history.py index 42a206b9ae7..e838ee1e16f 100644 --- a/libs/core/langchain_core/runnables/history.py +++ b/libs/core/langchain_core/runnables/history.py @@ -145,11 +145,13 @@ class RunnableWithMessageHistory(RunnableBindingBase): # type: ignore[no-redef] from langchain_core.runnables.history import RunnableWithMessageHistory - prompt = ChatPromptTemplate.from_messages([ - ("system", "You're an assistant who's good at {ability}"), - MessagesPlaceholder(variable_name="history"), - ("human", "{question}"), - ]) + prompt = ChatPromptTemplate.from_messages( + [ + ("system", "You're an assistant who's good at {ability}"), + MessagesPlaceholder(variable_name="history"), + ("human", "{question}"), + ] + ) chain = prompt | ChatAnthropic(model="claude-2") @@ -162,18 +164,22 @@ class RunnableWithMessageHistory(RunnableBindingBase): # type: ignore[no-redef] history_messages_key="history", ) - print(chain_with_history.invoke( # noqa: T201 - {"ability": "math", "question": "What does cosine mean?"}, - config={"configurable": {"session_id": "foo"}} - )) + print( + chain_with_history.invoke( # noqa: T201 + {"ability": "math", "question": "What does cosine mean?"}, + config={"configurable": {"session_id": "foo"}}, + ) + ) # Uses the store defined in the example above. print(store) # noqa: T201 - print(chain_with_history.invoke( # noqa: T201 - {"ability": "math", "question": "What's its inverse"}, - config={"configurable": {"session_id": "foo"}} - )) + print( + chain_with_history.invoke( # noqa: T201 + {"ability": "math", "question": "What's its inverse"}, + config={"configurable": {"session_id": "foo"}}, + ) + ) print(store) # noqa: T201 @@ -184,6 +190,7 @@ class RunnableWithMessageHistory(RunnableBindingBase): # type: ignore[no-redef] store = {} + def get_session_history( user_id: str, conversation_id: str ) -> BaseChatMessageHistory: @@ -191,11 +198,14 @@ class RunnableWithMessageHistory(RunnableBindingBase): # type: ignore[no-redef] store[(user_id, conversation_id)] = InMemoryHistory() return store[(user_id, conversation_id)] - prompt = ChatPromptTemplate.from_messages([ - ("system", "You're an assistant who's good at {ability}"), - MessagesPlaceholder(variable_name="history"), - ("human", "{question}"), - ]) + + prompt = ChatPromptTemplate.from_messages( + [ + ("system", "You're an assistant who's good at {ability}"), + MessagesPlaceholder(variable_name="history"), + ("human", "{question}"), + ] + ) chain = prompt | ChatAnthropic(model="claude-2") @@ -226,7 +236,7 @@ class RunnableWithMessageHistory(RunnableBindingBase): # type: ignore[no-redef] with_message_history.invoke( {"ability": "math", "question": "What does cosine mean?"}, - config={"configurable": {"user_id": "123", "conversation_id": "1"}} + config={"configurable": {"user_id": "123", "conversation_id": "1"}}, ) """ @@ -280,11 +290,8 @@ class RunnableWithMessageHistory(RunnableBindingBase): # type: ignore[no-redef] .. code-block:: python def get_session_history( - session_id: str, - *, - user_id: Optional[str]=None - ) -> BaseChatMessageHistory: - ... + session_id: str, *, user_id: Optional[str] = None + ) -> BaseChatMessageHistory: ... 
Or it should take keyword arguments that match the keys of `session_history_config_specs` and return a corresponding @@ -296,8 +303,7 @@ class RunnableWithMessageHistory(RunnableBindingBase): # type: ignore[no-redef] *, user_id: str, thread_id: str, - ) -> BaseChatMessageHistory: - ... + ) -> BaseChatMessageHistory: ... input_messages_key: Must be specified if the base runnable accepts a dict as input. Default is None. diff --git a/libs/core/langchain_core/runnables/passthrough.py b/libs/core/langchain_core/runnables/passthrough.py index 7ca0634dfe9..afc28e54d42 100644 --- a/libs/core/langchain_core/runnables/passthrough.py +++ b/libs/core/langchain_core/runnables/passthrough.py @@ -96,22 +96,22 @@ class RunnablePassthrough(RunnableSerializable[Other, Other]): ) runnable = RunnableParallel( - origin=RunnablePassthrough(), - modified=lambda x: x+1 + origin=RunnablePassthrough(), modified=lambda x: x + 1 ) - runnable.invoke(1) # {'origin': 1, 'modified': 2} + runnable.invoke(1) # {'origin': 1, 'modified': 2} - def fake_llm(prompt: str) -> str: # Fake LLM for the example + def fake_llm(prompt: str) -> str: # Fake LLM for the example return "completion" + chain = RunnableLambda(fake_llm) | { - 'original': RunnablePassthrough(), # Original LLM output - 'parsed': lambda text: text[::-1] # Parsing logic + "original": RunnablePassthrough(), # Original LLM output + "parsed": lambda text: text[::-1], # Parsing logic } - chain.invoke('hello') # {'original': 'completion', 'parsed': 'noitelpmoc'} + chain.invoke("hello") # {'original': 'completion', 'parsed': 'noitelpmoc'} In some cases, it may be useful to pass the input through while adding some keys to the output. In this case, you can use the `assign` method: @@ -120,17 +120,19 @@ class RunnablePassthrough(RunnableSerializable[Other, Other]): from langchain_core.runnables import RunnablePassthrough - def fake_llm(prompt: str) -> str: # Fake LLM for the example + + def fake_llm(prompt: str) -> str: # Fake LLM for the example return "completion" + runnable = { - 'llm1': fake_llm, - 'llm2': fake_llm, + "llm1": fake_llm, + "llm2": fake_llm, } | RunnablePassthrough.assign( - total_chars=lambda inputs: len(inputs['llm1'] + inputs['llm2']) + total_chars=lambda inputs: len(inputs["llm1"] + inputs["llm2"]) ) - runnable.invoke('hello') + runnable.invoke("hello") # {'llm1': 'completion', 'llm2': 'completion', 'total_chars': 20} """ @@ -378,11 +380,15 @@ class RunnableAssign(RunnableSerializable[dict[str, Any], dict[str, Any]]): ) from langchain_core.runnables.base import RunnableLambda + def add_ten(x: dict[str, int]) -> dict[str, int]: return {"added": x["input"] + 10} + mapper = RunnableParallel( - {"add_step": RunnableLambda(add_ten),} + { + "add_step": RunnableLambda(add_ten), + } ) runnable_assign = RunnableAssign(mapper) @@ -688,13 +694,13 @@ class RunnablePick(RunnableSerializable[dict[str, Any], dict[str, Any]]): from langchain_core.runnables.passthrough import RunnablePick input_data = { - 'name': 'John', - 'age': 30, - 'city': 'New York', - 'country': 'USA' + "name": "John", + "age": 30, + "city": "New York", + "country": "USA", } - runnable = RunnablePick(keys=['name', 'age']) + runnable = RunnablePick(keys=["name", "age"]) output_data = runnable.invoke(input_data) @@ -730,7 +736,8 @@ class RunnablePick(RunnableSerializable[dict[str, Any], dict[str, Any]]): name = ( name or self.name - or f"RunnablePick<{','.join([self.keys] if isinstance(self.keys, str) else self.keys)}>" # noqa: E501 + or "RunnablePick" + f"<{','.join([self.keys] if 
isinstance(self.keys, str) else self.keys)}>" ) return super().get_name(suffix, name=name) diff --git a/libs/core/langchain_core/runnables/retry.py b/libs/core/langchain_core/runnables/retry.py index e909b9b175b..d3bb74547b0 100644 --- a/libs/core/langchain_core/runnables/retry.py +++ b/libs/core/langchain_core/runnables/retry.py @@ -66,17 +66,21 @@ class RunnableRetry(RunnableBindingBase[Input, Output]): # type: ignore[no-rede import time + def foo(input) -> None: '''Fake function that raises an exception.''' raise ValueError(f"Invoking foo failed. At time {time.time()}") + runnable = RunnableLambda(foo) runnable_with_retries = runnable.with_retry( - retry_if_exception_type=(ValueError,), # Retry only on ValueError - wait_exponential_jitter=True, # Add jitter to the exponential backoff - stop_after_attempt=2, # Try twice - exponential_jitter_params={"initial": 2}, # if desired, customize backoff + retry_if_exception_type=(ValueError,), # Retry only on ValueError + wait_exponential_jitter=True, # Add jitter to the exponential backoff + stop_after_attempt=2, # Try twice + exponential_jitter_params={ + "initial": 2 + }, # if desired, customize backoff ) # The method invocation above is equivalent to the longer form below: @@ -111,7 +115,7 @@ class RunnableRetry(RunnableBindingBase[Input, Output]): # type: ignore[no-rede chain = template | model retryable_chain = chain.with_retry() - """ # noqa: E501 + """ retry_exception_types: tuple[type[BaseException], ...] = (Exception,) """The exception types to retry on. By default all exceptions are retried. diff --git a/libs/core/langchain_core/runnables/schema.py b/libs/core/langchain_core/runnables/schema.py index 1ec2c58a48f..3d6471022b8 100644 --- a/libs/core/langchain_core/runnables/schema.py +++ b/libs/core/langchain_core/runnables/schema.py @@ -51,9 +51,11 @@ class BaseStreamEvent(TypedDict): from langchain_core.runnables import RunnableLambda + async def reverse(s: str) -> str: return s[::-1] + chain = RunnableLambda(func=reverse) events = [event async for event in chain.astream_events("hello")] diff --git a/libs/core/langchain_core/stores.py b/libs/core/langchain_core/stores.py index 24ab0ea74d6..e6df8fd93eb 100644 --- a/libs/core/langchain_core/stores.py +++ b/libs/core/langchain_core/stores.py @@ -52,8 +52,8 @@ class BaseStore(ABC, Generic[K, V]): from langchain.storage import BaseStore - class MyInMemoryStore(BaseStore[str, int]): + class MyInMemoryStore(BaseStore[str, int]): def __init__(self): self.store = {} @@ -295,13 +295,13 @@ class InMemoryStore(InMemoryBaseStore[Any]): from langchain.storage import InMemoryStore store = InMemoryStore() - store.mset([('key1', 'value1'), ('key2', 'value2')]) - store.mget(['key1', 'key2']) + store.mset([("key1", "value1"), ("key2", "value2")]) + store.mget(["key1", "key2"]) # ['value1', 'value2'] - store.mdelete(['key1']) + store.mdelete(["key1"]) list(store.yield_keys()) # ['key2'] - list(store.yield_keys(prefix='k')) + list(store.yield_keys(prefix="k")) # ['key2'] """ @@ -321,13 +321,13 @@ class InMemoryByteStore(InMemoryBaseStore[bytes]): from langchain.storage import InMemoryByteStore store = InMemoryByteStore() - store.mset([('key1', b'value1'), ('key2', b'value2')]) - store.mget(['key1', 'key2']) + store.mset([("key1", b"value1"), ("key2", b"value2")]) + store.mget(["key1", "key2"]) # [b'value1', b'value2'] - store.mdelete(['key1']) + store.mdelete(["key1"]) list(store.yield_keys()) # ['key2'] - list(store.yield_keys(prefix='k')) + list(store.yield_keys(prefix="k")) # ['key2'] """ diff --git 
a/libs/core/langchain_core/sys_info.py b/libs/core/langchain_core/sys_info.py index 96deed97dd1..b7073ffb9cd 100644 --- a/libs/core/langchain_core/sys_info.py +++ b/libs/core/langchain_core/sys_info.py @@ -1,4 +1,8 @@ -"""**sys_info** prints information about the system and langchain packages for debugging purposes.""" # noqa: E501 +"""**sys_info** implementation. + +sys_info prints information about the system and langchain packages for +debugging purposes. +""" from collections.abc import Sequence diff --git a/libs/core/langchain_core/tools/convert.py b/libs/core/langchain_core/tools/convert.py index 8b103fd54d6..2693ce80150 100644 --- a/libs/core/langchain_core/tools/convert.py +++ b/libs/core/langchain_core/tools/convert.py @@ -134,11 +134,13 @@ def tool( # Searches the API for the query. return + @tool("search", return_direct=True) def search_api(query: str) -> str: # Searches the API for the query. return + @tool(response_format="content_and_artifact") def search_api(query: str) -> tuple[str, dict]: return "partial json of results", {"full": "object of results"} @@ -171,18 +173,15 @@ def tool( "bar": { "title": "Bar", "description": "The bar.", - "type": "string" + "type": "string", }, "baz": { "title": "Baz", "description": "The baz.", - "type": "integer" - } + "type": "integer", + }, }, - "required": [ - "bar", - "baz" - ] + "required": ["bar", "baz"], } Note that parsing by default will raise ``ValueError`` if the docstring diff --git a/libs/core/langchain_core/utils/function_calling.py b/libs/core/langchain_core/utils/function_calling.py index acc50e7acdd..61849113d1b 100644 --- a/libs/core/langchain_core/utils/function_calling.py +++ b/libs/core/langchain_core/utils/function_calling.py @@ -672,8 +672,10 @@ def tool_example_to_messages( from pydantic import BaseModel, Field from langchain_openai import ChatOpenAI + class Person(BaseModel): '''Information about a person.''' + name: Optional[str] = Field(..., description="The name of the person") hair_color: Optional[str] = Field( ..., description="The color of the person's hair if known" @@ -682,6 +684,7 @@ def tool_example_to_messages( ..., description="Height in METERS" ) + examples = [ ( "The ocean is vast and blue. 
It's more than 20,000 feet deep.", @@ -697,9 +700,7 @@ def tool_example_to_messages( messages = [] for txt, tool_call in examples: - messages.extend( - tool_example_to_messages(txt, [tool_call]) - ) + messages.extend(tool_example_to_messages(txt, [tool_call])) """ messages: list[BaseMessage] = [HumanMessage(content=input)] diff --git a/libs/core/langchain_core/vectorstores/base.py b/libs/core/langchain_core/vectorstores/base.py index c6bfc467260..17fa6592c72 100644 --- a/libs/core/langchain_core/vectorstores/base.py +++ b/libs/core/langchain_core/vectorstores/base.py @@ -969,30 +969,28 @@ class VectorStore(ABC): # Retrieve more documents with higher diversity # Useful if your dataset has many similar documents docsearch.as_retriever( - search_type="mmr", - search_kwargs={'k': 6, 'lambda_mult': 0.25} + search_type="mmr", search_kwargs={"k": 6, "lambda_mult": 0.25} ) # Fetch more documents for the MMR algorithm to consider # But only return the top 5 docsearch.as_retriever( - search_type="mmr", - search_kwargs={'k': 5, 'fetch_k': 50} + search_type="mmr", search_kwargs={"k": 5, "fetch_k": 50} ) # Only retrieve documents that have a relevance score # Above a certain threshold docsearch.as_retriever( search_type="similarity_score_threshold", - search_kwargs={'score_threshold': 0.8} + search_kwargs={"score_threshold": 0.8}, ) # Only get the single most similar document from the dataset - docsearch.as_retriever(search_kwargs={'k': 1}) + docsearch.as_retriever(search_kwargs={"k": 1}) # Use a filter to only retrieve documents from a specific paper docsearch.as_retriever( - search_kwargs={'filter': {'paper_title':'GPT-4 Technical Report'}} + search_kwargs={"filter": {"paper_title": "GPT-4 Technical Report"}} ) """ diff --git a/libs/core/langchain_core/vectorstores/in_memory.py b/libs/core/langchain_core/vectorstores/in_memory.py index 23eb4127084..e7787f8a882 100644 --- a/libs/core/langchain_core/vectorstores/in_memory.py +++ b/libs/core/langchain_core/vectorstores/in_memory.py @@ -83,7 +83,7 @@ class InMemoryVectorStore(VectorStore): Search: .. code-block:: python - results = vector_store.similarity_search(query="thud",k=1) + results = vector_store.similarity_search(query="thud", k=1) for doc in results: print(f"* {doc.page_content} [{doc.metadata}]") @@ -97,6 +97,7 @@ class InMemoryVectorStore(VectorStore): def _filter_function(doc: Document) -> bool: return doc.metadata.get("bar") == "baz" + results = vector_store.similarity_search( query="thud", k=1, filter=_filter_function ) @@ -111,9 +112,7 @@ class InMemoryVectorStore(VectorStore): Search with score: .. code-block:: python - results = vector_store.similarity_search_with_score( - query="qux", k=1 - ) + results = vector_store.similarity_search_with_score(query="qux", k=1) for doc, score in results: print(f"* [SIM={score:3f}] {doc.page_content} [{doc.metadata}]") @@ -135,7 +134,7 @@ class InMemoryVectorStore(VectorStore): # search with score results = await vector_store.asimilarity_search_with_score(query="qux", k=1) - for doc,score in results: + for doc, score in results: print(f"* [SIM={score:3f}] {doc.page_content} [{doc.metadata}]") .. 
code-block:: none diff --git a/libs/core/pyproject.toml b/libs/core/pyproject.toml index 14b8bfbaa2e..1ea42cee123 100644 --- a/libs/core/pyproject.toml +++ b/libs/core/pyproject.toml @@ -77,6 +77,8 @@ warn_return_any = "False" [tool.ruff] target-version = "py39" +[tool.ruff.format] +docstring-code-format = true [tool.ruff.lint] select = [ "ALL",] diff --git a/libs/core/tests/unit_tests/conftest.py b/libs/core/tests/unit_tests/conftest.py index aceca3156b8..83f39adc1c1 100644 --- a/libs/core/tests/unit_tests/conftest.py +++ b/libs/core/tests/unit_tests/conftest.py @@ -64,8 +64,7 @@ def pytest_collection_modifyitems( .. code-block:: python @pytest.mark.requires("package1", "package2") - def test_something(): - ... + def test_something(): ... """ # Mapping from the name of a package to whether it is installed or not. diff --git a/libs/core/tests/unit_tests/prompts/__snapshots__/test_chat.ambr b/libs/core/tests/unit_tests/prompts/__snapshots__/test_chat.ambr index 7c07416fe5d..c851464cd5b 100644 --- a/libs/core/tests/unit_tests/prompts/__snapshots__/test_chat.ambr +++ b/libs/core/tests/unit_tests/prompts/__snapshots__/test_chat.ambr @@ -526,12 +526,8 @@ from langchain_core.messages import HumanMessage, SystemMessage messages = [ - SystemMessage( - content="You are a helpful assistant! Your name is Bob." - ), - HumanMessage( - content="What is your name?" - ) + SystemMessage(content="You are a helpful assistant! Your name is Bob."), + HumanMessage(content="What is your name?"), ] # Instantiate a chat model and invoke it with the messages @@ -834,12 +830,8 @@ from langchain_core.messages import HumanMessage, SystemMessage messages = [ - SystemMessage( - content="You are a helpful assistant! Your name is Bob." - ), - HumanMessage( - content="What is your name?" - ) + SystemMessage(content="You are a helpful assistant! Your name is Bob."), + HumanMessage(content="What is your name?"), ] # Define a chat model and invoke it with the messages @@ -990,11 +982,7 @@ .. code-block:: python - { - "name": "foo", - "args": {"a": 1}, - "id": "123" - } + {"name": "foo", "args": {"a": 1}, "id": "123"} This represents a request to call the tool named "foo" with arguments {"a": 1} and an identifier of "123". @@ -1046,12 +1034,12 @@ .. 
code-block:: python left_chunks = [ToolCallChunk(name="foo", args='{"a":', index=0)] - right_chunks = [ToolCallChunk(name=None, args='1}', index=0)] + right_chunks = [ToolCallChunk(name=None, args="1}", index=0)] ( AIMessageChunk(content="", tool_call_chunks=left_chunks) + AIMessageChunk(content="", tool_call_chunks=right_chunks) - ).tool_call_chunks == [ToolCallChunk(name='foo', args='{"a":1}', index=0)] + ).tool_call_chunks == [ToolCallChunk(name="foo", args='{"a":1}', index=0)] ''', 'properties': dict({ 'args': dict({ @@ -1127,7 +1115,7 @@ from langchain_core.messages import ToolMessage - ToolMessage(content='42', tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL') + ToolMessage(content="42", tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL") Example: A ToolMessage where only part of the tool output is sent to the model @@ -1140,7 +1128,8 @@ from langchain_core.messages import ToolMessage tool_output = { - "stdout": "From the graph we can see that the correlation between x and y is ...", + "stdout": "From the graph we can see that the correlation between " + "x and y is ...", "stderr": None, "artifacts": {"type": "image", "base64_data": "/9j/4gIcSU..."}, } @@ -1148,7 +1137,7 @@ ToolMessage( content=tool_output["stdout"], artifact=tool_output, - tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL', + tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL", ) The tool_call_id field is used to associate the tool call request with the @@ -1338,7 +1327,7 @@ "output_token_details": { "audio": 10, "reasoning": 200, - } + }, } .. versionchanged:: 0.3.9 @@ -1958,12 +1947,8 @@ from langchain_core.messages import HumanMessage, SystemMessage messages = [ - SystemMessage( - content="You are a helpful assistant! Your name is Bob." - ), - HumanMessage( - content="What is your name?" - ) + SystemMessage(content="You are a helpful assistant! Your name is Bob."), + HumanMessage(content="What is your name?"), ] # Instantiate a chat model and invoke it with the messages @@ -2266,12 +2251,8 @@ from langchain_core.messages import HumanMessage, SystemMessage messages = [ - SystemMessage( - content="You are a helpful assistant! Your name is Bob." - ), - HumanMessage( - content="What is your name?" - ) + SystemMessage(content="You are a helpful assistant! Your name is Bob."), + HumanMessage(content="What is your name?"), ] # Define a chat model and invoke it with the messages @@ -2422,11 +2403,7 @@ .. code-block:: python - { - "name": "foo", - "args": {"a": 1}, - "id": "123" - } + {"name": "foo", "args": {"a": 1}, "id": "123"} This represents a request to call the tool named "foo" with arguments {"a": 1} and an identifier of "123". @@ -2478,12 +2455,12 @@ .. 
code-block:: python left_chunks = [ToolCallChunk(name="foo", args='{"a":', index=0)] - right_chunks = [ToolCallChunk(name=None, args='1}', index=0)] + right_chunks = [ToolCallChunk(name=None, args="1}", index=0)] ( AIMessageChunk(content="", tool_call_chunks=left_chunks) + AIMessageChunk(content="", tool_call_chunks=right_chunks) - ).tool_call_chunks == [ToolCallChunk(name='foo', args='{"a":1}', index=0)] + ).tool_call_chunks == [ToolCallChunk(name="foo", args='{"a":1}', index=0)] ''', 'properties': dict({ 'args': dict({ @@ -2559,7 +2536,7 @@ from langchain_core.messages import ToolMessage - ToolMessage(content='42', tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL') + ToolMessage(content="42", tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL") Example: A ToolMessage where only part of the tool output is sent to the model @@ -2572,7 +2549,8 @@ from langchain_core.messages import ToolMessage tool_output = { - "stdout": "From the graph we can see that the correlation between x and y is ...", + "stdout": "From the graph we can see that the correlation between " + "x and y is ...", "stderr": None, "artifacts": {"type": "image", "base64_data": "/9j/4gIcSU..."}, } @@ -2580,7 +2558,7 @@ ToolMessage( content=tool_output["stdout"], artifact=tool_output, - tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL', + tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL", ) The tool_call_id field is used to associate the tool call request with the @@ -2770,7 +2748,7 @@ "output_token_details": { "audio": 10, "reasoning": 200, - } + }, } .. versionchanged:: 0.3.9 diff --git a/libs/core/tests/unit_tests/runnables/__snapshots__/test_graph.ambr b/libs/core/tests/unit_tests/runnables/__snapshots__/test_graph.ambr index a788c425fce..a7dcec5b72b 100644 --- a/libs/core/tests/unit_tests/runnables/__snapshots__/test_graph.ambr +++ b/libs/core/tests/unit_tests/runnables/__snapshots__/test_graph.ambr @@ -929,12 +929,8 @@ from langchain_core.messages import HumanMessage, SystemMessage messages = [ - SystemMessage( - content="You are a helpful assistant! Your name is Bob." - ), - HumanMessage( - content="What is your name?" - ) + SystemMessage(content="You are a helpful assistant! Your name is Bob."), + HumanMessage(content="What is your name?"), ] # Instantiate a chat model and invoke it with the messages @@ -1237,12 +1233,8 @@ from langchain_core.messages import HumanMessage, SystemMessage messages = [ - SystemMessage( - content="You are a helpful assistant! Your name is Bob." - ), - HumanMessage( - content="What is your name?" - ) + SystemMessage(content="You are a helpful assistant! Your name is Bob."), + HumanMessage(content="What is your name?"), ] # Define a chat model and invoke it with the messages @@ -1393,11 +1385,7 @@ .. code-block:: python - { - "name": "foo", - "args": {"a": 1}, - "id": "123" - } + {"name": "foo", "args": {"a": 1}, "id": "123"} This represents a request to call the tool named "foo" with arguments {"a": 1} and an identifier of "123". @@ -1449,12 +1437,12 @@ .. 
code-block:: python left_chunks = [ToolCallChunk(name="foo", args='{"a":', index=0)] - right_chunks = [ToolCallChunk(name=None, args='1}', index=0)] + right_chunks = [ToolCallChunk(name=None, args="1}", index=0)] ( AIMessageChunk(content="", tool_call_chunks=left_chunks) + AIMessageChunk(content="", tool_call_chunks=right_chunks) - ).tool_call_chunks == [ToolCallChunk(name='foo', args='{"a":1}', index=0)] + ).tool_call_chunks == [ToolCallChunk(name="foo", args='{"a":1}', index=0)] ''', 'properties': dict({ 'args': dict({ @@ -1530,7 +1518,7 @@ from langchain_core.messages import ToolMessage - ToolMessage(content='42', tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL') + ToolMessage(content="42", tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL") Example: A ToolMessage where only part of the tool output is sent to the model @@ -1543,7 +1531,8 @@ from langchain_core.messages import ToolMessage tool_output = { - "stdout": "From the graph we can see that the correlation between x and y is ...", + "stdout": "From the graph we can see that the correlation between " + "x and y is ...", "stderr": None, "artifacts": {"type": "image", "base64_data": "/9j/4gIcSU..."}, } @@ -1551,7 +1540,7 @@ ToolMessage( content=tool_output["stdout"], artifact=tool_output, - tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL', + tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL", ) The tool_call_id field is used to associate the tool call request with the @@ -1741,7 +1730,7 @@ "output_token_details": { "audio": 10, "reasoning": 200, - } + }, } .. versionchanged:: 0.3.9 diff --git a/libs/core/tests/unit_tests/runnables/__snapshots__/test_runnable.ambr b/libs/core/tests/unit_tests/runnables/__snapshots__/test_runnable.ambr index 079e4909061..4cd36110b0e 100644 --- a/libs/core/tests/unit_tests/runnables/__snapshots__/test_runnable.ambr +++ b/libs/core/tests/unit_tests/runnables/__snapshots__/test_runnable.ambr @@ -2476,12 +2476,8 @@ from langchain_core.messages import HumanMessage, SystemMessage messages = [ - SystemMessage( - content="You are a helpful assistant! Your name is Bob." - ), - HumanMessage( - content="What is your name?" - ) + SystemMessage(content="You are a helpful assistant! Your name is Bob."), + HumanMessage(content="What is your name?"), ] # Instantiate a chat model and invoke it with the messages @@ -2781,12 +2777,8 @@ from langchain_core.messages import HumanMessage, SystemMessage messages = [ - SystemMessage( - content="You are a helpful assistant! Your name is Bob." - ), - HumanMessage( - content="What is your name?" - ) + SystemMessage(content="You are a helpful assistant! Your name is Bob."), + HumanMessage(content="What is your name?"), ] # Define a chat model and invoke it with the messages @@ -2935,11 +2927,7 @@ .. code-block:: python - { - "name": "foo", - "args": {"a": 1}, - "id": "123" - } + {"name": "foo", "args": {"a": 1}, "id": "123"} This represents a request to call the tool named "foo" with arguments {"a": 1} and an identifier of "123". @@ -2990,12 +2978,12 @@ .. 
code-block:: python left_chunks = [ToolCallChunk(name="foo", args='{"a":', index=0)] - right_chunks = [ToolCallChunk(name=None, args='1}', index=0)] + right_chunks = [ToolCallChunk(name=None, args="1}", index=0)] ( AIMessageChunk(content="", tool_call_chunks=left_chunks) + AIMessageChunk(content="", tool_call_chunks=right_chunks) - ).tool_call_chunks == [ToolCallChunk(name='foo', args='{"a":1}', index=0)] + ).tool_call_chunks == [ToolCallChunk(name="foo", args='{"a":1}', index=0)] ''', 'properties': dict({ 'args': dict({ @@ -3070,7 +3058,7 @@ from langchain_core.messages import ToolMessage - ToolMessage(content='42', tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL') + ToolMessage(content="42", tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL") Example: A ToolMessage where only part of the tool output is sent to the model @@ -3083,7 +3071,8 @@ from langchain_core.messages import ToolMessage tool_output = { - "stdout": "From the graph we can see that the correlation between x and y is ...", + "stdout": "From the graph we can see that the correlation between " + "x and y is ...", "stderr": None, "artifacts": {"type": "image", "base64_data": "/9j/4gIcSU..."}, } @@ -3091,7 +3080,7 @@ ToolMessage( content=tool_output["stdout"], artifact=tool_output, - tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL', + tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL", ) The tool_call_id field is used to associate the tool call request with the @@ -3279,7 +3268,7 @@ "output_token_details": { "audio": 10, "reasoning": 200, - } + }, } .. versionchanged:: 0.3.9 @@ -3952,12 +3941,8 @@ from langchain_core.messages import HumanMessage, SystemMessage messages = [ - SystemMessage( - content="You are a helpful assistant! Your name is Bob." - ), - HumanMessage( - content="What is your name?" - ) + SystemMessage(content="You are a helpful assistant! Your name is Bob."), + HumanMessage(content="What is your name?"), ] # Instantiate a chat model and invoke it with the messages @@ -4276,12 +4261,8 @@ from langchain_core.messages import HumanMessage, SystemMessage messages = [ - SystemMessage( - content="You are a helpful assistant! Your name is Bob." - ), - HumanMessage( - content="What is your name?" - ) + SystemMessage(content="You are a helpful assistant! Your name is Bob."), + HumanMessage(content="What is your name?"), ] # Define a chat model and invoke it with the messages @@ -4430,11 +4411,7 @@ .. code-block:: python - { - "name": "foo", - "args": {"a": 1}, - "id": "123" - } + {"name": "foo", "args": {"a": 1}, "id": "123"} This represents a request to call the tool named "foo" with arguments {"a": 1} and an identifier of "123". @@ -4485,12 +4462,12 @@ .. 
code-block:: python left_chunks = [ToolCallChunk(name="foo", args='{"a":', index=0)] - right_chunks = [ToolCallChunk(name=None, args='1}', index=0)] + right_chunks = [ToolCallChunk(name=None, args="1}", index=0)] ( AIMessageChunk(content="", tool_call_chunks=left_chunks) + AIMessageChunk(content="", tool_call_chunks=right_chunks) - ).tool_call_chunks == [ToolCallChunk(name='foo', args='{"a":1}', index=0)] + ).tool_call_chunks == [ToolCallChunk(name="foo", args='{"a":1}', index=0)] ''', 'properties': dict({ 'args': dict({ @@ -4565,7 +4542,7 @@ from langchain_core.messages import ToolMessage - ToolMessage(content='42', tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL') + ToolMessage(content="42", tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL") Example: A ToolMessage where only part of the tool output is sent to the model @@ -4578,7 +4555,8 @@ from langchain_core.messages import ToolMessage tool_output = { - "stdout": "From the graph we can see that the correlation between x and y is ...", + "stdout": "From the graph we can see that the correlation between " + "x and y is ...", "stderr": None, "artifacts": {"type": "image", "base64_data": "/9j/4gIcSU..."}, } @@ -4586,7 +4564,7 @@ ToolMessage( content=tool_output["stdout"], artifact=tool_output, - tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL', + tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL", ) The tool_call_id field is used to associate the tool call request with the @@ -4774,7 +4752,7 @@ "output_token_details": { "audio": 10, "reasoning": 200, - } + }, } .. versionchanged:: 0.3.9 @@ -5459,12 +5437,8 @@ from langchain_core.messages import HumanMessage, SystemMessage messages = [ - SystemMessage( - content="You are a helpful assistant! Your name is Bob." - ), - HumanMessage( - content="What is your name?" - ) + SystemMessage(content="You are a helpful assistant! Your name is Bob."), + HumanMessage(content="What is your name?"), ] # Instantiate a chat model and invoke it with the messages @@ -5783,12 +5757,8 @@ from langchain_core.messages import HumanMessage, SystemMessage messages = [ - SystemMessage( - content="You are a helpful assistant! Your name is Bob." - ), - HumanMessage( - content="What is your name?" - ) + SystemMessage(content="You are a helpful assistant! Your name is Bob."), + HumanMessage(content="What is your name?"), ] # Define a chat model and invoke it with the messages @@ -5937,11 +5907,7 @@ .. code-block:: python - { - "name": "foo", - "args": {"a": 1}, - "id": "123" - } + {"name": "foo", "args": {"a": 1}, "id": "123"} This represents a request to call the tool named "foo" with arguments {"a": 1} and an identifier of "123". @@ -5992,12 +5958,12 @@ .. 
code-block:: python left_chunks = [ToolCallChunk(name="foo", args='{"a":', index=0)] - right_chunks = [ToolCallChunk(name=None, args='1}', index=0)] + right_chunks = [ToolCallChunk(name=None, args="1}", index=0)] ( AIMessageChunk(content="", tool_call_chunks=left_chunks) + AIMessageChunk(content="", tool_call_chunks=right_chunks) - ).tool_call_chunks == [ToolCallChunk(name='foo', args='{"a":1}', index=0)] + ).tool_call_chunks == [ToolCallChunk(name="foo", args='{"a":1}', index=0)] ''', 'properties': dict({ 'args': dict({ @@ -6072,7 +6038,7 @@ from langchain_core.messages import ToolMessage - ToolMessage(content='42', tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL') + ToolMessage(content="42", tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL") Example: A ToolMessage where only part of the tool output is sent to the model @@ -6085,7 +6051,8 @@ from langchain_core.messages import ToolMessage tool_output = { - "stdout": "From the graph we can see that the correlation between x and y is ...", + "stdout": "From the graph we can see that the correlation between " + "x and y is ...", "stderr": None, "artifacts": {"type": "image", "base64_data": "/9j/4gIcSU..."}, } @@ -6093,7 +6060,7 @@ ToolMessage( content=tool_output["stdout"], artifact=tool_output, - tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL', + tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL", ) The tool_call_id field is used to associate the tool call request with the @@ -6281,7 +6248,7 @@ "output_token_details": { "audio": 10, "reasoning": 200, - } + }, } .. versionchanged:: 0.3.9 @@ -6841,12 +6808,8 @@ from langchain_core.messages import HumanMessage, SystemMessage messages = [ - SystemMessage( - content="You are a helpful assistant! Your name is Bob." - ), - HumanMessage( - content="What is your name?" - ) + SystemMessage(content="You are a helpful assistant! Your name is Bob."), + HumanMessage(content="What is your name?"), ] # Instantiate a chat model and invoke it with the messages @@ -7146,12 +7109,8 @@ from langchain_core.messages import HumanMessage, SystemMessage messages = [ - SystemMessage( - content="You are a helpful assistant! Your name is Bob." - ), - HumanMessage( - content="What is your name?" - ) + SystemMessage(content="You are a helpful assistant! Your name is Bob."), + HumanMessage(content="What is your name?"), ] # Define a chat model and invoke it with the messages @@ -7300,11 +7259,7 @@ .. code-block:: python - { - "name": "foo", - "args": {"a": 1}, - "id": "123" - } + {"name": "foo", "args": {"a": 1}, "id": "123"} This represents a request to call the tool named "foo" with arguments {"a": 1} and an identifier of "123". @@ -7355,12 +7310,12 @@ .. 
code-block:: python left_chunks = [ToolCallChunk(name="foo", args='{"a":', index=0)] - right_chunks = [ToolCallChunk(name=None, args='1}', index=0)] + right_chunks = [ToolCallChunk(name=None, args="1}", index=0)] ( AIMessageChunk(content="", tool_call_chunks=left_chunks) + AIMessageChunk(content="", tool_call_chunks=right_chunks) - ).tool_call_chunks == [ToolCallChunk(name='foo', args='{"a":1}', index=0)] + ).tool_call_chunks == [ToolCallChunk(name="foo", args='{"a":1}', index=0)] ''', 'properties': dict({ 'args': dict({ @@ -7435,7 +7390,7 @@ from langchain_core.messages import ToolMessage - ToolMessage(content='42', tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL') + ToolMessage(content="42", tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL") Example: A ToolMessage where only part of the tool output is sent to the model @@ -7448,7 +7403,8 @@ from langchain_core.messages import ToolMessage tool_output = { - "stdout": "From the graph we can see that the correlation between x and y is ...", + "stdout": "From the graph we can see that the correlation between " + "x and y is ...", "stderr": None, "artifacts": {"type": "image", "base64_data": "/9j/4gIcSU..."}, } @@ -7456,7 +7412,7 @@ ToolMessage( content=tool_output["stdout"], artifact=tool_output, - tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL', + tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL", ) The tool_call_id field is used to associate the tool call request with the @@ -7644,7 +7600,7 @@ "output_token_details": { "audio": 10, "reasoning": 200, - } + }, } .. versionchanged:: 0.3.9 @@ -8359,12 +8315,8 @@ from langchain_core.messages import HumanMessage, SystemMessage messages = [ - SystemMessage( - content="You are a helpful assistant! Your name is Bob." - ), - HumanMessage( - content="What is your name?" - ) + SystemMessage(content="You are a helpful assistant! Your name is Bob."), + HumanMessage(content="What is your name?"), ] # Instantiate a chat model and invoke it with the messages @@ -8683,12 +8635,8 @@ from langchain_core.messages import HumanMessage, SystemMessage messages = [ - SystemMessage( - content="You are a helpful assistant! Your name is Bob." - ), - HumanMessage( - content="What is your name?" - ) + SystemMessage(content="You are a helpful assistant! Your name is Bob."), + HumanMessage(content="What is your name?"), ] # Define a chat model and invoke it with the messages @@ -8837,11 +8785,7 @@ .. code-block:: python - { - "name": "foo", - "args": {"a": 1}, - "id": "123" - } + {"name": "foo", "args": {"a": 1}, "id": "123"} This represents a request to call the tool named "foo" with arguments {"a": 1} and an identifier of "123". @@ -8892,12 +8836,12 @@ .. 
code-block:: python left_chunks = [ToolCallChunk(name="foo", args='{"a":', index=0)] - right_chunks = [ToolCallChunk(name=None, args='1}', index=0)] + right_chunks = [ToolCallChunk(name=None, args="1}", index=0)] ( AIMessageChunk(content="", tool_call_chunks=left_chunks) + AIMessageChunk(content="", tool_call_chunks=right_chunks) - ).tool_call_chunks == [ToolCallChunk(name='foo', args='{"a":1}', index=0)] + ).tool_call_chunks == [ToolCallChunk(name="foo", args='{"a":1}', index=0)] ''', 'properties': dict({ 'args': dict({ @@ -8972,7 +8916,7 @@ from langchain_core.messages import ToolMessage - ToolMessage(content='42', tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL') + ToolMessage(content="42", tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL") Example: A ToolMessage where only part of the tool output is sent to the model @@ -8985,7 +8929,8 @@ from langchain_core.messages import ToolMessage tool_output = { - "stdout": "From the graph we can see that the correlation between x and y is ...", + "stdout": "From the graph we can see that the correlation between " + "x and y is ...", "stderr": None, "artifacts": {"type": "image", "base64_data": "/9j/4gIcSU..."}, } @@ -8993,7 +8938,7 @@ ToolMessage( content=tool_output["stdout"], artifact=tool_output, - tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL', + tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL", ) The tool_call_id field is used to associate the tool call request with the @@ -9181,7 +9126,7 @@ "output_token_details": { "audio": 10, "reasoning": 200, - } + }, } .. versionchanged:: 0.3.9 @@ -9786,12 +9731,8 @@ from langchain_core.messages import HumanMessage, SystemMessage messages = [ - SystemMessage( - content="You are a helpful assistant! Your name is Bob." - ), - HumanMessage( - content="What is your name?" - ) + SystemMessage(content="You are a helpful assistant! Your name is Bob."), + HumanMessage(content="What is your name?"), ] # Instantiate a chat model and invoke it with the messages @@ -10091,12 +10032,8 @@ from langchain_core.messages import HumanMessage, SystemMessage messages = [ - SystemMessage( - content="You are a helpful assistant! Your name is Bob." - ), - HumanMessage( - content="What is your name?" - ) + SystemMessage(content="You are a helpful assistant! Your name is Bob."), + HumanMessage(content="What is your name?"), ] # Define a chat model and invoke it with the messages @@ -10245,11 +10182,7 @@ .. code-block:: python - { - "name": "foo", - "args": {"a": 1}, - "id": "123" - } + {"name": "foo", "args": {"a": 1}, "id": "123"} This represents a request to call the tool named "foo" with arguments {"a": 1} and an identifier of "123". @@ -10300,12 +10233,12 @@ .. 
code-block:: python left_chunks = [ToolCallChunk(name="foo", args='{"a":', index=0)] - right_chunks = [ToolCallChunk(name=None, args='1}', index=0)] + right_chunks = [ToolCallChunk(name=None, args="1}", index=0)] ( AIMessageChunk(content="", tool_call_chunks=left_chunks) + AIMessageChunk(content="", tool_call_chunks=right_chunks) - ).tool_call_chunks == [ToolCallChunk(name='foo', args='{"a":1}', index=0)] + ).tool_call_chunks == [ToolCallChunk(name="foo", args='{"a":1}', index=0)] ''', 'properties': dict({ 'args': dict({ @@ -10380,7 +10313,7 @@ from langchain_core.messages import ToolMessage - ToolMessage(content='42', tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL') + ToolMessage(content="42", tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL") Example: A ToolMessage where only part of the tool output is sent to the model @@ -10393,7 +10326,8 @@ from langchain_core.messages import ToolMessage tool_output = { - "stdout": "From the graph we can see that the correlation between x and y is ...", + "stdout": "From the graph we can see that the correlation between " + "x and y is ...", "stderr": None, "artifacts": {"type": "image", "base64_data": "/9j/4gIcSU..."}, } @@ -10401,7 +10335,7 @@ ToolMessage( content=tool_output["stdout"], artifact=tool_output, - tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL', + tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL", ) The tool_call_id field is used to associate the tool call request with the @@ -10589,7 +10523,7 @@ "output_token_details": { "audio": 10, "reasoning": 200, - } + }, } .. versionchanged:: 0.3.9 @@ -11212,12 +11146,8 @@ from langchain_core.messages import HumanMessage, SystemMessage messages = [ - SystemMessage( - content="You are a helpful assistant! Your name is Bob." - ), - HumanMessage( - content="What is your name?" - ) + SystemMessage(content="You are a helpful assistant! Your name is Bob."), + HumanMessage(content="What is your name?"), ] # Instantiate a chat model and invoke it with the messages @@ -11547,12 +11477,8 @@ from langchain_core.messages import HumanMessage, SystemMessage messages = [ - SystemMessage( - content="You are a helpful assistant! Your name is Bob." - ), - HumanMessage( - content="What is your name?" - ) + SystemMessage(content="You are a helpful assistant! Your name is Bob."), + HumanMessage(content="What is your name?"), ] # Define a chat model and invoke it with the messages @@ -11701,11 +11627,7 @@ .. code-block:: python - { - "name": "foo", - "args": {"a": 1}, - "id": "123" - } + {"name": "foo", "args": {"a": 1}, "id": "123"} This represents a request to call the tool named "foo" with arguments {"a": 1} and an identifier of "123". @@ -11756,12 +11678,12 @@ .. 
code-block:: python left_chunks = [ToolCallChunk(name="foo", args='{"a":', index=0)] - right_chunks = [ToolCallChunk(name=None, args='1}', index=0)] + right_chunks = [ToolCallChunk(name=None, args="1}", index=0)] ( AIMessageChunk(content="", tool_call_chunks=left_chunks) + AIMessageChunk(content="", tool_call_chunks=right_chunks) - ).tool_call_chunks == [ToolCallChunk(name='foo', args='{"a":1}', index=0)] + ).tool_call_chunks == [ToolCallChunk(name="foo", args='{"a":1}', index=0)] ''', 'properties': dict({ 'args': dict({ @@ -11836,7 +11758,7 @@ from langchain_core.messages import ToolMessage - ToolMessage(content='42', tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL') + ToolMessage(content="42", tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL") Example: A ToolMessage where only part of the tool output is sent to the model @@ -11849,7 +11771,8 @@ from langchain_core.messages import ToolMessage tool_output = { - "stdout": "From the graph we can see that the correlation between x and y is ...", + "stdout": "From the graph we can see that the correlation between " + "x and y is ...", "stderr": None, "artifacts": {"type": "image", "base64_data": "/9j/4gIcSU..."}, } @@ -11857,7 +11780,7 @@ ToolMessage( content=tool_output["stdout"], artifact=tool_output, - tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL', + tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL", ) The tool_call_id field is used to associate the tool call request with the @@ -12045,7 +11968,7 @@ "output_token_details": { "audio": 10, "reasoning": 200, - } + }, } .. versionchanged:: 0.3.9 @@ -12680,12 +12603,8 @@ from langchain_core.messages import HumanMessage, SystemMessage messages = [ - SystemMessage( - content="You are a helpful assistant! Your name is Bob." - ), - HumanMessage( - content="What is your name?" - ) + SystemMessage(content="You are a helpful assistant! Your name is Bob."), + HumanMessage(content="What is your name?"), ] # Instantiate a chat model and invoke it with the messages @@ -13004,12 +12923,8 @@ from langchain_core.messages import HumanMessage, SystemMessage messages = [ - SystemMessage( - content="You are a helpful assistant! Your name is Bob." - ), - HumanMessage( - content="What is your name?" - ) + SystemMessage(content="You are a helpful assistant! Your name is Bob."), + HumanMessage(content="What is your name?"), ] # Define a chat model and invoke it with the messages @@ -13158,11 +13073,7 @@ .. code-block:: python - { - "name": "foo", - "args": {"a": 1}, - "id": "123" - } + {"name": "foo", "args": {"a": 1}, "id": "123"} This represents a request to call the tool named "foo" with arguments {"a": 1} and an identifier of "123". @@ -13213,12 +13124,12 @@ .. 
code-block:: python left_chunks = [ToolCallChunk(name="foo", args='{"a":', index=0)] - right_chunks = [ToolCallChunk(name=None, args='1}', index=0)] + right_chunks = [ToolCallChunk(name=None, args="1}", index=0)] ( AIMessageChunk(content="", tool_call_chunks=left_chunks) + AIMessageChunk(content="", tool_call_chunks=right_chunks) - ).tool_call_chunks == [ToolCallChunk(name='foo', args='{"a":1}', index=0)] + ).tool_call_chunks == [ToolCallChunk(name="foo", args='{"a":1}', index=0)] ''', 'properties': dict({ 'args': dict({ @@ -13293,7 +13204,7 @@ from langchain_core.messages import ToolMessage - ToolMessage(content='42', tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL') + ToolMessage(content="42", tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL") Example: A ToolMessage where only part of the tool output is sent to the model @@ -13306,7 +13217,8 @@ from langchain_core.messages import ToolMessage tool_output = { - "stdout": "From the graph we can see that the correlation between x and y is ...", + "stdout": "From the graph we can see that the correlation between " + "x and y is ...", "stderr": None, "artifacts": {"type": "image", "base64_data": "/9j/4gIcSU..."}, } @@ -13314,7 +13226,7 @@ ToolMessage( content=tool_output["stdout"], artifact=tool_output, - tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL', + tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL", ) The tool_call_id field is used to associate the tool call request with the @@ -13502,7 +13414,7 @@ "output_token_details": { "audio": 10, "reasoning": 200, - } + }, } .. versionchanged:: 0.3.9 diff --git a/libs/core/tests/unit_tests/runnables/test_runnable.py b/libs/core/tests/unit_tests/runnables/test_runnable.py index d9a9db349e8..5c33c5a99c4 100644 --- a/libs/core/tests/unit_tests/runnables/test_runnable.py +++ b/libs/core/tests/unit_tests/runnables/test_runnable.py @@ -314,14 +314,11 @@ def test_schemas(snapshot: SnapshotAssertion) -> None: "\n" " .. code-block:: python\n" "\n" - " from langchain_core.documents " - "import Document\n" + " from langchain_core.documents import Document\n" "\n" " document = Document(\n" - ' page_content="Hello, ' - 'world!",\n' - ' metadata={"source": ' - '"https://example.com"}\n' + ' page_content="Hello, world!", ' + 'metadata={"source": "https://example.com"}\n' " )", "properties": { "id": { diff --git a/libs/core/uv.lock b/libs/core/uv.lock index a4bb351a562..0b0c16552d2 100644 --- a/libs/core/uv.lock +++ b/libs/core/uv.lock @@ -1174,7 +1174,8 @@ test = [{ name = "langchain-core", editable = "." }] test-integration = [] typing = [ { name = "langchain-core", editable = "." 
}, - { name = "mypy", specifier = ">=1.17.1,<2" }, + { name = "mypy", specifier = ">=1.17.1,<1.18" }, + { name = "types-pyyaml", specifier = ">=6.0.12.2,<7.0.0.0" }, ] [[package]] @@ -1217,9 +1218,10 @@ test-integration = [ { name = "transformers", specifier = ">=4.51.3,<5.0.0" }, ] typing = [ + { name = "beautifulsoup4", specifier = ">=4.13.5,<5.0.0" }, { name = "lxml-stubs", specifier = ">=0.5.1,<1.0.0" }, { name = "mypy", specifier = ">=1.17.1,<1.18" }, - { name = "tiktoken", specifier = ">=0.8.0,<1.0.0" }, + { name = "tiktoken", specifier = ">=0.11.0,<1.0.0" }, { name = "types-requests", specifier = ">=2.31.0.20240218,<3.0.0.0" }, ] diff --git a/libs/langchain_v1/langchain/__init__.py b/libs/langchain_v1/langchain/__init__.py index d3d37dc36db..2455640fc22 100644 --- a/libs/langchain_v1/langchain/__init__.py +++ b/libs/langchain_v1/langchain/__init__.py @@ -2,7 +2,7 @@ from typing import Any -__version__ = "1.0.0a3" +__version__ = "1.0.0a4" def __getattr__(name: str) -> Any: # noqa: ANN401
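
Note on the hunks above (editorial, not part of the patch): the snapshot and docstring churn is mechanical. With ruff's ``docstring-code-format`` option enabled under ``[tool.ruff.format]``, ``ruff format`` also formats code inside ``.. code-block:: python`` sections of docstrings, so single-quoted strings become double-quoted and over-long example lines are wrapped, and the ``.ambr`` snapshots simply record the reformatted docstrings. The sketch below is a hypothetical module, not taken from this patch, showing the kind of rewrite involved:

.. code-block:: python

    # Minimal sketch (hypothetical function, not part of this change) of what
    # the formatter does to docstring examples once docstring-code-format is
    # turned on in pyproject.toml ([tool.ruff.format] docstring-code-format = true).

    def add(a: int, b: int) -> int:
        """Add two numbers.

        Example:

        .. code-block:: python

            # Before `ruff format`: result = add(1, 2); print('sum:', result)
            # After, the example is normalized to double quotes and one
            # statement per line:
            result = add(1, 2)
            print("sum:", result)

        """
        return a + b

Runtime behaviour is unchanged; only docstring presentation and the snapshot fixtures that assert on it move, with the lockfile pins and the ``__version__`` bump riding along as routine housekeeping.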