core[minor]: Remove serialized manifest from tracing requests for non-llm runs (#26270)

- The serialized manifest takes a long time to compute, isn't used, and is currently computed on every invocation of every chain/retriever/etc.
Nuno Campos, 2024-09-10 12:58:24 -07:00 (committed by GitHub)
parent 979232257b
commit 212c688ee0
17 changed files with 221 additions and 7119 deletions
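
In effect, every call site that used to pay for a full dumpd(self) manifest on chain, tool, and retriever starts now passes None, and the callback signatures become Optional to match. A minimal before/after sketch of the pattern, simplified from the diffs below:

    from typing import Any, Dict, Optional

    def on_chain_start(
        serialized: Optional[Dict[str, Any]],  # was: Dict[str, Any]
        inputs: Any,
        **kwargs: Any,
    ) -> None:
        """Shape of the updated hook: non-LLM runs now deliver serialized=None."""

    # Before this commit (inside Runnable._call_with_config and friends):
    #     run_manager = callback_manager.on_chain_start(dumpd(self), input, ...)
    # After:
    #     run_manager = callback_manager.on_chain_start(None, input, ...)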

View File

@ -87,44 +87,7 @@
"template": "hello {name}!",
"template_format": "f-string"
},
"name": "PromptTemplate",
"graph": {
"nodes": [
{
"id": 0,
"type": "schema",
"data": "PromptInput"
},
{
"id": 1,
"type": "runnable",
"data": {
"id": [
"langchain",
"prompts",
"prompt",
"PromptTemplate"
],
"name": "PromptTemplate"
}
},
{
"id": 2,
"type": "schema",
"data": "PromptTemplateOutput"
}
],
"edges": [
{
"source": 0,
"target": 1
},
{
"source": 1,
"target": 2
}
]
}
"name": "PromptTemplate"
},
"llm": {
"lc": 1,
@ -154,44 +117,7 @@
"max_retries": 2,
"disallowed_special": "all"
},
"name": "OpenAI",
"graph": {
"nodes": [
{
"id": 0,
"type": "schema",
"data": "OpenAIInput"
},
{
"id": 1,
"type": "runnable",
"data": {
"id": [
"langchain",
"llms",
"openai",
"OpenAI"
],
"name": "OpenAI"
}
},
{
"id": 2,
"type": "schema",
"data": "OpenAIOutput"
}
],
"edges": [
{
"source": 0,
"target": 1
},
{
"source": 1,
"target": 2
}
]
}
"name": "OpenAI"
},
"output_key": "text",
"output_parser": {
@ -204,85 +130,11 @@
"StrOutputParser"
],
"kwargs": {},
"name": "StrOutputParser",
"graph": {
"nodes": [
{
"id": 0,
"type": "schema",
"data": "StrOutputParserInput"
},
{
"id": 1,
"type": "runnable",
"data": {
"id": [
"langchain",
"schema",
"output_parser",
"StrOutputParser"
],
"name": "StrOutputParser"
}
},
{
"id": 2,
"type": "schema",
"data": "StrOutputParserOutput"
}
],
"edges": [
{
"source": 0,
"target": 1
},
{
"source": 1,
"target": 2
}
]
}
"name": "StrOutputParser"
},
"return_final_only": true
},
"name": "LLMChain",
"graph": {
"nodes": [
{
"id": 0,
"type": "schema",
"data": "ChainInput"
},
{
"id": 1,
"type": "runnable",
"data": {
"id": [
"langchain",
"chains",
"llm",
"LLMChain"
],
"name": "LLMChain"
}
},
{
"id": 2,
"type": "schema",
"data": "ChainOutput"
}
],
"edges": [
{
"source": 0,
"target": 1
},
{
"source": 1,
"target": 2
}
]
}
"name": "LLMChain"
}
'''
# ---
@ -338,87 +190,13 @@
"template": "hello {name}!",
"template_format": "f-string"
},
"name": "PromptTemplate",
"graph": {
"nodes": [
{
"id": 0,
"type": "schema",
"data": "PromptInput"
},
{
"id": 1,
"type": "runnable",
"data": {
"id": [
"langchain",
"prompts",
"prompt",
"PromptTemplate"
],
"name": "PromptTemplate"
}
},
{
"id": 2,
"type": "schema",
"data": "PromptTemplateOutput"
}
],
"edges": [
{
"source": 0,
"target": 1
},
{
"source": 1,
"target": 2
}
]
}
"name": "PromptTemplate"
}
}
}
]
},
"name": "ChatPromptTemplate",
"graph": {
"nodes": [
{
"id": 0,
"type": "schema",
"data": "PromptInput"
},
{
"id": 1,
"type": "runnable",
"data": {
"id": [
"langchain",
"prompts",
"chat",
"ChatPromptTemplate"
],
"name": "ChatPromptTemplate"
}
},
{
"id": 2,
"type": "schema",
"data": "ChatPromptTemplateOutput"
}
],
"edges": [
{
"source": 0,
"target": 1
},
{
"source": 1,
"target": 2
}
]
}
"name": "ChatPromptTemplate"
},
"llm": {
"lc": 1,
@ -443,44 +221,7 @@
"max_retries": 2,
"n": 1
},
"name": "ChatOpenAI",
"graph": {
"nodes": [
{
"id": 0,
"type": "schema",
"data": "ChatOpenAIInput"
},
{
"id": 1,
"type": "runnable",
"data": {
"id": [
"langchain",
"chat_models",
"openai",
"ChatOpenAI"
],
"name": "ChatOpenAI"
}
},
{
"id": 2,
"type": "schema",
"data": "ChatOpenAIOutput"
}
],
"edges": [
{
"source": 0,
"target": 1
},
{
"source": 1,
"target": 2
}
]
}
"name": "ChatOpenAI"
},
"output_key": "text",
"output_parser": {
@ -493,85 +234,11 @@
"StrOutputParser"
],
"kwargs": {},
"name": "StrOutputParser",
"graph": {
"nodes": [
{
"id": 0,
"type": "schema",
"data": "StrOutputParserInput"
},
{
"id": 1,
"type": "runnable",
"data": {
"id": [
"langchain",
"schema",
"output_parser",
"StrOutputParser"
],
"name": "StrOutputParser"
}
},
{
"id": 2,
"type": "schema",
"data": "StrOutputParserOutput"
}
],
"edges": [
{
"source": 0,
"target": 1
},
{
"source": 1,
"target": 2
}
]
}
"name": "StrOutputParser"
},
"return_final_only": true
},
"name": "LLMChain",
"graph": {
"nodes": [
{
"id": 0,
"type": "schema",
"data": "ChainInput"
},
{
"id": 1,
"type": "runnable",
"data": {
"id": [
"langchain",
"chains",
"llm",
"LLMChain"
],
"name": "LLMChain"
}
},
{
"id": 2,
"type": "schema",
"data": "ChainOutput"
}
],
"edges": [
{
"source": 0,
"target": 1
},
{
"source": 1,
"target": 2
}
]
}
"name": "LLMChain"
}
'''
# ---
@ -603,44 +270,7 @@
"template": "hello {name}!",
"template_format": "f-string"
},
"name": "PromptTemplate",
"graph": {
"nodes": [
{
"id": 0,
"type": "schema",
"data": "PromptInput"
},
{
"id": 1,
"type": "runnable",
"data": {
"id": [
"langchain",
"prompts",
"prompt",
"PromptTemplate"
],
"name": "PromptTemplate"
}
},
{
"id": 2,
"type": "schema",
"data": "PromptTemplateOutput"
}
],
"edges": [
{
"source": 0,
"target": 1
},
{
"source": 1,
"target": 2
}
]
}
"name": "PromptTemplate"
},
"llm": {
"lc": 1,
@ -670,44 +300,7 @@
"max_retries": 2,
"disallowed_special": "all"
},
"name": "OpenAI",
"graph": {
"nodes": [
{
"id": 0,
"type": "schema",
"data": "OpenAIInput"
},
{
"id": 1,
"type": "runnable",
"data": {
"id": [
"langchain",
"llms",
"openai",
"OpenAI"
],
"name": "OpenAI"
}
},
{
"id": 2,
"type": "schema",
"data": "OpenAIOutput"
}
],
"edges": [
{
"source": 0,
"target": 1
},
{
"source": 1,
"target": 2
}
]
}
"name": "OpenAI"
},
"output_key": "text",
"output_parser": {
@ -720,85 +313,11 @@
"StrOutputParser"
],
"kwargs": {},
"name": "StrOutputParser",
"graph": {
"nodes": [
{
"id": 0,
"type": "schema",
"data": "StrOutputParserInput"
},
{
"id": 1,
"type": "runnable",
"data": {
"id": [
"langchain",
"schema",
"output_parser",
"StrOutputParser"
],
"name": "StrOutputParser"
}
},
{
"id": 2,
"type": "schema",
"data": "StrOutputParserOutput"
}
],
"edges": [
{
"source": 0,
"target": 1
},
{
"source": 1,
"target": 2
}
]
}
"name": "StrOutputParser"
},
"return_final_only": true
},
"name": "LLMChain",
"graph": {
"nodes": [
{
"id": 0,
"type": "schema",
"data": "ChainInput"
},
{
"id": 1,
"type": "runnable",
"data": {
"id": [
"langchain",
"chains",
"llm",
"LLMChain"
],
"name": "LLMChain"
}
},
{
"id": 2,
"type": "schema",
"data": "ChainOutput"
}
],
"edges": [
{
"source": 0,
"target": 1
},
{
"source": 1,
"target": 2
}
]
}
"name": "LLMChain"
}
'''
# ---
@ -832,44 +351,7 @@
"max_retries": 2,
"disallowed_special": "all"
},
"name": "OpenAI",
"graph": {
"nodes": [
{
"id": 0,
"type": "schema",
"data": "OpenAIInput"
},
{
"id": 1,
"type": "runnable",
"data": {
"id": [
"langchain",
"llms",
"openai",
"OpenAI"
],
"name": "OpenAI"
}
},
{
"id": 2,
"type": "schema",
"data": "OpenAIOutput"
}
],
"edges": [
{
"source": 0,
"target": 1
},
{
"source": 1,
"target": 2
}
]
}
"name": "OpenAI"
}
'''
# ---

View File

@ -1354,7 +1354,7 @@ class CallbackManager(BaseCallbackManager):
def on_chain_start(
self,
serialized: Dict[str, Any],
serialized: Optional[Dict[str, Any]],
inputs: Union[Dict[str, Any], Any],
run_id: Optional[UUID] = None,
**kwargs: Any,
@ -1362,7 +1362,7 @@ class CallbackManager(BaseCallbackManager):
"""Run when chain starts running.
Args:
serialized (Dict[str, Any]): The serialized chain.
serialized (Optional[Dict[str, Any]]): The serialized chain.
inputs (Union[Dict[str, Any], Any]): The inputs to the chain.
run_id (UUID, optional): The ID of the run. Defaults to None.
**kwargs (Any): Additional keyword arguments.
@ -1398,7 +1398,7 @@ class CallbackManager(BaseCallbackManager):
def on_tool_start(
self,
serialized: Dict[str, Any],
serialized: Optional[Dict[str, Any]],
input_str: str,
run_id: Optional[UUID] = None,
parent_run_id: Optional[UUID] = None,
@ -1453,7 +1453,7 @@ class CallbackManager(BaseCallbackManager):
def on_retriever_start(
self,
serialized: Dict[str, Any],
serialized: Optional[Dict[str, Any]],
query: str,
run_id: Optional[UUID] = None,
parent_run_id: Optional[UUID] = None,
@ -1462,7 +1462,7 @@ class CallbackManager(BaseCallbackManager):
"""Run when the retriever starts running.
Args:
serialized (Dict[str, Any]): The serialized retriever.
serialized (Optional[Dict[str, Any]]): The serialized retriever.
query (str): The query.
run_id (UUID, optional): The ID of the run. Defaults to None.
parent_run_id (UUID, optional): The ID of the parent run. Defaults to None.
@ -1840,7 +1840,7 @@ class AsyncCallbackManager(BaseCallbackManager):
async def on_chain_start(
self,
serialized: Dict[str, Any],
serialized: Optional[Dict[str, Any]],
inputs: Union[Dict[str, Any], Any],
run_id: Optional[UUID] = None,
**kwargs: Any,
@ -1848,7 +1848,7 @@ class AsyncCallbackManager(BaseCallbackManager):
"""Async run when chain starts running.
Args:
serialized (Dict[str, Any]): The serialized chain.
serialized (Optional[Dict[str, Any]]): The serialized chain.
inputs (Union[Dict[str, Any], Any]): The inputs to the chain.
run_id (UUID, optional): The ID of the run. Defaults to None.
**kwargs (Any): Additional keyword arguments.
@ -1886,7 +1886,7 @@ class AsyncCallbackManager(BaseCallbackManager):
async def on_tool_start(
self,
serialized: Dict[str, Any],
serialized: Optional[Dict[str, Any]],
input_str: str,
run_id: Optional[UUID] = None,
parent_run_id: Optional[UUID] = None,
@ -1895,7 +1895,7 @@ class AsyncCallbackManager(BaseCallbackManager):
"""Run when the tool starts running.
Args:
serialized (Dict[str, Any]): The serialized tool.
serialized (Optional[Dict[str, Any]]): The serialized tool.
input_str (str): The input to the tool.
run_id (UUID, optional): The ID of the run. Defaults to None.
parent_run_id (UUID, optional): The ID of the parent run.
@ -1975,7 +1975,7 @@ class AsyncCallbackManager(BaseCallbackManager):
async def on_retriever_start(
self,
serialized: Dict[str, Any],
serialized: Optional[Dict[str, Any]],
query: str,
run_id: Optional[UUID] = None,
parent_run_id: Optional[UUID] = None,
@ -1984,7 +1984,7 @@ class AsyncCallbackManager(BaseCallbackManager):
"""Run when the retriever starts running.
Args:
serialized (Dict[str, Any]): The serialized retriever.
serialized (Optional[Dict[str, Any]]): The serialized retriever.
query (str): The query.
run_id (UUID, optional): The ID of the run. Defaults to None.
parent_run_id (UUID, optional): The ID of the parent run. Defaults to None.
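
For handler authors, the practical consequence is that serialized can no longer be indexed unconditionally. A hedged sketch of a defensive handler, assuming the standard BaseCallbackHandler hook signatures (the name kwarg fallback is an assumption about what the managers forward):

    from typing import Any, Dict, Optional
    from uuid import UUID

    from langchain_core.callbacks import BaseCallbackHandler

    class NamePrintingHandler(BaseCallbackHandler):
        """Illustrative only: treats `serialized` as Optional everywhere."""

        def on_chain_start(
            self,
            serialized: Optional[Dict[str, Any]],
            inputs: Dict[str, Any],
            *,
            run_id: UUID,
            **kwargs: Any,
        ) -> None:
            # Non-LLM runs now arrive with serialized=None, so fall back to
            # the name the manager passes through kwargs (or a placeholder).
            name = (serialized or {}).get("name") or kwargs.get("name", "Unnamed")
            print(f"chain start: {name} ({run_id})")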

View File

@ -19,6 +19,7 @@ from typing import (
import yaml
from langchain_core.load import dumpd
from langchain_core.output_parsers.base import BaseOutputParser
from langchain_core.prompt_values import (
ChatPromptValueConcrete,
@ -188,6 +189,7 @@ class BasePromptTemplate(
input,
config,
run_type="prompt",
serialized=dumpd(self),
)
async def ainvoke(
@ -212,6 +214,7 @@ class BasePromptTemplate(
input,
config,
run_type="prompt",
serialized=dumpd(self),
)
@abstractmethod
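
Prompts are the notable exception: BasePromptTemplate opts back in by threading serialized=dumpd(self) through the new _call_with_config parameter, presumably because prompt manifests are small and still wanted downstream. The manifest itself is also slimmer now (see the to_json change further down); a quick check, assuming post-commit behavior:

    from langchain_core.load import dumpd
    from langchain_core.prompts import PromptTemplate

    prompt = PromptTemplate.from_template("hello {name}!")
    manifest = dumpd(prompt)  # what invoke()/ainvoke() now forward for prompts
    assert manifest["name"] == "PromptTemplate"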

View File

@ -30,7 +30,6 @@ from typing_extensions import TypedDict
from langchain_core._api import deprecated
from langchain_core.documents import Document
from langchain_core.load.dump import dumpd
from langchain_core.runnables import (
Runnable,
RunnableConfig,
@ -235,9 +234,9 @@ class BaseRetriever(RunnableSerializable[RetrieverInput, RetrieverOutput], ABC):
local_metadata=self.metadata,
)
run_manager = callback_manager.on_retriever_start(
dumpd(self),
None,
input,
name=config.get("run_name"),
name=config.get("run_name") or self.get_name(),
run_id=kwargs.pop("run_id", None),
)
try:
@ -298,9 +297,9 @@ class BaseRetriever(RunnableSerializable[RetrieverInput, RetrieverOutput], ABC):
local_metadata=self.metadata,
)
run_manager = await callback_manager.on_retriever_start(
dumpd(self),
None,
input,
name=config.get("run_name"),
name=config.get("run_name") or self.get_name(),
run_id=kwargs.pop("run_id", None),
)
try:
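
With no manifest to read a name from, retriever runs now get their name from config["run_name"] or self.get_name(), which is why the expected names in the test snapshots below change from the generic "Retriever" to concrete class names. A runnable illustration:

    from typing import List

    from langchain_core.callbacks import CallbackManagerForRetrieverRun
    from langchain_core.documents import Document
    from langchain_core.retrievers import BaseRetriever

    class FakeRetriever(BaseRetriever):
        """Minimal retriever; its traced run is now named after the class."""

        def _get_relevant_documents(
            self, query: str, *, run_manager: CallbackManagerForRetrieverRun
        ) -> List[Document]:
            return [Document(page_content=query)]

    print(FakeRetriever().get_name())  # "FakeRetriever" -- the run-name fallback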

View File

@ -38,7 +38,6 @@ from typing import (
from typing_extensions import Literal, get_args
from langchain_core._api import beta_decorator
from langchain_core.load.dump import dumpd
from langchain_core.load.serializable import (
Serializable,
SerializedConstructor,
@ -1763,6 +1762,7 @@ class Runnable(Generic[Input, Output], ABC):
input: Input,
config: Optional[RunnableConfig],
run_type: Optional[str] = None,
serialized: Optional[Dict[str, Any]] = None,
**kwargs: Optional[Any],
) -> Output:
"""Helper method to transform an Input value to an Output value,
@ -1770,7 +1770,7 @@ class Runnable(Generic[Input, Output], ABC):
config = ensure_config(config)
callback_manager = get_callback_manager_for_config(config)
run_manager = callback_manager.on_chain_start(
dumpd(self),
serialized,
input,
run_type=run_type,
name=config.get("run_name") or self.get_name(),
@ -1811,6 +1811,7 @@ class Runnable(Generic[Input, Output], ABC):
input: Input,
config: Optional[RunnableConfig],
run_type: Optional[str] = None,
serialized: Optional[Dict[str, Any]] = None,
**kwargs: Optional[Any],
) -> Output:
"""Helper method to transform an Input value to an Output value,
@ -1818,7 +1819,7 @@ class Runnable(Generic[Input, Output], ABC):
config = ensure_config(config)
callback_manager = get_async_callback_manager_for_config(config)
run_manager = await callback_manager.on_chain_start(
dumpd(self),
serialized,
input,
run_type=run_type,
name=config.get("run_name") or self.get_name(),
@ -1871,7 +1872,7 @@ class Runnable(Generic[Input, Output], ABC):
callback_managers = [get_callback_manager_for_config(c) for c in configs]
run_managers = [
callback_manager.on_chain_start(
dumpd(self),
None,
input,
run_type=run_type,
name=config.get("run_name") or self.get_name(),
@ -1944,7 +1945,7 @@ class Runnable(Generic[Input, Output], ABC):
run_managers: List[AsyncCallbackManagerForChainRun] = await asyncio.gather(
*(
callback_manager.on_chain_start(
dumpd(self),
None,
input,
run_type=run_type,
name=config.get("run_name") or self.get_name(),
@ -2023,7 +2024,7 @@ class Runnable(Generic[Input, Output], ABC):
config = ensure_config(config)
callback_manager = get_callback_manager_for_config(config)
run_manager = callback_manager.on_chain_start(
dumpd(self),
None,
{"input": ""},
run_type=run_type,
name=config.get("run_name") or self.get_name(),
@ -2123,7 +2124,7 @@ class Runnable(Generic[Input, Output], ABC):
config = ensure_config(config)
callback_manager = get_async_callback_manager_for_config(config)
run_manager = await callback_manager.on_chain_start(
dumpd(self),
None,
{"input": ""},
run_type=run_type,
name=config.get("run_name") or self.get_name(),
@ -2325,7 +2326,6 @@ class RunnableSerializable(Serializable, Runnable[Input, Output]):
dumped = super().to_json()
try:
dumped["name"] = self.get_name()
dumped["graph"] = self.get_graph().to_json()
except Exception:
pass
return dumped
@ -2857,7 +2857,7 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
callback_manager = get_callback_manager_for_config(config)
# start the root run
run_manager = callback_manager.on_chain_start(
dumpd(self),
None,
input,
name=config.get("run_name") or self.get_name(),
run_id=config.pop("run_id", None),
@ -2897,7 +2897,7 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
callback_manager = get_async_callback_manager_for_config(config)
# start the root run
run_manager = await callback_manager.on_chain_start(
dumpd(self),
None,
input,
name=config.get("run_name") or self.get_name(),
run_id=config.pop("run_id", None),
@ -2962,7 +2962,7 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
# start the root runs, one per input
run_managers = [
cm.on_chain_start(
dumpd(self),
None,
input,
name=config.get("run_name") or self.get_name(),
run_id=config.pop("run_id", None),
@ -3089,7 +3089,7 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
run_managers: List[AsyncCallbackManagerForChainRun] = await asyncio.gather(
*(
cm.on_chain_start(
dumpd(self),
None,
input,
name=config.get("run_name") or self.get_name(),
run_id=config.pop("run_id", None),
@ -3544,7 +3544,7 @@ class RunnableParallel(RunnableSerializable[Input, Dict[str, Any]]):
)
# start the root run
run_manager = callback_manager.on_chain_start(
dumpd(self),
None,
input,
name=config.get("run_name") or self.get_name(),
run_id=config.pop("run_id", None),
@ -3596,7 +3596,7 @@ class RunnableParallel(RunnableSerializable[Input, Dict[str, Any]]):
callback_manager = get_async_callback_manager_for_config(config)
# start the root run
run_manager = await callback_manager.on_chain_start(
dumpd(self),
None,
input,
name=config.get("run_name") or self.get_name(),
run_id=config.pop("run_id", None),
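
The single biggest saving comes from RunnableSerializable.to_json no longer attaching dumped["graph"] = self.get_graph().to_json(); computing that graph is the expensive step the commit message refers to, and it is the "graph" key whose removal accounts for most of the 7,119 deleted snapshot lines. A quick check, assuming post-commit behavior:

    from langchain_core.output_parsers import StrOutputParser

    dumped = StrOutputParser().to_json()
    assert "graph" not in dumped  # no longer serialized
    print(sorted(dumped))         # ['id', 'kwargs', 'lc', 'name', 'type']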

View File

@ -14,7 +14,6 @@ from typing import (
cast,
)
from langchain_core.load.dump import dumpd
from langchain_core.pydantic_v1 import BaseModel
from langchain_core.runnables.base import (
Runnable,
@ -207,9 +206,9 @@ class RunnableBranch(RunnableSerializable[Input, Output]):
config = ensure_config(config)
callback_manager = get_callback_manager_for_config(config)
run_manager = callback_manager.on_chain_start(
dumpd(self),
None,
input,
name=config.get("run_name"),
name=config.get("run_name") or self.get_name(),
run_id=config.pop("run_id", None),
)
@ -246,7 +245,7 @@ class RunnableBranch(RunnableSerializable[Input, Output]):
except BaseException as e:
run_manager.on_chain_error(e)
raise
run_manager.on_chain_end(dumpd(output))
run_manager.on_chain_end(output)
return output
async def ainvoke(
@ -256,9 +255,9 @@ class RunnableBranch(RunnableSerializable[Input, Output]):
config = ensure_config(config)
callback_manager = get_async_callback_manager_for_config(config)
run_manager = await callback_manager.on_chain_start(
dumpd(self),
None,
input,
name=config.get("run_name"),
name=config.get("run_name") or self.get_name(),
run_id=config.pop("run_id", None),
)
try:
@ -294,7 +293,7 @@ class RunnableBranch(RunnableSerializable[Input, Output]):
except BaseException as e:
await run_manager.on_chain_error(e)
raise
await run_manager.on_chain_end(dumpd(output))
await run_manager.on_chain_end(output)
return output
def stream(
@ -320,9 +319,9 @@ class RunnableBranch(RunnableSerializable[Input, Output]):
config = ensure_config(config)
callback_manager = get_callback_manager_for_config(config)
run_manager = callback_manager.on_chain_start(
dumpd(self),
None,
input,
name=config.get("run_name"),
name=config.get("run_name") or self.get_name(),
run_id=config.pop("run_id", None),
)
final_output: Optional[Output] = None
@ -407,9 +406,9 @@ class RunnableBranch(RunnableSerializable[Input, Output]):
config = ensure_config(config)
callback_manager = get_async_callback_manager_for_config(config)
run_manager = await callback_manager.on_chain_start(
dumpd(self),
None,
input,
name=config.get("run_name"),
name=config.get("run_name") or self.get_name(),
run_id=config.pop("run_id", None),
)
final_output: Optional[Output] = None
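
A related cleanup in RunnableBranch: outputs are handed to on_chain_end directly instead of being round-tripped through dumpd, so handlers receive the raw output object. A hedged sketch of what a handler sees now:

    from typing import Any, Dict
    from uuid import UUID

    from langchain_core.callbacks import BaseCallbackHandler

    class OutputEcho(BaseCallbackHandler):
        def on_chain_end(
            self, outputs: Dict[str, Any], *, run_id: UUID, **kwargs: Any
        ) -> None:
            # Previously a branch run delivered dumpd(output) (a JSON-ish
            # dict); now it delivers the output value itself.
            print(type(outputs), outputs)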

View File

@ -18,7 +18,6 @@ from typing import (
cast,
)
from langchain_core.load.dump import dumpd
from langchain_core.pydantic_v1 import BaseModel
from langchain_core.runnables.base import Runnable, RunnableSerializable
from langchain_core.runnables.config import (
@ -163,9 +162,9 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
callback_manager = get_callback_manager_for_config(config)
# start the root run
run_manager = callback_manager.on_chain_start(
dumpd(self),
None,
input,
name=config.get("run_name"),
name=config.get("run_name") or self.get_name(),
run_id=config.pop("run_id", None),
)
first_error = None
@ -214,9 +213,9 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
callback_manager = get_async_callback_manager_for_config(config)
# start the root run
run_manager = await callback_manager.on_chain_start(
dumpd(self),
None,
input,
name=config.get("run_name"),
name=config.get("run_name") or self.get_name(),
run_id=config.pop("run_id", None),
)
@ -287,9 +286,9 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
# start the root runs, one per input
run_managers = [
cm.on_chain_start(
dumpd(self),
None,
input if isinstance(input, dict) else {"input": input},
name=config.get("run_name"),
name=config.get("run_name") or self.get_name(),
run_id=config.pop("run_id", None),
)
for cm, input, config in zip(callback_managers, inputs, configs)
@ -380,9 +379,9 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
run_managers: List[AsyncCallbackManagerForChainRun] = await asyncio.gather(
*(
cm.on_chain_start(
dumpd(self),
None,
input,
name=config.get("run_name"),
name=config.get("run_name") or self.get_name(),
run_id=config.pop("run_id", None),
)
for cm, input, config in zip(callback_managers, inputs, configs)
@ -458,9 +457,9 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
callback_manager = get_callback_manager_for_config(config)
# start the root run
run_manager = callback_manager.on_chain_start(
dumpd(self),
None,
input,
name=config.get("run_name"),
name=config.get("run_name") or self.get_name(),
run_id=config.pop("run_id", None),
)
first_error = None
@ -521,9 +520,9 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
callback_manager = get_async_callback_manager_for_config(config)
# start the root run
run_manager = await callback_manager.on_chain_start(
dumpd(self),
None,
input,
name=config.get("run_name"),
name=config.get("run_name") or self.get_name(),
run_id=config.pop("run_id", None),
)
first_error = None

View File

@ -73,14 +73,15 @@ class RunInfo(TypedDict):
parent_run_id: Optional[UUID]
def _assign_name(name: Optional[str], serialized: Dict[str, Any]) -> str:
def _assign_name(name: Optional[str], serialized: Optional[Dict[str, Any]]) -> str:
"""Assign a name to a run."""
if name is not None:
return name
if "name" in serialized:
return serialized["name"]
elif "id" in serialized:
return serialized["id"][-1]
if serialized is not None:
if "name" in serialized:
return serialized["name"]
elif "id" in serialized:
return serialized["id"][-1]
return "Unnamed"

View File

@ -1236,43 +1236,6 @@
# ---
# name: test_chat_prompt_w_msgs_placeholder_ser_des[chat_prompt]
dict({
'graph': dict({
'edges': list([
dict({
'source': 0,
'target': 1,
}),
dict({
'source': 1,
'target': 2,
}),
]),
'nodes': list([
dict({
'data': 'PromptInput',
'id': 0,
'type': 'schema',
}),
dict({
'data': dict({
'id': list([
'langchain',
'prompts',
'chat',
'ChatPromptTemplate',
]),
'name': 'ChatPromptTemplate',
}),
'id': 1,
'type': 'runnable',
}),
dict({
'data': 'ChatPromptTemplateOutput',
'id': 2,
'type': 'schema',
}),
]),
}),
'id': list([
'langchain',
'prompts',
@ -1293,43 +1256,6 @@
]),
'kwargs': dict({
'prompt': dict({
'graph': dict({
'edges': list([
dict({
'source': 0,
'target': 1,
}),
dict({
'source': 1,
'target': 2,
}),
]),
'nodes': list([
dict({
'data': 'PromptInput',
'id': 0,
'type': 'schema',
}),
dict({
'data': dict({
'id': list([
'langchain',
'prompts',
'prompt',
'PromptTemplate',
]),
'name': 'PromptTemplate',
}),
'id': 1,
'type': 'runnable',
}),
dict({
'data': 'PromptTemplateOutput',
'id': 2,
'type': 'schema',
}),
]),
}),
'id': list([
'langchain',
'prompts',
@ -1372,43 +1298,6 @@
]),
'kwargs': dict({
'prompt': dict({
'graph': dict({
'edges': list([
dict({
'source': 0,
'target': 1,
}),
dict({
'source': 1,
'target': 2,
}),
]),
'nodes': list([
dict({
'data': 'PromptInput',
'id': 0,
'type': 'schema',
}),
dict({
'data': dict({
'id': list([
'langchain',
'prompts',
'prompt',
'PromptTemplate',
]),
'name': 'PromptTemplate',
}),
'id': 1,
'type': 'runnable',
}),
dict({
'data': 'PromptTemplateOutput',
'id': 2,
'type': 'schema',
}),
]),
}),
'id': list([
'langchain',
'prompts',
@ -1453,43 +1342,6 @@
# ---
# name: test_chat_tmpl_serdes
dict({
'graph': dict({
'edges': list([
dict({
'source': 0,
'target': 1,
}),
dict({
'source': 1,
'target': 2,
}),
]),
'nodes': list([
dict({
'data': 'PromptInput',
'id': 0,
'type': 'schema',
}),
dict({
'data': dict({
'id': list([
'langchain',
'prompts',
'chat',
'ChatPromptTemplate',
]),
'name': 'ChatPromptTemplate',
}),
'id': 1,
'type': 'runnable',
}),
dict({
'data': 'ChatPromptTemplateOutput',
'id': 2,
'type': 'schema',
}),
]),
}),
'id': list([
'langchain',
'prompts',
@ -1514,43 +1366,6 @@
]),
'kwargs': dict({
'prompt': dict({
'graph': dict({
'edges': list([
dict({
'source': 0,
'target': 1,
}),
dict({
'source': 1,
'target': 2,
}),
]),
'nodes': list([
dict({
'data': 'PromptInput',
'id': 0,
'type': 'schema',
}),
dict({
'data': dict({
'id': list([
'langchain',
'prompts',
'prompt',
'PromptTemplate',
]),
'name': 'PromptTemplate',
}),
'id': 1,
'type': 'runnable',
}),
dict({
'data': 'PromptTemplateOutput',
'id': 2,
'type': 'schema',
}),
]),
}),
'id': list([
'langchain',
'prompts',
@ -1582,43 +1397,6 @@
'kwargs': dict({
'prompt': list([
dict({
'graph': dict({
'edges': list([
dict({
'source': 0,
'target': 1,
}),
dict({
'source': 1,
'target': 2,
}),
]),
'nodes': list([
dict({
'data': 'PromptInput',
'id': 0,
'type': 'schema',
}),
dict({
'data': dict({
'id': list([
'langchain',
'prompts',
'prompt',
'PromptTemplate',
]),
'name': 'PromptTemplate',
}),
'id': 1,
'type': 'runnable',
}),
dict({
'data': 'PromptTemplateOutput',
'id': 2,
'type': 'schema',
}),
]),
}),
'id': list([
'langchain',
'prompts',
@ -1650,43 +1428,6 @@
]),
'kwargs': dict({
'prompt': dict({
'graph': dict({
'edges': list([
dict({
'source': 0,
'target': 1,
}),
dict({
'source': 1,
'target': 2,
}),
]),
'nodes': list([
dict({
'data': 'PromptInput',
'id': 0,
'type': 'schema',
}),
dict({
'data': dict({
'id': list([
'langchain',
'prompts',
'prompt',
'PromptTemplate',
]),
'name': 'PromptTemplate',
}),
'id': 1,
'type': 'runnable',
}),
dict({
'data': 'PromptTemplateOutput',
'id': 2,
'type': 'schema',
}),
]),
}),
'id': list([
'langchain',
'prompts',
@ -1718,43 +1459,6 @@
'kwargs': dict({
'prompt': list([
dict({
'graph': dict({
'edges': list([
dict({
'source': 0,
'target': 1,
}),
dict({
'source': 1,
'target': 2,
}),
]),
'nodes': list([
dict({
'data': 'PromptInput',
'id': 0,
'type': 'schema',
}),
dict({
'data': dict({
'id': list([
'langchain',
'prompts',
'prompt',
'PromptTemplate',
]),
'name': 'PromptTemplate',
}),
'id': 1,
'type': 'runnable',
}),
dict({
'data': 'PromptTemplateOutput',
'id': 2,
'type': 'schema',
}),
]),
}),
'id': list([
'langchain',
'prompts',
@ -1772,43 +1476,6 @@
'type': 'constructor',
}),
dict({
'graph': dict({
'edges': list([
dict({
'source': 0,
'target': 1,
}),
dict({
'source': 1,
'target': 2,
}),
]),
'nodes': list([
dict({
'data': 'PromptInput',
'id': 0,
'type': 'schema',
}),
dict({
'data': dict({
'id': list([
'langchain',
'prompts',
'prompt',
'PromptTemplate',
]),
'name': 'PromptTemplate',
}),
'id': 1,
'type': 'runnable',
}),
dict({
'data': 'PromptTemplateOutput',
'id': 2,
'type': 'schema',
}),
]),
}),
'id': list([
'langchain',
'prompts',
@ -1826,43 +1493,6 @@
'type': 'constructor',
}),
dict({
'graph': dict({
'edges': list([
dict({
'source': 0,
'target': 1,
}),
dict({
'source': 1,
'target': 2,
}),
]),
'nodes': list([
dict({
'data': 'PromptInput',
'id': 0,
'type': 'schema',
}),
dict({
'data': dict({
'id': list([
'langchain',
'prompts',
'prompt',
'PromptTemplate',
]),
'name': 'PromptTemplate',
}),
'id': 1,
'type': 'runnable',
}),
dict({
'data': 'PromptTemplateOutput',
'id': 2,
'type': 'schema',
}),
]),
}),
'id': list([
'langchain',
'prompts',
@ -1880,43 +1510,6 @@
'type': 'constructor',
}),
dict({
'graph': dict({
'edges': list([
dict({
'source': 0,
'target': 1,
}),
dict({
'source': 1,
'target': 2,
}),
]),
'nodes': list([
dict({
'data': 'PromptInput',
'id': 0,
'type': 'schema',
}),
dict({
'data': dict({
'id': list([
'langchain',
'prompts',
'image',
'ImagePromptTemplate',
]),
'name': 'ImagePromptTemplate',
}),
'id': 1,
'type': 'runnable',
}),
dict({
'data': 'ImagePromptTemplateOutput',
'id': 2,
'type': 'schema',
}),
]),
}),
'id': list([
'langchain',
'prompts',
@ -1936,43 +1529,6 @@
'type': 'constructor',
}),
dict({
'graph': dict({
'edges': list([
dict({
'source': 0,
'target': 1,
}),
dict({
'source': 1,
'target': 2,
}),
]),
'nodes': list([
dict({
'data': 'PromptInput',
'id': 0,
'type': 'schema',
}),
dict({
'data': dict({
'id': list([
'langchain',
'prompts',
'image',
'ImagePromptTemplate',
]),
'name': 'ImagePromptTemplate',
}),
'id': 1,
'type': 'runnable',
}),
dict({
'data': 'ImagePromptTemplateOutput',
'id': 2,
'type': 'schema',
}),
]),
}),
'id': list([
'langchain',
'prompts',
@ -1992,43 +1548,6 @@
'type': 'constructor',
}),
dict({
'graph': dict({
'edges': list([
dict({
'source': 0,
'target': 1,
}),
dict({
'source': 1,
'target': 2,
}),
]),
'nodes': list([
dict({
'data': 'PromptInput',
'id': 0,
'type': 'schema',
}),
dict({
'data': dict({
'id': list([
'langchain',
'prompts',
'image',
'ImagePromptTemplate',
]),
'name': 'ImagePromptTemplate',
}),
'id': 1,
'type': 'runnable',
}),
dict({
'data': 'ImagePromptTemplateOutput',
'id': 2,
'type': 'schema',
}),
]),
}),
'id': list([
'langchain',
'prompts',
@ -2048,43 +1567,6 @@
'type': 'constructor',
}),
dict({
'graph': dict({
'edges': list([
dict({
'source': 0,
'target': 1,
}),
dict({
'source': 1,
'target': 2,
}),
]),
'nodes': list([
dict({
'data': 'PromptInput',
'id': 0,
'type': 'schema',
}),
dict({
'data': dict({
'id': list([
'langchain',
'prompts',
'image',
'ImagePromptTemplate',
]),
'name': 'ImagePromptTemplate',
}),
'id': 1,
'type': 'runnable',
}),
dict({
'data': 'ImagePromptTemplateOutput',
'id': 2,
'type': 'schema',
}),
]),
}),
'id': list([
'langchain',
'prompts',
@ -2105,43 +1587,6 @@
'type': 'constructor',
}),
dict({
'graph': dict({
'edges': list([
dict({
'source': 0,
'target': 1,
}),
dict({
'source': 1,
'target': 2,
}),
]),
'nodes': list([
dict({
'data': 'PromptInput',
'id': 0,
'type': 'schema',
}),
dict({
'data': dict({
'id': list([
'langchain',
'prompts',
'image',
'ImagePromptTemplate',
]),
'name': 'ImagePromptTemplate',
}),
'id': 1,
'type': 'runnable',
}),
dict({
'data': 'ImagePromptTemplateOutput',
'id': 2,
'type': 'schema',
}),
]),
}),
'id': list([
'langchain',
'prompts',
@ -2160,43 +1605,6 @@
'type': 'constructor',
}),
dict({
'graph': dict({
'edges': list([
dict({
'source': 0,
'target': 1,
}),
dict({
'source': 1,
'target': 2,
}),
]),
'nodes': list([
dict({
'data': 'PromptInput',
'id': 0,
'type': 'schema',
}),
dict({
'data': dict({
'id': list([
'langchain',
'prompts',
'image',
'ImagePromptTemplate',
]),
'name': 'ImagePromptTemplate',
}),
'id': 1,
'type': 'runnable',
}),
dict({
'data': 'ImagePromptTemplateOutput',
'id': 2,
'type': 'schema',
}),
]),
}),
'id': list([
'langchain',
'prompts',
@ -2215,43 +1623,6 @@
'type': 'constructor',
}),
dict({
'graph': dict({
'edges': list([
dict({
'source': 0,
'target': 1,
}),
dict({
'source': 1,
'target': 2,
}),
]),
'nodes': list([
dict({
'data': 'PromptInput',
'id': 0,
'type': 'schema',
}),
dict({
'data': dict({
'id': list([
'langchain',
'prompts',
'image',
'ImagePromptTemplate',
]),
'name': 'ImagePromptTemplate',
}),
'id': 1,
'type': 'runnable',
}),
dict({
'data': 'ImagePromptTemplateOutput',
'id': 2,
'type': 'schema',
}),
]),
}),
'id': list([
'langchain',
'prompts',

View File

@ -35,44 +35,7 @@
}
}
},
"name": "RunnableParallel<buz>",
"graph": {
"nodes": [
{
"id": 0,
"type": "schema",
"data": "Parallel<buz>Input"
},
{
"id": 1,
"type": "schema",
"data": "Parallel<buz>Output"
},
{
"id": 2,
"type": "runnable",
"data": {
"id": [
"langchain_core",
"runnables",
"base",
"RunnableLambda"
],
"name": "Lambda"
}
}
],
"edges": [
{
"source": 0,
"target": 2
},
{
"source": 2,
"target": 1
}
]
}
"name": "RunnableParallel<buz>"
},
"last": {
"lc": 1,
@ -110,44 +73,7 @@
"template": "what did baz say to {buz}",
"template_format": "f-string"
},
"name": "PromptTemplate",
"graph": {
"nodes": [
{
"id": 0,
"type": "schema",
"data": "PromptInput"
},
{
"id": 1,
"type": "runnable",
"data": {
"id": [
"langchain",
"prompts",
"prompt",
"PromptTemplate"
],
"name": "PromptTemplate"
}
},
{
"id": 2,
"type": "schema",
"data": "PromptTemplateOutput"
}
],
"edges": [
{
"source": 0,
"target": 1
},
{
"source": 1,
"target": 2
}
]
}
"name": "PromptTemplate"
},
"last": {
"lc": 1,
@ -159,101 +85,10 @@
"FakeListLLM"
],
"repr": "FakeListLLM(responses=['foo'], i=1)",
"name": "FakeListLLM",
"graph": {
"nodes": [
{
"id": 0,
"type": "schema",
"data": "FakeListLLMInput"
},
{
"id": 1,
"type": "runnable",
"data": {
"id": [
"langchain_core",
"language_models",
"fake",
"FakeListLLM"
],
"name": "FakeListLLM"
}
},
{
"id": 2,
"type": "schema",
"data": "FakeListLLMOutput"
}
],
"edges": [
{
"source": 0,
"target": 1
},
{
"source": 1,
"target": 2
}
]
}
"name": "FakeListLLM"
}
},
"name": "RunnableSequence",
"graph": {
"nodes": [
{
"id": 0,
"type": "schema",
"data": "PromptInput"
},
{
"id": 1,
"type": "runnable",
"data": {
"id": [
"langchain",
"prompts",
"prompt",
"PromptTemplate"
],
"name": "PromptTemplate"
}
},
{
"id": 2,
"type": "runnable",
"data": {
"id": [
"langchain_core",
"language_models",
"fake",
"FakeListLLM"
],
"name": "FakeListLLM"
}
},
{
"id": 3,
"type": "schema",
"data": "FakeListLLMOutput"
}
],
"edges": [
{
"source": 0,
"target": 1
},
{
"source": 2,
"target": 3
},
{
"source": 1,
"target": 2
}
]
}
"name": "RunnableSequence"
},
"fallbacks": [
{
@ -282,44 +117,7 @@
"template": "what did baz say to {buz}",
"template_format": "f-string"
},
"name": "PromptTemplate",
"graph": {
"nodes": [
{
"id": 0,
"type": "schema",
"data": "PromptInput"
},
{
"id": 1,
"type": "runnable",
"data": {
"id": [
"langchain",
"prompts",
"prompt",
"PromptTemplate"
],
"name": "PromptTemplate"
}
},
{
"id": 2,
"type": "schema",
"data": "PromptTemplateOutput"
}
],
"edges": [
{
"source": 0,
"target": 1
},
{
"source": 1,
"target": 2
}
]
}
"name": "PromptTemplate"
},
"last": {
"lc": 1,
@ -331,101 +129,10 @@
"FakeListLLM"
],
"repr": "FakeListLLM(responses=['bar'])",
"name": "FakeListLLM",
"graph": {
"nodes": [
{
"id": 0,
"type": "schema",
"data": "FakeListLLMInput"
},
{
"id": 1,
"type": "runnable",
"data": {
"id": [
"langchain_core",
"language_models",
"fake",
"FakeListLLM"
],
"name": "FakeListLLM"
}
},
{
"id": 2,
"type": "schema",
"data": "FakeListLLMOutput"
}
],
"edges": [
{
"source": 0,
"target": 1
},
{
"source": 1,
"target": 2
}
]
}
"name": "FakeListLLM"
}
},
"name": "RunnableSequence",
"graph": {
"nodes": [
{
"id": 0,
"type": "schema",
"data": "PromptInput"
},
{
"id": 1,
"type": "runnable",
"data": {
"id": [
"langchain",
"prompts",
"prompt",
"PromptTemplate"
],
"name": "PromptTemplate"
}
},
{
"id": 2,
"type": "runnable",
"data": {
"id": [
"langchain_core",
"language_models",
"fake",
"FakeListLLM"
],
"name": "FakeListLLM"
}
},
{
"id": 3,
"type": "schema",
"data": "FakeListLLMOutput"
}
],
"edges": [
{
"source": 0,
"target": 1
},
{
"source": 2,
"target": 3
},
{
"source": 1,
"target": 2
}
]
}
"name": "RunnableSequence"
}
],
"exceptions_to_handle": [
@ -440,101 +147,10 @@
}
]
},
"name": "RunnableWithFallbacks",
"graph": {
"nodes": [
{
"id": 0,
"type": "schema",
"data": "PromptInput"
},
{
"id": 1,
"type": "runnable",
"data": {
"id": [
"langchain",
"schema",
"runnable",
"RunnableWithFallbacks"
],
"name": "WithFallbacks"
}
},
{
"id": 2,
"type": "schema",
"data": "FakeListLLMOutput"
}
],
"edges": [
{
"source": 0,
"target": 1
},
{
"source": 1,
"target": 2
}
]
}
"name": "RunnableWithFallbacks"
}
},
"name": "RunnableSequence",
"graph": {
"nodes": [
{
"id": 0,
"type": "schema",
"data": "Parallel<buz>Input"
},
{
"id": 1,
"type": "runnable",
"data": {
"id": [
"langchain_core",
"runnables",
"base",
"RunnableLambda"
],
"name": "Lambda"
}
},
{
"id": 2,
"type": "runnable",
"data": {
"id": [
"langchain",
"schema",
"runnable",
"RunnableWithFallbacks"
],
"name": "WithFallbacks"
}
},
{
"id": 3,
"type": "schema",
"data": "FakeListLLMOutput"
}
],
"edges": [
{
"source": 0,
"target": 1
},
{
"source": 2,
"target": 3
},
{
"source": 1,
"target": 2
}
]
}
"name": "RunnableSequence"
}
'''
# ---
@ -571,85 +187,11 @@
"RunnablePassthrough"
],
"kwargs": {},
"name": "RunnablePassthrough",
"graph": {
"nodes": [
{
"id": 0,
"type": "schema",
"data": "PassthroughInput"
},
{
"id": 1,
"type": "runnable",
"data": {
"id": [
"langchain",
"schema",
"runnable",
"RunnablePassthrough"
],
"name": "Passthrough"
}
},
{
"id": 2,
"type": "schema",
"data": "PassthroughOutput"
}
],
"edges": [
{
"source": 0,
"target": 1
},
{
"source": 1,
"target": 2
}
]
}
"name": "RunnablePassthrough"
}
}
},
"name": "RunnableParallel<text>",
"graph": {
"nodes": [
{
"id": 0,
"type": "schema",
"data": "Parallel<text>Input"
},
{
"id": 1,
"type": "schema",
"data": "Parallel<text>Output"
},
{
"id": 2,
"type": "runnable",
"data": {
"id": [
"langchain",
"schema",
"runnable",
"RunnablePassthrough"
],
"name": "Passthrough"
}
}
],
"edges": [
{
"source": 0,
"target": 2
},
{
"source": 2,
"target": 1
}
]
}
"name": "RunnableParallel<text>"
},
"last": {
"lc": 1,
@ -698,101 +240,10 @@
],
"exception_key": "exception"
},
"name": "RunnableWithFallbacks",
"graph": {
"nodes": [
{
"id": 0,
"type": "schema",
"data": "_raise_error_input"
},
{
"id": 1,
"type": "runnable",
"data": {
"id": [
"langchain",
"schema",
"runnable",
"RunnableWithFallbacks"
],
"name": "WithFallbacks"
}
},
{
"id": 2,
"type": "schema",
"data": "_raise_error_output"
}
],
"edges": [
{
"source": 0,
"target": 1
},
{
"source": 1,
"target": 2
}
]
}
"name": "RunnableWithFallbacks"
}
},
"name": "RunnableSequence",
"graph": {
"nodes": [
{
"id": 0,
"type": "schema",
"data": "Parallel<text>Input"
},
{
"id": 1,
"type": "runnable",
"data": {
"id": [
"langchain",
"schema",
"runnable",
"RunnablePassthrough"
],
"name": "Passthrough"
}
},
{
"id": 2,
"type": "runnable",
"data": {
"id": [
"langchain",
"schema",
"runnable",
"RunnableWithFallbacks"
],
"name": "WithFallbacks"
}
},
{
"id": 3,
"type": "schema",
"data": "_raise_error_output"
}
],
"edges": [
{
"source": 0,
"target": 1
},
{
"source": 2,
"target": 3
},
{
"source": 1,
"target": 2
}
]
}
"name": "RunnableSequence"
}
'''
# ---
@ -818,44 +269,7 @@
"FakeListLLM"
],
"repr": "FakeListLLM(responses=['foo'], i=1)",
"name": "FakeListLLM",
"graph": {
"nodes": [
{
"id": 0,
"type": "schema",
"data": "FakeListLLMInput"
},
{
"id": 1,
"type": "runnable",
"data": {
"id": [
"langchain_core",
"language_models",
"fake",
"FakeListLLM"
],
"name": "FakeListLLM"
}
},
{
"id": 2,
"type": "schema",
"data": "FakeListLLMOutput"
}
],
"edges": [
{
"source": 0,
"target": 1
},
{
"source": 1,
"target": 2
}
]
}
"name": "FakeListLLM"
},
"fallbacks": [
{
@ -868,44 +282,7 @@
"FakeListLLM"
],
"repr": "FakeListLLM(responses=['bar'])",
"name": "FakeListLLM",
"graph": {
"nodes": [
{
"id": 0,
"type": "schema",
"data": "FakeListLLMInput"
},
{
"id": 1,
"type": "runnable",
"data": {
"id": [
"langchain_core",
"language_models",
"fake",
"FakeListLLM"
],
"name": "FakeListLLM"
}
},
{
"id": 2,
"type": "schema",
"data": "FakeListLLMOutput"
}
],
"edges": [
{
"source": 0,
"target": 1
},
{
"source": 1,
"target": 2
}
]
}
"name": "FakeListLLM"
}
],
"exceptions_to_handle": [
@ -920,44 +297,7 @@
}
]
},
"name": "RunnableWithFallbacks",
"graph": {
"nodes": [
{
"id": 0,
"type": "schema",
"data": "FakeListLLMInput"
},
{
"id": 1,
"type": "runnable",
"data": {
"id": [
"langchain",
"schema",
"runnable",
"RunnableWithFallbacks"
],
"name": "WithFallbacks"
}
},
{
"id": 2,
"type": "schema",
"data": "FakeListLLMOutput"
}
],
"edges": [
{
"source": 0,
"target": 1
},
{
"source": 1,
"target": 2
}
]
}
"name": "RunnableWithFallbacks"
}
'''
# ---
@ -983,44 +323,7 @@
"FakeListLLM"
],
"repr": "FakeListLLM(responses=['foo'], i=1)",
"name": "FakeListLLM",
"graph": {
"nodes": [
{
"id": 0,
"type": "schema",
"data": "FakeListLLMInput"
},
{
"id": 1,
"type": "runnable",
"data": {
"id": [
"langchain_core",
"language_models",
"fake",
"FakeListLLM"
],
"name": "FakeListLLM"
}
},
{
"id": 2,
"type": "schema",
"data": "FakeListLLMOutput"
}
],
"edges": [
{
"source": 0,
"target": 1
},
{
"source": 1,
"target": 2
}
]
}
"name": "FakeListLLM"
},
"fallbacks": [
{
@ -1033,44 +336,7 @@
"FakeListLLM"
],
"repr": "FakeListLLM(responses=['baz'], i=1)",
"name": "FakeListLLM",
"graph": {
"nodes": [
{
"id": 0,
"type": "schema",
"data": "FakeListLLMInput"
},
{
"id": 1,
"type": "runnable",
"data": {
"id": [
"langchain_core",
"language_models",
"fake",
"FakeListLLM"
],
"name": "FakeListLLM"
}
},
{
"id": 2,
"type": "schema",
"data": "FakeListLLMOutput"
}
],
"edges": [
{
"source": 0,
"target": 1
},
{
"source": 1,
"target": 2
}
]
}
"name": "FakeListLLM"
},
{
"lc": 1,
@ -1082,44 +348,7 @@
"FakeListLLM"
],
"repr": "FakeListLLM(responses=['bar'])",
"name": "FakeListLLM",
"graph": {
"nodes": [
{
"id": 0,
"type": "schema",
"data": "FakeListLLMInput"
},
{
"id": 1,
"type": "runnable",
"data": {
"id": [
"langchain_core",
"language_models",
"fake",
"FakeListLLM"
],
"name": "FakeListLLM"
}
},
{
"id": 2,
"type": "schema",
"data": "FakeListLLMOutput"
}
],
"edges": [
{
"source": 0,
"target": 1
},
{
"source": 1,
"target": 2
}
]
}
"name": "FakeListLLM"
}
],
"exceptions_to_handle": [
@ -1134,44 +363,7 @@
}
]
},
"name": "RunnableWithFallbacks",
"graph": {
"nodes": [
{
"id": 0,
"type": "schema",
"data": "FakeListLLMInput"
},
{
"id": 1,
"type": "runnable",
"data": {
"id": [
"langchain",
"schema",
"runnable",
"RunnableWithFallbacks"
],
"name": "WithFallbacks"
}
},
{
"id": 2,
"type": "schema",
"data": "FakeListLLMOutput"
}
],
"edges": [
{
"source": 0,
"target": 1
},
{
"source": 1,
"target": 2
}
]
}
"name": "RunnableWithFallbacks"
}
'''
# ---

File diff suppressed because one or more lines are too long

View File

@ -2520,7 +2520,7 @@ async def test_stream_log_retriever() -> None:
"ChatPromptTemplate",
"FakeListLLM",
"FakeListLLM:2",
"Retriever",
"FakeRetriever",
"RunnableLambda",
"RunnableParallel<documents,question>",
"RunnableParallel<one,two>",

View File

@ -1280,7 +1280,7 @@ async def test_event_stream_with_retriever_and_formatter() -> None:
"data": {"input": {"query": "hello"}},
"event": "on_retriever_start",
"metadata": {},
"name": "Retriever",
"name": "HardCodedRetriever",
"run_id": "",
"parent_ids": [],
"tags": ["seq:step:1"],
@ -1302,7 +1302,7 @@ async def test_event_stream_with_retriever_and_formatter() -> None:
},
"event": "on_retriever_end",
"metadata": {},
"name": "Retriever",
"name": "HardCodedRetriever",
"run_id": "",
"parent_ids": [],
"tags": ["seq:step:1"],

View File

@ -23,7 +23,6 @@ from langchain_core.callbacks import CallbackManagerForRetrieverRun, Callbacks
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.documents import Document
from langchain_core.language_models import FakeStreamingListLLM, GenericFakeChatModel
from langchain_core.load import dumpd
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
@ -2080,7 +2079,7 @@ class StreamingRunnable(Runnable[Input, Output]):
config = ensure_config(config)
callback_manager = get_callback_manager_for_config(config)
run_manager = callback_manager.on_chain_start(
dumpd(self),
None,
input,
name=config.get("run_name", self.get_name()),
run_id=config.get("run_id"),

View File

@ -18,7 +18,6 @@ from langchain_core.callbacks import (
CallbackManagerForChainRun,
Callbacks,
)
from langchain_core.load.dump import dumpd
from langchain_core.memory import BaseMemory
from langchain_core.outputs import RunInfo
from langchain_core.pydantic_v1 import BaseModel, Field, root_validator, validator
@ -143,7 +142,7 @@ class Chain(RunnableSerializable[Dict[str, Any], Dict[str, Any]], ABC):
new_arg_supported = inspect.signature(self._call).parameters.get("run_manager")
run_manager = callback_manager.on_chain_start(
dumpd(self),
None,
inputs,
run_id,
name=run_name,
@ -195,7 +194,7 @@ class Chain(RunnableSerializable[Dict[str, Any], Dict[str, Any]], ABC):
)
new_arg_supported = inspect.signature(self._acall).parameters.get("run_manager")
run_manager = await callback_manager.on_chain_start(
dumpd(self),
None,
inputs,
run_id,
name=run_name,

View File

@ -17,7 +17,6 @@ from langchain_core.language_models import (
BaseLanguageModel,
LanguageModelInput,
)
from langchain_core.load.dump import dumpd
from langchain_core.messages import BaseMessage
from langchain_core.output_parsers import BaseLLMOutputParser, StrOutputParser
from langchain_core.outputs import ChatGeneration, Generation, LLMResult
@ -240,7 +239,7 @@ class LLMChain(Chain):
callbacks, self.callbacks, self.verbose
)
run_manager = callback_manager.on_chain_start(
dumpd(self),
None,
{"input_list": input_list},
)
try:
@ -260,7 +259,7 @@ class LLMChain(Chain):
callbacks, self.callbacks, self.verbose
)
run_manager = await callback_manager.on_chain_start(
dumpd(self),
None,
{"input_list": input_list},
)
try:

View File

@ -24,7 +24,6 @@ from langchain_core.callbacks import (
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.load.dump import dumpd
from langchain_core.pydantic_v1 import root_validator
from langchain_core.retrievers import BaseRetriever, RetrieverLike
from langchain_core.runnables import RunnableConfig
@ -106,9 +105,9 @@ class EnsembleRetriever(BaseRetriever):
local_metadata=self.metadata,
)
run_manager = callback_manager.on_retriever_start(
dumpd(self),
None,
input,
name=config.get("run_name"),
name=config.get("run_name") or self.get_name(),
**kwargs,
)
try:
@ -139,9 +138,9 @@ class EnsembleRetriever(BaseRetriever):
local_metadata=self.metadata,
)
run_manager = await callback_manager.on_retriever_start(
dumpd(self),
None,
input,
name=config.get("run_name"),
name=config.get("run_name") or self.get_name(),
**kwargs,
)
try: