core[patch]: Propagate module name to create model (#26267)

* This allows pydantic to correctly resolve annotations necessary for
building pydantic models dynamically.
* Makes a small fix for RunnableWithMessageHistory, which was fetching
the OutputType from a RunnableLambda that was yielding another
RunnableLambda. This fix does not fully propagate the output of the
RunnableAssign (i.e., with concrete type information etc.).

Resolves issue: https://github.com/langchain-ai/langchain/issues/26250
This commit is contained in:
Eugene Yurtsev
2024-09-10 15:22:56 -04:00
committed by GitHub
parent 622cb7d2cf
commit 7975c1f0ca
4 changed files with 194 additions and 5 deletions

View File

@@ -454,6 +454,41 @@ def test_get_input_schema_input_dict() -> None:
)
def test_get_output_schema() -> None:
    """Test get output schema."""

    def _echo_history(value: dict) -> dict:
        # Gather the text of every human message in the history, then
        # append the current input, and echo it all back in one AIMessage.
        human_texts = [
            str(msg.content)
            for msg in value["history"]
            if isinstance(msg, HumanMessage)
        ]
        joined = "\n".join(human_texts + [value["input"]])
        return {"output": [AIMessage(content="you said: " + joined)]}

    runnable = RunnableLambda(_echo_history)
    session_history_getter = _get_get_session_history()
    with_history = RunnableWithMessageHistory(
        runnable,
        session_history_getter,
        input_messages_key="input",
        history_messages_key="history",
        output_messages_key="output",
    )
    # The output schema of the history-wrapped runnable should be a bare
    # object schema (the concrete field types are not propagated through).
    output_type = with_history.get_output_schema()
    assert _schema(output_type) == {
        "title": "RunnableWithChatHistoryOutput",
        "type": "object",
    }
def test_get_input_schema_input_messages() -> None:
from pydantic import RootModel