Compare commits

...

6 Commits

Author SHA1 Message Date
Eugene Yurtsev
7ed48c0e9d Update libs/core/langchain_core/runnables/base.py 2024-08-01 15:16:02 -04:00
Eugene Yurtsev
1dcc561a45 x 2024-08-01 15:11:32 -04:00
Eugene Yurtsev
ad3ae6d4b1 update 2024-08-01 14:56:20 -04:00
Eugene Yurtsev
a7b625b820 fix rst directive for note 2024-08-01 14:47:51 -04:00
Eugene Yurtsev
6e9dd04780 x 2024-08-01 14:46:00 -04:00
Eugene Yurtsev
617a0fb5e6 Update 2024-08-01 14:45:16 -04:00
5 changed files with 203 additions and 113 deletions

View File

@@ -309,6 +309,43 @@ class Runnable(Generic[Input, Output], ABC):
__root__=(root_type, None),
)
def get_input_jsonschema(
    self, config: Optional[RunnableConfig] = None
) -> Dict[str, Any]:
    """Get a JSON schema that represents the input to the Runnable.

    Args:
        config: A config to use when generating the schema.

    Returns:
        A JSON schema that represents the input to the Runnable.

    Example:

        .. code-block:: python

            from langchain_core.runnables import RunnableLambda

            def add_one(x: int) -> int:
                return x + 1

            runnable = RunnableLambda(add_one)

            print(runnable.get_input_jsonschema())

    .. note:: JSON Schema has different versions

        The JSON schema is currently formatted according to pydantic.v1
        which returns draft 7 JSON schema.

        The format is likely to change after LangChain's upgrade to pydantic
        v2 is completed. It will likely return the 2020-12 JSON schema as
        is the default for pydantic.v2. However, this is still TBD.

    .. versionadded:: 0.2.27
    """
    # Delegate to the (possibly overridden) pydantic input schema and
    # serialize it to a plain JSON-schema dict.
    return self.get_input_schema(config).schema()
@property
def output_schema(self) -> Type[BaseModel]:
"""The type of output this Runnable produces specified as a pydantic model."""
@@ -341,6 +378,43 @@ class Runnable(Generic[Input, Output], ABC):
__root__=(root_type, None),
)
def get_output_jsonschema(
    self, config: Optional[RunnableConfig] = None
) -> Dict[str, Any]:
    """Get a JSON schema that represents the output of the Runnable.

    Args:
        config: A config to use when generating the schema.

    Returns:
        A JSON schema that represents the output of the Runnable.

    Example:

        .. code-block:: python

            from langchain_core.runnables import RunnableLambda

            def add_one(x: int) -> int:
                return x + 1

            runnable = RunnableLambda(add_one)

            print(runnable.get_output_jsonschema())

    .. note:: JSON Schema has different versions

        The JSON schema is currently formatted according to pydantic.v1
        which returns draft 7 JSON schema.

        The format is likely to change after LangChain's upgrade to pydantic
        v2 is completed. It will likely return the 2020-12 JSON schema as
        is the default for pydantic.v2. However, this is still TBD.

    .. versionadded:: 0.2.27
    """
    # Delegate to the (possibly overridden) pydantic output schema and
    # serialize it to a plain JSON-schema dict.
    return self.get_output_schema(config).schema()
@property
def config_specs(self) -> List[ConfigurableFieldSpec]:
"""List configurable fields for this Runnable."""
@@ -390,6 +464,30 @@ class Runnable(Generic[Input, Output], ABC):
},
)
def get_config_jsonschema(
    self, *, include: Optional[Sequence[str]] = None
) -> Dict[str, Any]:
    """Get a JSON schema that represents the config of the Runnable.

    Args:
        include: A list of fields to include in the config schema.

    Returns:
        A JSON schema that represents the config of the Runnable.

    .. note:: JSON Schema has different versions

        The JSON schema is currently formatted according to pydantic.v1
        which returns draft 7 JSON schema.

        The format is likely to change after LangChain's upgrade to pydantic
        v2 is completed. It will likely return the 2020-12 JSON schema as
        is the default for pydantic.v2. However, this is still TBD.

    .. versionadded:: 0.2.27
    """
    # Delegate to the (possibly overridden) pydantic config schema and
    # serialize it to a plain JSON-schema dict.
    return self.config_schema(include=include).schema()
def get_graph(self, config: Optional[RunnableConfig] = None) -> Graph:
"""Return a graph representation of this Runnable."""
from langchain_core.runnables.graph import Graph

View File

@@ -29,7 +29,6 @@ from langchain_core.prompts.chat import (
_convert_to_message,
)
from langchain_core.pydantic_v1 import ValidationError
from tests.unit_tests.pydantic_utils import _schema
@pytest.fixture
@@ -796,14 +795,14 @@ def test_chat_input_schema(snapshot: SnapshotAssertion) -> None:
assert prompt_all_required.optional_variables == []
with pytest.raises(ValidationError):
prompt_all_required.input_schema(input="")
assert _schema(prompt_all_required.input_schema) == snapshot(name="required")
assert prompt_all_required.get_input_jsonschema() == snapshot(name="required")
prompt_optional = ChatPromptTemplate(
messages=[MessagesPlaceholder("history", optional=True), ("user", "${input}")]
)
# input variables only lists required variables
assert set(prompt_optional.input_variables) == {"input"}
prompt_optional.input_schema(input="") # won't raise error
assert _schema(prompt_optional.input_schema) == snapshot(name="partial")
assert prompt_optional.get_input_jsonschema() == snapshot(name="partial")
def test_chat_prompt_w_msgs_placeholder_ser_des(snapshot: SnapshotAssertion) -> None:

View File

@@ -7,7 +7,6 @@ import pytest
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.tracers.run_collector import RunCollectorCallbackHandler
from tests.unit_tests.pydantic_utils import _schema
def test_prompt_valid() -> None:
@@ -70,7 +69,7 @@ def test_mustache_prompt_from_template() -> None:
prompt = PromptTemplate.from_template(template, template_format="mustache")
assert prompt.format(foo="bar") == "This is a bar test."
assert prompt.input_variables == ["foo"]
assert _schema(prompt.input_schema) == {
assert prompt.get_input_jsonschema() == {
"title": "PromptInput",
"type": "object",
"properties": {"foo": {"title": "Foo", "type": "string"}},
@@ -81,7 +80,7 @@ def test_mustache_prompt_from_template() -> None:
prompt = PromptTemplate.from_template(template, template_format="mustache")
assert prompt.format(bar="baz", foo="bar") == "This baz is a bar test."
assert prompt.input_variables == ["bar", "foo"]
assert _schema(prompt.input_schema) == {
assert prompt.get_input_jsonschema() == {
"title": "PromptInput",
"type": "object",
"properties": {
@@ -95,7 +94,7 @@ def test_mustache_prompt_from_template() -> None:
prompt = PromptTemplate.from_template(template, template_format="mustache")
assert prompt.format(bar="baz", foo="bar") == "This baz is a bar test bar."
assert prompt.input_variables == ["bar", "foo"]
assert _schema(prompt.input_schema) == {
assert prompt.get_input_jsonschema() == {
"title": "PromptInput",
"type": "object",
"properties": {
@@ -111,7 +110,7 @@ def test_mustache_prompt_from_template() -> None:
"This foo is a bar test baz."
)
assert prompt.input_variables == ["foo", "obj"]
assert _schema(prompt.input_schema) == {
assert prompt.get_input_jsonschema() == {
"title": "PromptInput",
"type": "object",
"properties": {
@@ -135,7 +134,7 @@ def test_mustache_prompt_from_template() -> None:
prompt = PromptTemplate.from_template(template, template_format="mustache")
assert prompt.format(foo="baz") == ("This {'foo': 'baz'} is a test.")
assert prompt.input_variables == []
assert _schema(prompt.input_schema) == {
assert prompt.get_input_jsonschema() == {
"title": "PromptInput",
"type": "object",
"properties": {},
@@ -152,7 +151,7 @@ def test_mustache_prompt_from_template() -> None:
is a test."""
)
assert prompt.input_variables == ["foo"]
assert _schema(prompt.input_schema) == {
assert prompt.get_input_jsonschema() == {
"title": "PromptInput",
"type": "object",
"properties": {"foo": {"$ref": "#/definitions/foo"}},
@@ -184,7 +183,7 @@ def test_mustache_prompt_from_template() -> None:
is a test."""
)
assert prompt.input_variables == ["foo"]
assert _schema(prompt.input_schema) == {
assert prompt.get_input_jsonschema() == {
"title": "PromptInput",
"type": "object",
"properties": {"foo": {"$ref": "#/definitions/foo"}},
@@ -239,7 +238,7 @@ def test_mustache_prompt_from_template() -> None:
is a test."""
)
assert prompt.input_variables == ["foo"]
assert _schema(prompt.input_schema) == {
assert prompt.get_input_jsonschema() == {
"title": "PromptInput",
"type": "object",
"properties": {"foo": {"$ref": "#/definitions/foo"}},
@@ -287,7 +286,7 @@ def test_mustache_prompt_from_template() -> None:
is a test."""
)
assert prompt.input_variables == ["foo"]
assert _schema(prompt.input_schema) == {
assert prompt.get_input_jsonschema() == {
"title": "PromptInput",
"type": "object",
"properties": {"foo": {"$ref": "#/definitions/foo"}},
@@ -310,7 +309,7 @@ def test_mustache_prompt_from_template() -> None:
is a test."""
)
assert prompt.input_variables == ["foo"]
assert _schema(prompt.input_schema) == {
assert prompt.get_input_jsonschema() == {
"title": "PromptInput",
"type": "object",
"properties": {"foo": {"title": "Foo", "type": "object"}},

View File

@@ -19,10 +19,10 @@ def test_graph_single_runnable(snapshot: SnapshotAssertion) -> None:
graph = StrOutputParser().get_graph()
first_node = graph.first_node()
assert first_node is not None
assert _schema(first_node.data) == _schema(runnable.input_schema) # type: ignore[union-attr]
assert _schema(first_node.data) == runnable.get_input_jsonschema() # type: ignore[union-attr]
last_node = graph.last_node()
assert last_node is not None
assert _schema(last_node.data) == _schema(runnable.output_schema) # type: ignore[union-attr]
assert _schema(last_node.data) == runnable.get_output_jsonschema() # type: ignore[union-attr]
assert len(graph.nodes) == 3
assert len(graph.edges) == 2
assert graph.edges[0].source == first_node.id

View File

@@ -225,15 +225,15 @@ class FakeRetriever(BaseRetriever):
def test_schemas(snapshot: SnapshotAssertion) -> None:
fake = FakeRunnable() # str -> int
assert _schema(fake.input_schema) == {
assert fake.get_input_jsonschema() == {
"title": "FakeRunnableInput",
"type": "string",
}
assert _schema(fake.output_schema) == {
assert fake.get_output_jsonschema() == {
"title": "FakeRunnableOutput",
"type": "integer",
}
assert _schema(fake.config_schema(include=["tags", "metadata", "run_name"])) == {
assert fake.get_config_jsonschema(include=["tags", "metadata", "run_name"]) == {
"title": "FakeRunnableConfig",
"type": "object",
"properties": {
@@ -245,22 +245,22 @@ def test_schemas(snapshot: SnapshotAssertion) -> None:
fake_bound = FakeRunnable().bind(a="b") # str -> int
assert _schema(fake_bound.input_schema) == {
assert fake_bound.get_input_jsonschema() == {
"title": "FakeRunnableInput",
"type": "string",
}
assert _schema(fake_bound.output_schema) == {
assert fake_bound.get_output_jsonschema() == {
"title": "FakeRunnableOutput",
"type": "integer",
}
fake_w_fallbacks = FakeRunnable().with_fallbacks((fake,)) # str -> int
assert _schema(fake_w_fallbacks.input_schema) == {
assert fake_w_fallbacks.get_input_jsonschema() == {
"title": "FakeRunnableInput",
"type": "string",
}
assert _schema(fake_w_fallbacks.output_schema) == {
assert fake_w_fallbacks.get_output_jsonschema() == {
"title": "FakeRunnableOutput",
"type": "integer",
}
@@ -270,11 +270,11 @@ def test_schemas(snapshot: SnapshotAssertion) -> None:
typed_lambda = RunnableLambda(typed_lambda_impl) # str -> int
assert _schema(typed_lambda.input_schema) == {
assert typed_lambda.get_input_jsonschema() == {
"title": "typed_lambda_impl_input",
"type": "string",
}
assert _schema(typed_lambda.output_schema) == {
assert typed_lambda.get_output_jsonschema() == {
"title": "typed_lambda_impl_output",
"type": "integer",
}
@@ -284,22 +284,22 @@ def test_schemas(snapshot: SnapshotAssertion) -> None:
typed_async_lambda: Runnable = RunnableLambda(typed_async_lambda_impl) # str -> int
assert _schema(typed_async_lambda.input_schema) == {
assert typed_async_lambda.get_input_jsonschema() == {
"title": "typed_async_lambda_impl_input",
"type": "string",
}
assert _schema(typed_async_lambda.output_schema) == {
assert typed_async_lambda.get_output_jsonschema() == {
"title": "typed_async_lambda_impl_output",
"type": "integer",
}
fake_ret = FakeRetriever() # str -> List[Document]
assert _schema(fake_ret.input_schema) == {
assert fake_ret.get_input_jsonschema() == {
"title": "FakeRetrieverInput",
"type": "string",
}
assert _schema(fake_ret.output_schema) == {
assert fake_ret.get_output_jsonschema() == {
"title": "FakeRetrieverOutput",
"type": "array",
"items": {"$ref": "#/definitions/Document"},
@@ -329,16 +329,16 @@ def test_schemas(snapshot: SnapshotAssertion) -> None:
fake_llm = FakeListLLM(responses=["a"]) # str -> List[List[str]]
assert _schema(fake_llm.input_schema) == snapshot
assert _schema(fake_llm.output_schema) == {
assert fake_llm.get_input_jsonschema() == snapshot
assert fake_llm.get_output_jsonschema() == {
"title": "FakeListLLMOutput",
"type": "string",
}
fake_chat = FakeListChatModel(responses=["a"]) # str -> List[List[str]]
assert _schema(fake_chat.input_schema) == snapshot
assert _schema(fake_chat.output_schema) == snapshot
assert fake_chat.get_input_jsonschema() == snapshot
assert fake_chat.get_output_jsonschema() == snapshot
chat_prompt = ChatPromptTemplate.from_messages(
[
@@ -347,26 +347,26 @@ def test_schemas(snapshot: SnapshotAssertion) -> None:
]
)
assert _schema(chat_prompt.input_schema) == snapshot(
assert chat_prompt.get_input_jsonschema() == snapshot(
name="chat_prompt_input_schema"
)
assert _schema(chat_prompt.output_schema) == snapshot(
assert chat_prompt.get_output_jsonschema() == snapshot(
name="chat_prompt_output_schema"
)
prompt = PromptTemplate.from_template("Hello, {name}!")
assert _schema(prompt.input_schema) == {
assert prompt.get_input_jsonschema() == {
"title": "PromptInput",
"type": "object",
"properties": {"name": {"title": "Name", "type": "string"}},
"required": ["name"],
}
assert _schema(prompt.output_schema) == snapshot
assert prompt.get_output_jsonschema() == snapshot
prompt_mapper = PromptTemplate.from_template("Hello, {name}!").map()
assert _schema(prompt_mapper.input_schema) == {
assert prompt_mapper.get_input_jsonschema() == {
"definitions": {
"PromptInput": {
"properties": {"name": {"title": "Name", "type": "string"}},
@@ -379,12 +379,12 @@ def test_schemas(snapshot: SnapshotAssertion) -> None:
"type": "array",
"title": "RunnableEach<PromptTemplate>Input",
}
assert _schema(prompt_mapper.output_schema) == snapshot
assert prompt_mapper.get_output_jsonschema() == snapshot
list_parser = CommaSeparatedListOutputParser()
assert _schema(list_parser.input_schema) == snapshot
assert _schema(list_parser.output_schema) == {
assert list_parser.get_input_jsonschema() == snapshot
assert list_parser.get_output_jsonschema() == {
"title": "CommaSeparatedListOutputParserOutput",
"type": "array",
"items": {"type": "string"},
@@ -392,13 +392,13 @@ def test_schemas(snapshot: SnapshotAssertion) -> None:
seq = prompt | fake_llm | list_parser
assert _schema(seq.input_schema) == {
assert seq.get_input_jsonschema() == {
"title": "PromptInput",
"type": "object",
"properties": {"name": {"title": "Name", "type": "string"}},
"required": ["name"],
}
assert _schema(seq.output_schema) == {
assert seq.get_output_jsonschema() == {
"type": "array",
"items": {"type": "string"},
"title": "CommaSeparatedListOutputParserOutput",
@@ -406,7 +406,7 @@ def test_schemas(snapshot: SnapshotAssertion) -> None:
router: Runnable = RouterRunnable({})
assert _schema(router.input_schema) == {
assert router.get_input_jsonschema() == {
"title": "RouterRunnableInput",
"$ref": "#/definitions/RouterInput",
"definitions": {
@@ -421,7 +421,7 @@ def test_schemas(snapshot: SnapshotAssertion) -> None:
}
},
}
assert _schema(router.output_schema) == {"title": "RouterRunnableOutput"}
assert router.get_output_jsonschema() == {"title": "RouterRunnableOutput"}
seq_w_map: Runnable = (
prompt
@@ -433,13 +433,13 @@ def test_schemas(snapshot: SnapshotAssertion) -> None:
}
)
assert _schema(seq_w_map.input_schema) == {
assert seq_w_map.get_input_jsonschema() == {
"title": "PromptInput",
"type": "object",
"properties": {"name": {"title": "Name", "type": "string"}},
"required": ["name"],
}
assert _schema(seq_w_map.output_schema) == {
assert seq_w_map.get_output_jsonschema() == {
"title": "RunnableParallel<original,as_list,length>Output",
"type": "object",
"properties": {
@@ -465,12 +465,12 @@ def test_passthrough_assign_schema() -> None:
| fake_llm
)
assert _schema(seq_w_assign.input_schema) == {
assert seq_w_assign.get_input_jsonschema() == {
"properties": {"question": {"title": "Question", "type": "string"}},
"title": "RunnableSequenceInput",
"type": "object",
}
assert _schema(seq_w_assign.output_schema) == {
assert seq_w_assign.get_output_jsonschema() == {
"title": "FakeListLLMOutput",
"type": "string",
}
@@ -482,7 +482,7 @@ def test_passthrough_assign_schema() -> None:
# fallback to RunnableAssign.input_schema if next runnable doesn't have
# expected dict input_schema
assert _schema(invalid_seq_w_assign.input_schema) == {
assert invalid_seq_w_assign.get_input_jsonschema() == {
"properties": {"question": {"title": "Question"}},
"title": "RunnableParallel<context>Input",
"type": "object",
@@ -494,14 +494,14 @@ def test_passthrough_assign_schema() -> None:
)
def test_lambda_schemas() -> None:
first_lambda = lambda x: x["hello"] # noqa: E731
assert _schema(RunnableLambda(first_lambda).input_schema) == {
assert RunnableLambda(first_lambda).get_input_jsonschema() == {
"title": "RunnableLambdaInput",
"type": "object",
"properties": {"hello": {"title": "Hello"}},
}
second_lambda = lambda x, y: (x["hello"], x["bye"], y["bah"]) # noqa: E731
assert _schema(RunnableLambda(second_lambda).input_schema) == { # type: ignore[arg-type]
assert RunnableLambda(second_lambda).get_input_jsonschema() == { # type: ignore[arg-type]
"title": "RunnableLambdaInput",
"type": "object",
"properties": {"hello": {"title": "Hello"}, "bye": {"title": "Bye"}},
@@ -510,7 +510,7 @@ def test_lambda_schemas() -> None:
def get_value(input): # type: ignore[no-untyped-def]
return input["variable_name"]
assert _schema(RunnableLambda(get_value).input_schema) == {
assert RunnableLambda(get_value).get_input_jsonschema() == {
"title": "get_value_input",
"type": "object",
"properties": {"variable_name": {"title": "Variable Name"}},
@@ -519,7 +519,7 @@ def test_lambda_schemas() -> None:
async def aget_value(input): # type: ignore[no-untyped-def]
return (input["variable_name"], input.get("another"))
assert _schema(RunnableLambda(aget_value).input_schema) == {
assert RunnableLambda(aget_value).get_input_jsonschema() == {
"title": "aget_value_input",
"type": "object",
"properties": {
@@ -535,7 +535,7 @@ def test_lambda_schemas() -> None:
"byebye": input["yo"],
}
assert _schema(RunnableLambda(aget_values).input_schema) == {
assert RunnableLambda(aget_values).get_input_jsonschema() == {
"title": "aget_values_input",
"type": "object",
"properties": {
@@ -561,11 +561,9 @@ def test_lambda_schemas() -> None:
}
assert (
_schema(
RunnableLambda(
aget_values_typed # type: ignore[arg-type]
).input_schema
)
RunnableLambda(
aget_values_typed # type: ignore[arg-type]
).get_input_jsonschema()
== {
"title": "aget_values_typed_input",
"$ref": "#/definitions/InputType",
@@ -586,7 +584,7 @@ def test_lambda_schemas() -> None:
}
)
assert _schema(RunnableLambda(aget_values_typed).output_schema) == { # type: ignore[arg-type]
assert RunnableLambda(aget_values_typed).get_output_jsonschema() == { # type: ignore[arg-type]
"title": "aget_values_typed_output",
"$ref": "#/definitions/OutputType",
"definitions": {
@@ -643,7 +641,7 @@ def test_schema_complex_seq() -> None:
| StrOutputParser()
)
assert _schema(chain2.input_schema) == {
assert chain2.get_input_jsonschema() == {
"title": "RunnableParallel<city,language>Input",
"type": "object",
"properties": {
@@ -652,17 +650,17 @@ def test_schema_complex_seq() -> None:
},
}
assert _schema(chain2.output_schema) == {
assert chain2.get_output_jsonschema() == {
"title": "StrOutputParserOutput",
"type": "string",
}
assert _schema(chain2.with_types(input_type=str).input_schema) == {
assert chain2.with_types(input_type=str).get_input_jsonschema() == {
"title": "RunnableSequenceInput",
"type": "string",
}
assert _schema(chain2.with_types(input_type=int).output_schema) == {
assert chain2.with_types(input_type=int).get_output_jsonschema() == {
"title": "StrOutputParserOutput",
"type": "string",
}
@@ -670,7 +668,7 @@ def test_schema_complex_seq() -> None:
class InputType(BaseModel):
person: str
assert _schema(chain2.with_types(input_type=InputType).input_schema) == {
assert chain2.with_types(input_type=InputType).get_input_jsonschema() == {
"title": "InputType",
"type": "object",
"properties": {"person": {"title": "Person", "type": "string"}},
@@ -693,7 +691,7 @@ def test_configurable_fields() -> None:
assert fake_llm_configurable.invoke("...") == "a"
assert _schema(fake_llm_configurable.config_schema()) == {
assert fake_llm_configurable.get_config_jsonschema() == {
"title": "RunnableConfigurableFieldsConfig",
"type": "object",
"properties": {"configurable": {"$ref": "#/definitions/Configurable"}},
@@ -736,7 +734,7 @@ def test_configurable_fields() -> None:
text="Hello, John!"
)
assert _schema(prompt_configurable.config_schema()) == {
assert prompt_configurable.get_config_jsonschema() == {
"title": "RunnableConfigurableFieldsConfig",
"type": "object",
"properties": {"configurable": {"$ref": "#/definitions/Configurable"}},
@@ -764,11 +762,9 @@ def test_configurable_fields() -> None:
text="Hello, John! John!"
)
assert _schema(
prompt_configurable.with_config(
configurable={"prompt_template": "Hello {name} in {lang}"}
).input_schema
) == {
assert prompt_configurable.with_config(
configurable={"prompt_template": "Hello {name} in {lang}"}
).get_input_jsonschema() == {
"title": "PromptInput",
"type": "object",
"properties": {
@@ -782,7 +778,7 @@ def test_configurable_fields() -> None:
assert chain_configurable.invoke({"name": "John"}) == "a"
assert _schema(chain_configurable.config_schema()) == {
assert chain_configurable.get_config_jsonschema() == {
"title": "RunnableSequenceConfig",
"type": "object",
"properties": {"configurable": {"$ref": "#/definitions/Configurable"}},
@@ -819,14 +815,12 @@ def test_configurable_fields() -> None:
== "c"
)
assert _schema(
chain_configurable.with_config(
configurable={
"prompt_template": "A very good morning to you, {name} {lang}!",
"llm_responses": ["c"],
}
).input_schema
) == {
assert chain_configurable.with_config(
configurable={
"prompt_template": "A very good morning to you, {name} {lang}!",
"llm_responses": ["c"],
}
).get_input_jsonschema() == {
"title": "PromptInput",
"type": "object",
"properties": {
@@ -851,7 +845,7 @@ def test_configurable_fields() -> None:
"llm3": "a",
}
assert _schema(chain_with_map_configurable.config_schema()) == {
assert chain_with_map_configurable.get_config_jsonschema() == {
"title": "RunnableSequenceConfig",
"type": "object",
"properties": {"configurable": {"$ref": "#/definitions/Configurable"}},
@@ -952,7 +946,7 @@ def test_configurable_fields_prefix_keys() -> None:
chain = prompt | fake_llm
assert _schema(chain.config_schema()) == {
assert chain.get_config_jsonschema() == {
"title": "RunnableSequenceConfig",
"type": "object",
"properties": {"configurable": {"$ref": "#/definitions/Configurable"}},
@@ -1062,7 +1056,7 @@ def test_configurable_fields_example() -> None:
assert chain_configurable.invoke({"name": "John"}) == "a"
assert _schema(chain_configurable.config_schema()) == {
assert chain_configurable.get_config_jsonschema() == {
"title": "RunnableSequenceConfig",
"type": "object",
"properties": {"configurable": {"$ref": "#/definitions/Configurable"}},
@@ -3143,7 +3137,7 @@ def test_map_stream() -> None:
chain_pick_one = chain.pick("llm")
assert _schema(chain_pick_one.output_schema) == {
assert chain_pick_one.get_output_jsonschema() == {
"title": "RunnableSequenceOutput",
"type": "string",
}
@@ -3166,7 +3160,7 @@ def test_map_stream() -> None:
["llm", "hello"]
)
assert _schema(chain_pick_two.output_schema) == {
assert chain_pick_two.get_output_jsonschema() == {
"title": "RunnableSequenceOutput",
"type": "object",
"properties": {
@@ -3531,13 +3525,13 @@ def test_deep_stream_assign() -> None:
chain_with_assign = chain.assign(hello=itemgetter("str") | llm)
assert _schema(chain_with_assign.input_schema) == {
assert chain_with_assign.get_input_jsonschema() == {
"title": "PromptInput",
"type": "object",
"properties": {"question": {"title": "Question", "type": "string"}},
"required": ["question"],
}
assert _schema(chain_with_assign.output_schema) == {
assert chain_with_assign.get_output_jsonschema() == {
"title": "RunnableSequenceOutput",
"type": "object",
"properties": {
@@ -3582,13 +3576,13 @@ def test_deep_stream_assign() -> None:
hello=itemgetter("str") | llm,
)
assert _schema(chain_with_assign_shadow.input_schema) == {
assert chain_with_assign_shadow.get_input_jsonschema() == {
"title": "PromptInput",
"type": "object",
"properties": {"question": {"title": "Question", "type": "string"}},
"required": ["question"],
}
assert _schema(chain_with_assign_shadow.output_schema) == {
assert chain_with_assign_shadow.get_output_jsonschema() == {
"title": "RunnableSequenceOutput",
"type": "object",
"properties": {
@@ -3657,13 +3651,13 @@ async def test_deep_astream_assign() -> None:
hello=itemgetter("str") | llm,
)
assert _schema(chain_with_assign.input_schema) == {
assert chain_with_assign.get_input_jsonschema() == {
"title": "PromptInput",
"type": "object",
"properties": {"question": {"title": "Question", "type": "string"}},
"required": ["question"],
}
assert _schema(chain_with_assign.output_schema) == {
assert chain_with_assign.get_output_jsonschema() == {
"title": "RunnableSequenceOutput",
"type": "object",
"properties": {
@@ -3708,13 +3702,13 @@ async def test_deep_astream_assign() -> None:
hello=itemgetter("str") | llm,
)
assert _schema(chain_with_assign_shadow.input_schema) == {
assert chain_with_assign_shadow.get_input_jsonschema() == {
"title": "PromptInput",
"type": "object",
"properties": {"question": {"title": "Question", "type": "string"}},
"required": ["question"],
}
assert _schema(chain_with_assign_shadow.output_schema) == {
assert chain_with_assign_shadow.get_output_jsonschema() == {
"title": "RunnableSequenceOutput",
"type": "object",
"properties": {
@@ -4362,7 +4356,7 @@ def test_runnable_branch_init_coercion(branches: Sequence[Any]) -> None:
assert isinstance(body, Runnable)
assert isinstance(runnable.default, Runnable)
assert _schema(runnable.input_schema) == {"title": "RunnableBranchInput"}
assert runnable.get_input_jsonschema() == {"title": "RunnableBranchInput"}
def test_runnable_branch_invoke_call_counts(mocker: MockerFixture) -> None:
@@ -4709,7 +4703,7 @@ async def test_tool_from_runnable() -> None:
{"question": "What up"}
)
assert chain_tool.description.endswith(repr(chain))
assert _schema(chain_tool.args_schema) == _schema(chain.input_schema)
assert _schema(chain_tool.args_schema) == chain.get_input_jsonschema()
assert _schema(chain_tool.args_schema) == {
"properties": {"question": {"title": "Question", "type": "string"}},
"title": "PromptInput",
@@ -4728,8 +4722,8 @@ async def test_runnable_gen() -> None:
runnable = RunnableGenerator(gen)
assert _schema(runnable.input_schema) == {"title": "gen_input"}
assert _schema(runnable.output_schema) == {
assert runnable.get_input_jsonschema() == {"title": "gen_input"}
assert runnable.get_output_jsonschema() == {
"title": "gen_output",
"type": "integer",
}
@@ -4780,8 +4774,8 @@ async def test_runnable_gen_context_config() -> None:
runnable = RunnableGenerator(gen)
assert _schema(runnable.input_schema) == {"title": "gen_input"}
assert _schema(runnable.output_schema) == {
assert runnable.get_input_jsonschema() == {"title": "gen_input"}
assert runnable.get_output_jsonschema() == {
"title": "gen_output",
"type": "integer",
}
@@ -4914,11 +4908,11 @@ async def test_runnable_iter_context_config() -> None:
yield fake.invoke(input * 2)
yield fake.invoke(input * 3)
assert _schema(gen.input_schema) == {
assert gen.get_input_jsonschema() == {
"title": "gen_input",
"type": "string",
}
assert _schema(gen.output_schema) == {
assert gen.get_output_jsonschema() == {
"title": "gen_output",
"type": "integer",
}
@@ -4965,11 +4959,11 @@ async def test_runnable_iter_context_config() -> None:
yield await fake.ainvoke(input * 2)
yield await fake.ainvoke(input * 3)
assert _schema(agen.input_schema) == {
assert agen.get_input_jsonschema() == {
"title": "agen_input",
"type": "string",
}
assert _schema(agen.output_schema) == {
assert agen.get_output_jsonschema() == {
"title": "agen_output",
"type": "integer",
}
@@ -5032,8 +5026,8 @@ async def test_runnable_lambda_context_config() -> None:
output += fake.invoke(input * 3)
return output
assert _schema(fun.input_schema) == {"title": "fun_input", "type": "string"}
assert _schema(fun.output_schema) == {
assert fun.get_input_jsonschema() == {"title": "fun_input", "type": "string"}
assert fun.get_output_jsonschema() == {
"title": "fun_output",
"type": "integer",
}
@@ -5081,8 +5075,8 @@ async def test_runnable_lambda_context_config() -> None:
output += await fake.ainvoke(input * 3)
return output
assert _schema(afun.input_schema) == {"title": "afun_input", "type": "string"}
assert _schema(afun.output_schema) == {
assert afun.get_input_jsonschema() == {"title": "afun_input", "type": "string"}
assert afun.get_output_jsonschema() == {
"title": "afun_output",
"type": "integer",
}
@@ -5143,19 +5137,19 @@ async def test_runnable_gen_transform() -> None:
chain: Runnable = RunnableGenerator(gen_indexes, agen_indexes) | plus_one
achain = RunnableGenerator(gen_indexes, agen_indexes) | aplus_one
assert _schema(chain.input_schema) == {
assert chain.get_input_jsonschema() == {
"title": "gen_indexes_input",
"type": "integer",
}
assert _schema(chain.output_schema) == {
assert chain.get_output_jsonschema() == {
"title": "plus_one_output",
"type": "integer",
}
assert _schema(achain.input_schema) == {
assert achain.get_input_jsonschema() == {
"title": "gen_indexes_input",
"type": "integer",
}
assert _schema(achain.output_schema) == {
assert achain.get_output_jsonschema() == {
"title": "aplus_one_output",
"type": "integer",
}