From 61087b0c0d11a9ae2e536eb5888a8e7367f9ba00 Mon Sep 17 00:00:00 2001 From: Eugene Yurtsev Date: Fri, 6 Sep 2024 17:24:10 -0400 Subject: [PATCH] core[patch]: Fix changes to pydantic schema due to pydantic 2.8.2 -> 2.9 changes (#26166) Minor non-functional change in pydantic schema generation: the affected schema assertions are normalized (single-element allOf wrappers collapsed into bare $ref entries, redundant default: None values dropped) and compared against syrupy snapshots, gated on pydantic >= 2.9. --- .../prompts/__snapshots__/test_prompt.ambr | 180 ++++++++ .../tests/unit_tests/prompts/test_prompt.py | 134 ++---- libs/core/tests/unit_tests/pydantic_utils.py | 30 +- .../__snapshots__/test_runnable.ambr | 330 +++++++++++++++ .../unit_tests/runnables/test_runnable.py | 384 +++--------------- 5 files changed, 625 insertions(+), 433 deletions(-) create mode 100644 libs/core/tests/unit_tests/prompts/__snapshots__/test_prompt.ambr diff --git a/libs/core/tests/unit_tests/prompts/__snapshots__/test_prompt.ambr b/libs/core/tests/unit_tests/prompts/__snapshots__/test_prompt.ambr new file mode 100644 index 00000000000..6f32a833bde --- /dev/null +++ b/libs/core/tests/unit_tests/prompts/__snapshots__/test_prompt.ambr @@ -0,0 +1,180 @@ +# serializer version: 1 +# name: test_mustache_prompt_from_template[schema_0] + dict({ + '$defs': dict({ + 'obj': dict({ + 'properties': dict({ + 'bar': dict({ + 'title': 'Bar', + 'type': 'string', + }), + 'foo': dict({ + 'title': 'Foo', + 'type': 'string', + }), + }), + 'title': 'obj', + 'type': 'object', + }), + }), + 'properties': dict({ + 'foo': dict({ + 'title': 'Foo', + 'type': 'string', + }), + 'obj': dict({ + '$ref': '#/$defs/obj', + }), + }), + 'title': 'PromptInput', + 'type': 'object', + }) +# --- +# name: test_mustache_prompt_from_template[schema_2] + dict({ + '$defs': dict({ + 'foo': dict({ + 'properties': dict({ + 'bar': dict({ + 'title': 'Bar', + 'type': 'string', + }), + }), + 'title': 'foo', + 'type': 'object', + }), + }), + 'properties': dict({ + 'foo': dict({ + '$ref': '#/$defs/foo', + }), + }), + 'title': 'PromptInput', + 'type': 'object', + }) +# --- +# name: test_mustache_prompt_from_template[schema_3] + dict({ + '$defs': dict({ + 'baz': dict({ + 'properties': dict({ + 'qux': dict({ + 'title': 'Qux', + 'type': 'string', + }), + }), + 'title': 'baz', + 'type': 'object', + }), + 'foo': dict({ + 'properties': dict({ + 'bar': dict({ + 'title': 'Bar', + 'type': 'string', + }), + 'baz': dict({ + '$ref': '#/$defs/baz', + }), + 'quux': dict({ + 'title': 'Quux', + 'type': 'string', + }), + }), + 'title': 'foo', + 'type': 'object', + }), + }), + 'properties': dict({ + 'foo': dict({ + '$ref': '#/$defs/foo', + }), + }), + 'title': 'PromptInput', + 'type': 'object', + }) +# --- +# name: test_mustache_prompt_from_template[schema_4] + dict({ + '$defs': dict({ + 'barfoo': dict({ + 'properties': dict({ + 'foobar': dict({ + 'title': 'Foobar', + 'type': 'string', + }), + }), + 'title': 'barfoo', + 'type': 'object', + }), + 'baz': dict({ + 'properties': dict({ + 'qux': dict({ + '$ref': '#/$defs/qux', + }), + }), + 'title': 'baz', + 'type': 'object', + }), + 'foo': dict({ + 'properties': dict({ + 'bar': dict({ + 'title': 'Bar', + 'type': 'string', + }), + 'baz': dict({ + '$ref': '#/$defs/baz', + }), + 'quux': dict({ + 'title': 'Quux', + 'type': 'string', + }), + }), + 'title': 'foo', + 'type': 'object', + }), + 'qux': dict({ + 'properties': dict({ + 'barfoo': dict({ + '$ref': '#/$defs/barfoo', + }), + 'foobar': dict({ + 'title': 'Foobar', + 'type': 'string', + }), + }), + 'title': 'qux', + 'type': 'object', + }), + }), + 'properties': dict({ + 'foo': dict({ + '$ref': '#/$defs/foo', + }), + }), + 'title': 'PromptInput', + 'type': 'object', + }) +# --- +# name: 
test_mustache_prompt_from_template[schema_5] + dict({ + '$defs': dict({ + 'foo': dict({ + 'properties': dict({ + 'bar': dict({ + 'title': 'Bar', + 'type': 'string', + }), + }), + 'title': 'foo', + 'type': 'object', + }), + }), + 'properties': dict({ + 'foo': dict({ + '$ref': '#/$defs/foo', + }), + }), + 'title': 'PromptInput', + 'type': 'object', + }) +# --- diff --git a/libs/core/tests/unit_tests/prompts/test_prompt.py b/libs/core/tests/unit_tests/prompts/test_prompt.py index 396114c718d..bd73c93773c 100644 --- a/libs/core/tests/unit_tests/prompts/test_prompt.py +++ b/libs/core/tests/unit_tests/prompts/test_prompt.py @@ -3,10 +3,15 @@ from typing import Any, Dict, Union from unittest import mock +import pydantic import pytest +from syrupy import SnapshotAssertion from langchain_core.prompts.prompt import PromptTemplate from langchain_core.tracers.run_collector import RunCollectorCallbackHandler +from tests.unit_tests.pydantic_utils import _normalize_schema + +PYDANTIC_VERSION = tuple(map(int, pydantic.__version__.split("."))) def test_prompt_valid() -> None: @@ -62,7 +67,7 @@ def test_prompt_from_template() -> None: assert prompt == expected_prompt -def test_mustache_prompt_from_template() -> None: +def test_mustache_prompt_from_template(snapshot: SnapshotAssertion) -> None: """Test prompts can be constructed from a template.""" # Single input variable. template = "This is a {{foo}} test." @@ -110,24 +115,10 @@ def test_mustache_prompt_from_template() -> None: "This foo is a bar test baz." ) assert prompt.input_variables == ["foo", "obj"] - assert prompt.get_input_jsonschema() == { - "$defs": { - "obj": { - "properties": { - "bar": {"default": None, "title": "Bar", "type": "string"}, - "foo": {"default": None, "title": "Foo", "type": "string"}, - }, - "title": "obj", - "type": "object", - } - }, - "properties": { - "foo": {"default": None, "title": "Foo", "type": "string"}, - "obj": {"allOf": [{"$ref": "#/$defs/obj"}], "default": None}, - }, - "title": "PromptInput", - "type": "object", - } + if PYDANTIC_VERSION >= (2, 9): + assert _normalize_schema(prompt.get_input_jsonschema()) == snapshot( + name="schema_0" + ) # . variables template = "This {{.}} is a test." 
@@ -151,20 +142,10 @@ def test_mustache_prompt_from_template() -> None: is a test.""" ) assert prompt.input_variables == ["foo"] - assert prompt.get_input_jsonschema() == { - "$defs": { - "foo": { - "properties": { - "bar": {"default": None, "title": "Bar", "type": "string"} - }, - "title": "foo", - "type": "object", - } - }, - "properties": {"foo": {"allOf": [{"$ref": "#/$defs/foo"}], "default": None}}, - "title": "PromptInput", - "type": "object", - } + if PYDANTIC_VERSION >= (2, 9): + assert _normalize_schema(prompt.get_input_jsonschema()) == snapshot( + name="schema_2" + ) # more complex nested section/context variables template = """This{{#foo}} @@ -185,29 +166,10 @@ def test_mustache_prompt_from_template() -> None: is a test.""" ) assert prompt.input_variables == ["foo"] - assert prompt.get_input_jsonschema() == { - "$defs": { - "baz": { - "properties": { - "qux": {"default": None, "title": "Qux", "type": "string"} - }, - "title": "baz", - "type": "object", - }, - "foo": { - "properties": { - "bar": {"default": None, "title": "Bar", "type": "string"}, - "baz": {"allOf": [{"$ref": "#/$defs/baz"}], "default": None}, - "quux": {"default": None, "title": "Quux", "type": "string"}, - }, - "title": "foo", - "type": "object", - }, - }, - "properties": {"foo": {"allOf": [{"$ref": "#/$defs/foo"}], "default": None}}, - "title": "PromptInput", - "type": "object", - } + if PYDANTIC_VERSION >= (2, 9): + assert _normalize_schema(prompt.get_input_jsonschema()) == snapshot( + name="schema_3" + ) # triply nested section/context variables template = """This{{#foo}} @@ -242,44 +204,10 @@ def test_mustache_prompt_from_template() -> None: is a test.""" ) assert prompt.input_variables == ["foo"] - assert prompt.get_input_jsonschema() == { - "$defs": { - "barfoo": { - "properties": { - "foobar": {"default": None, "title": "Foobar", "type": "string"} - }, - "title": "barfoo", - "type": "object", - }, - "baz": { - "properties": { - "qux": {"allOf": [{"$ref": "#/$defs/qux"}], "default": None} - }, - "title": "baz", - "type": "object", - }, - "foo": { - "properties": { - "bar": {"default": None, "title": "Bar", "type": "string"}, - "baz": {"allOf": [{"$ref": "#/$defs/baz"}], "default": None}, - "quux": {"default": None, "title": "Quux", "type": "string"}, - }, - "title": "foo", - "type": "object", - }, - "qux": { - "properties": { - "barfoo": {"allOf": [{"$ref": "#/$defs/barfoo"}], "default": None}, - "foobar": {"default": None, "title": "Foobar", "type": "string"}, - }, - "title": "qux", - "type": "object", - }, - }, - "properties": {"foo": {"allOf": [{"$ref": "#/$defs/foo"}], "default": None}}, - "title": "PromptInput", - "type": "object", - } + if PYDANTIC_VERSION >= (2, 9): + assert _normalize_schema(prompt.get_input_jsonschema()) == snapshot( + name="schema_4" + ) # section/context variables with repeats template = """This{{#foo}} @@ -294,20 +222,10 @@ def test_mustache_prompt_from_template() -> None: is a test.""" ) assert prompt.input_variables == ["foo"] - assert prompt.get_input_jsonschema() == { - "$defs": { - "foo": { - "properties": { - "bar": {"default": None, "title": "Bar", "type": "string"} - }, - "title": "foo", - "type": "object", - } - }, - "properties": {"foo": {"allOf": [{"$ref": "#/$defs/foo"}], "default": None}}, - "title": "PromptInput", - "type": "object", - } + if PYDANTIC_VERSION >= (2, 9): + assert _normalize_schema(prompt.get_input_jsonschema()) == snapshot( + name="schema_5" + ) template = """This{{^foo}} no foos {{/foo}}is a test.""" diff --git 
a/libs/core/tests/unit_tests/pydantic_utils.py b/libs/core/tests/unit_tests/pydantic_utils.py index c9d258d3dbd..10a3318c7ba 100644 --- a/libs/core/tests/unit_tests/pydantic_utils.py +++ b/libs/core/tests/unit_tests/pydantic_utils.py @@ -1,4 +1,6 @@ -from typing import Any +from typing import Any, Dict + +from pydantic import BaseModel from langchain_core.utils.pydantic import is_basemodel_subclass @@ -92,3 +94,29 @@ def _schema(obj: Any) -> dict: _remove_enum_description(schema_) return schema_ + + +def _normalize_schema(obj: Any) -> Dict[str, Any]: + """Generate a schema and normalize it. + + This will collapse single-element allOfs into $ref. + + For example, + + 'obj': {'allOf': [{'$ref': '#/$defs/obj'}]} + + to: + + 'obj': {'$ref': '#/$defs/obj'} + + Args: + obj: The object to generate the schema for. + """ + if isinstance(obj, BaseModel): + data = obj.model_json_schema() + else: + data = obj + remove_all_none_default(data) + replace_all_of_with_ref(data) + _remove_enum_description(data) + return data diff --git a/libs/core/tests/unit_tests/runnables/__snapshots__/test_runnable.ambr b/libs/core/tests/unit_tests/runnables/__snapshots__/test_runnable.ambr index 07be8ecbd5d..bee55716e86 100644 --- a/libs/core/tests/unit_tests/runnables/__snapshots__/test_runnable.ambr +++ b/libs/core/tests/unit_tests/runnables/__snapshots__/test_runnable.ambr @@ -1521,6 +1521,305 @@ Run(id=UUID('00000000-0000-4000-8000-000000000000'), name='RunnableSequence', start_time=FakeDatetime(2023, 1, 1, 0, 0, tzinfo=datetime.timezone.utc), run_type='chain', end_time=FakeDatetime(2023, 1, 1, 0, 0, tzinfo=datetime.timezone.utc), extra={}, error=None, serialized={'lc': 1, 'type': 'constructor', 'id': ['langchain', 'schema', 'runnable', 'RunnableSequence'], 'kwargs': {'first': {'lc': 1, 'type': 'constructor', 'id': ['langchain', 'prompts', 'chat', 'ChatPromptTemplate'], 'kwargs': {'input_variables': ['question'], 'messages': [{'lc': 1, 'type': 'constructor', 'id': ['langchain', 'prompts', 'chat', 'SystemMessagePromptTemplate'], 'kwargs': {'prompt': {'lc': 1, 'type': 'constructor', 'id': ['langchain', 'prompts', 'prompt', 'PromptTemplate'], 'kwargs': {'input_variables': [], 'template': 'You are a nice assistant.', 'template_format': 'f-string'}, 'name': 'PromptTemplate', 'graph': {'nodes': [{'id': 0, 'type': 'schema', 'data': 'PromptInput'}, {'id': 1, 'type': 'runnable', 'data': {'id': ['langchain', 'prompts', 'prompt', 'PromptTemplate'], 'name': 'PromptTemplate'}}, {'id': 2, 'type': 'schema', 'data': 'PromptTemplateOutput'}], 'edges': [{'source': 0, 'target': 1}, {'source': 1, 'target': 2}]}}}}, {'lc': 1, 'type': 'constructor', 'id': ['langchain', 'prompts', 'chat', 'HumanMessagePromptTemplate'], 'kwargs': {'prompt': {'lc': 1, 'type': 'constructor', 'id': ['langchain', 'prompts', 'prompt', 'PromptTemplate'], 'kwargs': {'input_variables': ['question'], 'template': '{question}', 'template_format': 'f-string'}, 'name': 'PromptTemplate', 'graph': {'nodes': [{'id': 0, 'type': 'schema', 'data': 'PromptInput'}, {'id': 1, 'type': 'runnable', 'data': {'id': ['langchain', 'prompts', 'prompt', 'PromptTemplate'], 'name': 'PromptTemplate'}}, {'id': 2, 'type': 'schema', 'data': 'PromptTemplateOutput'}], 'edges': [{'source': 0, 'target': 1}, {'source': 1, 'target': 2}]}}}}]}, 'name': 'ChatPromptTemplate', 'graph': {'nodes': [{'id': 0, 'type': 'schema', 'data': 'PromptInput'}, {'id': 1, 'type': 'runnable', 'data': {'id': ['langchain', 'prompts', 'chat', 'ChatPromptTemplate'], 'name': 'ChatPromptTemplate'}}, {'id': 2, 'type': 'schema', 
'data': 'ChatPromptTemplateOutput'}], 'edges': [{'source': 0, 'target': 1}, {'source': 1, 'target': 2}]}}, 'middle': [{'lc': 1, 'type': 'not_implemented', 'id': ['langchain_core', 'language_models', 'fake_chat_models', 'FakeListChatModel'], 'repr': "FakeListChatModel(responses=['foo, bar'])", 'name': 'FakeListChatModel', 'graph': {'nodes': [{'id': 0, 'type': 'schema', 'data': 'FakeListChatModelInput'}, {'id': 1, 'type': 'runnable', 'data': {'id': ['langchain_core', 'language_models', 'fake_chat_models', 'FakeListChatModel'], 'name': 'FakeListChatModel'}}, {'id': 2, 'type': 'schema', 'data': 'FakeListChatModelOutput'}], 'edges': [{'source': 0, 'target': 1}, {'source': 1, 'target': 2}]}}, {'lc': 1, 'type': 'constructor', 'id': ['langchain', 'output_parsers', 'list', 'CommaSeparatedListOutputParser'], 'kwargs': {}, 'name': 'CommaSeparatedListOutputParser', 'graph': {'nodes': [{'id': 0, 'type': 'schema', 'data': 'CommaSeparatedListOutputParserInput'}, {'id': 1, 'type': 'runnable', 'data': {'id': ['langchain', 'output_parsers', 'list', 'CommaSeparatedListOutputParser'], 'name': 'CommaSeparatedListOutputParser'}}, {'id': 2, 'type': 'schema', 'data': 'CommaSeparatedListOutputParserOutput'}], 'edges': [{'source': 0, 'target': 1}, {'source': 1, 'target': 2}]}}, {'lc': 1, 'type': 'not_implemented', 'id': ['langchain_core', 'runnables', 'base', 'RunnableLambda'], 'repr': "RunnableLambda(lambda x: {'question': x[0] + x[1]})"}, {'lc': 1, 'type': 'constructor', 'id': ['langchain', 'prompts', 'chat', 'ChatPromptTemplate'], 'kwargs': {'input_variables': ['question'], 'messages': [{'lc': 1, 'type': 'constructor', 'id': ['langchain', 'prompts', 'chat', 'SystemMessagePromptTemplate'], 'kwargs': {'prompt': {'lc': 1, 'type': 'constructor', 'id': ['langchain', 'prompts', 'prompt', 'PromptTemplate'], 'kwargs': {'input_variables': [], 'template': 'You are a nicer assistant.', 'template_format': 'f-string'}, 'name': 'PromptTemplate', 'graph': {'nodes': [{'id': 0, 'type': 'schema', 'data': 'PromptInput'}, {'id': 1, 'type': 'runnable', 'data': {'id': ['langchain', 'prompts', 'prompt', 'PromptTemplate'], 'name': 'PromptTemplate'}}, {'id': 2, 'type': 'schema', 'data': 'PromptTemplateOutput'}], 'edges': [{'source': 0, 'target': 1}, {'source': 1, 'target': 2}]}}}}, {'lc': 1, 'type': 'constructor', 'id': ['langchain', 'prompts', 'chat', 'HumanMessagePromptTemplate'], 'kwargs': {'prompt': {'lc': 1, 'type': 'constructor', 'id': ['langchain', 'prompts', 'prompt', 'PromptTemplate'], 'kwargs': {'input_variables': ['question'], 'template': '{question}', 'template_format': 'f-string'}, 'name': 'PromptTemplate', 'graph': {'nodes': [{'id': 0, 'type': 'schema', 'data': 'PromptInput'}, {'id': 1, 'type': 'runnable', 'data': {'id': ['langchain', 'prompts', 'prompt', 'PromptTemplate'], 'name': 'PromptTemplate'}}, {'id': 2, 'type': 'schema', 'data': 'PromptTemplateOutput'}], 'edges': [{'source': 0, 'target': 1}, {'source': 1, 'target': 2}]}}}}]}, 'name': 'ChatPromptTemplate', 'graph': {'nodes': [{'id': 0, 'type': 'schema', 'data': 'PromptInput'}, {'id': 1, 'type': 'runnable', 'data': {'id': ['langchain', 'prompts', 'chat', 'ChatPromptTemplate'], 'name': 'ChatPromptTemplate'}}, {'id': 2, 'type': 'schema', 'data': 'ChatPromptTemplateOutput'}], 'edges': [{'source': 0, 'target': 1}, {'source': 1, 'target': 2}]}}, {'lc': 1, 'type': 'not_implemented', 'id': ['langchain_core', 'language_models', 'fake_chat_models', 'FakeListChatModel'], 'repr': "FakeListChatModel(responses=['baz, qux'])", 'name': 'FakeListChatModel', 'graph': {'nodes': 
[{'id': 0, 'type': 'schema', 'data': 'FakeListChatModelInput'}, {'id': 1, 'type': 'runnable', 'data': {'id': ['langchain_core', 'language_models', 'fake_chat_models', 'FakeListChatModel'], 'name': 'FakeListChatModel'}}, {'id': 2, 'type': 'schema', 'data': 'FakeListChatModelOutput'}], 'edges': [{'source': 0, 'target': 1}, {'source': 1, 'target': 2}]}}], 'last': {'lc': 1, 'type': 'constructor', 'id': ['langchain', 'output_parsers', 'list', 'CommaSeparatedListOutputParser'], 'kwargs': {}, 'name': 'CommaSeparatedListOutputParser', 'graph': {'nodes': [{'id': 0, 'type': 'schema', 'data': 'CommaSeparatedListOutputParserInput'}, {'id': 1, 'type': 'runnable', 'data': {'id': ['langchain', 'output_parsers', 'list', 'CommaSeparatedListOutputParser'], 'name': 'CommaSeparatedListOutputParser'}}, {'id': 2, 'type': 'schema', 'data': 'CommaSeparatedListOutputParserOutput'}], 'edges': [{'source': 0, 'target': 1}, {'source': 1, 'target': 2}]}}}, 'name': 'RunnableSequence', 'graph': {'nodes': [{'id': 0, 'type': 'schema', 'data': 'PromptInput'}, {'id': 1, 'type': 'runnable', 'data': {'id': ['langchain', 'prompts', 'chat', 'ChatPromptTemplate'], 'name': 'ChatPromptTemplate'}}, {'id': 2, 'type': 'runnable', 'data': {'id': ['langchain_core', 'language_models', 'fake_chat_models', 'FakeListChatModel'], 'name': 'FakeListChatModel'}}, {'id': 3, 'type': 'runnable', 'data': {'id': ['langchain', 'output_parsers', 'list', 'CommaSeparatedListOutputParser'], 'name': 'CommaSeparatedListOutputParser'}}, {'id': 4, 'type': 'runnable', 'data': {'id': ['langchain_core', 'runnables', 'base', 'RunnableLambda'], 'name': 'Lambda'}}, {'id': 5, 'type': 'runnable', 'data': {'id': ['langchain', 'prompts', 'chat', 'ChatPromptTemplate'], 'name': 'ChatPromptTemplate'}}, {'id': 6, 'type': 'runnable', 'data': {'id': ['langchain_core', 'language_models', 'fake_chat_models', 'FakeListChatModel'], 'name': 'FakeListChatModel'}}, {'id': 7, 'type': 'runnable', 'data': {'id': ['langchain', 'output_parsers', 'list', 'CommaSeparatedListOutputParser'], 'name': 'CommaSeparatedListOutputParser'}}, {'id': 8, 'type': 'schema', 'data': 'CommaSeparatedListOutputParserOutput'}], 'edges': [{'source': 0, 'target': 1}, {'source': 1, 'target': 2}, {'source': 2, 'target': 3}, {'source': 3, 'target': 4}, {'source': 4, 'target': 5}, {'source': 5, 'target': 6}, {'source': 7, 'target': 8}, {'source': 6, 'target': 7}]}}, events=[{'name': 'start', 'time': FakeDatetime(2023, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)}, {'name': 'end', 'time': FakeDatetime(2023, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)}], inputs={'question': 'What is your name?'}, outputs={'output': ['baz', 'qux']}, reference_example_id=None, parent_run_id=None, tags=[], child_runs=[Run(id=UUID('00000000-0000-4000-8000-000000000001'), name='ChatPromptTemplate', start_time=FakeDatetime(2023, 1, 1, 0, 0, tzinfo=datetime.timezone.utc), run_type='prompt', end_time=FakeDatetime(2023, 1, 1, 0, 0, tzinfo=datetime.timezone.utc), extra={}, error=None, serialized={'lc': 1, 'type': 'constructor', 'id': ['langchain', 'prompts', 'chat', 'ChatPromptTemplate'], 'kwargs': {'input_variables': ['question'], 'messages': [{'lc': 1, 'type': 'constructor', 'id': ['langchain', 'prompts', 'chat', 'SystemMessagePromptTemplate'], 'kwargs': {'prompt': {'lc': 1, 'type': 'constructor', 'id': ['langchain', 'prompts', 'prompt', 'PromptTemplate'], 'kwargs': {'input_variables': [], 'template': 'You are a nice assistant.', 'template_format': 'f-string'}, 'name': 'PromptTemplate', 'graph': {'nodes': [{'id': 0, 'type': 'schema', 'data': 
'PromptInput'}, {'id': 1, 'type': 'runnable', 'data': {'id': ['langchain', 'prompts', 'prompt', 'PromptTemplate'], 'name': 'PromptTemplate'}}, {'id': 2, 'type': 'schema', 'data': 'PromptTemplateOutput'}], 'edges': [{'source': 0, 'target': 1}, {'source': 1, 'target': 2}]}}}}, {'lc': 1, 'type': 'constructor', 'id': ['langchain', 'prompts', 'chat', 'HumanMessagePromptTemplate'], 'kwargs': {'prompt': {'lc': 1, 'type': 'constructor', 'id': ['langchain', 'prompts', 'prompt', 'PromptTemplate'], 'kwargs': {'input_variables': ['question'], 'template': '{question}', 'template_format': 'f-string'}, 'name': 'PromptTemplate', 'graph': {'nodes': [{'id': 0, 'type': 'schema', 'data': 'PromptInput'}, {'id': 1, 'type': 'runnable', 'data': {'id': ['langchain', 'prompts', 'prompt', 'PromptTemplate'], 'name': 'PromptTemplate'}}, {'id': 2, 'type': 'schema', 'data': 'PromptTemplateOutput'}], 'edges': [{'source': 0, 'target': 1}, {'source': 1, 'target': 2}]}}}}]}, 'name': 'ChatPromptTemplate', 'graph': {'nodes': [{'id': 0, 'type': 'schema', 'data': 'PromptInput'}, {'id': 1, 'type': 'runnable', 'data': {'id': ['langchain', 'prompts', 'chat', 'ChatPromptTemplate'], 'name': 'ChatPromptTemplate'}}, {'id': 2, 'type': 'schema', 'data': 'ChatPromptTemplateOutput'}], 'edges': [{'source': 0, 'target': 1}, {'source': 1, 'target': 2}]}}, events=[{'name': 'start', 'time': FakeDatetime(2023, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)}, {'name': 'end', 'time': FakeDatetime(2023, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)}], inputs={'question': 'What is your name?'}, outputs={'output': ChatPromptValue(messages=[SystemMessage(content='You are a nice assistant.', additional_kwargs={}, response_metadata={}), HumanMessage(content='What is your name?', additional_kwargs={}, response_metadata={})])}, reference_example_id=None, parent_run_id=UUID('00000000-0000-4000-8000-000000000000'), tags=['seq:step:1'], child_runs=[], trace_id=UUID('00000000-0000-4000-8000-000000000000'), dotted_order='20230101T000000000000Z00000000-0000-4000-8000-000000000000.20230101T000000000000Z00000000-0000-4000-8000-000000000001'), Run(id=UUID('00000000-0000-4000-8000-000000000002'), name='FakeListChatModel', start_time=FakeDatetime(2023, 1, 1, 0, 0, tzinfo=datetime.timezone.utc), run_type='llm', end_time=FakeDatetime(2023, 1, 1, 0, 0, tzinfo=datetime.timezone.utc), extra={'invocation_params': {'responses': ['foo, bar'], '_type': 'fake-list-chat-model', 'stop': None}, 'options': {'stop': None}, 'batch_size': 1, 'metadata': {'ls_provider': 'fakelistchatmodel', 'ls_model_type': 'chat'}}, error=None, serialized={'lc': 1, 'type': 'not_implemented', 'id': ['langchain_core', 'language_models', 'fake_chat_models', 'FakeListChatModel'], 'repr': "FakeListChatModel(responses=['foo, bar'])", 'name': 'FakeListChatModel', 'graph': {'nodes': [{'id': 0, 'type': 'schema', 'data': 'FakeListChatModelInput'}, {'id': 1, 'type': 'runnable', 'data': {'id': ['langchain_core', 'language_models', 'fake_chat_models', 'FakeListChatModel'], 'name': 'FakeListChatModel'}}, {'id': 2, 'type': 'schema', 'data': 'FakeListChatModelOutput'}], 'edges': [{'source': 0, 'target': 1}, {'source': 1, 'target': 2}]}}, events=[{'name': 'start', 'time': FakeDatetime(2023, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)}, {'name': 'end', 'time': FakeDatetime(2023, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)}], inputs={'prompts': ['System: You are a nice assistant.\nHuman: What is your name?']}, outputs={'generations': [[{'text': 'foo, bar', 'generation_info': None, 'type': 'ChatGeneration', 'message': {'lc': 1, 
'type': 'constructor', 'id': ['langchain', 'schema', 'messages', 'AIMessage'], 'kwargs': {'content': 'foo, bar', 'type': 'ai', 'id': 'run-00000000-0000-4000-8000-000000000002-0', 'tool_calls': [], 'invalid_tool_calls': []}}}]], 'llm_output': None, 'run': None}, reference_example_id=None, parent_run_id=UUID('00000000-0000-4000-8000-000000000000'), tags=['seq:step:2'], child_runs=[], trace_id=UUID('00000000-0000-4000-8000-000000000000'), dotted_order='20230101T000000000000Z00000000-0000-4000-8000-000000000000.20230101T000000000000Z00000000-0000-4000-8000-000000000002'), Run(id=UUID('00000000-0000-4000-8000-000000000003'), name='CommaSeparatedListOutputParser', start_time=FakeDatetime(2023, 1, 1, 0, 0, tzinfo=datetime.timezone.utc), run_type='parser', end_time=FakeDatetime(2023, 1, 1, 0, 0, tzinfo=datetime.timezone.utc), extra={}, error=None, serialized={'lc': 1, 'type': 'constructor', 'id': ['langchain', 'output_parsers', 'list', 'CommaSeparatedListOutputParser'], 'kwargs': {}, 'name': 'CommaSeparatedListOutputParser', 'graph': {'nodes': [{'id': 0, 'type': 'schema', 'data': 'CommaSeparatedListOutputParserInput'}, {'id': 1, 'type': 'runnable', 'data': {'id': ['langchain', 'output_parsers', 'list', 'CommaSeparatedListOutputParser'], 'name': 'CommaSeparatedListOutputParser'}}, {'id': 2, 'type': 'schema', 'data': 'CommaSeparatedListOutputParserOutput'}], 'edges': [{'source': 0, 'target': 1}, {'source': 1, 'target': 2}]}}, events=[{'name': 'start', 'time': FakeDatetime(2023, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)}, {'name': 'end', 'time': FakeDatetime(2023, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)}], inputs={'input': AIMessage(content='foo, bar', additional_kwargs={}, response_metadata={}, id='00000000-0000-4000-8000-000000000004')}, outputs={'output': ['foo', 'bar']}, reference_example_id=None, parent_run_id=UUID('00000000-0000-4000-8000-000000000000'), tags=['seq:step:3'], child_runs=[], trace_id=UUID('00000000-0000-4000-8000-000000000000'), dotted_order='20230101T000000000000Z00000000-0000-4000-8000-000000000000.20230101T000000000000Z00000000-0000-4000-8000-000000000003'), Run(id=UUID('00000000-0000-4000-8000-000000000005'), name='RunnableLambda', start_time=FakeDatetime(2023, 1, 1, 0, 0, tzinfo=datetime.timezone.utc), run_type='chain', end_time=FakeDatetime(2023, 1, 1, 0, 0, tzinfo=datetime.timezone.utc), extra={}, error=None, serialized={'lc': 1, 'type': 'not_implemented', 'id': ['langchain_core', 'runnables', 'base', 'RunnableLambda'], 'repr': "RunnableLambda(lambda x: {'question': x[0] + x[1]})"}, events=[{'name': 'start', 'time': FakeDatetime(2023, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)}, {'name': 'end', 'time': FakeDatetime(2023, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)}], inputs={'input': ['foo', 'bar']}, outputs={'question': 'foobar'}, reference_example_id=None, parent_run_id=UUID('00000000-0000-4000-8000-000000000000'), tags=['seq:step:4'], child_runs=[], trace_id=UUID('00000000-0000-4000-8000-000000000000'), dotted_order='20230101T000000000000Z00000000-0000-4000-8000-000000000000.20230101T000000000000Z00000000-0000-4000-8000-000000000005'), Run(id=UUID('00000000-0000-4000-8000-000000000006'), name='ChatPromptTemplate', start_time=FakeDatetime(2023, 1, 1, 0, 0, tzinfo=datetime.timezone.utc), run_type='prompt', end_time=FakeDatetime(2023, 1, 1, 0, 0, tzinfo=datetime.timezone.utc), extra={}, error=None, serialized={'lc': 1, 'type': 'constructor', 'id': ['langchain', 'prompts', 'chat', 'ChatPromptTemplate'], 'kwargs': {'input_variables': ['question'], 'messages': [{'lc': 1, 
'type': 'constructor', 'id': ['langchain', 'prompts', 'chat', 'SystemMessagePromptTemplate'], 'kwargs': {'prompt': {'lc': 1, 'type': 'constructor', 'id': ['langchain', 'prompts', 'prompt', 'PromptTemplate'], 'kwargs': {'input_variables': [], 'template': 'You are a nicer assistant.', 'template_format': 'f-string'}, 'name': 'PromptTemplate', 'graph': {'nodes': [{'id': 0, 'type': 'schema', 'data': 'PromptInput'}, {'id': 1, 'type': 'runnable', 'data': {'id': ['langchain', 'prompts', 'prompt', 'PromptTemplate'], 'name': 'PromptTemplate'}}, {'id': 2, 'type': 'schema', 'data': 'PromptTemplateOutput'}], 'edges': [{'source': 0, 'target': 1}, {'source': 1, 'target': 2}]}}}}, {'lc': 1, 'type': 'constructor', 'id': ['langchain', 'prompts', 'chat', 'HumanMessagePromptTemplate'], 'kwargs': {'prompt': {'lc': 1, 'type': 'constructor', 'id': ['langchain', 'prompts', 'prompt', 'PromptTemplate'], 'kwargs': {'input_variables': ['question'], 'template': '{question}', 'template_format': 'f-string'}, 'name': 'PromptTemplate', 'graph': {'nodes': [{'id': 0, 'type': 'schema', 'data': 'PromptInput'}, {'id': 1, 'type': 'runnable', 'data': {'id': ['langchain', 'prompts', 'prompt', 'PromptTemplate'], 'name': 'PromptTemplate'}}, {'id': 2, 'type': 'schema', 'data': 'PromptTemplateOutput'}], 'edges': [{'source': 0, 'target': 1}, {'source': 1, 'target': 2}]}}}}]}, 'name': 'ChatPromptTemplate', 'graph': {'nodes': [{'id': 0, 'type': 'schema', 'data': 'PromptInput'}, {'id': 1, 'type': 'runnable', 'data': {'id': ['langchain', 'prompts', 'chat', 'ChatPromptTemplate'], 'name': 'ChatPromptTemplate'}}, {'id': 2, 'type': 'schema', 'data': 'ChatPromptTemplateOutput'}], 'edges': [{'source': 0, 'target': 1}, {'source': 1, 'target': 2}]}}, events=[{'name': 'start', 'time': FakeDatetime(2023, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)}, {'name': 'end', 'time': FakeDatetime(2023, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)}], inputs={'question': 'foobar'}, outputs={'output': ChatPromptValue(messages=[SystemMessage(content='You are a nicer assistant.', additional_kwargs={}, response_metadata={}), HumanMessage(content='foobar', additional_kwargs={}, response_metadata={})])}, reference_example_id=None, parent_run_id=UUID('00000000-0000-4000-8000-000000000000'), tags=['seq:step:5'], child_runs=[], trace_id=UUID('00000000-0000-4000-8000-000000000000'), dotted_order='20230101T000000000000Z00000000-0000-4000-8000-000000000000.20230101T000000000000Z00000000-0000-4000-8000-000000000006'), Run(id=UUID('00000000-0000-4000-8000-000000000007'), name='FakeListChatModel', start_time=FakeDatetime(2023, 1, 1, 0, 0, tzinfo=datetime.timezone.utc), run_type='llm', end_time=FakeDatetime(2023, 1, 1, 0, 0, tzinfo=datetime.timezone.utc), extra={'invocation_params': {'responses': ['baz, qux'], '_type': 'fake-list-chat-model', 'stop': None}, 'options': {'stop': None}, 'batch_size': 1, 'metadata': {'ls_provider': 'fakelistchatmodel', 'ls_model_type': 'chat'}}, error=None, serialized={'lc': 1, 'type': 'not_implemented', 'id': ['langchain_core', 'language_models', 'fake_chat_models', 'FakeListChatModel'], 'repr': "FakeListChatModel(responses=['baz, qux'])", 'name': 'FakeListChatModel', 'graph': {'nodes': [{'id': 0, 'type': 'schema', 'data': 'FakeListChatModelInput'}, {'id': 1, 'type': 'runnable', 'data': {'id': ['langchain_core', 'language_models', 'fake_chat_models', 'FakeListChatModel'], 'name': 'FakeListChatModel'}}, {'id': 2, 'type': 'schema', 'data': 'FakeListChatModelOutput'}], 'edges': [{'source': 0, 'target': 1}, {'source': 1, 'target': 2}]}}, events=[{'name': 
'start', 'time': FakeDatetime(2023, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)}, {'name': 'end', 'time': FakeDatetime(2023, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)}], inputs={'prompts': ['System: You are a nicer assistant.\nHuman: foobar']}, outputs={'generations': [[{'text': 'baz, qux', 'generation_info': None, 'type': 'ChatGeneration', 'message': {'lc': 1, 'type': 'constructor', 'id': ['langchain', 'schema', 'messages', 'AIMessage'], 'kwargs': {'content': 'baz, qux', 'type': 'ai', 'id': 'run-00000000-0000-4000-8000-000000000006-0', 'tool_calls': [], 'invalid_tool_calls': []}}}]], 'llm_output': None, 'run': None}, reference_example_id=None, parent_run_id=UUID('00000000-0000-4000-8000-000000000000'), tags=['seq:step:6'], child_runs=[], trace_id=UUID('00000000-0000-4000-8000-000000000000'), dotted_order='20230101T000000000000Z00000000-0000-4000-8000-000000000000.20230101T000000000000Z00000000-0000-4000-8000-000000000007'), Run(id=UUID('00000000-0000-4000-8000-000000000008'), name='CommaSeparatedListOutputParser', start_time=FakeDatetime(2023, 1, 1, 0, 0, tzinfo=datetime.timezone.utc), run_type='parser', end_time=FakeDatetime(2023, 1, 1, 0, 0, tzinfo=datetime.timezone.utc), extra={}, error=None, serialized={'lc': 1, 'type': 'constructor', 'id': ['langchain', 'output_parsers', 'list', 'CommaSeparatedListOutputParser'], 'kwargs': {}, 'name': 'CommaSeparatedListOutputParser', 'graph': {'nodes': [{'id': 0, 'type': 'schema', 'data': 'CommaSeparatedListOutputParserInput'}, {'id': 1, 'type': 'runnable', 'data': {'id': ['langchain', 'output_parsers', 'list', 'CommaSeparatedListOutputParser'], 'name': 'CommaSeparatedListOutputParser'}}, {'id': 2, 'type': 'schema', 'data': 'CommaSeparatedListOutputParserOutput'}], 'edges': [{'source': 0, 'target': 1}, {'source': 1, 'target': 2}]}}, events=[{'name': 'start', 'time': FakeDatetime(2023, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)}, {'name': 'end', 'time': FakeDatetime(2023, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)}], inputs={'input': AIMessage(content='baz, qux', additional_kwargs={}, response_metadata={}, id='00000000-0000-4000-8000-000000000009')}, outputs={'output': ['baz', 'qux']}, reference_example_id=None, parent_run_id=UUID('00000000-0000-4000-8000-000000000000'), tags=['seq:step:7'], child_runs=[], trace_id=UUID('00000000-0000-4000-8000-000000000000'), dotted_order='20230101T000000000000Z00000000-0000-4000-8000-000000000000.20230101T000000000000Z00000000-0000-4000-8000-000000000008')], trace_id=UUID('00000000-0000-4000-8000-000000000000'), dotted_order='20230101T000000000000Z00000000-0000-4000-8000-000000000000'), ]) # --- +# name: test_configurable_fields[schema2] + dict({ + '$defs': dict({ + 'Configurable': dict({ + 'properties': dict({ + 'llm_responses': dict({ + 'default': list([ + 'a', + ]), + 'description': 'A list of fake responses for this LLM', + 'items': dict({ + 'type': 'string', + }), + 'title': 'LLM Responses', + 'type': 'array', + }), + }), + 'title': 'Configurable', + 'type': 'object', + }), + }), + 'properties': dict({ + 'configurable': dict({ + '$ref': '#/$defs/Configurable', + }), + }), + 'title': 'RunnableConfigurableFieldsConfig', + 'type': 'object', + }) +# --- +# name: test_configurable_fields[schema3] + dict({ + '$defs': dict({ + 'Configurable': dict({ + 'properties': dict({ + 'prompt_template': dict({ + 'default': 'Hello, {name}!', + 'description': 'The prompt template for this chain', + 'title': 'Prompt Template', + 'type': 'string', + }), + }), + 'title': 'Configurable', + 'type': 'object', + }), + }), + 'properties': 
dict({ + 'configurable': dict({ + '$ref': '#/$defs/Configurable', + }), + }), + 'title': 'RunnableConfigurableFieldsConfig', + 'type': 'object', + }) +# --- +# name: test_configurable_fields[schema4] + dict({ + '$defs': dict({ + 'Configurable': dict({ + 'properties': dict({ + 'llm_responses': dict({ + 'default': list([ + 'a', + ]), + 'description': 'A list of fake responses for this LLM', + 'items': dict({ + 'type': 'string', + }), + 'title': 'LLM Responses', + 'type': 'array', + }), + 'prompt_template': dict({ + 'default': 'Hello, {name}!', + 'description': 'The prompt template for this chain', + 'title': 'Prompt Template', + 'type': 'string', + }), + }), + 'title': 'Configurable', + 'type': 'object', + }), + }), + 'properties': dict({ + 'configurable': dict({ + '$ref': '#/$defs/Configurable', + }), + }), + 'title': 'RunnableSequenceConfig', + 'type': 'object', + }) +# --- +# name: test_configurable_fields[schema5] + dict({ + '$defs': dict({ + 'Configurable': dict({ + 'properties': dict({ + 'llm_responses': dict({ + 'default': list([ + 'a', + ]), + 'description': 'A list of fake responses for this LLM', + 'items': dict({ + 'type': 'string', + }), + 'title': 'LLM Responses', + 'type': 'array', + }), + 'other_responses': dict({ + 'default': list([ + 'a', + ]), + 'items': dict({ + 'type': 'string', + }), + 'title': 'Other Responses', + 'type': 'array', + }), + 'prompt_template': dict({ + 'default': 'Hello, {name}!', + 'description': 'The prompt template for this chain', + 'title': 'Prompt Template', + 'type': 'string', + }), + }), + 'title': 'Configurable', + 'type': 'object', + }), + }), + 'properties': dict({ + 'configurable': dict({ + '$ref': '#/$defs/Configurable', + }), + }), + 'title': 'RunnableSequenceConfig', + 'type': 'object', + }) +# --- +# name: test_configurable_fields_example[schema7] + dict({ + '$defs': dict({ + 'Chat_Responses': dict({ + 'enum': list([ + 'hello', + 'bye', + 'helpful', + ]), + 'title': 'Chat Responses', + 'type': 'string', + }), + 'Configurable': dict({ + 'properties': dict({ + 'chat_responses': dict({ + 'default': list([ + 'hello', + 'bye', + ]), + 'items': dict({ + '$ref': '#/$defs/Chat_Responses', + }), + 'title': 'Chat Responses', + 'type': 'array', + }), + 'llm': dict({ + '$ref': '#/$defs/LLM', + 'default': 'default', + }), + 'llm_responses': dict({ + 'default': list([ + 'a', + ]), + 'description': 'A list of fake responses for this LLM', + 'items': dict({ + 'type': 'string', + }), + 'title': 'LLM Responses', + 'type': 'array', + }), + 'prompt_template': dict({ + '$ref': '#/$defs/Prompt_Template', + 'default': 'hello', + 'description': 'The prompt template for this chain', + }), + }), + 'title': 'Configurable', + 'type': 'object', + }), + 'LLM': dict({ + 'enum': list([ + 'chat', + 'default', + ]), + 'title': 'LLM', + 'type': 'string', + }), + 'Prompt_Template': dict({ + 'enum': list([ + 'hello', + 'good_morning', + ]), + 'title': 'Prompt Template', + 'type': 'string', + }), + }), + 'properties': dict({ + 'configurable': dict({ + '$ref': '#/$defs/Configurable', + }), + }), + 'title': 'RunnableSequenceConfig', + 'type': 'object', + }) +# --- +# name: test_configurable_fields_prefix_keys[schema6] + dict({ + 'definitions': dict({ + 'Chat_Responses': dict({ + 'enum': list([ + 'hello', + 'bye', + 'helpful', + ]), + 'title': 'Chat Responses', + 'type': 'string', + }), + 'Configurable': dict({ + 'properties': dict({ + 'chat_sleep': dict({ + 'anyOf': list([ + dict({ + 'type': 'number', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + 'title': 
'Chat Sleep', + }), + 'llm': dict({ + '$ref': '#/definitions/LLM', + 'default': 'default', + }), + 'llm==chat/responses': dict({ + 'default': list([ + 'hello', + 'bye', + ]), + 'items': dict({ + '$ref': '#/definitions/Chat_Responses', + }), + 'title': 'Chat Responses', + 'type': 'array', + }), + 'llm==default/responses': dict({ + 'default': list([ + 'a', + ]), + 'description': 'A list of fake responses for this LLM', + 'items': dict({ + 'type': 'string', + }), + 'title': 'LLM Responses', + 'type': 'array', + }), + 'prompt_template': dict({ + '$ref': '#/definitions/Prompt_Template', + 'default': 'hello', + 'description': 'The prompt template for this chain', + }), + }), + 'title': 'Configurable', + 'type': 'object', + }), + 'LLM': dict({ + 'enum': list([ + 'chat', + 'default', + ]), + 'title': 'LLM', + 'type': 'string', + }), + 'Prompt_Template': dict({ + 'enum': list([ + 'hello', + 'good_morning', + ]), + 'title': 'Prompt Template', + 'type': 'string', + }), + }), + 'properties': dict({ + 'configurable': dict({ + '$ref': '#/definitions/Configurable', + }), + }), + 'title': 'RunnableSequenceConfig', + 'type': 'object', + }) +# --- # name: test_each ''' { @@ -2338,6 +2637,37 @@ } ''' # --- +# name: test_lambda_schemas[schema8] + dict({ + '$defs': dict({ + 'OutputType': dict({ + 'properties': dict({ + 'bye': dict({ + 'title': 'Bye', + 'type': 'string', + }), + 'byebye': dict({ + 'title': 'Byebye', + 'type': 'integer', + }), + 'hello': dict({ + 'title': 'Hello', + 'type': 'string', + }), + }), + 'required': list([ + 'hello', + 'bye', + 'byebye', + ]), + 'title': 'OutputType', + 'type': 'object', + }), + }), + '$ref': '#/$defs/OutputType', + 'title': 'aget_values_typed_output', + }) +# --- # name: test_prompt_with_chat_model ''' ChatPromptTemplate(input_variables=['question'], input_types={}, partial_variables={}, messages=[SystemMessagePromptTemplate(prompt=PromptTemplate(input_variables=[], input_types={}, partial_variables={}, template='You are a nice assistant.'), additional_kwargs={}), HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=['question'], input_types={}, partial_variables={}, template='{question}'), additional_kwargs={})]) diff --git a/libs/core/tests/unit_tests/runnables/test_runnable.py b/libs/core/tests/unit_tests/runnables/test_runnable.py index ab2957035d5..165fc77030d 100644 --- a/libs/core/tests/unit_tests/runnables/test_runnable.py +++ b/libs/core/tests/unit_tests/runnables/test_runnable.py @@ -18,6 +18,7 @@ from typing import ( ) from uuid import UUID +import pydantic import pytest from freezegun import freeze_time from pydantic import BaseModel, Field @@ -90,9 +91,11 @@ from langchain_core.tracers import ( RunLogPatch, ) from langchain_core.tracers.context import collect_runs -from tests.unit_tests.pydantic_utils import _schema +from tests.unit_tests.pydantic_utils import _normalize_schema, _schema from tests.unit_tests.stubs import AnyStr, _AnyIdAIMessage, _AnyIdAIMessageChunk +PYDANTIC_VERSION = tuple(map(int, pydantic.__version__.split("."))) + class FakeTracer(BaseTracer): """Fake tracer that records LangChain execution. @@ -543,7 +546,7 @@ def test_passthrough_assign_schema() -> None: @pytest.mark.skipif( sys.version_info < (3, 9), reason="Requires python version >= 3.9 to run." 
) -def test_lambda_schemas() -> None: +def test_lambda_schemas(snapshot: SnapshotAssertion) -> None: first_lambda = lambda x: x["hello"] # noqa: E731 assert RunnableLambda(first_lambda).get_input_jsonschema() == { "title": "RunnableLambdaInput", @@ -617,45 +620,37 @@ def test_lambda_schemas() -> None: } assert ( - RunnableLambda( - aget_values_typed # type: ignore[arg-type] - ).get_input_jsonschema() - == { - "$defs": { - "InputType": { - "properties": { - "variable_name": { - "title": "Variable " "Name", - "type": "string", + _normalize_schema( + RunnableLambda( + aget_values_typed # type: ignore[arg-type] + ).get_input_jsonschema() + ) + == _normalize_schema( + { + "$defs": { + "InputType": { + "properties": { + "variable_name": { + "title": "Variable " "Name", + "type": "string", + }, + "yo": {"title": "Yo", "type": "integer"}, }, - "yo": {"title": "Yo", "type": "integer"}, - }, - "required": ["variable_name", "yo"], - "title": "InputType", - "type": "object", - } - }, - "allOf": [{"$ref": "#/$defs/InputType"}], - "title": "aget_values_typed_input", - } + "required": ["variable_name", "yo"], + "title": "InputType", + "type": "object", + } + }, + "allOf": [{"$ref": "#/$defs/InputType"}], + "title": "aget_values_typed_input", + } + ) ) - assert RunnableLambda(aget_values_typed).get_output_jsonschema() == { # type: ignore[arg-type] - "$defs": { - "OutputType": { - "properties": { - "bye": {"title": "Bye", "type": "string"}, - "byebye": {"title": "Byebye", "type": "integer"}, - "hello": {"title": "Hello", "type": "string"}, - }, - "required": ["hello", "bye", "byebye"], - "title": "OutputType", - "type": "object", - } - }, - "allOf": [{"$ref": "#/$defs/OutputType"}], - "title": "aget_values_typed_output", - } + if PYDANTIC_VERSION >= (2, 9): + assert _normalize_schema( + RunnableLambda(aget_values_typed).get_output_jsonschema() # type: ignore + ) == snapshot(name="schema8") def test_with_types_with_type_generics() -> None: @@ -752,7 +747,7 @@ def test_schema_complex_seq() -> None: } -def test_configurable_fields() -> None: +def test_configurable_fields(snapshot: SnapshotAssertion) -> None: fake_llm = FakeListLLM(responses=["a"]) # str -> List[List[str]] assert fake_llm.invoke("...") == "a" @@ -767,38 +762,10 @@ def test_configurable_fields() -> None: assert fake_llm_configurable.invoke("...") == "a" - assert fake_llm_configurable.get_config_jsonschema() == { - "$defs": { - "Configurable": { - "properties": { - "llm_responses": { - "default": ["a"], - "description": "A " - "list " - "of " - "fake " - "responses " - "for " - "this " - "LLM", - "items": {"type": "string"}, - "title": "LLM " "Responses", - "type": "array", - } - }, - "title": "Configurable", - "type": "object", - } - }, - "properties": { - "configurable": { - "allOf": [{"$ref": "#/$defs/Configurable"}], - "default": None, - } - }, - "title": "RunnableConfigurableFieldsConfig", - "type": "object", - } + if PYDANTIC_VERSION >= (2, 9): + assert _normalize_schema( + fake_llm_configurable.get_config_jsonschema() + ) == snapshot(name="schema2") fake_llm_configured = fake_llm_configurable.with_config( configurable={"llm_responses": ["b"]} @@ -822,35 +789,10 @@ def test_configurable_fields() -> None: text="Hello, John!" 
) - assert prompt_configurable.get_config_jsonschema() == { - "$defs": { - "Configurable": { - "properties": { - "prompt_template": { - "default": "Hello, " "{name}!", - "description": "The " - "prompt " - "template " - "for " - "this " - "chain", - "title": "Prompt " "Template", - "type": "string", - } - }, - "title": "Configurable", - "type": "object", - } - }, - "properties": { - "configurable": { - "allOf": [{"$ref": "#/$defs/Configurable"}], - "default": None, - } - }, - "title": "RunnableConfigurableFieldsConfig", - "type": "object", - } + if PYDANTIC_VERSION >= (2, 9): + assert _normalize_schema( + prompt_configurable.get_config_jsonschema() + ) == snapshot(name="schema3") prompt_configured = prompt_configurable.with_config( configurable={"prompt_template": "Hello, {name}! {name}!"} @@ -876,49 +818,10 @@ def test_configurable_fields() -> None: assert chain_configurable.invoke({"name": "John"}) == "a" - assert chain_configurable.get_config_jsonschema() == { - "$defs": { - "Configurable": { - "properties": { - "llm_responses": { - "default": ["a"], - "description": "A " - "list " - "of " - "fake " - "responses " - "for " - "this " - "LLM", - "items": {"type": "string"}, - "title": "LLM " "Responses", - "type": "array", - }, - "prompt_template": { - "default": "Hello, " "{name}!", - "description": "The " - "prompt " - "template " - "for " - "this " - "chain", - "title": "Prompt " "Template", - "type": "string", - }, - }, - "title": "Configurable", - "type": "object", - } - }, - "properties": { - "configurable": { - "allOf": [{"$ref": "#/$defs/Configurable"}], - "default": None, - } - }, - "title": "RunnableSequenceConfig", - "type": "object", - } + if PYDANTIC_VERSION >= (2, 9): + assert _normalize_schema( + chain_configurable.get_config_jsonschema() + ) == snapshot(name="schema4") assert ( chain_configurable.with_config( @@ -960,55 +863,10 @@ def test_configurable_fields() -> None: "llm3": "a", } - assert chain_with_map_configurable.get_config_jsonschema() == { - "$defs": { - "Configurable": { - "properties": { - "llm_responses": { - "default": ["a"], - "description": "A " - "list " - "of " - "fake " - "responses " - "for " - "this " - "LLM", - "items": {"type": "string"}, - "title": "LLM " "Responses", - "type": "array", - }, - "other_responses": { - "default": ["a"], - "items": {"type": "string"}, - "title": "Other " "Responses", - "type": "array", - }, - "prompt_template": { - "default": "Hello, " "{name}!", - "description": "The " - "prompt " - "template " - "for " - "this " - "chain", - "title": "Prompt " "Template", - "type": "string", - }, - }, - "title": "Configurable", - "type": "object", - } - }, - "properties": { - "configurable": { - "allOf": [{"$ref": "#/$defs/Configurable"}], - "default": None, - } - }, - "title": "RunnableSequenceConfig", - "type": "object", - } + if PYDANTIC_VERSION >= (2, 9): + assert _normalize_schema( + chain_with_map_configurable.get_config_jsonschema() + ) == snapshot(name="schema5") assert chain_with_map_configurable.with_config( configurable={ @@ -1030,7 +888,7 @@ def test_configurable_alts_factory() -> None: assert fake_llm.with_config(configurable={"llm": "chat"}).invoke("...") == "b" -def test_configurable_fields_prefix_keys() -> None: +def test_configurable_fields_prefix_keys(snapshot: SnapshotAssertion) -> None: fake_chat = FakeListChatModel(responses=["b"]).configurable_fields( responses=ConfigurableFieldMultiOption( id="responses", @@ -1078,74 +936,13 @@ def test_configurable_fields_prefix_keys() -> None: chain = prompt | fake_llm - 
assert _schema(chain.config_schema()) == { - "title": "RunnableSequenceConfig", - "type": "object", - "properties": {"configurable": {"$ref": "#/definitions/Configurable"}}, - "definitions": { - "Chat_Responses": { - "enum": ["hello", "bye", "helpful"], - "title": "Chat Responses", - "type": "string", - }, - "Configurable": { - "properties": { - "chat_sleep": { - "anyOf": [{"type": "number"}, {"type": "null"}], - "default": None, - "title": "Chat " "Sleep", - }, - "llm": { - "$ref": "#/definitions/LLM", - "default": "default", - "title": "LLM", - }, - "llm==chat/responses": { - "default": ["hello", "bye"], - "items": {"$ref": "#/definitions/Chat_Responses"}, - "title": "Chat " "Responses", - "type": "array", - }, - "llm==default/responses": { - "default": ["a"], - "description": "A " - "list " - "of " - "fake " - "responses " - "for " - "this " - "LLM", - "items": {"type": "string"}, - "title": "LLM " "Responses", - "type": "array", - }, - "prompt_template": { - "$ref": "#/definitions/Prompt_Template", - "default": "hello", - "description": "The " - "prompt " - "template " - "for " - "this " - "chain", - "title": "Prompt " "Template", - }, - }, - "title": "Configurable", - "type": "object", - }, - "LLM": {"enum": ["chat", "default"], "title": "LLM", "type": "string"}, - "Prompt_Template": { - "enum": ["hello", "good_morning"], - "title": "Prompt Template", - "type": "string", - }, - }, - } + if PYDANTIC_VERSION >= (2, 9): + assert _normalize_schema(_schema(chain.config_schema())) == snapshot( + name="schema6" + ) -def test_configurable_fields_example() -> None: +def test_configurable_fields_example(snapshot: SnapshotAssertion) -> None: fake_chat = FakeListChatModel(responses=["b"]).configurable_fields( responses=ConfigurableFieldMultiOption( id="chat_responses", @@ -1191,71 +988,10 @@ def test_configurable_fields_example() -> None: assert chain_configurable.invoke({"name": "John"}) == "a" - assert chain_configurable.get_config_jsonschema() == { - "$defs": { - "Chat_Responses": { - "enum": ["hello", "bye", "helpful"], - "title": "Chat Responses", - "type": "string", - }, - "Configurable": { - "properties": { - "chat_responses": { - "default": ["hello", "bye"], - "items": {"$ref": "#/$defs/Chat_Responses"}, - "title": "Chat " "Responses", - "type": "array", - }, - "llm": { - "allOf": [{"$ref": "#/$defs/LLM"}], - "default": "default", - "title": "LLM", - }, - "llm_responses": { - "default": ["a"], - "description": "A " - "list " - "of " - "fake " - "responses " - "for " - "this " - "LLM", - "items": {"type": "string"}, - "title": "LLM " "Responses", - "type": "array", - }, - "prompt_template": { - "allOf": [{"$ref": "#/$defs/Prompt_Template"}], - "default": "hello", - "description": "The " - "prompt " - "template " - "for " - "this " - "chain", - "title": "Prompt " "Template", - }, - }, - "title": "Configurable", - "type": "object", - }, - "LLM": {"enum": ["chat", "default"], "title": "LLM", "type": "string"}, - "Prompt_Template": { - "enum": ["hello", "good_morning"], - "title": "Prompt Template", - "type": "string", - }, - }, - "properties": { - "configurable": { - "allOf": [{"$ref": "#/$defs/Configurable"}], - "default": None, - } - }, - "title": "RunnableSequenceConfig", - "type": "object", - } + if PYDANTIC_VERSION >= (2, 9): + assert _normalize_schema( + chain_configurable.get_config_jsonschema() + ) == snapshot(name="schema7") assert ( chain_configurable.with_config(configurable={"llm": "chat"}).invoke(
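As an illustrative aside, not part of the patch above: a minimal, self-contained sketch of the normalization that _normalize_schema performs, folding the patch's replace_all_of_with_ref and remove_all_none_default steps into one hypothetical helper, collapse_single_allof. It rewrites the single-element {'allOf': [{'$ref': ...}]} wrapper that pydantic 2.8 emitted into the bare {'$ref': ...} form that pydantic 2.9 emits, and strips redundant 'default': None entries, so schemas generated under either version compare equal.

from typing import Any


def collapse_single_allof(schema: Any) -> Any:
    """Hypothetical sketch: normalize a JSON schema in place and return it."""
    if isinstance(schema, dict):
        all_of = schema.get("allOf")
        if (
            isinstance(all_of, list)
            and len(all_of) == 1
            and isinstance(all_of[0], dict)
            and set(all_of[0]) == {"$ref"}
        ):
            # pydantic 2.9 emits the bare $ref directly; collapse 2.8's wrapper.
            schema.pop("allOf")
            schema["$ref"] = all_of[0]["$ref"]
        if "default" in schema and schema["default"] is None:
            # Drop redundant default: None so output from both versions matches.
            del schema["default"]
        for value in schema.values():
            collapse_single_allof(value)
    elif isinstance(schema, list):
        for item in schema:
            collapse_single_allof(item)
    return schema


# A pydantic-2.8-style fragment normalizes to the 2.9-style form:
old_style = {"obj": {"allOf": [{"$ref": "#/$defs/obj"}], "default": None}}
assert collapse_single_allof(old_style) == {"obj": {"$ref": "#/$defs/obj"}}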