Mirror of https://github.com/hwchase17/langchain.git, synced 2025-09-02 11:39:18 +00:00
Add new beta StructuredPrompt (#19080)
Thank you for contributing to LangChain!

- [ ] **PR title**: "package: description"
  - Where "package" is whichever of langchain, community, core, experimental, etc. is being modified. Use "docs: ..." for purely docs changes, "templates: ..." for template changes, "infra: ..." for CI changes.
  - Example: "community: add foobar LLM"
- [ ] **PR message**: ***Delete this entire checklist*** and replace with
  - **Description:** a description of the change
  - **Issue:** the issue # it fixes, if applicable
  - **Dependencies:** any dependencies required for this change
  - **Twitter handle:** if your PR gets announced, and you'd like a mention, we'll gladly shout you out!
- [ ] **Add tests and docs**: If you're adding a new integration, please include
  1. a test for the integration, preferably unit tests that do not rely on network access,
  2. an example notebook showing its use. It lives in the `docs/docs/integrations` directory.
- [ ] **Lint and test**: Run `make format`, `make lint` and `make test` from the root of the package(s) you've modified. See contribution guidelines for more: https://python.langchain.com/docs/contributing/

Additional guidelines:
- Make sure optional dependencies are imported within a function.
- Please do not add dependencies to pyproject.toml files (even optional ones) unless they are required for unit tests.
- Most PRs should not touch more than one package.
- Changes should be backwards compatible.
- If you are adding something to community, do not re-import it in langchain.

If no one reviews your PR within a few days, please @-mention one of baskaryan, efriis, eyurtsev, hwchase17.
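For orientation, here is a minimal usage sketch of the new `StructuredPrompt`, based on the test added in this PR. It is not part of the diff: the schema and message text are illustrative, and actually running the chain requires a chat model that implements `with_structured_output`.

```python
# Illustrative sketch (not part of this PR's diff).
from langchain_core.prompts.structured import StructuredPrompt
from langchain_core.pydantic_v1 import BaseModel


class OutputSchema(BaseModel):  # hypothetical output schema
    name: str
    value: int


# The prompt carries both the chat messages and the desired output schema.
prompt = StructuredPrompt.from_messages_and_schema(
    [("human", "Give me a name and a value.")],
    OutputSchema,
)

# Piping the prompt into a chat model that supports `with_structured_output`
# binds the schema to the model, so the chain returns an OutputSchema
# instance rather than a raw chat message:
#
#     chain = prompt | chat_model
#     result = chain.invoke({})
```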
libs/core/tests/unit_tests/prompts/test_structured.py (new file, 78 lines added)

@@ -0,0 +1,78 @@
```python
from functools import partial
from inspect import isclass
from typing import Any, Dict, Type, Union, cast

from langchain_core.load.dump import dumps
from langchain_core.load.load import loads
from langchain_core.prompts.structured import StructuredPrompt
from langchain_core.pydantic_v1 import BaseModel
from langchain_core.runnables.base import Runnable, RunnableLambda
from tests.unit_tests.fake.chat_model import FakeListChatModel
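

# Stand-in for the runnable a real model would return from
# `with_structured_output`: returns a fixed instance for a pydantic schema,
# or fills every declared parameter with 1 for a dict schema.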
def _fake_runnable(
    schema: Union[Dict, Type[BaseModel]], _: Any
) -> Union[BaseModel, Dict]:
    if isclass(schema) and issubclass(schema, BaseModel):
        return schema(name="yo", value=42)
    else:
        params = cast(Dict, schema)["parameters"]
        return {k: 1 for k, v in params.items()}
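

# Fake chat model whose `with_structured_output` simply wraps _fake_runnable,
# letting the tests observe which schema the prompt passed to the model.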
class FakeStructuredChatModel(FakeListChatModel):
    """Fake ChatModel for testing purposes."""

    def with_structured_output(self, schema: Union[Dict, Type[BaseModel]]) -> Runnable:
        return RunnableLambda(partial(_fake_runnable, schema))

    @property
    def _llm_type(self) -> str:
        return "fake-messages-list-chat-model"
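

# Piping a StructuredPrompt into the model should bind the pydantic schema via
# `with_structured_output`, so invoking the chain yields an OutputSchema
# instance rather than a chat message.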
def test_structured_prompt_pydantic() -> None:
    class OutputSchema(BaseModel):
        name: str
        value: int

    prompt = StructuredPrompt.from_messages_and_schema(
        [
            ("human", "I'm very structured, how about you?"),
        ],
        OutputSchema,
    )

    model = FakeStructuredChatModel(responses=[])

    chain = prompt | model

    assert chain.invoke({"hello": "there"}) == OutputSchema(name="yo", value=42)
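

# Same flow with a dict (JSON-schema-style) schema, plus a check that the
# prompt survives a dumps/loads serialization round trip.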
def test_structured_prompt_dict() -> None:
    prompt = StructuredPrompt.from_messages_and_schema(
        [
            ("human", "I'm very structured, how about you?"),
        ],
        {
            "name": "yo",
            "description": "a structured output",
            "parameters": {
                "name": {"type": "string"},
                "value": {"type": "integer"},
            },
        },
    )

    model = FakeStructuredChatModel(responses=[])

    chain = prompt | model

    assert chain.invoke({"hello": "there"}) == {"name": 1, "value": 1}

    assert loads(dumps(prompt)) == prompt

    chain = loads(dumps(prompt)) | model

    assert chain.invoke({"hello": "there"}) == {"name": 1, "value": 1}
```