From 46f580d42d911d6b2ff7914f08ed4a182ffb8b00 Mon Sep 17 00:00:00 2001 From: Bagatur <22008038+baskaryan@users.noreply.github.com> Date: Fri, 5 Apr 2024 14:50:40 -0700 Subject: [PATCH] docs: anthropic tool docstring (#20091) --- .../langchain_anthropic/chat_models.py | 122 +++++++++++++++++- 1 file changed, 121 insertions(+), 1 deletion(-) diff --git a/libs/partners/anthropic/langchain_anthropic/chat_models.py b/libs/partners/anthropic/langchain_anthropic/chat_models.py index 8a16398d863..6b2aa154e01 100644 --- a/libs/partners/anthropic/langchain_anthropic/chat_models.py +++ b/libs/partners/anthropic/langchain_anthropic/chat_models.py @@ -438,7 +438,31 @@ class ChatAnthropic(BaseChatModel): models, callables, and BaseTools will be automatically converted to their schema dictionary representation. **kwargs: Any additional parameters to bind. - """ + + Example: + .. code-block:: python + + from langchain_anthropic import ChatAnthropic + from langchain_core.pydantic_v1 import BaseModel, Field + + class GetWeather(BaseModel): + '''Get the current weather in a given location''' + + location: str = Field(..., description="The city and state, e.g. San Francisco, CA") + + + llm = ChatAnthropic(model="claude-3-opus-20240229", temperature=0) + llm_with_tools = llm.bind_tools([GetWeather]) + llm_with_tools.invoke("what is the weather like in San Francisco",) + # -> AIMessage( + # content=[ + # {'text': '\nBased on the user\'s question, the relevant function to call is GetWeather, which requires the "location" parameter.\n\nThe user has directly specified the location as "San Francisco". 
Since San Francisco is a well known city, I can reasonably infer they mean San Francisco, CA without needing the state specified.\n\nAll the required parameters are provided, so I can proceed with the API call.\n', 'type': 'text'}, + # {'text': None, 'type': 'tool_use', 'id': 'toolu_01SCgExKzQ7eqSkMHfygvYuu', 'name': 'GetWeather', 'input': {'location': 'San Francisco, CA'}} + # ], + # response_metadata={'id': 'msg_01GM3zQtoFv8jGQMW7abLnhi', 'model': 'claude-3-opus-20240229', 'stop_reason': 'tool_use', 'stop_sequence': None, 'usage': {'input_tokens': 487, 'output_tokens': 145}}, + # id='run-87b1331e-9251-4a68-acef-f0a018b639cc-0' + # ) + """ # noqa: E501 formatted_tools = [convert_to_anthropic_tool(tool) for tool in tools] return self.bind(tools=formatted_tools, **kwargs) @@ -450,6 +474,102 @@ class ChatAnthropic(BaseChatModel): include_raw: bool = False, **kwargs: Any, ) -> Runnable[LanguageModelInput, Union[Dict, BaseModel]]: + """Model wrapper that returns outputs formatted to match the given schema. + + Args: + schema: The output schema as a dict or a Pydantic class. If a Pydantic class + then the model output will be an object of that class. If a dict then + the model output will be a dict. With a Pydantic class the returned + attributes will be validated, whereas with a dict they will not be. + include_raw: If False then only the parsed structured output is returned. If + an error occurs during model output parsing it will be raised. If True + then both the raw model response (a BaseMessage) and the parsed model + response will be returned. If an error occurs during output parsing it + will be caught and returned as well. The final output is always a dict + with keys "raw", "parsed", and "parsing_error". + + Returns: + A Runnable that takes any ChatModel input. The output type depends on + include_raw and schema. 
+
+        If include_raw is True then output is a dict with keys:
+            raw: BaseMessage,
+            parsed: Optional[_DictOrPydantic],
+            parsing_error: Optional[BaseException],
+
+        If include_raw is False and schema is a Dict then the runnable outputs a Dict.
+        If include_raw is False and schema is a Type[BaseModel] then the runnable
+        outputs a BaseModel.
+
+        Example: Pydantic schema (include_raw=False):
+            .. code-block:: python
+
+                from langchain_anthropic import ChatAnthropic
+                from langchain_core.pydantic_v1 import BaseModel
+
+                class AnswerWithJustification(BaseModel):
+                    '''An answer to the user question along with justification for the answer.'''
+                    answer: str
+                    justification: str
+
+                llm = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
+                structured_llm = llm.with_structured_output(AnswerWithJustification)
+
+                structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers")
+
+                # -> AnswerWithJustification(
+                #     answer='They weigh the same',
+                #     justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'
+                # )
+
+        Example: Pydantic schema (include_raw=True):
+            .. code-block:: python
+
+                from langchain_anthropic import ChatAnthropic
+                from langchain_core.pydantic_v1 import BaseModel
+
+                class AnswerWithJustification(BaseModel):
+                    '''An answer to the user question along with justification for the answer.'''
+                    answer: str
+                    justification: str
+
+                llm = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
+                structured_llm = llm.with_structured_output(AnswerWithJustification, include_raw=True)
+
+                structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers")
+                # -> {
+                #     'raw': AIMessage(content=[{'type': 'tool_use', 'id': 'toolu_01Dg98EJZSKYcfG1WKd3rhQ4', 'name': 'AnswerWithJustification', 'input': {'answer': 'They weigh the same.', 'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'}}]),
+                #     'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'),
+                #     'parsing_error': None
+                # }
+
+        Example: Dict schema (include_raw=False):
+            .. code-block:: python
+
+                from langchain_anthropic import ChatAnthropic
+
+                schema = {
+                    "name": "AnswerWithJustification",
+                    "description": "An answer to the user question along with justification for the answer.",
+                    "input_schema": {
+                        "type": "object",
+                        "properties": {
+                            "answer": {"type": "string"},
+                            "justification": {"type": "string"},
+                        },
+                        "required": ["answer", "justification"]
+                    }
+                }
+                llm = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
+                structured_llm = llm.with_structured_output(schema)
+
+                structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers")
+                # -> {
+                #     'answer': 'They weigh the same',
+                #     'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
+                # }
+
+        """  # noqa: E501
         llm = self.bind_tools([schema])
         if isinstance(schema, type) and issubclass(schema, BaseModel):
             output_parser = ToolsOutputParser(