- **Description:** When `QianfanChatEndpoint` uses a tool result to answer a question, it currently requires the tool's content to be a Dict. We could instead ask users to return a Dict from their tools, but to stay consistent with the other chat models I think this change is necessary.
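To make the described difference concrete, here is a hedged sketch (the tool names are illustrative, not taken from this PR) contrasting a tool that returns a Dict with one that returns a plain string, which is what the test below exercises and what the other chat models already accept:

```python
from langchain_core.tools import tool


@tool
def get_weather_as_dict(location: str) -> dict:
    """Get the weather for a location (Dict-style result)."""
    # Per the description, QianfanChatEndpoint previously required this shape.
    return {"location": location, "forecast": "sunny", "temperature_c": 25}


@tool
def get_weather_as_str(location: str) -> str:
    """Get the weather for a location (plain-string result)."""
    # With this change, a plain string works, matching other chat models.
    return f"It is sunny in {location}, around 25 degrees Celsius."
```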
from langchain_core.messages import AIMessage, HumanMessage, ToolMessage
from langchain_core.messages.tool import ToolCall
from langchain_core.tools import tool

from langchain_community.chat_models import QianfanChatEndpoint


@tool
def get_current_weather(location: str, unit: str = "摄氏度") -> str:
    """获取指定地点的天气"""  # "Get the weather for the specified location."
    # Returns "It is sunny in {location}, around 25 {unit}." (unit defaults to Celsius).
    return f"{location}是晴朗,25{unit}左右。"


def test_chat_qianfan_tool_result_to_model() -> None:
    """Test QianfanChatEndpoint invoke with tool_calling result."""
    # Hand-crafted conversation: the user's question, the model's tool call,
    # and the tool's plain-string result fed back as a ToolMessage.
    messages = [
        HumanMessage("上海天气怎么样?"),  # "What's the weather like in Shanghai?"
        AIMessage(
            content=" ",
            tool_calls=[
                ToolCall(
                    name="get_current_weather",
                    args={"location": "上海", "unit": "摄氏度"},  # Shanghai, Celsius
                    id="foo",
                    type="tool_call",
                ),
            ],
        ),
        ToolMessage(
            content="上海是晴天,25度左右。",  # "It is sunny in Shanghai, around 25 degrees."
            tool_call_id="foo",
            name="get_current_weather",
        ),
    ]
    chat = QianfanChatEndpoint(model="ERNIE-3.5-8K")  # type: ignore[call-arg]
    llm_with_tool = chat.bind_tools([get_current_weather])
    response = llm_with_tool.invoke(messages)
    assert isinstance(response, AIMessage)
    print(response.content)  # noqa: T201
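As a follow-up, here is a hedged sketch of the live round trip that the test above hard-codes with a fixed `ToolCall`: the model emits a tool call, the tool runs locally, and its plain-string result is fed back as a `ToolMessage`. It reuses the imports and the `get_current_weather` tool from the file above, the function name `run_tool_round_trip` is only an example, and it assumes valid Qianfan credentials (typically the QIANFAN_AK / QIANFAN_SK environment variables).

```python
def run_tool_round_trip() -> None:
    chat = QianfanChatEndpoint(model="ERNIE-3.5-8K")  # type: ignore[call-arg]
    llm_with_tool = chat.bind_tools([get_current_weather])

    messages = [HumanMessage("上海天气怎么样?")]  # "What's the weather like in Shanghai?"
    ai_msg = llm_with_tool.invoke(messages)
    messages.append(ai_msg)

    # Execute each requested tool call locally and return the plain-string
    # result; with this change the ToolMessage content no longer has to be a Dict.
    for tool_call in ai_msg.tool_calls:
        result = get_current_weather.invoke(tool_call["args"])
        messages.append(
            ToolMessage(
                content=result,
                tool_call_id=tool_call["id"],
                name=tool_call["name"],
            )
        )

    final = llm_with_tool.invoke(messages)
    print(final.content)  # noqa: T201
```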