mirror of https://github.com/hwchase17/langchain.git (synced 2025-06-22 14:49:29 +00:00)
commit e4f106ea62, parent f34e62ef42
@@ -13,7 +13,6 @@ from langchain_core.messages import (
     SystemMessage,
 )
 from langchain_core.outputs import ChatGeneration, LLMResult
-from langchain_core.tools import tool
 from pydantic import BaseModel, Field

 from langchain_groq import ChatGroq
@@ -394,42 +393,6 @@ def test_json_mode_structured_output() -> None:
     assert len(result.punchline) != 0


-def test_tool_calling_no_arguments() -> None:
-    # Note: this is a variant of a test in langchain_tests
-    # that as of 2024-08-19 fails with "Failed to call a function. Please
-    # adjust your prompt." when `tool_choice="any"` is specified, but
-    # passes when `tool_choice` is not specified.
-    model = ChatGroq(model="llama-3.3-70b-versatile", temperature=0)  # type: ignore[call-arg]
-
-    @tool
-    def magic_function_no_args() -> int:
-        """Calculates a magic function."""
-        return 5
-
-    model_with_tools = model.bind_tools([magic_function_no_args])
-    query = "What is the value of magic_function()? Use the tool."
-    result = model_with_tools.invoke(query)
-    assert isinstance(result, AIMessage)
-    assert len(result.tool_calls) == 1
-    tool_call = result.tool_calls[0]
-    assert tool_call["name"] == "magic_function_no_args"
-    assert tool_call["args"] == {}
-    assert tool_call["id"] is not None
-    assert tool_call["type"] == "tool_call"
-
-    # Test streaming
-    full: Optional[BaseMessageChunk] = None
-    for chunk in model_with_tools.stream(query):
-        full = chunk if full is None else full + chunk  # type: ignore
-    assert isinstance(full, AIMessage)
-    assert len(full.tool_calls) == 1
-    tool_call = full.tool_calls[0]
-    assert tool_call["name"] == "magic_function_no_args"
-    assert tool_call["args"] == {}
-    assert tool_call["id"] is not None
-    assert tool_call["type"] == "tool_call"
-
-
 # Groq does not currently support N > 1
 # @pytest.mark.scheduled
 # def test_chat_multiple_completions() -> None:
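For reference, the tool_choice="any" variant that the removed test's comment describes would look roughly like the sketch below. This is an illustrative sketch, not code from the commit: it assumes ChatGroq.bind_tools accepts a tool_choice argument and that GROQ_API_KEY is configured, and the function name is hypothetical.

from langchain_core.tools import tool

from langchain_groq import ChatGroq


@tool
def magic_function_no_args() -> int:
    """Calculates a magic function."""
    return 5


def sketch_tool_calling_no_arguments_forced() -> None:
    model = ChatGroq(model="llama-3.3-70b-versatile", temperature=0)
    # tool_choice="any" forces a tool call; per the removed test's comment,
    # this is the setting that made the langchain_tests variant fail against
    # Groq with "Failed to call a function. Please adjust your prompt."
    model_with_tools = model.bind_tools(
        [magic_function_no_args], tool_choice="any"
    )
    result = model_with_tools.invoke(
        "What is the value of magic_function()? Use the tool."
    )
    assert result.tool_calls[0]["name"] == "magic_function_no_args"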
@@ -48,26 +48,3 @@ class TestGroqLlama(BaseTestGroq):
     @property
     def supports_json_mode(self) -> bool:
         return False  # Not supported in streaming mode
-
-    @pytest.mark.xfail(
-        reason=("Fails with 'Failed to call a function. Please adjust your prompt.'")
-    )
-    def test_tool_calling_with_no_arguments(self, model: BaseChatModel) -> None:
-        super().test_tool_calling_with_no_arguments(model)
-
-    @pytest.mark.xfail(
-        reason=("Fails with 'Failed to call a function. Please adjust your prompt.'")
-    )
-    def test_tool_message_histories_string_content(
-        self, model: BaseChatModel, my_adder_tool: BaseTool
-    ) -> None:
-        super().test_tool_message_histories_string_content(model, my_adder_tool)
-
-    @pytest.mark.xfail(
-        reason=(
-            "Sometimes fails with 'Failed to call a function. "
-            "Please adjust your prompt.'"
-        )
-    )
-    def test_bind_runnables_as_tools(self, model: BaseChatModel) -> None:
-        super().test_bind_runnables_as_tools(model)
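For context, the removed methods follow the standard-tests xfail pattern: a provider test class inherits the shared integration suite and re-declares a known-failing inherited test with pytest.mark.xfail so the failure is reported as expected rather than as a regression. The sketch below illustrates that pattern only; it assumes BaseTestGroq wraps langchain_tests' ChatModelIntegrationTests, and the class name and model parameters here are illustrative, not taken from the commit.

import pytest
from langchain_core.language_models import BaseChatModel
from langchain_tests.integration_tests import ChatModelIntegrationTests

from langchain_groq import ChatGroq


class TestGroqXfailSketch(ChatModelIntegrationTests):
    """Hypothetical minimal subclass; the real suite builds on BaseTestGroq."""

    @property
    def chat_model_class(self) -> type[BaseChatModel]:
        return ChatGroq

    @property
    def chat_model_params(self) -> dict:
        return {"model": "llama-3.3-70b-versatile", "temperature": 0}

    @pytest.mark.xfail(
        reason=("Fails with 'Failed to call a function. Please adjust your prompt.'")
    )
    def test_tool_calling_with_no_arguments(self, model: BaseChatModel) -> None:
        # Re-run the inherited standard test, reporting a failure as expected.
        super().test_tool_calling_with_no_arguments(model)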