Here we allow standard tests to specify a value for `tool_choice` via a `tool_choice_value` property, which defaults to `None`.

Chat models [available in Together](https://docs.together.ai/docs/chat-models) have issues passing standard tool-calling tests:

- llama 3.1 models currently [appear to rely on user-side parsing](https://docs.together.ai/docs/llama-3-function-calling) in Together;
- Mixtral-8x7B and Mistral-7B (currently tested) consistently do not call tools in some tests.

Specifying `tool_choice` also lets us remove an existing `xfail` and use a smaller model in Groq tests.
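To make the mechanism concrete, here is a minimal sketch of how a standard test could branch on `tool_choice_value` when binding tools. The base class and test body are simplified stand-ins, not the actual `langchain_standard_tests` source, and `magic_number` is a made-up example tool:

```python
# Illustrative sketch only: a simplified stand-in for the real
# ChatModelIntegrationTests base class, showing how a tool_choice_value
# property could be threaded through to bind_tools.
from typing import Optional, Type

from langchain_core.language_models import BaseChatModel
from langchain_core.tools import tool


@tool
def magic_number(question: str) -> int:
    """Return a magic number for a question."""
    return 42


class ChatModelIntegrationTests:
    """Simplified stand-in for the real base class."""

    @property
    def chat_model_class(self) -> Type[BaseChatModel]:
        raise NotImplementedError

    @property
    def chat_model_params(self) -> dict:
        return {}

    @property
    def tool_choice_value(self) -> Optional[str]:
        """Value to use for tool choice; None leaves the choice to the model."""
        return None

    def test_tool_calling(self) -> None:
        model = self.chat_model_class(**self.chat_model_params)
        if self.tool_choice_value is None:
            # Models that reliably call tools unprompted need no forcing.
            bound = model.bind_tools([magic_number])
        else:
            # Models that skip tool calls (e.g. Mixtral-8x7B above) can be
            # nudged via the provider's tool_choice parameter.
            bound = model.bind_tools(
                [magic_number], tool_choice=self.tool_choice_value
            )
        result = bound.invoke("What is the magic number for 'hello'?")
        assert result.tool_calls
```

The `ChatMistralAI` standard tests below opt in by returning `"any"`: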
"""Standard LangChain interface tests"""
|
|
|
|
from typing import Optional, Type
|
|
|
|
from langchain_core.language_models import BaseChatModel
|
|
from langchain_standard_tests.integration_tests import ( # type: ignore[import-not-found]
|
|
ChatModelIntegrationTests, # type: ignore[import-not-found]
|
|
)
|
|
|
|
from langchain_mistralai import ChatMistralAI
|
|
|
|
|
|
class TestMistralStandard(ChatModelIntegrationTests):
|
|
@property
|
|
def chat_model_class(self) -> Type[BaseChatModel]:
|
|
return ChatMistralAI
|
|
|
|
@property
|
|
def chat_model_params(self) -> dict:
|
|
return {"model": "mistral-large-latest", "temperature": 0}
|
|
|
|
@property
|
|
def tool_choice_value(self) -> Optional[str]:
|
|
"""Value to use for tool choice when used in tests."""
|
|
return "any"
|
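For comparison, the Groq tests mentioned in the description could override the same property. The sketch below is hypothetical: the exact model name and `tool_choice` value used in the Groq suite are assumptions for illustration, not taken from this change.

```python
# Hypothetical Groq analogue of the Mistral test class above. The model name
# and tool_choice value are assumptions; consult the langchain_groq standard
# tests for the actual values.
from typing import Optional, Type

from langchain_core.language_models import BaseChatModel
from langchain_groq import ChatGroq
from langchain_standard_tests.integration_tests import ChatModelIntegrationTests


class TestGroqStandard(ChatModelIntegrationTests):
    @property
    def chat_model_class(self) -> Type[BaseChatModel]:
        return ChatGroq

    @property
    def chat_model_params(self) -> dict:
        # Assumed: a smaller Groq-hosted model, per the description above.
        return {"model": "llama3-8b-8192", "temperature": 0}

    @property
    def tool_choice_value(self) -> Optional[str]:
        """Value to use for tool choice when used in tests."""
        return "any"  # assumed; forces a tool call, mirroring the Mistral suite
```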