From 28f8d436f6778bd21cd310e12996300dc64a6d02 Mon Sep 17 00:00:00 2001 From: blaufink <88979818+blaufink@users.noreply.github.com> Date: Fri, 6 Dec 2024 00:28:12 +0100 Subject: [PATCH] mistral: fix of issue #26029 (#28233) - Description: Azure AI takes an issue with the safe_mode parameter being set to False instead of None. Therefore, this PR changes the default value of safe_mode from False to None. This results in it being filtered out before the request is sent - avoiding the extra-parameter issue described below. - Issue: #26029 - Dependencies: / --------- Co-authored-by: blaufink Co-authored-by: Erick Friis --- libs/partners/mistralai/langchain_mistralai/chat_models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/partners/mistralai/langchain_mistralai/chat_models.py b/libs/partners/mistralai/langchain_mistralai/chat_models.py index aed0f705427..9e68e23ae45 100644 --- a/libs/partners/mistralai/langchain_mistralai/chat_models.py +++ b/libs/partners/mistralai/langchain_mistralai/chat_models.py @@ -388,7 +388,7 @@ class ChatMistralAI(BaseChatModel): """Decode using nucleus sampling: consider the smallest set of tokens whose probability sum is at least top_p. Must be in the closed interval [0.0, 1.0].""" random_seed: Optional[int] = None - safe_mode: bool = False + safe_mode: Optional[bool] = None streaming: bool = False model_config = ConfigDict(