mistralai[patch]: warn on stop token, fix on_llm_new_token (#15787)
Fixes #15269. The MistralAI API doesn't support a stop token yet, so this change addresses the issue with a warning instead of an error, and fixes `on_llm_new_token` to pass the streamed chunk along with the token.

Co-authored-by: Niels Garve <info@nielsgarve.com>
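A minimal sketch of the user-visible change (the prompt is illustrative, and a valid MISTRAL_API_KEY is assumed in the environment): passing `stop` no longer forwards the parameter to the API; it is dropped with a logged warning.

from langchain_mistralai.chat_models import ChatMistralAI

llm = ChatMistralAI()
# `stop` is dropped before the API call, and a warning is logged:
#   "Parameter `stop` not yet supported (https://docs.mistral.ai/api)"
result = llm.invoke("Say hello", stop=["\n"])
print(result.content)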
This commit is contained in:
parent 323941a90a
commit 0c95f3a981
@@ -289,10 +289,12 @@ class ChatMistralAI(BaseChatModel):
         self, messages: List[BaseMessage], stop: Optional[List[str]]
     ) -> Tuple[List[MistralChatMessage], Dict[str, Any]]:
         params = self._client_params
-        if stop is not None:
-            if "stop" in params:
-                raise ValueError("`stop` found in both the input and default params.")
-            params["stop"] = stop
+        if stop is not None or "stop" in params:
+            if "stop" in params:
+                params.pop("stop")
+            logger.warning(
+                "Parameter `stop` not yet supported (https://docs.mistral.ai/api)"
+            )
         message_dicts = [_convert_message_to_mistral_chat_message(m) for m in messages]
         return message_dicts, params
 
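The same guard, isolated as a standalone sketch (the function name `_strip_stop` is illustrative, not from the library): whether `stop` arrives per-call or sits in the default client params, it is removed and a single warning is emitted.

import logging
from typing import Any, Dict, List, Optional

logger = logging.getLogger(__name__)

def _strip_stop(params: Dict[str, Any], stop: Optional[List[str]]) -> Dict[str, Any]:
    # Mirrors the hunk above: drop any `stop` value rather than forwarding
    # it to an API that would reject it.
    if stop is not None or "stop" in params:
        params.pop("stop", None)  # pop with a default avoids a KeyError
        logger.warning(
            "Parameter `stop` not yet supported (https://docs.mistral.ai/api)"
        )
    return params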
@@ -319,7 +321,7 @@ class ChatMistralAI(BaseChatModel):
             default_chunk_class = chunk.__class__
             yield ChatGenerationChunk(message=chunk)
             if run_manager:
-                run_manager.on_llm_new_token(chunk.content)
+                run_manager.on_llm_new_token(token=chunk.content, chunk=chunk)
 
     async def _astream(
         self,
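Why the keyword form matters (a hedged sketch; the `ChunkLogger` class is illustrative): `on_llm_new_token` accepts an optional `chunk` argument, so a handler attached to the model can now see the full `ChatGenerationChunk` rather than only the token text.

from langchain_core.callbacks import BaseCallbackHandler

class ChunkLogger(BaseCallbackHandler):
    def on_llm_new_token(self, token: str, *, chunk=None, **kwargs) -> None:
        # With this fix, `chunk` is the ChatGenerationChunk being yielded;
        # before, only the positional token string was forwarded.
        print(f"token={token!r} chunk={type(chunk).__name__}")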
@@ -344,7 +346,7 @@ class ChatMistralAI(BaseChatModel):
             default_chunk_class = chunk.__class__
             yield ChatGenerationChunk(message=chunk)
             if run_manager:
-                await run_manager.on_llm_new_token(chunk.content)
+                await run_manager.on_llm_new_token(token=chunk.content, chunk=chunk)
 
     async def _agenerate(
         self,
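Hypothetical usage tying the two fixes together (reusing the `ChunkLogger` sketch above; API key setup assumed as before): stream from the model with the handler attached, and each token callback carries its chunk, in the sync and async paths alike.

llm = ChatMistralAI(callbacks=[ChunkLogger()])
for _ in llm.stream("Count to three"):
    pass  # ChunkLogger prints each token with its chunk type as it arrives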