Mirror of https://github.com/hwchase17/langchain.git, synced 2025-06-25 16:13:25 +00:00.
community[patch]: Fix the error of Baidu Qianfan not passing the stop parameter (#18666)
- [x] **PR title**: "community: fix baidu qianfan missing stop parameter"
- [x] **PR message**:
  - **Description:** Baidu Qianfan dropped the `stop` parameter when calling the service, because the request params were built only from `kwargs`. This bug can cause an agent to receive incorrect results.

---------

Co-authored-by: ligang33 <ligang33@baidu.com>
Co-authored-by: Bagatur <22008038+baskaryan@users.noreply.github.com>
Co-authored-by: Bagatur <baskaryan@gmail.com>
This commit is contained in: parent `d1a2e194c3` · commit `a662468dde`
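For context, the bug follows a generic pattern: the helper that builds request params consumes only `**kwargs`, so a `stop` value passed as a named parameter to the calling method never makes it into the request. A minimal sketch of the failure and the one-line fix (the function names here are illustrative stand-ins, not the actual Qianfan internals):

```python
from typing import Any, Dict, List, Optional


def _convert_params(**kwargs: Any) -> Dict[str, Any]:
    # Stand-in for _convert_prompt_msg_params: builds the request
    # payload from keyword arguments only.
    return dict(kwargs)


def call_endpoint(
    prompt: str, stop: Optional[List[str]] = None, **kwargs: Any
) -> Dict[str, Any]:
    params = _convert_params(prompt=prompt, **kwargs)
    # Without the next line, `stop` would be silently dropped: it is a
    # named parameter of this function, so it is never part of **kwargs.
    params["stop"] = stop  # the one-line fix applied throughout this commit
    return params


assert call_endpoint("hi", stop=["\n"], temperature=0.2) == {
    "prompt": "hi",
    "temperature": 0.2,
    "stop": ["\n"],
}
```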
In `QianfanChatEndpoint`, the sync and async generation paths now set `stop` on the request params after they are built from `kwargs`:

```diff
@@ -271,6 +271,7 @@ class QianfanChatEndpoint(BaseChatModel):
                 },
             )
         params = self._convert_prompt_msg_params(messages, **kwargs)
+        params["stop"] = stop
         response_payload = self.client.do(**params)
         lc_msg = _convert_dict_to_message(response_payload)
         gen = ChatGeneration(
@@ -316,6 +317,7 @@ class QianfanChatEndpoint(BaseChatModel):
                 },
             )
         params = self._convert_prompt_msg_params(messages, **kwargs)
+        params["stop"] = stop
         response_payload = await self.client.ado(**params)
         lc_msg = _convert_dict_to_message(response_payload)
         generations = []
```
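With the fix in place, a `stop` list passed through the standard chat-model interface should reach the service on both paths. A usage sketch (assumes Qianfan credentials are configured, typically via the `QIANFAN_AK`/`QIANFAN_SK` environment variables; the model name is illustrative):

```python
from langchain_community.chat_models import QianfanChatEndpoint
from langchain_core.messages import HumanMessage

chat = QianfanChatEndpoint(model="ERNIE-Bot-turbo")

# `stop` is a first-class argument of the chat-model interface; after
# this patch it is injected into the Qianfan request instead of dropped.
msg = chat.invoke([HumanMessage(content="Count from 1 to 10.")], stop=["5"])
print(msg.content)
```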
The sync and async streaming paths get the same injection, right before `stream` is switched on:

```diff
@@ -339,6 +341,7 @@ class QianfanChatEndpoint(BaseChatModel):
         **kwargs: Any,
     ) -> Iterator[ChatGenerationChunk]:
         params = self._convert_prompt_msg_params(messages, **kwargs)
+        params["stop"] = stop
         params["stream"] = True
         for res in self.client.do(**params):
             if res:
@@ -365,6 +368,7 @@ class QianfanChatEndpoint(BaseChatModel):
         **kwargs: Any,
     ) -> AsyncIterator[ChatGenerationChunk]:
         params = self._convert_prompt_msg_params(messages, **kwargs)
+        params["stop"] = stop
         params["stream"] = True
         async for res in await self.client.ado(**params):
             if res:
```
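A streaming sketch under the same assumptions (credentials from the environment, illustrative model name): the token stream should now halt at the first stop sequence rather than ignoring it.

```python
from langchain_community.chat_models import QianfanChatEndpoint
from langchain_core.messages import HumanMessage

chat = QianfanChatEndpoint(model="ERNIE-Bot-turbo")

# The streaming paths set params["stream"] = True and, after this fix,
# also forward `stop` to the request.
for chunk in chat.stream([HumanMessage(content="Count upward from 1.")], stop=["5"]):
    print(chunk.content, end="", flush=True)
```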
In `QianfanLLMEndpoint`, the blocking completion paths are patched the same way:

```diff
@@ -180,6 +180,7 @@ class QianfanLLMEndpoint(LLM):
                 completion += chunk.text
             return completion
         params = self._convert_prompt_msg_params(prompt, **kwargs)
+        params["stop"] = stop
         response_payload = self.client.do(**params)
 
         return response_payload["result"]
@@ -198,6 +199,7 @@ class QianfanLLMEndpoint(LLM):
             return completion
 
         params = self._convert_prompt_msg_params(prompt, **kwargs)
+        params["stop"] = stop
         response_payload = await self.client.ado(**params)
 
         return response_payload["result"]
```
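`QianfanLLMEndpoint` is the completion-style wrapper, so the same `stop` behavior now applies to plain string prompts. A usage sketch under the same credential assumptions (model name illustrative):

```python
from langchain_community.llms import QianfanLLMEndpoint

llm = QianfanLLMEndpoint(model="ERNIE-Bot-turbo")

# As with the chat model, `stop` now reaches client.do(**params).
text = llm.invoke("Write the alphabet as a single line.", stop=["m"])
print(text)
```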
And the LLM streaming paths, where `stream=True` is merged into `kwargs` before conversion:

```diff
@@ -210,6 +212,7 @@ class QianfanLLMEndpoint(LLM):
         **kwargs: Any,
     ) -> Iterator[GenerationChunk]:
         params = self._convert_prompt_msg_params(prompt, **{**kwargs, "stream": True})
+        params["stop"] = stop
         for res in self.client.do(**params):
             if res:
                 chunk = GenerationChunk(text=res["result"])
@@ -225,6 +228,7 @@ class QianfanLLMEndpoint(LLM):
         **kwargs: Any,
     ) -> AsyncIterator[GenerationChunk]:
         params = self._convert_prompt_msg_params(prompt, **{**kwargs, "stream": True})
+        params["stop"] = stop
         async for res in await self.client.ado(**params):
             if res:
                 chunk = GenerationChunk(text=res["result"])
```
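Note the dict-merge pattern in the streaming hunks: the `stream` flag rides along with the user's kwargs into the conversion helper, while `stop` is still injected explicitly afterwards, so both end up in the final request dict. A short sketch of the merge plus a hedged streaming usage (model name illustrative, credentials from the environment):

```python
from langchain_community.llms import QianfanLLMEndpoint

# Merge pattern from the streaming paths: `stream` joins the user kwargs
# before conversion; `stop` is set explicitly after.
kwargs = {"temperature": 0.2}
params = {**kwargs, "stream": True}
params["stop"] = ["\n\n"]
assert params == {"temperature": 0.2, "stream": True, "stop": ["\n\n"]}

llm = QianfanLLMEndpoint(model="ERNIE-Bot-turbo")
for chunk in llm.stream("Count upward from 1.", stop=["5"]):
    print(chunk, end="", flush=True)
```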