Mirror of https://github.com/hwchase17/langchain.git, synced 2025-08-05 11:12:47 +00:00
community[fix]: Update Perplexity to pass parameters into API calls (#28421)
- [x] **PR title**: "package: description" - Where "package" is whichever of langchain, community, core, etc. is being modified. Use "docs: ..." for purely docs changes, "infra: ..." for CI changes.
  - Example: "community: add foobar LLM"

- **Description:** I realized the invocation parameters were not being passed into `_generate`, so I added those in, but then realized that the parameters contained some old fields designed for an older openai client, which I removed. Parameters work fine now.
- **Issue:** Fixes #28229
- **Dependencies:** No new dependencies.
- **Twitter handle:** @arch_plane

- [x] **Lint and test**: Run `make format`, `make lint` and `make test` from the root of the package(s) you've modified. See contribution guidelines for more: https://python.langchain.com/docs/contributing/

Additional guidelines:
- Make sure optional dependencies are imported within a function.
- Please do not add dependencies to pyproject.toml files (even optional ones) unless they are required for unit tests.
- Most PRs should not touch more than one package.
- Changes should be backwards compatible.
- If you are adding something to community, do not re-import it in langchain.

If no one reviews your PR within a few days, please @-mention one of baskaryan, efriis, eyurtsev, ccurme, vbarda, hwchase17.

Co-authored-by: Erick Friis <erick@langchain.dev>
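A rough sketch of the parameter flow this PR establishes, with placeholder values and a stand-in for the real OpenAI-style client (so it runs without an API key); the names mirror the diff below, not the exact library internals:

```python
from typing import Any, Dict, List


def invocation_params() -> Dict[str, Any]:
    # Mirrors _invocation_params/_default_params after the fix: only fields the
    # modern client accepts per request (no api_key/api_base/request_timeout).
    return {
        "model": "placeholder-sonar-model",  # placeholder, not a real model id
        "temperature": 0.7,
        "max_tokens": 256,
    }


def fake_create(messages: List[Dict[str, str]], **params: Any) -> Dict[str, Any]:
    # Stand-in for client.chat.completions.create: echoes what it was given,
    # to show which keyword arguments actually reach the API call.
    return {"messages": messages, **params}


def generate(messages: List[Dict[str, str]], **kwargs: Any) -> Dict[str, Any]:
    params = invocation_params()
    params = {**params, **kwargs}  # call-time kwargs override the defaults
    # Before the fix only model/messages were sent; now everything is forwarded.
    return fake_create(messages, **params)


if __name__ == "__main__":
    result = generate([{"role": "user", "content": "hi"}], temperature=0.1)
    print(result["temperature"])  # 0.1 -- the call-time value reached the "API"
```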
Commit b78b2f7a28 (parent 34ca31e467)
```diff
@@ -148,7 +148,6 @@ class ChatPerplexity(BaseChatModel):
     def _default_params(self) -> Dict[str, Any]:
         """Get the default parameters for calling PerplexityChat API."""
         return {
-            "request_timeout": self.request_timeout,
             "max_tokens": self.max_tokens,
             "stream": self.streaming,
             "temperature": self.temperature,
```
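`request_timeout` is a holdover from the pre-1.0 `openai` Python SDK; with the 1.x-style client that `ChatPerplexity` wraps, a timeout is normally configured on the client rather than sent as a per-request field, so dropping it keeps the forwarded `params` clean. A hedged sketch of that pattern (assuming the standard `openai.OpenAI` constructor; the exact wiring inside `ChatPerplexity` may differ):

```python
import openai

# Assumption: the timeout lives on the client object, not in the request payload.
client = openai.OpenAI(
    api_key="pplx-...",                    # placeholder key
    base_url="https://api.perplexity.ai",  # Perplexity's OpenAI-compatible endpoint
    timeout=30.0,                          # seconds; plays the role of the old request_timeout
)
```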
```diff
@@ -222,7 +221,7 @@ class ChatPerplexity(BaseChatModel):
         if stop:
             params["stop_sequences"] = stop
         stream_resp = self.client.chat.completions.create(
-            model=params["model"], messages=message_dicts, stream=True
+            messages=message_dicts, stream=True, **params
         )
         for chunk in stream_resp:
             if not isinstance(chunk, dict):
```
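The mechanical change at both call sites is plain dictionary unpacking: `**params` passes every key in `params` as a keyword argument, so `model` (already present via `_invocation_params`) no longer needs to be pulled out by hand. A tiny self-contained illustration, independent of the real client:

```python
def create(messages, stream=False, **options):
    """Stand-in that echoes the keyword arguments it received."""
    return {"messages": messages, "stream": stream, **options}


params = {"model": "placeholder-model", "temperature": 0.7, "max_tokens": 256}

# Equivalent calls: explicit keywords vs. dict unpacking.
a = create(["hi"], stream=True, model="placeholder-model", temperature=0.7, max_tokens=256)
b = create(["hi"], stream=True, **params)
assert a == b
```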
```diff
@@ -258,9 +257,7 @@ class ChatPerplexity(BaseChatModel):
             return generate_from_stream(stream_iter)
         message_dicts, params = self._create_message_dicts(messages, stop)
         params = {**params, **kwargs}
-        response = self.client.chat.completions.create(
-            model=params["model"], messages=message_dicts
-        )
+        response = self.client.chat.completions.create(messages=message_dicts, **params)
         message = AIMessage(
             content=response.choices[0].message.content,
             additional_kwargs={"citations": response.citations},
```
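The `params = {**params, **kwargs}` line (already present before this change) is what lets per-call keyword arguments win over the model's defaults; the fix is simply that the merged dictionary is now forwarded. Merge precedence in plain Python, for reference:

```python
defaults = {"temperature": 0.7, "max_tokens": 256}
call_time = {"temperature": 0.1}

merged = {**defaults, **call_time}
print(merged)  # {'temperature': 0.1, 'max_tokens': 256} -- right-hand keys win
```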
```diff
@@ -271,8 +268,6 @@ class ChatPerplexity(BaseChatModel):
     def _invocation_params(self) -> Mapping[str, Any]:
         """Get the parameters used to invoke the model."""
         pplx_creds: Dict[str, Any] = {
-            "api_key": self.pplx_api_key,
-            "api_base": "https://api.perplexity.ai",
             "model": self.model,
         }
         return {**pplx_creds, **self._default_params}
```
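A hedged end-to-end usage sketch of what the fix enables: sampling parameters set on `ChatPerplexity` (or passed at call time) now reach the Perplexity API instead of being dropped from the request. Running it requires `langchain-community`, the `openai` package, and a valid `PPLX_API_KEY` in the environment; the model name is a placeholder and may need updating:

```python
from langchain_community.chat_models import ChatPerplexity

llm = ChatPerplexity(
    model="llama-3.1-sonar-small-128k-online",  # placeholder model id
    temperature=0.2,
    max_tokens=128,
)

# Before this commit, temperature/max_tokens were not forwarded to the request;
# after it, they travel with the call via **params.
print(llm.invoke("Name one moon of Jupiter.").content)
```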