diff --git a/libs/community/langchain_community/chat_models/perplexity.py b/libs/community/langchain_community/chat_models/perplexity.py
index ce415dd59cb..d168b1363e5 100644
--- a/libs/community/langchain_community/chat_models/perplexity.py
+++ b/libs/community/langchain_community/chat_models/perplexity.py
@@ -148,7 +148,6 @@ class ChatPerplexity(BaseChatModel):
     def _default_params(self) -> Dict[str, Any]:
         """Get the default parameters for calling PerplexityChat API."""
         return {
-            "request_timeout": self.request_timeout,
             "max_tokens": self.max_tokens,
             "stream": self.streaming,
             "temperature": self.temperature,
@@ -222,7 +221,7 @@ class ChatPerplexity(BaseChatModel):
         if stop:
             params["stop_sequences"] = stop
         stream_resp = self.client.chat.completions.create(
-            model=params["model"], messages=message_dicts, stream=True
+            messages=message_dicts, stream=True, **params
         )
         for chunk in stream_resp:
             if not isinstance(chunk, dict):
@@ -258,9 +257,7 @@ class ChatPerplexity(BaseChatModel):
             return generate_from_stream(stream_iter)
         message_dicts, params = self._create_message_dicts(messages, stop)
         params = {**params, **kwargs}
-        response = self.client.chat.completions.create(
-            model=params["model"], messages=message_dicts
-        )
+        response = self.client.chat.completions.create(messages=message_dicts, **params)
         message = AIMessage(
             content=response.choices[0].message.content,
             additional_kwargs={"citations": response.citations},
@@ -271,8 +268,6 @@ class ChatPerplexity(BaseChatModel):
     def _invocation_params(self) -> Mapping[str, Any]:
         """Get the parameters used to invoke the model."""
         pplx_creds: Dict[str, Any] = {
-            "api_key": self.pplx_api_key,
-            "api_base": "https://api.perplexity.ai",
             "model": self.model,
         }
        return {**pplx_creds, **self._default_params}
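
The effect of switching the call sites to **params unpacking, as a minimal standalone sketch. This snippet is not part of the patch: "create" stands in for self.client.chat.completions.create, and the model name and values are placeholders.

# Stand-in for self.client.chat.completions.create; it only echoes the kwargs
# it receives so the forwarding behaviour is visible.
def create(messages, **kwargs):
    print("request kwargs:", kwargs)

# Placeholder params dict in the shape _default_params now produces plus the model name.
params = {"model": "sonar", "max_tokens": 256, "stream": False, "temperature": 0.7}
message_dicts = [{"role": "user", "content": "hello"}]

# Before the patch only the model name was forwarded, so max_tokens and
# temperature never reached the API:
create(messages=message_dicts, model=params["model"])

# After the patch the whole dict is unpacked, so every entry is sent as a
# keyword argument. This is presumably also why request_timeout, api_key, and
# api_base were dropped from _default_params / _invocation_params: once params
# is unpacked into the request, they would be passed to
# chat.completions.create as keyword arguments it does not expect.
create(messages=message_dicts, **params)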