From bcd5842b5d7f559a81d3cc5d4d486c84cc3ab8da Mon Sep 17 00:00:00 2001
From: Alejandro Companioni
Date: Fri, 23 Aug 2024 04:33:30 -0400
Subject: [PATCH] community[patch]: Updating default PPLX model to supported
 llama-3.1 model. (#25643)

# Issue

As of late July, Perplexity [no longer supports Llama 3 models](https://docs.perplexity.ai/changelog/introducing-new-and-improved-sonar-models).

# Description

This PR updates the default model and doc examples to reflect Perplexity's latest supported model. (Mostly updating the same places changed by #23723.)

# Twitter handle

`@acompa_` on behalf of the team at Not Diamond. Check us out [here](https://notdiamond.ai).

---------

Co-authored-by: Harrison Chase
---
 docs/docs/integrations/chat/perplexity.ipynb       | 12 +++++++++---
 .../langchain_community/chat_models/perplexity.py  |  6 +++---
 2 files changed, 12 insertions(+), 6 deletions(-)

diff --git a/docs/docs/integrations/chat/perplexity.ipynb b/docs/docs/integrations/chat/perplexity.ipynb
index 4b8e151750a..b2545aad14e 100644
--- a/docs/docs/integrations/chat/perplexity.ipynb
+++ b/docs/docs/integrations/chat/perplexity.ipynb
@@ -62,6 +62,12 @@
    "id": "97a8ce3a",
    "metadata": {},
    "source": [
+    "The code provided assumes that your PPLX_API_KEY is set in your environment variables. If you would like to manually specify your API key and also choose a different model, you can use the following code:\n",
+    "\n",
+    "```python\n",
+    "chat = ChatPerplexity(temperature=0, pplx_api_key=\"YOUR_API_KEY\", model=\"llama-3.1-sonar-small-128k-online\")\n",
+    "```\n",
+    "\n",
     "You can check a list of available models [here](https://docs.perplexity.ai/docs/model-cards). For reproducibility, we can set the API key dynamically by taking it as an input in this notebook."
    ]
   },
@@ -92,7 +98,7 @@
    },
    "outputs": [],
    "source": [
-    "chat = ChatPerplexity(temperature=0, model=\"llama-3-sonar-small-32k-online\")"
+    "chat = ChatPerplexity(temperature=0, model=\"llama-3.1-sonar-small-128k-online\")"
    ]
   },
   {
@@ -160,7 +166,7 @@
     }
    ],
    "source": [
-    "chat = ChatPerplexity(temperature=0, model=\"llama-3-sonar-small-32k-online\")\n",
+    "chat = ChatPerplexity(temperature=0, model=\"llama-3.1-sonar-small-128k-online\")\n",
     "prompt = ChatPromptTemplate.from_messages([(\"human\", \"Tell me a joke about {topic}\")])\n",
     "chain = prompt | chat\n",
     "response = chain.invoke({\"topic\": \"cats\"})\n",
@@ -209,7 +215,7 @@
     }
    ],
    "source": [
-    "chat = ChatPerplexity(temperature=0.7, model=\"llama-3-sonar-small-32k-online\")\n",
+    "chat = ChatPerplexity(temperature=0.7, model=\"llama-3.1-sonar-small-128k-online\")\n",
     "prompt = ChatPromptTemplate.from_messages(\n",
     "    [(\"human\", \"Give me a list of famous tourist attractions in Pakistan\")]\n",
     ")\n",
diff --git a/libs/community/langchain_community/chat_models/perplexity.py b/libs/community/langchain_community/chat_models/perplexity.py
index bcb8e1acfb3..ee93f48cbf8 100644
--- a/libs/community/langchain_community/chat_models/perplexity.py
+++ b/libs/community/langchain_community/chat_models/perplexity.py
@@ -55,20 +55,20 @@ class ChatPerplexity(BaseChatModel):
             from langchain_community.chat_models import ChatPerplexity
 
             chat = ChatPerplexity(
-                model="llama-3-sonar-small-32k-online",
+                model="llama-3.1-sonar-small-128k-online",
                 temperature=0.7,
             )
     """
 
     client: Any  #: :meta private:
-    model: str = "llama-3-sonar-small-32k-online"
+    model: str = "llama-3.1-sonar-small-128k-online"
     """Model name."""
     temperature: float = 0.7
     """What sampling temperature to use."""
     model_kwargs: Dict[str, Any] = Field(default_factory=dict)
     """Holds any model parameters valid for `create` call not explicitly specified."""
     pplx_api_key: Optional[str] = Field(None, alias="api_key")
-    """Base URL path for API requests,
+    """Base URL path for API requests,
     leave blank if not using a proxy or service emulator."""
     request_timeout: Optional[Union[float, Tuple[float, float]]] = Field(
         None, alias="timeout"
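
For quick verification of the new default, here is a minimal end-to-end sketch assembled from the doc examples in this patch. It assumes `langchain-community`, `langchain-core`, and `openai` are installed, and that `"YOUR_API_KEY"` is replaced with a real Perplexity key:

```python
import os

from langchain_community.chat_models import ChatPerplexity
from langchain_core.prompts import ChatPromptTemplate

# ChatPerplexity reads PPLX_API_KEY from the environment when pplx_api_key
# is not passed explicitly; "YOUR_API_KEY" here is a placeholder.
os.environ.setdefault("PPLX_API_KEY", "YOUR_API_KEY")

# Pin the model explicitly; after this patch, omitting model= yields the
# same "llama-3.1-sonar-small-128k-online" default.
chat = ChatPerplexity(temperature=0, model="llama-3.1-sonar-small-128k-online")

# Compose a simple LCEL chain: prompt template piped into the chat model.
prompt = ChatPromptTemplate.from_messages([("human", "Tell me a joke about {topic}")])
chain = prompt | chat

response = chain.invoke({"topic": "cats"})
print(response.content)
```

Passing `model=` explicitly, as the updated docs do, keeps notebooks reproducible even if the library default changes again.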