feat(perplexity): overhaul integration with official SDK and Search API (#34412)

This commit is contained in:
Kesku
2025-12-19 18:58:41 +01:00
committed by GitHub
parent 795e746ca7
commit c5baa3ac27
16 changed files with 869 additions and 519 deletions

View File

@@ -1,3 +1,27 @@
from langchain_perplexity.chat_models import ChatPerplexity
from langchain_perplexity.output_parsers import (
ReasoningJsonOutputParser,
ReasoningStructuredOutputParser,
strip_think_tags,
)
from langchain_perplexity.retrievers import PerplexitySearchRetriever
from langchain_perplexity.tools import PerplexitySearchResults
from langchain_perplexity.types import (
MediaResponse,
MediaResponseOverrides,
UserLocation,
WebSearchOptions,
)
# Public API of the langchain_perplexity package.
# (A stale single-entry `__all__ = ["ChatPerplexity"]` assignment that was
# immediately overwritten by this list has been removed as dead code.)
__all__ = [
    "ChatPerplexity",
    "PerplexitySearchRetriever",
    "PerplexitySearchResults",
    "UserLocation",
    "WebSearchOptions",
    "MediaResponse",
    "MediaResponseOverrides",
    "ReasoningJsonOutputParser",
    "ReasoningStructuredOutputParser",
    "strip_think_tags",
]

View File

@@ -0,0 +1,25 @@
import os
from typing import Any
from langchain_core.utils import convert_to_secret_str
from perplexity import Perplexity
def initialize_client(values: dict[str, Any]) -> dict[str, Any]:
    """Resolve the Perplexity API key and attach a client to ``values``.

    The key is taken from ``values["pplx_api_key"]`` when present, otherwise
    from the ``PPLX_API_KEY`` or ``PERPLEXITY_API_KEY`` environment variables
    (falling back to an empty string). An already-supplied ``values["client"]``
    is left untouched.

    Args:
        values: Raw model-validator values for a Pydantic model that declares
            ``pplx_api_key`` and ``client`` fields.

    Returns:
        The same ``values`` dict, with ``pplx_api_key`` wrapped as a secret
        and ``client`` populated with a ``Perplexity`` instance if needed.
    """
    raw_key = (
        values.get("pplx_api_key")
        or os.environ.get("PPLX_API_KEY")
        or os.environ.get("PERPLEXITY_API_KEY")
        or ""
    )
    secret_key = convert_to_secret_str(raw_key)
    values["pplx_api_key"] = secret_key
    api_key = secret_key.get_secret_value() if secret_key else None
    if not values.get("client"):
        values["client"] = Perplexity(api_key=api_key)
    return values

View File

@@ -3,12 +3,14 @@
from __future__ import annotations
import logging
from collections.abc import Iterator, Mapping
from collections.abc import AsyncIterator, Iterator, Mapping
from operator import itemgetter
from typing import Any, Literal, TypeAlias, cast
import openai
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models import (
LanguageModelInput,
ModelProfile,
@@ -16,6 +18,7 @@ from langchain_core.language_models import (
)
from langchain_core.language_models.chat_models import (
BaseChatModel,
agenerate_from_stream,
generate_from_stream,
)
from langchain_core.messages import (
@@ -42,6 +45,7 @@ from langchain_core.runnables import Runnable, RunnableMap, RunnablePassthrough
from langchain_core.utils import get_pydantic_field_names, secret_from_env
from langchain_core.utils.function_calling import convert_to_json_schema
from langchain_core.utils.pydantic import is_basemodel_subclass
from perplexity import AsyncPerplexity, Perplexity
from pydantic import BaseModel, ConfigDict, Field, SecretStr, model_validator
from typing_extensions import Self
@@ -50,6 +54,7 @@ from langchain_perplexity.output_parsers import (
ReasoningJsonOutputParser,
ReasoningStructuredOutputParser,
)
from langchain_perplexity.types import MediaResponse, WebSearchOptions
_DictOrPydanticClass: TypeAlias = dict[str, Any] | type[BaseModel]
_DictOrPydantic: TypeAlias = dict | BaseModel
@@ -84,8 +89,10 @@ def _create_usage_metadata(token_usage: dict) -> UsageMetadata:
# Build output_token_details for Perplexity-specific fields
output_token_details: OutputTokenDetails = {}
output_token_details["reasoning"] = token_usage.get("reasoning_tokens", 0)
output_token_details["citation_tokens"] = token_usage.get("citation_tokens", 0) # type: ignore[typeddict-unknown-key]
if (reasoning := token_usage.get("reasoning_tokens")) is not None:
output_token_details["reasoning"] = reasoning
if (citation_tokens := token_usage.get("citation_tokens")) is not None:
output_token_details["citation_tokens"] = citation_tokens # type: ignore[typeddict-unknown-key]
return UsageMetadata(
input_tokens=input_tokens,
@@ -100,7 +107,7 @@ class ChatPerplexity(BaseChatModel):
Setup:
To use, you should have the environment variable `PPLX_API_KEY` set to your API key.
Any parameters that are valid to be passed to the openai.create call
Any parameters that are valid to be passed to the perplexity.create call
can be passed in, even if not explicitly saved on this class.
```bash
@@ -157,12 +164,6 @@ class ChatPerplexity(BaseChatModel):
model.invoke(messages)
```
Invoke with perplexity-specific params:
```python
model.invoke(messages, extra_body={"search_recency_filter": "week"})
```
Stream:
```python
for chunk in model.stream(messages):
@@ -182,7 +183,8 @@ class ChatPerplexity(BaseChatModel):
```
""" # noqa: E501
client: Any = None
client: Any = Field(default=None, exclude=True)
async_client: Any = Field(default=None, exclude=True)
model: str = "sonar"
"""Model name."""
@@ -196,8 +198,7 @@ class ChatPerplexity(BaseChatModel):
pplx_api_key: SecretStr | None = Field(
default_factory=secret_from_env("PPLX_API_KEY", default=None), alias="api_key"
)
"""Base URL path for API requests,
leave blank if not using a proxy or service emulator."""
"""Perplexity API key."""
request_timeout: float | tuple[float, float] | None = Field(None, alias="timeout")
"""Timeout for requests to PerplexityChat completion API."""
@@ -211,6 +212,51 @@ class ChatPerplexity(BaseChatModel):
max_tokens: int | None = None
"""Maximum number of tokens to generate."""
search_mode: Literal["academic", "sec", "web"] | None = None
"""Search mode for specialized content: "academic", "sec", or "web"."""
reasoning_effort: Literal["low", "medium", "high"] | None = None
"""Reasoning effort: "low", "medium", or "high" (default)."""
language_preference: str | None = None
"""Language preference:"""
search_domain_filter: list[str] | None = None
"""Search domain filter: list of domains to filter search results (max 20)."""
return_images: bool = False
"""Whether to return images in the response."""
return_related_questions: bool = False
"""Whether to return related questions in the response."""
search_recency_filter: Literal["day", "week", "month", "year"] | None = None
"""Filter search results by recency: "day", "week", "month", or "year"."""
search_after_date_filter: str | None = None
"""Search after date filter: date in format "MM/DD/YYYY" (default)."""
search_before_date_filter: str | None = None
"""Only return results before this date (format: MM/DD/YYYY)."""
last_updated_after_filter: str | None = None
"""Only return results updated after this date (format: MM/DD/YYYY)."""
last_updated_before_filter: str | None = None
"""Only return results updated before this date (format: MM/DD/YYYY)."""
disable_search: bool = False
"""Whether to disable web search entirely."""
enable_search_classifier: bool = False
"""Whether to enable the search classifier."""
web_search_options: WebSearchOptions | None = None
"""Configuration for web search behavior including Pro Search."""
media_response: MediaResponse | None = None
"""Media response: "images", "videos", or "none" (default)."""
model_config = ConfigDict(populate_by_name=True)
@property
@@ -247,19 +293,16 @@ class ChatPerplexity(BaseChatModel):
@model_validator(mode="after")
def validate_environment(self) -> Self:
"""Validate that api key and python package exists in environment."""
try:
self.client = openai.OpenAI(
api_key=self.pplx_api_key.get_secret_value()
if self.pplx_api_key
else None,
base_url="https://api.perplexity.ai",
)
except AttributeError:
raise ValueError(
"`openai` has no `ChatCompletion` attribute, this is likely "
"due to an old version of the openai package. Try upgrading it "
"with `pip install --upgrade openai`."
)
pplx_api_key = (
self.pplx_api_key.get_secret_value() if self.pplx_api_key else None
)
if not self.client:
self.client = Perplexity(api_key=pplx_api_key)
if not self.async_client:
self.async_client = AsyncPerplexity(api_key=pplx_api_key)
return self
@model_validator(mode="after")
@@ -272,12 +315,49 @@ class ChatPerplexity(BaseChatModel):
@property
def _default_params(self) -> dict[str, Any]:
"""Get the default parameters for calling PerplexityChat API."""
return {
params: dict[str, Any] = {
"max_tokens": self.max_tokens,
"stream": self.streaming,
"temperature": self.temperature,
**self.model_kwargs,
}
if self.search_mode:
params["search_mode"] = self.search_mode
if self.reasoning_effort:
params["reasoning_effort"] = self.reasoning_effort
if self.language_preference:
params["language_preference"] = self.language_preference
if self.search_domain_filter:
params["search_domain_filter"] = self.search_domain_filter
if self.return_images:
params["return_images"] = self.return_images
if self.return_related_questions:
params["return_related_questions"] = self.return_related_questions
if self.search_recency_filter:
params["search_recency_filter"] = self.search_recency_filter
if self.search_after_date_filter:
params["search_after_date_filter"] = self.search_after_date_filter
if self.search_before_date_filter:
params["search_before_date_filter"] = self.search_before_date_filter
if self.last_updated_after_filter:
params["last_updated_after_filter"] = self.last_updated_after_filter
if self.last_updated_before_filter:
params["last_updated_before_filter"] = self.last_updated_before_filter
if self.disable_search:
params["disable_search"] = self.disable_search
if self.enable_search_classifier:
params["enable_search_classifier"] = self.enable_search_classifier
if self.web_search_options:
params["web_search_options"] = self.web_search_options.model_dump(
exclude_none=True
)
if self.media_response:
if "extra_body" not in params:
params["extra_body"] = {}
params["extra_body"]["media_response"] = self.media_response.model_dump(
exclude_none=True
)
return {**params, **self.model_kwargs}
def _convert_message_to_dict(self, message: BaseMessage) -> dict[str, Any]:
if isinstance(message, ChatMessage):
@@ -353,6 +433,7 @@ class ChatPerplexity(BaseChatModel):
added_model_name: bool = False
added_search_queries: bool = False
added_search_context_size: bool = False
for chunk in stream_resp:
if not isinstance(chunk, dict):
chunk = chunk.model_dump()
@@ -379,17 +460,26 @@ class ChatPerplexity(BaseChatModel):
if attr in chunk:
additional_kwargs[attr] = chunk[attr]
if chunk.get("videos"):
additional_kwargs["videos"] = chunk["videos"]
if chunk.get("reasoning_steps"):
additional_kwargs["reasoning_steps"] = chunk["reasoning_steps"]
generation_info = {}
if (model_name := chunk.get("model")) and not added_model_name:
generation_info["model_name"] = model_name
added_model_name = True
# Add num_search_queries to generation_info if present
if total_usage := chunk.get("usage"):
if num_search_queries := total_usage.get("num_search_queries"):
if not added_search_queries:
generation_info["num_search_queries"] = num_search_queries
added_search_queries = True
if not added_search_context_size:
if search_context_size := total_usage.get("search_context_size"):
generation_info["search_context_size"] = search_context_size
added_search_context_size = True
chunk = self._convert_delta_to_message_chunk(
choice["delta"], default_chunk_class
@@ -411,6 +501,91 @@ class ChatPerplexity(BaseChatModel):
run_manager.on_llm_new_token(chunk.text, chunk=chunk)
yield chunk
async def _astream(
    self,
    messages: list[BaseMessage],
    stop: list[str] | None = None,
    run_manager: AsyncCallbackManagerForLLMRun | None = None,
    **kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
    """Async counterpart of ``_stream``: stream chat chunks from Perplexity.

    Converts messages to the provider wire format, opens a streaming request
    on ``self.async_client``, and yields ``ChatGenerationChunk`` objects.
    Perplexity reports cumulative usage per chunk, so per-chunk usage is
    derived by subtracting the previous running total.
    """
    message_dicts, params = self._create_message_dicts(messages, stop)
    params = {**params, **kwargs}
    default_chunk_class = AIMessageChunk
    # `stream=True` is passed explicitly below; drop any caller-supplied value.
    params.pop("stream", None)
    if stop:
        params["stop_sequences"] = stop
    stream_resp = await self.async_client.chat.completions.create(
        messages=message_dicts, stream=True, **params
    )
    first_chunk = True
    prev_total_usage: UsageMetadata | None = None
    # One-shot flags so model name / search stats land in generation_info once.
    added_model_name: bool = False
    added_search_queries: bool = False
    async for chunk in stream_resp:
        if not isinstance(chunk, dict):
            chunk = chunk.model_dump()
        if total_usage := chunk.get("usage"):
            # Usage is cumulative; convert to a per-chunk delta.
            lc_total_usage = _create_usage_metadata(total_usage)
            if prev_total_usage:
                usage_metadata: UsageMetadata | None = subtract_usage(
                    lc_total_usage, prev_total_usage
                )
            else:
                usage_metadata = lc_total_usage
            prev_total_usage = lc_total_usage
        else:
            usage_metadata = None
        if len(chunk["choices"]) == 0:
            continue
        choice = chunk["choices"][0]
        additional_kwargs = {}
        if first_chunk:
            # Search metadata is attached only to the first emitted chunk.
            additional_kwargs["citations"] = chunk.get("citations", [])
            for attr in ["images", "related_questions", "search_results"]:
                if attr in chunk:
                    additional_kwargs[attr] = chunk[attr]
            if chunk.get("videos"):
                additional_kwargs["videos"] = chunk["videos"]
            if chunk.get("reasoning_steps"):
                additional_kwargs["reasoning_steps"] = chunk["reasoning_steps"]
        generation_info = {}
        if (model_name := chunk.get("model")) and not added_model_name:
            generation_info["model_name"] = model_name
            added_model_name = True
        if total_usage := chunk.get("usage"):
            if num_search_queries := total_usage.get("num_search_queries"):
                if not added_search_queries:
                    generation_info["num_search_queries"] = num_search_queries
                    added_search_queries = True
            if search_context_size := total_usage.get("search_context_size"):
                generation_info["search_context_size"] = search_context_size
        chunk = self._convert_delta_to_message_chunk(
            choice["delta"], default_chunk_class
        )
        if isinstance(chunk, AIMessageChunk) and usage_metadata:
            chunk.usage_metadata = usage_metadata
        if first_chunk:
            chunk.additional_kwargs |= additional_kwargs
            first_chunk = False
        if finish_reason := choice.get("finish_reason"):
            generation_info["finish_reason"] = finish_reason
        default_chunk_class = chunk.__class__
        chunk = ChatGenerationChunk(message=chunk, generation_info=generation_info)
        if run_manager:
            await run_manager.on_llm_new_token(chunk.text, chunk=chunk)
        yield chunk
def _generate(
self,
messages: list[BaseMessage],
@@ -427,8 +602,9 @@ class ChatPerplexity(BaseChatModel):
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs}
response = self.client.chat.completions.create(messages=message_dicts, **params)
if usage := getattr(response, "usage", None):
usage_dict = usage.model_dump()
if hasattr(response, "usage") and response.usage:
usage_dict = response.usage.model_dump()
usage_metadata = _create_usage_metadata(usage_dict)
else:
usage_metadata = None
@@ -436,15 +612,87 @@ class ChatPerplexity(BaseChatModel):
additional_kwargs = {}
for attr in ["citations", "images", "related_questions", "search_results"]:
if hasattr(response, attr):
if hasattr(response, attr) and getattr(response, attr):
additional_kwargs[attr] = getattr(response, attr)
# Build response_metadata with model_name and num_search_queries
if hasattr(response, "videos") and response.videos:
additional_kwargs["videos"] = [
v.model_dump() if hasattr(v, "model_dump") else v
for v in response.videos
]
if hasattr(response, "reasoning_steps") and response.reasoning_steps:
additional_kwargs["reasoning_steps"] = [
r.model_dump() if hasattr(r, "model_dump") else r
for r in response.reasoning_steps
]
response_metadata: dict[str, Any] = {
"model_name": getattr(response, "model", self.model)
}
if num_search_queries := usage_dict.get("num_search_queries"):
response_metadata["num_search_queries"] = num_search_queries
if search_context_size := usage_dict.get("search_context_size"):
response_metadata["search_context_size"] = search_context_size
message = AIMessage(
content=response.choices[0].message.content,
additional_kwargs=additional_kwargs,
usage_metadata=usage_metadata,
response_metadata=response_metadata,
)
return ChatResult(generations=[ChatGeneration(message=message)])
async def _agenerate(
self,
messages: list[BaseMessage],
stop: list[str] | None = None,
run_manager: AsyncCallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> ChatResult:
if self.streaming:
stream_iter = self._astream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
if stream_iter:
return await agenerate_from_stream(stream_iter)
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs}
response = await self.async_client.chat.completions.create(
messages=message_dicts, **params
)
if hasattr(response, "usage") and response.usage:
usage_dict = response.usage.model_dump()
usage_metadata = _create_usage_metadata(usage_dict)
else:
usage_metadata = None
usage_dict = {}
additional_kwargs = {}
for attr in ["citations", "images", "related_questions", "search_results"]:
if hasattr(response, attr) and getattr(response, attr):
additional_kwargs[attr] = getattr(response, attr)
if hasattr(response, "videos") and response.videos:
additional_kwargs["videos"] = [
v.model_dump() if hasattr(v, "model_dump") else v
for v in response.videos
]
if hasattr(response, "reasoning_steps") and response.reasoning_steps:
additional_kwargs["reasoning_steps"] = [
r.model_dump() if hasattr(r, "model_dump") else r
for r in response.reasoning_steps
]
response_metadata: dict[str, Any] = {
"model_name": getattr(response, "model", self.model)
}
if num_search_queries := usage_dict.get("num_search_queries"):
response_metadata["num_search_queries"] = num_search_queries
if search_context_size := usage_dict.get("search_context_size"):
response_metadata["search_context_size"] = search_context_size
message = AIMessage(
content=response.choices[0].message.content,

View File

@@ -64,4 +64,16 @@ _PROFILES: dict[str, dict[str, Any]] = {
"reasoning_output": True,
"tool_calling": False,
},
"sonar-deep-research": {
"max_input_tokens": 128000,
"max_output_tokens": 8192,
"image_inputs": True,
"audio_inputs": False,
"video_inputs": False,
"image_outputs": False,
"audio_outputs": False,
"video_outputs": False,
"reasoning_output": True,
"tool_calling": False,
},
}

View File

@@ -0,0 +1,13 @@
# Model-profile data for the Perplexity provider.
provider = "perplexity"

# Capability overrides for the sonar-deep-research model.
[overrides."sonar-deep-research"]
max_input_tokens = 128000
max_output_tokens = 8192
image_inputs = true
audio_inputs = false
video_inputs = false
image_outputs = false
audio_outputs = false
video_outputs = false
reasoning_output = true
tool_calling = false

View File

@@ -0,0 +1,68 @@
from __future__ import annotations
from typing import Any, Literal
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from pydantic import Field, SecretStr, model_validator
from langchain_perplexity._utils import initialize_client
class PerplexitySearchRetriever(BaseRetriever):
    """Retriever backed by the Perplexity Search API.

    Issues a single search request per query and wraps each hit in a
    ``Document`` (snippet as content; title/url/date metadata).
    """

    k: int = Field(default=10, description="Max results (1-20)")
    max_tokens: int = Field(default=25000, description="Max tokens across all results")
    max_tokens_per_page: int = Field(default=1024, description="Max tokens per page")
    country: str | None = Field(default=None, description="ISO country code")
    search_domain_filter: list[str] | None = Field(
        default=None, description="Domain filter (max 20)"
    )
    search_recency_filter: Literal["day", "week", "month", "year"] | None = None
    search_after_date: str | None = Field(
        default=None, description="Date filter (format: %m/%d/%Y)"
    )
    search_before_date: str | None = Field(
        default=None, description="Date filter (format: %m/%d/%Y)"
    )
    client: Any = Field(default=None, exclude=True)
    pplx_api_key: SecretStr = Field(default=SecretStr(""))

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: dict) -> Any:
        """Resolve the API key and build the SDK client if one was not given."""
        return initialize_client(values)

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> list[Document]:
        """Run a Perplexity search for ``query`` and return hits as Documents."""
        candidate_params: dict[str, Any] = {
            "query": query,
            "max_results": self.k,
            "max_tokens": self.max_tokens,
            "max_tokens_per_page": self.max_tokens_per_page,
            "country": self.country,
            "search_domain_filter": self.search_domain_filter,
            "search_recency_filter": self.search_recency_filter,
            "search_after_date": self.search_after_date,
            "search_before_date": self.search_before_date,
        }
        # Drop unset options so the SDK applies its own defaults.
        request = {name: value for name, value in candidate_params.items() if value is not None}
        response = self.client.search.create(**request)
        documents: list[Document] = []
        for hit in response.results:
            documents.append(
                Document(
                    page_content=hit.snippet,
                    metadata={
                        "title": hit.title,
                        "url": hit.url,
                        "date": hit.date,
                        "last_updated": hit.last_updated,
                    },
                )
            )
        return documents

View File

@@ -0,0 +1,66 @@
from __future__ import annotations
from typing import Any, Literal
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import Field, SecretStr, model_validator
from langchain_perplexity._utils import initialize_client
class PerplexitySearchResults(BaseTool):
    """Tool exposing the Perplexity Search API.

    Returns a list of result dicts on success; on any failure the error is
    summarized as a short string (exception class name only, to avoid leaking
    details into tool output).
    """

    name: str = "perplexity_search_results_json"
    description: str = (
        "A wrapper around Perplexity Search. "
        "Input should be a search query. "
        "Output is a JSON array of the query results"
    )
    client: Any = Field(default=None, exclude=True)
    pplx_api_key: SecretStr = Field(default=SecretStr(""))

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: dict) -> Any:
        """Resolve the API key and build the SDK client if one was not given."""
        return initialize_client(values)

    def _run(
        self,
        query: str | list[str],
        max_results: int = 10,
        country: str | None = None,
        search_domain_filter: list[str] | None = None,
        search_recency_filter: Literal["day", "week", "month", "year"] | None = None,
        search_after_date: str | None = None,
        search_before_date: str | None = None,
        run_manager: CallbackManagerForToolRun | None = None,
    ) -> list[dict] | str:
        """Execute the search and return result dicts, or an error string."""
        try:
            requested: dict[str, Any] = {
                "query": query,
                "max_results": max_results,
                "country": country,
                "search_domain_filter": search_domain_filter,
                "search_recency_filter": search_recency_filter,
                "search_after_date": search_after_date,
                "search_before_date": search_before_date,
            }
            # Omit unset options so the SDK applies its own defaults.
            response = self.client.search.create(
                **{key: val for key, val in requested.items() if val is not None}
            )
            results: list[dict] = []
            for hit in response.results:
                results.append(
                    {
                        "title": hit.title,
                        "url": hit.url,
                        "snippet": hit.snippet,
                        "date": hit.date,
                        "last_updated": hit.last_updated,
                    }
                )
            return results
        except Exception as e:
            msg = f"Perplexity search failed: {type(e).__name__}"
            return msg

View File

@@ -0,0 +1,29 @@
from __future__ import annotations
from typing import Literal
from pydantic import BaseModel
class UserLocation(BaseModel):
    """Approximate end-user location used to localize search results."""

    latitude: float | None = None
    longitude: float | None = None
    country: str | None = None  # presumably an ISO country code — TODO confirm
    region: str | None = None
    city: str | None = None


class WebSearchOptions(BaseModel):
    """Configuration options for Perplexity web-search behavior."""

    search_context_size: Literal["low", "medium", "high"] | None = None
    user_location: UserLocation | None = None
    # NOTE(review): "pro" appears to enable Pro Search; confirm semantics of
    # "fast"/"auto" against the provider API docs.
    search_type: Literal["fast", "pro", "auto"] | None = None
    image_search_relevance_enhanced: bool | None = None


class MediaResponseOverrides(BaseModel):
    """Per-media toggles applied on top of the model defaults."""

    return_videos: bool | None = None
    return_images: bool | None = None


class MediaResponse(BaseModel):
    """Media-response settings (sent to the API via the request's extra_body)."""

    overrides: MediaResponseOverrides | None = None

View File

@@ -13,7 +13,7 @@ version = "1.1.0"
requires-python = ">=3.10.0,<4.0.0"
dependencies = [
"langchain-core>=1.1.0,<2.0.0",
"openai>=2.0.0,<3.0.0",
"perplexityai>=0.22.0",
]
[project.urls]

View File

@@ -0,0 +1,125 @@
"""Integration tests for ChatPerplexity."""
import os
import pytest
from langchain_core.messages import HumanMessage
from langchain_perplexity import ChatPerplexity, MediaResponse, WebSearchOptions
@pytest.mark.skipif(not os.environ.get("PPLX_API_KEY"), reason="PPLX_API_KEY not set")
class TestChatPerplexityIntegration:
    """Live-API integration tests for ChatPerplexity.

    These hit the real Perplexity service and are skipped when the
    ``PPLX_API_KEY`` environment variable is not set.
    """

    def test_standard_generation(self) -> None:
        """Test standard generation."""
        chat = ChatPerplexity(model="sonar", temperature=0)
        message = HumanMessage(content="Hello! How are you?")
        response = chat.invoke([message])
        assert response.content
        assert isinstance(response.content, str)

    async def test_async_generation(self) -> None:
        """Test async generation."""
        chat = ChatPerplexity(model="sonar", temperature=0)
        message = HumanMessage(content="Hello! How are you?")
        response = await chat.ainvoke([message])
        assert response.content
        assert isinstance(response.content, str)

    def test_pro_search(self) -> None:
        """Test Pro Search (reasoning_steps extraction)."""
        # Pro search is available on sonar-pro
        chat = ChatPerplexity(
            model="sonar-pro",
            temperature=0,
            web_search_options=WebSearchOptions(search_type="pro"),
            streaming=True,
        )
        message = HumanMessage(content="Who won the 2024 US election and why?")
        # Collect chunks to inspect streamed content and metadata.
        chunks = list(chat.stream([message]))
        full_content = "".join(c.content for c in chunks if isinstance(c.content, str))
        assert full_content
        # reasoning_steps are not guaranteed for every query, so the hard
        # requirement is only that streaming produced chunks. (Previously
        # this was a dead `if has_reasoning: assert True` branch.)
        assert len(chunks) > 0

    async def test_streaming(self) -> None:
        """Test streaming."""
        chat = ChatPerplexity(model="sonar", temperature=0)
        message = HumanMessage(content="Count to 5")
        async for chunk in chat.astream([message]):
            assert isinstance(chunk.content, str)

    def test_citations_and_search_results(self) -> None:
        """Test that citations and search results are returned."""
        chat = ChatPerplexity(model="sonar", temperature=0)
        message = HumanMessage(content="Who is the CEO of OpenAI?")
        response = chat.invoke([message])
        # Citations are usually in additional_kwargs
        assert "citations" in response.additional_kwargs
        # Search results might be there too; presence depends on whether a
        # search was actually performed for this query.
        if response.additional_kwargs.get("citations"):
            assert len(response.additional_kwargs["citations"]) > 0

    def test_search_control(self) -> None:
        """Test search control parameters."""
        # Disabled search should still complete (no web lookup needed).
        chat = ChatPerplexity(model="sonar", disable_search=True)
        message = HumanMessage(content="What is 2+2?")
        response = chat.invoke([message])
        assert response.content
        # The search classifier lets the API decide whether to search.
        chat_classifier = ChatPerplexity(model="sonar", enable_search_classifier=True)
        response_classifier = chat_classifier.invoke([message])
        assert response_classifier.content

    def test_search_recency_filter(self) -> None:
        """Test search_recency_filter parameter."""
        chat = ChatPerplexity(model="sonar", search_recency_filter="month")
        message = HumanMessage(content="Latest AI news")
        response = chat.invoke([message])
        assert response.content

    def test_search_domain_filter(self) -> None:
        """Test search_domain_filter parameter."""
        chat = ChatPerplexity(model="sonar", search_domain_filter=["wikipedia.org"])
        message = HumanMessage(content="Python programming language")
        response = chat.invoke([message])
        # Verify citations come from wikipedia if any were returned.
        if citations := response.additional_kwargs.get("citations"):
            assert any("wikipedia.org" in c for c in citations)

    def test_media_and_metadata(self) -> None:
        """Test related questions and images."""
        chat = ChatPerplexity(
            model="sonar-pro",
            return_related_questions=True,
            return_images=True,
            # Media response overrides for video
            media_response=MediaResponse(overrides={"return_videos": True}),
        )
        message = HumanMessage(content="Apollo 11 moon landing")
        response = chat.invoke([message])
        # Check related questions
        if related := response.additional_kwargs.get("related_questions"):
            assert len(related) > 0
        # Check images
        if images := response.additional_kwargs.get("images"):
            assert len(images) > 0
        # Check videos (might not always be present but structure should handle it)
        if videos := response.additional_kwargs.get("videos"):
            assert len(videos) > 0

View File

@@ -0,0 +1,56 @@
"""Integration tests for Perplexity Search API."""
import os
import pytest
from langchain_core.documents import Document
from langchain_perplexity import PerplexitySearchResults, PerplexitySearchRetriever
@pytest.mark.skipif(not os.environ.get("PPLX_API_KEY"), reason="PPLX_API_KEY not set")
class TestPerplexitySearchAPI:
    """Live tests against the Perplexity Search API (retriever and tool)."""

    def test_search_retriever_basic(self) -> None:
        """A plain retriever query returns Documents with populated metadata."""
        retriever = PerplexitySearchRetriever(k=3)
        documents = retriever.invoke("What is the capital of France?")
        assert documents
        first = documents[0]
        assert isinstance(first, Document)
        assert "Paris" in first.page_content
        assert first.metadata["title"]
        assert first.metadata["url"]

    def test_search_retriever_with_filters(self) -> None:
        """Recency and domain filters restrict results to matching sources."""
        retriever = PerplexitySearchRetriever(
            k=3, search_recency_filter="month", search_domain_filter=["wikipedia.org"]
        )
        documents = retriever.invoke("Python programming language")
        assert documents
        assert all("wikipedia.org" in doc.metadata["url"] for doc in documents)

    def test_search_tool_basic(self) -> None:
        """The tool returns a non-empty list of result dicts."""
        tool = PerplexitySearchResults(max_results=3)
        output = tool.invoke("Who won the 2024 Super Bowl?")
        # BaseTool.invoke calls _run; with return_direct False (the default)
        # the tool output is _run's list of dicts.
        assert isinstance(output, list)
        assert output
        for key in ("title", "url", "snippet"):
            assert key in output[0]

    def test_search_tool_multi_query(self) -> None:
        """Multiple queries in one call yield combined results."""
        tool = PerplexitySearchResults(max_results=2)
        # Dict input avoids BaseTool's validation error for bare list inputs.
        output = tool.invoke({"query": ["Apple stock price", "Microsoft stock price"]})
        assert isinstance(output, list)
        assert output

View File

@@ -4,7 +4,7 @@ from unittest.mock import MagicMock
from langchain_core.messages import AIMessageChunk, BaseMessage
from pytest_mock import MockerFixture
from langchain_perplexity import ChatPerplexity
from langchain_perplexity import ChatPerplexity, MediaResponse, WebSearchOptions
from langchain_perplexity.chat_models import _create_usage_metadata
@@ -41,6 +41,32 @@ def test_perplexity_initialization() -> None:
)
def test_perplexity_new_params() -> None:
    """Test new Perplexity-specific parameters."""
    llm = ChatPerplexity(
        model="sonar-pro",
        search_mode="academic",
        web_search_options=WebSearchOptions(
            search_type="pro", search_context_size="high"
        ),
        media_response=MediaResponse(overrides={"return_videos": True}),
        return_images=True,
    )
    defaults = llm._default_params
    # Scalar options are passed through verbatim.
    assert defaults["search_mode"] == "academic"
    assert defaults["return_images"] is True
    # Structured options are dumped to plain dicts with unset fields removed.
    expected_search = {"search_type": "pro", "search_context_size": "high"}
    assert defaults["web_search_options"] == expected_search
    # media_response travels inside extra_body.
    expected_media = {"overrides": {"return_videos": True}}
    assert defaults["extra_body"]["media_response"] == expected_media
def test_perplexity_stream_includes_citations(mocker: MockerFixture) -> None:
"""Test that the stream method includes citations in the additional_kwargs."""
llm = ChatPerplexity(model="test", timeout=30, verbose=True)
@@ -88,214 +114,34 @@ def test_perplexity_stream_includes_citations(mocker: MockerFixture) -> None:
patcher.assert_called_once()
def test_perplexity_stream_includes_citations_and_images(mocker: MockerFixture) -> None:
    """Test that stream surfaces citations and images in additional_kwargs."""
    llm = ChatPerplexity(model="test", timeout=30, verbose=True)
    # The API repeats citations/images on every chunk; the model should
    # attach them only to the first emitted chunk.
    mock_chunk_0 = {
        "choices": [{"delta": {"content": "Hello "}, "finish_reason": None}],
        "citations": ["example.com", "example2.com"],
        "images": [
            {
                "image_url": "mock_image_url",
                "origin_url": "mock_origin_url",
                "height": 100,
                "width": 100,
            }
        ],
    }
    mock_chunk_1 = {
        "choices": [{"delta": {"content": "Perplexity"}, "finish_reason": None}],
        "citations": ["example.com", "example2.com"],
        "images": [
            {
                "image_url": "mock_image_url",
                "origin_url": "mock_origin_url",
                "height": 100,
                "width": 100,
            }
        ],
    }
    # Terminal chunk: empty delta with a stop finish_reason.
    mock_chunk_2 = {
        "choices": [{"delta": {}, "finish_reason": "stop"}],
    }
    mock_chunks: list[dict[str, Any]] = [mock_chunk_0, mock_chunk_1, mock_chunk_2]
    mock_stream = MagicMock()
    mock_stream.__iter__.return_value = mock_chunks
    patcher = mocker.patch.object(
        llm.client.chat.completions, "create", return_value=mock_stream
    )
    stream = llm.stream("Hello langchain")
    full: BaseMessage | None = None
    chunks_list = list(stream)
    # BaseChatModel.stream() adds an extra chunk after the final chunk from _stream
    assert len(chunks_list) == 4
    for i, chunk in enumerate(
        chunks_list[:3]
    ):  # Only check first 3 chunks against mock
        full = chunk if full is None else cast(BaseMessage, full + chunk)
        assert chunk.content == mock_chunks[i]["choices"][0]["delta"].get("content", "")
        if i == 0:
            # Metadata must be present on the first chunk only.
            assert chunk.additional_kwargs["citations"] == [
                "example.com",
                "example2.com",
            ]
            assert chunk.additional_kwargs["images"] == [
                {
                    "image_url": "mock_image_url",
                    "origin_url": "mock_origin_url",
                    "height": 100,
                    "width": 100,
                }
            ]
        else:
            assert "citations" not in chunk.additional_kwargs
            assert "images" not in chunk.additional_kwargs
    # Process the 4th chunk
    assert full is not None
    full = cast(BaseMessage, full + chunks_list[3])
    assert isinstance(full, AIMessageChunk)
    assert full.content == "Hello Perplexity"
    # Merging all chunks yields the metadata exactly once.
    assert full.additional_kwargs == {
        "citations": ["example.com", "example2.com"],
        "images": [
            {
                "image_url": "mock_image_url",
                "origin_url": "mock_origin_url",
                "height": 100,
                "width": 100,
            }
        ],
    }
    patcher.assert_called_once()
def test_perplexity_stream_includes_citations_and_related_questions(
    mocker: MockerFixture,
) -> None:
    """Test that the stream method includes citations in the additional_kwargs."""
    llm = ChatPerplexity(model="test", timeout=30, verbose=True)
    expected_citations = ["example.com", "example2.com"]
    expected_related = ["example_question_1", "example_question_2"]
    mock_chunks: list[dict[str, Any]] = [
        {
            "choices": [{"delta": {"content": "Hello "}, "finish_reason": None}],
            "citations": ["example.com", "example2.com"],
            "related_questions": ["example_question_1", "example_question_2"],
        },
        {
            "choices": [{"delta": {"content": "Perplexity"}, "finish_reason": None}],
            "citations": ["example.com", "example2.com"],
            "related_questions": ["example_question_1", "example_question_2"],
        },
        {"choices": [{"delta": {}, "finish_reason": "stop"}]},
    ]
    mock_stream = MagicMock()
    mock_stream.__iter__.return_value = mock_chunks
    patcher = mocker.patch.object(
        llm.client.chat.completions, "create", return_value=mock_stream
    )
    chunks_list = list(llm.stream("Hello langchain"))
    # BaseChatModel.stream() adds an extra chunk after the final chunk from _stream
    assert len(chunks_list) == 4
    accumulated: BaseMessage | None = None
    # Only the first three chunks mirror the mocked payloads.
    for idx, piece in enumerate(chunks_list[:3]):
        if accumulated is None:
            accumulated = piece
        else:
            accumulated = cast(BaseMessage, accumulated + piece)
        expected_text = mock_chunks[idx]["choices"][0]["delta"].get("content", "")
        assert piece.content == expected_text
        if idx == 0:
            # Citations and related questions surface only on the first chunk.
            assert piece.additional_kwargs["citations"] == expected_citations
            assert piece.additional_kwargs["related_questions"] == expected_related
        else:
            assert "citations" not in piece.additional_kwargs
            assert "related_questions" not in piece.additional_kwargs
    # Fold in the trailing chunk and verify the aggregated message.
    assert accumulated is not None
    accumulated = cast(BaseMessage, accumulated + chunks_list[3])
    assert isinstance(accumulated, AIMessageChunk)
    assert accumulated.content == "Hello Perplexity"
    assert accumulated.additional_kwargs == {
        "citations": expected_citations,
        "related_questions": expected_related,
    }
    patcher.assert_called_once()
def test_perplexity_stream_includes_citations_and_search_results(
    mocker: MockerFixture,
) -> None:
    """Test that the stream method exposes `search_results` via additional_kwargs."""
    # NOTE(review): this function has no body beyond its docstring — it looks
    # like a merge/diff artifact (its assertions appear to have been absorbed
    # into a neighbouring test). Confirm intent: either restore the assertions
    # or delete this stub, since an assertion-free test always passes.
def test_perplexity_stream_includes_videos_and_reasoning(mocker: MockerFixture) -> None:
    """Test that stream extracts videos and reasoning_steps.

    NOTE(review): this body appears to contain interleaved remnants of an older
    `search_results` test (duplicate dict keys, a shadowed `mock_chunks`
    assignment, and assertions that do not match the final mock payloads).
    The suspicious spots are flagged inline below — confirm intent and clean up.
    """
    llm = ChatPerplexity(model="test", timeout=30, verbose=True)
    mock_chunk_0 = {
        # NOTE(review): "choices" appears twice in this literal; Python keeps
        # only the later value, so the "Hello " delta below is discarded.
        "choices": [{"delta": {"content": "Hello "}, "finish_reason": None}],
        "citations": ["example.com/a", "example.com/b"],
        "search_results": [
            {"title": "Mock result", "url": "https://example.com/result", "date": None}
        ],
        "choices": [{"delta": {"content": "Thinking... "}, "finish_reason": None}],
        "videos": [{"url": "http://video.com", "thumbnail_url": "http://thumb.com"}],
        "reasoning_steps": [{"thought": "I should search", "type": "web_search"}],
    }
    mock_chunk_1 = {
        "choices": [{"delta": {"content": "Perplexity"}, "finish_reason": None}],
        "citations": ["example.com/a", "example.com/b"],
        "search_results": [
            {"title": "Mock result", "url": "https://example.com/result", "date": None}
        ],
    }
    mock_chunk_2 = {
        "choices": [{"delta": {}, "finish_reason": "stop"}],
    }
    # NOTE(review): the second assignment shadows the first, dropping
    # mock_chunk_2 — yet the `assert len(chunks_list) == 4` below expects the
    # three-chunk stream plus the extra chunk BaseChatModel.stream() appends.
    mock_chunks: list[dict[str, Any]] = [mock_chunk_0, mock_chunk_1, mock_chunk_2]
    mock_chunks: list[dict[str, Any]] = [mock_chunk_0, mock_chunk_1]
    mock_stream = MagicMock()
    mock_stream.__iter__.return_value = mock_chunks
    patcher = mocker.patch.object(
        llm.client.chat.completions, "create", return_value=mock_stream
    )
    stream = llm.stream("Hello langchain")
    full: BaseMessage | None = None
    chunks_list = list(stream)
    # BaseChatModel.stream() adds an extra chunk after the final chunk from _stream
    assert len(chunks_list) == 4
    for i, chunk in enumerate(
        chunks_list[:3]
    ):  # Only check first 3 chunks against mock
        full = chunk if full is None else cast(BaseMessage, full + chunk)
        assert chunk.content == mock_chunks[i]["choices"][0]["delta"].get("content", "")
        if i == 0:
            # NOTE(review): these citations/search_results assertions belong to
            # the older test variant; they don't exercise videos/reasoning.
            assert chunk.additional_kwargs["citations"] == [
                "example.com/a",
                "example.com/b",
            ]
            assert chunk.additional_kwargs["search_results"] == [
                {
                    "title": "Mock result",
                    "url": "https://example.com/result",
                    "date": None,
                }
            ]
        else:
            assert "citations" not in chunk.additional_kwargs
            assert "search_results" not in chunk.additional_kwargs
    # Process the 4th chunk
    assert full is not None
    full = cast(BaseMessage, full + chunks_list[3])
    assert isinstance(full, AIMessageChunk)
    assert full.content == "Hello Perplexity"
    assert full.additional_kwargs == {
        "citations": ["example.com/a", "example.com/b"],
        "search_results": [
            {"title": "Mock result", "url": "https://example.com/result", "date": None}
        ],
    }
    # NOTE(review): the model is re-patched and the stream consumed a second
    # time here, after which `patcher.assert_called_once()` and the
    # videos/reasoning assertions run — this ordering looks like two test
    # bodies spliced together; `mock_stream.__iter__` may also be exhausted.
    mocker.patch.object(llm.client.chat.completions, "create", return_value=mock_stream)
    patcher.assert_called_once()
    stream = list(llm.stream("test"))
    first_chunk = stream[0]
    assert "videos" in first_chunk.additional_kwargs
    assert first_chunk.additional_kwargs["videos"][0]["url"] == "http://video.com"
    assert "reasoning_steps" in first_chunk.additional_kwargs
    assert (
        first_chunk.additional_kwargs["reasoning_steps"][0]["thought"]
        == "I should search"
    )
def test_create_usage_metadata_basic() -> None:
@@ -304,6 +150,8 @@ def test_create_usage_metadata_basic() -> None:
"prompt_tokens": 10,
"completion_tokens": 20,
"total_tokens": 30,
"reasoning_tokens": 0,
"citation_tokens": 0,
}
usage_metadata = _create_usage_metadata(token_usage)
@@ -315,94 +163,6 @@ def test_create_usage_metadata_basic() -> None:
assert usage_metadata["output_token_details"]["citation_tokens"] == 0 # type: ignore[typeddict-item]
def test_create_usage_metadata_with_reasoning_tokens() -> None:
    """Test _create_usage_metadata with reasoning tokens."""
    metadata = _create_usage_metadata(
        {
            "prompt_tokens": 50,
            "completion_tokens": 100,
            "total_tokens": 150,
            "reasoning_tokens": 25,
        }
    )
    assert metadata["input_tokens"] == 50
    assert metadata["output_tokens"] == 100
    assert metadata["total_tokens"] == 150
    # Reasoning tokens propagate; citation tokens default to zero.
    details = metadata["output_token_details"]
    assert details["reasoning"] == 25
    assert details["citation_tokens"] == 0  # type: ignore[typeddict-item]
def test_create_usage_metadata_with_citation_tokens() -> None:
    """Test _create_usage_metadata with citation tokens."""
    metadata = _create_usage_metadata(
        {
            "prompt_tokens": 100,
            "completion_tokens": 200,
            "total_tokens": 300,
            "citation_tokens": 15,
        }
    )
    assert metadata["input_tokens"] == 100
    assert metadata["output_tokens"] == 200
    assert metadata["total_tokens"] == 300
    # Citation tokens propagate; reasoning tokens default to zero.
    details = metadata["output_token_details"]
    assert details["reasoning"] == 0
    assert details["citation_tokens"] == 15  # type: ignore[typeddict-item]
def test_create_usage_metadata_with_all_token_types() -> None:
    """Test _create_usage_metadata with all token types.

    Tests reasoning tokens and citation tokens together.
    """
    raw_usage = {
        "prompt_tokens": 75,
        "completion_tokens": 150,
        "total_tokens": 225,
        "reasoning_tokens": 30,
        "citation_tokens": 20,
    }
    metadata = _create_usage_metadata(raw_usage)
    details = metadata["output_token_details"]
    assert metadata["input_tokens"] == 75
    assert metadata["output_tokens"] == 150
    assert metadata["total_tokens"] == 225
    # Both optional counters flow through simultaneously.
    assert details["reasoning"] == 30
    assert details["citation_tokens"] == 20  # type: ignore[typeddict-item]
def test_create_usage_metadata_missing_optional_fields() -> None:
    """Test _create_usage_metadata with missing optional fields defaults to 0."""
    metadata = _create_usage_metadata({"prompt_tokens": 25, "completion_tokens": 50})
    assert metadata["input_tokens"] == 25
    assert metadata["output_tokens"] == 50
    # Total tokens should be calculated if not provided
    assert metadata["total_tokens"] == 75
    # Absent optional counters fall back to zero.
    details = metadata["output_token_details"]
    assert details["reasoning"] == 0
    assert details["citation_tokens"] == 0  # type: ignore[typeddict-item]
def test_create_usage_metadata_empty_dict() -> None:
    """Test _create_usage_metadata with empty token usage dict."""
    empty_usage: dict = {}
    metadata = _create_usage_metadata(empty_usage)
    # Every counter — including the optional ones — defaults to zero.
    assert metadata["input_tokens"] == 0
    assert metadata["output_tokens"] == 0
    assert metadata["total_tokens"] == 0
    details = metadata["output_token_details"]
    assert details["reasoning"] == 0
    assert details["citation_tokens"] == 0  # type: ignore[typeddict-item]
def test_perplexity_invoke_includes_num_search_queries(mocker: MockerFixture) -> None:
"""Test that invoke includes num_search_queries in response_metadata."""
llm = ChatPerplexity(model="test", timeout=30, verbose=True)
@@ -413,6 +173,7 @@ def test_perplexity_invoke_includes_num_search_queries(mocker: MockerFixture) ->
"completion_tokens": 20,
"total_tokens": 30,
"num_search_queries": 3,
"search_context_size": "high",
}
mock_response = MagicMock()
@@ -427,6 +188,13 @@ def test_perplexity_invoke_includes_num_search_queries(mocker: MockerFixture) ->
]
mock_response.model = "test-model"
mock_response.usage = mock_usage
# Mock optional fields as empty/None
mock_response.videos = None
mock_response.reasoning_steps = None
mock_response.citations = None
mock_response.search_results = None
mock_response.images = None
mock_response.related_questions = None
patcher = mocker.patch.object(
llm.client.chat.completions, "create", return_value=mock_response
@@ -435,96 +203,11 @@ def test_perplexity_invoke_includes_num_search_queries(mocker: MockerFixture) ->
result = llm.invoke("Test query")
assert result.response_metadata["num_search_queries"] == 3
assert result.response_metadata["search_context_size"] == "high"
assert result.response_metadata["model_name"] == "test-model"
patcher.assert_called_once()
def test_perplexity_invoke_without_num_search_queries(mocker: MockerFixture) -> None:
    """Test that invoke works when num_search_queries is not provided.

    The usage payload deliberately omits ``num_search_queries`` so the test can
    assert the key never reaches ``response_metadata``.
    """
    llm = ChatPerplexity(model="test", timeout=30, verbose=True)
    mock_usage = MagicMock()
    mock_usage.model_dump.return_value = {
        "prompt_tokens": 10,
        "completion_tokens": 20,
        "total_tokens": 30,
    }
    mock_response = MagicMock()
    mock_response.choices = [
        MagicMock(
            message=MagicMock(
                content="Test response",
                tool_calls=None,
            ),
            finish_reason="stop",
        )
    ]
    mock_response.model = "test-model"
    mock_response.usage = mock_usage
    # Explicitly null the optional media/search fields: MagicMock auto-creates
    # truthy attributes on access, which would otherwise leak into the parsed
    # response. Mirrors test_perplexity_invoke_includes_num_search_queries.
    mock_response.videos = None
    mock_response.reasoning_steps = None
    mock_response.citations = None
    mock_response.search_results = None
    mock_response.images = None
    mock_response.related_questions = None
    patcher = mocker.patch.object(
        llm.client.chat.completions, "create", return_value=mock_response
    )
    result = llm.invoke("Test query")
    assert "num_search_queries" not in result.response_metadata
    assert result.response_metadata["model_name"] == "test-model"
    patcher.assert_called_once()
def test_perplexity_stream_includes_num_search_queries(mocker: MockerFixture) -> None:
    """Test that stream properly handles num_search_queries in usage data."""
    llm = ChatPerplexity(model="test", timeout=30, verbose=True)
    mock_chunks: list[dict[str, Any]] = [
        {"choices": [{"delta": {"content": "Hello "}, "finish_reason": None}]},
        {"choices": [{"delta": {"content": "world"}, "finish_reason": None}]},
        {
            "choices": [{"delta": {}, "finish_reason": "stop"}],
            "usage": {
                "prompt_tokens": 5,
                "completion_tokens": 10,
                "total_tokens": 15,
                "num_search_queries": 2,
                "reasoning_tokens": 1,
                "citation_tokens": 3,
            },
        },
    ]
    mock_stream = MagicMock()
    mock_stream.__iter__.return_value = mock_chunks
    patcher = mocker.patch.object(
        llm.client.chat.completions, "create", return_value=mock_stream
    )
    streamed = list(llm.stream("Test query"))
    # Locate the (single) chunk that carries usage metadata.
    usage_chunk = next((c for c in streamed if c.usage_metadata), None)
    # Verify usage metadata is properly set
    assert usage_chunk is not None
    usage_meta = usage_chunk.usage_metadata
    assert usage_meta is not None
    assert usage_meta["input_tokens"] == 5
    assert usage_meta["output_tokens"] == 10
    assert usage_meta["total_tokens"] == 15
    # Verify reasoning and citation tokens are included
    assert usage_meta["output_token_details"]["reasoning"] == 1
    assert usage_meta["output_token_details"]["citation_tokens"] == 3  # type: ignore[typeddict-item]
    patcher.assert_called_once()
def test_profile() -> None:
    """A ChatPerplexity instance exposes a truthy model profile."""
    chat_model = ChatPerplexity(model="sonar")
    assert chat_model.profile

View File

@@ -1,6 +1,17 @@
from langchain_perplexity import __all__
EXPECTED_ALL = ["ChatPerplexity"]
# NOTE(review): the assignment above is immediately overwritten by the list
# below — it looks like a leftover from a merge/diff and can likely be removed.
EXPECTED_ALL = [
    "ChatPerplexity",
    "PerplexitySearchRetriever",
    "PerplexitySearchResults",
    "UserLocation",
    "WebSearchOptions",
    "MediaResponse",
    "MediaResponseOverrides",
    "ReasoningJsonOutputParser",
    "ReasoningStructuredOutputParser",
    "strip_think_tags",
]
def test_all_imports() -> None:

View File

@@ -0,0 +1,42 @@
from unittest.mock import MagicMock
from pytest_mock import MockerFixture
from langchain_perplexity import PerplexitySearchRetriever
def test_search_retriever_initialization() -> None:
    """Constructing the retriever stores the API key and defaults k to 10."""
    search_retriever = PerplexitySearchRetriever(pplx_api_key="test")
    assert search_retriever.k == 10
    assert search_retriever.pplx_api_key.get_secret_value() == "test"
def test_search_retriever_get_relevant_documents(mocker: MockerFixture) -> None:
    """The retriever maps Search API results onto Documents and passes defaults."""
    retriever = PerplexitySearchRetriever(pplx_api_key="test")
    fake_result = MagicMock()
    fake_result.title = "Test Title"
    fake_result.url = "http://test.com"
    fake_result.snippet = "Test snippet"
    fake_result.date = "2023-01-01"
    fake_result.last_updated = "2023-01-02"
    fake_response = MagicMock()
    fake_response.results = [fake_result]
    create_spy = MagicMock(return_value=fake_response)
    mocker.patch.object(retriever.client.search, "create", create_spy)
    documents = retriever.invoke("query")
    assert len(documents) == 1
    document = documents[0]
    # Snippet becomes the page content; title/url land in metadata.
    assert document.page_content == "Test snippet"
    assert document.metadata["title"] == "Test Title"
    assert document.metadata["url"] == "http://test.com"
    create_spy.assert_called_once_with(
        query="query",
        max_results=10,
        max_tokens=25000,
        max_tokens_per_page=1024,
    )

View File

@@ -0,0 +1,35 @@
from unittest.mock import MagicMock
from pytest_mock import MockerFixture
from langchain_perplexity import PerplexitySearchResults
def test_search_tool_run(mocker: MockerFixture) -> None:
    """The search tool converts Search API results into a list of dicts."""
    tool = PerplexitySearchResults(pplx_api_key="test")
    fake_result = MagicMock()
    fake_result.title = "Test Title"
    fake_result.url = "http://test.com"
    fake_result.snippet = "Test snippet"
    fake_result.date = "2023-01-01"
    fake_result.last_updated = "2023-01-02"
    fake_response = MagicMock()
    fake_response.results = [fake_result]
    create_spy = MagicMock(return_value=fake_response)
    mocker.patch.object(tool.client.search, "create", create_spy)
    output = tool.invoke("query")
    # result should be a list of dicts (converted by tool) or str if string output
    # By default, tool.invoke returns the output of _run.
    assert isinstance(output, list)
    assert len(output) == 1
    assert output[0]["title"] == "Test Title"
    create_spy.assert_called_once_with(
        query="query",
        max_results=10,
    )

View File

@@ -399,79 +399,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/2c/e1/e6716421ea10d38022b952c159d5161ca1193197fb744506875fbb87ea7b/iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760", size = 6050, upload-time = "2025-03-19T20:10:01.071Z" },
]
[[package]]
name = "jiter"
version = "0.11.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/9d/c0/a3bb4cc13aced219dd18191ea66e874266bd8aa7b96744e495e1c733aa2d/jiter-0.11.0.tar.gz", hash = "sha256:1d9637eaf8c1d6a63d6562f2a6e5ab3af946c66037eb1b894e8fad75422266e4", size = 167094, upload-time = "2025-09-15T09:20:38.212Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/25/21/7dd1235a19e26979be6098e87e4cced2e061752f3a40a17bbce6dea7fae1/jiter-0.11.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3893ce831e1c0094a83eeaf56c635a167d6fa8cc14393cc14298fd6fdc2a2449", size = 309875, upload-time = "2025-09-15T09:18:48.41Z" },
{ url = "https://files.pythonhosted.org/packages/71/f9/462b54708aa85b135733ccba70529dd68a18511bf367a87c5fd28676c841/jiter-0.11.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:25c625b9b61b5a8725267fdf867ef2e51b429687f6a4eef211f4612e95607179", size = 316505, upload-time = "2025-09-15T09:18:51.057Z" },
{ url = "https://files.pythonhosted.org/packages/bd/40/14e2eeaac6a47bff27d213834795472355fd39769272eb53cb7aa83d5aa8/jiter-0.11.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd4ca85fb6a62cf72e1c7f5e34ddef1b660ce4ed0886ec94a1ef9777d35eaa1f", size = 337613, upload-time = "2025-09-15T09:18:52.358Z" },
{ url = "https://files.pythonhosted.org/packages/d3/ed/a5f1f8419c92b150a7c7fb5ccba1fb1e192887ad713d780e70874f0ce996/jiter-0.11.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:572208127034725e79c28437b82414028c3562335f2b4f451d98136d0fc5f9cd", size = 361438, upload-time = "2025-09-15T09:18:54.637Z" },
{ url = "https://files.pythonhosted.org/packages/dd/f5/70682c023dfcdd463a53faf5d30205a7d99c51d70d3e303c932d0936e5a2/jiter-0.11.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:494ba627c7f550ad3dabb21862864b8f2216098dc18ff62f37b37796f2f7c325", size = 486180, upload-time = "2025-09-15T09:18:56.158Z" },
{ url = "https://files.pythonhosted.org/packages/7c/39/020d08cbab4eab48142ad88b837c41eb08a15c0767fdb7c0d3265128a44b/jiter-0.11.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b8da18a99f58bca3ecc2d2bba99cac000a924e115b6c4f0a2b98f752b6fbf39a", size = 376681, upload-time = "2025-09-15T09:18:57.553Z" },
{ url = "https://files.pythonhosted.org/packages/52/10/b86733f6e594cf51dd142f37c602d8df87c554c5844958deaab0de30eb5d/jiter-0.11.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4ffd3b0fff3fabbb02cc09910c08144db6bb5697a98d227a074401e01ee63dd", size = 348685, upload-time = "2025-09-15T09:18:59.208Z" },
{ url = "https://files.pythonhosted.org/packages/fb/ee/8861665e83a9e703aa5f65fddddb6225428e163e6b0baa95a7f9a8fb9aae/jiter-0.11.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8fe6530aa738a4f7d4e4702aa8f9581425d04036a5f9e25af65ebe1f708f23be", size = 385573, upload-time = "2025-09-15T09:19:00.593Z" },
{ url = "https://files.pythonhosted.org/packages/25/74/05afec03600951f128293813b5a208c9ba1bf587c57a344c05a42a69e1b1/jiter-0.11.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e35d66681c133a03d7e974e7eedae89720fe8ca3bd09f01a4909b86a8adf31f5", size = 516669, upload-time = "2025-09-15T09:19:02.369Z" },
{ url = "https://files.pythonhosted.org/packages/93/d1/2e5bfe147cfbc2a5eef7f73eb75dc5c6669da4fa10fc7937181d93af9495/jiter-0.11.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c59459beca2fbc9718b6f1acb7bfb59ebc3eb4294fa4d40e9cb679dafdcc6c60", size = 508767, upload-time = "2025-09-15T09:19:04.011Z" },
{ url = "https://files.pythonhosted.org/packages/87/50/597f71307e10426b5c082fd05d38c615ddbdd08c3348d8502963307f0652/jiter-0.11.0-cp310-cp310-win32.whl", hash = "sha256:b7b0178417b0dcfc5f259edbc6db2b1f5896093ed9035ee7bab0f2be8854726d", size = 205476, upload-time = "2025-09-15T09:19:05.594Z" },
{ url = "https://files.pythonhosted.org/packages/c7/86/1e5214b3272e311754da26e63edec93a183811d4fc2e0118addec365df8b/jiter-0.11.0-cp310-cp310-win_amd64.whl", hash = "sha256:11df2bf99fb4754abddd7f5d940a48e51f9d11624d6313ca4314145fcad347f0", size = 204708, upload-time = "2025-09-15T09:19:06.955Z" },
{ url = "https://files.pythonhosted.org/packages/38/55/a69fefeef09c2eaabae44b935a1aa81517e49639c0a0c25d861cb18cd7ac/jiter-0.11.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:cb5d9db02979c3f49071fce51a48f4b4e4cf574175fb2b11c7a535fa4867b222", size = 309503, upload-time = "2025-09-15T09:19:08.191Z" },
{ url = "https://files.pythonhosted.org/packages/bd/d5/a6aba9e6551f32f9c127184f398208e4eddb96c59ac065c8a92056089d28/jiter-0.11.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1dc6a123f3471c4730db7ca8ba75f1bb3dcb6faeb8d46dd781083e7dee88b32d", size = 317688, upload-time = "2025-09-15T09:19:09.918Z" },
{ url = "https://files.pythonhosted.org/packages/bb/f3/5e86f57c1883971cdc8535d0429c2787bf734840a231da30a3be12850562/jiter-0.11.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09858f8d230f031c7b8e557429102bf050eea29c77ad9c34c8fe253c5329acb7", size = 337418, upload-time = "2025-09-15T09:19:11.078Z" },
{ url = "https://files.pythonhosted.org/packages/5e/4f/a71d8a24c2a70664970574a8e0b766663f5ef788f7fe1cc20ee0c016d488/jiter-0.11.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:dbe2196c4a0ce760925a74ab4456bf644748ab0979762139626ad138f6dac72d", size = 361423, upload-time = "2025-09-15T09:19:13.286Z" },
{ url = "https://files.pythonhosted.org/packages/8f/e5/b09076f4e7fd9471b91e16f9f3dc7330b161b738f3b39b2c37054a36e26a/jiter-0.11.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5beb56d22b63647bafd0b74979216fdee80c580c0c63410be8c11053860ffd09", size = 486367, upload-time = "2025-09-15T09:19:14.546Z" },
{ url = "https://files.pythonhosted.org/packages/fb/f1/98cb3a36f5e62f80cd860f0179f948d9eab5a316d55d3e1bab98d9767af5/jiter-0.11.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:97025d09ef549795d8dc720a824312cee3253c890ac73c621721ddfc75066789", size = 376335, upload-time = "2025-09-15T09:19:15.939Z" },
{ url = "https://files.pythonhosted.org/packages/9f/d8/ec74886497ea393c29dbd7651ddecc1899e86404a6b1f84a3ddab0ab59fd/jiter-0.11.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d50880a6da65d8c23a2cf53c412847d9757e74cc9a3b95c5704a1d1a24667347", size = 348981, upload-time = "2025-09-15T09:19:17.568Z" },
{ url = "https://files.pythonhosted.org/packages/24/93/d22ad7fa3b86ade66c86153ceea73094fc2af8b20c59cb7fceab9fea4704/jiter-0.11.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:452d80a1c86c095a242007bd9fc5d21b8a8442307193378f891cb8727e469648", size = 385797, upload-time = "2025-09-15T09:19:19.121Z" },
{ url = "https://files.pythonhosted.org/packages/c8/bd/e25ff4a4df226e9b885f7cb01ee4b9dc74e3000e612d6f723860d71a1f34/jiter-0.11.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e84e58198d4894668eec2da660ffff60e0f3e60afa790ecc50cb12b0e02ca1d4", size = 516597, upload-time = "2025-09-15T09:19:20.301Z" },
{ url = "https://files.pythonhosted.org/packages/be/fb/beda613db7d93ffa2fdd2683f90f2f5dce8daf4bc2d0d2829e7de35308c6/jiter-0.11.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:df64edcfc5dd5279a791eea52aa113d432c933119a025b0b5739f90d2e4e75f1", size = 508853, upload-time = "2025-09-15T09:19:22.075Z" },
{ url = "https://files.pythonhosted.org/packages/20/64/c5b0d93490634e41e38e2a15de5d54fdbd2c9f64a19abb0f95305b63373c/jiter-0.11.0-cp311-cp311-win32.whl", hash = "sha256:144fc21337d21b1d048f7f44bf70881e1586401d405ed3a98c95a114a9994982", size = 205140, upload-time = "2025-09-15T09:19:23.351Z" },
{ url = "https://files.pythonhosted.org/packages/a1/e6/c347c0e6f5796e97d4356b7e5ff0ce336498b7f4ef848fae621a56f1ccf3/jiter-0.11.0-cp311-cp311-win_amd64.whl", hash = "sha256:b0f32e644d241293b892b1a6dd8f0b9cc029bfd94c97376b2681c36548aabab7", size = 204311, upload-time = "2025-09-15T09:19:24.591Z" },
{ url = "https://files.pythonhosted.org/packages/ba/b5/3009b112b8f673e568ef79af9863d8309a15f0a8cdcc06ed6092051f377e/jiter-0.11.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:2fb7b377688cc3850bbe5c192a6bd493562a0bc50cbc8b047316428fbae00ada", size = 305510, upload-time = "2025-09-15T09:19:25.893Z" },
{ url = "https://files.pythonhosted.org/packages/fe/82/15514244e03b9e71e086bbe2a6de3e4616b48f07d5f834200c873956fb8c/jiter-0.11.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a1b7cbe3f25bd0d8abb468ba4302a5d45617ee61b2a7a638f63fee1dc086be99", size = 316521, upload-time = "2025-09-15T09:19:27.525Z" },
{ url = "https://files.pythonhosted.org/packages/92/94/7a2e905f40ad2d6d660e00b68d818f9e29fb87ffe82774f06191e93cbe4a/jiter-0.11.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c0a7f0ec81d5b7588c5cade1eb1925b91436ae6726dc2df2348524aeabad5de6", size = 338214, upload-time = "2025-09-15T09:19:28.727Z" },
{ url = "https://files.pythonhosted.org/packages/a8/9c/5791ed5bdc76f12110158d3316a7a3ec0b1413d018b41c5ed399549d3ad5/jiter-0.11.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:07630bb46ea2a6b9c6ed986c6e17e35b26148cce2c535454b26ee3f0e8dcaba1", size = 361280, upload-time = "2025-09-15T09:19:30.013Z" },
{ url = "https://files.pythonhosted.org/packages/d4/7f/b7d82d77ff0d2cb06424141000176b53a9e6b16a1125525bb51ea4990c2e/jiter-0.11.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7764f27d28cd4a9cbc61704dfcd80c903ce3aad106a37902d3270cd6673d17f4", size = 487895, upload-time = "2025-09-15T09:19:31.424Z" },
{ url = "https://files.pythonhosted.org/packages/42/44/10a1475d46f1fc1fd5cc2e82c58e7bca0ce5852208e0fa5df2f949353321/jiter-0.11.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1d4a6c4a737d486f77f842aeb22807edecb4a9417e6700c7b981e16d34ba7c72", size = 378421, upload-time = "2025-09-15T09:19:32.746Z" },
{ url = "https://files.pythonhosted.org/packages/9a/5f/0dc34563d8164d31d07bc09d141d3da08157a68dcd1f9b886fa4e917805b/jiter-0.11.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cf408d2a0abd919b60de8c2e7bc5eeab72d4dafd18784152acc7c9adc3291591", size = 347932, upload-time = "2025-09-15T09:19:34.612Z" },
{ url = "https://files.pythonhosted.org/packages/f7/de/b68f32a4fcb7b4a682b37c73a0e5dae32180140cd1caf11aef6ad40ddbf2/jiter-0.11.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cdef53eda7d18e799625023e1e250dbc18fbc275153039b873ec74d7e8883e09", size = 386959, upload-time = "2025-09-15T09:19:35.994Z" },
{ url = "https://files.pythonhosted.org/packages/76/0a/c08c92e713b6e28972a846a81ce374883dac2f78ec6f39a0dad9f2339c3a/jiter-0.11.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:53933a38ef7b551dd9c7f1064f9d7bb235bb3168d0fa5f14f0798d1b7ea0d9c5", size = 517187, upload-time = "2025-09-15T09:19:37.426Z" },
{ url = "https://files.pythonhosted.org/packages/89/b5/4a283bec43b15aad54fcae18d951f06a2ec3f78db5708d3b59a48e9c3fbd/jiter-0.11.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:11840d2324c9ab5162fc1abba23bc922124fedcff0d7b7f85fffa291e2f69206", size = 509461, upload-time = "2025-09-15T09:19:38.761Z" },
{ url = "https://files.pythonhosted.org/packages/34/a5/f8bad793010534ea73c985caaeef8cc22dfb1fedb15220ecdf15c623c07a/jiter-0.11.0-cp312-cp312-win32.whl", hash = "sha256:4f01a744d24a5f2bb4a11657a1b27b61dc038ae2e674621a74020406e08f749b", size = 206664, upload-time = "2025-09-15T09:19:40.096Z" },
{ url = "https://files.pythonhosted.org/packages/ed/42/5823ec2b1469395a160b4bf5f14326b4a098f3b6898fbd327366789fa5d3/jiter-0.11.0-cp312-cp312-win_amd64.whl", hash = "sha256:29fff31190ab3a26de026da2f187814f4b9c6695361e20a9ac2123e4d4378a4c", size = 203520, upload-time = "2025-09-15T09:19:41.798Z" },
{ url = "https://files.pythonhosted.org/packages/97/c4/d530e514d0f4f29b2b68145e7b389cbc7cac7f9c8c23df43b04d3d10fa3e/jiter-0.11.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:4441a91b80a80249f9a6452c14b2c24708f139f64de959943dfeaa6cb915e8eb", size = 305021, upload-time = "2025-09-15T09:19:43.523Z" },
{ url = "https://files.pythonhosted.org/packages/7a/77/796a19c567c5734cbfc736a6f987affc0d5f240af8e12063c0fb93990ffa/jiter-0.11.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:ff85fc6d2a431251ad82dbd1ea953affb5a60376b62e7d6809c5cd058bb39471", size = 314384, upload-time = "2025-09-15T09:19:44.849Z" },
{ url = "https://files.pythonhosted.org/packages/14/9c/824334de0b037b91b6f3fa9fe5a191c83977c7ec4abe17795d3cb6d174cf/jiter-0.11.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5e86126d64706fd28dfc46f910d496923c6f95b395138c02d0e252947f452bd", size = 337389, upload-time = "2025-09-15T09:19:46.094Z" },
{ url = "https://files.pythonhosted.org/packages/a2/95/ed4feab69e6cf9b2176ea29d4ef9d01a01db210a3a2c8a31a44ecdc68c38/jiter-0.11.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4ad8bd82165961867a10f52010590ce0b7a8c53da5ddd8bbb62fef68c181b921", size = 360519, upload-time = "2025-09-15T09:19:47.494Z" },
{ url = "https://files.pythonhosted.org/packages/b5/0c/2ad00f38d3e583caba3909d95b7da1c3a7cd82c0aa81ff4317a8016fb581/jiter-0.11.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b42c2cd74273455ce439fd9528db0c6e84b5623cb74572305bdd9f2f2961d3df", size = 487198, upload-time = "2025-09-15T09:19:49.116Z" },
{ url = "https://files.pythonhosted.org/packages/ea/8b/919b64cf3499b79bdfba6036da7b0cac5d62d5c75a28fb45bad7819e22f0/jiter-0.11.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f0062dab98172dd0599fcdbf90214d0dcde070b1ff38a00cc1b90e111f071982", size = 377835, upload-time = "2025-09-15T09:19:50.468Z" },
{ url = "https://files.pythonhosted.org/packages/29/7f/8ebe15b6e0a8026b0d286c083b553779b4dd63db35b43a3f171b544de91d/jiter-0.11.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb948402821bc76d1f6ef0f9e19b816f9b09f8577844ba7140f0b6afe994bc64", size = 347655, upload-time = "2025-09-15T09:19:51.726Z" },
{ url = "https://files.pythonhosted.org/packages/8e/64/332127cef7e94ac75719dda07b9a472af6158ba819088d87f17f3226a769/jiter-0.11.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:25a5b1110cca7329fd0daf5060faa1234be5c11e988948e4f1a1923b6a457fe1", size = 386135, upload-time = "2025-09-15T09:19:53.075Z" },
{ url = "https://files.pythonhosted.org/packages/20/c8/557b63527442f84c14774159948262a9d4fabb0d61166f11568f22fc60d2/jiter-0.11.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:bf11807e802a214daf6c485037778843fadd3e2ec29377ae17e0706ec1a25758", size = 516063, upload-time = "2025-09-15T09:19:54.447Z" },
{ url = "https://files.pythonhosted.org/packages/86/13/4164c819df4a43cdc8047f9a42880f0ceef5afeb22e8b9675c0528ebdccd/jiter-0.11.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:dbb57da40631c267861dd0090461222060960012d70fd6e4c799b0f62d0ba166", size = 508139, upload-time = "2025-09-15T09:19:55.764Z" },
{ url = "https://files.pythonhosted.org/packages/fa/70/6e06929b401b331d41ddb4afb9f91cd1168218e3371972f0afa51c9f3c31/jiter-0.11.0-cp313-cp313-win32.whl", hash = "sha256:8e36924dad32c48d3c5e188d169e71dc6e84d6cb8dedefea089de5739d1d2f80", size = 206369, upload-time = "2025-09-15T09:19:57.048Z" },
{ url = "https://files.pythonhosted.org/packages/f4/0d/8185b8e15de6dce24f6afae63380e16377dd75686d56007baa4f29723ea1/jiter-0.11.0-cp313-cp313-win_amd64.whl", hash = "sha256:452d13e4fd59698408087235259cebe67d9d49173b4dacb3e8d35ce4acf385d6", size = 202538, upload-time = "2025-09-15T09:19:58.35Z" },
{ url = "https://files.pythonhosted.org/packages/13/3a/d61707803260d59520721fa326babfae25e9573a88d8b7b9cb54c5423a59/jiter-0.11.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:089f9df9f69532d1339e83142438668f52c97cd22ee2d1195551c2b1a9e6cf33", size = 313737, upload-time = "2025-09-15T09:19:59.638Z" },
{ url = "https://files.pythonhosted.org/packages/cd/cc/c9f0eec5d00f2a1da89f6bdfac12b8afdf8d5ad974184863c75060026457/jiter-0.11.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:29ed1fe69a8c69bf0f2a962d8d706c7b89b50f1332cd6b9fbda014f60bd03a03", size = 346183, upload-time = "2025-09-15T09:20:01.442Z" },
{ url = "https://files.pythonhosted.org/packages/a6/87/fc632776344e7aabbab05a95a0075476f418c5d29ab0f2eec672b7a1f0ac/jiter-0.11.0-cp313-cp313t-win_amd64.whl", hash = "sha256:a4d71d7ea6ea8786291423fe209acf6f8d398a0759d03e7f24094acb8ab686ba", size = 204225, upload-time = "2025-09-15T09:20:03.102Z" },
{ url = "https://files.pythonhosted.org/packages/ee/3b/e7f45be7d3969bdf2e3cd4b816a7a1d272507cd0edd2d6dc4b07514f2d9a/jiter-0.11.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:9a6dff27eca70930bdbe4cbb7c1a4ba8526e13b63dc808c0670083d2d51a4a72", size = 304414, upload-time = "2025-09-15T09:20:04.357Z" },
{ url = "https://files.pythonhosted.org/packages/06/32/13e8e0d152631fcc1907ceb4943711471be70496d14888ec6e92034e2caf/jiter-0.11.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:b1ae2a7593a62132c7d4c2abbee80bbbb94fdc6d157e2c6cc966250c564ef774", size = 314223, upload-time = "2025-09-15T09:20:05.631Z" },
{ url = "https://files.pythonhosted.org/packages/0c/7e/abedd5b5a20ca083f778d96bba0d2366567fcecb0e6e34ff42640d5d7a18/jiter-0.11.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7b13a431dba4b059e9e43019d3022346d009baf5066c24dcdea321a303cde9f0", size = 337306, upload-time = "2025-09-15T09:20:06.917Z" },
{ url = "https://files.pythonhosted.org/packages/ac/e2/30d59bdc1204c86aa975ec72c48c482fee6633120ee9c3ab755e4dfefea8/jiter-0.11.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:af62e84ca3889604ebb645df3b0a3f3bcf6b92babbff642bd214616f57abb93a", size = 360565, upload-time = "2025-09-15T09:20:08.283Z" },
{ url = "https://files.pythonhosted.org/packages/fe/88/567288e0d2ed9fa8f7a3b425fdaf2cb82b998633c24fe0d98f5417321aa8/jiter-0.11.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c6f3b32bb723246e6b351aecace52aba78adb8eeb4b2391630322dc30ff6c773", size = 486465, upload-time = "2025-09-15T09:20:09.613Z" },
{ url = "https://files.pythonhosted.org/packages/18/6e/7b72d09273214cadd15970e91dd5ed9634bee605176107db21e1e4205eb1/jiter-0.11.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:adcab442f4a099a358a7f562eaa54ed6456fb866e922c6545a717be51dbed7d7", size = 377581, upload-time = "2025-09-15T09:20:10.884Z" },
{ url = "https://files.pythonhosted.org/packages/58/52/4db456319f9d14deed325f70102577492e9d7e87cf7097bda9769a1fcacb/jiter-0.11.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c9967c2ab338ee2b2c0102fd379ec2693c496abf71ffd47e4d791d1f593b68e2", size = 347102, upload-time = "2025-09-15T09:20:12.175Z" },
{ url = "https://files.pythonhosted.org/packages/ce/b4/433d5703c38b26083aec7a733eb5be96f9c6085d0e270a87ca6482cbf049/jiter-0.11.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e7d0bed3b187af8b47a981d9742ddfc1d9b252a7235471ad6078e7e4e5fe75c2", size = 386477, upload-time = "2025-09-15T09:20:13.428Z" },
{ url = "https://files.pythonhosted.org/packages/c8/7a/a60bfd9c55b55b07c5c441c5085f06420b6d493ce9db28d069cc5b45d9f3/jiter-0.11.0-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:f6fe0283e903ebc55f1a6cc569b8c1f3bf4abd026fed85e3ff8598a9e6f982f0", size = 516004, upload-time = "2025-09-15T09:20:14.848Z" },
{ url = "https://files.pythonhosted.org/packages/2e/46/f8363e5ecc179b4ed0ca6cb0a6d3bfc266078578c71ff30642ea2ce2f203/jiter-0.11.0-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:4ee5821e3d66606b29ae5b497230b304f1376f38137d69e35f8d2bd5f310ff73", size = 507855, upload-time = "2025-09-15T09:20:16.176Z" },
{ url = "https://files.pythonhosted.org/packages/90/33/396083357d51d7ff0f9805852c288af47480d30dd31d8abc74909b020761/jiter-0.11.0-cp314-cp314-win32.whl", hash = "sha256:c2d13ba7567ca8799f17c76ed56b1d49be30df996eb7fa33e46b62800562a5e2", size = 205802, upload-time = "2025-09-15T09:20:17.661Z" },
{ url = "https://files.pythonhosted.org/packages/e7/ab/eb06ca556b2551d41de7d03bf2ee24285fa3d0c58c5f8d95c64c9c3281b1/jiter-0.11.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:fb4790497369d134a07fc763cc88888c46f734abdd66f9fdf7865038bf3a8f40", size = 313405, upload-time = "2025-09-15T09:20:18.918Z" },
{ url = "https://files.pythonhosted.org/packages/af/22/7ab7b4ec3a1c1f03aef376af11d23b05abcca3fb31fbca1e7557053b1ba2/jiter-0.11.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e2bbf24f16ba5ad4441a9845e40e4ea0cb9eed00e76ba94050664ef53ef4406", size = 347102, upload-time = "2025-09-15T09:20:20.16Z" },
{ url = "https://files.pythonhosted.org/packages/70/f3/ce100253c80063a7b8b406e1d1562657fd4b9b4e1b562db40e68645342fb/jiter-0.11.0-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:902b43386c04739229076bd1c4c69de5d115553d982ab442a8ae82947c72ede7", size = 336380, upload-time = "2025-09-15T09:20:36.867Z" },
]
[[package]]
name = "jsonpatch"
version = "1.33"
@@ -495,7 +422,7 @@ wheels = [
[[package]]
name = "langchain-core"
version = "1.1.1"
version = "1.2.3"
source = { editable = "../../core" }
dependencies = [
{ name = "jsonpatch" },
@@ -559,7 +486,7 @@ version = "1.1.0"
source = { editable = "." }
dependencies = [
{ name = "langchain-core" },
{ name = "openai" },
{ name = "perplexityai" },
]
[package.dev-dependencies]
@@ -596,7 +523,7 @@ typing = [
[package.metadata]
requires-dist = [
{ name = "langchain-core", editable = "../../core" },
{ name = "openai", specifier = ">=2.0.0,<3.0.0" },
{ name = "perplexityai", specifier = ">=0.22.0" },
]
[package.metadata.requires-dev]
@@ -628,7 +555,7 @@ typing = [
[[package]]
name = "langchain-tests"
version = "1.0.2"
version = "1.1.1"
source = { editable = "../../standard-tests" }
dependencies = [
{ name = "httpx" },
@@ -1019,25 +946,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/af/11/0cc63f9f321ccf63886ac203336777140011fb669e739da36d8db3c53b98/numpy-2.3.3-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:2e267c7da5bf7309670523896df97f93f6e469fb931161f483cd6882b3b1a5dc", size = 12971844, upload-time = "2025-09-09T15:58:57.359Z" },
]
[[package]]
name = "openai"
version = "2.1.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "anyio" },
{ name = "distro" },
{ name = "httpx" },
{ name = "jiter" },
{ name = "pydantic" },
{ name = "sniffio" },
{ name = "tqdm" },
{ name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/1a/dd/4d4d46a06943e37c95b6e388237e1e38d1e9aab264ff070f86345d60b7a4/openai-2.1.0.tar.gz", hash = "sha256:47f3463a5047340a989b4c0cd5378054acfca966ff61a96553b22f098e3270a2", size = 572998, upload-time = "2025-10-02T20:43:15.385Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/68/83/88f64fc8f037885efa8a629d1215f5bc1f037453bab4d4f823b5533319eb/openai-2.1.0-py3-none-any.whl", hash = "sha256:33172e8c06a4576144ba4137a493807a9ca427421dcabc54ad3aa656daf757d3", size = 964939, upload-time = "2025-10-02T20:43:13.568Z" },
]
[[package]]
name = "orjson"
version = "3.11.3"
@@ -1133,6 +1041,23 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/cc/20/ff623b09d963f88bfde16306a54e12ee5ea43e9b597108672ff3a408aad6/pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", size = 31191, upload-time = "2023-12-10T22:30:43.14Z" },
]
[[package]]
name = "perplexityai"
version = "0.22.2"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "anyio" },
{ name = "distro" },
{ name = "httpx" },
{ name = "pydantic" },
{ name = "sniffio" },
{ name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/47/f0/4ead48afbe4c9d7b57e9d03e253bed2986ac20d2257ed918cac276949018/perplexityai-0.22.2.tar.gz", hash = "sha256:9c3cad307c95aa5e8967358547e548d58793d350318d8d1d4aa33a933cbed844", size = 113014, upload-time = "2025-12-17T19:05:25.572Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/cb/aa/fc3337fdb014b1584297fc212552e6365d22a6fb77850a56c9038cd47173/perplexityai-0.22.2-py3-none-any.whl", hash = "sha256:92d3dc7f4e110c879ac5009daf7263a04f413523f7d76fba871176516c253890", size = 96860, upload-time = "2025-12-17T19:05:24.292Z" },
]
[[package]]
name = "pillow"
version = "11.3.0"
@@ -1861,18 +1786,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/6e/c2/61d3e0f47e2b74ef40a68b9e6ad5984f6241a942f7cd3bbfbdbd03861ea9/tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc", size = 14257, upload-time = "2024-11-27T22:38:35.385Z" },
]
[[package]]
name = "tqdm"
version = "4.67.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "colorama", marker = "sys_platform == 'win32'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/a8/4b/29b4ef32e036bb34e4ab51796dd745cdba7ed47ad142a9f4a1eb8e0c744d/tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2", size = 169737, upload-time = "2024-11-24T20:12:22.481Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/d0/30/dc54f88dd4a2b5dc8a0279bdd7270e735851848b762aeb1c1184ed1f6b14/tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2", size = 78540, upload-time = "2024-11-24T20:12:19.698Z" },
]
[[package]]
name = "types-requests"
version = "2.31.0.6"