community[patch]: Add missing type annotations (#22758)

Add missing type annotations to objects in community.
Left unannotated, these attributes raise type errors under pydantic 2.
Eugene Yurtsev 2024-06-10 16:59:28 -04:00 committed by GitHub
parent 3237909221
commit 05d31a2f00
18 changed files with 37 additions and 37 deletions
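
For context (not part of the commit itself), the sketch below illustrates the pydantic 2 behavior these annotations guard against: pydantic 1 silently turns an unannotated class attribute with a default into a model field, while pydantic 2 raises PydanticUserError at class-definition time unless the attribute is annotated (or declared a ClassVar). The community models import BaseModel through the langchain_core.pydantic_v1 shim (see the MojeekSearchAPIWrapper hunk below); the plain pydantic import and the Broken/Fixed class names here are illustrative only.

# Illustrative sketch, assuming pydantic 2.x is installed; not code from this commit.
from pydantic import BaseModel

try:

    class Broken(BaseModel):
        api_url = "https://example.com"  # no annotation: rejected by pydantic 2

except Exception as err:  # pydantic.errors.PydanticUserError under pydantic 2
    print(f"rejected: {err}")


class Fixed(BaseModel):
    api_url: str = "https://example.com"  # annotated: accepted by pydantic 1 and 2


print(Fixed().api_url)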

@@ -59,7 +59,7 @@ class UpstashRatelimitHandler(BaseCallbackHandler):
     """

     raise_error = True
-    _checked = False
+    _checked: bool = False

     def __init__(
         self,

@@ -72,9 +72,9 @@ class PebbloRetrievalQA(Chain):
     """Pebblo cloud API key for app."""
     classifier_url: str = CLASSIFIER_URL  #: :meta private:
     """Classifier endpoint."""
-    _discover_sent = False  #: :meta private:
+    _discover_sent: bool = False  #: :meta private:
     """Flag to check if discover payload has been sent."""
-    _prompt_sent = False  #: :meta private:
+    _prompt_sent: bool = False  #: :meta private:
     """Flag to check if prompt payload has been sent."""

     def _call(

@@ -97,7 +97,7 @@ class ChatBaichuan(BaseChatModel):
     """Whether to stream the results or not."""
     request_timeout: int = Field(default=60, alias="timeout")
     """request timeout for chat http requests"""
-    model = "Baichuan2-Turbo-192K"
+    model: str = "Baichuan2-Turbo-192K"
     """model name of Baichuan, default is `Baichuan2-Turbo-192K`,
     other options include `Baichuan2-Turbo`"""
     temperature: Optional[float] = Field(default=0.3)

@@ -240,7 +240,7 @@ class OCIModelDeploymentTGI(OCIModelDeploymentLLM):
    sampling and Top-p sampling.
    """

-    watermark = True
+    watermark: bool = True
    """Watermarking with `A Watermark for Large Language Models <https://arxiv.org/abs/2301.10226>`_.
    Defaults to True."""

@@ -18,7 +18,7 @@ class BreebsRetriever(BaseRetriever):
     """

     breeb_key: str
-    url = "https://breebs.promptbreeders.com/knowledge"
+    url: str = "https://breebs.promptbreeders.com/knowledge"

     def __init__(self, breeb_key: str):
         super().__init__(breeb_key=breeb_key)  # type: ignore[call-arg]

@@ -37,8 +37,8 @@ class HuggingFaceTextToSpeechModelInference(BaseTool):
     api_url: str
     huggingface_api_key: SecretStr

-    _HUGGINGFACE_API_KEY_ENV_NAME = "HUGGINGFACE_API_KEY"
-    _HUGGINGFACE_API_URL_ROOT = "https://api-inference.huggingface.co/models"
+    _HUGGINGFACE_API_KEY_ENV_NAME: str = "HUGGINGFACE_API_KEY"
+    _HUGGINGFACE_API_URL_ROOT: str = "https://api-inference.huggingface.co/models"

     def __init__(
         self,

@@ -24,7 +24,7 @@ class CogniswitchKnowledgeRequest(BaseTool):
     cs_token: str
     OAI_token: str
     apiKey: str
-    api_url = "https://api.cogniswitch.ai:8243/cs-api/0.0.1/cs/knowledgeRequest"
+    api_url: str = "https://api.cogniswitch.ai:8243/cs-api/0.0.1/cs/knowledgeRequest"

     def _run(
         self,

@@ -96,7 +96,7 @@ class E2BDataAnalysisToolArguments(BaseModel):

 class E2BDataAnalysisTool(BaseTool):
     """Tool for running python code in a sandboxed environment for data analysis."""
-    name = "e2b_data_analysis"
+    name: str = "e2b_data_analysis"
     args_schema: Type[BaseModel] = E2BDataAnalysisToolArguments
     session: Any
     description: str

@@ -28,13 +28,13 @@ class EdenAiSpeechToTextTool(EdenaiTool):
     edenai_api_key: Optional[str] = None

-    name = "edenai_speech_to_text"
+    name: str = "edenai_speech_to_text"
     description = (
         "A wrapper around edenai Services speech to text "
         "Useful for when you have to convert audio to text."
         "Input should be a url to an audio file."
     )

-    is_async = True
+    is_async: bool = True

     language: Optional[str] = "en"
     speakers: Optional[int]
@@ -43,7 +43,7 @@ class EdenAiSpeechToTextTool(EdenaiTool):
     feature: str = "audio"
     subfeature: str = "speech_to_text_async"

-    base_url = "https://api.edenai.run/v2/audio/speech_to_text_async/"
+    base_url: str = "https://api.edenai.run/v2/audio/speech_to_text_async/"

     @validator("providers")
     def check_only_one_provider_selected(cls, v: List[str]) -> List[str]:

@@ -23,7 +23,7 @@ class EdenAiTextToSpeechTool(EdenaiTool):
     """

-    name = "edenai_text_to_speech"
+    name: str = "edenai_text_to_speech"
     description = (
         "A wrapper around edenai Services text to speech."
         "Useful for when you need to convert text to speech."

@@ -23,9 +23,9 @@ class EdenAiExplicitImageTool(EdenaiTool):
     """

-    name = "edenai_image_explicit_content_detection"
-    description = (
+    name: str = "edenai_image_explicit_content_detection"
+    description: str = (
         "A wrapper around edenai Services Explicit image detection. "
         """Useful for when you have to extract Explicit Content from images.
         it detects adult only content in images,
@@ -35,9 +35,9 @@ class EdenAiExplicitImageTool(EdenaiTool):
         "Input should be the string url of the image ."
     )

-    combine_available = True
-    feature = "image"
-    subfeature = "explicit_content"
+    combine_available: bool = True
+    feature: str = "image"
+    subfeature: str = "explicit_content"

     def _parse_json(self, json_data: dict) -> str:
         result_str = f"nsfw_likelihood: {json_data['nsfw_likelihood']}\n"

@@ -22,9 +22,9 @@ class EdenAiObjectDetectionTool(EdenaiTool):
     """

-    name = "edenai_object_detection"
-    description = (
+    name: str = "edenai_object_detection"
+    description: str = (
         "A wrapper around edenai Services Object Detection . "
         """Useful for when you have to do an to identify and locate
         (with bounding boxes) objects in an image """
@@ -33,8 +33,8 @@ class EdenAiObjectDetectionTool(EdenaiTool):
     show_positions: bool = False

-    feature = "image"
-    subfeature = "object_detection"
+    feature: str = "image"
+    subfeature: str = "object_detection"

     def _parse_json(self, json_data: dict) -> str:
         result = []

@@ -22,16 +22,16 @@ class EdenAiParsingIDTool(EdenaiTool):
     """

-    name = "edenai_identity_parsing"
-    description = (
+    name: str = "edenai_identity_parsing"
+    description: str = (
         "A wrapper around edenai Services Identity parsing. "
         "Useful for when you have to extract information from an ID Document "
         "Input should be the string url of the document to parse."
     )

-    feature = "ocr"
-    subfeature = "identity_parser"
+    feature: str = "ocr"
+    subfeature: str = "identity_parser"

     language: Optional[str] = None
     """

@@ -22,9 +22,9 @@ class EdenAiParsingInvoiceTool(EdenaiTool):
     """

-    name = "edenai_invoice_parsing"
-    description = (
+    name: str = "edenai_invoice_parsing"
+    description: str = (
         "A wrapper around edenai Services invoice parsing. "
         """Useful for when you have to extract information from
         an image it enables to take invoices
@@ -39,8 +39,8 @@ class EdenAiParsingInvoiceTool(EdenaiTool):
     language of the image passed to the model.
     """

-    feature = "ocr"
-    subfeature = "invoice_parser"
+    feature: str = "ocr"
+    subfeature: str = "invoice_parser"

     def _parse_response(self, response: list) -> str:
         formatted_list: list = []

@@ -22,9 +22,9 @@ class EdenAiTextModerationTool(EdenaiTool):
     """

-    name = "edenai_explicit_content_detection_text"
-    description = (
+    name: str = "edenai_explicit_content_detection_text"
+    description: str = (
         "A wrapper around edenai Services explicit content detection for text. "
         """Useful for when you have to scan text for offensive,
         sexually explicit or suggestive content,

@@ -20,8 +20,8 @@ class YouInput(BaseModel):

 class YouSearchTool(BaseTool):
     """Tool that searches the you.com API."""
-    name = "you_search"
-    description = (
+    name: str = "you_search"
+    description: str = (
         "The YOU APIs make LLMs and search experiences more factual and"
         "up to date with realtime web data."
     )

@@ -8,7 +8,7 @@ from langchain_core.pydantic_v1 import BaseModel, Field
 class MojeekSearchAPIWrapper(BaseModel):
     api_key: str
     search_kwargs: dict = Field(default_factory=dict)
-    api_url = "https://api.mojeek.com/search"
+    api_url: str = "https://api.mojeek.com/search"

     def run(self, query: str) -> str:
         search_results = self._search(query)

@@ -26,7 +26,7 @@ GUARDRAILS_TRIGGER = os.environ.get(
 class BedrockAsyncCallbackHandler(AsyncCallbackHandler):
     """Async callback handler that can be used to handle callbacks from langchain."""

-    guardrails_intervened = False
+    guardrails_intervened: bool = False

     async def on_llm_error(
         self,