Mirror of https://github.com/hwchase17/langchain.git (synced 2025-08-06 11:37:12 +00:00)
community[patch]: Add missing type annotations (#22758)
Add missing type annotations to objects in community. Without these annotations, the affected attributes raise type errors under pydantic 2.
This commit is contained in:
parent
3237909221
commit
05d31a2f00
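To illustrate the pydantic 2 behavior the commit message refers to, here is a minimal sketch (not part of the commit; the class and field names are made up): an un-annotated class attribute on a model raises PydanticUserError when the class is defined, while the annotated form is accepted as an ordinary field with a default. Under pydantic 1 (re-exported by langchain_core.pydantic_v1, as seen in the MojeekSearchAPIWrapper hunk below), the un-annotated form is generally tolerated and the field type is inferred from the default, which is why these attributes worked before.

# Illustrative sketch only; the model and attribute names are hypothetical and
# the behavior shown is generic pydantic 2 behavior, not code from this commit.
from pydantic import BaseModel, PydanticUserError

try:
    class Before(BaseModel):
        url = "https://example.com"  # no annotation
except PydanticUserError as err:
    # pydantic 2 rejects un-annotated, non-ClassVar attributes on a model
    print(err)

class After(BaseModel):
    url: str = "https://example.com"  # annotated: a normal field with a default

print(After().url)  # "https://example.com"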
@@ -59,7 +59,7 @@ class UpstashRatelimitHandler(BaseCallbackHandler):
     """
 
     raise_error = True
-    _checked = False
+    _checked: bool = False
 
     def __init__(
         self,
@@ -72,9 +72,9 @@ class PebbloRetrievalQA(Chain):
     """Pebblo cloud API key for app."""
     classifier_url: str = CLASSIFIER_URL #: :meta private:
     """Classifier endpoint."""
-    _discover_sent = False #: :meta private:
+    _discover_sent: bool = False #: :meta private:
     """Flag to check if discover payload has been sent."""
-    _prompt_sent = False #: :meta private:
+    _prompt_sent: bool = False #: :meta private:
     """Flag to check if prompt payload has been sent."""
 
     def _call(
@@ -97,7 +97,7 @@ class ChatBaichuan(BaseChatModel):
     """Whether to stream the results or not."""
     request_timeout: int = Field(default=60, alias="timeout")
     """request timeout for chat http requests"""
-    model = "Baichuan2-Turbo-192K"
+    model: str = "Baichuan2-Turbo-192K"
     """model name of Baichuan, default is `Baichuan2-Turbo-192K`,
     other options include `Baichuan2-Turbo`"""
     temperature: Optional[float] = Field(default=0.3)
@@ -240,7 +240,7 @@ class OCIModelDeploymentTGI(OCIModelDeploymentLLM):
     sampling and Top-p sampling.
     """
 
-    watermark = True
+    watermark: bool = True
     """Watermarking with `A Watermark for Large Language Models <https://arxiv.org/abs/2301.10226>`_.
     Defaults to True."""
 
@@ -18,7 +18,7 @@ class BreebsRetriever(BaseRetriever):
     """
 
     breeb_key: str
-    url = "https://breebs.promptbreeders.com/knowledge"
+    url: str = "https://breebs.promptbreeders.com/knowledge"
 
     def __init__(self, breeb_key: str):
         super().__init__(breeb_key=breeb_key) # type: ignore[call-arg]
@@ -37,8 +37,8 @@ class HuggingFaceTextToSpeechModelInference(BaseTool):
     api_url: str
     huggingface_api_key: SecretStr
 
-    _HUGGINGFACE_API_KEY_ENV_NAME = "HUGGINGFACE_API_KEY"
-    _HUGGINGFACE_API_URL_ROOT = "https://api-inference.huggingface.co/models"
+    _HUGGINGFACE_API_KEY_ENV_NAME: str = "HUGGINGFACE_API_KEY"
+    _HUGGINGFACE_API_URL_ROOT: str = "https://api-inference.huggingface.co/models"
 
     def __init__(
         self,
@@ -24,7 +24,7 @@ class CogniswitchKnowledgeRequest(BaseTool):
     cs_token: str
     OAI_token: str
     apiKey: str
-    api_url = "https://api.cogniswitch.ai:8243/cs-api/0.0.1/cs/knowledgeRequest"
+    api_url: str = "https://api.cogniswitch.ai:8243/cs-api/0.0.1/cs/knowledgeRequest"
 
     def _run(
         self,
@@ -96,7 +96,7 @@ class E2BDataAnalysisToolArguments(BaseModel):
 class E2BDataAnalysisTool(BaseTool):
     """Tool for running python code in a sandboxed environment for data analysis."""
 
-    name = "e2b_data_analysis"
+    name: str = "e2b_data_analysis"
     args_schema: Type[BaseModel] = E2BDataAnalysisToolArguments
     session: Any
     description: str
@@ -28,13 +28,13 @@ class EdenAiSpeechToTextTool(EdenaiTool):
 
     edenai_api_key: Optional[str] = None
 
-    name = "edenai_speech_to_text"
+    name: str = "edenai_speech_to_text"
     description = (
         "A wrapper around edenai Services speech to text "
         "Useful for when you have to convert audio to text."
         "Input should be a url to an audio file."
     )
-    is_async = True
+    is_async: bool = True
 
     language: Optional[str] = "en"
     speakers: Optional[int]
@@ -43,7 +43,7 @@ class EdenAiSpeechToTextTool(EdenaiTool):
 
     feature: str = "audio"
     subfeature: str = "speech_to_text_async"
-    base_url = "https://api.edenai.run/v2/audio/speech_to_text_async/"
+    base_url: str = "https://api.edenai.run/v2/audio/speech_to_text_async/"
 
     @validator("providers")
     def check_only_one_provider_selected(cls, v: List[str]) -> List[str]:
@@ -23,7 +23,7 @@ class EdenAiTextToSpeechTool(EdenaiTool):
 
     """
 
-    name = "edenai_text_to_speech"
+    name: str = "edenai_text_to_speech"
     description = (
         "A wrapper around edenai Services text to speech."
         "Useful for when you need to convert text to speech."
@@ -23,9 +23,9 @@ class EdenAiExplicitImageTool(EdenaiTool):
 
     """
 
-    name = "edenai_image_explicit_content_detection"
+    name: str = "edenai_image_explicit_content_detection"
 
-    description = (
+    description: str = (
         "A wrapper around edenai Services Explicit image detection. "
         """Useful for when you have to extract Explicit Content from images.
         it detects adult only content in images,
@@ -35,9 +35,9 @@ class EdenAiExplicitImageTool(EdenaiTool):
         "Input should be the string url of the image ."
     )
 
-    combine_available = True
-    feature = "image"
-    subfeature = "explicit_content"
+    combine_available: bool = True
+    feature: str = "image"
+    subfeature: str = "explicit_content"
 
     def _parse_json(self, json_data: dict) -> str:
         result_str = f"nsfw_likelihood: {json_data['nsfw_likelihood']}\n"
@@ -22,9 +22,9 @@ class EdenAiObjectDetectionTool(EdenaiTool):
 
     """
 
-    name = "edenai_object_detection"
+    name: str = "edenai_object_detection"
 
-    description = (
+    description: str = (
         "A wrapper around edenai Services Object Detection . "
         """Useful for when you have to do an to identify and locate
         (with bounding boxes) objects in an image """
@@ -33,8 +33,8 @@ class EdenAiObjectDetectionTool(EdenaiTool):
 
     show_positions: bool = False
 
-    feature = "image"
-    subfeature = "object_detection"
+    feature: str = "image"
+    subfeature: str = "object_detection"
 
     def _parse_json(self, json_data: dict) -> str:
         result = []
@@ -22,16 +22,16 @@ class EdenAiParsingIDTool(EdenaiTool):
 
     """
 
-    name = "edenai_identity_parsing"
+    name: str = "edenai_identity_parsing"
 
-    description = (
+    description: str = (
         "A wrapper around edenai Services Identity parsing. "
         "Useful for when you have to extract information from an ID Document "
         "Input should be the string url of the document to parse."
     )
 
-    feature = "ocr"
-    subfeature = "identity_parser"
+    feature: str = "ocr"
+    subfeature: str = "identity_parser"
 
     language: Optional[str] = None
     """
@@ -22,9 +22,9 @@ class EdenAiParsingInvoiceTool(EdenaiTool):
 
     """
 
-    name = "edenai_invoice_parsing"
+    name: str = "edenai_invoice_parsing"
 
-    description = (
+    description: str = (
         "A wrapper around edenai Services invoice parsing. "
         """Useful for when you have to extract information from
         an image it enables to take invoices
@@ -39,8 +39,8 @@ class EdenAiParsingInvoiceTool(EdenaiTool):
     language of the image passed to the model.
     """
 
-    feature = "ocr"
-    subfeature = "invoice_parser"
+    feature: str = "ocr"
+    subfeature: str = "invoice_parser"
 
     def _parse_response(self, response: list) -> str:
         formatted_list: list = []
@@ -22,9 +22,9 @@ class EdenAiTextModerationTool(EdenaiTool):
 
     """
 
-    name = "edenai_explicit_content_detection_text"
+    name: str = "edenai_explicit_content_detection_text"
 
-    description = (
+    description: str = (
         "A wrapper around edenai Services explicit content detection for text. "
         """Useful for when you have to scan text for offensive,
         sexually explicit or suggestive content,
@@ -20,8 +20,8 @@ class YouInput(BaseModel):
 class YouSearchTool(BaseTool):
     """Tool that searches the you.com API."""
 
-    name = "you_search"
-    description = (
+    name: str = "you_search"
+    description: str = (
         "The YOU APIs make LLMs and search experiences more factual and"
         "up to date with realtime web data."
     )
@@ -8,7 +8,7 @@ from langchain_core.pydantic_v1 import BaseModel, Field
 class MojeekSearchAPIWrapper(BaseModel):
     api_key: str
     search_kwargs: dict = Field(default_factory=dict)
-    api_url = "https://api.mojeek.com/search"
+    api_url: str = "https://api.mojeek.com/search"
 
     def run(self, query: str) -> str:
         search_results = self._search(query)
@@ -26,7 +26,7 @@ GUARDRAILS_TRIGGER = os.environ.get(
 class BedrockAsyncCallbackHandler(AsyncCallbackHandler):
     """Async callback handler that can be used to handle callbacks from langchain."""
 
-    guardrails_intervened = False
+    guardrails_intervened: bool = False
 
     async def on_llm_error(
         self,
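As a quick sketch of what the annotated form gives you (hypothetical class name; it mirrors the `attr = value` -> `attr: Type = value` pattern applied throughout the hunks above): the attribute becomes an ordinary pydantic field, so it keeps its default and can still be overridden per instance.

# Hypothetical example class, not taken from the diff.
from pydantic import BaseModel

class ExampleWrapper(BaseModel):
    api_url: str = "https://api.example.com/search"  # annotated default

print(ExampleWrapper().api_url)                                   # default value
print(ExampleWrapper(api_url="https://alt.example.com").api_url)  # overridden per instance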