From 844955d6e1a77102a59bde737841e208b849975a Mon Sep 17 00:00:00 2001
From: Eugene Yurtsev
Date: Wed, 11 Sep 2024 11:13:11 -0400
Subject: [PATCH] community[patch]: assign missed default (#26326)

Assigning missed defaults in various classes. Most clients were being
assigned during the `model_validator(mode="before")` step, so this change
should amount to a no-op in those cases.

---

This PR was autogenerated using gritql

```shell
grit apply 'class_definition(name=$C, $body, superclasses=$S) where {
    $C <: ! "Config", // Does not work in this scope, but works after class_definition
    $body <: block($statements),
    $statements <: some bubble assignment(left=$x, right=$y, type=$t) as $A where {
        or {
            $y <: `Field($z)`,
            $x <: "model_config"
        }
    },
    // And has either Any or Optional fields without a default
    $statements <: some bubble assignment(left=$x, right=$y, type=$t) as $A where {
        $t <: or {
            r"Optional.*",
            r"Any",
            r"Union[None, .*]",
            r"Union[.*, None, .*]",
            r"Union[.*, None]",
        },
        $y <: ., // Match empty node
        $t => `$t = None`,
    },
}
' --language python .
```

---
 .../langchain_community/chains/openapi/chain.py | 2 +-
 .../chat_models/baidu_qianfan_endpoint.py | 2 +-
 .../langchain_community/chat_models/jinachat.py | 2 +-
 .../langchain_community/chat_models/litellm.py | 2 +-
 .../langchain_community/chat_models/llamacpp.py | 2 +-
 .../langchain_community/chat_models/minimax.py | 2 +-
 .../langchain_community/chat_models/premai.py | 2 +-
 .../langchain_community/chat_models/tongyi.py | 2 +-
 .../community/langchain_community/chat_models/yuan2.py | 2 +-
 .../langchain_community/cross_encoders/huggingface.py | 2 +-
 .../cross_encoders/sagemaker_endpoint.py | 2 +-
 .../document_compressors/llmlingua_filter.py | 2 +-
 .../document_compressors/openvino_rerank.py | 4 ++--
 .../langchain_community/embeddings/baichuan.py | 2 +-
 .../embeddings/baidu_qianfan_endpoint.py | 2 +-
 .../langchain_community/embeddings/bedrock.py | 2 +-
 .../community/langchain_community/embeddings/cohere.py | 4 ++--
 .../langchain_community/embeddings/dashscope.py | 2 +-
 .../langchain_community/embeddings/fastembed.py | 6 +++---
 .../langchain_community/embeddings/ipex_llm.py | 2 +-
 libs/community/langchain_community/embeddings/laser.py | 4 ++--
 .../langchain_community/embeddings/llamacpp.py | 2 +-
 .../langchain_community/embeddings/localai.py | 2 +-
 .../langchain_community/embeddings/modelscope_hub.py | 2 +-
 .../langchain_community/embeddings/openvino.py | 4 ++--
 .../langchain_community/embeddings/oracleai.py | 2 +-
 .../langchain_community/embeddings/tensorflow_hub.py | 2 +-
 libs/community/langchain_community/llms/aleph_alpha.py | 2 +-
 libs/community/langchain_community/llms/aphrodite.py | 2 +-
 .../langchain_community/llms/baidu_qianfan_endpoint.py | 2 +-
 libs/community/langchain_community/llms/cohere.py | 4 ++--
 libs/community/langchain_community/llms/ctranslate2.py | 4 ++--
 libs/community/langchain_community/llms/exllamav2.py | 2 +-
 libs/community/langchain_community/llms/gooseai.py | 2 +-
 .../langchain_community/llms/huggingface_pipeline.py | 2 +-
 .../llms/huggingface_text_gen_inference.py | 4 ++--
 libs/community/langchain_community/llms/ipex_llm.py | 4 ++--
 libs/community/langchain_community/llms/llamacpp.py | 2 +-
 libs/community/langchain_community/llms/manifest.py | 2 +-
 .../community/langchain_community/llms/mlx_pipeline.py | 4 ++--
 libs/community/langchain_community/llms/nlpcloud.py | 2 +-
 libs/community/langchain_community/llms/petals.py | 4 ++--
 .../langchain_community/llms/predictionguard.py | 2 +-
 libs/community/langchain_community/llms/self_hosted.py | 6 +++---
 .../llms/self_hosted_hugging_face.py | 2 +-
 libs/community/langchain_community/llms/tongyi.py | 2 +-
 libs/community/langchain_community/llms/vllm.py | 2 +-
 .../langchain_community/llms/volcengine_maas.py | 2 +-
 libs/community/langchain_community/llms/watsonxllm.py | 2 +-
 .../llms/weight_only_quantization.py | 2 +-
 libs/community/langchain_community/retrievers/bm25.py | 2 +-
 .../langchain_community/retrievers/docarray.py | 2 +-
 .../retrievers/google_vertex_ai_search.py | 4 ++--
 libs/community/langchain_community/retrievers/knn.py | 2 +-
 .../langchain_community/retrievers/llama_index.py | 4 ++--
 .../community/langchain_community/retrievers/nanopq.py | 2 +-
 .../retrievers/pinecone_hybrid_search.py | 4 ++--
 .../retrievers/qdrant_sparse_vector_retriever.py | 2 +-
 libs/community/langchain_community/retrievers/svm.py | 2 +-
 libs/community/langchain_community/retrievers/tfidf.py | 4 ++--
 .../retrievers/weaviate_hybrid_search.py | 2 +-
 .../tools/edenai/audio_text_to_speech.py | 10 +++++-----
 .../langchain_community/tools/powerbi/tool.py | 2 +-
 .../community/langchain_community/utilities/asknews.py | 4 ++--
 .../langchain_community/utilities/awslambda.py | 2 +-
 .../utilities/dalle_image_generator.py | 2 +-
 .../langchain_community/utilities/dataherald.py | 2 +-
 libs/community/langchain_community/utilities/github.py | 4 ++--
 libs/community/langchain_community/utilities/gitlab.py | 4 ++--
 .../langchain_community/utilities/google_finance.py | 2 +-
 .../langchain_community/utilities/google_jobs.py | 2 +-
 .../langchain_community/utilities/google_lens.py | 2 +-
 .../langchain_community/utilities/google_places_api.py | 2 +-
 .../langchain_community/utilities/google_search.py | 2 +-
 .../langchain_community/utilities/google_trends.py | 2 +-
 .../community/langchain_community/utilities/graphql.py | 2 +-
 libs/community/langchain_community/utilities/jira.py | 4 ++--
 .../langchain_community/utilities/openweathermap.py | 2 +-
 .../community/langchain_community/utilities/serpapi.py | 2 +-
 .../langchain_community/utilities/stackexchange.py | 2 +-
 libs/community/langchain_community/utilities/steam.py | 2 +-
 libs/community/langchain_community/utilities/twilio.py | 2 +-
 .../langchain_community/utilities/wolfram_alpha.py | 2 +-
 83 files changed, 109 insertions(+), 109 deletions(-)

diff --git a/libs/community/langchain_community/chains/openapi/chain.py b/libs/community/langchain_community/chains/openapi/chain.py
index 983efea7cc8..46766ae9441 100644
--- a/libs/community/langchain_community/chains/openapi/chain.py
+++ b/libs/community/langchain_community/chains/openapi/chain.py
@@ -30,7 +30,7 @@ class OpenAPIEndpointChain(Chain, BaseModel):
     """Chain interacts with an OpenAPI endpoint using natural language."""
 
     api_request_chain: LLMChain
-    api_response_chain: Optional[LLMChain]
+    api_response_chain: Optional[LLMChain] = None
     api_operation: APIOperation
     requests: Requests = Field(exclude=True, default_factory=Requests)
     param_mapping: _ParamMapping = Field(alias="param_mapping")
diff --git a/libs/community/langchain_community/chat_models/baidu_qianfan_endpoint.py b/libs/community/langchain_community/chat_models/baidu_qianfan_endpoint.py
index a4fe51e3b42..741528d9152 100644
--- a/libs/community/langchain_community/chat_models/baidu_qianfan_endpoint.py
+++ b/libs/community/langchain_community/chat_models/baidu_qianfan_endpoint.py
@@ -346,7 +346,7 @@ class QianfanChatEndpoint(BaseChatModel):
     model_kwargs: Dict[str, Any] = Field(default_factory=dict)
     """extra params for model invoke using with `do`."""
 
-    client: Any  #: :meta private:
+    client: Any = None  #: :meta private:
 
     # It could be empty due to the use of Console API
     # And they're not list here
diff --git a/libs/community/langchain_community/chat_models/jinachat.py b/libs/community/langchain_community/chat_models/jinachat.py
index f258674856b..72c962d9354 100644
--- a/libs/community/langchain_community/chat_models/jinachat.py
+++ b/libs/community/langchain_community/chat_models/jinachat.py
@@ -171,7 +171,7 @@ class JinaChat(BaseChatModel):
         """Return whether this model can be serialized by Langchain."""
         return False
 
-    client: Any  #: :meta private:
+    client: Any = None  #: :meta private:
     temperature: float = 0.7
     """What sampling temperature to use."""
     model_kwargs: Dict[str, Any] = Field(default_factory=dict)
diff --git a/libs/community/langchain_community/chat_models/litellm.py b/libs/community/langchain_community/chat_models/litellm.py
index 97e6a3403fc..d6c95573398 100644
--- a/libs/community/langchain_community/chat_models/litellm.py
+++ b/libs/community/langchain_community/chat_models/litellm.py
@@ -215,7 +215,7 @@ def _convert_message_to_dict(message: BaseMessage) -> dict:
 class ChatLiteLLM(BaseChatModel):
     """Chat model that uses the LiteLLM API."""
 
-    client: Any  #: :meta private:
+    client: Any = None  #: :meta private:
     model: str = "gpt-3.5-turbo"
     model_name: Optional[str] = None
     """Model name to use."""
diff --git a/libs/community/langchain_community/chat_models/llamacpp.py b/libs/community/langchain_community/chat_models/llamacpp.py
index ed8d0a528ad..65055e7fa7b 100644
--- a/libs/community/langchain_community/chat_models/llamacpp.py
+++ b/libs/community/langchain_community/chat_models/llamacpp.py
@@ -67,7 +67,7 @@ class ChatLlamaCpp(BaseChatModel):
     """
 
-    client: Any  #: :meta private:
+    client: Any = None  #: :meta private:
 
     model_path: str
     """The path to the Llama model file."""
diff --git a/libs/community/langchain_community/chat_models/minimax.py b/libs/community/langchain_community/chat_models/minimax.py
index 46cd4c19501..b4a06e45d33 100644
--- a/libs/community/langchain_community/chat_models/minimax.py
+++ b/libs/community/langchain_community/chat_models/minimax.py
@@ -369,7 +369,7 @@ class MiniMaxChat(BaseChatModel):
             **self.model_kwargs,
         }
 
-    _client: Any
+    _client: Any = None
     model: str = "abab6.5-chat"
     """Model name to use."""
     max_tokens: int = 256
diff --git a/libs/community/langchain_community/chat_models/premai.py b/libs/community/langchain_community/chat_models/premai.py
index 60fba533508..5be59560ac7 100644
--- a/libs/community/langchain_community/chat_models/premai.py
+++ b/libs/community/langchain_community/chat_models/premai.py
@@ -305,7 +305,7 @@ class ChatPremAI(BaseChatModel, BaseModel):
     streaming: Optional[bool] = False
     """Whether to stream the responses or not."""
 
-    client: Any
+    client: Any = None
 
     model_config = ConfigDict(
         populate_by_name=True,
diff --git a/libs/community/langchain_community/chat_models/tongyi.py b/libs/community/langchain_community/chat_models/tongyi.py
index 6e0a2d59787..a39e744531a 100644
--- a/libs/community/langchain_community/chat_models/tongyi.py
+++ b/libs/community/langchain_community/chat_models/tongyi.py
@@ -434,7 +434,7 @@ class ChatTongyi(BaseChatModel):
     def lc_secrets(self) -> Dict[str, str]:
         return {"dashscope_api_key": "DASHSCOPE_API_KEY"}
 
-    client: Any  #: :meta private:
+    client: Any = None  #: :meta private:
     model_name: str = Field(default="qwen-turbo", alias="model")
     """Model name to use.
     callable multimodal model:
diff --git a/libs/community/langchain_community/chat_models/yuan2.py b/libs/community/langchain_community/chat_models/yuan2.py
index 9266dadcadc..f5d0a2e0951 100644
--- a/libs/community/langchain_community/chat_models/yuan2.py
+++ b/libs/community/langchain_community/chat_models/yuan2.py
@@ -76,7 +76,7 @@ class ChatYuan2(BaseChatModel):
         chat = ChatYuan2()
     """
 
-    client: Any  #: :meta private:
+    client: Any = None  #: :meta private:
     async_client: Any = Field(default=None, exclude=True)  #: :meta private:
 
     model_name: str = Field(default="yuan2", alias="model")
diff --git a/libs/community/langchain_community/cross_encoders/huggingface.py b/libs/community/langchain_community/cross_encoders/huggingface.py
index 5c10553e17e..a0d1f68a12f 100644
--- a/libs/community/langchain_community/cross_encoders/huggingface.py
+++ b/libs/community/langchain_community/cross_encoders/huggingface.py
@@ -23,7 +23,7 @@ class HuggingFaceCrossEncoder(BaseModel, BaseCrossEncoder):
         )
     """
 
-    client: Any  #: :meta private:
+    client: Any = None  #: :meta private:
     model_name: str = DEFAULT_MODEL_NAME
     """Model name to use."""
     model_kwargs: Dict[str, Any] = Field(default_factory=dict)
diff --git a/libs/community/langchain_community/cross_encoders/sagemaker_endpoint.py b/libs/community/langchain_community/cross_encoders/sagemaker_endpoint.py
index aed3de5354f..9cc6c8e2f7b 100644
--- a/libs/community/langchain_community/cross_encoders/sagemaker_endpoint.py
+++ b/libs/community/langchain_community/cross_encoders/sagemaker_endpoint.py
@@ -61,7 +61,7 @@ class SagemakerEndpointCrossEncoder(BaseModel, BaseCrossEncoder):
             credentials_profile_name=credentials_profile_name
         )
     """
-    client: Any  #: :meta private:
+    client: Any = None  #: :meta private:
 
     endpoint_name: str = ""
     """The name of the endpoint from the deployed Sagemaker model.
diff --git a/libs/community/langchain_community/document_compressors/llmlingua_filter.py b/libs/community/langchain_community/document_compressors/llmlingua_filter.py
index be1a1ee5d87..8fe82901608 100644
--- a/libs/community/langchain_community/document_compressors/llmlingua_filter.py
+++ b/libs/community/langchain_community/document_compressors/llmlingua_filter.py
@@ -49,7 +49,7 @@ class LLMLinguaCompressor(BaseDocumentCompressor):
         "dynamic_context_compression_ratio": 0.4,
     }
     """Extra compression arguments"""
-    lingua: Any
+    lingua: Any = None
     """The instance of the llm linqua"""
 
     @model_validator(mode="before")
diff --git a/libs/community/langchain_community/document_compressors/openvino_rerank.py b/libs/community/langchain_community/document_compressors/openvino_rerank.py
index 9257a88f189..47d8fb6e0bf 100644
--- a/libs/community/langchain_community/document_compressors/openvino_rerank.py
+++ b/libs/community/langchain_community/document_compressors/openvino_rerank.py
@@ -21,9 +21,9 @@ class OpenVINOReranker(BaseDocumentCompressor):
     OpenVINO rerank models.
     """
 
-    ov_model: Any
+    ov_model: Any = None
     """OpenVINO model object."""
-    tokenizer: Any
+    tokenizer: Any = None
     """Tokenizer for embedding model."""
     model_name_or_path: str
     """HuggingFace model id."""
diff --git a/libs/community/langchain_community/embeddings/baichuan.py b/libs/community/langchain_community/embeddings/baichuan.py
index 082ff369ed7..a2f63773a27 100644
--- a/libs/community/langchain_community/embeddings/baichuan.py
+++ b/libs/community/langchain_community/embeddings/baichuan.py
@@ -59,7 +59,7 @@ class BaichuanTextEmbeddings(BaseModel, Embeddings):
             vectors = embeddings.embed_query(text)
     """  # noqa: E501
 
-    session: Any  #: :meta private:
+    session: Any = None  #: :meta private:
     model_name: str = Field(default="Baichuan-Text-Embedding", alias="model")
     """The model used to embed the documents."""
     baichuan_api_key: SecretStr = Field(
diff --git a/libs/community/langchain_community/embeddings/baidu_qianfan_endpoint.py b/libs/community/langchain_community/embeddings/baidu_qianfan_endpoint.py
index 981378eb19f..14e2f35a4ea 100644
--- a/libs/community/langchain_community/embeddings/baidu_qianfan_endpoint.py
+++ b/libs/community/langchain_community/embeddings/baidu_qianfan_endpoint.py
@@ -71,7 +71,7 @@ class QianfanEmbeddingsEndpoint(BaseModel, Embeddings):
     endpoint: str = ""
     """Endpoint of the Qianfan Embedding, required if custom model used."""
 
-    client: Any
+    client: Any = None
     """Qianfan client"""
 
     init_kwargs: Dict[str, Any] = Field(default_factory=dict)
diff --git a/libs/community/langchain_community/embeddings/bedrock.py b/libs/community/langchain_community/embeddings/bedrock.py
index 7287e1acee9..7fcfe707b27 100644
--- a/libs/community/langchain_community/embeddings/bedrock.py
+++ b/libs/community/langchain_community/embeddings/bedrock.py
@@ -47,7 +47,7 @@ class BedrockEmbeddings(BaseModel, Embeddings):
         )
     """
 
-    client: Any  #: :meta private:
+    client: Any = None  #: :meta private:
     """Bedrock client."""
     region_name: Optional[str] = None
     """The aws region e.g., `us-west-2`. Fallsback to AWS_DEFAULT_REGION env variable
diff --git a/libs/community/langchain_community/embeddings/cohere.py b/libs/community/langchain_community/embeddings/cohere.py
index 2b4e0efe57b..504f688100f 100644
--- a/libs/community/langchain_community/embeddings/cohere.py
+++ b/libs/community/langchain_community/embeddings/cohere.py
@@ -30,9 +30,9 @@ class CohereEmbeddings(BaseModel, Embeddings):
         )
     """
 
-    client: Any  #: :meta private:
+    client: Any = None  #: :meta private:
     """Cohere client."""
-    async_client: Any  #: :meta private:
+    async_client: Any = None  #: :meta private:
     """Cohere async client."""
     model: str = "embed-english-v2.0"
     """Model name to use."""
diff --git a/libs/community/langchain_community/embeddings/dashscope.py b/libs/community/langchain_community/embeddings/dashscope.py
index e2f69712145..0963808635d 100644
--- a/libs/community/langchain_community/embeddings/dashscope.py
+++ b/libs/community/langchain_community/embeddings/dashscope.py
@@ -101,7 +101,7 @@ class DashScopeEmbeddings(BaseModel, Embeddings):
 
     """
 
-    client: Any  #: :meta private:
+    client: Any = None  #: :meta private:
     """The DashScope client."""
     model: str = "text-embedding-v1"
     dashscope_api_key: Optional[str] = None
diff --git a/libs/community/langchain_community/embeddings/fastembed.py b/libs/community/langchain_community/embeddings/fastembed.py
index 75dcbda805f..288061b67cb 100644
--- a/libs/community/langchain_community/embeddings/fastembed.py
+++ b/libs/community/langchain_community/embeddings/fastembed.py
@@ -38,12 +38,12 @@ class FastEmbedEmbeddings(BaseModel, Embeddings):
     Unknown behavior for values > 512.
     """
 
-    cache_dir: Optional[str]
+    cache_dir: Optional[str] = None
     """The path to the cache directory.
     Defaults to `local_cache` in the parent directory
     """
 
-    threads: Optional[int]
+    threads: Optional[int] = None
     """The number of threads single onnxruntime session can use.
     Defaults to None
     """
@@ -65,7 +65,7 @@ class FastEmbedEmbeddings(BaseModel, Embeddings):
     Defaults to `None`.
     """
 
-    _model: Any  # : :meta private:
+    _model: Any = None  # : :meta private:
 
     model_config = ConfigDict(extra="allow", protected_namespaces=())
diff --git a/libs/community/langchain_community/embeddings/ipex_llm.py b/libs/community/langchain_community/embeddings/ipex_llm.py
index 1f53cd7fd7b..8022616f22d 100644
--- a/libs/community/langchain_community/embeddings/ipex_llm.py
+++ b/libs/community/langchain_community/embeddings/ipex_llm.py
@@ -49,7 +49,7 @@ class IpexLLMBgeEmbeddings(BaseModel, Embeddings):
         )
     """
 
-    client: Any  #: :meta private:
+    client: Any = None  #: :meta private:
     model_name: str = DEFAULT_BGE_MODEL
     """Model name to use."""
     cache_folder: Optional[str] = None
diff --git a/libs/community/langchain_community/embeddings/laser.py b/libs/community/langchain_community/embeddings/laser.py
index 4525bc2aee0..1299e55e518 100644
--- a/libs/community/langchain_community/embeddings/laser.py
+++ b/libs/community/langchain_community/embeddings/laser.py
@@ -27,7 +27,7 @@ class LaserEmbeddings(BaseModel, Embeddings):
         embeddings = encoder.encode_sentences(["Hello", "World"])
     """
 
-    lang: Optional[str]
+    lang: Optional[str] = None
     """The language or language code you'd like to use
     If empty, this implementation will default
     to using a multilingual earlier LASER encoder model (called laser2)
@@ -35,7 +35,7 @@
     https://github.com/facebookresearch/flores/blob/main/flores200/README.md#languages-in-flores-200
     """
 
-    _encoder_pipeline: Any  # : :meta private:
+    _encoder_pipeline: Any = None  # : :meta private:
 
     model_config = ConfigDict(
         extra="forbid",
diff --git a/libs/community/langchain_community/embeddings/llamacpp.py b/libs/community/langchain_community/embeddings/llamacpp.py
index 5a90b05aef9..86a029ea753 100644
--- a/libs/community/langchain_community/embeddings/llamacpp.py
+++ b/libs/community/langchain_community/embeddings/llamacpp.py
@@ -19,7 +19,7 @@ class LlamaCppEmbeddings(BaseModel, Embeddings):
         llama = LlamaCppEmbeddings(model_path="/path/to/model.bin")
     """
 
-    client: Any  #: :meta private:
+    client: Any = None  #: :meta private:
     model_path: str
 
     n_ctx: int = Field(512, alias="n_ctx")
diff --git a/libs/community/langchain_community/embeddings/localai.py b/libs/community/langchain_community/embeddings/localai.py
index f4ca9fa9723..924f162128e 100644
--- a/libs/community/langchain_community/embeddings/localai.py
+++ b/libs/community/langchain_community/embeddings/localai.py
@@ -141,7 +141,7 @@ class LocalAIEmbeddings(BaseModel, Embeddings):
 
     """
 
-    client: Any  #: :meta private:
+    client: Any = None  #: :meta private:
     model: str = "text-embedding-ada-002"
     deployment: str = model
     openai_api_version: Optional[str] = None
diff --git a/libs/community/langchain_community/embeddings/modelscope_hub.py b/libs/community/langchain_community/embeddings/modelscope_hub.py
index a0c78633916..e200244c551 100644
--- a/libs/community/langchain_community/embeddings/modelscope_hub.py
+++ b/libs/community/langchain_community/embeddings/modelscope_hub.py
@@ -17,7 +17,7 @@ class ModelScopeEmbeddings(BaseModel, Embeddings):
         embed = ModelScopeEmbeddings(model_id=model_id, model_revision="v1.0.0")
     """
 
-    embed: Any
+    embed: Any = None
     model_id: str = "damo/nlp_corom_sentence-embedding_english-base"
     """Model name to use."""
     model_revision: Optional[str] = None
diff --git a/libs/community/langchain_community/embeddings/openvino.py b/libs/community/langchain_community/embeddings/openvino.py
index d58bf92d255..3dbbdef45f1 100644
--- a/libs/community/langchain_community/embeddings/openvino.py
+++ b/libs/community/langchain_community/embeddings/openvino.py
@@ -31,9 +31,9 @@ class OpenVINOEmbeddings(BaseModel, Embeddings):
         )
     """
 
-    ov_model: Any
+    ov_model: Any = None
     """OpenVINO model object."""
-    tokenizer: Any
+    tokenizer: Any = None
     """Tokenizer for embedding model."""
     model_name_or_path: str
     """HuggingFace model id."""
diff --git a/libs/community/langchain_community/embeddings/oracleai.py b/libs/community/langchain_community/embeddings/oracleai.py
index 4b81787206e..d1dca419052 100644
--- a/libs/community/langchain_community/embeddings/oracleai.py
+++ b/libs/community/langchain_community/embeddings/oracleai.py
@@ -28,7 +28,7 @@ class OracleEmbeddings(BaseModel, Embeddings):
     """Get Embeddings"""
 
     """Oracle Connection"""
-    conn: Any
+    conn: Any = None
    """Embedding Parameters"""
     params: Dict[str, Any]
     """Proxy"""
diff --git a/libs/community/langchain_community/embeddings/tensorflow_hub.py b/libs/community/langchain_community/embeddings/tensorflow_hub.py
index 15c32941748..a9c2edf419b 100644
--- a/libs/community/langchain_community/embeddings/tensorflow_hub.py
+++ b/libs/community/langchain_community/embeddings/tensorflow_hub.py
@@ -19,7 +19,7 @@ class TensorflowHubEmbeddings(BaseModel, Embeddings):
         tf = TensorflowHubEmbeddings(model_url=url)
     """
 
-    embed: Any  #: :meta private:
+    embed: Any = None  #: :meta private:
     model_url: str = DEFAULT_MODEL_URL
     """Model name to use."""
 
diff --git a/libs/community/langchain_community/llms/aleph_alpha.py b/libs/community/langchain_community/llms/aleph_alpha.py
index f0a6735ecf5..73cf1f0dd85 100644
--- a/libs/community/langchain_community/llms/aleph_alpha.py
+++ b/libs/community/langchain_community/llms/aleph_alpha.py
@@ -25,7 +25,7 @@ class AlephAlpha(LLM):
         aleph_alpha = AlephAlpha(aleph_alpha_api_key="my-api-key")
     """
 
-    client: Any  #: :meta private:
+    client: Any = None  #: :meta private:
     model: Optional[str] = "luminous-base"
     """Model name to use."""
 
diff --git a/libs/community/langchain_community/llms/aphrodite.py b/libs/community/langchain_community/llms/aphrodite.py
index 7d752b07e79..bcaeabe296d 100644
--- a/libs/community/langchain_community/llms/aphrodite.py
+++ b/libs/community/langchain_community/llms/aphrodite.py
@@ -156,7 +156,7 @@ class Aphrodite(BaseLLM):
     """Holds any model parameters valid for `aphrodite.LLM` call not explicitly
     specified."""
 
-    client: Any  #: :meta private:
+    client: Any = None  #: :meta private:
 
     @pre_init
     def validate_environment(cls, values: Dict) -> Dict:
diff --git a/libs/community/langchain_community/llms/baidu_qianfan_endpoint.py b/libs/community/langchain_community/llms/baidu_qianfan_endpoint.py
index 6e4bc68b57f..5268b780344 100644
--- a/libs/community/langchain_community/llms/baidu_qianfan_endpoint.py
+++ b/libs/community/langchain_community/llms/baidu_qianfan_endpoint.py
@@ -123,7 +123,7 @@ class QianfanLLMEndpoint(LLM):
     model_kwargs: Dict[str, Any] = Field(default_factory=dict)
     """extra params for model invoke using with `do`."""
 
-    client: Any
+    client: Any = None
 
     qianfan_ak: Optional[SecretStr] = Field(default=None, alias="api_key")
     qianfan_sk: Optional[SecretStr] = Field(default=None, alias="secret_key")
diff --git a/libs/community/langchain_community/llms/cohere.py b/libs/community/langchain_community/llms/cohere.py
index 875e9228122..dbe15e200a3 100644
--- a/libs/community/langchain_community/llms/cohere.py
+++ b/libs/community/langchain_community/llms/cohere.py
@@ -76,8 +76,8 @@ def acompletion_with_retry(llm: Cohere, **kwargs: Any) -> Any:
 
 class BaseCohere(Serializable):
     """Base class for Cohere models."""
 
-    client: Any  #: :meta private:
-    async_client: Any  #: :meta private:
+    client: Any = None  #: :meta private:
+    async_client: Any = None  #: :meta private:
     model: Optional[str] = Field(default=None)
     """Model name to use."""
diff --git a/libs/community/langchain_community/llms/ctranslate2.py b/libs/community/langchain_community/llms/ctranslate2.py
index 71bfeb1f798..bc78c7e4a42 100644
--- a/libs/community/langchain_community/llms/ctranslate2.py
+++ b/libs/community/langchain_community/llms/ctranslate2.py
@@ -41,9 +41,9 @@ class CTranslate2(BaseLLM):
     sampling_temperature: float = 1
     """Sampling temperature to generate more random samples."""
 
-    client: Any  #: :meta private:
+    client: Any = None  #: :meta private:
 
-    tokenizer: Any  #: :meta private:
+    tokenizer: Any = None  #: :meta private:
 
     ctranslate2_kwargs: Dict[str, Any] = Field(default_factory=dict)
     """
diff --git a/libs/community/langchain_community/llms/exllamav2.py b/libs/community/langchain_community/llms/exllamav2.py
index d91a38034c2..27f5c1ac1d5 100644
--- a/libs/community/langchain_community/llms/exllamav2.py
+++ b/libs/community/langchain_community/llms/exllamav2.py
@@ -30,7 +30,7 @@ class ExLlamaV2(LLM):
     - Add support for custom stop sequences
     """
 
-    client: Any
+    client: Any = None
     model_path: str
     exllama_cache: Any = None
     config: Any = None
diff --git a/libs/community/langchain_community/llms/gooseai.py b/libs/community/langchain_community/llms/gooseai.py
index 8a2dbdf37db..990dee8758c 100644
--- a/libs/community/langchain_community/llms/gooseai.py
+++ b/libs/community/langchain_community/llms/gooseai.py
@@ -30,7 +30,7 @@ class GooseAI(LLM):
 
     """
 
-    client: Any
+    client: Any = None
 
     model_name: str = "gpt-neo-20b"
     """Model name to use"""
diff --git a/libs/community/langchain_community/llms/huggingface_pipeline.py b/libs/community/langchain_community/llms/huggingface_pipeline.py
index 852e49c9385..44b4558d2df 100644
--- a/libs/community/langchain_community/llms/huggingface_pipeline.py
+++ b/libs/community/langchain_community/llms/huggingface_pipeline.py
@@ -60,7 +60,7 @@ class HuggingFacePipeline(BaseLLM):
         hf = HuggingFacePipeline(pipeline=pipe)
     """
 
-    pipeline: Any  #: :meta private:
+    pipeline: Any = None  #: :meta private:
     model_id: str = DEFAULT_MODEL_ID
     """Model name to use."""
     model_kwargs: Optional[dict] = None
diff --git a/libs/community/langchain_community/llms/huggingface_text_gen_inference.py b/libs/community/langchain_community/llms/huggingface_text_gen_inference.py
index 97acb3ce78c..e536a2ee4af 100644
--- a/libs/community/langchain_community/llms/huggingface_text_gen_inference.py
+++ b/libs/community/langchain_community/llms/huggingface_text_gen_inference.py
@@ -100,8 +100,8 @@ class HuggingFaceTextGenInference(LLM):
     """Holds any text-generation-inference server parameters not explicitly specified"""
     model_kwargs: Dict[str, Any] = Field(default_factory=dict)
     """Holds any model parameters valid for `call` not explicitly specified"""
-    client: Any
-    async_client: Any
+    client: Any = None
+    async_client: Any = None
 
     model_config = ConfigDict(
         extra="forbid",
diff --git a/libs/community/langchain_community/llms/ipex_llm.py b/libs/community/langchain_community/llms/ipex_llm.py
index 72d5afa342d..1758c323d0e 100644
--- a/libs/community/langchain_community/llms/ipex_llm.py
+++ b/libs/community/langchain_community/llms/ipex_llm.py
@@ -25,9 +25,9 @@ class IpexLLM(LLM):
     """Model name or model path to use."""
     model_kwargs: Optional[dict] = None
     """Keyword arguments passed to the model."""
-    model: Any  #: :meta private:
+    model: Any = None  #: :meta private:
     """IpexLLM model."""
-    tokenizer: Any  #: :meta private:
+    tokenizer: Any = None  #: :meta private:
     """Huggingface tokenizer model."""
     streaming: bool = True
     """Whether to stream the results, token by token."""
diff --git a/libs/community/langchain_community/llms/llamacpp.py b/libs/community/langchain_community/llms/llamacpp.py
index d206f138e6b..15d1119fcad 100644
--- a/libs/community/langchain_community/llms/llamacpp.py
+++ b/libs/community/langchain_community/llms/llamacpp.py
@@ -28,7 +28,7 @@ class LlamaCpp(LLM):
         llm = LlamaCpp(model_path="/path/to/llama/model")
     """
 
-    client: Any  #: :meta private:
+    client: Any = None  #: :meta private:
 
     model_path: str
     """The path to the Llama model file."""
diff --git a/libs/community/langchain_community/llms/manifest.py b/libs/community/langchain_community/llms/manifest.py
index 745de7e5954..966933d5f95 100644
--- a/libs/community/langchain_community/llms/manifest.py
+++ b/libs/community/langchain_community/llms/manifest.py
@@ -9,7 +9,7 @@ from pydantic import ConfigDict
 class ManifestWrapper(LLM):
     """HazyResearch's Manifest library."""
 
-    client: Any  #: :meta private:
+    client: Any = None  #: :meta private:
     llm_kwargs: Optional[Dict] = None
 
     model_config = ConfigDict(
diff --git a/libs/community/langchain_community/llms/mlx_pipeline.py b/libs/community/langchain_community/llms/mlx_pipeline.py
index 0937c8f39d5..3856a80658e 100644
--- a/libs/community/langchain_community/llms/mlx_pipeline.py
+++ b/libs/community/langchain_community/llms/mlx_pipeline.py
@@ -38,9 +38,9 @@ class MLXPipeline(LLM):
     model_id: str = DEFAULT_MODEL_ID
     """Model name to use."""
 
-    model: Any  #: :meta private:
+    model: Any = None  #: :meta private:
     """Model."""
-    tokenizer: Any  #: :meta private:
+    tokenizer: Any = None  #: :meta private:
     """Tokenizer."""
     tokenizer_config: Optional[dict] = None
     """
diff --git a/libs/community/langchain_community/llms/nlpcloud.py b/libs/community/langchain_community/llms/nlpcloud.py
index cd014f8581f..774122a38fd 100644
--- a/libs/community/langchain_community/llms/nlpcloud.py
+++ b/libs/community/langchain_community/llms/nlpcloud.py
@@ -19,7 +19,7 @@ class NLPCloud(LLM):
         nlpcloud = NLPCloud(model="finetuned-gpt-neox-20b")
     """
 
-    client: Any  #: :meta private:
+    client: Any = None  #: :meta private:
     model_name: str = "finetuned-gpt-neox-20b"
     """Model name to use."""
     gpu: bool = True
diff --git a/libs/community/langchain_community/llms/petals.py b/libs/community/langchain_community/llms/petals.py
index 3f8e4f87333..7210037c6d4 100644
--- a/libs/community/langchain_community/llms/petals.py
+++ b/libs/community/langchain_community/llms/petals.py
@@ -29,10 +29,10 @@ class Petals(LLM):
 
     """
 
-    client: Any
+    client: Any = None
     """The client to use for the API calls."""
 
-    tokenizer: Any
+    tokenizer: Any = None
     """The tokenizer to use for the API calls."""
 
     model_name: str = "bigscience/bloom-petals"
diff --git a/libs/community/langchain_community/llms/predictionguard.py b/libs/community/langchain_community/llms/predictionguard.py
index 9e85daf4ba6..a26d853ff4b 100644
--- a/libs/community/langchain_community/llms/predictionguard.py
+++ b/libs/community/langchain_community/llms/predictionguard.py
@@ -30,7 +30,7 @@ class PredictionGuard(LLM):
         })
     """
 
-    client: Any  #: :meta private:
+    client: Any = None  #: :meta private:
     model: Optional[str] = "MPT-7B-Instruct"
     """Model name to use."""
 
diff --git a/libs/community/langchain_community/llms/self_hosted.py b/libs/community/langchain_community/llms/self_hosted.py
index 5026c2108a7..70098685786 100644
--- a/libs/community/langchain_community/llms/self_hosted.py
+++ b/libs/community/langchain_community/llms/self_hosted.py
@@ -126,11 +126,11 @@ class SelfHostedPipeline(LLM):
         )
     """
 
-    pipeline_ref: Any  #: :meta private:
-    client: Any  #: :meta private:
+    pipeline_ref: Any = None  #: :meta private:
+    client: Any = None  #: :meta private:
     inference_fn: Callable = _generate_text  #: :meta private:
     """Inference function to send to the remote hardware."""
-    hardware: Any
+    hardware: Any = None
     """Remote hardware to send the inference function to."""
     model_load_fn: Callable
     """Function to load the model remotely on the server."""
diff --git a/libs/community/langchain_community/llms/self_hosted_hugging_face.py b/libs/community/langchain_community/llms/self_hosted_hugging_face.py
index 5003a86aeb3..016da2e48f9 100644
--- a/libs/community/langchain_community/llms/self_hosted_hugging_face.py
+++ b/libs/community/langchain_community/llms/self_hosted_hugging_face.py
@@ -160,7 +160,7 @@ class SelfHostedHuggingFaceLLM(SelfHostedPipeline):
     """Device to use for inference. -1 for CPU, 0 for GPU, 1 for second GPU, etc."""
     model_kwargs: Optional[dict] = None
     """Keyword arguments to pass to the model."""
-    hardware: Any
+    hardware: Any = None
     """Remote hardware to send the inference function to."""
     model_reqs: List[str] = ["./", "transformers", "torch"]
     """Requirements to install on hardware to inference the model."""
diff --git a/libs/community/langchain_community/llms/tongyi.py b/libs/community/langchain_community/llms/tongyi.py
index 65bdd47ceae..e7289955f66 100644
--- a/libs/community/langchain_community/llms/tongyi.py
+++ b/libs/community/langchain_community/llms/tongyi.py
@@ -238,7 +238,7 @@ class Tongyi(BaseLLM):
     def lc_secrets(self) -> Dict[str, str]:
         return {"dashscope_api_key": "DASHSCOPE_API_KEY"}
 
-    client: Any  #: :meta private:
+    client: Any = None  #: :meta private:
     model_name: str = Field(default="qwen-plus", alias="model")
     """Model name to use."""
 
diff --git a/libs/community/langchain_community/llms/vllm.py b/libs/community/langchain_community/llms/vllm.py
index 83e28b7ac71..b887a8b0ab2 100644
--- a/libs/community/langchain_community/llms/vllm.py
+++ b/libs/community/langchain_community/llms/vllm.py
@@ -72,7 +72,7 @@ class VLLM(BaseLLM):
     vllm_kwargs: Dict[str, Any] = Field(default_factory=dict)
     """Holds any model parameters valid for `vllm.LLM` call not explicitly
     specified."""
 
-    client: Any  #: :meta private:
+    client: Any = None  #: :meta private:
 
     @pre_init
     def validate_environment(cls, values: Dict) -> Dict:
diff --git a/libs/community/langchain_community/llms/volcengine_maas.py b/libs/community/langchain_community/llms/volcengine_maas.py
index 1046eb4544b..c62c3523bb3 100644
--- a/libs/community/langchain_community/llms/volcengine_maas.py
+++ b/libs/community/langchain_community/llms/volcengine_maas.py
@@ -12,7 +12,7 @@ from pydantic import BaseModel, Field, SecretStr
 class VolcEngineMaasBase(BaseModel):
     """Base class for VolcEngineMaas models."""
 
-    client: Any
+    client: Any = None
 
     volc_engine_maas_ak: Optional[SecretStr] = None
     """access key for volc engine"""
diff --git a/libs/community/langchain_community/llms/watsonxllm.py b/libs/community/langchain_community/llms/watsonxllm.py
index a19909e2da2..9a63d824137 100644
--- a/libs/community/langchain_community/llms/watsonxllm.py
+++ b/libs/community/langchain_community/llms/watsonxllm.py
@@ -93,7 +93,7 @@ class WatsonxLLM(BaseLLM):
     streaming: bool = False
     """ Whether to stream the results or not. """
 
-    watsonx_model: Any
+    watsonx_model: Any = None
 
     model_config = ConfigDict(
         extra="forbid",
diff --git a/libs/community/langchain_community/llms/weight_only_quantization.py b/libs/community/langchain_community/llms/weight_only_quantization.py
index 35302ff6fd5..916734414fb 100644
--- a/libs/community/langchain_community/llms/weight_only_quantization.py
+++ b/libs/community/langchain_community/llms/weight_only_quantization.py
@@ -62,7 +62,7 @@ class WeightOnlyQuantPipeline(LLM):
         hf = WeightOnlyQuantPipeline(pipeline=pipe)
     """
 
-    pipeline: Any  #: :meta private:
+    pipeline: Any = None  #: :meta private:
     model_id: str = DEFAULT_MODEL_ID
     """Model name or local path to use."""
 
diff --git a/libs/community/langchain_community/retrievers/bm25.py b/libs/community/langchain_community/retrievers/bm25.py
index a654d3626ab..543058c131a 100644
--- a/libs/community/langchain_community/retrievers/bm25.py
+++ b/libs/community/langchain_community/retrievers/bm25.py
@@ -15,7 +15,7 @@ def default_preprocessing_func(text: str) -> List[str]:
 class BM25Retriever(BaseRetriever):
     """`BM25` retriever without Elasticsearch."""
 
-    vectorizer: Any
+    vectorizer: Any = None
     """ BM25 vectorizer."""
     docs: List[Document] = Field(repr=False)
     """ List of documents."""
diff --git a/libs/community/langchain_community/retrievers/docarray.py b/libs/community/langchain_community/retrievers/docarray.py
index 4531028fb6e..2e602c10653 100644
--- a/libs/community/langchain_community/retrievers/docarray.py
+++ b/libs/community/langchain_community/retrievers/docarray.py
@@ -38,7 +38,7 @@ class DocArrayRetriever(BaseRetriever):
         top_k: Number of documents to return
     """
 
-    index: Any
+    index: Any = None
     embeddings: Embeddings
     search_field: str
     content_field: str
diff --git a/libs/community/langchain_community/retrievers/google_vertex_ai_search.py b/libs/community/langchain_community/retrievers/google_vertex_ai_search.py
index 109f0385f0c..e6945b94200 100644
--- a/libs/community/langchain_community/retrievers/google_vertex_ai_search.py
+++ b/libs/community/langchain_community/retrievers/google_vertex_ai_search.py
@@ -239,7 +239,7 @@ class GoogleVertexAISearchRetriever(BaseRetriever, _BaseGoogleVertexAISearchRetr
     """
 
     # type is SearchServiceClient but can't be set due to optional imports
-    _client: Any
+    _client: Any = None
     _serving_config: str
 
     model_config = ConfigDict(
@@ -405,7 +405,7 @@ class GoogleVertexAIMultiTurnSearchRetriever(
     """Vertex AI Search Conversation ID."""
 
     # type is ConversationalSearchServiceClient but can't be set due to optional imports
-    _client: Any
+    _client: Any = None
     _serving_config: str
 
     model_config = ConfigDict(
diff --git a/libs/community/langchain_community/retrievers/knn.py b/libs/community/langchain_community/retrievers/knn.py
index ed76aa41b7b..8c08479248a 100644
--- a/libs/community/langchain_community/retrievers/knn.py
+++ b/libs/community/langchain_community/retrievers/knn.py
@@ -35,7 +35,7 @@ class KNNRetriever(BaseRetriever):
 
     embeddings: Embeddings
     """Embeddings model to use."""
-    index: Any
+    index: Any = None
     """Index of embeddings."""
     texts: List[str]
     """List of texts to index."""
diff --git a/libs/community/langchain_community/retrievers/llama_index.py b/libs/community/langchain_community/retrievers/llama_index.py
index 433def09001..1ab75b572c7 100644
--- a/libs/community/langchain_community/retrievers/llama_index.py
+++ b/libs/community/langchain_community/retrievers/llama_index.py
@@ -12,7 +12,7 @@ class LlamaIndexRetriever(BaseRetriever):
     It is used for
     the question-answering with sources over an LlamaIndex data structure."""
 
-    index: Any
+    index: Any = None
     """LlamaIndex index to query."""
     query_kwargs: Dict = Field(default_factory=dict)
     """Keyword arguments to pass to the query method."""
@@ -48,7 +48,7 @@ class LlamaIndexGraphRetriever(BaseRetriever):
     It is used for question-answering with sources over an LlamaIndex
     graph data structure."""
 
-    graph: Any
+    graph: Any = None
     """LlamaIndex graph to query."""
     query_configs: List[Dict] = Field(default_factory=list)
     """List of query configs to pass to the query method."""
diff --git a/libs/community/langchain_community/retrievers/nanopq.py b/libs/community/langchain_community/retrievers/nanopq.py
index 3ce6069327a..ca4162240d0 100644
--- a/libs/community/langchain_community/retrievers/nanopq.py
+++ b/libs/community/langchain_community/retrievers/nanopq.py
@@ -31,7 +31,7 @@ class NanoPQRetriever(BaseRetriever):
 
     embeddings: Embeddings
     """Embeddings model to use."""
-    index: Any
+    index: Any = None
     """Index of embeddings."""
     texts: List[str]
     """List of texts to index."""
diff --git a/libs/community/langchain_community/retrievers/pinecone_hybrid_search.py b/libs/community/langchain_community/retrievers/pinecone_hybrid_search.py
index b2e2d10d812..a6e0f68002d 100644
--- a/libs/community/langchain_community/retrievers/pinecone_hybrid_search.py
+++ b/libs/community/langchain_community/retrievers/pinecone_hybrid_search.py
@@ -104,9 +104,9 @@ class PineconeHybridSearchRetriever(BaseRetriever):
     embeddings: Embeddings
     """Embeddings model to use."""
     """description"""
-    sparse_encoder: Any
+    sparse_encoder: Any = None
     """Sparse encoder to use."""
-    index: Any
+    index: Any = None
     """Pinecone index to use."""
     top_k: int = 4
     """Number of documents to return."""
diff --git a/libs/community/langchain_community/retrievers/qdrant_sparse_vector_retriever.py b/libs/community/langchain_community/retrievers/qdrant_sparse_vector_retriever.py
index 1e9fde744a2..404081d5265 100644
--- a/libs/community/langchain_community/retrievers/qdrant_sparse_vector_retriever.py
+++ b/libs/community/langchain_community/retrievers/qdrant_sparse_vector_retriever.py
@@ -36,7 +36,7 @@ from langchain_community.vectorstores.qdrant import Qdrant, QdrantException
 class QdrantSparseVectorRetriever(BaseRetriever):
     """Qdrant sparse vector retriever."""
 
-    client: Any
+    client: Any = None
     """'qdrant_client' instance to use."""
     collection_name: str
     """Qdrant collection name."""
diff --git a/libs/community/langchain_community/retrievers/svm.py b/libs/community/langchain_community/retrievers/svm.py
index 6b21e7fec52..58a7889691e 100644
--- a/libs/community/langchain_community/retrievers/svm.py
+++ b/libs/community/langchain_community/retrievers/svm.py
@@ -35,7 +35,7 @@ class SVMRetriever(BaseRetriever):
 
     embeddings: Embeddings
     """Embeddings model to use."""
-    index: Any
+    index: Any = None
     """Index of embeddings."""
     texts: List[str]
     """List of texts to index."""
diff --git a/libs/community/langchain_community/retrievers/tfidf.py b/libs/community/langchain_community/retrievers/tfidf.py
index 4101ac9af50..6a991f81f33 100644
--- a/libs/community/langchain_community/retrievers/tfidf.py
+++ b/libs/community/langchain_community/retrievers/tfidf.py
@@ -17,11 +17,11 @@ class TFIDFRetriever(BaseRetriever):
     https://github.com/asvskartheek/Text-Retrieval/blob/master/TF-IDF%20Search%20Engine%20(SKLEARN).ipynb
     """
 
-    vectorizer: Any
+    vectorizer: Any = None
     """TF-IDF vectorizer."""
     docs: List[Document]
     """Documents."""
-    tfidf_array: Any
+    tfidf_array: Any = None
     """TF-IDF array."""
     k: int = 4
     """Number of documents to return."""
diff --git a/libs/community/langchain_community/retrievers/weaviate_hybrid_search.py b/libs/community/langchain_community/retrievers/weaviate_hybrid_search.py
index 6e98d076690..9d1cae90fd3 100644
--- a/libs/community/langchain_community/retrievers/weaviate_hybrid_search.py
+++ b/libs/community/langchain_community/retrievers/weaviate_hybrid_search.py
@@ -16,7 +16,7 @@ class WeaviateHybridSearchRetriever(BaseRetriever):
     https://weaviate.io/blog/hybrid-search-explained
     """
 
-    client: Any
+    client: Any = None
     """keyword arguments to pass to the Weaviate client."""
     index_name: str
     """The name of the index to use."""
diff --git a/libs/community/langchain_community/tools/edenai/audio_text_to_speech.py b/libs/community/langchain_community/tools/edenai/audio_text_to_speech.py
index 4bf79180b39..d17c0854f9e 100644
--- a/libs/community/langchain_community/tools/edenai/audio_text_to_speech.py
+++ b/libs/community/langchain_community/tools/edenai/audio_text_to_speech.py
@@ -43,11 +43,11 @@ class EdenAiTextToSpeechTool(EdenaiTool):
 
     # optional params see api documentation for more info
     return_type: Literal["url", "wav"] = "url"
-    rate: Optional[int]
-    pitch: Optional[int]
-    volume: Optional[int]
-    audio_format: Optional[str]
-    sampling_rate: Optional[int]
+    rate: Optional[int] = None
+    pitch: Optional[int] = None
+    volume: Optional[int] = None
+    audio_format: Optional[str] = None
+    sampling_rate: Optional[int] = None
     voice_models: Dict[str, str] = Field(default_factory=dict)
 
     voice: Literal["MALE", "FEMALE"]
diff --git a/libs/community/langchain_community/tools/powerbi/tool.py b/libs/community/langchain_community/tools/powerbi/tool.py
index 59794294a5d..c5ec51e5b52 100644
--- a/libs/community/langchain_community/tools/powerbi/tool.py
+++ b/libs/community/langchain_community/tools/powerbi/tool.py
@@ -31,7 +31,7 @@ class QueryPowerBITool(BaseTool):
     Example Input: "How many rows are in table1?"
     """  # noqa: E501
-    llm_chain: Any
+    llm_chain: Any = None
     powerbi: PowerBIDataset = Field(exclude=True)
     examples: Optional[str] = DEFAULT_FEWSHOT_EXAMPLES
     session_cache: Dict[str, Any] = Field(default_factory=dict, exclude=True)
diff --git a/libs/community/langchain_community/utilities/asknews.py b/libs/community/langchain_community/utilities/asknews.py
index e61a1e23f3d..5a3eaa2340d 100644
--- a/libs/community/langchain_community/utilities/asknews.py
+++ b/libs/community/langchain_community/utilities/asknews.py
@@ -12,8 +12,8 @@ from pydantic import BaseModel, ConfigDict, model_validator
 class AskNewsAPIWrapper(BaseModel):
     """Wrapper for AskNews API."""
 
-    asknews_sync: Any  #: :meta private:
-    asknews_async: Any  #: :meta private:
+    asknews_sync: Any = None  #: :meta private:
+    asknews_async: Any = None  #: :meta private:
     asknews_client_id: Optional[str] = None
     """Client ID for the AskNews API."""
     asknews_client_secret: Optional[str] = None
diff --git a/libs/community/langchain_community/utilities/awslambda.py b/libs/community/langchain_community/utilities/awslambda.py
index 9abd0e7958f..72e584d4ce6 100644
--- a/libs/community/langchain_community/utilities/awslambda.py
+++ b/libs/community/langchain_community/utilities/awslambda.py
@@ -21,7 +21,7 @@ class LambdaWrapper(BaseModel):
 
     """
 
-    lambda_client: Any  #: :meta private:
+    lambda_client: Any = None  #: :meta private:
     """The configured boto3 client"""
     function_name: Optional[str] = None
     """The name of your lambda function"""
diff --git a/libs/community/langchain_community/utilities/dalle_image_generator.py b/libs/community/langchain_community/utilities/dalle_image_generator.py
index e5608e6b064..654770c6d3a 100644
--- a/libs/community/langchain_community/utilities/dalle_image_generator.py
+++ b/libs/community/langchain_community/utilities/dalle_image_generator.py
@@ -27,7 +27,7 @@ class DallEAPIWrapper(BaseModel):
     2. save your OPENAI_API_KEY in an environment variable
     """
 
-    client: Any  #: :meta private:
+    client: Any = None  #: :meta private:
     async_client: Any = Field(default=None, exclude=True)  #: :meta private:
     model_name: str = Field(default="dall-e-2", alias="model")
     model_kwargs: Dict[str, Any] = Field(default_factory=dict)
diff --git a/libs/community/langchain_community/utilities/dataherald.py b/libs/community/langchain_community/utilities/dataherald.py
index bf69d8813af..84ad9d83132 100644
--- a/libs/community/langchain_community/utilities/dataherald.py
+++ b/libs/community/langchain_community/utilities/dataherald.py
@@ -18,7 +18,7 @@ class DataheraldAPIWrapper(BaseModel):
 
     """
 
-    dataherald_client: Any  #: :meta private:
+    dataherald_client: Any = None  #: :meta private:
     db_connection_id: str
     dataherald_api_key: Optional[str] = None
 
diff --git a/libs/community/langchain_community/utilities/github.py b/libs/community/langchain_community/utilities/github.py
index 6fee6c27cf9..a2639532872 100644
--- a/libs/community/langchain_community/utilities/github.py
+++ b/libs/community/langchain_community/utilities/github.py
@@ -29,8 +29,8 @@ def _import_tiktoken() -> Any:
 class GitHubAPIWrapper(BaseModel):
     """Wrapper for GitHub API."""
 
-    github: Any  #: :meta private:
-    github_repo_instance: Any  #: :meta private:
+    github: Any = None  #: :meta private:
+    github_repo_instance: Any = None  #: :meta private:
     github_repository: Optional[str] = None
     github_app_id: Optional[str] = None
     github_app_private_key: Optional[str] = None
diff --git a/libs/community/langchain_community/utilities/gitlab.py b/libs/community/langchain_community/utilities/gitlab.py
index 12e2e1ecd22..1dd5894fe7a 100644
--- a/libs/community/langchain_community/utilities/gitlab.py
+++ b/libs/community/langchain_community/utilities/gitlab.py
@@ -15,8 +15,8 @@ if TYPE_CHECKING:
 class GitLabAPIWrapper(BaseModel):
     """Wrapper for GitLab API."""
 
-    gitlab: Any  #: :meta private:
-    gitlab_repo_instance: Any  #: :meta private:
+    gitlab: Any = None  #: :meta private:
+    gitlab_repo_instance: Any = None  #: :meta private:
     gitlab_repository: Optional[str] = None
     """The name of the GitLab repository, in the form {username}/{repo-name}."""
     gitlab_personal_access_token: Optional[str] = None
diff --git a/libs/community/langchain_community/utilities/google_finance.py b/libs/community/langchain_community/utilities/google_finance.py
index 650bf3611aa..c5def13a574 100644
--- a/libs/community/langchain_community/utilities/google_finance.py
+++ b/libs/community/langchain_community/utilities/google_finance.py
@@ -22,7 +22,7 @@ class GoogleFinanceAPIWrapper(BaseModel):
         google_Finance.run('langchain')
     """
 
-    serp_search_engine: Any
+    serp_search_engine: Any = None
     serp_api_key: Optional[SecretStr] = None
 
     model_config = ConfigDict(
diff --git a/libs/community/langchain_community/utilities/google_jobs.py b/libs/community/langchain_community/utilities/google_jobs.py
index d036ff38e8b..51d9e9d2011 100644
--- a/libs/community/langchain_community/utilities/google_jobs.py
+++ b/libs/community/langchain_community/utilities/google_jobs.py
@@ -22,7 +22,7 @@ class GoogleJobsAPIWrapper(BaseModel):
         google_Jobs.run('langchain')
     """
 
-    serp_search_engine: Any
+    serp_search_engine: Any = None
     serp_api_key: Optional[SecretStr] = None
 
     model_config = ConfigDict(
diff --git a/libs/community/langchain_community/utilities/google_lens.py b/libs/community/langchain_community/utilities/google_lens.py
index 2aad581c995..ac2a8ae8a12 100644
--- a/libs/community/langchain_community/utilities/google_lens.py
+++ b/libs/community/langchain_community/utilities/google_lens.py
@@ -27,7 +27,7 @@ class GoogleLensAPIWrapper(BaseModel):
         google_lens.run('langchain')
     """
 
-    serp_search_engine: Any
+    serp_search_engine: Any = None
     serp_api_key: Optional[SecretStr] = None
 
     model_config = ConfigDict(
diff --git a/libs/community/langchain_community/utilities/google_places_api.py b/libs/community/langchain_community/utilities/google_places_api.py
index e5e15898ab1..b0c6f152bd2 100644
--- a/libs/community/langchain_community/utilities/google_places_api.py
+++ b/libs/community/langchain_community/utilities/google_places_api.py
@@ -34,7 +34,7 @@ class GooglePlacesAPIWrapper(BaseModel):
     """
 
     gplaces_api_key: Optional[str] = None
-    google_map_client: Any  #: :meta private:
+    google_map_client: Any = None  #: :meta private:
     top_k_results: Optional[int] = None
 
     model_config = ConfigDict(
diff --git a/libs/community/langchain_community/utilities/google_search.py b/libs/community/langchain_community/utilities/google_search.py
index 13e1a41c3b3..2037b34c8d2 100644
--- a/libs/community/langchain_community/utilities/google_search.py
+++ b/libs/community/langchain_community/utilities/google_search.py
@@ -51,7 +51,7 @@ class GoogleSearchAPIWrapper(BaseModel):
 
     """
 
-    search_engine: Any  #: :meta private:
+    search_engine: Any = None  #: :meta private:
     google_api_key: Optional[str] = None
     google_cse_id: Optional[str] = None
     k: int = 10
diff --git a/libs/community/langchain_community/utilities/google_trends.py b/libs/community/langchain_community/utilities/google_trends.py
index 38163a43bbd..00f790d91c8 100644
--- a/libs/community/langchain_community/utilities/google_trends.py
+++ b/libs/community/langchain_community/utilities/google_trends.py
@@ -26,7 +26,7 @@ class GoogleTrendsAPIWrapper(BaseModel):
        google_trends.run('langchain')
    """
 
-    serp_search_engine: Any
+    serp_search_engine: Any = None
     serp_api_key: Optional[SecretStr] = None
 
     model_config = ConfigDict(
diff --git a/libs/community/langchain_community/utilities/graphql.py b/libs/community/langchain_community/utilities/graphql.py
index 2e1dcc181b7..efe9a3581e7 100644
--- a/libs/community/langchain_community/utilities/graphql.py
+++ b/libs/community/langchain_community/utilities/graphql.py
@@ -14,7 +14,7 @@ class GraphQLAPIWrapper(BaseModel):
     custom_headers: Optional[Dict[str, str]] = None
     fetch_schema_from_transport: Optional[bool] = None
     graphql_endpoint: str
-    gql_client: Any  #: :meta private:
+    gql_client: Any = None  #: :meta private:
     gql_function: Callable[[str], Any]  #: :meta private:
 
     model_config = ConfigDict(
diff --git a/libs/community/langchain_community/utilities/jira.py b/libs/community/langchain_community/utilities/jira.py
index 811e7136086..d96b72efb18 100644
--- a/libs/community/langchain_community/utilities/jira.py
+++ b/libs/community/langchain_community/utilities/jira.py
@@ -10,8 +10,8 @@ from pydantic import BaseModel, ConfigDict, model_validator
 class JiraAPIWrapper(BaseModel):
     """Wrapper for Jira API."""
 
-    jira: Any  #: :meta private:
-    confluence: Any
+    jira: Any = None  #: :meta private:
+    confluence: Any = None
     jira_username: Optional[str] = None
     jira_api_token: Optional[str] = None
     jira_instance_url: Optional[str] = None
diff --git a/libs/community/langchain_community/utilities/openweathermap.py b/libs/community/langchain_community/utilities/openweathermap.py
index 42eb2674929..a08cf3c7c25 100644
--- a/libs/community/langchain_community/utilities/openweathermap.py
+++ b/libs/community/langchain_community/utilities/openweathermap.py
@@ -16,7 +16,7 @@ class OpenWeatherMapAPIWrapper(BaseModel):
     3. pip install pyowm
     """
 
-    owm: Any
+    owm: Any = None
     openweathermap_api_key: Optional[str] = None
 
     model_config = ConfigDict(
diff --git a/libs/community/langchain_community/utilities/serpapi.py b/libs/community/langchain_community/utilities/serpapi.py
index a9281d587e3..1b5c2208936 100644
--- a/libs/community/langchain_community/utilities/serpapi.py
+++ b/libs/community/langchain_community/utilities/serpapi.py
@@ -40,7 +40,7 @@ class SerpAPIWrapper(BaseModel):
         serpapi = SerpAPIWrapper()
     """
 
-    search_engine: Any  #: :meta private:
+    search_engine: Any = None  #: :meta private:
     params: dict = Field(
         default={
             "engine": "google",
diff --git a/libs/community/langchain_community/utilities/stackexchange.py b/libs/community/langchain_community/utilities/stackexchange.py
index 7d09a480f39..80288a67656 100644
--- a/libs/community/langchain_community/utilities/stackexchange.py
+++ b/libs/community/langchain_community/utilities/stackexchange.py
@@ -7,7 +7,7 @@ from pydantic import BaseModel, Field, model_validator
 class StackExchangeAPIWrapper(BaseModel):
     """Wrapper for Stack Exchange API."""
 
-    client: Any  #: :meta private:
+    client: Any = None  #: :meta private:
     max_results: int = 3
     """Max number of results to include in output."""
     query_type: Literal["all", "title", "body"] = "all"
diff --git a/libs/community/langchain_community/utilities/steam.py b/libs/community/langchain_community/utilities/steam.py
index e3197b52954..96998b999e5 100644
--- a/libs/community/langchain_community/utilities/steam.py
+++ b/libs/community/langchain_community/utilities/steam.py
@@ -13,7 +13,7 @@ from langchain_community.tools.steam.prompt import (
 class SteamWebAPIWrapper(BaseModel):
     """Wrapper for Steam API."""
 
-    steam: Any  # for python-steam-api
+    steam: Any = None  # for python-steam-api
 
     # operations: a list of dictionaries, each representing a specific operation that
     # can be performed with the API
diff --git a/libs/community/langchain_community/utilities/twilio.py b/libs/community/langchain_community/utilities/twilio.py
index ceef3b2f26a..c9ed0b23def 100644
--- a/libs/community/langchain_community/utilities/twilio.py
+++ b/libs/community/langchain_community/utilities/twilio.py
@@ -26,7 +26,7 @@ class TwilioAPIWrapper(BaseModel):
         twilio.run('test', '+12484345508')
     """
 
-    client: Any  #: :meta private:
+    client: Any = None  #: :meta private:
     account_sid: Optional[str] = None
     """Twilio account string identifier."""
     auth_token: Optional[str] = None
diff --git a/libs/community/langchain_community/utilities/wolfram_alpha.py b/libs/community/langchain_community/utilities/wolfram_alpha.py
index aed9b9ddc67..5565f6c28c3 100644
--- a/libs/community/langchain_community/utilities/wolfram_alpha.py
+++ b/libs/community/langchain_community/utilities/wolfram_alpha.py
@@ -18,7 +18,7 @@ class WolframAlphaAPIWrapper(BaseModel):
 
     """
 
-    wolfram_client: Any  #: :meta private:
+    wolfram_client: Any = None  #: :meta private:
     wolfram_alpha_appid: Optional[str] = None
 
     model_config = ConfigDict(
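---

A note on the Pydantic 2 behavior behind this patch, as a minimal sketch. Under Pydantic 2, fields annotated `Any` or `Optional[...]` no longer receive an implicit `None` default, so without an explicit default they are required at construction time. Classes whose `model_validator(mode="before")` populates the field see no behavior change from the added defaults; classes with no such validator are the ones the `= None` actually fixes. `Wrapper`, `NoValidator`, and `make_client` below are illustrative stand-ins, not code from `langchain_community`.

```python
# Minimal sketch, assuming Pydantic 2. `Wrapper`, `NoValidator`, and
# `make_client` are hypothetical stand-ins, not langchain_community code.
from typing import Any, Dict, Optional

from pydantic import BaseModel, ValidationError, model_validator


def make_client() -> object:
    """Stand-in for constructing a real SDK client."""
    return object()


class Wrapper(BaseModel):
    # The patched pattern: explicit `= None` keeps the field optional,
    # as it implicitly was under Pydantic 1.
    client: Any = None  #: :meta private:
    api_key: Optional[str] = None

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Dict:
        # Runs on the raw input before field validation, so `client` is
        # filled in whether or not the caller passed one -- which is why
        # the added default is a no-op for classes with such a validator.
        values.setdefault("client", make_client())
        return values


class NoValidator(BaseModel):
    client: Any  # required under Pydantic 2: no implicit None default


if __name__ == "__main__":
    Wrapper(api_key="...")  # ok: the before-validator supplies `client`
    try:
        NoValidator()  # raises: `client` is a required field
    except ValidationError as err:
        print(err)
```

Only classes like `NoValidator`, where nothing assigns the field before validation, change observable behavior: with the explicit default they become constructible again, with the field left as `None`.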