community[patch]: assign missed default (#26326)

Assigning missed defaults in various classes. Under Pydantic 2, `Optional` and `Any` fields no longer receive an implicit `None` default, so annotation-only fields had become required. Most clients were being assigned during the `model_validator(mode="before")` step anyway, so this change should amount to a no-op in those cases.

---

This PR was autogenerated using GritQL:

```shell

grit apply 'class_definition(name=$C, $body, superclasses=$S) where {    
    $C <: ! "Config", // Does not work in this scope, but works after class_definition
    $body <: block($statements),
    $statements <: some bubble assignment(left=$x, right=$y, type=$t) as $A where {
        or {
            $y <: `Field($z)`,
            $x <: "model_config"
        }
    },
    // And has either Any or Optional fields without a default
    $statements <: some bubble assignment(left=$x, right=$y, type=$t) as $A where {
        $t <: or {
            r"Optional.*",
            r"Any",
            r"Union[None, .*]",
            r"Union[.*, None, .*]",
            r"Union[.*, None]",
        },
        $y <: ., // Match empty node        
        $t => `$t = None`,
    },    
}
' --language python .

```
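In effect, the pattern selects classes that already use Pydantic machinery (a `Field(...)` assignment or a `model_config`) and appends `= None` to any `Optional`, `Any`, or `None`-bearing `Union` field that has no default. A hypothetical before/after, with invented names:

```python
from typing import Any, Optional

from pydantic import BaseModel, Field


class ExampleAPIWrapper(BaseModel):
    # First predicate: the class already uses Field(...) (or sets model_config).
    api_key: Optional[str] = Field(default=None)
    # Second predicate: an Optional/Any (or None-bearing Union) field with no default.
    client: Any
    # After `grit apply`, the line above is rewritten to:
    # client: Any = None
```
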
Commit 844955d6e1 (parent c417bbc313), authored by Eugene Yurtsev on 2024-09-11 11:13:11 -04:00 and committed via GitHub.
83 changed files with 109 additions and 109 deletions.

@@ -30,7 +30,7 @@ class OpenAPIEndpointChain(Chain, BaseModel):
     """Chain interacts with an OpenAPI endpoint using natural language."""
     api_request_chain: LLMChain
-    api_response_chain: Optional[LLMChain]
+    api_response_chain: Optional[LLMChain] = None
     api_operation: APIOperation
     requests: Requests = Field(exclude=True, default_factory=Requests)
     param_mapping: _ParamMapping = Field(alias="param_mapping")

@@ -346,7 +346,7 @@ class QianfanChatEndpoint(BaseChatModel):
     model_kwargs: Dict[str, Any] = Field(default_factory=dict)
     """extra params for model invoke using with `do`."""
-    client: Any #: :meta private:
+    client: Any = None #: :meta private:
     # It could be empty due to the use of Console API
     # And they're not list here

@@ -171,7 +171,7 @@ class JinaChat(BaseChatModel):
         """Return whether this model can be serialized by Langchain."""
         return False
-    client: Any #: :meta private:
+    client: Any = None #: :meta private:
     temperature: float = 0.7
     """What sampling temperature to use."""
     model_kwargs: Dict[str, Any] = Field(default_factory=dict)

@@ -215,7 +215,7 @@ def _convert_message_to_dict(message: BaseMessage) -> dict:
 class ChatLiteLLM(BaseChatModel):
     """Chat model that uses the LiteLLM API."""
-    client: Any #: :meta private:
+    client: Any = None #: :meta private:
     model: str = "gpt-3.5-turbo"
     model_name: Optional[str] = None
     """Model name to use."""

@@ -67,7 +67,7 @@ class ChatLlamaCpp(BaseChatModel):
     """
-    client: Any #: :meta private:
+    client: Any = None #: :meta private:
     model_path: str
     """The path to the Llama model file."""

@@ -369,7 +369,7 @@ class MiniMaxChat(BaseChatModel):
             **self.model_kwargs,
         }
-    _client: Any
+    _client: Any = None
     model: str = "abab6.5-chat"
     """Model name to use."""
     max_tokens: int = 256

@@ -305,7 +305,7 @@ class ChatPremAI(BaseChatModel, BaseModel):
     streaming: Optional[bool] = False
     """Whether to stream the responses or not."""
-    client: Any
+    client: Any = None
     model_config = ConfigDict(
         populate_by_name=True,

@@ -434,7 +434,7 @@ class ChatTongyi(BaseChatModel):
     def lc_secrets(self) -> Dict[str, str]:
         return {"dashscope_api_key": "DASHSCOPE_API_KEY"}
-    client: Any #: :meta private:
+    client: Any = None #: :meta private:
     model_name: str = Field(default="qwen-turbo", alias="model")
     """Model name to use.
     callable multimodal model:

@@ -76,7 +76,7 @@ class ChatYuan2(BaseChatModel):
             chat = ChatYuan2()
     """
-    client: Any #: :meta private:
+    client: Any = None #: :meta private:
     async_client: Any = Field(default=None, exclude=True) #: :meta private:
     model_name: str = Field(default="yuan2", alias="model")

@@ -23,7 +23,7 @@ class HuggingFaceCrossEncoder(BaseModel, BaseCrossEncoder):
             )
     """
-    client: Any #: :meta private:
+    client: Any = None #: :meta private:
     model_name: str = DEFAULT_MODEL_NAME
     """Model name to use."""
     model_kwargs: Dict[str, Any] = Field(default_factory=dict)

@@ -61,7 +61,7 @@ class SagemakerEndpointCrossEncoder(BaseModel, BaseCrossEncoder):
                 credentials_profile_name=credentials_profile_name
             )
     """
-    client: Any #: :meta private:
+    client: Any = None #: :meta private:
     endpoint_name: str = ""
     """The name of the endpoint from the deployed Sagemaker model.

@@ -49,7 +49,7 @@ class LLMLinguaCompressor(BaseDocumentCompressor):
         "dynamic_context_compression_ratio": 0.4,
     }
     """Extra compression arguments"""
-    lingua: Any
+    lingua: Any = None
     """The instance of the llm linqua"""
     @model_validator(mode="before")

@@ -21,9 +21,9 @@ class OpenVINOReranker(BaseDocumentCompressor):
     OpenVINO rerank models.
     """
-    ov_model: Any
+    ov_model: Any = None
     """OpenVINO model object."""
-    tokenizer: Any
+    tokenizer: Any = None
     """Tokenizer for embedding model."""
     model_name_or_path: str
     """HuggingFace model id."""

@@ -59,7 +59,7 @@ class BaichuanTextEmbeddings(BaseModel, Embeddings):
             vectors = embeddings.embed_query(text)
     """ # noqa: E501
-    session: Any #: :meta private:
+    session: Any = None #: :meta private:
     model_name: str = Field(default="Baichuan-Text-Embedding", alias="model")
     """The model used to embed the documents."""
     baichuan_api_key: SecretStr = Field(

@@ -71,7 +71,7 @@ class QianfanEmbeddingsEndpoint(BaseModel, Embeddings):
     endpoint: str = ""
     """Endpoint of the Qianfan Embedding, required if custom model used."""
-    client: Any
+    client: Any = None
     """Qianfan client"""
     init_kwargs: Dict[str, Any] = Field(default_factory=dict)

@@ -47,7 +47,7 @@ class BedrockEmbeddings(BaseModel, Embeddings):
             )
     """
-    client: Any #: :meta private:
+    client: Any = None #: :meta private:
     """Bedrock client."""
     region_name: Optional[str] = None
     """The aws region e.g., `us-west-2`. Fallsback to AWS_DEFAULT_REGION env variable

@@ -30,9 +30,9 @@ class CohereEmbeddings(BaseModel, Embeddings):
             )
     """
-    client: Any #: :meta private:
+    client: Any = None #: :meta private:
     """Cohere client."""
-    async_client: Any #: :meta private:
+    async_client: Any = None #: :meta private:
     """Cohere async client."""
     model: str = "embed-english-v2.0"
     """Model name to use."""

@@ -101,7 +101,7 @@ class DashScopeEmbeddings(BaseModel, Embeddings):
     """
-    client: Any #: :meta private:
+    client: Any = None #: :meta private:
     """The DashScope client."""
     model: str = "text-embedding-v1"
     dashscope_api_key: Optional[str] = None

@@ -38,12 +38,12 @@ class FastEmbedEmbeddings(BaseModel, Embeddings):
     Unknown behavior for values > 512.
     """
-    cache_dir: Optional[str]
+    cache_dir: Optional[str] = None
     """The path to the cache directory.
     Defaults to `local_cache` in the parent directory
     """
-    threads: Optional[int]
+    threads: Optional[int] = None
     """The number of threads single onnxruntime session can use.
     Defaults to None
     """
@@ -65,7 +65,7 @@ class FastEmbedEmbeddings(BaseModel, Embeddings):
     Defaults to `None`.
     """
-    _model: Any # : :meta private:
+    _model: Any = None # : :meta private:
     model_config = ConfigDict(extra="allow", protected_namespaces=())

@@ -49,7 +49,7 @@ class IpexLLMBgeEmbeddings(BaseModel, Embeddings):
             )
     """
-    client: Any #: :meta private:
+    client: Any = None #: :meta private:
     model_name: str = DEFAULT_BGE_MODEL
     """Model name to use."""
     cache_folder: Optional[str] = None

@@ -27,7 +27,7 @@ class LaserEmbeddings(BaseModel, Embeddings):
             embeddings = encoder.encode_sentences(["Hello", "World"])
     """
-    lang: Optional[str]
+    lang: Optional[str] = None
     """The language or language code you'd like to use
     If empty, this implementation will default
     to using a multilingual earlier LASER encoder model (called laser2)
@@ -35,7 +35,7 @@ class LaserEmbeddings(BaseModel, Embeddings):
     https://github.com/facebookresearch/flores/blob/main/flores200/README.md#languages-in-flores-200
     """
-    _encoder_pipeline: Any # : :meta private:
+    _encoder_pipeline: Any = None # : :meta private:
     model_config = ConfigDict(
         extra="forbid",

@@ -19,7 +19,7 @@ class LlamaCppEmbeddings(BaseModel, Embeddings):
            llama = LlamaCppEmbeddings(model_path="/path/to/model.bin")
     """
-    client: Any #: :meta private:
+    client: Any = None #: :meta private:
     model_path: str
     n_ctx: int = Field(512, alias="n_ctx")

@@ -141,7 +141,7 @@ class LocalAIEmbeddings(BaseModel, Embeddings):
     """
-    client: Any #: :meta private:
+    client: Any = None #: :meta private:
     model: str = "text-embedding-ada-002"
     deployment: str = model
     openai_api_version: Optional[str] = None

@@ -17,7 +17,7 @@ class ModelScopeEmbeddings(BaseModel, Embeddings):
            embed = ModelScopeEmbeddings(model_id=model_id, model_revision="v1.0.0")
     """
-    embed: Any
+    embed: Any = None
     model_id: str = "damo/nlp_corom_sentence-embedding_english-base"
     """Model name to use."""
     model_revision: Optional[str] = None

@@ -31,9 +31,9 @@ class OpenVINOEmbeddings(BaseModel, Embeddings):
             )
     """
-    ov_model: Any
+    ov_model: Any = None
     """OpenVINO model object."""
-    tokenizer: Any
+    tokenizer: Any = None
     """Tokenizer for embedding model."""
     model_name_or_path: str
     """HuggingFace model id."""

@@ -28,7 +28,7 @@ class OracleEmbeddings(BaseModel, Embeddings):
     """Get Embeddings"""
     """Oracle Connection"""
-    conn: Any
+    conn: Any = None
     """Embedding Parameters"""
     params: Dict[str, Any]
     """Proxy"""

@@ -19,7 +19,7 @@ class TensorflowHubEmbeddings(BaseModel, Embeddings):
            tf = TensorflowHubEmbeddings(model_url=url)
     """
-    embed: Any #: :meta private:
+    embed: Any = None #: :meta private:
     model_url: str = DEFAULT_MODEL_URL
     """Model name to use."""

@@ -25,7 +25,7 @@ class AlephAlpha(LLM):
            aleph_alpha = AlephAlpha(aleph_alpha_api_key="my-api-key")
     """
-    client: Any #: :meta private:
+    client: Any = None #: :meta private:
     model: Optional[str] = "luminous-base"
     """Model name to use."""

@@ -156,7 +156,7 @@ class Aphrodite(BaseLLM):
     """Holds any model parameters valid for `aphrodite.LLM` call not explicitly
     specified."""
-    client: Any #: :meta private:
+    client: Any = None #: :meta private:
     @pre_init
     def validate_environment(cls, values: Dict) -> Dict:

@@ -123,7 +123,7 @@ class QianfanLLMEndpoint(LLM):
     model_kwargs: Dict[str, Any] = Field(default_factory=dict)
     """extra params for model invoke using with `do`."""
-    client: Any
+    client: Any = None
     qianfan_ak: Optional[SecretStr] = Field(default=None, alias="api_key")
     qianfan_sk: Optional[SecretStr] = Field(default=None, alias="secret_key")

@@ -76,8 +76,8 @@ def acompletion_with_retry(llm: Cohere, **kwargs: Any) -> Any:
 class BaseCohere(Serializable):
     """Base class for Cohere models."""
-    client: Any #: :meta private:
-    async_client: Any #: :meta private:
+    client: Any = None #: :meta private:
+    async_client: Any = None #: :meta private:
     model: Optional[str] = Field(default=None)
     """Model name to use."""

@@ -41,9 +41,9 @@ class CTranslate2(BaseLLM):
     sampling_temperature: float = 1
     """Sampling temperature to generate more random samples."""
-    client: Any #: :meta private:
+    client: Any = None #: :meta private:
-    tokenizer: Any #: :meta private:
+    tokenizer: Any = None #: :meta private:
     ctranslate2_kwargs: Dict[str, Any] = Field(default_factory=dict)
     """

@@ -30,7 +30,7 @@ class ExLlamaV2(LLM):
     - Add support for custom stop sequences
     """
-    client: Any
+    client: Any = None
     model_path: str
     exllama_cache: Any = None
     config: Any = None

@@ -30,7 +30,7 @@ class GooseAI(LLM):
     """
-    client: Any
+    client: Any = None
     model_name: str = "gpt-neo-20b"
     """Model name to use"""

@@ -60,7 +60,7 @@ class HuggingFacePipeline(BaseLLM):
            hf = HuggingFacePipeline(pipeline=pipe)
     """
-    pipeline: Any #: :meta private:
+    pipeline: Any = None #: :meta private:
     model_id: str = DEFAULT_MODEL_ID
     """Model name to use."""
     model_kwargs: Optional[dict] = None

@@ -100,8 +100,8 @@ class HuggingFaceTextGenInference(LLM):
     """Holds any text-generation-inference server parameters not explicitly specified"""
     model_kwargs: Dict[str, Any] = Field(default_factory=dict)
     """Holds any model parameters valid for `call` not explicitly specified"""
-    client: Any
-    async_client: Any
+    client: Any = None
+    async_client: Any = None
     model_config = ConfigDict(
         extra="forbid",

@@ -25,9 +25,9 @@ class IpexLLM(LLM):
     """Model name or model path to use."""
     model_kwargs: Optional[dict] = None
     """Keyword arguments passed to the model."""
-    model: Any #: :meta private:
+    model: Any = None #: :meta private:
     """IpexLLM model."""
-    tokenizer: Any #: :meta private:
+    tokenizer: Any = None #: :meta private:
     """Huggingface tokenizer model."""
     streaming: bool = True
     """Whether to stream the results, token by token."""

@@ -28,7 +28,7 @@ class LlamaCpp(LLM):
            llm = LlamaCpp(model_path="/path/to/llama/model")
     """
-    client: Any #: :meta private:
+    client: Any = None #: :meta private:
     model_path: str
     """The path to the Llama model file."""

@@ -9,7 +9,7 @@ from pydantic import ConfigDict
 class ManifestWrapper(LLM):
     """HazyResearch's Manifest library."""
-    client: Any #: :meta private:
+    client: Any = None #: :meta private:
     llm_kwargs: Optional[Dict] = None
     model_config = ConfigDict(

@@ -38,9 +38,9 @@ class MLXPipeline(LLM):
     model_id: str = DEFAULT_MODEL_ID
     """Model name to use."""
-    model: Any #: :meta private:
+    model: Any = None #: :meta private:
     """Model."""
-    tokenizer: Any #: :meta private:
+    tokenizer: Any = None #: :meta private:
     """Tokenizer."""
     tokenizer_config: Optional[dict] = None
     """

@@ -19,7 +19,7 @@ class NLPCloud(LLM):
            nlpcloud = NLPCloud(model="finetuned-gpt-neox-20b")
     """
-    client: Any #: :meta private:
+    client: Any = None #: :meta private:
     model_name: str = "finetuned-gpt-neox-20b"
     """Model name to use."""
     gpu: bool = True

@@ -29,10 +29,10 @@ class Petals(LLM):
     """
-    client: Any
+    client: Any = None
     """The client to use for the API calls."""
-    tokenizer: Any
+    tokenizer: Any = None
     """The tokenizer to use for the API calls."""
     model_name: str = "bigscience/bloom-petals"

@@ -30,7 +30,7 @@ class PredictionGuard(LLM):
            })
     """
-    client: Any #: :meta private:
+    client: Any = None #: :meta private:
     model: Optional[str] = "MPT-7B-Instruct"
     """Model name to use."""

@@ -126,11 +126,11 @@ class SelfHostedPipeline(LLM):
        )
     """
-    pipeline_ref: Any #: :meta private:
-    client: Any #: :meta private:
+    pipeline_ref: Any = None #: :meta private:
+    client: Any = None #: :meta private:
     inference_fn: Callable = _generate_text #: :meta private:
     """Inference function to send to the remote hardware."""
-    hardware: Any
+    hardware: Any = None
     """Remote hardware to send the inference function to."""
     model_load_fn: Callable
     """Function to load the model remotely on the server."""

@@ -160,7 +160,7 @@ class SelfHostedHuggingFaceLLM(SelfHostedPipeline):
     """Device to use for inference. -1 for CPU, 0 for GPU, 1 for second GPU, etc."""
     model_kwargs: Optional[dict] = None
     """Keyword arguments to pass to the model."""
-    hardware: Any
+    hardware: Any = None
     """Remote hardware to send the inference function to."""
     model_reqs: List[str] = ["./", "transformers", "torch"]
     """Requirements to install on hardware to inference the model."""

@@ -238,7 +238,7 @@ class Tongyi(BaseLLM):
     def lc_secrets(self) -> Dict[str, str]:
         return {"dashscope_api_key": "DASHSCOPE_API_KEY"}
-    client: Any #: :meta private:
+    client: Any = None #: :meta private:
     model_name: str = Field(default="qwen-plus", alias="model")
     """Model name to use."""

@@ -72,7 +72,7 @@ class VLLM(BaseLLM):
     vllm_kwargs: Dict[str, Any] = Field(default_factory=dict)
     """Holds any model parameters valid for `vllm.LLM` call not explicitly specified."""
-    client: Any #: :meta private:
+    client: Any = None #: :meta private:
     @pre_init
     def validate_environment(cls, values: Dict) -> Dict:

@@ -12,7 +12,7 @@ from pydantic import BaseModel, Field, SecretStr
 class VolcEngineMaasBase(BaseModel):
     """Base class for VolcEngineMaas models."""
-    client: Any
+    client: Any = None
     volc_engine_maas_ak: Optional[SecretStr] = None
     """access key for volc engine"""

@@ -93,7 +93,7 @@ class WatsonxLLM(BaseLLM):
     streaming: bool = False
     """ Whether to stream the results or not. """
-    watsonx_model: Any
+    watsonx_model: Any = None
     model_config = ConfigDict(
         extra="forbid",

@@ -62,7 +62,7 @@ class WeightOnlyQuantPipeline(LLM):
            hf = WeightOnlyQuantPipeline(pipeline=pipe)
     """
-    pipeline: Any #: :meta private:
+    pipeline: Any = None #: :meta private:
     model_id: str = DEFAULT_MODEL_ID
     """Model name or local path to use."""

@@ -15,7 +15,7 @@ def default_preprocessing_func(text: str) -> List[str]:
 class BM25Retriever(BaseRetriever):
     """`BM25` retriever without Elasticsearch."""
-    vectorizer: Any
+    vectorizer: Any = None
     """ BM25 vectorizer."""
     docs: List[Document] = Field(repr=False)
     """ List of documents."""

@@ -38,7 +38,7 @@ class DocArrayRetriever(BaseRetriever):
         top_k: Number of documents to return
     """
-    index: Any
+    index: Any = None
     embeddings: Embeddings
     search_field: str
     content_field: str

@@ -239,7 +239,7 @@ class GoogleVertexAISearchRetriever(BaseRetriever, _BaseGoogleVertexAISearchRetriever):
     """
     # type is SearchServiceClient but can't be set due to optional imports
-    _client: Any
+    _client: Any = None
     _serving_config: str
     model_config = ConfigDict(
@@ -405,7 +405,7 @@ class GoogleVertexAIMultiTurnSearchRetriever(
     """Vertex AI Search Conversation ID."""
     # type is ConversationalSearchServiceClient but can't be set due to optional imports
-    _client: Any
+    _client: Any = None
     _serving_config: str
     model_config = ConfigDict(

@@ -35,7 +35,7 @@ class KNNRetriever(BaseRetriever):
     embeddings: Embeddings
     """Embeddings model to use."""
-    index: Any
+    index: Any = None
     """Index of embeddings."""
     texts: List[str]
     """List of texts to index."""

@@ -12,7 +12,7 @@ class LlamaIndexRetriever(BaseRetriever):
     It is used for the question-answering with sources over
     an LlamaIndex data structure."""
-    index: Any
+    index: Any = None
     """LlamaIndex index to query."""
     query_kwargs: Dict = Field(default_factory=dict)
     """Keyword arguments to pass to the query method."""
@@ -48,7 +48,7 @@ class LlamaIndexGraphRetriever(BaseRetriever):
     It is used for question-answering with sources over an LlamaIndex
     graph data structure."""
-    graph: Any
+    graph: Any = None
     """LlamaIndex graph to query."""
     query_configs: List[Dict] = Field(default_factory=list)
     """List of query configs to pass to the query method."""

@@ -31,7 +31,7 @@ class NanoPQRetriever(BaseRetriever):
     embeddings: Embeddings
     """Embeddings model to use."""
-    index: Any
+    index: Any = None
     """Index of embeddings."""
     texts: List[str]
     """List of texts to index."""

@@ -104,9 +104,9 @@ class PineconeHybridSearchRetriever(BaseRetriever):
     embeddings: Embeddings
     """Embeddings model to use."""
     """description"""
-    sparse_encoder: Any
+    sparse_encoder: Any = None
     """Sparse encoder to use."""
-    index: Any
+    index: Any = None
     """Pinecone index to use."""
     top_k: int = 4
     """Number of documents to return."""

@@ -36,7 +36,7 @@ from langchain_community.vectorstores.qdrant import Qdrant, QdrantException
 class QdrantSparseVectorRetriever(BaseRetriever):
     """Qdrant sparse vector retriever."""
-    client: Any
+    client: Any = None
     """'qdrant_client' instance to use."""
     collection_name: str
     """Qdrant collection name."""

@@ -35,7 +35,7 @@ class SVMRetriever(BaseRetriever):
     embeddings: Embeddings
     """Embeddings model to use."""
-    index: Any
+    index: Any = None
     """Index of embeddings."""
     texts: List[str]
     """List of texts to index."""

@@ -17,11 +17,11 @@ class TFIDFRetriever(BaseRetriever):
     https://github.com/asvskartheek/Text-Retrieval/blob/master/TF-IDF%20Search%20Engine%20(SKLEARN).ipynb
     """
-    vectorizer: Any
+    vectorizer: Any = None
     """TF-IDF vectorizer."""
     docs: List[Document]
     """Documents."""
-    tfidf_array: Any
+    tfidf_array: Any = None
     """TF-IDF array."""
     k: int = 4
     """Number of documents to return."""

@@ -16,7 +16,7 @@ class WeaviateHybridSearchRetriever(BaseRetriever):
     https://weaviate.io/blog/hybrid-search-explained
     """
-    client: Any
+    client: Any = None
     """keyword arguments to pass to the Weaviate client."""
     index_name: str
     """The name of the index to use."""

@@ -43,11 +43,11 @@ class EdenAiTextToSpeechTool(EdenaiTool):
     # optional params see api documentation for more info
     return_type: Literal["url", "wav"] = "url"
-    rate: Optional[int]
-    pitch: Optional[int]
-    volume: Optional[int]
-    audio_format: Optional[str]
-    sampling_rate: Optional[int]
+    rate: Optional[int] = None
+    pitch: Optional[int] = None
+    volume: Optional[int] = None
+    audio_format: Optional[str] = None
+    sampling_rate: Optional[int] = None
     voice_models: Dict[str, str] = Field(default_factory=dict)
     voice: Literal["MALE", "FEMALE"]

@@ -31,7 +31,7 @@ class QueryPowerBITool(BaseTool):
         Example Input: "How many rows are in table1?"
     """ # noqa: E501
-    llm_chain: Any
+    llm_chain: Any = None
     powerbi: PowerBIDataset = Field(exclude=True)
     examples: Optional[str] = DEFAULT_FEWSHOT_EXAMPLES
     session_cache: Dict[str, Any] = Field(default_factory=dict, exclude=True)

@@ -12,8 +12,8 @@ from pydantic import BaseModel, ConfigDict, model_validator
 class AskNewsAPIWrapper(BaseModel):
     """Wrapper for AskNews API."""
-    asknews_sync: Any #: :meta private:
-    asknews_async: Any #: :meta private:
+    asknews_sync: Any = None #: :meta private:
+    asknews_async: Any = None #: :meta private:
     asknews_client_id: Optional[str] = None
     """Client ID for the AskNews API."""
     asknews_client_secret: Optional[str] = None

@@ -21,7 +21,7 @@ class LambdaWrapper(BaseModel):
     """
-    lambda_client: Any #: :meta private:
+    lambda_client: Any = None #: :meta private:
     """The configured boto3 client"""
     function_name: Optional[str] = None
     """The name of your lambda function"""

@@ -27,7 +27,7 @@ class DallEAPIWrapper(BaseModel):
     2. save your OPENAI_API_KEY in an environment variable
     """
-    client: Any #: :meta private:
+    client: Any = None #: :meta private:
     async_client: Any = Field(default=None, exclude=True) #: :meta private:
     model_name: str = Field(default="dall-e-2", alias="model")
     model_kwargs: Dict[str, Any] = Field(default_factory=dict)

@@ -18,7 +18,7 @@ class DataheraldAPIWrapper(BaseModel):
     """
-    dataherald_client: Any #: :meta private:
+    dataherald_client: Any = None #: :meta private:
     db_connection_id: str
     dataherald_api_key: Optional[str] = None

@@ -29,8 +29,8 @@ def _import_tiktoken() -> Any:
 class GitHubAPIWrapper(BaseModel):
     """Wrapper for GitHub API."""
-    github: Any #: :meta private:
-    github_repo_instance: Any #: :meta private:
+    github: Any = None #: :meta private:
+    github_repo_instance: Any = None #: :meta private:
     github_repository: Optional[str] = None
     github_app_id: Optional[str] = None
     github_app_private_key: Optional[str] = None

@@ -15,8 +15,8 @@ if TYPE_CHECKING:
 class GitLabAPIWrapper(BaseModel):
     """Wrapper for GitLab API."""
-    gitlab: Any #: :meta private:
-    gitlab_repo_instance: Any #: :meta private:
+    gitlab: Any = None #: :meta private:
+    gitlab_repo_instance: Any = None #: :meta private:
     gitlab_repository: Optional[str] = None
     """The name of the GitLab repository, in the form {username}/{repo-name}."""
     gitlab_personal_access_token: Optional[str] = None

@@ -22,7 +22,7 @@ class GoogleFinanceAPIWrapper(BaseModel):
            google_Finance.run('langchain')
     """
-    serp_search_engine: Any
+    serp_search_engine: Any = None
     serp_api_key: Optional[SecretStr] = None
     model_config = ConfigDict(

@@ -22,7 +22,7 @@ class GoogleJobsAPIWrapper(BaseModel):
            google_Jobs.run('langchain')
     """
-    serp_search_engine: Any
+    serp_search_engine: Any = None
     serp_api_key: Optional[SecretStr] = None
     model_config = ConfigDict(

@@ -27,7 +27,7 @@ class GoogleLensAPIWrapper(BaseModel):
            google_lens.run('langchain')
     """
-    serp_search_engine: Any
+    serp_search_engine: Any = None
     serp_api_key: Optional[SecretStr] = None
     model_config = ConfigDict(

@@ -34,7 +34,7 @@ class GooglePlacesAPIWrapper(BaseModel):
     """
     gplaces_api_key: Optional[str] = None
-    google_map_client: Any #: :meta private:
+    google_map_client: Any = None #: :meta private:
     top_k_results: Optional[int] = None
     model_config = ConfigDict(

@@ -51,7 +51,7 @@ class GoogleSearchAPIWrapper(BaseModel):
     """
-    search_engine: Any #: :meta private:
+    search_engine: Any = None #: :meta private:
     google_api_key: Optional[str] = None
     google_cse_id: Optional[str] = None
     k: int = 10

@@ -26,7 +26,7 @@ class GoogleTrendsAPIWrapper(BaseModel):
            google_trends.run('langchain')
     """
-    serp_search_engine: Any
+    serp_search_engine: Any = None
     serp_api_key: Optional[SecretStr] = None
     model_config = ConfigDict(

@@ -14,7 +14,7 @@ class GraphQLAPIWrapper(BaseModel):
     custom_headers: Optional[Dict[str, str]] = None
     fetch_schema_from_transport: Optional[bool] = None
     graphql_endpoint: str
-    gql_client: Any #: :meta private:
+    gql_client: Any = None #: :meta private:
     gql_function: Callable[[str], Any] #: :meta private:
     model_config = ConfigDict(

@@ -10,8 +10,8 @@ from pydantic import BaseModel, ConfigDict, model_validator
 class JiraAPIWrapper(BaseModel):
     """Wrapper for Jira API."""
-    jira: Any #: :meta private:
-    confluence: Any
+    jira: Any = None #: :meta private:
+    confluence: Any = None
     jira_username: Optional[str] = None
     jira_api_token: Optional[str] = None
     jira_instance_url: Optional[str] = None

@@ -16,7 +16,7 @@ class OpenWeatherMapAPIWrapper(BaseModel):
     3. pip install pyowm
     """
-    owm: Any
+    owm: Any = None
     openweathermap_api_key: Optional[str] = None
     model_config = ConfigDict(

@@ -40,7 +40,7 @@ class SerpAPIWrapper(BaseModel):
            serpapi = SerpAPIWrapper()
     """
-    search_engine: Any #: :meta private:
+    search_engine: Any = None #: :meta private:
     params: dict = Field(
         default={
             "engine": "google",

@@ -7,7 +7,7 @@ from pydantic import BaseModel, Field, model_validator
 class StackExchangeAPIWrapper(BaseModel):
     """Wrapper for Stack Exchange API."""
-    client: Any #: :meta private:
+    client: Any = None #: :meta private:
     max_results: int = 3
     """Max number of results to include in output."""
     query_type: Literal["all", "title", "body"] = "all"

@@ -13,7 +13,7 @@ from langchain_community.tools.steam.prompt import (
 class SteamWebAPIWrapper(BaseModel):
     """Wrapper for Steam API."""
-    steam: Any # for python-steam-api
+    steam: Any = None # for python-steam-api
     # operations: a list of dictionaries, each representing a specific operation that
     # can be performed with the API

@@ -26,7 +26,7 @@ class TwilioAPIWrapper(BaseModel):
            twilio.run('test', '+12484345508')
     """
-    client: Any #: :meta private:
+    client: Any = None #: :meta private:
     account_sid: Optional[str] = None
     """Twilio account string identifier."""
     auth_token: Optional[str] = None

@@ -18,7 +18,7 @@ class WolframAlphaAPIWrapper(BaseModel):
     """
-    wolfram_client: Any #: :meta private:
+    wolfram_client: Any = None #: :meta private:
     wolfram_alpha_appid: Optional[str] = None
     model_config = ConfigDict(