diff --git a/docs/docs_skeleton/docs/get_started/quickstart.mdx b/docs/docs_skeleton/docs/get_started/quickstart.mdx
index bbad4f4488e..3c1bf0f8c4a 100644
--- a/docs/docs_skeleton/docs/get_started/quickstart.mdx
+++ b/docs/docs_skeleton/docs/get_started/quickstart.mdx
@@ -85,7 +85,7 @@ import InputMessages from "@snippets/get_started/quickstart/input_messages.mdx"
 
-For both these methods, you can also pass in parameters as key word arguments.
+For both these methods, you can also pass in parameters as keyword arguments.
 For example, you could pass in `temperature=0` to adjust the temperature that is used from what the object was configured with.
 Whatever values are passed in during run time will always override what the object was configured with.
 
diff --git a/docs/snippets/modules/agents/get_started.mdx b/docs/snippets/modules/agents/get_started.mdx
index 78f3f715153..9bde8b49ba4 100644
--- a/docs/snippets/modules/agents/get_started.mdx
+++ b/docs/snippets/modules/agents/get_started.mdx
@@ -54,7 +54,7 @@ prompt = ChatPromptTemplate.from_messages([
 ```
 How does the agent know what tools it can use?
-Those are passed in as a separate argument, so we can bind those as key word arguments to the LLM.
+Those are passed in as a separate argument, so we can bind those as keyword arguments to the LLM.
 
 ```python
 from langchain.tools.render import format_tool_to_openai_function
diff --git a/libs/langchain/langchain/agents/initialize.py b/libs/langchain/langchain/agents/initialize.py
index dfbb7f9b47b..378d2c9d116 100644
--- a/libs/langchain/langchain/agents/initialize.py
+++ b/libs/langchain/langchain/agents/initialize.py
@@ -30,9 +30,9 @@ def initialize_agent(
         callback_manager: CallbackManager to use. Global callback manager is used if
             not provided. Defaults to None.
         agent_path: Path to serialized agent to use.
-        agent_kwargs: Additional key word arguments to pass to the underlying agent
+        agent_kwargs: Additional keyword arguments to pass to the underlying agent
         tags: Tags to apply to the traced runs.
-        **kwargs: Additional key word arguments passed to the agent executor
+        **kwargs: Additional keyword arguments passed to the agent executor
 
     Returns:
         An agent executor
diff --git a/libs/langchain/langchain/agents/loading.py b/libs/langchain/langchain/agents/loading.py
index 8a0c4a9b3f0..32e882c94ad 100644
--- a/libs/langchain/langchain/agents/loading.py
+++ b/libs/langchain/langchain/agents/loading.py
@@ -42,7 +42,7 @@ def load_agent_from_config(
         config: Config dict to load agent from.
         llm: Language model to use as the agent.
         tools: List of tools this agent has access to.
-        **kwargs: Additional key word arguments passed to the agent executor.
+        **kwargs: Additional keyword arguments passed to the agent executor.
 
     Returns:
         An agent executor.
@@ -92,7 +92,7 @@ def load_agent(
 
     Args:
         path: Path to the agent file.
-        **kwargs: Additional key word arguments passed to the agent executor.
+        **kwargs: Additional keyword arguments passed to the agent executor.
 
     Returns:
         An agent executor.
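The quickstart passage patched above says that keyword arguments supplied at call time override whatever the object was configured with. A minimal sketch of that behavior, assuming a `ChatOpenAI` model and using `temperature` as the illustrative parameter:

```python
from langchain.chat_models import ChatOpenAI

# Configured with temperature=0.7 at construction time.
chat = ChatOpenAI(temperature=0.7)

# A keyword argument passed at run time overrides the configured
# value for this call only; the object itself is unchanged.
chat.predict("Tell me a joke", temperature=0)
```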
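Likewise, the agents snippet patched above binds the available tools to the LLM as keyword arguments. A sketch of that binding step, assuming `tools` is an existing list of LangChain tools:

```python
from langchain.chat_models import ChatOpenAI
from langchain.tools.render import format_tool_to_openai_function

llm = ChatOpenAI(temperature=0)

# Each bound keyword argument is sent along with every call to the model;
# here we advertise the tools in the OpenAI functions format.
llm_with_tools = llm.bind(
    functions=[format_tool_to_openai_function(t) for t in tools]
)
```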
diff --git a/libs/langchain/langchain/chat_models/azureml_endpoint.py b/libs/langchain/langchain/chat_models/azureml_endpoint.py
index 9cd57fbcf8c..2ab7d3ea4f7 100644
--- a/libs/langchain/langchain/chat_models/azureml_endpoint.py
+++ b/libs/langchain/langchain/chat_models/azureml_endpoint.py
@@ -93,7 +93,7 @@ class AzureMLChatOnlineEndpoint(SimpleChatModel):
         the endpoint"""
 
     model_kwargs: Optional[dict] = None
-    """Key word arguments to pass to the model."""
+    """Keyword arguments to pass to the model."""
 
     @validator("http_client", always=True, allow_reuse=True)
     @classmethod
diff --git a/libs/langchain/langchain/embeddings/bedrock.py b/libs/langchain/langchain/embeddings/bedrock.py
index 654cce07d0a..b65200901bb 100644
--- a/libs/langchain/langchain/embeddings/bedrock.py
+++ b/libs/langchain/langchain/embeddings/bedrock.py
@@ -59,7 +59,7 @@ class BedrockEmbeddings(BaseModel, Embeddings):
         equivalent to the modelId property in the list-foundation-models api"""
 
     model_kwargs: Optional[Dict] = None
-    """Key word arguments to pass to the model."""
+    """Keyword arguments to pass to the model."""
 
     endpoint_url: Optional[str] = None
    """Needed if you don't want to default to us-east-1 endpoint"""
diff --git a/libs/langchain/langchain/embeddings/huggingface.py b/libs/langchain/langchain/embeddings/huggingface.py
index 870c3965a8f..5f355d0e2bd 100644
--- a/libs/langchain/langchain/embeddings/huggingface.py
+++ b/libs/langchain/langchain/embeddings/huggingface.py
@@ -45,9 +45,9 @@ class HuggingFaceEmbeddings(BaseModel, Embeddings):
     """Path to store models.
     Can be also set by SENTENCE_TRANSFORMERS_HOME environment variable."""
     model_kwargs: Dict[str, Any] = Field(default_factory=dict)
-    """Key word arguments to pass to the model."""
+    """Keyword arguments to pass to the model."""
     encode_kwargs: Dict[str, Any] = Field(default_factory=dict)
-    """Key word arguments to pass when calling the `encode` method of the model."""
+    """Keyword arguments to pass when calling the `encode` method of the model."""
     multi_process: bool = False
     """Run encode() on multiple GPUs."""
@@ -133,9 +133,9 @@ class HuggingFaceInstructEmbeddings(BaseModel, Embeddings):
     """Path to store models.
     Can be also set by SENTENCE_TRANSFORMERS_HOME environment variable."""
     model_kwargs: Dict[str, Any] = Field(default_factory=dict)
-    """Key word arguments to pass to the model."""
+    """Keyword arguments to pass to the model."""
     encode_kwargs: Dict[str, Any] = Field(default_factory=dict)
-    """Key word arguments to pass when calling the `encode` method of the model."""
+    """Keyword arguments to pass when calling the `encode` method of the model."""
     embed_instruction: str = DEFAULT_EMBED_INSTRUCTION
     """Instruction to use for embedding documents."""
     query_instruction: str = DEFAULT_QUERY_INSTRUCTION
@@ -212,9 +212,9 @@ class HuggingFaceBgeEmbeddings(BaseModel, Embeddings):
     """Path to store models.
     Can be also set by SENTENCE_TRANSFORMERS_HOME environment variable."""
     model_kwargs: Dict[str, Any] = Field(default_factory=dict)
-    """Key word arguments to pass to the model."""
+    """Keyword arguments to pass to the model."""
     encode_kwargs: Dict[str, Any] = Field(default_factory=dict)
-    """Key word arguments to pass when calling the `encode` method of the model."""
+    """Keyword arguments to pass when calling the `encode` method of the model."""
     query_instruction: str = DEFAULT_QUERY_BGE_INSTRUCTION_EN
     """Instruction to use for embedding query."""
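The docstrings fixed above distinguish keyword arguments passed when loading the model (`model_kwargs`) from those passed on each `encode` call (`encode_kwargs`). A representative sketch of the two in use; the model name and values are illustrative:

```python
from langchain.embeddings import HuggingFaceEmbeddings

embeddings = HuggingFaceEmbeddings(
    model_name="sentence-transformers/all-mpnet-base-v2",
    # Forwarded to the sentence-transformers model constructor.
    model_kwargs={"device": "cpu"},
    # Forwarded to every call of the model's encode() method.
    encode_kwargs={"normalize_embeddings": True},
)
vector = embeddings.embed_query("What are keyword arguments?")
```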
diff --git a/libs/langchain/langchain/embeddings/huggingface_hub.py b/libs/langchain/langchain/embeddings/huggingface_hub.py
index 7bb52f3fa56..d887be2abc0 100644
--- a/libs/langchain/langchain/embeddings/huggingface_hub.py
+++ b/libs/langchain/langchain/embeddings/huggingface_hub.py
@@ -33,7 +33,7 @@ class HuggingFaceHubEmbeddings(BaseModel, Embeddings):
     task: Optional[str] = "feature-extraction"
     """Task to call the model with."""
     model_kwargs: Optional[dict] = None
-    """Key word arguments to pass to the model."""
+    """Keyword arguments to pass to the model."""
 
     huggingfacehub_api_token: Optional[str] = None
diff --git a/libs/langchain/langchain/embeddings/sagemaker_endpoint.py b/libs/langchain/langchain/embeddings/sagemaker_endpoint.py
index f3a72ccb375..6bfd29f2c22 100644
--- a/libs/langchain/langchain/embeddings/sagemaker_endpoint.py
+++ b/libs/langchain/langchain/embeddings/sagemaker_endpoint.py
@@ -90,7 +90,7 @@ class SagemakerEndpointEmbeddings(BaseModel, Embeddings):
     """  # noqa: E501
 
     model_kwargs: Optional[Dict] = None
-    """Key word arguments to pass to the model."""
+    """Keyword arguments to pass to the model."""
 
     endpoint_kwargs: Optional[Dict] = None
     """Optional attributes passed to the invoke_endpoint
diff --git a/libs/langchain/langchain/embeddings/self_hosted_hugging_face.py b/libs/langchain/langchain/embeddings/self_hosted_hugging_face.py
index e12263d67f0..7fbcc60f4d6 100644
--- a/libs/langchain/langchain/embeddings/self_hosted_hugging_face.py
+++ b/libs/langchain/langchain/embeddings/self_hosted_hugging_face.py
@@ -86,7 +86,7 @@ class SelfHostedHuggingFaceEmbeddings(SelfHostedEmbeddings):
     model_load_fn: Callable = load_embedding_model
     """Function to load the model remotely on the server."""
     load_fn_kwargs: Optional[dict] = None
-    """Key word arguments to pass to the model load function."""
+    """Keyword arguments to pass to the model load function."""
     inference_fn: Callable = _embed_documents
     """Inference function to extract the embeddings."""
diff --git a/libs/langchain/langchain/llms/amazon_api_gateway.py b/libs/langchain/langchain/llms/amazon_api_gateway.py
index 3161c0bc2ee..1a019dfdd92 100644
--- a/libs/langchain/langchain/llms/amazon_api_gateway.py
+++ b/libs/langchain/langchain/llms/amazon_api_gateway.py
@@ -36,7 +36,7 @@ class AmazonAPIGateway(LLM):
     """API Gateway HTTP Headers to send, e.g.
     for authentication"""
 
     model_kwargs: Optional[Dict] = None
-    """Key word arguments to pass to the model."""
+    """Keyword arguments to pass to the model."""
 
     content_handler: ContentHandlerAmazonAPIGateway = ContentHandlerAmazonAPIGateway()
     """The content handler class that provides an input and
diff --git a/libs/langchain/langchain/llms/anyscale.py b/libs/langchain/langchain/llms/anyscale.py
index 810d1730712..b3582961141 100644
--- a/libs/langchain/langchain/llms/anyscale.py
+++ b/libs/langchain/langchain/llms/anyscale.py
@@ -36,7 +36,7 @@ class Anyscale(LLM):
     """
 
     model_kwargs: Optional[dict] = None
-    """Key word arguments to pass to the model. Reserved for future use"""
+    """Keyword arguments to pass to the model. Reserved for future use"""
 
     anyscale_service_url: Optional[str] = None
     anyscale_service_route: Optional[str] = None
diff --git a/libs/langchain/langchain/llms/azureml_endpoint.py b/libs/langchain/langchain/llms/azureml_endpoint.py
index 3e7833bc988..e8baa7d8c45 100644
--- a/libs/langchain/langchain/llms/azureml_endpoint.py
+++ b/libs/langchain/langchain/llms/azureml_endpoint.py
@@ -230,7 +230,7 @@ class AzureMLOnlineEndpoint(LLM, BaseModel):
         the endpoint"""
 
     model_kwargs: Optional[dict] = None
-    """Key word arguments to pass to the model."""
+    """Keyword arguments to pass to the model."""
 
     @validator("http_client", always=True, allow_reuse=True)
     @classmethod
diff --git a/libs/langchain/langchain/llms/bedrock.py b/libs/langchain/langchain/llms/bedrock.py
index 265ac725921..1713af650c6 100644
--- a/libs/langchain/langchain/llms/bedrock.py
+++ b/libs/langchain/langchain/llms/bedrock.py
@@ -147,7 +147,7 @@ class BedrockBase(BaseModel, ABC):
         equivalent to the modelId property in the list-foundation-models api"""
 
     model_kwargs: Optional[Dict] = None
-    """Key word arguments to pass to the model."""
+    """Keyword arguments to pass to the model."""
 
     endpoint_url: Optional[str] = None
     """Needed if you don't want to default to us-east-1 endpoint"""
diff --git a/libs/langchain/langchain/llms/chatglm.py b/libs/langchain/langchain/llms/chatglm.py
index 232f2f9af71..e7b2a49d163 100644
--- a/libs/langchain/langchain/llms/chatglm.py
+++ b/libs/langchain/langchain/llms/chatglm.py
@@ -28,7 +28,7 @@ class ChatGLM(LLM):
     endpoint_url: str = "http://127.0.0.1:8000/"
     """Endpoint URL to use."""
     model_kwargs: Optional[dict] = None
-    """Key word arguments to pass to the model."""
+    """Keyword arguments to pass to the model."""
     max_token: int = 20000
     """Max token allowed to pass to the model."""
     temperature: float = 0.1
diff --git a/libs/langchain/langchain/llms/deepsparse.py b/libs/langchain/langchain/llms/deepsparse.py
index f506e2c1200..aa7d8612afc 100644
--- a/libs/langchain/langchain/llms/deepsparse.py
+++ b/libs/langchain/langchain/llms/deepsparse.py
@@ -28,7 +28,7 @@ class DeepSparse(LLM):
     """The path to a model file or directory or the name of a SparseZoo model stub."""
 
     model_config: Optional[Dict[str, Any]] = None
-    """Key word arguments passed to the pipeline construction.
+    """Keyword arguments passed to the pipeline construction.
     Common parameters are sequence_length, prompt_sequence_length"""
 
     generation_config: Union[None, str, Dict] = None
diff --git a/libs/langchain/langchain/llms/gradient_ai.py b/libs/langchain/langchain/llms/gradient_ai.py
index 9bc72a8d2e2..4a2c4f12dcc 100644
--- a/libs/langchain/langchain/llms/gradient_ai.py
+++ b/libs/langchain/langchain/llms/gradient_ai.py
@@ -54,7 +54,7 @@ class GradientLLM(LLM):
     """
 
     model_kwargs: Optional[dict] = None
-    """Key word arguments to pass to the model."""
+    """Keyword arguments to pass to the model."""
 
     gradient_api_url: str = "https://api.gradient.ai/api"
     """Endpoint URL to use."""
diff --git a/libs/langchain/langchain/llms/huggingface_endpoint.py b/libs/langchain/langchain/llms/huggingface_endpoint.py
index a1593287a60..2c55bb213e5 100644
--- a/libs/langchain/langchain/llms/huggingface_endpoint.py
+++ b/libs/langchain/langchain/llms/huggingface_endpoint.py
@@ -39,7 +39,7 @@ class HuggingFaceEndpoint(LLM):
     """Task to call the model with.
     Should be a task that returns `generated_text` or `summary_text`."""
     model_kwargs: Optional[dict] = None
-    """Key word arguments to pass to the model."""
+    """Keyword arguments to pass to the model."""
 
     huggingfacehub_api_token: Optional[str] = None
diff --git a/libs/langchain/langchain/llms/huggingface_hub.py b/libs/langchain/langchain/llms/huggingface_hub.py
index f1bc8977ab9..45b326c4e9b 100644
--- a/libs/langchain/langchain/llms/huggingface_hub.py
+++ b/libs/langchain/langchain/llms/huggingface_hub.py
@@ -33,7 +33,7 @@ class HuggingFaceHub(LLM):
     """Task to call the model with.
     Should be a task that returns `generated_text` or `summary_text`."""
     model_kwargs: Optional[dict] = None
-    """Key word arguments to pass to the model."""
+    """Keyword arguments to pass to the model."""
 
     huggingfacehub_api_token: Optional[str] = None
diff --git a/libs/langchain/langchain/llms/huggingface_pipeline.py b/libs/langchain/langchain/llms/huggingface_pipeline.py
index 336b61842ed..06581859023 100644
--- a/libs/langchain/langchain/llms/huggingface_pipeline.py
+++ b/libs/langchain/langchain/llms/huggingface_pipeline.py
@@ -53,9 +53,9 @@ class HuggingFacePipeline(BaseLLM):
     model_id: str = DEFAULT_MODEL_ID
     """Model name to use."""
     model_kwargs: Optional[dict] = None
-    """Key word arguments passed to the model."""
+    """Keyword arguments passed to the model."""
     pipeline_kwargs: Optional[dict] = None
-    """Key word arguments passed to the pipeline."""
+    """Keyword arguments passed to the pipeline."""
     batch_size: int = DEFAULT_BATCH_SIZE
     """Batch size to use when passing multiple documents to generate."""
diff --git a/libs/langchain/langchain/llms/mosaicml.py b/libs/langchain/langchain/llms/mosaicml.py
index 718466178b3..9103988f5cb 100644
--- a/libs/langchain/langchain/llms/mosaicml.py
+++ b/libs/langchain/langchain/llms/mosaicml.py
@@ -53,7 +53,7 @@ class MosaicML(LLM):
     inject_instruction_format: bool = False
     """Whether to inject the instruction format into the prompt."""
     model_kwargs: Optional[dict] = None
-    """Key word arguments to pass to the model."""
+    """Keyword arguments to pass to the model."""
     retry_sleep: float = 1.0
     """How long to try sleeping for if a rate limit is encountered"""
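Most of the LLM wrappers touched in this patch expose the same `model_kwargs` pattern: a dict forwarded verbatim to the underlying model on every request. A sketch using the Hugging Face Hub wrapper; the repo id and parameter values are illustrative, and a `HUGGINGFACEHUB_API_TOKEN` is assumed to be set in the environment:

```python
from langchain.llms import HuggingFaceHub

llm = HuggingFaceHub(
    repo_id="google/flan-t5-small",
    # Forwarded with every inference request to the Hub endpoint.
    model_kwargs={"temperature": 0.5, "max_length": 64},
)
print(llm("What is a keyword argument?"))
```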
diff --git a/libs/langchain/langchain/llms/octoai_endpoint.py b/libs/langchain/langchain/llms/octoai_endpoint.py
index 93bdb36ff93..adc2bf89bce 100644
--- a/libs/langchain/langchain/llms/octoai_endpoint.py
+++ b/libs/langchain/langchain/llms/octoai_endpoint.py
@@ -40,7 +40,7 @@ class OctoAIEndpoint(LLM):
     """Endpoint URL to use."""
 
     model_kwargs: Optional[dict] = None
-    """Key word arguments to pass to the model."""
+    """Keyword arguments to pass to the model."""
 
     octoai_api_token: Optional[str] = None
     """OCTOAI API Token"""
diff --git a/libs/langchain/langchain/llms/openllm.py b/libs/langchain/langchain/llms/openllm.py
index 4677d8b96c8..42238ac3b87 100644
--- a/libs/langchain/langchain/llms/openllm.py
+++ b/libs/langchain/langchain/llms/openllm.py
@@ -88,7 +88,7 @@ class OpenLLM(LLM):
     """Initialize this LLM instance in current process by default. Should
     only set to False when using in conjunction with BentoML Service."""
     llm_kwargs: Dict[str, Any]
-    """Key word arguments to be passed to openllm.LLM"""
+    """Keyword arguments to be passed to openllm.LLM"""
 
     _runner: Optional[openllm.LLMRunner] = PrivateAttr(default=None)
     _client: Union[
diff --git a/libs/langchain/langchain/llms/sagemaker_endpoint.py b/libs/langchain/langchain/llms/sagemaker_endpoint.py
index b38cbd40d4c..c7e465c8c14 100644
--- a/libs/langchain/langchain/llms/sagemaker_endpoint.py
+++ b/libs/langchain/langchain/llms/sagemaker_endpoint.py
@@ -171,7 +171,7 @@ class SagemakerEndpoint(LLM):
     """
 
     model_kwargs: Optional[Dict] = None
-    """Key word arguments to pass to the model."""
+    """Keyword arguments to pass to the model."""
 
     endpoint_kwargs: Optional[Dict] = None
     """Optional attributes passed to the invoke_endpoint
diff --git a/libs/langchain/langchain/llms/self_hosted.py b/libs/langchain/langchain/llms/self_hosted.py
index b43ce822e54..8e188814933 100644
--- a/libs/langchain/langchain/llms/self_hosted.py
+++ b/libs/langchain/langchain/llms/self_hosted.py
@@ -132,7 +132,7 @@ class SelfHostedPipeline(LLM):
     model_load_fn: Callable
     """Function to load the model remotely on the server."""
     load_fn_kwargs: Optional[dict] = None
-    """Key word arguments to pass to the model load function."""
+    """Keyword arguments to pass to the model load function."""
     model_reqs: List[str] = ["./", "torch"]
     """Requirements to install on hardware to inference the model."""
diff --git a/libs/langchain/langchain/llms/self_hosted_hugging_face.py b/libs/langchain/langchain/llms/self_hosted_hugging_face.py
index 0b590cf5192..77c00d81734 100644
--- a/libs/langchain/langchain/llms/self_hosted_hugging_face.py
+++ b/libs/langchain/langchain/llms/self_hosted_hugging_face.py
@@ -158,7 +158,7 @@ class SelfHostedHuggingFaceLLM(SelfHostedPipeline):
     device: int = 0
     """Device to use for inference. -1 for CPU, 0 for GPU, 1 for second GPU, etc."""
     model_kwargs: Optional[dict] = None
-    """Key word arguments to pass to the model."""
+    """Keyword arguments to pass to the model."""
     hardware: Any
     """Remote hardware to send the inference function to."""
     model_reqs: List[str] = ["./", "transformers", "torch"]
diff --git a/libs/langchain/langchain/llms/xinference.py b/libs/langchain/langchain/llms/xinference.py
index 3b16838da1a..9ca7e078dd2 100644
--- a/libs/langchain/langchain/llms/xinference.py
+++ b/libs/langchain/langchain/llms/xinference.py
@@ -80,7 +80,7 @@ class Xinference(LLM):
     model_uid: Optional[str]
     """UID of the launched model"""
     model_kwargs: Dict[str, Any]
-    """Key word arguments to be passed to xinference.LLM"""
+    """Keyword arguments to be passed to xinference.LLM"""
 
     def __init__(
         self,
diff --git a/libs/langchain/langchain/retrievers/kendra.py b/libs/langchain/langchain/retrievers/kendra.py
index fd11358f850..68d6444caa2 100644
--- a/libs/langchain/langchain/retrievers/kendra.py
+++ b/libs/langchain/langchain/retrievers/kendra.py
@@ -53,7 +53,7 @@ Dates are also represented as str.
 # Unexpected keyword argument "extra" for "__init_subclass__" of "object"
 class Highlight(BaseModel, extra=Extra.allow):  # type: ignore[call-arg]
-    """Information that highlights the key words in the excerpt."""
+    """Information that highlights the keywords in the excerpt."""
 
     BeginOffset: int
     """The zero-based location in the excerpt where the highlight starts."""
diff --git a/libs/langchain/tests/unit_tests/test_formatting.py b/libs/langchain/tests/unit_tests/test_formatting.py
index 482615608e6..ef941932da2 100644
--- a/libs/langchain/tests/unit_tests/test_formatting.py
+++ b/libs/langchain/tests/unit_tests/test_formatting.py
@@ -20,7 +20,7 @@ def test_does_not_allow_args() -> None:
 
 
 def test_does_not_allow_extra_kwargs() -> None:
-    """Test formatting does not allow extra key word arguments."""
+    """Test formatting does not allow extra keyword arguments."""
     template = "This is a {foo} test."
     with pytest.raises(KeyError):
         formatter.format(template, foo="good", bar="oops")
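For reference, the strict formatter exercised by this last test rejects extra keyword arguments rather than silently ignoring them. A short usage sketch, grounded in the test above:

```python
from langchain.formatting import formatter

template = "This is a {foo} test."
formatter.format(template, foo="good")  # -> "This is a good test."
# An unused keyword argument raises KeyError, as the test asserts:
# formatter.format(template, foo="good", bar="oops")
```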