diff --git a/libs/community/langchain_community/chat_models/deepinfra.py b/libs/community/langchain_community/chat_models/deepinfra.py index 1a9941f3865..41da4adc9d8 100644 --- a/libs/community/langchain_community/chat_models/deepinfra.py +++ b/libs/community/langchain_community/chat_models/deepinfra.py @@ -207,7 +207,7 @@ class ChatDeepInfra(BaseChatModel): request_timeout: Optional[float] = Field(default=None, alias="timeout") temperature: Optional[float] = 1 model_kwargs: Dict[str, Any] = Field(default_factory=dict) - """Run inference with this temperature. Must by in the closed + """Run inference with this temperature. Must be in the closed interval [0.0, 1.0].""" top_p: Optional[float] = None """Decode using nucleus sampling: consider the smallest set of tokens whose diff --git a/libs/community/langchain_community/chat_models/google_palm.py b/libs/community/langchain_community/chat_models/google_palm.py index 28d1d9018ce..bbdc7efc32b 100644 --- a/libs/community/langchain_community/chat_models/google_palm.py +++ b/libs/community/langchain_community/chat_models/google_palm.py @@ -236,7 +236,7 @@ class ChatGooglePalm(BaseChatModel, BaseModel): """Model name to use.""" google_api_key: Optional[SecretStr] = None temperature: Optional[float] = None - """Run inference with this temperature. Must by in the closed + """Run inference with this temperature. Must be in the closed interval [0.0, 1.0].""" top_p: Optional[float] = None """Decode using nucleus sampling: consider the smallest set of tokens whose diff --git a/libs/community/langchain_community/chat_models/litellm.py b/libs/community/langchain_community/chat_models/litellm.py index a16cb187c4f..3ce4836c8a2 100644 --- a/libs/community/langchain_community/chat_models/litellm.py +++ b/libs/community/langchain_community/chat_models/litellm.py @@ -190,7 +190,7 @@ class ChatLiteLLM(BaseChatModel): request_timeout: Optional[Union[float, Tuple[float, float]]] = None temperature: Optional[float] = 1 model_kwargs: Dict[str, Any] = Field(default_factory=dict) - """Run inference with this temperature. Must by in the closed + """Run inference with this temperature. Must be in the closed interval [0.0, 1.0].""" top_p: Optional[float] = None """Decode using nucleus sampling: consider the smallest set of tokens whose diff --git a/libs/community/langchain_community/llms/google_palm.py b/libs/community/langchain_community/llms/google_palm.py index 278a46ea2d7..e996c67b92d 100644 --- a/libs/community/langchain_community/llms/google_palm.py +++ b/libs/community/langchain_community/llms/google_palm.py @@ -72,7 +72,7 @@ class GooglePalm(BaseLLM, BaseModel): model_name: str = "models/text-bison-001" """Model name to use.""" temperature: float = 0.7 - """Run inference with this temperature. Must by in the closed interval + """Run inference with this temperature. Must be in the closed interval [0.0, 1.0].""" top_p: Optional[float] = None """Decode using nucleus sampling: consider the smallest set of tokens whose