diff --git a/libs/community/langchain_community/embeddings/ollama.py b/libs/community/langchain_community/embeddings/ollama.py
index 9b9830fb0a0..f1c28f1124e 100644
--- a/libs/community/langchain_community/embeddings/ollama.py
+++ b/libs/community/langchain_community/embeddings/ollama.py
@@ -97,7 +97,7 @@ class OllamaEmbeddings(BaseModel, Embeddings):
     will give more diverse answers, while a lower value (e.g. 10)
     will be more conservative. (Default: 40)"""
 
-    top_p: Optional[int] = None
+    top_p: Optional[float] = None
     """Works together with top-k. A higher value (e.g., 0.95) will lead
     to more diverse text, while a lower value (e.g., 0.5) will
     generate more focused and conservative text. (Default: 0.9)"""
diff --git a/libs/community/langchain_community/llms/nlpcloud.py b/libs/community/langchain_community/llms/nlpcloud.py
index bdff6404290..a73087918ce 100644
--- a/libs/community/langchain_community/llms/nlpcloud.py
+++ b/libs/community/langchain_community/llms/nlpcloud.py
@@ -38,7 +38,7 @@ class NLPCloud(LLM):
     """Whether or not to remove the end sequence token."""
     bad_words: List[str] = []
     """List of tokens not allowed to be generated."""
-    top_p: int = 1
+    top_p: float = 1.0
     """Total probability mass of tokens to consider at each step."""
     top_k: int = 50
     """The number of highest probability tokens to keep for top-k filtering."""
diff --git a/libs/community/langchain_community/llms/ollama.py b/libs/community/langchain_community/llms/ollama.py
index 078edcf0928..29a724b07e9 100644
--- a/libs/community/langchain_community/llms/ollama.py
+++ b/libs/community/langchain_community/llms/ollama.py
@@ -90,7 +90,7 @@ class _OllamaCommon(BaseLanguageModel):
     will give more diverse answers, while a lower value (e.g. 10)
     will be more conservative. (Default: 40)"""
 
-    top_p: Optional[int] = None
+    top_p: Optional[float] = None
     """Works together with top-k. A higher value (e.g., 0.95) will lead
     to more diverse text, while a lower value (e.g., 0.5) will
     generate more focused and conservative text. (Default: 0.9)"""
diff --git a/libs/partners/google-genai/langchain_google_genai/chat_models.py b/libs/partners/google-genai/langchain_google_genai/chat_models.py
index d226aeefc4f..62cb707cdca 100644
--- a/libs/partners/google-genai/langchain_google_genai/chat_models.py
+++ b/libs/partners/google-genai/langchain_google_genai/chat_models.py
@@ -443,7 +443,7 @@ Supported examples:
     top_k: Optional[int] = None
     """Decode using top-k sampling: consider the set of top_k
         most probable tokens. Must be positive."""
-    top_p: Optional[int] = None
+    top_p: Optional[float] = None
     """The maximum cumulative probability of tokens to consider when sampling.
 
        The model uses combined Top-k and nucleus sampling.
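The change is the same across all four models: `top_p` is a nucleus-sampling cutoff, a cumulative probability mass in [0, 1], so annotating it as `int` makes every meaningful value (e.g. 0.9) invalid. A minimal sketch of the failure mode, assuming the pydantic v1-style coercion these BaseModel classes relied on at the time (`BuggySampler` and `FixedSampler` are hypothetical names, not from the diff):

```python
# Sketch only: demonstrates why the int annotation was a bug.
# Assumes pydantic v1 semantics (lossy float -> int coercion).
from typing import Optional

from pydantic.v1 import BaseModel  # plain `pydantic` on a v1 install


class BuggySampler(BaseModel):
    top_p: Optional[int] = None  # old annotation


class FixedSampler(BaseModel):
    top_p: Optional[float] = None  # annotation after this diff


# Pydantic v1 truncates the float to satisfy the int annotation, so a
# typical nucleus-sampling value of 0.9 silently becomes 0, which tells
# the backend to consider (almost) no probability mass at all.
print(BuggySampler(top_p=0.9).top_p)  # 0
print(FixedSampler(top_p=0.9).top_p)  # 0.9
```

Under pydantic v2's default validation the int-annotated field would instead reject 0.9 outright; either way the float annotation is the only one that matches the documented defaults (0.9 for Ollama, 1.0 for NLPCloud).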