From 64f97e707ee194f1c149c1ea7a8dd2292efacde9 Mon Sep 17 00:00:00 2001
From: Wenqi Li <831580+wyli@users.noreply.github.com>
Date: Mon, 31 Mar 2025 16:28:49 +0100
Subject: [PATCH] ollama[patch]: Support seed param for OllamaLLM (#30553)

**Description:** add the `seed` param to the `OllamaLLM` client for
reproducible generation.

**Issue:** follow-up of a similar issue
https://github.com/langchain-ai/langchain/issues/24703, see also
https://github.com/langchain-ai/langchain/pull/24782

**Dependencies:** n/a
---
 libs/partners/ollama/langchain_ollama/llms.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/libs/partners/ollama/langchain_ollama/llms.py b/libs/partners/ollama/langchain_ollama/llms.py
index 16b307041b5..24708e88b64 100644
--- a/libs/partners/ollama/langchain_ollama/llms.py
+++ b/libs/partners/ollama/langchain_ollama/llms.py
@@ -84,6 +84,11 @@ class OllamaLLM(BaseLLM):
     """The temperature of the model. Increasing the temperature will
     make the model answer more creatively. (Default: 0.8)"""

+    seed: Optional[int] = None
+    """Sets the random number seed to use for generation. Setting this
+    to a specific number will make the model generate the same text for
+    the same prompt."""
+
     stop: Optional[List[str]] = None
     """Sets the stop tokens to use."""

@@ -150,6 +155,7 @@ class OllamaLLM(BaseLLM):
                 "repeat_last_n": self.repeat_last_n,
                 "repeat_penalty": self.repeat_penalty,
                 "temperature": self.temperature,
+                "seed": self.seed,
                 "stop": self.stop if stop is None else stop,
                 "tfs_z": self.tfs_z,
                 "top_k": self.top_k,
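
For reference, a minimal usage sketch (not part of the patch) of the intended effect of the new `seed` field; the model name `llama3` and the prompt are assumptions for illustration only:

```python
from langchain_ollama import OllamaLLM

# Fixing the seed makes sampling deterministic for a given model,
# prompt, and temperature, so repeated calls return the same text.
llm = OllamaLLM(model="llama3", temperature=0.8, seed=42)  # "llama3" is an assumed local model

first = llm.invoke("Write a haiku about reproducibility.")
second = llm.invoke("Write a haiku about reproducibility.")
assert first == second  # same seed + same prompt -> identical completion
```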