From c72da53c109277ec61b27a8a1600b977893b1818 Mon Sep 17 00:00:00 2001
From: Massimiliano Pronesti
Date: Thu, 10 Aug 2023 00:48:29 +0200
Subject: [PATCH] Add logprobs to SamplingParameters in vllm (#9010)

This PR amends #8806, which I opened a few days ago, adding the extra
`logprobs` parameter that I accidentally omitted.
---
 libs/langchain/langchain/llms/vllm.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/libs/langchain/langchain/llms/vllm.py b/libs/langchain/langchain/llms/vllm.py
index d02b6ff02f2..46da858c775 100644
--- a/libs/langchain/langchain/llms/vllm.py
+++ b/libs/langchain/langchain/llms/vllm.py
@@ -54,6 +54,9 @@ class VLLM(BaseLLM):
     max_new_tokens: int = 512
     """Maximum number of tokens to generate per output sequence."""
 
+    logprobs: Optional[int] = None
+    """Number of log probabilities to return per output token."""
+
     client: Any  #: :meta private:
 
     @root_validator()
@@ -91,6 +94,7 @@ class VLLM(BaseLLM):
             "stop": self.stop,
             "ignore_eos": self.ignore_eos,
             "use_beam_search": self.use_beam_search,
+            "logprobs": self.logprobs,
         }
 
     def _generate(
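
Note: below is a minimal usage sketch of the new field, assuming this patch is
merged and vLLM is installed locally; the model name and prompt are illustrative
and not part of the patch. The `logprobs` value is simply forwarded to vLLM's
SamplingParams through `_default_params`.

    # Hedged example: not part of the patch; model name is a placeholder.
    from langchain.llms import VLLM

    llm = VLLM(
        model="mosaicml/mpt-7b",   # any vLLM-supported model
        max_new_tokens=128,
        logprobs=5,                # new field: return top-5 log probs per output token
    )

    # The field added by this patch is passed to vLLM alongside the other
    # sampling parameters (temperature, top_p, use_beam_search, ...).
    print(llm("What is the capital of France?"))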