Mirror of https://github.com/hwchase17/langchain.git (synced 2025-09-09 06:53:59 +00:00)
openai[patch]: update openai params (#23691)
**Description:** Explicitly add parameters from the OpenAI API.

- [X] **Lint and test**: Run `make format`, `make lint` and `make test` from the root of the package(s) you've modified. See contribution guidelines for more: https://python.langchain.com/docs/contributing/

Co-authored-by: Erick Friis <erick@langchain.dev>
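For readers skimming the diff below: the practical effect is that these OpenAI options become first-class, typed constructor arguments on `ChatOpenAI` instead of having to travel through `model_kwargs`. A minimal sketch of the resulting call style (the model name and parameter values are illustrative, not part of this patch):

```python
from langchain_openai import ChatOpenAI

# After this patch the OpenAI sampling/penalty options are explicit
# fields on the model class rather than untyped `model_kwargs` entries.
llm = ChatOpenAI(
    model="gpt-4o-mini",    # illustrative model name
    temperature=0.2,
    presence_penalty=0.5,   # penalize tokens that already appeared
    frequency_penalty=0.3,  # penalize tokens by how often they appeared
    seed=42,                # best-effort reproducible sampling
    logprobs=True,          # ask the API to return log probabilities
    top_logprobs=3,         # only honored when logprobs=True
)
print(llm.invoke("Write one short sentence about otters.").content)
```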
@@ -319,10 +319,26 @@ class BaseChatOpenAI(BaseChatModel):

```python
        None."""
    max_retries: int = 2
    """Maximum number of retries to make when generating."""
    presence_penalty: Optional[float] = None
    """Penalizes repeated tokens."""
    frequency_penalty: Optional[float] = None
    """Penalizes repeated tokens according to frequency."""
    seed: Optional[int] = None
    """Seed for generation"""
    logprobs: Optional[bool] = False
    """Whether to return logprobs."""
    top_logprobs: Optional[int] = None
    """Number of most likely tokens to return at each token position, each with
    an associated log probability. `logprobs` must be set to true
    if this parameter is used."""
    logit_bias: Optional[Dict[int, int]] = None
    """Modify the likelihood of specified tokens appearing in the completion."""
    streaming: bool = False
    """Whether to stream the results or not."""
    n: int = 1
    """Number of chat completions to generate for each prompt."""
    top_p: Optional[float] = None
    """Total probability mass of tokens to consider at each step."""
    max_tokens: Optional[int] = None
    """Maximum number of tokens to generate."""
    tiktoken_model_name: Optional[str] = None
```
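The `top_logprobs` docstring encodes an API-side constraint: it only takes effect when `logprobs` is enabled. A hedged sketch of consuming the returned values follows; `response_metadata["logprobs"]` is where `langchain-openai` surfaces the raw OpenAI logprobs payload in recent releases, but treat the exact key layout as an assumption:

```python
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-4o-mini", logprobs=True, top_logprobs=2)
msg = llm.invoke("Say hello")

# Assumption: the metadata mirrors the raw OpenAI response, where each
# content entry carries the sampled token plus its top alternatives.
for entry in msg.response_metadata["logprobs"]["content"]:
    print(entry["token"], entry["logprob"], entry["top_logprobs"])
```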
@@ -444,6 +460,13 @@ class BaseChatOpenAI(BaseChatModel):

```python
            "stream": self.streaming,
            "n": self.n,
            "temperature": self.temperature,
            "presence_penalty": self.presence_penalty,
            "frequency_penalty": self.frequency_penalty,
            "seed": self.seed,
            "top_p": self.top_p,
            "logprobs": self.logprobs,
            "top_logprobs": self.top_logprobs,
            "logit_bias": self.logit_bias,
            **self.model_kwargs,
        }
        if self.max_tokens is not None:
```
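Note the ordering in the dict literal: `**self.model_kwargs` is unpacked last, so on a key collision a passthrough kwarg would shadow the explicit field (the constructor may reject such collisions during validation; the snippet below, with hypothetical values, only demonstrates the dict-merge rule itself):

```python
# Plain-Python illustration of the merge order used in the params dict:
# later ** unpacking wins on key collisions.
explicit = {"seed": 42, "top_p": 0.9}  # explicit, typed fields
model_kwargs = {"seed": 7}             # user passthrough kwargs

payload = {**explicit, **model_kwargs}
assert payload == {"seed": 7, "top_p": 0.9}
```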