Mirror of https://github.com/hwchase17/langchain.git (synced 2025-06-28 17:38:36 +00:00)
Increase text-davinci-003 context size to 4097 (#748)
text-davinci-003 supports a context size of 4097 tokens, so modelname_to_contextsize() now returns 4097 instead of 4000 for text-davinci-003.

Co-authored-by: Bill Kish <bill@cogniac.co>
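A minimal usage sketch (not part of this commit) of how the corrected figure surfaces through the wrapper; it assumes the langchain.llms.OpenAI class of this era and an OPENAI_API_KEY in the environment, mirroring the docstring example in the diff below:

    # Hypothetical usage sketch, not part of this commit.
    # Assumes OPENAI_API_KEY is set in the environment.
    from langchain.llms import OpenAI

    openai = OpenAI(model_name="text-davinci-003")
    # After this change the lookup reports the full 4097-token window
    # instead of the previous 4000.
    max_tokens = openai.modelname_to_contextsize("text-davinci-003")
    print(max_tokens)  # 4097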
This commit is contained in:
  parent 6ad360bdef
  commit 309d86e339
@@ -254,7 +254,7 @@ class BaseOpenAI(BaseLLM, BaseModel):
     def modelname_to_contextsize(self, modelname: str) -> int:
         """Calculate the maximum number of tokens possible to generate for a model.
 
-        text-davinci-003: 4,000 tokens
+        text-davinci-003: 4,097 tokens
         text-curie-001: 2,048 tokens
         text-babbage-001: 2,048 tokens
         text-ada-001: 2,048 tokens
@@ -273,7 +273,7 @@ class BaseOpenAI(BaseLLM, BaseModel):
                 max_tokens = openai.modelname_to_contextsize("text-davinci-003")
         """
         if modelname == "text-davinci-003":
-            return 4000
+            return 4097
         elif modelname == "text-curie-001":
             return 2048
         elif modelname == "text-babbage-001":
@@ -285,7 +285,7 @@ class BaseOpenAI(BaseLLM, BaseModel):
         elif modelname == "code-cushman-001":
             return 2048
         else:
-            return 4000
+            return 4097
 
     def max_tokens_for_prompt(self, prompt: str) -> int:
         """Calculate the maximum number of tokens possible to generate for a prompt.
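The diff cuts off before the body of max_tokens_for_prompt, but its docstring suggests it subtracts the prompt's token count from the model's context size, which is why the 4000 vs. 4097 figure matters. A hedged sketch of that relationship (the real method would use the model's tokenizer, not the whitespace split used here):

    # Illustrative sketch only; the function name and tokenizer are placeholders.
    def max_tokens_for_prompt_sketch(prompt: str, context_size: int = 4097) -> int:
        num_prompt_tokens = len(prompt.split())  # stand-in for a real BPE tokenizer
        # Remaining room for the completion within the model's context window.
        return context_size - num_prompt_tokens

    print(max_tokens_for_prompt_sketch("Tell me a joke."))  # 4093 with this toy tokenizer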