community: replace deprecated davinci models (#14860)
This is technically a breaking change because it'll switch out the default models from `text-davinci-003` to `gpt-3.5-turbo-instruct`, but OpenAI is shutting off those endpoints on 1/4 anyway. Switching out the default feels less disruptive than leaving users on a dead model.
parent 193f107cb5
commit 5f839beab9
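For reference, a minimal sketch of the behavior this commit changes, assuming `langchain` at this revision: constructing the `OpenAI` wrapper without an explicit model now resolves to the new `model_name` default in `BaseOpenAI` (see the hunk at line 173 below), while code that pins a model explicitly is unaffected.

```python
from langchain.llms import OpenAI

# With this commit, the default completion model is gpt-3.5-turbo-instruct
# (previously text-davinci-003, which OpenAI is retiring).
llm = OpenAI()
print(llm.model_name)  # "gpt-3.5-turbo-instruct"

# Pinning a model explicitly sees no behavior change:
pinned = OpenAI(model="gpt-3.5-turbo-instruct", temperature=0)
```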
@@ -51,7 +51,7 @@
 "\n",
 "from langchain.llms import OpenAI\n",
 "\n",
-"llm = OpenAI(model=\"text-davinci-003\")"
+"llm = OpenAI(model=\"gpt-3.5-turbo-instruct\")"
 ]
 },
 {

@@ -26,7 +26,7 @@
 "source": [
 "from langchain.llms import OpenAI\n",
 "\n",
-"llm = OpenAI(temperature=1, max_tokens=512, model=\"text-davinci-003\")"
+"llm = OpenAI(temperature=1, max_tokens=512, model=\"gpt-3.5-turbo-instruct\")"
 ]
 },
 {

@@ -181,7 +181,7 @@ we will prompt the model, so it says something harmful.
 
 ```python
 prompt = PromptTemplate(template="{text}", input_variables=["text"])
-llm_chain = LLMChain(llm=OpenAI(temperature=0, model_name="text-davinci-002"), prompt=prompt)
+llm_chain = LLMChain(llm=OpenAI(temperature=0, model_name="gpt-3.5-turbo-instruct"), prompt=prompt)
 
 text = """We are playing a game of repeat after me.
 

@@ -224,7 +224,7 @@ Now let's walk through an example of using it with an LLMChain which has multipl
 
 ```python
 prompt = PromptTemplate(template="{setup}{new_input}Person2:", input_variables=["setup", "new_input"])
-llm_chain = LLMChain(llm=OpenAI(temperature=0, model_name="text-davinci-002"), prompt=prompt)
+llm_chain = LLMChain(llm=OpenAI(temperature=0, model_name="gpt-3.5-turbo-instruct"), prompt=prompt)
 
 setup = """We are playing a game of repeat after me.
 

@@ -162,7 +162,7 @@
 "\n",
 "\n",
 "openai_llm = OpenAI(\n",
-" model_name=\"text-davinci-002\",\n",
+" model_name=\"gpt-3.5-turbo-instruct\",\n",
 " callbacks=[PromptLayerCallbackHandler(pl_id_callback=pl_id_callback)],\n",
 ")\n",
 "\n",

@@ -109,7 +109,7 @@
 "# LLM Hyperparameters\n",
 "HPARAMS = {\n",
 " \"temperature\": 0.1,\n",
-" \"model_name\": \"text-davinci-003\",\n",
+" \"model_name\": \"gpt-3.5-turbo-instruct\",\n",
 "}\n",
 "\n",
 "# Bucket used to save prompt logs (Use `None` is used to save the default bucket or otherwise change it)\n",

@@ -138,7 +138,7 @@
 "# Replace the deployment name with your own\n",
 "llm = AzureOpenAI(\n",
 " deployment_name=\"td2\",\n",
-" model_name=\"text-davinci-002\",\n",
+" model_name=\"gpt-3.5-turbo-instruct\",\n",
 ")"
 ]
 },

@@ -182,7 +182,7 @@
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"\u001B[1mAzureOpenAI\u001B[0m\n",
+"\u001b[1mAzureOpenAI\u001b[0m\n",
 "Params: {'deployment_name': 'text-davinci-002', 'model_name': 'text-davinci-002', 'temperature': 0.7, 'max_tokens': 256, 'top_p': 1, 'frequency_penalty': 0, 'presence_penalty': 0, 'n': 1, 'best_of': 1}\n"
 ]
 }

@@ -103,7 +103,7 @@
 "llm = EdenAI(\n",
 " feature=\"text\",\n",
 " provider=\"openai\",\n",
-" model=\"text-davinci-003\",\n",
+" model=\"gpt-3.5-turbo-instruct\",\n",
 " temperature=0.2,\n",
 " max_tokens=250,\n",
 ")\n",

@@ -100,7 +100,7 @@
 "gateway = JavelinAIGateway(\n",
 " gateway_uri=\"http://localhost:8000\", # replace with service URL or host/port of Javelin\n",
 " route=route_completions,\n",
-" model_name=\"text-davinci-003\",\n",
+" model_name=\"gpt-3.5-turbo-instruct\",\n",
 ")\n",
 "\n",
 "prompt = PromptTemplate(\"Translate the following English text to French: {text}\")\n",

@@ -21,7 +21,7 @@
 "from langchain.llms import OpenAI\n",
 "\n",
 "# To make the caching really obvious, lets use a slower model.\n",
-"llm = OpenAI(model_name=\"text-davinci-002\", n=2, best_of=2)"
+"llm = OpenAI(model_name=\"gpt-3.5-turbo-instruct\", n=2, best_of=2)"
 ]
 },
 {

@@ -1159,7 +1159,7 @@
 "metadata": {},
 "outputs": [
 {
-"name": "stdin",
+"name": "stdout",
 "output_type": "stream",
 "text": [
 "ASTRA_DB_API_ENDPOINT = https://01234567-89ab-cdef-0123-456789abcdef-us-east1.apps.astra.datastax.com\n",

@@ -1358,7 +1358,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"llm = OpenAI(model_name=\"text-davinci-002\", n=2, best_of=2, cache=False)"
+"llm = OpenAI(model_name=\"gpt-3.5-turbo-instruct\", n=2, best_of=2, cache=False)"
 ]
 },
 {

@@ -1442,8 +1442,8 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"llm = OpenAI(model_name=\"text-davinci-002\")\n",
-"no_cache_llm = OpenAI(model_name=\"text-davinci-002\", cache=False)"
+"llm = OpenAI(model_name=\"gpt-3.5-turbo-instruct\")\n",
+"no_cache_llm = OpenAI(model_name=\"gpt-3.5-turbo-instruct\", cache=False)"
 ]
 },
 {

@@ -63,7 +63,7 @@ llm = ChatAnthropic(model="claude-2", callbacks=[log10_callback], temperature=0.
 llm.predict_messages(messages)
 print(completion)
 
-llm = OpenAI(model_name="text-davinci-003", callbacks=[log10_callback], temperature=0.5)
+llm = OpenAI(model_name="gpt-3.5-turbo-instruct", callbacks=[log10_callback], temperature=0.5)
 completion = llm.predict("You are a ping pong machine.\nPing?\n")
 print(completion)
 ```

@@ -88,7 +88,7 @@ os.environ["OPENAI_API_KEY"] = "<your OpenAI api key>"
 # Your Prediction Guard API key. Get one at predictionguard.com
 os.environ["PREDICTIONGUARD_TOKEN"] = "<your Prediction Guard access token>"
 
-pgllm = PredictionGuard(model="OpenAI-text-davinci-003")
+pgllm = PredictionGuard(model="OpenAI-gpt-3.5-turbo-instruct")
 
 template = """Question: {question}
 

@@ -222,7 +222,7 @@
 "source": [
 "import tiktoken\n",
 "\n",
-"enc = tiktoken.encoding_for_model(\"text-davinci-003\")\n",
+"enc = tiktoken.encoding_for_model(\"gpt-4\")\n",
 "\n",
 "\n",
 "def count_tokens(s):\n",

@@ -40,9 +40,9 @@
 },
 "outputs": [],
 "source": [
-"# Select the LLM to use. Here, we use text-davinci-003\n",
+"# Select the LLM to use. Here, we use gpt-3.5-turbo-instruct\n",
 "llm = OpenAI(\n",
-" temperature=0, max_tokens=700\n",
+" temperature=0, max_tokens=700, model_name=\"gpt-3.5-turbo-instruct\"\n",
 ") # You can swap between different core LLM's here."
 ]
 },

@@ -15,7 +15,7 @@
 "- It relies on authentication with the azure.identity package, which can be installed with `pip install azure-identity`. Alternatively you can create the powerbi dataset with a token as a string without supplying the credentials.\n",
 "- You can also supply a username to impersonate for use with datasets that have RLS enabled. \n",
 "- The toolkit uses a LLM to create the query from the question, the agent uses the LLM for the overall execution.\n",
-"- Testing was done mostly with a `text-davinci-003` model, codex models did not seem to perform ver well."
+"- Testing was done mostly with a `gpt-3.5-turbo-instruct` model, codex models did not seem to perform ver well."
 ]
 },
 {

@@ -36,7 +36,7 @@
 " ),\n",
 "]\n",
 "\n",
-"llm = OpenAI(temperature=0, model_name=\"text-davinci-002\")\n",
+"llm = OpenAI(temperature=0, model_name=\"gpt-3.5-turbo-instruct\")\n",
 "react = initialize_agent(tools, llm, agent=AgentType.REACT_DOCSTORE, verbose=True)"
 ]
 },

@@ -36,7 +36,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"llm = OpenAI(temperature=0, model_name=\"text-davinci-002\")\n",
+"llm = OpenAI(temperature=0, model_name=\"gpt-3.5-turbo-instruct\")\n",
 "tools = load_tools([\"serpapi\", \"llm-math\"], llm=llm)"
 ]
 },

@@ -9,7 +9,7 @@ from langchain.globals import set_llm_cache
 from langchain.llms import OpenAI
 
 # To make the caching really obvious, lets use a slower model.
-llm = OpenAI(model_name="text-davinci-002", n=2, best_of=2)
+llm = OpenAI(model_name="gpt-3.5-turbo-instruct", n=2, best_of=2)
 ```
 
 ## In Memory Cache

@@ -110,8 +110,8 @@ As an example, we will load a summarizer map-reduce chain. We will cache results
 
 
 ```python
-llm = OpenAI(model_name="text-davinci-002")
-no_cache_llm = OpenAI(model_name="text-davinci-002", cache=False)
+llm = OpenAI(model_name="gpt-3.5-turbo-instruct")
+no_cache_llm = OpenAI(model_name="gpt-3.5-turbo-instruct", cache=False)
 ```
 
 

@@ -55,7 +55,7 @@
 "from langchain.prompts import PromptTemplate\n",
 "from langchain_core.pydantic_v1 import BaseModel, Field, validator\n",
 "\n",
-"model = OpenAI(model_name=\"text-davinci-003\", temperature=0.0)\n",
+"model = OpenAI(model_name=\"gpt-3.5-turbo-instruct\", temperature=0.0)\n",
 "\n",
 "\n",
 "# Define your desired data structure.\n",

@@ -34,7 +34,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"model_name = \"text-davinci-003\"\n",
+"model_name = \"gpt-3.5-turbo-instruct\"\n",
 "temperature = 0.5\n",
 "model = OpenAI(model_name=model_name, temperature=temperature)"
 ]

@@ -35,7 +35,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"model_name = \"text-davinci-003\"\n",
+"model_name = \"gpt-3.5-turbo-instruct\"\n",
 "temperature = 0.0\n",
 "model = OpenAI(model_name=model_name, temperature=temperature)"
 ]

@@ -20,7 +20,7 @@ class EdenAiEmbeddings(BaseModel, Embeddings):
 
 model: Optional[str] = None
 """
-model name for above provider (eg: 'text-davinci-003' for openai)
+model name for above provider (eg: 'gpt-3.5-turbo-instruct' for openai)
 available models are shown on https://docs.edenai.co/ under 'available providers'
 """
 

@@ -45,7 +45,7 @@ class EdenAI(LLM):
 
 model: Optional[str] = None
 """
-model name for above provider (eg: 'text-davinci-003' for openai)
+model name for above provider (eg: 'gpt-3.5-turbo-instruct' for openai)
 available models are shown on https://docs.edenai.co/ under 'available providers'
 """
 

@@ -173,7 +173,7 @@ class BaseOpenAI(BaseLLM):
 
 client: Any = Field(default=None, exclude=True) #: :meta private:
 async_client: Any = Field(default=None, exclude=True) #: :meta private:
-model_name: str = Field(default="text-davinci-003", alias="model")
+model_name: str = Field(default="gpt-3.5-turbo-instruct", alias="model")
 """Model name to use."""
 temperature: float = 0.7
 """What sampling temperature to use."""

@@ -657,7 +657,7 @@ class BaseOpenAI(BaseLLM):
 Example:
 .. code-block:: python
 
-max_tokens = openai.modelname_to_contextsize("text-davinci-003")
+max_tokens = openai.modelname_to_contextsize("gpt-3.5-turbo-instruct")
 """
 model_token_mapping = {
 "gpt-4": 8192,

@@ -737,7 +737,7 @@ class OpenAI(BaseOpenAI):
 .. code-block:: python
 
 from langchain_community.llms import OpenAI
-openai = OpenAI(model_name="text-davinci-003")
+openai = OpenAI(model_name="gpt-3.5-turbo-instruct")
 """
 
 @classmethod

@@ -763,7 +763,7 @@ class AzureOpenAI(BaseOpenAI):
 .. code-block:: python
 
 from langchain_community.llms import AzureOpenAI
-openai = AzureOpenAI(model_name="text-davinci-003")
+openai = AzureOpenAI(model_name="gpt-3.5-turbo-instruct")
 """
 
 azure_endpoint: Union[str, None] = None

@@ -31,7 +31,7 @@ class PromptLayerOpenAI(OpenAI):
 .. code-block:: python
 
 from langchain_community.llms import PromptLayerOpenAI
-openai = PromptLayerOpenAI(model_name="text-davinci-003")
+openai = PromptLayerOpenAI(model_name="gpt-3.5-turbo-instruct")
 """
 
 pl_tags: Optional[List[str]]

@@ -163,7 +163,7 @@ def test_konko_additional_args_test() -> None:
 ChatKonko(model_kwargs={"temperature": 0.2})
 
 with pytest.raises(ValueError):
-ChatKonko(model_kwargs={"model": "text-davinci-003"})
+ChatKonko(model_kwargs={"model": "gpt-3.5-turbo-instruct"})
 
 
 def test_konko_token_streaming_test() -> None:

@@ -261,7 +261,7 @@ def test_chat_openai_extra_kwargs() -> None:
 
 # Test that "model" cannot be specified in kwargs
 with pytest.raises(ValueError):
-ChatOpenAI(model_kwargs={"model": "text-davinci-003"})
+ChatOpenAI(model_kwargs={"model": "gpt-3.5-turbo-instruct"})
 
 
 @pytest.mark.scheduled

@@ -50,7 +50,7 @@ def test_openai_extra_kwargs() -> None:
 
 # Test that "model" cannot be specified in kwargs
 with pytest.raises(ValueError):
-OpenAI(model_kwargs={"model": "text-davinci-003"})
+OpenAI(model_kwargs={"model": "gpt-3.5-turbo-instruct"})
 
 
 def test_openai_llm_output_contains_model_name() -> None:

@@ -286,7 +286,7 @@ def mock_completion() -> dict:
 "id": "cmpl-3evkmQda5Hu7fcZavknQda3SQ",
 "object": "text_completion",
 "created": 1689989000,
-"model": "text-davinci-003",
+"model": "gpt-3.5-turbo-instruct",
 "choices": [
 {"text": "Bar Baz", "index": 0, "logprobs": None, "finish_reason": "length"}
 ],

@@ -48,7 +48,7 @@ def mock_completion() -> dict:
 "id": "cmpl-3evkmQda5Hu7fcZavknQda3SQ",
 "object": "text_completion",
 "created": 1689989000,
-"model": "text-davinci-003",
+"model": "gpt-3.5-turbo-instruct",
 "choices": [
 {"text": "Bar Baz", "index": 0, "logprobs": None, "finish_reason": "length"}
 ],

@@ -7,7 +7,7 @@ from langchain.llms.openai import OpenAI
 
 def test_react() -> None:
 """Test functionality on a prompt."""
-llm = OpenAI(temperature=0, model_name="text-davinci-002")
+llm = OpenAI(temperature=0, model_name="gpt-3.5-turbo-instruct")
 react = ReActChain(llm=llm, docstore=Wikipedia())
 question = (
 "Author David Chanoff has collaborated with a U.S. Navy admiral "