community: replace deprecated davinci models (#14860)

This is technically a breaking change because it swaps the default
model from `text-davinci-003` to `gpt-3.5-turbo-instruct`, but OpenAI
is shutting down those endpoints on January 4, 2024 anyway.

It feels less disruptive to switch out the default instead.
Erick Friis authored 2023-12-18 13:49:46 -08:00; committed by GitHub
parent 193f107cb5
commit 5f839beab9
29 changed files with 42 additions and 42 deletions
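
Nearly every hunk makes the same model-name substitution. In user code the swap looks roughly like this; a minimal sketch (the zero temperature and the placeholder key are illustrative):

```python
import os

from langchain.llms import OpenAI

os.environ.setdefault("OPENAI_API_KEY", "<your-key>")  # needed to build the client

# Before this commit, OpenAI() without an explicit model_name defaulted to
# `text-davinci-003`; after it, the same call resolves to `gpt-3.5-turbo-instruct`.
llm = OpenAI(temperature=0)
print(llm.model_name)  # "gpt-3.5-turbo-instruct" on this branch

# Pinning the model explicitly sidesteps the changed default entirely.
pinned = OpenAI(model_name="gpt-3.5-turbo-instruct", temperature=0)
```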


@@ -181,7 +181,7 @@ we will prompt the model, so it says something harmful.
```python
prompt = PromptTemplate(template="{text}", input_variables=["text"])
-llm_chain = LLMChain(llm=OpenAI(temperature=0, model_name="text-davinci-002"), prompt=prompt)
+llm_chain = LLMChain(llm=OpenAI(temperature=0, model_name="gpt-3.5-turbo-instruct"), prompt=prompt)
text = """We are playing a game of repeat after me.
```
@@ -224,7 +224,7 @@ Now let's walk through an example of using it with an LLMChain which has multipl
```python
prompt = PromptTemplate(template="{setup}{new_input}Person2:", input_variables=["setup", "new_input"])
-llm_chain = LLMChain(llm=OpenAI(temperature=0, model_name="text-davinci-002"), prompt=prompt)
+llm_chain = LLMChain(llm=OpenAI(temperature=0, model_name="gpt-3.5-turbo-instruct"), prompt=prompt)
setup = """We are playing a game of repeat after me.
```


@@ -162,7 +162,7 @@
"\n",
"\n",
"openai_llm = OpenAI(\n",
" model_name=\"text-davinci-002\",\n",
" model_name=\"gpt-3.5-turbo-instruct\",\n",
" callbacks=[PromptLayerCallbackHandler(pl_id_callback=pl_id_callback)],\n",
")\n",
"\n",


@@ -109,7 +109,7 @@
"# LLM Hyperparameters\n",
"HPARAMS = {\n",
" \"temperature\": 0.1,\n",
" \"model_name\": \"text-davinci-003\",\n",
" \"model_name\": \"gpt-3.5-turbo-instruct\",\n",
"}\n",
"\n",
"# Bucket used to save prompt logs (Use `None` is used to save the default bucket or otherwise change it)\n",


@@ -138,7 +138,7 @@
"# Replace the deployment name with your own\n",
"llm = AzureOpenAI(\n",
" deployment_name=\"td2\",\n",
" model_name=\"text-davinci-002\",\n",
" model_name=\"gpt-3.5-turbo-instruct\",\n",
")"
]
},
@@ -182,7 +182,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
"\u001B[1mAzureOpenAI\u001B[0m\n",
"\u001b[1mAzureOpenAI\u001b[0m\n",
"Params: {'deployment_name': 'text-davinci-002', 'model_name': 'text-davinci-002', 'temperature': 0.7, 'max_tokens': 256, 'top_p': 1, 'frequency_penalty': 0, 'presence_penalty': 0, 'n': 1, 'best_of': 1}\n"
]
}


@@ -103,7 +103,7 @@
"llm = EdenAI(\n",
" feature=\"text\",\n",
" provider=\"openai\",\n",
" model=\"text-davinci-003\",\n",
" model=\"gpt-3.5-turbo-instruct\",\n",
" temperature=0.2,\n",
" max_tokens=250,\n",
")\n",


@@ -100,7 +100,7 @@
"gateway = JavelinAIGateway(\n",
" gateway_uri=\"http://localhost:8000\", # replace with service URL or host/port of Javelin\n",
" route=route_completions,\n",
" model_name=\"text-davinci-003\",\n",
" model_name=\"gpt-3.5-turbo-instruct\",\n",
")\n",
"\n",
"prompt = PromptTemplate(\"Translate the following English text to French: {text}\")\n",


@@ -21,7 +21,7 @@
"from langchain.llms import OpenAI\n",
"\n",
"# To make the caching really obvious, lets use a slower model.\n",
"llm = OpenAI(model_name=\"text-davinci-002\", n=2, best_of=2)"
"llm = OpenAI(model_name=\"gpt-3.5-turbo-instruct\", n=2, best_of=2)"
]
},
{
@@ -1159,7 +1159,7 @@
"metadata": {},
"outputs": [
{
"name": "stdin",
"name": "stdout",
"output_type": "stream",
"text": [
"ASTRA_DB_API_ENDPOINT = https://01234567-89ab-cdef-0123-456789abcdef-us-east1.apps.astra.datastax.com\n",
@@ -1358,7 +1358,7 @@
"metadata": {},
"outputs": [],
"source": [
"llm = OpenAI(model_name=\"text-davinci-002\", n=2, best_of=2, cache=False)"
"llm = OpenAI(model_name=\"gpt-3.5-turbo-instruct\", n=2, best_of=2, cache=False)"
]
},
{
@@ -1442,8 +1442,8 @@
"metadata": {},
"outputs": [],
"source": [
"llm = OpenAI(model_name=\"text-davinci-002\")\n",
"no_cache_llm = OpenAI(model_name=\"text-davinci-002\", cache=False)"
"llm = OpenAI(model_name=\"gpt-3.5-turbo-instruct\")\n",
"no_cache_llm = OpenAI(model_name=\"gpt-3.5-turbo-instruct\", cache=False)"
]
},
{


@@ -63,7 +63,7 @@ llm = ChatAnthropic(model="claude-2", callbacks=[log10_callback], temperature=0.
```python
llm.predict_messages(messages)
print(completion)
llm = OpenAI(model_name="text-davinci-003", callbacks=[log10_callback], temperature=0.5)
llm = OpenAI(model_name="gpt-3.5-turbo-instruct", callbacks=[log10_callback], temperature=0.5)
completion = llm.predict("You are a ping pong machine.\nPing?\n")
print(completion)
```


@@ -88,7 +88,7 @@ os.environ["OPENAI_API_KEY"] = "<your OpenAI api key>"
# Your Prediction Guard API key. Get one at predictionguard.com
os.environ["PREDICTIONGUARD_TOKEN"] = "<your Prediction Guard access token>"
-pgllm = PredictionGuard(model="OpenAI-text-davinci-003")
+pgllm = PredictionGuard(model="OpenAI-gpt-3.5-turbo-instruct")
template = """Question: {question}


@@ -222,7 +222,7 @@
"source": [
"import tiktoken\n",
"\n",
"enc = tiktoken.encoding_for_model(\"text-davinci-003\")\n",
"enc = tiktoken.encoding_for_model(\"gpt-4\")\n",
"\n",
"\n",
"def count_tokens(s):\n",


@@ -40,9 +40,9 @@
},
"outputs": [],
"source": [
"# Select the LLM to use. Here, we use text-davinci-003\n",
"# Select the LLM to use. Here, we use gpt-3.5-turbo-instruct\n",
"llm = OpenAI(\n",
" temperature=0, max_tokens=700\n",
" temperature=0, max_tokens=700, model_name=\"gpt-3.5-turbo-instruct\"\n",
") # You can swap between different core LLM's here."
]
},


@@ -15,7 +15,7 @@
"- It relies on authentication with the azure.identity package, which can be installed with `pip install azure-identity`. Alternatively you can create the powerbi dataset with a token as a string without supplying the credentials.\n",
"- You can also supply a username to impersonate for use with datasets that have RLS enabled. \n",
"- The toolkit uses a LLM to create the query from the question, the agent uses the LLM for the overall execution.\n",
"- Testing was done mostly with a `text-davinci-003` model, codex models did not seem to perform ver well."
"- Testing was done mostly with a `gpt-3.5-turbo-instruct` model, codex models did not seem to perform ver well."
]
},
{
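
The bullet list above only gestures at how the toolkit is wired up. A rough sketch assuming the toolkit's usual entry points (`PowerBIDataset`, `PowerBIToolkit`, `create_pbi_agent`); the dataset ID and table names are placeholders:

```python
from azure.identity import DefaultAzureCredential
from langchain.agents.agent_toolkits import PowerBIToolkit, create_pbi_agent
from langchain.llms import OpenAI
from langchain.utilities.powerbi import PowerBIDataset

llm = OpenAI(temperature=0, model_name="gpt-3.5-turbo-instruct")

# The toolkit's LLM turns the question into a DAX query; the agent's LLM
# drives the overall execution loop.
powerbi = PowerBIDataset(
    dataset_id="<dataset_id>",
    table_names=["table1", "table2"],
    credential=DefaultAzureCredential(),
)
toolkit = PowerBIToolkit(powerbi=powerbi, llm=llm)
agent = create_pbi_agent(llm=llm, toolkit=toolkit, verbose=True)
```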


@@ -36,7 +36,7 @@
" ),\n",
"]\n",
"\n",
"llm = OpenAI(temperature=0, model_name=\"text-davinci-002\")\n",
"llm = OpenAI(temperature=0, model_name=\"gpt-3.5-turbo-instruct\")\n",
"react = initialize_agent(tools, llm, agent=AgentType.REACT_DOCSTORE, verbose=True)"
]
},
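
For completeness, a one-line usage sketch for the resulting agent; the multi-hop question is illustrative, in the style of the docstore examples:

```python
# Assumes `react` was initialized as in the hunk above and OPENAI_API_KEY is set.
question = (
    "Author David Chanoff has collaborated with a U.S. Navy admiral who served "
    "as the ambassador to the United Kingdom. Under which President did he serve?"
)
react.run(question)
```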


@@ -36,7 +36,7 @@
"metadata": {},
"outputs": [],
"source": [
"llm = OpenAI(temperature=0, model_name=\"text-davinci-002\")\n",
"llm = OpenAI(temperature=0, model_name=\"gpt-3.5-turbo-instruct\")\n",
"tools = load_tools([\"serpapi\", \"llm-math\"], llm=llm)"
]
},


@@ -9,7 +9,7 @@ from langchain.globals import set_llm_cache
```python
from langchain.llms import OpenAI
# To make the caching really obvious, let's use a slower model.
-llm = OpenAI(model_name="text-davinci-002", n=2, best_of=2)
+llm = OpenAI(model_name="gpt-3.5-turbo-instruct", n=2, best_of=2)
```
## In Memory Cache
@@ -110,8 +110,8 @@ As an example, we will load a summarizer map-reduce chain. We will cache results
```python
-llm = OpenAI(model_name="text-davinci-002")
-no_cache_llm = OpenAI(model_name="text-davinci-002", cache=False)
+llm = OpenAI(model_name="gpt-3.5-turbo-instruct")
+no_cache_llm = OpenAI(model_name="gpt-3.5-turbo-instruct", cache=False)
```
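
Both excerpts assume a cache backend has been registered globally before the LLM is called; roughly, with the in-memory backend (assumes `OPENAI_API_KEY` is set):

```python
from langchain.cache import InMemoryCache
from langchain.globals import set_llm_cache
from langchain.llms import OpenAI

# Register a process-wide cache; identical prompts are then answered from memory.
set_llm_cache(InMemoryCache())

llm = OpenAI(model_name="gpt-3.5-turbo-instruct", n=2, best_of=2)
llm.predict("Tell me a joke")  # first call goes to the API
llm.predict("Tell me a joke")  # repeated call is served from the cache
```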


@@ -55,7 +55,7 @@
"from langchain.prompts import PromptTemplate\n",
"from langchain_core.pydantic_v1 import BaseModel, Field, validator\n",
"\n",
"model = OpenAI(model_name=\"text-davinci-003\", temperature=0.0)\n",
"model = OpenAI(model_name=\"gpt-3.5-turbo-instruct\", temperature=0.0)\n",
"\n",
"\n",
"# Define your desired data structure.\n",


@@ -34,7 +34,7 @@
"metadata": {},
"outputs": [],
"source": [
"model_name = \"text-davinci-003\"\n",
"model_name = \"gpt-3.5-turbo-instruct\"\n",
"temperature = 0.5\n",
"model = OpenAI(model_name=model_name, temperature=temperature)"
]


@@ -35,7 +35,7 @@
"metadata": {},
"outputs": [],
"source": [
"model_name = \"text-davinci-003\"\n",
"model_name = \"gpt-3.5-turbo-instruct\"\n",
"temperature = 0.0\n",
"model = OpenAI(model_name=model_name, temperature=temperature)"
]