Update docs to use new usage in openai>1.0.0 (#14163)
### Description

Use the new fine-tuning [APIs](https://github.com/openai/openai-python/blob/main/api.md#finetuning) introduced in `openai` 1.0.0.

### Twitter handle

[lin_bob57617](https://twitter.com/lin_bob57617)
Commit ac449f186b (parent 052e23be3e)
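For quick reference, the call-path changes the diffs below apply look like this — a minimal sketch assuming `openai>=1.0.0` is installed; `train.jsonl` is a hypothetical stand-in for the training data the notebooks actually assemble in memory. The pre-1.0 equivalents are noted in the comments.

```python
import openai  # requires openai>=1.0.0

# Hypothetical training file path for illustration; the notebooks build the
# JSONL in memory instead of reading it from disk.
my_file = open("train.jsonl", "rb")

# openai<1.0.0: openai.File.create(...) / openai.File.retrieve(...)
training_file = openai.files.create(file=my_file, purpose="fine-tune")

# openai<1.0.0: openai.FineTuningJob.create(...) / openai.FineTuningJob.retrieve(...)
job = openai.fine_tuning.jobs.create(
    training_file=training_file.id,
    model="gpt-3.5-turbo",
)
status = openai.fine_tuning.jobs.retrieve(job.id).status
```

In 1.x these module-level `openai.files` and `openai.fine_tuning.jobs` namespaces proxy to a default client, which is why the notebooks can keep the same module-level call style as before.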
@@ -243,16 +243,16 @@
 "    my_file.write((json.dumps({\"messages\": m}) + \"\\n\").encode(\"utf-8\"))\n",
 "\n",
 "my_file.seek(0)\n",
-"training_file = openai.File.create(file=my_file, purpose=\"fine-tune\")\n",
+"training_file = openai.files.create(file=my_file, purpose=\"fine-tune\")\n",
 "\n",
 "# OpenAI audits each training file for compliance reasons.\n",
 "# This make take a few minutes\n",
-"status = openai.File.retrieve(training_file.id).status\n",
+"status = openai.files.retrieve(training_file.id).status\n",
 "start_time = time.time()\n",
 "while status != \"processed\":\n",
 "    print(f\"Status=[{status}]... {time.time() - start_time:.2f}s\", end=\"\\r\", flush=True)\n",
 "    time.sleep(5)\n",
-"    status = openai.File.retrieve(training_file.id).status\n",
+"    status = openai.files.retrieve(training_file.id).status\n",
 "print(f\"File {training_file.id} ready after {time.time() - start_time:.2f} seconds.\")"
 ]
 },
@@ -271,7 +271,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"job = openai.FineTuningJob.create(\n",
+"job = openai.fine_tuning.jobs.create(\n",
 "    training_file=training_file.id,\n",
 "    model=\"gpt-3.5-turbo\",\n",
 ")"
@@ -300,12 +300,12 @@
 }
 ],
 "source": [
-"status = openai.FineTuningJob.retrieve(job.id).status\n",
+"status = openai.fine_tuning.jobs.retrieve(job.id).status\n",
 "start_time = time.time()\n",
 "while status != \"succeeded\":\n",
 "    print(f\"Status=[{status}]... {time.time() - start_time:.2f}s\", end=\"\\r\", flush=True)\n",
 "    time.sleep(5)\n",
-"    job = openai.FineTuningJob.retrieve(job.id)\n",
+"    job = openai.fine_tuning.jobs.retrieve(job.id)\n",
 "    status = job.status"
 ]
 },
@@ -416,7 +416,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.11.2"
+"version": "3.11.5"
 }
 },
 "nbformat": 4,
@@ -123,7 +123,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 10,
+"execution_count": 6,
 "id": "817bc077-c18a-473b-94a4-a7d810d583a8",
 "metadata": {},
 "outputs": [],
@@ -145,7 +145,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 11,
+"execution_count": 7,
 "id": "9e5ac127-b094-4584-9159-5a6d3d7315c7",
 "metadata": {},
 "outputs": [],
@@ -166,7 +166,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": null,
+"execution_count": 8,
 "id": "11d19e28-be49-4801-8065-1a58d13cd192",
 "metadata": {},
 "outputs": [
@@ -174,7 +174,7 @@
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"Status=[running]... 302.42s. 143.85s\r"
+"Status=[running]... 429.55s. 46.34s\r"
 ]
 }
 ],
@@ -190,20 +190,20 @@
 "    my_file.write((json.dumps({\"messages\": dialog}) + \"\\n\").encode(\"utf-8\"))\n",
 "\n",
 "my_file.seek(0)\n",
-"training_file = openai.File.create(file=my_file, purpose=\"fine-tune\")\n",
+"training_file = openai.files.create(file=my_file, purpose=\"fine-tune\")\n",
 "\n",
-"job = openai.FineTuningJob.create(\n",
+"job = openai.fine_tuning.jobs.create(\n",
 "    training_file=training_file.id,\n",
 "    model=\"gpt-3.5-turbo\",\n",
 ")\n",
 "\n",
 "# Wait for the fine-tuning to complete (this may take some time)\n",
-"status = openai.FineTuningJob.retrieve(job.id).status\n",
+"status = openai.fine_tuning.jobs.retrieve(job.id).status\n",
 "start_time = time.time()\n",
 "while status != \"succeeded\":\n",
 "    print(f\"Status=[{status}]... {time.time() - start_time:.2f}s\", end=\"\\r\", flush=True)\n",
 "    time.sleep(5)\n",
-"    status = openai.FineTuningJob.retrieve(job.id).status\n",
+"    status = openai.fine_tuning.jobs.retrieve(job.id).status\n",
 "\n",
 "# Now your model is fine-tuned!"
 ]
@@ -220,16 +220,18 @@
 },
 {
 "cell_type": "code",
-"execution_count": null,
+"execution_count": 10,
 "id": "3f472ca4-fa9b-485d-bd37-8ce3c59c44db",
 "metadata": {},
 "outputs": [],
 "source": [
 "# Get the fine-tuned model ID\n",
-"job = openai.FineTuningJob.retrieve(job.id)\n",
+"job = openai.fine_tuning.jobs.retrieve(job.id)\n",
 "model_id = job.fine_tuned_model\n",
 "\n",
 "# Use the fine-tuned model in LangChain\n",
 "from langchain.chat_models import ChatOpenAI\n",
 "\n",
 "model = ChatOpenAI(\n",
 "    model=model_id,\n",
 "    temperature=1,\n",
@@ -238,10 +240,21 @@
 },
 {
 "cell_type": "code",
-"execution_count": null,
+"execution_count": 11,
 "id": "7d3b5845-6385-42d1-9f7d-5ea798dc2cd9",
 "metadata": {},
-"outputs": [],
+"outputs": [
+{
+"data": {
+"text/plain": [
+"AIMessage(content='[{\"s\": \"There were three ravens\", \"object\": \"tree\", \"relation\": \"sat on\"}, {\"s\": \"three ravens\", \"object\": \"a tree\", \"relation\": \"sat on\"}]')"
+]
+},
+"execution_count": 11,
+"metadata": {},
+"output_type": "execute_result"
+}
+],
 "source": [
 "model.invoke(\"There were three ravens sat on a tree.\")"
 ]
@@ -271,7 +284,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.9.1"
+"version": "3.11.5"
 }
 },
 "nbformat": 4,
@@ -35,7 +35,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 2,
+"execution_count": 1,
 "id": "473adce5-c863-49e6-85c3-049e0ec2222e",
 "metadata": {},
 "outputs": [],
@@ -65,7 +65,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 3,
+"execution_count": 2,
 "id": "9a36d27f-2f3b-4148-b94a-9436fe8b00e0",
 "metadata": {},
 "outputs": [],
@@ -105,7 +105,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 4,
+"execution_count": 3,
 "id": "89bcc676-27e8-40dc-a4d6-92cf28e0db58",
 "metadata": {},
 "outputs": [
@@ -144,7 +144,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 5,
+"execution_count": 4,
 "id": "cd44ff01-22cf-431a-8bf4-29a758d1fcff",
 "metadata": {},
 "outputs": [],
@@ -169,18 +169,10 @@
 },
 {
 "cell_type": "code",
-"execution_count": 6,
+"execution_count": 5,
 "id": "62da7d8f-5cfc-45a6-946e-2bcda2b0ba1f",
 "metadata": {},
-"outputs": [
-{
-"name": "stderr",
-"output_type": "stream",
-"text": [
-"Retrying langchain.chat_models.openai.ChatOpenAI.completion_with_retry.<locals>._completion_with_retry in 4.0 seconds as it raised ServiceUnavailableError: The server is overloaded or not ready yet..\n"
-]
-}
-],
+"outputs": [],
 "source": [
 "math_questions = [\n",
 "    \"What's 45/9?\",\n",
@@ -219,7 +211,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 7,
+"execution_count": 6,
 "id": "d6037992-050d-4ada-a061-860c124f0bf1",
 "metadata": {},
 "outputs": [],
@@ -231,7 +223,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 8,
+"execution_count": 7,
 "id": "0444919a-6f5a-4726-9916-4603b1420d0e",
 "metadata": {},
 "outputs": [],
@@ -266,7 +258,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 9,
+"execution_count": 8,
 "id": "817bc077-c18a-473b-94a4-a7d810d583a8",
 "metadata": {},
 "outputs": [],
@@ -288,7 +280,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 10,
+"execution_count": 9,
 "id": "9e5ac127-b094-4584-9159-5a6d3d7315c7",
 "metadata": {},
 "outputs": [],
@@ -309,7 +301,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 11,
+"execution_count": 10,
 "id": "11d19e28-be49-4801-8065-1a58d13cd192",
 "metadata": {},
 "outputs": [
@@ -317,7 +309,7 @@
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"Status=[running]... 346.26s. 31.70s\r"
+"Status=[running]... 349.84s. 17.72s\r"
 ]
 }
 ],
@@ -333,20 +325,20 @@
 "    my_file.write((json.dumps({\"messages\": dialog}) + \"\\n\").encode(\"utf-8\"))\n",
 "\n",
 "my_file.seek(0)\n",
-"training_file = openai.File.create(file=my_file, purpose=\"fine-tune\")\n",
+"training_file = openai.files.create(file=my_file, purpose=\"fine-tune\")\n",
 "\n",
-"job = openai.FineTuningJob.create(\n",
+"job = openai.fine_tuning.jobs.create(\n",
 "    training_file=training_file.id,\n",
 "    model=\"gpt-3.5-turbo\",\n",
 ")\n",
 "\n",
 "# Wait for the fine-tuning to complete (this may take some time)\n",
-"status = openai.FineTuningJob.retrieve(job.id).status\n",
+"status = openai.fine_tuning.jobs.retrieve(job.id).status\n",
 "start_time = time.time()\n",
 "while status != \"succeeded\":\n",
 "    print(f\"Status=[{status}]... {time.time() - start_time:.2f}s\", end=\"\\r\", flush=True)\n",
 "    time.sleep(5)\n",
-"    status = openai.FineTuningJob.retrieve(job.id).status\n",
+"    status = openai.fine_tuning.jobs.retrieve(job.id).status\n",
 "\n",
 "# Now your model is fine-tuned!"
 ]
@@ -363,16 +355,18 @@
 },
 {
 "cell_type": "code",
-"execution_count": 12,
+"execution_count": 11,
 "id": "7f45b281-1dfa-43cb-bd28-99fa7e9f45d1",
 "metadata": {},
 "outputs": [],
 "source": [
 "# Get the fine-tuned model ID\n",
-"job = openai.FineTuningJob.retrieve(job.id)\n",
+"job = openai.fine_tuning.jobs.retrieve(job.id)\n",
 "model_id = job.fine_tuned_model\n",
 "\n",
 "# Use the fine-tuned model in LangChain\n",
 "from langchain.chat_models import ChatOpenAI\n",
 "\n",
 "model = ChatOpenAI(\n",
 "    model=model_id,\n",
 "    temperature=1,\n",
@@ -381,17 +375,17 @@
 },
 {
 "cell_type": "code",
-"execution_count": 18,
+"execution_count": 12,
 "id": "7d3b5845-6385-42d1-9f7d-5ea798dc2cd9",
 "metadata": {},
 "outputs": [
 {
 "data": {
 "text/plain": [
-"AIMessage(content='{\\n \"num1\": 56,\\n \"num2\": 7,\\n \"operation\": \"/\"\\n}')"
+"AIMessage(content='Let me calculate that for you.')"
 ]
 },
-"execution_count": 18,
+"execution_count": 12,
 "metadata": {},
 "output_type": "execute_result"
 }
@@ -425,7 +419,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.9.1"
+"version": "3.11.5"
 }
 },
 "nbformat": 4,