update openai functions docs (#9278)

Bagatur 2023-08-15 17:00:56 -07:00 committed by GitHub
parent 9abf60acb6
commit afba2be3dc
2 changed files with 62 additions and 85 deletions


@@ -83,10 +83,8 @@
 "\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
 "Prompt after formatting:\n",
 "\u001b[32;1m\u001b[1;3mSystem: You are a world class algorithm for extracting information in structured formats.\n",
-"Human: Use the given format to extract information from the following input:\n",
-"Human: Sally is 13\n",
-"Human: Tips: Make sure to answer in the correct format\u001b[0m\n",
-" {'function_call': {'name': '_OutputFormatter', 'arguments': '{\\n \"output\": {\\n \"name\": \"Sally\",\\n \"age\": 13,\\n \"fav_food\": \"Unknown\"\\n }\\n}'}}\n",
+"Human: Use the given format to extract information from the following input: Sally is 13\n",
+"Human: Tip: Make sure to answer in the correct format\u001b[0m\n",
 "\n",
 "\u001b[1m> Finished chain.\u001b[0m\n"
 ]
@@ -105,18 +103,13 @@
 "source": [
 "# If we pass in a model explicitly, we need to make sure it supports the OpenAI function-calling API.\n",
 "llm = ChatOpenAI(model=\"gpt-4\", temperature=0)\n",
-"\n",
-"prompt_msgs = [\n",
-"    SystemMessage(\n",
-"        content=\"You are a world class algorithm for extracting information in structured formats.\"\n",
-"    ),\n",
-"    HumanMessage(\n",
-"        content=\"Use the given format to extract information from the following input:\"\n",
-"    ),\n",
-"    HumanMessagePromptTemplate.from_template(\"{input}\"),\n",
-"    HumanMessage(content=\"Tips: Make sure to answer in the correct format\"),\n",
-"]\n",
-"prompt = ChatPromptTemplate(messages=prompt_msgs)\n",
+"prompt = ChatPromptTemplate.from_messages(\n",
+"    [\n",
+"        (\"system\", \"You are a world class algorithm for extracting information in structured formats.\"),\n",
+"        (\"human\", \"Use the given format to extract information from the following input: {input}\"),\n",
+"        (\"human\", \"Tip: Make sure to answer in the correct format\"),\n",
+"    ]\n",
+")\n",
 "\n",
 "chain = create_structured_output_chain(Person, llm, prompt, verbose=True)\n",
 "chain.run(\"Sally is 13\")"
@@ -132,7 +125,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 4,
+"execution_count": 5,
 "id": "4d8ea815",
 "metadata": {},
 "outputs": [
@@ -145,10 +138,8 @@
 "\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
 "Prompt after formatting:\n",
 "\u001b[32;1m\u001b[1;3mSystem: You are a world class algorithm for extracting information in structured formats.\n",
-"Human: Use the given format to extract information from the following input:\n",
-"Human: Sally is 13, Joey just turned 12 and loves spinach. Caroline is 10 years older than Sally, so she's 23.\n",
-"Human: Tips: Make sure to answer in the correct format\u001b[0m\n",
-" {'function_call': {'name': '_OutputFormatter', 'arguments': '{\\n \"output\": {\\n \"people\": [\\n {\\n \"name\": \"Sally\",\\n \"age\": 13,\\n \"fav_food\": \"\"\\n },\\n {\\n \"name\": \"Joey\",\\n \"age\": 12,\\n \"fav_food\": \"spinach\"\\n },\\n {\\n \"name\": \"Caroline\",\\n \"age\": 23,\\n \"fav_food\": \"\"\\n }\\n ]\\n }\\n}'}}\n",
+"Human: Use the given format to extract information from the following input: Sally is 13, Joey just turned 12 and loves spinach. Caroline is 10 years older than Sally.\n",
+"Human: Tip: Make sure to answer in the correct format\u001b[0m\n",
 "\n",
 "\u001b[1m> Finished chain.\u001b[0m\n"
 ]
@@ -159,7 +150,7 @@
 "People(people=[Person(name='Sally', age=13, fav_food=''), Person(name='Joey', age=12, fav_food='spinach'), Person(name='Caroline', age=23, fav_food='')])"
 ]
 },
-"execution_count": 4,
+"execution_count": 5,
 "metadata": {},
 "output_type": "execute_result"
 }
@@ -176,7 +167,7 @@
 "\n",
 "chain = create_structured_output_chain(People, llm, prompt, verbose=True)\n",
 "chain.run(\n",
-"    \"Sally is 13, Joey just turned 12 and loves spinach. Caroline is 10 years older than Sally, so she's 23.\"\n",
+"    \"Sally is 13, Joey just turned 12 and loves spinach. Caroline is 10 years older than Sally.\"\n",
 ")"
 ]
 },
@@ -229,10 +220,8 @@
 "\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
 "Prompt after formatting:\n",
 "\u001b[32;1m\u001b[1;3mSystem: You are a world class algorithm for extracting information in structured formats.\n",
-"Human: Use the given format to extract information from the following input:\n",
-"Human: Sally is 13\n",
-"Human: Tips: Make sure to answer in the correct format\u001b[0m\n",
-" {'function_call': {'name': 'output_formatter', 'arguments': '{\\n \"name\": \"Sally\",\\n \"age\": 13\\n}'}}\n",
+"Human: Use the given format to extract information from the following input: Sally is 13\n",
+"Human: Tip: Make sure to answer in the correct format\u001b[0m\n",
 "\n",
 "\u001b[1m> Finished chain.\u001b[0m\n"
 ]
@@ -279,7 +268,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 8,
+"execution_count": 9,
 "id": "17f52508",
 "metadata": {},
 "outputs": [],
@@ -302,7 +291,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 9,
+"execution_count": 10,
 "id": "a4658ad8",
 "metadata": {},
 "outputs": [
@@ -314,11 +303,9 @@
 "\n",
 "\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
 "Prompt after formatting:\n",
-"\u001b[32;1m\u001b[1;3mSystem: You are a world class algorithm for recording entities\n",
-"Human: Make calls to the relevant function to record the entities in the following input:\n",
-"Human: Harry was a chubby brown beagle who loved chicken\n",
-"Human: Tips: Make sure to answer in the correct format\u001b[0m\n",
-" {'function_call': {'name': 'RecordDog', 'arguments': '{\\n \"name\": \"Harry\",\\n \"color\": \"brown\",\\n \"fav_food\": \"chicken\"\\n}'}}\n",
+"\u001b[32;1m\u001b[1;3mSystem: You are a world class algorithm for recording entities.\n",
+"Human: Make calls to the relevant function to record the entities in the following input: Harry was a chubby brown beagle who loved chicken\n",
+"Human: Tip: Make sure to answer in the correct format\u001b[0m\n",
 "\n",
 "\u001b[1m> Finished chain.\u001b[0m\n"
 ]
@@ -329,21 +316,19 @@
 "RecordDog(name='Harry', color='brown', fav_food='chicken')"
 ]
 },
-"execution_count": 9,
+"execution_count": 10,
 "metadata": {},
 "output_type": "execute_result"
 }
 ],
 "source": [
-"prompt_msgs = [\n",
-"    SystemMessage(content=\"You are a world class algorithm for recording entities\"),\n",
-"    HumanMessage(\n",
-"        content=\"Make calls to the relevant function to record the entities in the following input:\"\n",
-"    ),\n",
-"    HumanMessagePromptTemplate.from_template(\"{input}\"),\n",
-"    HumanMessage(content=\"Tips: Make sure to answer in the correct format\"),\n",
-"]\n",
-"prompt = ChatPromptTemplate(messages=prompt_msgs)\n",
+"prompt = ChatPromptTemplate.from_messages(\n",
+"    [\n",
+"        (\"system\", \"You are a world class algorithm for recording entities.\"),\n",
+"        (\"human\", \"Make calls to the relevant function to record the entities in the following input: {input}\"),\n",
+"        (\"human\", \"Tip: Make sure to answer in the correct format\"),\n",
+"    ]\n",
+")\n",
 "\n",
 "chain = create_openai_fn_chain([RecordPerson, RecordDog], llm, prompt, verbose=True)\n",
 "chain.run(\"Harry was a chubby brown beagle who loved chicken\")"
@@ -362,7 +347,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 10,
+"execution_count": 11,
 "id": "95ac5825",
 "metadata": {},
 "outputs": [
@@ -374,11 +359,9 @@
 "\n",
 "\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
 "Prompt after formatting:\n",
-"\u001b[32;1m\u001b[1;3mSystem: You are a world class algorithm for recording entities\n",
-"Human: Make calls to the relevant function to record the entities in the following input:\n",
-"Human: The most important thing to remember about Tommy, my 12 year old, is that he'll do anything for apple pie.\n",
-"Human: Tips: Make sure to answer in the correct format\u001b[0m\n",
-" {'function_call': {'name': 'record_person', 'arguments': '{\\n \"name\": \"Tommy\",\\n \"age\": 12,\\n \"fav_food\": {\\n \"food\": \"apple pie\"\\n }\\n}'}}\n",
+"\u001b[32;1m\u001b[1;3mSystem: You are a world class algorithm for recording entities.\n",
+"Human: Make calls to the relevant function to record the entities in the following input: The most important thing to remember about Tommy, my 12 year old, is that he'll do anything for apple pie.\n",
+"Human: Tip: Make sure to answer in the correct format\u001b[0m\n",
 "\n",
 "\u001b[1m> Finished chain.\u001b[0m\n"
 ]
@@ -389,7 +372,7 @@
 "{'name': 'Tommy', 'age': 12, 'fav_food': {'food': 'apple pie'}}"
 ]
 },
-"execution_count": 10,
+"execution_count": 11,
 "metadata": {},
 "output_type": "execute_result"
 }
@@ -434,7 +417,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 11,
+"execution_count": 12,
 "id": "8b0d11de",
 "metadata": {},
 "outputs": [
@@ -446,11 +429,9 @@
 "\n",
 "\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
 "Prompt after formatting:\n",
-"\u001b[32;1m\u001b[1;3mSystem: You are a world class algorithm for recording entities\n",
-"Human: Make calls to the relevant function to record the entities in the following input:\n",
-"Human: I can't find my dog Henry anywhere, he's a small brown beagle. Could you send a message about him?\n",
-"Human: Tips: Make sure to answer in the correct format\u001b[0m\n",
-" {'function_call': {'name': 'record_dog', 'arguments': '{\\n \"name\": \"Henry\",\\n \"color\": \"brown\",\\n \"fav_food\": {\\n \"food\": null\\n }\\n}'}}\n",
+"\u001b[32;1m\u001b[1;3mSystem: You are a world class algorithm for recording entities.\n",
+"Human: Make calls to the relevant function to record the entities in the following input: I can't find my dog Henry anywhere, he's a small brown beagle. Could you send a message about him?\n",
+"Human: Tip: Make sure to answer in the correct format\u001b[0m\n",
 "\n",
 "\u001b[1m> Finished chain.\u001b[0m\n"
 ]
@@ -462,7 +443,7 @@
 " 'arguments': {'name': 'Henry', 'color': 'brown', 'fav_food': {'food': None}}}"
 ]
 },
-"execution_count": 11,
+"execution_count": 12,
 "metadata": {},
 "output_type": "execute_result"
 }
@@ -502,9 +483,9 @@
 ],
 "metadata": {
 "kernelspec": {
-"display_name": "venv",
+"display_name": "poetry-venv",
 "language": "python",
-"name": "venv"
+"name": "poetry-venv"
 },
 "language_info": {
 "codemirror_mode": {
@@ -516,7 +497,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.11.3"
+"version": "3.9.1"
 }
 },
 "nbformat": 4,


@@ -226,9 +226,9 @@ def create_openai_fn_chain(
 from langchain.chains.openai_functions import create_openai_fn_chain
 from langchain.chat_models import ChatOpenAI
-from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate
-from pydantic_v1 import BaseModel, Field
+from langchain.prompts import ChatPromptTemplate
+from pydantic import BaseModel, Field

 class RecordPerson(BaseModel):
@@ -247,17 +247,15 @@ def create_openai_fn_chain(
 fav_food: Optional[str] = Field(None, description="The dog's favorite food")

-llm = ChatOpenAI(model="gpt-3.5-turbo-0613", temperature=0)
-prompt_msgs = [
-    SystemMessage(
-        content="You are a world class algorithm for recording entities"
-    ),
-    HumanMessage(content="Make calls to the relevant function to record the entities in the following input:"),
-    HumanMessagePromptTemplate.from_template("{input}"),
-    HumanMessage(content="Tips: Make sure to answer in the correct format"),
-]
-prompt = ChatPromptTemplate(messages=prompt_msgs)
-chain = create_openai_fn_chain([RecordPerson, RecordDog])
+llm = ChatOpenAI(model="gpt-4", temperature=0)
+prompt = ChatPromptTemplate.from_messages(
+    [
+        ("system", "You are a world class algorithm for recording entities."),
+        ("human", "Make calls to the relevant function to record the entities in the following input: {input}"),
+        ("human", "Tip: Make sure to answer in the correct format"),
+    ]
+)
+chain = create_openai_fn_chain([RecordPerson, RecordDog], llm, prompt)
 chain.run("Harry was a chubby brown beagle who loved chicken")
 # -> RecordDog(name="Harry", color="brown", fav_food="chicken")
 """ # noqa: E501
@@ -312,9 +310,9 @@ def create_structured_output_chain(
 from langchain.chains.openai_functions import create_structured_output_chain
 from langchain.chat_models import ChatOpenAI
-from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate
-from pydantic_v1 import BaseModel, Field
+from langchain.prompts import ChatPromptTemplate
+from pydantic import BaseModel, Field

 class Dog(BaseModel):
 \"\"\"Identifying information about a dog.\"\"\"
@@ -324,15 +322,13 @@ def create_structured_output_chain(
 fav_food: Optional[str] = Field(None, description="The dog's favorite food")

 llm = ChatOpenAI(model="gpt-3.5-turbo-0613", temperature=0)
-prompt_msgs = [
-    SystemMessage(
-        content="You are a world class algorithm for extracting information in structured formats."
-    ),
-    HumanMessage(content="Use the given format to extract information from the following input:"),
-    HumanMessagePromptTemplate.from_template("{input}"),
-    HumanMessage(content="Tips: Make sure to answer in the correct format"),
-]
-prompt = ChatPromptTemplate(messages=prompt_msgs)
+prompt = ChatPromptTemplate.from_messages(
+    [
+        ("system", "You are a world class algorithm for extracting information in structured formats."),
+        ("human", "Use the given format to extract information from the following input: {input}"),
+        ("human", "Tip: Make sure to answer in the correct format"),
+    ]
+)
 chain = create_structured_output_chain(Dog, llm, prompt)
 chain.run("Harry was a chubby brown beagle who loved chicken")
 # -> Dog(name="Harry", color="brown", fav_food="chicken")
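
The same pattern for create_structured_output_chain, assembled as a runnable sketch. Again, the Dog name and color fields are assumptions (only fav_food appears in this hunk), and an OPENAI_API_KEY is assumed to be set in the environment.

# Sketch assembled from the updated create_structured_output_chain docstring example.
from typing import Optional

from langchain.chains.openai_functions import create_structured_output_chain
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from pydantic import BaseModel, Field


class Dog(BaseModel):
    """Identifying information about a dog."""

    name: str = Field(..., description="The dog's name")
    color: str = Field(..., description="The dog's color")
    fav_food: Optional[str] = Field(None, description="The dog's favorite food")


llm = ChatOpenAI(model="gpt-3.5-turbo-0613", temperature=0)
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a world class algorithm for extracting information in structured formats."),
        ("human", "Use the given format to extract information from the following input: {input}"),
        ("human", "Tip: Make sure to answer in the correct format"),
    ]
)

# Unlike create_openai_fn_chain with several functions, a structured output
# chain always formats its answer to the single schema it was given.
chain = create_structured_output_chain(Dog, llm, prompt)
chain.run("Harry was a chubby brown beagle who loved chicken")
# -> Dog(name='Harry', color='brown', fav_food='chicken')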