openapi -> openai nit (#6667)

Davis Chase, 2023-06-23 15:09:02 -07:00 (committed by GitHub)
parent 2acf109c4b
commit 5e5b30b74f
2 changed files with 83 additions and 27 deletions

File 1 of 2: example notebook for get_openapi_chain (OpenAPI calls with OpenAI functions)

@@ -10,6 +10,16 @@
     "In this notebook we'll show how to create a chain that automatically makes calls to an API based only on an OpenAPI spec. Under the hood, we're parsing the OpenAPI spec into a JSON schema that the OpenAI functions API can handle. This allows ChatGPT to automatically select and populate the relevant API call to make for any user input. Using the output of ChatGPT we then make the actual API call, and return the result."
    ]
   },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "id": "555661b5",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from langchain.chains.openai_functions.openapi import get_openapi_chain"
+   ]
+  },
   {
    "cell_type": "markdown",
    "id": "a95f510a",
@@ -25,14 +35,12 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from langchain.chains.openai_functions.openapi import get_openapi_chain\n",
-    "\n",
     "chain = get_openapi_chain(\"https://www.klarna.com/us/shopping/public/openai/v0/api-docs/\")"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": 3,
    "id": "3959f866",
    "metadata": {},
    "outputs": [
@@ -76,7 +84,7 @@
       " 'Size:S,XL,XS,L,M,XXL']}]}"
      ]
     },
-    "execution_count": 2,
+    "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
@@ -90,7 +98,9 @@
    "id": "6f648c77",
    "metadata": {},
    "source": [
-    "## Query a translation service"
+    "## Query a translation service\n",
+    "\n",
+    "Additionally, see the request payload by setting `verbose=True`"
    ]
   },
   {
@@ -100,23 +110,57 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "chain = get_openapi_chain(\"https://api.speak.com/openapi.yaml\")"
+    "chain = get_openapi_chain(\"https://api.speak.com/openapi.yaml\", verbose=True)"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 7,
+   "execution_count": 10,
    "id": "1ba51609",
    "metadata": {},
    "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "\n",
+      "\n",
+      "\u001b[1m> Entering new chain...\u001b[0m\n",
+      "\n",
+      "\n",
+      "\u001b[1m> Entering new chain...\u001b[0m\n",
+      "Prompt after formatting:\n",
+      "\u001b[32;1m\u001b[1;3mHuman: Use the provided API's to respond to this user query:\n",
+      "\n",
+      "How would you say no thanks in Russian\u001b[0m\n",
+      "\n",
+      "\u001b[1m> Finished chain.\u001b[0m\n",
+      "\n",
+      "\n",
+      "\u001b[1m> Entering new chain...\u001b[0m\n",
+      "Calling endpoint \u001b[32;1m\u001b[1;3mtranslate\u001b[0m with arguments:\n",
+      "\u001b[32;1m\u001b[1;3m{\n",
+      "  \"json\": {\n",
+      "    \"phrase_to_translate\": \"no thanks\",\n",
+      "    \"learning_language\": \"russian\",\n",
+      "    \"native_language\": \"english\",\n",
+      "    \"additional_context\": \"\",\n",
+      "    \"full_query\": \"How would you say no thanks in Russian\"\n",
+      "  }\n",
+      "}\u001b[0m\n",
+      "\u001b[1m> Finished chain.\u001b[0m\n",
+      "\n",
+      "\u001b[1m> Finished chain.\u001b[0m\n"
+     ]
+    },
     {
      "data": {
       "text/plain": [
-       "{'explanation': '<translation language=\"None\" context=\"None\">\\nNone\\n</translation>\\n\\n<alternatives context=\"None\">\\n1. \"N/A\" *(Formal - used in professional settings to indicate that the answer is not applicable)*\\n2. \"I don\\'t have an answer for that\" *(Neutral - commonly used when one does not know the answer to a question)*\\n3. \"I\\'m not sure\" *(Neutral - similar to the above alternative, used when one is unsure of the answer)*\\n</alternatives>\\n\\n<example-convo language=\"None\">\\n<context>None</context>\\n* Tom: \"Do you know what time the concert starts?\"\\n* Sarah: \"I\\'m sorry, I don\\'t have an answer for that.\"\\n</example-convo>\\n\\n*[Report an issue or leave feedback](https://speak.com/chatgpt?rid=p8i6p14duafpctg4ve7tm48z})*',\n",
+       "{'explanation': '<translation language=\"Russian\">\\nНет, спасибо. (Net, spasibo)\\n</translation>\\n\\n<alternatives>\\n1. \"Нет, я в порядке\" *(Neutral/Formal - Can be used in professional settings or formal situations.)*\\n2. \"Нет, спасибо, я откажусь\" *(Formal - Can be used in polite settings, such as a fancy dinner with colleagues or acquaintances.)*\\n3. \"Не надо\" *(Informal - Can be used in informal situations, such as declining an offer from a friend.)*\\n</alternatives>\\n\\n<example-convo language=\"Russian\">\\n<context>Max is being offered a cigarette at a party.</context>\\n* Sasha: \"Хочешь покурить?\"\\n* Max: \"Нет, спасибо. Я бросил.\"\\n* Sasha: \"Окей, понятно.\"\\n</example-convo>\\n\\n*[Report an issue or leave feedback](https://speak.com/chatgpt?rid=noczaa460do8yqs8xjun6zdm})*',\n",
        " 'extra_response_instructions': 'Use all information in the API response and fully render all Markdown.\\nAlways end your response with a link to report an issue or leave feedback on the plugin.'}"
       ]
      },
-     "execution_count": 7,
+     "execution_count": 10,
      "metadata": {},
      "output_type": "execute_result"
     }
@@ -137,7 +181,9 @@
    "cell_type": "code",
    "execution_count": null,
    "id": "a9198f62",
-   "metadata": {},
+   "metadata": {
+    "scrolled": true
+   },
    "outputs": [],
    "source": [
     "chain = get_openapi_chain(\"https://gist.githubusercontent.com/roaldnefs/053e505b2b7a807290908fe9aa3e1f00/raw/0a212622ebfef501163f91e23803552411ed00e4/openapi.yaml\")"
@@ -145,17 +191,10 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 9,
+   "execution_count": 7,
    "id": "3110c398",
    "metadata": {},
    "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "Retrying langchain.chat_models.openai.ChatOpenAI.completion_with_retry.<locals>._completion_with_retry in 1.0 seconds as it raised ServiceUnavailableError: The server is overloaded or not ready yet..\n"
-     ]
-    },
     {
      "data": {
       "text/plain": [
@@ -172,7 +211,7 @@
       " 'day': '23'}"
      ]
     },
-    "execution_count": 9,
+    "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
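The notebook changes above reduce to a two-step flow: build a chain from a hosted OpenAPI spec, then run a natural-language query against it. A minimal sketch of that flow, assuming a valid OPENAI_API_KEY in the environment and network access to the spec (the URL is taken from the notebook; the query string is illustrative):

    from langchain.chains.openai_functions.openapi import get_openapi_chain

    # Build the chain straight from a hosted OpenAPI spec (URL from the notebook).
    chain = get_openapi_chain(
        "https://www.klarna.com/us/shopping/public/openai/v0/api-docs/"
    )

    # The LLM selects an endpoint and fills in its arguments; the chain then
    # makes the HTTP call and returns the parsed response.
    print(chain.run("What are some options for a blue button-down shirt?"))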

File 2 of 2: langchain/chains/openai_functions/openapi.py

@@ -13,6 +13,7 @@ from langchain.callbacks.manager import CallbackManagerForChainRun
 from langchain.chains.base import Chain
 from langchain.chains.sequential import SequentialChain
 from langchain.chat_models import ChatOpenAI
+from langchain.input import get_colored_text
 from langchain.output_parsers.openai_functions import JsonOutputFunctionsParser
 from langchain.prompts import ChatPromptTemplate
 from langchain.tools import APIOperation
@@ -127,15 +128,17 @@ def openapi_spec_to_openai_fn(
         request_body = spec.get_request_body_for_operation(op)
         # TODO: Support more MIME types.
         if request_body and request_body.content:
-            media_types = []
-            for media_type in request_body.content.values():
-                if media_type.media_type_schema:
-                    schema = spec.get_schema(media_type.media_type_schema)
-                    media_types.append(schema.dict(exclude_none=True))
+            media_types = {}
+            for media_type, media_type_object in request_body.content.items():
+                if media_type_object.media_type_schema:
+                    schema = spec.get_schema(media_type_object.media_type_schema)
+                    media_types[media_type] = schema.dict(exclude_none=True)
             if len(media_types) == 1:
-                request_args["data"] = media_types[0]
+                media_type, schema_dict = list(media_types.items())[0]
+                key = "json" if media_type == "application/json" else "data"
+                request_args[key] = schema_dict
             elif len(media_types) > 1:
-                request_args["data"] = {"anyOf": media_types}
+                request_args["data"] = {"anyOf": list(media_types.values())}
         api_op = APIOperation.from_openapi_spec(spec, path, method)
         fn = {
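The switch from a list to a dict of media types matters because the media type decides which `requests` parameter the payload later travels in. A standalone sketch of the distinction, using plain `requests` and a hypothetical URL:

    import requests

    payload = {"phrase_to_translate": "no thanks", "learning_language": "russian"}

    # json=... serializes the dict and sets Content-Type: application/json,
    # matching an "application/json" request body in the spec.
    requests.post("https://api.example.com/v1/translate", json=payload)

    # data=... form-encodes the dict (application/x-www-form-urlencoded),
    # the fallback used for any other media type.
    requests.post("https://api.example.com/v1/translate", data=payload)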
@@ -184,8 +187,13 @@ class SimpleRequestChain(Chain):
         run_manager: Optional[CallbackManagerForChainRun] = None,
     ) -> Dict[str, Any]:
         """Run the logic of this chain and return the output."""
+        _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
         name = inputs["function"].pop("name")
         args = inputs["function"].pop("arguments")
+        _pretty_name = get_colored_text(name, "green")
+        _pretty_args = get_colored_text(json.dumps(args, indent=2), "green")
+        _text = f"Calling endpoint {_pretty_name} with arguments:\n" + _pretty_args
+        _run_manager.on_text(_text)
         api_response: Response = self.request_method(name, args)
         if api_response.status_code != 200:
             response = (
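Here `get_colored_text` wraps a string in ANSI escape codes and `on_text` hands the text to the run's callback manager, which prints it when the chain is verbose. A sketch reproducing the "Calling endpoint ..." line from the notebook output above (the argument dict is illustrative):

    import json

    from langchain.input import get_colored_text

    name = "translate"
    args = {"json": {"phrase_to_translate": "no thanks"}}

    # get_colored_text("translate", "green") yields the escaped string
    # "\x1b[32;1m\x1b[1;3mtranslate\x1b[0m" visible in the notebook's stdout.
    pretty_name = get_colored_text(name, "green")
    pretty_args = get_colored_text(json.dumps(args, indent=2), "green")
    print(f"Calling endpoint {pretty_name} with arguments:\n{pretty_args}")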
@@ -206,6 +214,9 @@ def get_openapi_chain(
     llm: Optional[BaseLanguageModel] = None,
     prompt: Optional[BasePromptTemplate] = None,
     request_chain: Optional[Chain] = None,
+    llm_kwargs: Optional[Dict] = None,
+    verbose: bool = False,
+    **kwargs: Any,
 ) -> SequentialChain:
     """Create a chain for querying an API from a OpenAPI spec.
@@ -242,10 +253,16 @@ def get_openapi_chain(
         llm_kwargs={"functions": openai_fns},
         output_parser=JsonOutputFunctionsParser(args_only=False),
         output_key="function",
+        verbose=verbose,
+        **(llm_kwargs or {}),
+    )
+    request_chain = request_chain or SimpleRequestChain(
+        request_method=call_api_fn, verbose=verbose
     )
-    request_chain = request_chain or SimpleRequestChain(request_method=call_api_fn)
     return SequentialChain(
         chains=[llm_chain, request_chain],
         input_variables=llm_chain.input_keys,
         output_variables=["response"],
+        verbose=verbose,
+        **kwargs,
     )
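Taken together, `verbose` now propagates to the inner LLM chain, the request chain, and the outer `SequentialChain`; `llm_kwargs` is spread into the LLM chain's constructor; and any extra `**kwargs` are forwarded to the `SequentialChain`. A usage sketch drawn from the notebook above (spec URL and query as shown there):

    from langchain.chains.openai_functions.openapi import get_openapi_chain

    # verbose=True prints the formatted prompt and the outgoing request
    # payload, as in the notebook's stdout above.
    chain = get_openapi_chain("https://api.speak.com/openapi.yaml", verbose=True)
    chain.run("How would you say no thanks in Russian")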