By default, it will extract the title and author of papers.

This template will use `OpenAI` by default.

Be sure that `OPENAI_API_KEY` is set in your environment.
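
Under the hood, the template's chain is, in rough shape, a prompt piped into an OpenAI function-calling model piped into a JSON output parser. The sketch below is an assumption about that shape rather than the template's exact source; the `extract_papers` schema name and the prompt wording are illustrative.

```
from langchain.chat_models import ChatOpenAI
from langchain.output_parsers.openai_functions import JsonKeyOutputFunctionsParser
from langchain.prompts import ChatPromptTemplate

# Illustrative function schema: a list of {title, author} records.
paper_schema = {
    "name": "extract_papers",
    "description": "Extract the title and author of each paper mentioned.",
    "parameters": {
        "type": "object",
        "properties": {
            "papers": {
                "type": "array",
                "items": {
                    "type": "object",
                    "properties": {
                        "title": {"type": "string"},
                        "author": {"type": "string"},
                    },
                },
            }
        },
        "required": ["papers"],
    },
}

prompt = ChatPromptTemplate.from_template(
    "Extract the papers mentioned in this text: {input}"
)
model = ChatOpenAI(temperature=0).bind(
    functions=[paper_schema], function_call={"name": "extract_papers"}
)
# Pull the "papers" key out of the returned function-call arguments.
chain = prompt | model | JsonKeyOutputFunctionsParser(key_name="papers")
```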

## Adding the template

Install the package:

```
pip install -e packages/extraction_openai_functions
```

Edit `app/server.py` to add that package to the routes:

```
from fastapi import FastAPI
from langserve import add_routes

from extraction_openai_functions.chain import chain

app = FastAPI()

# Mount the chain; the path below matches the playground URL used later in this README.
add_routes(app, chain, path="/extraction-openai-functions")
```
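
For `python app/server.py` (next step) to start the server directly, `server.py` also needs an entrypoint. Templates scaffolded this way typically end with a `uvicorn` block like the following; this is an assumption about the scaffold, not something shown above:

```
import uvicorn

if __name__ == "__main__":
    # Serve the FastAPI app defined above on the port used throughout this README.
    uvicorn.run(app, host="0.0.0.0", port=8000)
```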

Run the app:

```
python app/server.py
```
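
Once the server is up, the chain is also reachable over plain HTTP via LangServe's standard `/invoke` endpoint. A minimal sketch, assuming the `/extraction-openai-functions` path from `server.py` above:

```
import requests

# The outer "input" is LangServe's request envelope; the inner "input" is the
# key the extraction chain itself expects.
response = requests.post(
    "http://127.0.0.1:8000/extraction-openai-functions/invoke",
    json={
        "input": {
            "input": "Chain of Thought (Wei et al. 2022) prompts models to reason step by step."
        }
    },
)
print(response.json()["output"])
```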

You can use this template in the Playground:

http://127.0.0.1:8000/extraction-openai-functions/playground/

Also, see the Jupyter notebook `openai_functions` for various other ways to connect to the template.

The notebook's cells are reproduced below.

## Run Template

As shown in the README, add the template and start the server. In `server.py`, set:

```
add_routes(app, chain_ext, path="/extraction_openai_functions")
```

We can then inspect the endpoints at http://127.0.0.1:8000/docs# and call the loaded template with a remote runnable.

```
from langserve.client import RemoteRunnable

oai_function = RemoteRunnable('http://0.0.0.0:8001/extraction_openai_functions')
```
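
The invocation below slices `text[0].page_content`, so the notebook assumes a `text` variable holding loaded documents. A minimal sketch of one way to produce it; the loader choice and URL are assumptions (the papers in the output below are the ones cited in that post):

```
from langchain.document_loaders import WebBaseLoader

# Assumed source document for the extraction example.
text = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/").load()
```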

The function will extract paper titles and authors from an input.
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": 8,
|
||||
"id": "6dace748",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='', additional_kwargs={'function_call': {'name': 'Overview', 'arguments': '{\\n \"summary\": \"This article discusses the concept of building agents with LLM (large language model) as their core controller. It explores the potentiality of LLM as a general problem solver and describes the key components of an LLM-powered autonomous agent system, including planning, memory, and tool use. The article also presents case studies and challenges related to building LLM-powered agents.\",\\n \"language\": \"English\",\\n \"keywords\": \"LLM, autonomous agents, planning, memory, tool use, case studies, challenges\"\\n}'}})"
|
||||
"[{'title': 'Chain of Thought', 'author': 'Wei et al. 2022'},\n",
|
||||
" {'title': 'Tree of Thoughts', 'author': 'Yao et al. 2023'},\n",
|
||||
" {'title': 'LLM+P', 'author': 'Liu et al. 2023'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"oai_function.invoke(text[0].page_content[0:1500])"
|
||||
"oai_function.invoke({\"input\":text[0].page_content[0:4000]})"
|
||||
]
|
||||
}
|
||||
],
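
Because `RemoteRunnable` implements the standard runnable interface, the same endpoint also supports batched calls; a sketch, with the 4000-character chunking chosen only for illustration:

```
# Extract from several consecutive chunks of the document in one round trip.
chunks = [{"input": text[0].page_content[i : i + 4000]} for i in range(0, 12000, 4000)]
results = oai_function.batch(chunks)
```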
|
Reference in New Issue
Block a user