Harrison/use functions agent (#6185)

Co-authored-by: Francisco Ingham <24279597+fpingham@users.noreply.github.com>
Author: Harrison Chase
Date: 2023-06-15 08:18:50 -07:00
Committed by: GitHub
Parent: 7d2b946d0b
Commit: e82687ddf4

29 changed files with 682 additions and 255 deletions

View File

@@ -30,38 +30,83 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.llms import OpenAI"
"from langchain.llms import OpenAI\n",
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.agents.agent_types import AgentType"
]
},
{
"cell_type": "markdown",
"id": "bd806175",
"metadata": {},
"source": [
"## Using ZERO_SHOT_REACT_DESCRIPTION\n",
"\n",
"This shows how to initialize the agent using the ZERO_SHOT_REACT_DESCRIPTION agent type. Note that this is an alternative to the above."
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "a1717204",
"metadata": {},
"outputs": [],
"source": [
"agent = create_csv_agent(\n",
" OpenAI(temperature=0), \n",
" 'titanic.csv', \n",
" verbose=True, \n",
" agent_type=AgentType.ZERO_SHOT_REACT_DESCRIPTION\n",
")"
]
},
{
"cell_type": "markdown",
"id": "c31bb8a6",
"metadata": {},
"source": [
"## Using OpenAI Functions\n",
"\n",
"This shows how to initialize the agent using the OPENAI_FUNCTIONS agent type. Note that this is an alternative to the above."
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "16c4dc59",
"metadata": {},
"outputs": [],
"source": [
"agent = create_csv_agent(\n",
" ChatOpenAI(temperature=0, model=\"gpt-3.5-turbo-0613\"), \n",
" 'titanic.csv', \n",
" verbose=True, \n",
" agent_type=AgentType.OPENAI_FUNCTIONS\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "16c4dc59",
"metadata": {},
"outputs": [],
"source": [
"agent = create_csv_agent(OpenAI(temperature=0), 'titanic.csv', verbose=True)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "46b9489d",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Error in on_chain_start callback: 'name'\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[32;1m\u001b[1;3m\n",
"Invoking: `python_repl_ast` with `df.shape[0]`\n",
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mThought: I need to count the number of rows\n",
"Action: python_repl_ast\n",
"Action Input: df.shape[0]\u001b[0m\n",
"Observation: \u001b[36;1m\u001b[1;3m891\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer\n",
"Final Answer: There are 891 rows.\u001b[0m\n",
"\u001b[0m\u001b[36;1m\u001b[1;3m891\u001b[0m\u001b[32;1m\u001b[1;3mThere are 891 rows in the dataframe.\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
@@ -69,10 +114,10 @@
{
"data": {
"text/plain": [
"'There are 891 rows.'"
"'There are 891 rows in the dataframe.'"
]
},
"execution_count": 5,
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
@@ -83,23 +128,28 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 5,
"id": "a96309be",
"metadata": {},
"metadata": {
"scrolled": false
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Error in on_chain_start callback: 'name'\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[32;1m\u001b[1;3m\n",
"Invoking: `python_repl_ast` with `df[df['SibSp'] > 3]['PassengerId'].count()`\n",
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mThought: I need to count the number of people with more than 3 siblings\n",
"Action: python_repl_ast\n",
"Action Input: df[df['SibSp'] > 3].shape[0]\u001b[0m\n",
"Observation: \u001b[36;1m\u001b[1;3m30\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer\n",
"Final Answer: 30 people have more than 3 siblings.\u001b[0m\n",
"\u001b[0m\u001b[36;1m\u001b[1;3m30\u001b[0m\u001b[32;1m\u001b[1;3mThere are 30 people in the dataframe who have more than 3 siblings.\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
@@ -107,10 +157,10 @@
{
"data": {
"text/plain": [
"'30 people have more than 3 siblings.'"
"'There are 30 people in the dataframe who have more than 3 siblings.'"
]
},
"execution_count": 6,
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
@@ -121,35 +171,39 @@
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": 6,
"id": "964a09f7",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Error in on_chain_start callback: 'name'\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[32;1m\u001b[1;3m\n",
"Invoking: `python_repl_ast` with `import pandas as pd\n",
"import math\n",
"\n",
"# Create a dataframe\n",
"data = {'Age': [22, 38, 26, 35, 35]}\n",
"df = pd.DataFrame(data)\n",
"\n",
"# Calculate the average age\n",
"average_age = df['Age'].mean()\n",
"\n",
"# Calculate the square root of the average age\n",
"square_root = math.sqrt(average_age)\n",
"\n",
"square_root`\n",
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mThought: I need to calculate the average age first\n",
"Action: python_repl_ast\n",
"Action Input: df['Age'].mean()\u001b[0m\n",
"Observation: \u001b[36;1m\u001b[1;3m29.69911764705882\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I now need to calculate the square root of the average age\n",
"Action: python_repl_ast\n",
"Action Input: math.sqrt(df['Age'].mean())\u001b[0m\n",
"Observation: \u001b[36;1m\u001b[1;3mNameError(\"name 'math' is not defined\")\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I need to import the math library\n",
"Action: python_repl_ast\n",
"Action Input: import math\u001b[0m\n",
"Observation: \u001b[36;1m\u001b[1;3m\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I now need to calculate the square root of the average age\n",
"Action: python_repl_ast\n",
"Action Input: math.sqrt(df['Age'].mean())\u001b[0m\n",
"Observation: \u001b[36;1m\u001b[1;3m5.449689683556195\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer\n",
"Final Answer: 5.449689683556195\u001b[0m\n",
"\u001b[0m\u001b[36;1m\u001b[1;3m5.585696017507576\u001b[0m\u001b[32;1m\u001b[1;3mThe square root of the average age is approximately 5.59.\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
@@ -157,10 +211,10 @@
{
"data": {
"text/plain": [
"'5.449689683556195'"
"'The square root of the average age is approximately 5.59.'"
]
},
"execution_count": 7,
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
@@ -181,23 +235,26 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 8,
"id": "15f11fbd",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Error in on_chain_start callback: 'name'\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[32;1m\u001b[1;3m\n",
"Invoking: `python_repl_ast` with `df1['Age'].nunique() - df2['Age'].nunique()`\n",
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mThought: I need to compare the age columns in both dataframes\n",
"Action: python_repl_ast\n",
"Action Input: len(df1[df1['Age'] != df2['Age']])\u001b[0m\n",
"Observation: \u001b[36;1m\u001b[1;3m177\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer\n",
"Final Answer: 177 rows in the age column are different.\u001b[0m\n",
"\u001b[0m\u001b[36;1m\u001b[1;3m-1\u001b[0m\u001b[32;1m\u001b[1;3mThere is 1 row in the age column that is different between the two dataframes.\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
@@ -205,17 +262,17 @@
{
"data": {
"text/plain": [
"'177 rows in the age column are different.'"
"'There is 1 row in the age column that is different between the two dataframes.'"
]
},
"execution_count": 3,
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"agent = create_csv_agent(OpenAI(temperature=0), ['titanic.csv', 'titanic_age_fillna.csv'], verbose=True)\n",
"agent.run(\"how many rows in the age column are different?\")"
"agent = create_csv_agent(ChatOpenAI(temperature=0, model=\"gpt-3.5-turbo-0613\"), ['titanic.csv', 'titanic_age_fillna.csv'], verbose=True, agent_type=AgentType.OPENAI_FUNCTIONS)\n",
"agent.run(\"how many rows in the age column are different between the two dfs?\")"
]
},
{
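Taken together, the CSV-agent changes in this file reduce to the pattern below. This is a consolidated sketch, not part of the diff: the `create_csv_agent` import path and the single-file question are assumptions based on the cells above, and it presumes an OpenAI API key plus `titanic.csv` and `titanic_age_fillna.csv` on disk.

```python
from langchain.agents import create_csv_agent  # import path assumed
from langchain.agents.agent_types import AgentType
from langchain.chat_models import ChatOpenAI

# OpenAI-functions-based CSV agent over a single file, as added in this diff
agent = create_csv_agent(
    ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613"),
    "titanic.csv",
    verbose=True,
    agent_type=AgentType.OPENAI_FUNCTIONS,
)
agent.run("how many rows are there?")  # illustrative question

# Multi-CSV variant from the last hunk: pass a list of files to compare dataframes
multi_agent = create_csv_agent(
    ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613"),
    ["titanic.csv", "titanic_age_fillna.csv"],
    verbose=True,
    agent_type=AgentType.OPENAI_FUNCTIONS,
)
multi_agent.run("how many rows in the age column are different between the two dfs?")
```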

View File

@@ -19,7 +19,9 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import create_pandas_dataframe_agent"
"from langchain.agents import create_pandas_dataframe_agent\n",
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.agents.agent_types import AgentType"
]
},
{
@@ -35,6 +37,16 @@
"df = pd.read_csv('titanic.csv')"
]
},
{
"cell_type": "markdown",
"id": "a62858e2",
"metadata": {},
"source": [
"## Using ZERO_SHOT_REACT_DESCRIPTION\n",
"\n",
"This shows how to initialize the agent using the ZERO_SHOT_REACT_DESCRIPTION agent type. Note that this is an alternative to the above."
]
},
{
"cell_type": "code",
"execution_count": 3,
@@ -45,9 +57,34 @@
"agent = create_pandas_dataframe_agent(OpenAI(temperature=0), df, verbose=True)"
]
},
{
"cell_type": "markdown",
"id": "7233ab56",
"metadata": {},
"source": [
"## Using OpenAI Functions\n",
"\n",
"This shows how to initialize the agent using the OPENAI_FUNCTIONS agent type. Note that this is an alternative to the above."
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "a8ea710e",
"metadata": {},
"outputs": [],
"source": [
"agent = create_pandas_dataframe_agent(\n",
" ChatOpenAI(temperature=0, model=\"gpt-3.5-turbo-0613\"), \n",
" df, \n",
" verbose=True, \n",
" agent_type=AgentType.OPENAI_FUNCTIONS\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "a9207a2e",
"metadata": {},
"outputs": [
@@ -57,13 +94,12 @@
"text": [
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mThought: I need to count the number of rows\n",
"Action: python_repl_ast\n",
"Action Input: df.shape[0]\u001b[0m\n",
"Observation: \u001b[36;1m\u001b[1;3m891\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer\n",
"Final Answer: There are 891 rows.\u001b[0m\n",
"\u001b[1m> Entering new chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3m\n",
"Invoking: `python_repl_ast` with `df.shape[0]`\n",
"\n",
"\n",
"\u001b[0m\u001b[36;1m\u001b[1;3m891\u001b[0m\u001b[32;1m\u001b[1;3mThere are 891 rows in the dataframe.\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
@@ -71,10 +107,10 @@
{
"data": {
"text/plain": [
"'There are 891 rows.'"
"'There are 891 rows in the dataframe.'"
]
},
"execution_count": 4,
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
@@ -172,7 +208,6 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "c4bc0584",
"metadata": {},
@@ -257,7 +292,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.16"
"version": "3.9.1"
}
},
"nbformat": 4,

View File

@@ -12,7 +12,7 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 1,
"id": "f98e9c90-5c37-4fb9-af3e-d09693af8543",
"metadata": {
"tags": []
@@ -22,12 +22,24 @@
"from langchain.agents.agent_toolkits import create_python_agent\n",
"from langchain.tools.python.tool import PythonREPLTool\n",
"from langchain.python import PythonREPL\n",
"from langchain.llms.openai import OpenAI"
"from langchain.llms.openai import OpenAI\n",
"from langchain.agents.agent_types import AgentType\n",
"from langchain.chat_models import ChatOpenAI"
]
},
{
"cell_type": "markdown",
"id": "ca30d64c",
"metadata": {},
"source": [
"## Using ZERO_SHOT_REACT_DESCRIPTION\n",
"\n",
"This shows how to initialize the agent using the ZERO_SHOT_REACT_DESCRIPTION agent type. Note that this is an alternative to the above."
]
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 2,
"id": "cc422f53-c51c-4694-a834-72ecd1e68363",
"metadata": {
"tags": []
@@ -37,7 +49,34 @@
"agent_executor = create_python_agent(\n",
" llm=OpenAI(temperature=0, max_tokens=1000),\n",
" tool=PythonREPLTool(),\n",
" verbose=True\n",
" verbose=True,\n",
" agent_type=AgentType.ZERO_SHOT_REACT_DESCRIPTION\n",
")"
]
},
{
"cell_type": "markdown",
"id": "bb487e8e",
"metadata": {},
"source": [
"## Using OpenAI Functions\n",
"\n",
"This shows how to initialize the agent using the OPENAI_FUNCTIONS agent type. Note that this is an alternative to the above."
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "6e651822",
"metadata": {},
"outputs": [],
"source": [
"agent_executor = create_python_agent(\n",
" llm=ChatOpenAI(temperature=0, model=\"gpt-3.5-turbo-0613\"),\n",
" tool=PythonREPLTool(),\n",
" verbose=True,\n",
" agent_type=AgentType.OPENAI_FUNCTIONS,\n",
" agent_executor_kwargs={\"handle_parsing_errors\": True},\n",
")"
]
},
@@ -52,7 +91,7 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 4,
"id": "25cd4f92-ea9b-4fe6-9838-a4f85f81eebe",
"metadata": {
"tags": []
@@ -64,23 +103,20 @@
"text": [
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3m I need to calculate the 10th fibonacci number\n",
"Action: Python REPL\n",
"Action Input: def fibonacci(n):\n",
" if n == 0:\n",
"\u001b[1m> Entering new chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3m\n",
"Invoking: `Python_REPL` with `def fibonacci(n):\n",
" if n <= 0:\n",
" return 0\n",
" elif n == 1:\n",
" return 1\n",
" else:\n",
" return fibonacci(n-1) + fibonacci(n-2)\u001b[0m\n",
"Observation: \u001b[36;1m\u001b[1;3m\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I need to call the function with 10 as the argument\n",
"Action: Python REPL\n",
"Action Input: fibonacci(10)\u001b[0m\n",
"Observation: \u001b[36;1m\u001b[1;3m\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer\n",
"Final Answer: 55\u001b[0m\n",
" return fibonacci(n-1) + fibonacci(n-2)\n",
"\n",
"fibonacci(10)`\n",
"\n",
"\n",
"\u001b[0m\u001b[36;1m\u001b[1;3m\u001b[0m\u001b[32;1m\u001b[1;3mThe 10th Fibonacci number is 55.\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
@@ -88,10 +124,10 @@
{
"data": {
"text/plain": [
"'55'"
"'The 10th Fibonacci number is 55.'"
]
},
"execution_count": 3,
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
@@ -111,9 +147,11 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 5,
"id": "4b9f60e7-eb6a-4f14-8604-498d863d4482",
"metadata": {},
"metadata": {
"scrolled": false
},
"outputs": [
{
"name": "stdout",
@@ -121,59 +159,70 @@
"text": [
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3m I need to write a neural network in PyTorch and train it on the given data.\n",
"Action: Python REPL\n",
"Action Input: \n",
"import torch\n",
"\u001b[1m> Entering new chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mCould not parse tool input: {'name': 'python', 'arguments': 'import torch\\nimport torch.nn as nn\\nimport torch.optim as optim\\n\\n# Define the neural network\\nclass SingleNeuron(nn.Module):\\n def __init__(self):\\n super(SingleNeuron, self).__init__()\\n self.linear = nn.Linear(1, 1)\\n \\n def forward(self, x):\\n return self.linear(x)\\n\\n# Create the synthetic data\\nx_train = torch.tensor([[1.0], [2.0], [3.0], [4.0]], dtype=torch.float32)\\ny_train = torch.tensor([[2.0], [4.0], [6.0], [8.0]], dtype=torch.float32)\\n\\n# Create the neural network\\nmodel = SingleNeuron()\\n\\n# Define the loss function and optimizer\\ncriterion = nn.MSELoss()\\noptimizer = optim.SGD(model.parameters(), lr=0.01)\\n\\n# Train the neural network\\nfor epoch in range(1, 1001):\\n # Forward pass\\n y_pred = model(x_train)\\n \\n # Compute loss\\n loss = criterion(y_pred, y_train)\\n \\n # Backward pass and optimization\\n optimizer.zero_grad()\\n loss.backward()\\n optimizer.step()\\n \\n # Print the loss every 100 epochs\\n if epoch % 100 == 0:\\n print(f\"Epoch {epoch}: Loss = {loss.item()}\")\\n\\n# Make a prediction for x = 5\\nx_test = torch.tensor([[5.0]], dtype=torch.float32)\\ny_pred = model(x_test)\\ny_pred.item()'} because the `arguments` is not valid JSON.\u001b[0mInvalid or incomplete response\u001b[32;1m\u001b[1;3m\n",
"Invoking: `Python_REPL` with `import torch\n",
"import torch.nn as nn\n",
"import torch.optim as optim\n",
"\n",
"# Define the model\n",
"model = torch.nn.Sequential(\n",
" torch.nn.Linear(1, 1)\n",
")\n",
"# Define the neural network\n",
"class SingleNeuron(nn.Module):\n",
" def __init__(self):\n",
" super(SingleNeuron, self).__init__()\n",
" self.linear = nn.Linear(1, 1)\n",
" \n",
" def forward(self, x):\n",
" return self.linear(x)\n",
"\n",
"# Define the loss\n",
"loss_fn = torch.nn.MSELoss()\n",
"# Create the synthetic data\n",
"x_train = torch.tensor([[1.0], [2.0], [3.0], [4.0]], dtype=torch.float32)\n",
"y_train = torch.tensor([[2.0], [4.0], [6.0], [8.0]], dtype=torch.float32)\n",
"\n",
"# Define the optimizer\n",
"optimizer = torch.optim.SGD(model.parameters(), lr=0.01)\n",
"# Create the neural network\n",
"model = SingleNeuron()\n",
"\n",
"# Define the data\n",
"x_data = torch.tensor([[1.0], [2.0], [3.0], [4.0]])\n",
"y_data = torch.tensor([[2.0], [4.0], [6.0], [8.0]])\n",
"# Define the loss function and optimizer\n",
"criterion = nn.MSELoss()\n",
"optimizer = optim.SGD(model.parameters(), lr=0.01)\n",
"\n",
"# Train the model\n",
"for epoch in range(1000):\n",
"# Train the neural network\n",
"for epoch in range(1, 1001):\n",
" # Forward pass\n",
" y_pred = model(x_data)\n",
"\n",
" # Compute and print loss\n",
" loss = loss_fn(y_pred, y_data)\n",
" if (epoch+1) % 100 == 0:\n",
" print(f'Epoch {epoch+1}: loss = {loss.item():.4f}')\n",
"\n",
" # Zero the gradients\n",
" y_pred = model(x_train)\n",
" \n",
" # Compute loss\n",
" loss = criterion(y_pred, y_train)\n",
" \n",
" # Backward pass and optimization\n",
" optimizer.zero_grad()\n",
"\n",
" # Backward pass\n",
" loss.backward()\n",
"\n",
" # Update the weights\n",
" optimizer.step()\n",
"\u001b[0m\n",
"Observation: \u001b[36;1m\u001b[1;3mEpoch 100: loss = 0.0013\n",
"Epoch 200: loss = 0.0007\n",
"Epoch 300: loss = 0.0004\n",
"Epoch 400: loss = 0.0002\n",
"Epoch 500: loss = 0.0001\n",
"Epoch 600: loss = 0.0001\n",
"Epoch 700: loss = 0.0000\n",
"Epoch 800: loss = 0.0000\n",
"Epoch 900: loss = 0.0000\n",
"Epoch 1000: loss = 0.0000\n",
"\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer\n",
"Final Answer: The prediction for x = 5 is 10.0.\u001b[0m\n",
" \n",
" # Print the loss every 100 epochs\n",
" if epoch % 100 == 0:\n",
" print(f\"Epoch {epoch}: Loss = {loss.item()}\")\n",
"\n",
"# Make a prediction for x = 5\n",
"x_test = torch.tensor([[5.0]], dtype=torch.float32)\n",
"y_pred = model(x_test)\n",
"y_pred.item()`\n",
"\n",
"\n",
"\u001b[0m\u001b[36;1m\u001b[1;3mEpoch 100: Loss = 0.03825576975941658\n",
"Epoch 200: Loss = 0.02100197970867157\n",
"Epoch 300: Loss = 0.01152981910854578\n",
"Epoch 400: Loss = 0.006329738534986973\n",
"Epoch 500: Loss = 0.0034749575424939394\n",
"Epoch 600: Loss = 0.0019077073084190488\n",
"Epoch 700: Loss = 0.001047312980517745\n",
"Epoch 800: Loss = 0.0005749554838985205\n",
"Epoch 900: Loss = 0.0003156439634039998\n",
"Epoch 1000: Loss = 0.00017328384274151176\n",
"\u001b[0m\u001b[32;1m\u001b[1;3m\n",
"Invoking: `Python_REPL` with `x_test.item()`\n",
"\n",
"\n",
"\u001b[0m\u001b[36;1m\u001b[1;3m\u001b[0m\u001b[32;1m\u001b[1;3mThe prediction for x = 5 is 10.000173568725586.\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
@@ -181,10 +230,10 @@
{
"data": {
"text/plain": [
"'The prediction for x = 5 is 10.0.'"
"'The prediction for x = 5 is 10.000173568725586.'"
]
},
"execution_count": 4,
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
@@ -206,9 +255,9 @@
],
"metadata": {
"kernelspec": {
"display_name": "LangChain",
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "langchain"
"name": "python3"
},
"language_info": {
"codemirror_mode": {
@@ -220,7 +269,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.16"
"version": "3.9.1"
}
},
"nbformat": 4,

View File

@@ -37,7 +37,30 @@
"from langchain.agents.agent_toolkits import SQLDatabaseToolkit\n",
"from langchain.sql_database import SQLDatabase\n",
"from langchain.llms.openai import OpenAI\n",
"from langchain.agents import AgentExecutor"
"from langchain.agents import AgentExecutor\n",
"from langchain.agents.agent_types import AgentType\n",
"from langchain.chat_models import ChatOpenAI"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "65ec5bb3",
"metadata": {},
"outputs": [],
"source": [
"db = SQLDatabase.from_uri(\"sqlite:///../../../../../notebooks/Chinook.db\")\n",
"toolkit = SQLDatabaseToolkit(db=db, llm=OpenAI(temperature=0))\n"
]
},
{
"cell_type": "markdown",
"id": "f74d1792",
"metadata": {},
"source": [
"## Using ZERO_SHOT_REACT_DESCRIPTION\n",
"\n",
"This shows how to initialize the agent using the ZERO_SHOT_REACT_DESCRIPTION agent type. Note that this is an alternative to the above."
]
},
{
@@ -49,16 +72,39 @@
},
"outputs": [],
"source": [
"db = SQLDatabase.from_uri(\"sqlite:///../../../../notebooks/Chinook.db\")\n",
"toolkit = SQLDatabaseToolkit(db=db)\n",
"\n",
"agent_executor = create_sql_agent(\n",
" llm=OpenAI(temperature=0),\n",
" toolkit=toolkit,\n",
" verbose=True\n",
" verbose=True,\n",
" agent_type=AgentType.ZERO_SHOT_REACT_DESCRIPTION\n",
")"
]
},
{
"cell_type": "markdown",
"id": "971cc455",
"metadata": {},
"source": [
"## Using OpenAI Functions\n",
"\n",
"This shows how to initialize the agent using the OPENAI_FUNCTIONS agent type. Note that this is an alternative to the above."
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "6426a27d",
"metadata": {},
"outputs": [],
"source": [
"# agent_executor = create_sql_agent(\n",
"# llm=ChatOpenAI(temperature=0, model=\"gpt-3.5-turbo-0613\"),\n",
"# toolkit=toolkit,\n",
"# verbose=True,\n",
"# agent_type=AgentType.OPENAI_FUNCTIONS\n",
"# )"
]
},
{
"cell_type": "markdown",
"id": "36ae48c7-cb08-4fef-977e-c7d4b96a464b",
@@ -69,7 +115,7 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 4,
"id": "ff70e83d-5ad0-4fc7-bb96-27d82ac166d7",
"metadata": {
"tags": []
@@ -81,14 +127,16 @@
"text": [
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mAction: list_tables_sql_db\n",
"Action Input: \"\"\u001b[0m\n",
"Observation: \u001b[38;5;200m\u001b[1;3mArtist, Invoice, Playlist, Genre, Album, PlaylistTrack, Track, InvoiceLine, MediaType, Employee, Customer\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I should look at the schema of the playlisttrack table\n",
"Action: schema_sql_db\n",
"Action Input: \"PlaylistTrack\"\u001b[0m\n",
"Observation: \u001b[33;1m\u001b[1;3m\n",
"\u001b[1m> Entering new chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3m\n",
"Invoking: `list_tables_sql_db` with `{}`\n",
"\n",
"\n",
"\u001b[0m\u001b[38;5;200m\u001b[1;3mAlbum, Artist, Track, PlaylistTrack, InvoiceLine, sales_table, Playlist, Genre, Employee, Customer, Invoice, MediaType\u001b[0m\u001b[32;1m\u001b[1;3m\n",
"Invoking: `schema_sql_db` with `PlaylistTrack`\n",
"\n",
"\n",
"\u001b[0m\u001b[33;1m\u001b[1;3m\n",
"CREATE TABLE \"PlaylistTrack\" (\n",
"\t\"PlaylistId\" INTEGER NOT NULL, \n",
"\t\"TrackId\" INTEGER NOT NULL, \n",
@@ -97,13 +145,36 @@
"\tFOREIGN KEY(\"PlaylistId\") REFERENCES \"Playlist\" (\"PlaylistId\")\n",
")\n",
"\n",
"SELECT * FROM 'PlaylistTrack' LIMIT 3;\n",
"PlaylistId TrackId\n",
"1 3402\n",
"1 3389\n",
"1 3390\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer\n",
"Final Answer: The PlaylistTrack table has two columns, PlaylistId and TrackId, and is linked to the Playlist and Track tables.\u001b[0m\n",
"/*\n",
"3 rows from PlaylistTrack table:\n",
"PlaylistId\tTrackId\n",
"1\t3402\n",
"1\t3389\n",
"1\t3390\n",
"*/\u001b[0m\u001b[32;1m\u001b[1;3mThe `PlaylistTrack` table has two columns: `PlaylistId` and `TrackId`. It is a junction table that represents the relationship between playlists and tracks. \n",
"\n",
"Here is the schema of the `PlaylistTrack` table:\n",
"\n",
"```\n",
"CREATE TABLE \"PlaylistTrack\" (\n",
"\t\"PlaylistId\" INTEGER NOT NULL, \n",
"\t\"TrackId\" INTEGER NOT NULL, \n",
"\tPRIMARY KEY (\"PlaylistId\", \"TrackId\"), \n",
"\tFOREIGN KEY(\"TrackId\") REFERENCES \"Track\" (\"TrackId\"), \n",
"\tFOREIGN KEY(\"PlaylistId\") REFERENCES \"Playlist\" (\"PlaylistId\")\n",
")\n",
"```\n",
"\n",
"Here are three sample rows from the `PlaylistTrack` table:\n",
"\n",
"```\n",
"PlaylistId TrackId\n",
"1 3402\n",
"1 3389\n",
"1 3390\n",
"```\n",
"\n",
"Please let me know if there is anything else I can help you with.\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
@@ -111,10 +182,10 @@
{
"data": {
"text/plain": [
"'The PlaylistTrack table has two columns, PlaylistId and TrackId, and is linked to the Playlist and Track tables.'"
"'The `PlaylistTrack` table has two columns: `PlaylistId` and `TrackId`. It is a junction table that represents the relationship between playlists and tracks. \\n\\nHere is the schema of the `PlaylistTrack` table:\\n\\n```\\nCREATE TABLE \"PlaylistTrack\" (\\n\\t\"PlaylistId\" INTEGER NOT NULL, \\n\\t\"TrackId\" INTEGER NOT NULL, \\n\\tPRIMARY KEY (\"PlaylistId\", \"TrackId\"), \\n\\tFOREIGN KEY(\"TrackId\") REFERENCES \"Track\" (\"TrackId\"), \\n\\tFOREIGN KEY(\"PlaylistId\") REFERENCES \"Playlist\" (\"PlaylistId\")\\n)\\n```\\n\\nHere are three sample rows from the `PlaylistTrack` table:\\n\\n```\\nPlaylistId TrackId\\n1 3402\\n1 3389\\n1 3390\\n```\\n\\nPlease let me know if there is anything else I can help you with.'"
]
},
"execution_count": 3,
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
@@ -519,7 +590,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.9"
"version": "3.9.1"
}
},
"nbformat": 4,