mirror of
https://github.com/hwchase17/langchain.git
synced 2026-02-21 14:43:07 +00:00
wip
This commit is contained in:
@@ -4,9 +4,9 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# File System\n",
|
||||
"# File Management Toolkit\n",
|
||||
"\n",
|
||||
"LangChain provides tools for interacting with a local file system out of the box. This notebook walks through some of them.\n",
|
||||
"The `FileManagementToolkit` provides tools for interacting with a local file system out of the box. This notebook walks through some of them.\n",
|
||||
"\n",
|
||||
"**Note:** these tools are not recommended for use outside a sandboxed environment! "
|
||||
]
|
||||
|
||||
@@ -1,106 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "487607cd",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Google Places\n",
|
||||
"\n",
|
||||
"This notebook goes through how to use Google Places API"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "8690845f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet googlemaps langchain-community"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "fae31ef4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"GPLACES_API_KEY\"] = \"\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "abb502b3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.tools import GooglePlacesTool"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "a83a02ac",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"places = GooglePlacesTool()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"id": "2b65a285",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"1. Delfina Restaurant\\nAddress: 3621 18th St, San Francisco, CA 94110, USA\\nPhone: (415) 552-4055\\nWebsite: https://www.delfinasf.com/\\n\\n\\n2. Piccolo Forno\\nAddress: 725 Columbus Ave, San Francisco, CA 94133, USA\\nPhone: (415) 757-0087\\nWebsite: https://piccolo-forno-sf.com/\\n\\n\\n3. L'Osteria del Forno\\nAddress: 519 Columbus Ave, San Francisco, CA 94133, USA\\nPhone: (415) 982-1124\\nWebsite: Unknown\\n\\n\\n4. Il Fornaio\\nAddress: 1265 Battery St, San Francisco, CA 94111, USA\\nPhone: (415) 986-0100\\nWebsite: https://www.ilfornaio.com/\\n\\n\""
|
||||
]
|
||||
},
|
||||
"execution_count": 16,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"places.run(\"al fornos\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "66d3da8a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.12"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -1,108 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Google Trends\n",
|
||||
"\n",
|
||||
"This notebook goes over how to use the Google Trends Tool to fetch trends information.\n",
|
||||
"\n",
|
||||
"First, you need to sign up for a `SerpApi key` at: https://serpapi.com/users/sign_up.\n",
|
||||
"\n",
|
||||
"Then you must install `google-search-results` with the command:\n",
|
||||
"\n",
|
||||
"`pip install google-search-results`\n",
|
||||
"\n",
|
||||
"Then you will need to set the environment variable `SERPAPI_API_KEY` to your `SerpApi key`\n",
|
||||
"\n",
|
||||
"[Alternatively you can pass the key in as an argument to the wrapper `serp_api_key=\"your secret key\"`]\n",
|
||||
"\n",
|
||||
"## Use the Tool"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Requirement already satisfied: google-search-results in c:\\python311\\lib\\site-packages (2.4.2)\n",
|
||||
"Requirement already satisfied: requests in c:\\python311\\lib\\site-packages (from google-search-results) (2.31.0)\n",
|
||||
"Requirement already satisfied: charset-normalizer<4,>=2 in c:\\python311\\lib\\site-packages (from requests->google-search-results) (3.3.2)\n",
|
||||
"Requirement already satisfied: idna<4,>=2.5 in c:\\python311\\lib\\site-packages (from requests->google-search-results) (3.4)\n",
|
||||
"Requirement already satisfied: urllib3<3,>=1.21.1 in c:\\python311\\lib\\site-packages (from requests->google-search-results) (2.1.0)\n",
|
||||
"Requirement already satisfied: certifi>=2017.4.17 in c:\\python311\\lib\\site-packages (from requests->google-search-results) (2023.7.22)\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet google-search-results langchain_community"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"from langchain_community.tools.google_trends import GoogleTrendsQueryRun\n",
|
||||
"from langchain_community.utilities.google_trends import GoogleTrendsAPIWrapper\n",
|
||||
"\n",
|
||||
"os.environ[\"SERPAPI_API_KEY\"] = \"\"\n",
|
||||
"tool = GoogleTrendsQueryRun(api_wrapper=GoogleTrendsAPIWrapper())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Query: Water\\nDate From: Nov 20, 2022\\nDate To: Nov 11, 2023\\nMin Value: 72\\nMax Value: 100\\nAverage Value: 84.25490196078431\\nPrecent Change: 5.555555555555555%\\nTrend values: 72, 72, 74, 77, 86, 80, 82, 88, 79, 79, 85, 82, 81, 84, 83, 77, 80, 85, 82, 80, 88, 84, 82, 84, 83, 85, 92, 92, 100, 92, 100, 96, 94, 95, 94, 98, 96, 84, 86, 84, 85, 83, 83, 76, 81, 85, 78, 77, 81, 75, 76\\nRising Related Queries: avatar way of water, avatar the way of water, owala water bottle, air up water bottle, lake mead water level\\nTop Related Queries: water park, water bottle, water heater, water filter, water tank, water bill, water world, avatar way of water, avatar the way of water, coconut water, deep water, water cycle, water dispenser, water purifier, water pollution, distilled water, hot water heater, water cooler, sparkling water, american water, micellar water, density of water, tankless water heater, tonic water, water jug'"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"tool.run(\"Water\")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.9.16 ('langchain')",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.4"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "15e58ce194949b77a891bd4339ce3d86a9bd138e905926019517993f97db9e6c"
|
||||
}
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -1,200 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Oracle AI Vector Search: Generate Summary\n",
|
||||
"\n",
|
||||
"Oracle AI Vector Search is designed for Artificial Intelligence (AI) workloads that allows you to query data based on semantics, rather than keywords.\n",
|
||||
"One of the biggest benefits of Oracle AI Vector Search is that semantic search on unstructured data can be combined with relational search on business data in one single system.\n",
|
||||
"This is not only powerful but also significantly more effective because you don't need to add a specialized vector database, eliminating the pain of data fragmentation between multiple systems.\n",
|
||||
"\n",
|
||||
"In addition, your vectors can benefit from all of Oracle Database’s most powerful features, like the following:\n",
|
||||
"\n",
|
||||
" * [Partitioning Support](https://www.oracle.com/database/technologies/partitioning.html)\n",
|
||||
" * [Real Application Clusters scalability](https://www.oracle.com/database/real-application-clusters/)\n",
|
||||
" * [Exadata smart scans](https://www.oracle.com/database/technologies/exadata/software/smartscan/)\n",
|
||||
" * [Shard processing across geographically distributed databases](https://www.oracle.com/database/distributed-database/)\n",
|
||||
" * [Transactions](https://docs.oracle.com/en/database/oracle/oracle-database/23/cncpt/transactions.html)\n",
|
||||
" * [Parallel SQL](https://docs.oracle.com/en/database/oracle/oracle-database/21/vldbg/parallel-exec-intro.html#GUID-D28717E4-0F77-44F5-BB4E-234C31D4E4BA)\n",
|
||||
" * [Disaster recovery](https://www.oracle.com/database/data-guard/)\n",
|
||||
" * [Security](https://www.oracle.com/security/database-security/)\n",
|
||||
" * [Oracle Machine Learning](https://www.oracle.com/artificial-intelligence/database-machine-learning/)\n",
|
||||
" * [Oracle Graph Database](https://www.oracle.com/database/integrated-graph-database/)\n",
|
||||
" * [Oracle Spatial and Graph](https://www.oracle.com/database/spatial/)\n",
|
||||
" * [Oracle Blockchain](https://docs.oracle.com/en/database/oracle/oracle-database/23/arpls/dbms_blockchain_table.html#GUID-B469E277-978E-4378-A8C1-26D3FF96C9A6)\n",
|
||||
" * [JSON](https://docs.oracle.com/en/database/oracle/oracle-database/23/adjsn/json-in-oracle-database.html)\n",
|
||||
"\n",
|
||||
"This guide demonstrates how to use the Summary Capabilities within Oracle AI Vector Search to generate a summary for your documents using OracleSummary."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If you are just starting with Oracle Database, consider exploring the [free Oracle 23 AI](https://www.oracle.com/database/free/#resources) which provides a great introduction to setting up your database environment. While working with the database, it is often advisable to avoid using the system user by default; instead, you can create your own user for enhanced security and customization. For detailed steps on user creation, refer to our [end-to-end guide](https://github.com/langchain-ai/langchain/blob/master/cookbook/oracleai_demo.ipynb) which also shows how to set up a user in Oracle. Additionally, understanding user privileges is crucial for managing database security effectively. You can learn more about this topic in the official [Oracle guide](https://docs.oracle.com/en/database/oracle/oracle-database/19/admqs/administering-user-accounts-and-security.html#GUID-36B21D72-1BBB-46C9-A0C9-F0D2A8591B8D) on administering user accounts and security."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Prerequisites\n",
|
||||
"\n",
|
||||
"Please install the Oracle Python Client driver to use Langchain with Oracle AI Vector Search. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# pip install oracledb"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Connect to Oracle Database\n",
|
||||
"The following sample code will show how to connect to Oracle Database. By default, python-oracledb runs in a ‘Thin’ mode which connects directly to Oracle Database. This mode does not need Oracle Client libraries. However, some additional functionality is available when python-oracledb uses them. Python-oracledb is said to be in ‘Thick’ mode when Oracle Client libraries are used. Both modes have comprehensive functionality supporting the Python Database API v2.0 Specification. See the following [guide](https://python-oracledb.readthedocs.io/en/latest/user_guide/appendix_a.html#featuresummary) that talks about features supported in each mode. You might want to switch to thick-mode if you are unable to use thin-mode."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import sys\n",
|
||||
"\n",
|
||||
"import oracledb\n",
|
||||
"\n",
|
||||
"# please update with your username, password, hostname and service_name\n",
|
||||
"username = \"<username>\"\n",
|
||||
"password = \"<password>\"\n",
|
||||
"dsn = \"<hostname>/<service_name>\"\n",
|
||||
"\n",
|
||||
"try:\n",
|
||||
" conn = oracledb.connect(user=username, password=password, dsn=dsn)\n",
|
||||
" print(\"Connection successful!\")\n",
|
||||
"except Exception as e:\n",
|
||||
" print(\"Connection failed!\")\n",
|
||||
" sys.exit(1)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Generate Summary\n",
|
||||
"The Oracle AI Vector Search Langchain library offers a suite of APIs designed for document summarization. It supports multiple summarization providers such as Database, OCIGENAI, HuggingFace, among others, allowing users to select the provider that best meets their needs. To utilize these capabilities, users must configure the summary parameters as specified. For detailed information on these parameters, please consult the [Oracle AI Vector Search Guide book](https://docs.oracle.com/en/database/oracle/oracle-database/23/arpls/dbms_vector_chain1.html#GUID-EC9DDB58-6A15-4B36-BA66-ECBA20D2CE57)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"***Note:*** Users may need to set a proxy if they want to use a 3rd-party summary generation provider other than Oracle's in-house and default provider: 'database'. If you don't have a proxy, please remove the proxy parameter when you instantiate the OracleSummary."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# proxy to be used when we instantiate summary and embedder object\n",
|
||||
"proxy = \"<proxy>\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The following sample code will show how to generate summary:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.utilities.oracleai import OracleSummary\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"\n",
|
||||
"\"\"\"\n",
|
||||
"# using 'ocigenai' provider\n",
|
||||
"summary_params = {\n",
|
||||
" \"provider\": \"ocigenai\",\n",
|
||||
" \"credential_name\": \"OCI_CRED\",\n",
|
||||
" \"url\": \"https://inference.generativeai.us-chicago-1.oci.oraclecloud.com/20231130/actions/summarizeText\",\n",
|
||||
" \"model\": \"cohere.command\",\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"# using 'huggingface' provider\n",
|
||||
"summary_params = {\n",
|
||||
" \"provider\": \"huggingface\",\n",
|
||||
" \"credential_name\": \"HF_CRED\",\n",
|
||||
" \"url\": \"https://api-inference.huggingface.co/models/\",\n",
|
||||
" \"model\": \"facebook/bart-large-cnn\",\n",
|
||||
" \"wait_for_model\": \"true\"\n",
|
||||
"}\n",
|
||||
"\"\"\"\n",
|
||||
"\n",
|
||||
"# using 'database' provider\n",
|
||||
"summary_params = {\n",
|
||||
" \"provider\": \"database\",\n",
|
||||
" \"glevel\": \"S\",\n",
|
||||
" \"numParagraphs\": 1,\n",
|
||||
" \"language\": \"english\",\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"# get the summary instance\n",
|
||||
"# Remove proxy if not required\n",
|
||||
"summ = OracleSummary(conn=conn, params=summary_params, proxy=proxy)\n",
|
||||
"summary = summ.get_summary(\n",
|
||||
" \"In the heart of the forest, \"\n",
|
||||
" + \"a lone fox ventured out at dusk, seeking a lost treasure. \"\n",
|
||||
" + \"With each step, memories flooded back, guiding its path. \"\n",
|
||||
" + \"As the moon rose high, illuminating the night, the fox unearthed \"\n",
|
||||
" + \"not gold, but a forgotten friendship, worth more than any riches.\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"print(f\"Summary generated by OracleSummary: {summary}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### End to End Demo\n",
|
||||
"Please refer to our complete demo guide [Oracle AI Vector Search End-to-End Demo Guide](https://github.com/langchain-ai/langchain/tree/master/cookbook/oracleai_demo.ipynb) to build an end to end RAG pipeline with the help of Oracle AI Vector Search.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
@@ -1,302 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c81da886",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Pandas Dataframe\n",
|
||||
"\n",
|
||||
"This notebook shows how to use agents to interact with a `Pandas DataFrame`. It is mostly optimized for question answering.\n",
|
||||
"\n",
|
||||
"**NOTE: this agent calls the `Python` agent under the hood, which executes LLM generated Python code - this can be bad if the LLM generated Python code is harmful. Use cautiously.**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "0cdd9bf5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.agents.agent_types import AgentType\n",
|
||||
"from langchain_experimental.agents.agent_toolkits import create_pandas_dataframe_agent\n",
|
||||
"from langchain_openai import ChatOpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "051ebe84",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import pandas as pd\n",
|
||||
"from langchain_openai import OpenAI\n",
|
||||
"\n",
|
||||
"df = pd.read_csv(\n",
|
||||
" \"https://raw.githubusercontent.com/pandas-dev/pandas/main/doc/data/titanic.csv\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a62858e2",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Using `ZERO_SHOT_REACT_DESCRIPTION`\n",
|
||||
"\n",
|
||||
"This shows how to initialize the agent using the `ZERO_SHOT_REACT_DESCRIPTION` agent type."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "4185ff46",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"agent = create_pandas_dataframe_agent(OpenAI(temperature=0), df, verbose=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7233ab56",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Using OpenAI Functions\n",
|
||||
"\n",
|
||||
"This shows how to initialize the agent using the OPENAI_FUNCTIONS agent type. Note that this is an alternative to the above."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "a8ea710e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"agent = create_pandas_dataframe_agent(\n",
|
||||
" ChatOpenAI(temperature=0, model=\"gpt-3.5-turbo-0613\"),\n",
|
||||
" df,\n",
|
||||
" verbose=True,\n",
|
||||
" agent_type=AgentType.OPENAI_FUNCTIONS,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "a9207a2e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3m\n",
|
||||
"Invoking: `python_repl_ast` with `df.shape[0]`\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[0m\u001b[36;1m\u001b[1;3m891\u001b[0m\u001b[32;1m\u001b[1;3mThere are 891 rows in the dataframe.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'There are 891 rows in the dataframe.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent.invoke(\"how many rows are there?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "bd43617c",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3mThought: I need to count the number of people with more than 3 siblings\n",
|
||||
"Action: python_repl_ast\n",
|
||||
"Action Input: df[df['SibSp'] > 3].shape[0]\u001b[0m\n",
|
||||
"Observation: \u001b[36;1m\u001b[1;3m30\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer\n",
|
||||
"Final Answer: 30 people have more than 3 siblings.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'30 people have more than 3 siblings.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent.invoke(\"how many people have more than 3 siblings\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "94e64b58",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3mThought: I need to calculate the average age first\n",
|
||||
"Action: python_repl_ast\n",
|
||||
"Action Input: df['Age'].mean()\u001b[0m\n",
|
||||
"Observation: \u001b[36;1m\u001b[1;3m29.69911764705882\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3m I now need to calculate the square root of the average age\n",
|
||||
"Action: python_repl_ast\n",
|
||||
"Action Input: math.sqrt(df['Age'].mean())\u001b[0m\n",
|
||||
"Observation: \u001b[36;1m\u001b[1;3mNameError(\"name 'math' is not defined\")\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3m I need to import the math library\n",
|
||||
"Action: python_repl_ast\n",
|
||||
"Action Input: import math\u001b[0m\n",
|
||||
"Observation: \u001b[36;1m\u001b[1;3m\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3m I now need to calculate the square root of the average age\n",
|
||||
"Action: python_repl_ast\n",
|
||||
"Action Input: math.sqrt(df['Age'].mean())\u001b[0m\n",
|
||||
"Observation: \u001b[36;1m\u001b[1;3m5.449689683556195\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer\n",
|
||||
"Final Answer: The square root of the average age is 5.449689683556195.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'The square root of the average age is 5.449689683556195.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent.invoke(\"whats the square root of the average age?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c4bc0584",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Multi DataFrame Example\n",
|
||||
"\n",
|
||||
"This next part shows how the agent can interact with multiple dataframes passed in as a list."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "42a15bd9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"df1 = df.copy()\n",
|
||||
"df1[\"Age\"] = df1[\"Age\"].fillna(df1[\"Age\"].mean())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "eba13b4d",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3mThought: I need to compare the age columns in both dataframes\n",
|
||||
"Action: python_repl_ast\n",
|
||||
"Action Input: len(df1[df1['Age'] != df2['Age']])\u001b[0m\n",
|
||||
"Observation: \u001b[36;1m\u001b[1;3m177\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer\n",
|
||||
"Final Answer: 177 rows in the age column are different.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'177 rows in the age column are different.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent = create_pandas_dataframe_agent(OpenAI(temperature=0), [df, df1], verbose=True)\n",
|
||||
"agent.invoke(\"how many rows in the age column are different?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "60d08a56",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -62,6 +62,11 @@ SEARCH_TOOL_FEAT_TABLE = {
|
||||
"available_data": "Answer",
|
||||
"link": "/docs/integrations/tools/serpapi",
|
||||
},
|
||||
"Data for SEO": {
|
||||
"pricing": "Not free, price depends on API used",
|
||||
"available_data": "URL, Snippet, Title, Type",
|
||||
"link": "/docs/integrations/tools/dataforseo",
|
||||
},
|
||||
}
|
||||
|
||||
CODE_INTERPRETER_TOOL_FEAT_TABLE = {
|
||||
@@ -93,6 +98,20 @@ CODE_INTERPRETER_TOOL_FEAT_TABLE = {
|
||||
"return_results": "Text, Images",
|
||||
"link": "/docs/integrations/tools/azure_dynamic_sessions",
|
||||
},
|
||||
"Bash": {
|
||||
"langauges": "Bash",
|
||||
"sandbox_lifetime": "Resets on Execution",
|
||||
"upload": True,
|
||||
"return_results": "Bash execution results",
|
||||
"link": "/docs/integrations/tools/bash",
|
||||
},
|
||||
"Databrick Unity Cataong": {
|
||||
"langauges": "Python, SQL",
|
||||
"sandbox_lifetime": "Resets on Execution",
|
||||
"upload": False,
|
||||
"return_results": "Text",
|
||||
"link": "/docs/integrations/tools/databricks",
|
||||
},
|
||||
}
|
||||
|
||||
PRODUCTIVITY_TOOL_FEAT_TABLE = {
|
||||
@@ -128,6 +147,46 @@ PRODUCTIVITY_TOOL_FEAT_TABLE = {
|
||||
"link": "/docs/integrations/tools/infobip",
|
||||
"pricing": "Free trial, with variable pricing after",
|
||||
},
|
||||
"AWS Toolkit": {
|
||||
"link": "/docs/integrations/tools/aws",
|
||||
"pricing": "Free tier, with variable pricing after",
|
||||
},
|
||||
"ClickUp": {
|
||||
"link": "/docs/integrations/tools/clickup",
|
||||
"pricing": "Free tier, with variable pricing after",
|
||||
},
|
||||
"Cogniswitch": {
|
||||
"link": "/docs/integrations/tools/cogniswitch",
|
||||
"pricing": "Free trial, with variable pricing after",
|
||||
},
|
||||
"Google Drive": {
|
||||
"link": "/docs/integrations/tools/google_drive",
|
||||
"pricing": "Free",
|
||||
},
|
||||
"IFTTT Webhooks": {
|
||||
"link": "/docs/integrations/tools/ifttt",
|
||||
"pricing": "Free tier, with variable pricing after",
|
||||
},
|
||||
"Lemon AI": {
|
||||
"link": "/docs/integrations/tools/lemonai",
|
||||
"pricing": "Depends on service used",
|
||||
},
|
||||
"Power BI": {
|
||||
"link": "/docs/integrations/tools/power_bi",
|
||||
"pricing": "Free tier, with variable pricing after",
|
||||
},
|
||||
"Wolfram Alpha": {
|
||||
"link": "/docs/integrations/tools/wolfram_alpha",
|
||||
"pricing": "Free up to 2000 calls/month",
|
||||
},
|
||||
"Zapier": {
|
||||
"link": "/docs/integrations/tools/zapier",
|
||||
"pricing": "Free up to 100 tasks/month",
|
||||
},
|
||||
"Zenguard AI": {
|
||||
"link": "/docs/integrations/tools/zenguard",
|
||||
"pricing": "Free up to 1000 requests/day",
|
||||
},
|
||||
}
|
||||
|
||||
WEBBROWSING_TOOL_FEAT_TABLE = {
|
||||
@@ -161,6 +220,164 @@ DATABASE_TOOL_FEAT_TABLE = {
|
||||
"link": "/docs/integrations/tools/cassandra_database",
|
||||
"operations": "SELECT and schema introspection",
|
||||
},
|
||||
"GraphQL Toolkit": {
|
||||
"link": "/docs/integrations/tools/graphql",
|
||||
"operations": "GraphQL queries",
|
||||
},
|
||||
}
|
||||
|
||||
# Maps each domain-specific search tool/toolkit to its docs link and the
# domain it searches; rendered into the "Domain Specific Search" table.
# NOTE: all links use the "/docs/integrations/..." prefix — several entries
# previously said "/docs/integration/..." (missing "s") and 404'd.
DOMAIN_SPECIFIC_SEARCH_TOOL_FEAT_TABLE = {
    "Amadeus": {"link": "/docs/integrations/tools/amadeus", "domain": "Travel"},
    "Alpha Vantage": {
        "link": "/docs/integrations/tools/alpha_vantage",
        "domain": "Finance",
    },
    "ArXiv": {"link": "/docs/integrations/tools/arxiv", "domain": "Research"},
    "AskNews": {"link": "/docs/integrations/tools/asknews", "domain": "News"},
    "Financial Datasets": {
        "link": "/docs/integrations/tools/financial_datasets",
        "domain": "Finance",
    },
    "Google Finance": {
        "link": "/docs/integrations/tools/google_finance",
        "domain": "Finance",
    },
    "Google Jobs": {"link": "/docs/integrations/tools/google_jobs", "domain": "Jobs"},
    "Google Scholar": {
        "link": "/docs/integrations/tools/google_scholar",
        "domain": "Research",
    },
    "Ionic Shopping": {
        "link": "/docs/integrations/tools/ionic_shopping",
        "domain": "Shopping",
    },
    "NASA": {"link": "/docs/integrations/tools/nasa", "domain": "Space"},
    "OpenWeatherMap": {
        "link": "/docs/integrations/tools/openweathermap",
        "domain": "Weather",
    },
    # Fixed typo: was "Passio Nutrion".
    "Passio Nutrition": {
        "link": "/docs/integrations/tools/passio_nutrition_ai",
        "domain": "Nutrition",
    },
    "Polygon IO": {"link": "/docs/integrations/tools/polygon", "domain": "Finance"},
    "PubMed": {"link": "/docs/integrations/tools/pubmed", "domain": "Medical Research"},
    "Reddit Search": {
        "link": "/docs/integrations/tools/reddit_search",
        "domain": "Social Media",
    },
    "Semantic Scholar": {
        "link": "/docs/integrations/tools/semanticscholar",
        "domain": "Research",
    },
    "Stack Exchange": {
        "link": "/docs/integrations/tools/stackexchange",
        "domain": "StackOverflow",
    },
    "Steam Toolkit": {"link": "/docs/integrations/tools/steam", "domain": "Gaming"},
    "Wikidata": {
        "link": "/docs/integrations/tools/wikidata",
        "domain": "General Knowledge",
    },
    "Wikipedia": {
        "link": "/docs/integrations/tools/wikipedia",
        "domain": "General Knowledge",
    },
    "Yahoo Finance": {
        "link": "/docs/integrations/tools/yahoo_finance_news",
        "domain": "Finance",
    },
    "YouTube": {"link": "/docs/integrations/tools/youtube", "domain": "YouTube"},
    "Golden Query": {
        "link": "/docs/integrations/tools/golden_query",
        "domain": "General Knowledge",
    },
}
|
||||
|
||||
# Maps each multimodal tool/toolkit to its docs link and the modalities it
# handles; rendered into the "Multimodal" table of the tools index page.
# NOTE: all links use the "/docs/integrations/..." prefix — the first five
# entries previously said "/docs/integration/..." (missing "s") and 404'd.
MULTIMODAL_TOOL_FEAT_TABLE = {
    "SceneXplain": {
        "link": "/docs/integrations/tools/sceneXplain",
        "modalities": "Images",
    },
    "Nuclia Understanding": {
        "link": "/docs/integrations/tools/nuclia",
        "modalities": "Images, Videos, Audio, Documents",
    },
    "NVIDIA Riva": {
        "link": "/docs/integrations/tools/nvidia_riva",
        "modalities": "Audio",
    },
    "Azure AI Services": {
        "link": "/docs/integrations/tools/azure_ai_services",
        "modalities": "Images, Videos, Audio, Documents",
    },
    "Azure Cognitive Services": {
        "link": "/docs/integrations/tools/azure_cognitive_services",
        "modalities": "Images, Videos, Audio, Documents",
    },
    "Dall-E Image Generator": {
        "link": "/docs/integrations/tools/dalle_image_generator",
        "modalities": "Images",
    },
    "Eden AI": {
        "link": "/docs/integrations/tools/edenai_tools",
        "modalities": "Images, Audio, Invoices",
    },
    "Eleven Labs": {
        "link": "/docs/integrations/tools/eleven_labs_tts",
        "modalities": "Audio",
    },
    "Google Cloud Text-to-Speech": {
        "link": "/docs/integrations/tools/google_cloud_texttospeech",
        "modalities": "Audio",
    },
    "Google Imagen": {
        "link": "/docs/integrations/tools/google_imagen",
        "modalities": "Images",
    },
    "Google Lens": {
        "link": "/docs/integrations/tools/google_lens",
        "modalities": "Images",
    },
}
|
||||
|
||||
# Maps each miscellaneous tool/toolkit (tools that do not fit the other
# category tables) to its docs link and a one-line description; rendered
# into the "Miscellaneous" table of the tools index page.
MISCELLANEOUS_TOOL_FEAT_TABLE = {
    "Dataherald": {
        "link": "/docs/integrations/tools/dataherald",
        "description": "Natural language to SQL API",
    },
    "File Management": {
        "link": "/docs/integrations/tools/filesystem",
        "description": "Manage your local file system",
    },
    "Gradio": {
        "link": "/docs/integrations/tools/gradio",
        "description": "Use Gradio ML apps in your agent",
    },
    "JSON Toolkit": {
        "link": "/docs/integrations/tools/json",
        "description": "Interact with large JSON blobs",
    },
    "OpenAPI": {
        "link": "/docs/integrations/tools/openapi",
        "description": "Consume arbitrary APIs conforming to the OpenAPI spec",
    },
    "Natural Language API": {
        "link": "/docs/integrations/tools/openapi_nla",
        "description": "Efficiently plan and combine calls across endpoints",
    },
    "Robocorp": {
        "link": "/docs/integrations/tools/robocorp",
        "description": "Integrate custom actions with your agents"
    },
    "Human as a tool": {
        "link": "/docs/integrations/tools/human_tools",
        "description": "Use human input as a tool",
    },
    "Memorize": {
        "link": "/docs/integrations/tools/memorize",
        "description": "Fine tune model to memorize data",
    },
}
|
||||
|
||||
TOOLS_TEMPLATE = """\
|
||||
@@ -210,6 +427,24 @@ The following table shows tools that can be used to automate tasks in databases:
|
||||
|
||||
{database_table}
|
||||
|
||||
## Domain Specific Search
|
||||
|
||||
The following table shows tools that can be used to search for specific types of data:
|
||||
|
||||
{domain_specific_search_table}
|
||||
|
||||
## Multimodal
|
||||
|
||||
The following table shows tools that can be used for dealing with multimodal data:
|
||||
|
||||
{multimodal_table}
|
||||
|
||||
## Miscellaneous
|
||||
|
||||
The following table shows tools that don't fit into the other categories:
|
||||
|
||||
{miscellaneous_table}
|
||||
|
||||
""" # noqa: E501
|
||||
|
||||
|
||||
@@ -258,6 +493,56 @@ def get_webbrowsing_table() -> str:
|
||||
return "\n".join(["|".join(row) for row in rows])
|
||||
|
||||
|
||||
def get_miscellaneous_table() -> str:
    """Render the miscellaneous tools as a Markdown table.

    Returns:
        A Markdown table string: a title row, an alignment row, then one row
        per tool (sorted by name) linking the tool to its docs page.
    """
    # Field keys in column order; the first column is built from the link.
    header = ["tool", "description"]
    title = ["Tool/Toolkit", "Description"]
    # Title row plus the Markdown alignment row (first column left-aligned,
    # the rest centered).
    rows = [title, [":-"] + [":-:"] * (len(title) - 1)]
    for miscellaneous_tool, feats in sorted(MISCELLANEOUS_TOOL_FEAT_TABLE.items()):
        # Fields are in the order of the header
        row = [
            f"[{miscellaneous_tool}]({feats['link']})",
        ]
        for h in header[1:]:
            # Default to "" so a missing field renders as an empty cell
            # instead of appending None and crashing the "|".join below.
            row.append(feats.get(h, ""))
        rows.append(row)
    return "\n".join(["|".join(row) for row in rows])
|
||||
|
||||
|
||||
def get_domain_specific_search_table() -> str:
    """Render the domain-specific search tools as a Markdown table.

    Returns:
        A Markdown table string: a title row, an alignment row, then one row
        per tool (sorted by name) with the tool's docs link and its domain.
    """
    # Field keys in column order; the first column is built from the link.
    header = ["tool", "domain"]
    title = ["Tool/Toolkit", "Domain"]
    # Title row plus the Markdown alignment row (first column left-aligned,
    # the rest centered).
    rows = [title, [":-"] + [":-:"] * (len(title) - 1)]
    for domain_specific_tool, feats in sorted(
        DOMAIN_SPECIFIC_SEARCH_TOOL_FEAT_TABLE.items()
    ):
        # Fields are in the order of the header
        row = [
            f"[{domain_specific_tool}]({feats['link']})",
        ]
        for h in header[1:]:
            # Default to "" so a missing field renders as an empty cell
            # instead of appending None and crashing the "|".join below.
            row.append(feats.get(h, ""))
        rows.append(row)
    return "\n".join(["|".join(row) for row in rows])
|
||||
|
||||
|
||||
def get_multimodal_table() -> str:
    """Render the multimodal tools as a Markdown table.

    Returns:
        A Markdown table string: a title row, an alignment row, then one row
        per tool (sorted by name) with the tool's docs link and modalities.
    """
    # Field keys in column order; the first column is built from the link.
    header = ["tool", "modalities"]
    # Fixed typo in the rendered column title: was "Modalties".
    title = ["Tool/Toolkit", "Modalities"]
    # Title row plus the Markdown alignment row (first column left-aligned,
    # the rest centered).
    rows = [title, [":-"] + [":-:"] * (len(title) - 1)]
    for multi_modal_tool, feats in sorted(MULTIMODAL_TOOL_FEAT_TABLE.items()):
        # Fields are in the order of the header
        row = [
            f"[{multi_modal_tool}]({feats['link']})",
        ]
        for h in header[1:]:
            # Default to "" so a missing field renders as an empty cell
            # instead of appending None and crashing the "|".join below.
            row.append(feats.get(h, ""))
        rows.append(row)
    return "\n".join(["|".join(row) for row in rows])
|
||||
|
||||
|
||||
def get_database_table() -> str:
|
||||
"""Get the table of database tools."""
|
||||
header = ["tool", "operations"]
|
||||
@@ -307,10 +592,12 @@ def get_code_interpreter_table() -> str:
|
||||
"Return Types",
|
||||
]
|
||||
rows = [title, [":-"] + [":-:"] * (len(title) - 1)]
|
||||
for search_tool, feats in sorted(CODE_INTERPRETER_TOOL_FEAT_TABLE.items()):
|
||||
for code_interpreter_tool, feats in sorted(
|
||||
CODE_INTERPRETER_TOOL_FEAT_TABLE.items()
|
||||
):
|
||||
# Fields are in the order of the header
|
||||
row = [
|
||||
f"[{search_tool}]({feats['link']})",
|
||||
f"[{code_interpreter_tool}]({feats['link']})",
|
||||
]
|
||||
for h in header[1:]:
|
||||
value = feats.get(h)
|
||||
@@ -337,6 +624,9 @@ if __name__ == "__main__":
|
||||
productivity_table=get_productivity_table(),
|
||||
webbrowsing_table=get_webbrowsing_table(),
|
||||
database_table=get_database_table(),
|
||||
domain_specific_search_table=get_domain_specific_search_table(),
|
||||
multimodal_table=get_multimodal_table(),
|
||||
miscellaneous_table=get_miscellaneous_table(),
|
||||
)
|
||||
with open(output_integrations_dir / "tools" / "index.mdx", "w") as f:
|
||||
f.write(tools_page)
|
||||
|
||||
@@ -85,6 +85,22 @@
|
||||
{
|
||||
"source": "/v0.2/docs/integrations/toolkits/:path(.*/?)*",
|
||||
"destination": "/v0.2/docs/integrations/tools/:path*"
|
||||
},
|
||||
{
|
||||
"source": "/v0.2/docs/integrations/tools/pandas",
|
||||
"destination": "/v0.1/docs/integrations/toolkits/pandas/"
|
||||
},
|
||||
{
|
||||
"source": "/v0.2/docs/integrations/tools/oracleai",
|
||||
"destination": "/v0.1/docs/integrations/tools/oracleai"
|
||||
},
|
||||
{
|
||||
"source": "/v0.2/docs/integrations/tools/google_places",
|
||||
"destination": "/v0.1/docs/integrations/tools/google_places"
|
||||
},
|
||||
{
|
||||
"source": "/v0.2/docs/integrations/tools/google_trends",
|
||||
"destination": "/v0.1/docs/integrations/tools/google_trends"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
338
libs/partners/ollama/langchain_ollama/testing.ipynb
Normal file
338
libs/partners/ollama/langchain_ollama/testing.ipynb
Normal file
@@ -0,0 +1,338 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import List, Tuple\n",
|
||||
"from typing_extensions import Annotated\n",
|
||||
"\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"from langchain_core.pydantic_v1 import BaseModel\n",
|
||||
"from langchain_core.tools import tool\n",
|
||||
"\n",
|
||||
"from langgraph.prebuilt import InjectedState\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool(parse_docstring=True, response_format=\"content_and_artifact\")\n",
|
||||
"def get_context(question: List[str]) -> Tuple[str, List[Document]]:\n",
|
||||
" \"\"\"Get context on the question.\n",
|
||||
"\n",
|
||||
" Args:\n",
|
||||
" question: The user question\n",
|
||||
" \"\"\"\n",
|
||||
" # return constant dummy output\n",
|
||||
" docs = [\n",
|
||||
" Document(\n",
|
||||
" \"FooBar company just raised 1 Billion dollars!\",\n",
|
||||
" metadata={\"source\": \"twitter\"},\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" \"FooBar company is now only hiring AI's\", metadata={\"source\": \"twitter\"}\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" \"FooBar company was founded in 2019\", metadata={\"source\": \"wikipedia\"}\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" \"FooBar company makes friendly robots\", metadata={\"source\": \"wikipedia\"}\n",
|
||||
" ),\n",
|
||||
" ]\n",
|
||||
" return \"\\n\\n\".join(doc.page_content for doc in docs), docs\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool(parse_docstring=True, response_format=\"content_and_artifact\")\n",
|
||||
"def cite_context_sources(\n",
|
||||
" claim: str, state: Annotated[dict, InjectedState]\n",
|
||||
") -> Tuple[str, List[Document]]:\n",
|
||||
" \"\"\"Cite which source a claim was based on.\n",
|
||||
"\n",
|
||||
" Args:\n",
|
||||
" claim: The claim that was made.\n",
|
||||
" \"\"\"\n",
|
||||
" docs = []\n",
|
||||
" # We get the potentially cited docs from past ToolMessages in our state.\n",
|
||||
" for msg in state[\"messages\"]:\n",
|
||||
" if isinstance(msg, ToolMessage) and msg.name == \"get_context\":\n",
|
||||
" docs.extend(msg.artifact)\n",
|
||||
"\n",
|
||||
" class Cite(BaseModel):\n",
|
||||
" \"\"\"Return the index(es) of the documents that justify the claim\"\"\"\n",
|
||||
"\n",
|
||||
" indexes: List[int]\n",
|
||||
"\n",
|
||||
" structured_model = model.with_structured_output(Cite)\n",
|
||||
" system = f\"Which of the following documents best justifies the claim:\\n\\n{claim}\"\n",
|
||||
" context = \"\\n\\n\".join(\n",
|
||||
" f\"Document {i}:\\n\" + doc.page_content for i, doc in enumerate(docs)\n",
|
||||
" )\n",
|
||||
" citation = structured_model.invoke([(\"system\", system), (\"human\", context)])\n",
|
||||
" cited_docs = [docs[i] for i in citation.indexes]\n",
|
||||
" sources = \", \".join(doc.metadata[\"source\"] for doc in cited_docs)\n",
|
||||
" return sources, cited_docs"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import operator\n",
|
||||
"from typing import Annotated, Sequence, TypedDict\n",
|
||||
"\n",
|
||||
"from langchain_core.messages import BaseMessage\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class AgentState(TypedDict):\n",
|
||||
" messages: Annotated[Sequence[BaseMessage], operator.add]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from copy import deepcopy\n",
|
||||
"\n",
|
||||
"from langchain_core.messages import ToolMessage\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"from langgraph.prebuilt import ToolNode\n",
|
||||
"\n",
|
||||
"model = ChatOpenAI(model=\"gpt-4o\", temperature=0)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Define the function that determines whether to continue or not\n",
|
||||
"def should_continue(state, config):\n",
|
||||
" messages = state[\"messages\"]\n",
|
||||
" last_message = messages[-1]\n",
|
||||
" # If there is no function call, then we finish\n",
|
||||
" if not last_message.tool_calls:\n",
|
||||
" return \"end\"\n",
|
||||
" # Otherwise if there is, we continue\n",
|
||||
" else:\n",
|
||||
" return \"continue\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"tools = [get_context, cite_context_sources]\n",
|
||||
"\n",
|
||||
"# Define the function that calls the model\n",
|
||||
"def call_model(state, config):\n",
|
||||
" messages = state[\"messages\"]\n",
|
||||
" model_with_tools = model.bind_tools(tools)\n",
|
||||
" response = model_with_tools.invoke(messages)\n",
|
||||
" # We return a list, because this will get added to the existing list\n",
|
||||
" return {\"messages\": [response]}\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# ToolNode will automatically take care of injecting state into tools\n",
|
||||
"tool_node = ToolNode(tools)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langgraph.graph import END, START, StateGraph\n",
|
||||
"\n",
|
||||
"# Define a new graph\n",
|
||||
"workflow = StateGraph(AgentState)\n",
|
||||
"\n",
|
||||
"# Define the two nodes we will cycle between\n",
|
||||
"workflow.add_node(\"agent\", call_model)\n",
|
||||
"workflow.add_node(\"action\", tool_node)\n",
|
||||
"\n",
|
||||
"# Set the entrypoint as `agent`\n",
|
||||
"# This means that this node is the first one called\n",
|
||||
"workflow.add_edge(START, \"agent\")\n",
|
||||
"\n",
|
||||
"# We now add a conditional edge\n",
|
||||
"workflow.add_conditional_edges(\n",
|
||||
" # First, we define the start node. We use `agent`.\n",
|
||||
" # This means these are the edges taken after the `agent` node is called.\n",
|
||||
" \"agent\",\n",
|
||||
" # Next, we pass in the function that will determine which node is called next.\n",
|
||||
" should_continue,\n",
|
||||
" # Finally we pass in a mapping.\n",
|
||||
" # The keys are strings, and the values are other nodes.\n",
|
||||
" # END is a special node marking that the graph should finish.\n",
|
||||
" # What will happen is we will call `should_continue`, and then the output of that\n",
|
||||
" # will be matched against the keys in this mapping.\n",
|
||||
" # Based on which one it matches, that node will then be called.\n",
|
||||
" {\n",
|
||||
" # If `tools`, then we call the tool node.\n",
|
||||
" \"continue\": \"action\",\n",
|
||||
" # Otherwise we finish.\n",
|
||||
" \"end\": END,\n",
|
||||
" },\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# We now add a normal edge from `tools` to `agent`.\n",
|
||||
"# This means that after `tools` is called, `agent` node is called next.\n",
|
||||
"workflow.add_edge(\"action\", \"agent\")\n",
|
||||
"\n",
|
||||
"# Finally, we compile it!\n",
|
||||
"# This compiles it into a LangChain Runnable,\n",
|
||||
"# meaning you can use it as you would any other runnable\n",
|
||||
"app = workflow.compile()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Output from node 'agent':\n",
|
||||
"---\n",
|
||||
"{'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_fjVgtlkC4uIYgmtWDiRC4PLJ', 'function': {'arguments': '{\"question\":[\"what\\'s the latest news about FooBar\"]}', 'name': 'get_context'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 22, 'prompt_tokens': 87, 'total_tokens': 109}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_3aa7262c27', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-b79ed8f3-f4cd-407c-9595-618b34561cf6-0', tool_calls=[{'name': 'get_context', 'args': {'question': [\"what's the latest news about FooBar\"]}, 'id': 'call_fjVgtlkC4uIYgmtWDiRC4PLJ', 'type': 'tool_call'}], usage_metadata={'input_tokens': 87, 'output_tokens': 22, 'total_tokens': 109})]}\n",
|
||||
"\n",
|
||||
"---\n",
|
||||
"\n",
|
||||
"Output from node 'action':\n",
|
||||
"---\n",
|
||||
"{'messages': [ToolMessage(content=\"FooBar company just raised 1 Billion dollars!\\n\\nFooBar company is now only hiring AI's\\n\\nFooBar company was founded in 2019\\n\\nFooBar company makes friendly robots\", name='get_context', tool_call_id='call_fjVgtlkC4uIYgmtWDiRC4PLJ', artifact=[Document(metadata={'source': 'twitter'}, page_content='FooBar company just raised 1 Billion dollars!'), Document(metadata={'source': 'twitter'}, page_content=\"FooBar company is now only hiring AI's\"), Document(metadata={'source': 'wikipedia'}, page_content='FooBar company was founded in 2019'), Document(metadata={'source': 'wikipedia'}, page_content='FooBar company makes friendly robots')])]}\n",
|
||||
"\n",
|
||||
"---\n",
|
||||
"\n",
|
||||
"Output from node 'agent':\n",
|
||||
"---\n",
|
||||
"{'messages': [AIMessage(content=\"Here are the latest news updates about FooBar:\\n\\n1. **Funding News**: FooBar company has recently raised $1 billion.\\n2. **Hiring Update**: FooBar company is now exclusively hiring AI's.\\n3. **Company Background**: FooBar company was founded in 2019 and specializes in making friendly robots.\", response_metadata={'token_usage': {'completion_tokens': 68, 'prompt_tokens': 153, 'total_tokens': 221}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_3aa7262c27', 'finish_reason': 'stop', 'logprobs': None}, id='run-a1c848e7-4d41-4ae2-8027-3735d509bd9c-0', usage_metadata={'input_tokens': 153, 'output_tokens': 68, 'total_tokens': 221})]}\n",
|
||||
"\n",
|
||||
"---\n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.messages import HumanMessage\n",
|
||||
"\n",
|
||||
"messages = [HumanMessage(\"what's the latest news about FooBar\")]\n",
|
||||
"for output in app.stream({\"messages\": messages}):\n",
|
||||
" # stream() yields dictionaries with output keyed by node name\n",
|
||||
" for key, value in output.items():\n",
|
||||
" print(f\"Output from node '{key}':\")\n",
|
||||
" print(\"---\")\n",
|
||||
" print(value)\n",
|
||||
" messages.extend(value[\"messages\"])\n",
|
||||
" print(\"\\n---\\n\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 23,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langgraph_sdk import get_client\n",
|
||||
"\n",
|
||||
"client = get_client(url=\"https://testing-imports-langgraph-r-a12e17b191b0592c8e5b9c432b7b8af7.default.us.langgraph.app\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 24,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"assistants = await client.assistants.search()\n",
|
||||
"assistant = assistants[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"ename": "TypeError",
|
||||
"evalue": "Assistants.create() got an unexpected keyword argument 'file_ids'",
|
||||
"output_type": "error",
|
||||
"traceback": [
|
||||
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
|
||||
"\u001b[0;31mTypeError\u001b[0m Traceback (most recent call last)",
|
||||
"Cell \u001b[0;32mIn[1], line 8\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[38;5;66;03m#from langchain.tools import E2BDataAnalysisTool\u001b[39;00m\n\u001b[1;32m 4\u001b[0m \n\u001b[1;32m 5\u001b[0m \n\u001b[1;32m 6\u001b[0m \u001b[38;5;66;03m#tools = [E2BDataAnalysisTool(api_key=\"...\")]\u001b[39;00m\n\u001b[1;32m 7\u001b[0m tools\u001b[38;5;241m=\u001b[39m[]\n\u001b[0;32m----> 8\u001b[0m agent \u001b[38;5;241m=\u001b[39m \u001b[43mOpenAIAssistantRunnable\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcreate_assistant\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 9\u001b[0m \u001b[43m \u001b[49m\u001b[43mname\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mlangchain assistant e2b tool\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 10\u001b[0m \u001b[43m \u001b[49m\u001b[43minstructions\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mYou are a personal math tutor. 
Write and run code to answer math questions.\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 11\u001b[0m \u001b[43m \u001b[49m\u001b[43mtools\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtools\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 12\u001b[0m \u001b[43m \u001b[49m\u001b[43mmodel\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mgpt-4-1106-preview\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 13\u001b[0m \u001b[43m \u001b[49m\u001b[43mas_agent\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\n\u001b[1;32m 14\u001b[0m \u001b[43m)\u001b[49m\n\u001b[1;32m 16\u001b[0m agent_executor \u001b[38;5;241m=\u001b[39m AgentExecutor(agent\u001b[38;5;241m=\u001b[39magent, tools\u001b[38;5;241m=\u001b[39mtools)\n\u001b[1;32m 17\u001b[0m agent_executor\u001b[38;5;241m.\u001b[39minvoke({\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mcontent\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mWhat\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124ms 10 - 4 raised to the 2.7\u001b[39m\u001b[38;5;124m\"\u001b[39m})\n",
|
||||
"File \u001b[0;32m~/.pyenv/versions/3.11.9/lib/python3.11/site-packages/langchain/agents/openai_assistant/base.py:270\u001b[0m, in \u001b[0;36mOpenAIAssistantRunnable.create_assistant\u001b[0;34m(cls, name, instructions, tools, model, client, **kwargs)\u001b[0m\n\u001b[1;32m 255\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124;03m\"\"\"Create an OpenAI Assistant and instantiate the Runnable.\u001b[39;00m\n\u001b[1;32m 256\u001b[0m \n\u001b[1;32m 257\u001b[0m \u001b[38;5;124;03mArgs:\u001b[39;00m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 267\u001b[0m \u001b[38;5;124;03m OpenAIAssistantRunnable configured to run using the created assistant.\u001b[39;00m\n\u001b[1;32m 268\u001b[0m \u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 269\u001b[0m client \u001b[38;5;241m=\u001b[39m client \u001b[38;5;129;01mor\u001b[39;00m _get_openai_client()\n\u001b[0;32m--> 270\u001b[0m assistant \u001b[38;5;241m=\u001b[39m \u001b[43mclient\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbeta\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43massistants\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcreate\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 271\u001b[0m \u001b[43m \u001b[49m\u001b[43mname\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mname\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 272\u001b[0m \u001b[43m \u001b[49m\u001b[43minstructions\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43minstructions\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 273\u001b[0m \u001b[43m \u001b[49m\u001b[43mtools\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m[\u001b[49m\u001b[43m_get_assistants_tool\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtool\u001b[49m\u001b[43m)\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mfor\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mtool\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01min\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mtools\u001b[49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;66;43;03m# 
type: ignore\u001b[39;49;00m\n\u001b[1;32m 274\u001b[0m \u001b[43m \u001b[49m\u001b[43mmodel\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mmodel\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 275\u001b[0m \u001b[43m \u001b[49m\u001b[43mfile_ids\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mfile_ids\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 276\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 277\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mcls\u001b[39m(assistant_id\u001b[38;5;241m=\u001b[39massistant\u001b[38;5;241m.\u001b[39mid, client\u001b[38;5;241m=\u001b[39mclient, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n",
|
||||
"\u001b[0;31mTypeError\u001b[0m: Assistants.create() got an unexpected keyword argument 'file_ids'"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_experimental.openai_assistant import OpenAIAssistantRunnable\n",
|
||||
"from langchain.agents import AgentExecutor\n",
|
||||
"#from langchain.tools import E2BDataAnalysisTool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"#tools = [E2BDataAnalysisTool(api_key=\"...\")]\n",
|
||||
"tools=[]\n",
|
||||
"agent = OpenAIAssistantRunnable.create_assistant(\n",
|
||||
" name=\"langchain assistant e2b tool\",\n",
|
||||
" instructions=\"You are a personal math tutor. Write and run code to answer math questions.\",\n",
|
||||
" tools=tools,\n",
|
||||
" model=\"gpt-4-1106-preview\",\n",
|
||||
" as_agent=True\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"agent_executor = AgentExecutor(agent=agent, tools=tools)\n",
|
||||
"agent_executor.invoke({\"content\": \"What's 10 - 4 raised to the 2.7\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"ename": "ModuleNotFoundError",
|
||||
"evalue": "No module named 'langgraph.checkpoint_sqlite'",
|
||||
"output_type": "error",
|
||||
"traceback": [
|
||||
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
|
||||
"\u001b[0;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)",
|
||||
"Cell \u001b[0;32mIn[3], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mlanggraph\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mcheckpoint_sqlite\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01maio\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m AsyncSqliteSaver\n",
|
||||
"\u001b[0;31mModuleNotFoundError\u001b[0m: No module named 'langgraph.checkpoint_sqlite'"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langgraph.checkpoint_sqlite.aio import AsyncSqliteSaver"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
Reference in New Issue
Block a user