langchain[patch], experimental[patch]: update utilities imports (#15438)
This commit is contained in:
parent 73da8f863c
commit baeac236b6
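The change applied throughout the diff below is a mechanical import move: utility wrappers that the docs and notebooks previously imported from `langchain.utilities` are now imported from `langchain_community.utilities` (and the corresponding dotted submodules). A minimal sketch of the pattern, using the `SQLDatabase` wrapper and the Chinook sample database that appear in several of the touched files:

```python
# Old import path, removed across the docs by this commit:
# from langchain.utilities import SQLDatabase

# New import path used from now on:
from langchain_community.utilities import SQLDatabase

# The wrapper itself is unchanged; only the providing package moves.
db = SQLDatabase.from_uri("sqlite:///Chinook.db")
print(db.get_usable_table_names())
```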

@@ -107,7 +107,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.utilities import SQLDatabase\n",
"from langchain_community.utilities import SQLDatabase\n",
"\n",
"db = SQLDatabase.from_uri(\"sqlite:///nba_roster.db\", sample_rows_in_table_info=0)\n",
"\n",

@@ -28,9 +28,9 @@
"outputs": [],
"source": [
"from langchain.agents import Tool\n",
"from langchain.utilities import SerpAPIWrapper\n",
"from langchain_community.tools.file_management.read import ReadFileTool\n",
"from langchain_community.tools.file_management.write import WriteFileTool\n",
"from langchain_community.utilities import SerpAPIWrapper\n",
"\n",
"search = SerpAPIWrapper()\n",
"tools = [\n",

@@ -108,8 +108,8 @@
"source": [
"from langchain.agents import AgentExecutor, Tool, ZeroShotAgent\n",
"from langchain.chains import LLMChain\n",
"from langchain.utilities import SerpAPIWrapper\n",
"from langchain_community.llms import OpenAI\n",
"from langchain_community.utilities import SerpAPIWrapper\n",
"\n",
"todo_prompt = PromptTemplate.from_template(\n",
" \"You are a planner who is an expert at coming up with a todo list for a given objective. Come up with a todo list for this objective: {objective}\"\n",

@@ -41,8 +41,8 @@
"from langchain.chains import LLMChain\n",
"from langchain.prompts import StringPromptTemplate\n",
"from langchain.schema import AgentAction, AgentFinish\n",
"from langchain.utilities import SerpAPIWrapper\n",
"from langchain_community.llms import OpenAI"
"from langchain_community.llms import OpenAI\n",
"from langchain_community.utilities import SerpAPIWrapper"
]
},
{

@@ -26,7 +26,7 @@
"outputs": [],
"source": [
"from langchain.agents import AgentExecutor, BaseMultiActionAgent, Tool\n",
"from langchain.utilities import SerpAPIWrapper"
"from langchain_community.utilities import SerpAPIWrapper"
]
},
{

@@ -80,7 +80,7 @@
"outputs": [],
"source": [
"# Connecting to Databricks with SQLDatabase wrapper\n",
"from langchain.utilities import SQLDatabase\n",
"from langchain_community.utilities import SQLDatabase\n",
"\n",
"db = SQLDatabase.from_databricks(catalog=\"samples\", schema=\"nyctaxi\")"
]

@@ -115,7 +115,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.utilities import SQLDatabaseChain\n",
"from langchain_community.utilities import SQLDatabaseChain\n",
"\n",
"db_chain = SQLDatabaseChain.from_llm(llm, db, verbose=True)"
]

@@ -74,9 +74,9 @@
" CallbackManagerForRetrieverRun,\n",
")\n",
"from langchain.schema import BaseRetriever, Document\n",
"from langchain.utilities import GoogleSerperAPIWrapper\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_community.llms import OpenAI"
"from langchain_community.llms import OpenAI\n",
"from langchain_community.utilities import GoogleSerperAPIWrapper"
]
},
{

@@ -32,8 +32,8 @@
"\n",
"from langchain.chains import LLMChain\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain.utilities import SQLDatabase\n",
"from langchain_community.llms import OpenAI\n",
"from langchain_community.utilities import SQLDatabase\n",
"from langchain_experimental.sql.vector_sql import VectorSQLDatabaseChain\n",
"from sqlalchemy import MetaData, create_engine\n",
"\n",

@@ -75,8 +75,8 @@
"outputs": [],
"source": [
"from langchain.callbacks import StdOutCallbackHandler\n",
"from langchain.utilities.sql_database import SQLDatabase\n",
"from langchain_community.llms import OpenAI\n",
"from langchain_community.utilities.sql_database import SQLDatabase\n",
"from langchain_experimental.sql.prompt import MYSCALE_PROMPT\n",
"from langchain_experimental.sql.vector_sql import VectorSQLDatabaseChain\n",
"\n",

@@ -30,9 +30,9 @@
"outputs": [],
"source": [
"from langchain.chains import LLMMathChain\n",
"from langchain.utilities import DuckDuckGoSearchAPIWrapper\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_community.llms import OpenAI\n",
"from langchain_community.utilities import DuckDuckGoSearchAPIWrapper\n",
"from langchain_core.tools import Tool\n",
"from langchain_experimental.plan_and_execute import (\n",
" PlanAndExecute,\n",

@@ -32,8 +32,8 @@
"outputs": [],
"source": [
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.utilities import DuckDuckGoSearchAPIWrapper\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_community.utilities import DuckDuckGoSearchAPIWrapper\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnablePassthrough"
]

@@ -26,8 +26,8 @@
"from langchain.chains import LLMChain\n",
"from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain.utilities import GoogleSearchAPIWrapper\n",
"from langchain_community.llms import OpenAI"
"from langchain_community.llms import OpenAI\n",
"from langchain_community.utilities import GoogleSearchAPIWrapper"
]
},
{

@@ -10,7 +10,7 @@ To set it up, follow the instructions on https://database.guide/2-sample-databas

```python
from langchain_community.llms import OpenAI
from langchain.utilities import SQLDatabase
from langchain_community.utilities import SQLDatabase
from langchain_experimental.sql import SQLDatabaseChain
```

@@ -201,7 +201,7 @@ How to add memory to a SQLDatabaseChain:

```python
from langchain_community.llms import OpenAI
from langchain.utilities import SQLDatabase
from langchain_community.utilities import SQLDatabase
from langchain_experimental.sql import SQLDatabaseChain
```

@@ -679,7 +679,7 @@ local_llm = HuggingFacePipeline(pipeline=pipe)

```python
from langchain.utilities import SQLDatabase
from langchain_community.utilities import SQLDatabase
from langchain_experimental.sql import SQLDatabaseChain

db = SQLDatabase.from_uri("sqlite:///../../../../notebooks/Chinook.db", include_tables=['Customer'])

@@ -129,7 +129,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.utilities import DuckDuckGoSearchAPIWrapper\n",
"from langchain_community.utilities import DuckDuckGoSearchAPIWrapper\n",
"\n",
"search = DuckDuckGoSearchAPIWrapper(max_results=4)\n",
"\n",

@@ -29,9 +29,9 @@
"from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
"from langchain.prompts.chat import ChatPromptValue\n",
"from langchain.tools import WikipediaQueryRun\n",
"from langchain.utilities import WikipediaAPIWrapper\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_community.tools.convert_to_openai import format_tool_to_openai_function"
"from langchain_community.tools.convert_to_openai import format_tool_to_openai_function\n",
"from langchain_community.utilities import WikipediaAPIWrapper"
]
},
{

@@ -43,7 +43,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.utilities import SQLDatabase"
"from langchain_community.utilities import SQLDatabase"
]
},
{

@@ -99,8 +99,8 @@
"outputs": [],
"source": [
"from langchain.agents import AgentType, Tool, initialize_agent\n",
"from langchain.utilities import SerpAPIWrapper\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_community.utilities import SerpAPIWrapper\n",
"\n",
"# Initialize the language model\n",
"# You can add your own OpenAI API key by adding openai_api_key=\"<your_api_key>\"\n",

@@ -141,7 +141,7 @@
"outputs": [],
"source": [
"from langchain.chains import SelfAskWithSearchChain\n",
"from langchain.utilities import SerpAPIWrapper\n",
"from langchain_community.utilities import SerpAPIWrapper\n",
"\n",
"open_ai_llm = OpenAI(temperature=0)\n",
"search = SerpAPIWrapper()\n",

@@ -309,7 +309,7 @@
" ReActJsonSingleInputOutputParser,\n",
")\n",
"from langchain.tools.render import render_text_description\n",
"from langchain.utilities import SerpAPIWrapper"
"from langchain_community.utilities import SerpAPIWrapper"
]
},
{

@@ -119,7 +119,7 @@
"outputs": [],
"source": [
"from langchain.tools import Tool\n",
"from langchain.utilities import GoogleSearchAPIWrapper\n",
"from langchain_community.utilities import GoogleSearchAPIWrapper\n",
"\n",
"search = GoogleSearchAPIWrapper()\n",
"\n",

@@ -173,7 +173,7 @@
"metadata": {},
"outputs": [],
"source": [
"import langchain.utilities.opaqueprompts as op\n",
"import langchain_community.utilities.opaqueprompts as op\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",

@@ -53,8 +53,8 @@
"from langchain.memory import ZepMemory\n",
"from langchain.retrievers import ZepRetriever\n",
"from langchain.schema import AIMessage, HumanMessage\n",
"from langchain.utilities import WikipediaAPIWrapper\n",
"from langchain_community.llms import OpenAI\n",
"from langchain_community.utilities import WikipediaAPIWrapper\n",
"\n",
"# Set this to your Zep server URL\n",
"ZEP_API_URL = \"http://localhost:8000\"\n",

@@ -333,7 +333,7 @@ pip install google-api-python-client google-auth-httplib2 google-auth-oauthlib
See a [usage example and authorization instructions](/docs/integrations/tools/google_drive).

```python
from langchain.utilities.google_drive import GoogleDriveAPIWrapper
from langchain_community.utilities.google_drive import GoogleDriveAPIWrapper
from langchain_community.tools.google_drive.tool import GoogleDriveSearchTool
```

@@ -358,7 +358,7 @@ from langchain.tools import GooglePlacesTool
`GOOGLE_API_KEY` and `GOOGLE_CSE_ID` respectively.

```python
from langchain.utilities import GoogleSearchAPIWrapper
from langchain_community.utilities import GoogleSearchAPIWrapper
```

For a more detailed walkthrough of this wrapper, see [this notebook](/docs/integrations/tools/google_search).

@@ -382,7 +382,7 @@ See a [usage example and authorization instructions](/docs/integrations/tools/go

```python
from langchain_community.tools.google_finance import GoogleFinanceQueryRun
from langchain.utilities.google_finance import GoogleFinanceAPIWrapper
from langchain_community.utilities.google_finance import GoogleFinanceAPIWrapper
```

### Google Jobs

@@ -397,7 +397,7 @@ See a [usage example and authorization instructions](/docs/integrations/tools/go

```python
from langchain_community.tools.google_jobs import GoogleJobsQueryRun
from langchain.utilities.google_finance import GoogleFinanceAPIWrapper
from langchain_community.utilities.google_finance import GoogleFinanceAPIWrapper
```

### Google Lens

@@ -406,7 +406,7 @@ See a [usage example and authorization instructions](/docs/integrations/tools/go

```python
from langchain_community.tools.google_lens import GoogleLensQueryRun
from langchain.utilities.google_lens import GoogleLensAPIWrapper
from langchain_community.utilities.google_lens import GoogleLensAPIWrapper
```

### Google Scholar

@@ -421,7 +421,7 @@ See a [usage example and authorization instructions](/docs/integrations/tools/go

```python
from langchain_community.tools.google_scholar import GoogleScholarQueryRun
from langchain.utilities.google_scholar import GoogleScholarAPIWrapper
from langchain_community.utilities.google_scholar import GoogleScholarAPIWrapper
```

### Google Trends

@@ -436,7 +436,7 @@ See a [usage example and authorization instructions](/docs/integrations/tools/go

```python
from langchain_community.tools.google_trends import GoogleTrendsQueryRun
from langchain.utilities.google_trends import GoogleTrendsAPIWrapper
from langchain_community.utilities.google_trends import GoogleTrendsAPIWrapper
```

@@ -536,7 +536,7 @@ from langchain_community.chat_loaders.gmail import GMailLoader
See [usage examples and authorization instructions](/docs/integrations/tools/searchapi).

```python
from langchain.utilities import SearchApiAPIWrapper
from langchain_community.utilities import SearchApiAPIWrapper
```

### SerpAPI

@@ -546,7 +546,7 @@ from langchain.utilities import SearchApiAPIWrapper
See a [usage example and authorization instructions](/docs/integrations/tools/serpapi).

```python
from langchain.utilities import SerpAPIWrapper
from langchain_community.utilities import SerpAPIWrapper
```

### Serper.dev

@@ -554,7 +554,7 @@ from langchain.utilities import SerpAPIWrapper
See a [usage example and authorization instructions](/docs/integrations/tools/google_serper).

```python
from langchain.utilities import GoogleSerperAPIWrapper
from langchain_community.utilities import GoogleSerperAPIWrapper
```

### YouTube

@@ -272,7 +272,7 @@ from langchain.retrievers import AzureCognitiveSearchRetriever
See a [usage example](/docs/integrations/tools/bing_search).

```python
from langchain.utilities import BingSearchAPIWrapper
from langchain_community.utilities import BingSearchAPIWrapper
```

## Toolkits

@@ -317,7 +317,7 @@ See a [usage example](/docs/integrations/toolkits/powerbi).

```python
from langchain_community.agent_toolkits import PowerBIToolkit
from langchain.utilities.powerbi import PowerBIDataset
from langchain_community.utilities.powerbi import PowerBIDataset
```

## More

@@ -119,5 +119,5 @@ from langchain.adapters import openai as lc_openai
See a [usage example](/docs/integrations/tools/dalle_image_generator).

```python
from langchain.utilities.dalle_image_generator import DallEAPIWrapper
from langchain_community.utilities.dalle_image_generator import DallEAPIWrapper
```

@@ -29,7 +29,7 @@ blogs, or knowledge bases.
You can use the `ApifyWrapper` to run Actors on the Apify platform.

```python
from langchain.utilities import ApifyWrapper
from langchain_community.utilities import ApifyWrapper
```

For a more detailed walkthrough of this wrapper, see [this notebook](/docs/integrations/tools/apify).

@@ -31,7 +31,7 @@ Args:
## Examples
```python
# Connecting to CnosDB with SQLDatabase Wrapper
from langchain.utilities import SQLDatabase
from langchain_community.utilities import SQLDatabase

db = SQLDatabase.from_cnosdb()
```

@@ -45,7 +45,7 @@ llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo")
### SQL Database Chain
This example demonstrates the use of the SQL Chain for answering a question over a CnosDB.
```python
from langchain.utilities import SQLDatabaseChain
from langchain_community.utilities import SQLDatabaseChain

db_chain = SQLDatabaseChain.from_llm(llm, db, verbose=True)

@@ -22,7 +22,7 @@ os.environ["DATAFORSEO_PASSWORD"] = "your_password"
The DataForSEO utility wraps the API. To import this utility, use:

```python
from langchain.utilities.dataforseo_api_search import DataForSeoAPIWrapper
from langchain_community.utilities.dataforseo_api_search import DataForSeoAPIWrapper
```

For a detailed walkthrough of this wrapper, see [this notebook](/docs/integrations/tools/dataforseo).

@@ -17,7 +17,7 @@
There exists a GoldenQueryAPIWrapper utility which wraps this API. To import this utility:

```python
from langchain.utilities.golden_query import GoldenQueryAPIWrapper
from langchain_community.utilities.golden_query import GoldenQueryAPIWrapper
```

For a more detailed walkthrough of this wrapper, see [this notebook](/docs/integrations/tools/golden_query).

@@ -14,13 +14,13 @@ It is broken into two parts: setup, and then references to the specific Google S
There exists a GoogleSerperAPIWrapper utility which wraps this API. To import this utility:

```python
from langchain.utilities import GoogleSerperAPIWrapper
from langchain_community.utilities import GoogleSerperAPIWrapper
```

You can use it as part of a Self Ask chain:

```python
from langchain.utilities import GoogleSerperAPIWrapper
from langchain_community.utilities import GoogleSerperAPIWrapper
from langchain_community.llms.openai import OpenAI
from langchain.agents import initialize_agent, Tool
from langchain.agents import AgentType

@@ -26,7 +26,7 @@ pip install pyowm
There exists a OpenWeatherMapAPIWrapper utility which wraps this API. To import this utility:

```python
from langchain.utilities.openweathermap import OpenWeatherMapAPIWrapper
from langchain_community.utilities.openweathermap import OpenWeatherMapAPIWrapper
```

For a more detailed walkthrough of this wrapper, see [this notebook](/docs/integrations/tools/openweathermap).

@@ -21,7 +21,7 @@ To start, get your Portkey API key by [signing up here](https://app.portkey.ai/l
For OpenAI, a simple integration with logging feature would look like this:
```python
from langchain_community.llms import OpenAI
from langchain.utilities import Portkey
from langchain_community.utilities import Portkey

# Add the Portkey API Key from your account
headers = Portkey.Config(

@@ -40,7 +40,7 @@ A common Portkey X Langchain use case is to **trace a chain or an agent** and vi
```python
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain_community.llms import OpenAI
from langchain.utilities import Portkey
from langchain_community.utilities import Portkey

# Add the Portkey API Key from your account
headers = Portkey.Config(

@@ -27,8 +27,8 @@
"import os\n",
"\n",
"from langchain.agents import AgentType, initialize_agent, load_tools\n",
"from langchain.utilities import Portkey\n",
"from langchain_community.llms import OpenAI"
"from langchain_community.llms import OpenAI\n",
"from langchain_community.utilities import Portkey"
]
},
{

@@ -14,13 +14,13 @@ This page covers how to use the [SearchApi](https://www.searchapi.io/) Google Se
There is a SearchApiAPIWrapper utility which wraps this API. To import this utility:

```python
from langchain.utilities import SearchApiAPIWrapper
from langchain_community.utilities import SearchApiAPIWrapper
```

You can use it as part of a Self Ask chain:

```python
from langchain.utilities import SearchApiAPIWrapper
from langchain_community.utilities import SearchApiAPIWrapper
from langchain_community.llms.openai import OpenAI
from langchain.agents import initialize_agent, Tool
from langchain.agents import AgentType

@@ -40,7 +40,7 @@ To use the wrapper we need to pass the host of the SearxNG instance to the wrapp
You can use the wrapper to get results from a SearxNG instance.

```python
from langchain.utilities import SearxSearchWrapper
from langchain_community.utilities import SearxSearchWrapper
s = SearxSearchWrapper(searx_host="http://localhost:8888")
s.run("what is a large language model?")
```

@@ -14,7 +14,7 @@ It is broken into two parts: installation and setup, and then references to the
There exists a SerpAPI utility which wraps this API. To import this utility:

```python
from langchain.utilities import SerpAPIWrapper
from langchain_community.utilities import SerpAPIWrapper
```

For a more detailed walkthrough of this wrapper, see [this notebook](/docs/integrations/tools/serpapi).

@@ -19,7 +19,7 @@ pip install stackapi
There exists a StackExchangeAPIWrapper utility which wraps this API. To import this utility:

```python
from langchain.utilities import StackExchangeAPIWrapper
from langchain_community.utilities import StackExchangeAPIWrapper
```

For a more detailed walkthrough of this wrapper, see [this notebook](/docs/integrations/tools/stackexchange).

@@ -22,7 +22,7 @@ pip install wolframalpha
There exists a WolframAlphaAPIWrapper utility which wraps this API. To import this utility:

```python
from langchain.utilities.wolfram_alpha import WolframAlphaAPIWrapper
from langchain_community.utilities.wolfram_alpha import WolframAlphaAPIWrapper
```

For a more detailed walkthrough of this wrapper, see [this notebook](/docs/integrations/tools/wolfram_alpha).

@@ -89,7 +89,7 @@
{
"data": {
"text/plain": [
"[Document(page_content='This walkthrough demonstrates how to use an agent optimized for conversation. Other agents are often optimized for using tools to figure out the best response, which is not ideal in a conversational setting where you may want the agent to be able to chat with the user as well.\\n\\nIf we compare it to the standard ReAct agent, the main difference is the prompt. We want it to be much more conversational.\\n\\nfrom langchain.agents import AgentType, Tool, initialize_agent\\n\\nfrom langchain_community.llms import OpenAI\\n\\nfrom langchain.memory import ConversationBufferMemory\\n\\nfrom langchain.utilities import SerpAPIWrapper\\n\\nsearch = SerpAPIWrapper() tools = \\\\[ Tool( name=\"Current Search\", func=search.run, description=\"useful for when you need to answer questions about current events or the current state of the world\", ), \\\\]\\n\\n\\\\\\nllm = OpenAI(temperature=0)\\n\\nUsing LCEL\\n\\nWe will first show how to create this agent using LCEL\\n\\nfrom langchain import hub\\n\\nfrom langchain.agents.format_scratchpad import format_log_to_str\\n\\nfrom langchain.agents.output_parsers import ReActSingleInputOutputParser\\n\\nfrom langchain.tools.render import render_text_description\\n\\nprompt = hub.pull(\"hwchase17/react-chat\")\\n\\nprompt = prompt.partial( tools=render_text_description(tools), tool_names=\", \".join(\\\\[[t.name](http://t.name) for t in tools\\\\]), )\\n\\nllm_with_stop = llm.bind(stop=\\\\[\"\\\\nObservation\"\\\\])\\n\\nagent = ( { \"input\": lambda x: x\\\\[\"input\"\\\\], \"agent_scratchpad\": lambda x: format_log_to_str(x\\\\[\"intermediate_steps\"\\\\]), \"chat_history\": lambda x: x\\\\[\"chat_history\"\\\\], } | prompt | llm_with_stop | ReActSingleInputOutputParser() )\\n\\nfrom langchain.agents import AgentExecutor\\n\\nmemory = ConversationBufferMemory(memory_key=\"chat_history\") agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True, memory=memory)\\n\\nagent_executor.invoke({\"input\": \"hi, i am bob\"})\\\\[\"output\"\\\\]\\n\\n```\\n> Entering new AgentExecutor chain...\\n\\nThought: Do I need to use a tool? No\\nFinal Answer: Hi Bob, nice to meet you! How can I help you today?\\n\\n> Finished chain.\\n```\\n\\n\\\\\\n\\'Hi Bob, nice to meet you! How can I help you today?\\'\\n\\nagent_executor.invoke({\"input\": \"whats my name?\"})\\\\[\"output\"\\\\]\\n\\n```\\n> Entering new AgentExecutor chain...\\n\\nThought: Do I need to use a tool? No\\nFinal Answer: Your name is Bob.\\n\\n> Finished chain.\\n```\\n\\n\\\\\\n\\'Your name is Bob.\\'\\n\\nagent_executor.invoke({\"input\": \"what are some movies showing 9/21/2023?\"})\\\\[\"output\"\\\\]\\n\\n```\\n> Entering new AgentExecutor chain...\\n\\nThought: Do I need to use a tool? Yes\\nAction: Current Search\\nAction Input: Movies showing 9/21/2023[\\'September 2023 Movies: The Creator • Dumb Money • Expend4bles • The Kill Room • The Inventor • The Equalizer 3 • PAW Patrol: The Mighty Movie, ...\\'] Do I need to use a tool? 
No\\nFinal Answer: According to current search, some movies showing on 9/21/2023 are The Creator, Dumb Money, Expend4bles, The Kill Room, The Inventor, The Equalizer 3, and PAW Patrol: The Mighty Movie.\\n\\n> Finished chain.\\n```\\n\\n\\\\\\n\\'According to current search, some movies showing on 9/21/2023 are The Creator, Dumb Money, Expend4bles, The Kill Room, The Inventor, The Equalizer 3, and PAW Patrol: The Mighty Movie.\\'\\n\\n\\\\\\nUse the off-the-shelf agent\\n\\nWe can also create this agent using the off-the-shelf agent class\\n\\nagent_executor = initialize_agent( tools, llm, agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION, verbose=True, memory=memory, )\\n\\nUse a chat model\\n\\nWe can also use a chat model here. The main difference here is in the prompts used.\\n\\nfrom langchain import hub\\n\\nfrom langchain_community.chat_models import ChatOpenAI\\n\\nprompt = hub.pull(\"hwchase17/react-chat-json\") chat_model = ChatOpenAI(temperature=0, model=\"gpt-4\")\\n\\nprompt = prompt.partial( tools=render_text_description(tools), tool_names=\", \".join(\\\\[[t.name](http://t.name) for t in tools\\\\]), )\\n\\nchat_model_with_stop = chat_model.bind(stop=\\\\[\"\\\\nObservation\"\\\\])\\n\\nfrom langchain.agents.format_scratchpad import format_log_to_messages\\n\\nfrom langchain.agents.output_parsers import JSONAgentOutputParser\\n\\n# We need some extra steering, or the c', metadata={'title': 'Conversational', 'source': 'https://d01.getoutline.com/doc/conversational-B5dBkUgQ4b'}),\n",
"[Document(page_content='This walkthrough demonstrates how to use an agent optimized for conversation. Other agents are often optimized for using tools to figure out the best response, which is not ideal in a conversational setting where you may want the agent to be able to chat with the user as well.\\n\\nIf we compare it to the standard ReAct agent, the main difference is the prompt. We want it to be much more conversational.\\n\\nfrom langchain.agents import AgentType, Tool, initialize_agent\\n\\nfrom langchain_community.llms import OpenAI\\n\\nfrom langchain.memory import ConversationBufferMemory\\n\\nfrom langchain_community.utilities import SerpAPIWrapper\\n\\nsearch = SerpAPIWrapper() tools = \\\\[ Tool( name=\"Current Search\", func=search.run, description=\"useful for when you need to answer questions about current events or the current state of the world\", ), \\\\]\\n\\n\\\\\\nllm = OpenAI(temperature=0)\\n\\nUsing LCEL\\n\\nWe will first show how to create this agent using LCEL\\n\\nfrom langchain import hub\\n\\nfrom langchain.agents.format_scratchpad import format_log_to_str\\n\\nfrom langchain.agents.output_parsers import ReActSingleInputOutputParser\\n\\nfrom langchain.tools.render import render_text_description\\n\\nprompt = hub.pull(\"hwchase17/react-chat\")\\n\\nprompt = prompt.partial( tools=render_text_description(tools), tool_names=\", \".join(\\\\[[t.name](http://t.name) for t in tools\\\\]), )\\n\\nllm_with_stop = llm.bind(stop=\\\\[\"\\\\nObservation\"\\\\])\\n\\nagent = ( { \"input\": lambda x: x\\\\[\"input\"\\\\], \"agent_scratchpad\": lambda x: format_log_to_str(x\\\\[\"intermediate_steps\"\\\\]), \"chat_history\": lambda x: x\\\\[\"chat_history\"\\\\], } | prompt | llm_with_stop | ReActSingleInputOutputParser() )\\n\\nfrom langchain.agents import AgentExecutor\\n\\nmemory = ConversationBufferMemory(memory_key=\"chat_history\") agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True, memory=memory)\\n\\nagent_executor.invoke({\"input\": \"hi, i am bob\"})\\\\[\"output\"\\\\]\\n\\n```\\n> Entering new AgentExecutor chain...\\n\\nThought: Do I need to use a tool? No\\nFinal Answer: Hi Bob, nice to meet you! How can I help you today?\\n\\n> Finished chain.\\n```\\n\\n\\\\\\n\\'Hi Bob, nice to meet you! How can I help you today?\\'\\n\\nagent_executor.invoke({\"input\": \"whats my name?\"})\\\\[\"output\"\\\\]\\n\\n```\\n> Entering new AgentExecutor chain...\\n\\nThought: Do I need to use a tool? No\\nFinal Answer: Your name is Bob.\\n\\n> Finished chain.\\n```\\n\\n\\\\\\n\\'Your name is Bob.\\'\\n\\nagent_executor.invoke({\"input\": \"what are some movies showing 9/21/2023?\"})\\\\[\"output\"\\\\]\\n\\n```\\n> Entering new AgentExecutor chain...\\n\\nThought: Do I need to use a tool? Yes\\nAction: Current Search\\nAction Input: Movies showing 9/21/2023[\\'September 2023 Movies: The Creator • Dumb Money • Expend4bles • The Kill Room • The Inventor • The Equalizer 3 • PAW Patrol: The Mighty Movie, ...\\'] Do I need to use a tool? 
No\\nFinal Answer: According to current search, some movies showing on 9/21/2023 are The Creator, Dumb Money, Expend4bles, The Kill Room, The Inventor, The Equalizer 3, and PAW Patrol: The Mighty Movie.\\n\\n> Finished chain.\\n```\\n\\n\\\\\\n\\'According to current search, some movies showing on 9/21/2023 are The Creator, Dumb Money, Expend4bles, The Kill Room, The Inventor, The Equalizer 3, and PAW Patrol: The Mighty Movie.\\'\\n\\n\\\\\\nUse the off-the-shelf agent\\n\\nWe can also create this agent using the off-the-shelf agent class\\n\\nagent_executor = initialize_agent( tools, llm, agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION, verbose=True, memory=memory, )\\n\\nUse a chat model\\n\\nWe can also use a chat model here. The main difference here is in the prompts used.\\n\\nfrom langchain import hub\\n\\nfrom langchain_community.chat_models import ChatOpenAI\\n\\nprompt = hub.pull(\"hwchase17/react-chat-json\") chat_model = ChatOpenAI(temperature=0, model=\"gpt-4\")\\n\\nprompt = prompt.partial( tools=render_text_description(tools), tool_names=\", \".join(\\\\[[t.name](http://t.name) for t in tools\\\\]), )\\n\\nchat_model_with_stop = chat_model.bind(stop=\\\\[\"\\\\nObservation\"\\\\])\\n\\nfrom langchain.agents.format_scratchpad import format_log_to_messages\\n\\nfrom langchain.agents.output_parsers import JSONAgentOutputParser\\n\\n# We need some extra steering, or the c', metadata={'title': 'Conversational', 'source': 'https://d01.getoutline.com/doc/conversational-B5dBkUgQ4b'}),\n",
" Document(page_content='Quickstart\\n\\nIn this quickstart we\\'ll show you how to:\\n\\nGet setup with LangChain, LangSmith and LangServe\\n\\nUse the most basic and common components of LangChain: prompt templates, models, and output parsers\\n\\nUse LangChain Expression Language, the protocol that LangChain is built on and which facilitates component chaining\\n\\nBuild a simple application with LangChain\\n\\nTrace your application with LangSmith\\n\\nServe your application with LangServe\\n\\nThat\\'s a fair amount to cover! Let\\'s dive in.\\n\\nSetup\\n\\nInstallation\\n\\nTo install LangChain run:\\n\\nPip\\n\\nConda\\n\\npip install langchain\\n\\nFor more details, see our Installation guide.\\n\\nEnvironment\\n\\nUsing LangChain will usually require integrations with one or more model providers, data stores, APIs, etc. For this example, we\\'ll use OpenAI\\'s model APIs.\\n\\nFirst we\\'ll need to install their Python package:\\n\\npip install openai\\n\\nAccessing the API requires an API key, which you can get by creating an account and heading here. Once we have a key we\\'ll want to set it as an environment variable by running:\\n\\nexport OPENAI_API_KEY=\"...\"\\n\\nIf you\\'d prefer not to set an environment variable you can pass the key in directly via the openai_api_key named parameter when initiating the OpenAI LLM class:\\n\\nfrom langchain_community.chat_models import ChatOpenAI\\n\\nllm = ChatOpenAI(openai_api_key=\"...\")\\n\\nLangSmith\\n\\nMany of the applications you build with LangChain will contain multiple steps with multiple invocations of LLM calls. As these applications get more and more complex, it becomes crucial to be able to inspect what exactly is going on inside your chain or agent. The best way to do this is with LangSmith.\\n\\nNote that LangSmith is not needed, but it is helpful. If you do want to use LangSmith, after you sign up at the link above, make sure to set your environment variables to start logging traces:\\n\\nexport LANGCHAIN_TRACING_V2=\"true\" export LANGCHAIN_API_KEY=...\\n\\nLangServe\\n\\nLangServe helps developers deploy LangChain chains as a REST API. You do not need to use LangServe to use LangChain, but in this guide we\\'ll show how you can deploy your app with LangServe.\\n\\nInstall with:\\n\\npip install \"langserve\\\\[all\\\\]\"\\n\\nBuilding with LangChain\\n\\nLangChain provides many modules that can be used to build language model applications. Modules can be used as standalones in simple applications and they can be composed for more complex use cases. Composition is powered by LangChain Expression Language (LCEL), which defines a unified Runnable interface that many modules implement, making it possible to seamlessly chain components.\\n\\nThe simplest and most common chain contains three things:\\n\\nLLM/Chat Model: The language model is the core reasoning engine here. In order to work with LangChain, you need to understand the different types of language models and how to work with them. Prompt Template: This provides instructions to the language model. This controls what the language model outputs, so understanding how to construct prompts and different prompting strategies is crucial. Output Parser: These translate the raw response from the language model to a more workable format, making it easy to use the output downstream. In this guide we\\'ll cover those three components individually, and then go over how to combine them. 
Understanding these concepts will set you up well for being able to use and customize LangChain applications. Most LangChain applications allow you to configure the model and/or the prompt, so knowing how to take advantage of this will be a big enabler.\\n\\nLLM / Chat Model\\n\\nThere are two types of language models:\\n\\nLLM: underlying model takes a string as input and returns a string\\n\\nChatModel: underlying model takes a list of messages as input and returns a message\\n\\nStrings are simple, but what exactly are messages? The base message interface is defined by BaseMessage, which has two required attributes:\\n\\ncontent: The content of the message. Usually a string. role: The entity from which the BaseMessage is coming. LangChain provides several ob', metadata={'title': 'Quick Start', 'source': 'https://d01.getoutline.com/doc/quick-start-jGuGGGOTuL'}),\n",
" Document(page_content='This walkthrough showcases using an agent to implement the [ReAct](https://react-lm.github.io/) logic.\\n\\n```javascript\\nfrom langchain.agents import AgentType, initialize_agent, load_tools\\nfrom langchain_community.llms import OpenAI\\n```\\n\\nFirst, let\\'s load the language model we\\'re going to use to control the agent.\\n\\n```javascript\\nllm = OpenAI(temperature=0)\\n```\\n\\nNext, let\\'s load some tools to use. Note that the llm-math tool uses an LLM, so we need to pass that in.\\n\\n```javascript\\ntools = load_tools([\"serpapi\", \"llm-math\"], llm=llm)\\n```\\n\\n## Using LCEL[\\u200b](https://python.langchain.com/docs/modules/agents/agent_types/react#using-lcel \"Direct link to Using LCEL\")\\n\\nWe will first show how to create the agent using LCEL\\n\\n```javascript\\nfrom langchain import hub\\nfrom langchain.agents.format_scratchpad import format_log_to_str\\nfrom langchain.agents.output_parsers import ReActSingleInputOutputParser\\nfrom langchain.tools.render import render_text_description\\n```\\n\\n```javascript\\nprompt = hub.pull(\"hwchase17/react\")\\nprompt = prompt.partial(\\n tools=render_text_description(tools),\\n tool_names=\", \".join([t.name for t in tools]),\\n)\\n```\\n\\n```javascript\\nllm_with_stop = llm.bind(stop=[\"\\\\nObservation\"])\\n```\\n\\n```javascript\\nagent = (\\n {\\n \"input\": lambda x: x[\"input\"],\\n \"agent_scratchpad\": lambda x: format_log_to_str(x[\"intermediate_steps\"]),\\n }\\n | prompt\\n | llm_with_stop\\n | ReActSingleInputOutputParser()\\n)\\n```\\n\\n```javascript\\nfrom langchain.agents import AgentExecutor\\n```\\n\\n```javascript\\nagent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)\\n```\\n\\n```javascript\\nagent_executor.invoke(\\n {\\n \"input\": \"Who is Leo DiCaprio\\'s girlfriend? What is her current age raised to the 0.43 power?\"\\n }\\n)\\n```\\n\\n```javascript\\n \\n \\n > Entering new AgentExecutor chain...\\n I need to find out who Leo DiCaprio\\'s girlfriend is and then calculate her age raised to the 0.43 power.\\n Action: Search\\n Action Input: \"Leo DiCaprio girlfriend\"model Vittoria Ceretti I need to find out Vittoria Ceretti\\'s age\\n Action: Search\\n Action Input: \"Vittoria Ceretti age\"25 years I need to calculate 25 raised to the 0.43 power\\n Action: Calculator\\n Action Input: 25^0.43Answer: 3.991298452658078 I now know the final answer\\n Final Answer: Leo DiCaprio\\'s girlfriend is Vittoria Ceretti and her current age raised to the 0.43 power is 3.991298452658078.\\n \\n > Finished chain.\\n\\n\\n\\n\\n\\n {\\'input\\': \"Who is Leo DiCaprio\\'s girlfriend? What is her current age raised to the 0.43 power?\",\\n \\'output\\': \"Leo DiCaprio\\'s girlfriend is Vittoria Ceretti and her current age raised to the 0.43 power is 3.991298452658078.\"}\\n```\\n\\n## Using ZeroShotReactAgent[\\u200b](https://python.langchain.com/docs/modules/agents/agent_types/react#using-zeroshotreactagent \"Direct link to Using ZeroShotReactAgent\")\\n\\nWe will now show how to use the agent with an off-the-shelf agent implementation\\n\\n```javascript\\nagent_executor = initialize_agent(\\n tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\\n)\\n```\\n\\n```javascript\\nagent_executor.invoke(\\n {\\n \"input\": \"Who is Leo DiCaprio\\'s girlfriend? 
What is her current age raised to the 0.43 power?\"\\n }\\n)\\n```\\n\\n```javascript\\n \\n \\n > Entering new AgentExecutor chain...\\n I need to find out who Leo DiCaprio\\'s girlfriend is and then calculate her age raised to the 0.43 power.\\n Action: Search\\n Action Input: \"Leo DiCaprio girlfriend\"\\n Observation: model Vittoria Ceretti\\n Thought: I need to find out Vittoria Ceretti\\'s age\\n Action: Search\\n Action Input: \"Vittoria Ceretti age\"\\n Observation: 25 years\\n Thought: I need to calculate 25 raised to the 0.43 power\\n Action: Calculator\\n Action Input: 25^0.43\\n Observation: Answer: 3.991298452658078\\n Thought: I now know the final answer\\n Final Answer: Leo DiCaprio\\'s girlfriend is Vittoria Ceretti and her current age raised to the 0.43 power is 3.991298452658078.\\n \\n > Finished chain.\\n\\n\\n\\n\\n\\n {\\'input\\': \"Who is L', metadata={'title': 'ReAct', 'source': 'https://d01.getoutline.com/doc/react-d6rxRS1MHk'})]"
]

@@ -22,9 +22,9 @@
"from datetime import datetime\n",
"\n",
"from langchain.agents import AgentType, initialize_agent\n",
"from langchain.utilities.clickup import ClickupAPIWrapper\n",
"from langchain_community.agent_toolkits.clickup.toolkit import ClickupToolkit\n",
"from langchain_community.llms import OpenAI"
"from langchain_community.llms import OpenAI\n",
"from langchain_community.utilities.clickup import ClickupAPIWrapper"
]
},
{

@@ -797,7 +797,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
"/Users/rodrigolentini/repos/langchain-clickup/libs/langchain/langchain/utilities/clickup.py:145: UserWarning: Error encountered while trying to parse <class 'langchain.utilities.clickup.Task'>: 'NoneType' object is not subscriptable\n",
"/Users/rodrigolentini/repos/langchain-clickup/libs/langchain/langchain/utilities/clickup.py:145: UserWarning: Error encountered while trying to parse <class 'langchain_community.utilities.clickup.Task'>: 'NoneType' object is not subscriptable\n",
" Falling back to returning input data.\n",
" warnings.warn(f'Error encountered while trying to parse {dataclass}: {e}\\n Falling back to returning input data.')\n"
]

@@ -107,9 +107,9 @@
"import os\n",
"\n",
"from langchain.agents import AgentType, initialize_agent\n",
"from langchain.utilities.github import GitHubAPIWrapper\n",
"from langchain_community.agent_toolkits.github.toolkit import GitHubToolkit\n",
"from langchain_community.chat_models import ChatOpenAI"
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_community.utilities.github import GitHubAPIWrapper"
]
},
{

@@ -101,9 +101,9 @@
"import os\n",
"\n",
"from langchain.agents import AgentType, initialize_agent\n",
"from langchain.utilities.gitlab import GitLabAPIWrapper\n",
"from langchain_community.agent_toolkits.gitlab.toolkit import GitLabToolkit\n",
"from langchain_community.llms import OpenAI"
"from langchain_community.llms import OpenAI\n",
"from langchain_community.utilities.gitlab import GitLabAPIWrapper"
]
},
{

@@ -50,9 +50,9 @@
"import os\n",
"\n",
"from langchain.agents import AgentType, initialize_agent\n",
"from langchain.utilities.jira import JiraAPIWrapper\n",
"from langchain_community.agent_toolkits.jira.toolkit import JiraToolkit\n",
"from langchain_community.llms import OpenAI"
"from langchain_community.llms import OpenAI\n",
"from langchain_community.utilities.jira import JiraAPIWrapper"
]
},
{

@@ -30,9 +30,9 @@
"outputs": [],
"source": [
"from langchain.agents import AgentType, initialize_agent\n",
"from langchain.utilities.nasa import NasaAPIWrapper\n",
"from langchain_community.agent_toolkits.nasa.toolkit import NasaToolkit\n",
"from langchain_community.llms import OpenAI\n",
"from langchain_community.utilities.nasa import NasaAPIWrapper\n",
"\n",
"llm = OpenAI(temperature=0, openai_api_key=\"\")\n",
"nasa = NasaAPIWrapper()\n",

@@ -38,9 +38,9 @@
"outputs": [],
"source": [
"from azure.identity import DefaultAzureCredential\n",
"from langchain.utilities.powerbi import PowerBIDataset\n",
"from langchain_community.agent_toolkits import PowerBIToolkit, create_pbi_agent\n",
"from langchain_community.chat_models import ChatOpenAI"
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_community.utilities.powerbi import PowerBIDataset"
]
},
{

@@ -25,9 +25,9 @@
"outputs": [],
"source": [
"from langchain.agents import create_spark_sql_agent\n",
"from langchain.utilities.spark_sql import SparkSQL\n",
"from langchain_community.agent_toolkits import SparkSQLToolkit\n",
"from langchain_community.chat_models import ChatOpenAI"
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_community.utilities.spark_sql import SparkSQL"
]
},
{

@@ -75,9 +75,9 @@
"outputs": [],
"source": [
"from langchain.agents import AgentType, initialize_agent\n",
"from langchain.utilities.steam import SteamWebAPIWrapper\n",
"from langchain_community.agent_toolkits.steam.toolkit import SteamToolkit\n",
"from langchain_community.llms import OpenAI"
"from langchain_community.llms import OpenAI\n",
"from langchain_community.utilities.steam import SteamWebAPIWrapper"
]
},
{

@@ -46,7 +46,7 @@
},
"outputs": [],
"source": [
"from langchain.utilities.alpha_vantage import AlphaVantageAPIWrapper"
"from langchain_community.utilities.alpha_vantage import AlphaVantageAPIWrapper"
]
},
{

@@ -41,8 +41,8 @@
"outputs": [],
"source": [
"from langchain.indexes import VectorstoreIndexCreator\n",
"from langchain.utilities import ApifyWrapper\n",
"from langchain_community.document_loaders.base import Document"
"from langchain_community.document_loaders.base import Document\n",
"from langchain_community.utilities import ApifyWrapper"
]
},
{

@@ -122,7 +122,7 @@
},
"outputs": [],
"source": [
"from langchain.utilities import ArxivAPIWrapper"
"from langchain_community.utilities import ArxivAPIWrapper"
]
},
{

@@ -38,7 +38,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.utilities import BingSearchAPIWrapper"
"from langchain_community.utilities import BingSearchAPIWrapper"
]
},
{

@@ -53,8 +53,8 @@
"source": [
"from langchain.chains import LLMChain\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain.utilities.dalle_image_generator import DallEAPIWrapper\n",
"from langchain_community.llms import OpenAI\n",
"from langchain_community.utilities.dalle_image_generator import DallEAPIWrapper\n",
"\n",
"llm = OpenAI(temperature=0.9)\n",
"prompt = PromptTemplate(\n",

@@ -19,7 +19,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.utilities.dataforseo_api_search import DataForSeoAPIWrapper"
"from langchain_community.utilities.dataforseo_api_search import DataForSeoAPIWrapper"
]
},
{

@@ -164,7 +164,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.utilities import DuckDuckGoSearchAPIWrapper\n",
"from langchain_community.utilities import DuckDuckGoSearchAPIWrapper\n",
"\n",
"wrapper = DuckDuckGoSearchAPIWrapper(region=\"de-de\", time=\"d\", max_results=2)"
]

@@ -45,7 +45,7 @@
},
"outputs": [],
"source": [
"from langchain.utilities.golden_query import GoldenQueryAPIWrapper"
"from langchain_community.utilities.golden_query import GoldenQueryAPIWrapper"
]
},
{

@@ -99,8 +99,8 @@
},
"outputs": [],
"source": [
"from langchain.utilities.google_drive import GoogleDriveAPIWrapper\n",
"from langchain_community.tools.google_drive.tool import GoogleDriveSearchTool\n",
"from langchain_community.utilities.google_drive import GoogleDriveAPIWrapper\n",
"\n",
"# By default, search only in the filename.\n",
"tool = GoogleDriveSearchTool(\n",

@@ -43,8 +43,8 @@
"source": [
"import os\n",
"\n",
"from langchain.utilities.google_finance import GoogleFinanceAPIWrapper\n",
"from langchain_community.tools.google_finance import GoogleFinanceQueryRun\n",
"from langchain_community.utilities.google_finance import GoogleFinanceAPIWrapper\n",
"\n",
"os.environ[\"SERPAPI_API_KEY\"] = \"\"\n",
"tool = GoogleFinanceQueryRun(api_wrapper=GoogleFinanceAPIWrapper())"

@@ -70,8 +70,8 @@
"source": [
"import os\n",
"\n",
"from langchain.utilities.google_jobs import GoogleJobsAPIWrapper\n",
"from langchain_community.tools.google_jobs import GoogleJobsQueryRun\n",
"from langchain_community.utilities.google_jobs import GoogleJobsAPIWrapper\n",
"\n",
"os.environ[\"SERPAPI_API_KEY\"] = \"[your serpapi key]\"\n",
"tool = GoogleJobsQueryRun(api_wrapper=GoogleJobsAPIWrapper())"

@@ -50,8 +50,8 @@
"source": [
"import os\n",
"\n",
"from langchain.utilities.google_lens import GoogleLensAPIWrapper\n",
"from langchain_community.tools.google_lens import GoogleLensQueryRun\n",
"from langchain_community.utilities.google_lens import GoogleLensAPIWrapper\n",
"\n",
"os.environ[\"SERPAPI_API_KEY\"] = \"\"\n",
"tool = GoogleLensQueryRun(api_wrapper=GoogleLensAPIWrapper())"

@@ -39,8 +39,8 @@
"source": [
"import os\n",
"\n",
"from langchain.utilities.google_scholar import GoogleScholarAPIWrapper\n",
"from langchain_community.tools.google_scholar import GoogleScholarQueryRun"
"from langchain_community.tools.google_scholar import GoogleScholarQueryRun\n",
"from langchain_community.utilities.google_scholar import GoogleScholarAPIWrapper"
]
},
{

@@ -35,7 +35,7 @@
"outputs": [],
"source": [
"from langchain.tools import Tool\n",
"from langchain.utilities import GoogleSearchAPIWrapper\n",
"from langchain_community.utilities import GoogleSearchAPIWrapper\n",
"\n",
"search = GoogleSearchAPIWrapper()\n",
"\n",

@@ -47,7 +47,7 @@
},
"outputs": [],
"source": [
"from langchain.utilities import GoogleSerperAPIWrapper"
"from langchain_community.utilities import GoogleSerperAPIWrapper"
]
},
{

@@ -159,8 +159,8 @@
],
"source": [
"from langchain.agents import AgentType, Tool, initialize_agent\n",
"from langchain.utilities import GoogleSerperAPIWrapper\n",
"from langchain_community.llms.openai import OpenAI\n",
"from langchain_community.utilities import GoogleSerperAPIWrapper\n",
"\n",
"llm = OpenAI(temperature=0)\n",
"search = GoogleSerperAPIWrapper()\n",

@@ -51,8 +51,8 @@
"source": [
"import os\n",
"\n",
"from langchain.utilities.google_trends import GoogleTrendsAPIWrapper\n",
"from langchain_community.tools.google_trends import GoogleTrendsQueryRun\n",
"from langchain_community.utilities.google_trends import GoogleTrendsAPIWrapper\n",
"\n",
"os.environ[\"SERPAPI_API_KEY\"] = \"\"\n",
"tool = GoogleTrendsQueryRun(api_wrapper=GoogleTrendsAPIWrapper())"

@@ -29,7 +29,7 @@
"source": [
"import os\n",
"\n",
"from langchain.utilities import OpenWeatherMapAPIWrapper\n",
"from langchain_community.utilities import OpenWeatherMapAPIWrapper\n",
"\n",
"os.environ[\"OPENWEATHERMAP_API_KEY\"] = \"\"\n",
"\n",

@@ -50,8 +50,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.utilities.reddit_search import RedditSearchAPIWrapper\n",
"from langchain_community.tools.reddit_search.tool import RedditSearchRun\n",
"from langchain_community.utilities.reddit_search import RedditSearchAPIWrapper\n",
"\n",
"search = RedditSearchRun(\n",
" api_wrapper=RedditSearchAPIWrapper(\n",

@@ -173,9 +173,9 @@
"from langchain.chains import LLMChain\n",
"from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain.utilities.reddit_search import RedditSearchAPIWrapper\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_community.tools.reddit_search.tool import RedditSearchRun\n",
"from langchain_community.utilities.reddit_search import RedditSearchAPIWrapper\n",
"\n",
"# Provide keys for Reddit\n",
"client_id = \"\"\n",

@@ -87,7 +87,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.utilities import TextRequestsWrapper\n",
"from langchain_community.utilities import TextRequestsWrapper\n",
"\n",
"requests = TextRequestsWrapper()"
]

@@ -29,7 +29,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.utilities import SearchApiAPIWrapper"
"from langchain_community.utilities import SearchApiAPIWrapper"
]
},
{

@@ -125,8 +125,8 @@
],
"source": [
"from langchain.agents import AgentType, Tool, initialize_agent\n",
"from langchain.utilities import SearchApiAPIWrapper\n",
"from langchain_community.llms.openai import OpenAI\n",
"from langchain_community.utilities import SearchApiAPIWrapper\n",
"\n",
"llm = OpenAI(temperature=0)\n",
"search = SearchApiAPIWrapper()\n",

@@ -23,7 +23,7 @@
"source": [
"import pprint\n",
"\n",
"from langchain.utilities import SearxSearchWrapper"
"from langchain_community.utilities import SearxSearchWrapper"
]
},
{

@@ -17,7 +17,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.utilities import SerpAPIWrapper"
"from langchain_community.utilities import SerpAPIWrapper"
]
},
{

@@ -28,7 +28,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.utilities import StackExchangeAPIWrapper"
"from langchain_community.utilities import StackExchangeAPIWrapper"
]
},
{

@@ -82,9 +82,9 @@
"import os\n",
"\n",
"from langchain.agents import AgentType, initialize_agent\n",
"from langchain.utilities.tavily_search import TavilySearchAPIWrapper\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_community.tools.tavily_search import TavilySearchResults\n",
"from langchain_community.utilities.tavily_search import TavilySearchAPIWrapper\n",
"\n",
"# set up API key\n",
"os.environ[\"TAVILY_API_KEY\"] = \"...\"\n",
@ -61,7 +61,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.utilities.twilio import TwilioAPIWrapper"
|
||||
"from langchain_community.utilities.twilio import TwilioAPIWrapper"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -113,7 +113,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.utilities.twilio import TwilioAPIWrapper"
|
||||
"from langchain_community.utilities.twilio import TwilioAPIWrapper"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -34,7 +34,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.tools import WikipediaQueryRun\n",
|
||||
"from langchain.utilities import WikipediaAPIWrapper"
|
||||
"from langchain_community.utilities import WikipediaAPIWrapper"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -53,7 +53,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.utilities.wolfram_alpha import WolframAlphaAPIWrapper"
|
||||
"from langchain_community.utilities.wolfram_alpha import WolframAlphaAPIWrapper"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -61,9 +61,9 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.agents import AgentType, initialize_agent\n",
|
||||
"from langchain.utilities.zapier import ZapierNLAWrapper\n",
|
||||
"from langchain_community.agent_toolkits import ZapierToolkit\n",
|
||||
"from langchain_community.llms import OpenAI"
|
||||
"from langchain_community.llms import OpenAI\n",
|
||||
"from langchain_community.utilities.zapier import ZapierNLAWrapper"
|
||||
]
|
||||
},
|
||||
{
|
||||
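
In the Zapier agent hunk the wrapper import moves while ZapierToolkit keeps coming from langchain_community.agent_toolkits. A sketch of the typical wiring, assuming the toolkit's from_zapier_nla_wrapper constructor and that ZAPIER_NLA_API_KEY is set in the environment:

    from langchain.agents import AgentType, initialize_agent
    from langchain_community.agent_toolkits import ZapierToolkit
    from langchain_community.llms import OpenAI
    from langchain_community.utilities.zapier import ZapierNLAWrapper

    llm = OpenAI(temperature=0)
    zapier = ZapierNLAWrapper()  # reads ZAPIER_NLA_API_KEY from the environment
    toolkit = ZapierToolkit.from_zapier_nla_wrapper(zapier)  # assumed constructor name
    agent = initialize_agent(
        toolkit.get_tools(), llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
    )
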
@ -162,9 +162,9 @@
|
||||
"source": [
|
||||
"from langchain.chains import LLMChain, SimpleSequentialChain, TransformChain\n",
|
||||
"from langchain.prompts import PromptTemplate\n",
|
||||
"from langchain.utilities.zapier import ZapierNLAWrapper\n",
|
||||
"from langchain_community.llms import OpenAI\n",
|
||||
"from langchain_community.tools.zapier.tool import ZapierNLARunAction"
|
||||
"from langchain_community.tools.zapier.tool import ZapierNLARunAction\n",
|
||||
"from langchain_community.utilities.zapier import ZapierNLAWrapper"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -30,8 +30,8 @@
|
||||
"from langchain.agents import AgentExecutor, Tool, ZeroShotAgent\n",
|
||||
"from langchain.chains import LLMChain\n",
|
||||
"from langchain.memory import ConversationBufferMemory\n",
|
||||
"from langchain.utilities import GoogleSearchAPIWrapper\n",
|
||||
"from langchain_community.llms import OpenAI"
|
||||
"from langchain_community.llms import OpenAI\n",
|
||||
"from langchain_community.utilities import GoogleSearchAPIWrapper"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -36,9 +36,9 @@
|
||||
"from langchain.agents import AgentExecutor, Tool, ZeroShotAgent\n",
|
||||
"from langchain.chains import LLMChain\n",
|
||||
"from langchain.memory import ConversationBufferMemory\n",
|
||||
"from langchain.utilities import GoogleSearchAPIWrapper\n",
|
||||
"from langchain_community.chat_message_histories import RedisChatMessageHistory\n",
|
||||
"from langchain_community.llms import OpenAI"
|
||||
"from langchain_community.llms import OpenAI\n",
|
||||
"from langchain_community.utilities import GoogleSearchAPIWrapper"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -85,8 +85,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.utilities import SQLDatabase\n",
|
||||
"from langchain_community.llms import OpenAI\n",
|
||||
"from langchain_community.utilities import SQLDatabase\n",
|
||||
"from langchain_experimental.sql import SQLDatabaseChain\n",
|
||||
"\n",
|
||||
"db = SQLDatabase.from_uri(\"sqlite:///Chinook.db\")\n",
|
||||
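
The notebook hunk above ends up with SQLDatabase coming from langchain_community.utilities while SQLDatabaseChain stays in langchain_experimental.sql. A minimal sketch of the migrated setup; the from_llm constructor call and the example question are assumed from standard usage rather than shown in this hunk:

    from langchain_community.llms import OpenAI
    from langchain_community.utilities import SQLDatabase
    from langchain_experimental.sql import SQLDatabaseChain

    db = SQLDatabase.from_uri("sqlite:///Chinook.db")
    llm = OpenAI(temperature=0)
    # Assumed standard constructor for the experimental SQL chain.
    db_chain = SQLDatabaseChain.from_llm(llm, db, verbose=True)
    db_chain.run("How many employees are there?")  # assumed example question
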
@ -836,9 +836,9 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.agents import AgentType, create_sql_agent\n",
|
||||
"from langchain.utilities import SQLDatabase\n",
|
||||
"from langchain_community.agent_toolkits import SQLDatabaseToolkit\n",
|
||||
"from langchain_community.chat_models import ChatOpenAI\n",
|
||||
"from langchain_community.utilities import SQLDatabase\n",
|
||||
"\n",
|
||||
"db = SQLDatabase.from_uri(\"sqlite:///Chinook.db\")\n",
|
||||
"llm = ChatOpenAI(model_name=\"gpt-4\", temperature=0)\n",
|
||||
@ -995,9 +995,9 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.agents import AgentType, create_sql_agent\n",
|
||||
"from langchain.utilities import SQLDatabase\n",
|
||||
"from langchain_community.agent_toolkits import SQLDatabaseToolkit\n",
|
||||
"from langchain_community.chat_models import ChatOpenAI\n",
|
||||
"from langchain_community.utilities import SQLDatabase\n",
|
||||
"\n",
|
||||
"# db = SQLDatabase.from_uri(\"sqlite:///Chinook.db\")\n",
|
||||
"llm = ChatOpenAI(model_name=\"gpt-4\", temperature=0)\n",
|
||||
|
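
Both SQL-agent hunks keep create_sql_agent and AgentType in langchain.agents and move only the toolkit and SQLDatabase imports. A sketch of the resulting wiring; the toolkit and agent construction below follow the usual create_sql_agent signature and are not quoted from the notebook:

    from langchain.agents import AgentType, create_sql_agent
    from langchain_community.agent_toolkits import SQLDatabaseToolkit
    from langchain_community.chat_models import ChatOpenAI
    from langchain_community.utilities import SQLDatabase

    db = SQLDatabase.from_uri("sqlite:///Chinook.db")
    llm = ChatOpenAI(model_name="gpt-4", temperature=0)

    # The toolkit bundles the SQL tools around the db/llm pair (assumed standard usage).
    toolkit = SQLDatabaseToolkit(db=db, llm=llm)
    agent_executor = create_sql_agent(
        llm=llm,
        toolkit=toolkit,
        agent_type=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
        verbose=True,
    )
    agent_executor.run("Describe the playlisttrack table")  # assumed example prompt
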
@ -480,9 +480,9 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.retrievers.web_research import WebResearchRetriever\n",
|
||||
"from langchain.utilities import GoogleSearchAPIWrapper\n",
|
||||
"from langchain_community.chat_models.openai import ChatOpenAI\n",
|
||||
"from langchain_community.embeddings import OpenAIEmbeddings\n",
|
||||
"from langchain_community.utilities import GoogleSearchAPIWrapper\n",
|
||||
"from langchain_community.vectorstores import Chroma"
|
||||
]
|
||||
},
|
||||
@ -631,7 +631,7 @@
|
||||
"source": [
|
||||
"from langchain.docstore.document import Document\n",
|
||||
"from langchain.indexes import VectorstoreIndexCreator\n",
|
||||
"from langchain.utilities import ApifyWrapper\n",
|
||||
"from langchain_community.utilities import ApifyWrapper\n",
|
||||
"\n",
|
||||
"apify = ApifyWrapper()\n",
|
||||
"# Call the Actor to obtain text from the crawled webpages\n",
|
||||
|
@ -6,7 +6,7 @@ from pytest import CaptureFixture, MonkeyPatch
|
||||
from langchain_community.llms.arcee import Arcee
|
||||
|
||||
|
||||
@patch("langchain.utilities.arcee.requests.get")
|
||||
@patch("langchain_community.utilities.arcee.requests.get")
|
||||
def test_arcee_api_key_is_secret_string(mock_get: MagicMock) -> None:
|
||||
mock_response = mock_get.return_value
|
||||
mock_response.status_code = 200
|
||||
@ -24,7 +24,7 @@ def test_arcee_api_key_is_secret_string(mock_get: MagicMock) -> None:
|
||||
assert isinstance(arcee_without_env_var.arcee_api_key, SecretStr)
|
||||
|
||||
|
||||
@patch("langchain.utilities.arcee.requests.get")
|
||||
@patch("langchain_community.utilities.arcee.requests.get")
|
||||
def test_api_key_masked_when_passed_via_constructor(
|
||||
mock_get: MagicMock, capsys: CaptureFixture
|
||||
) -> None:
|
||||
@ -47,7 +47,7 @@ def test_api_key_masked_when_passed_via_constructor(
|
||||
assert "**********" == captured.out
|
||||
|
||||
|
||||
@patch("langchain.utilities.arcee.requests.get")
|
||||
@patch("langchain_community.utilities.arcee.requests.get")
|
||||
def test_api_key_masked_when_passed_from_env(
|
||||
mock_get: MagicMock, capsys: CaptureFixture, monkeypatch: MonkeyPatch
|
||||
) -> None:
|
||||
|
@ -23,7 +23,7 @@ class BashProcess:
|
||||
Example:
|
||||
.. code-block:: python
|
||||
|
||||
from langchain.utilities.bash import BashProcess
|
||||
from langchain_community.utilities.bash import BashProcess
|
||||
|
||||
bash = BashProcess(
|
||||
strip_newlines = False,
|
||||
|
@ -13,7 +13,7 @@ from typing import Any, Dict, List, Optional
|
||||
from langchain.callbacks.manager import CallbackManagerForChainRun
|
||||
from langchain.chains.base import Chain
|
||||
from langchain.chains.llm import LLMChain
|
||||
from langchain.utilities import PythonREPL
|
||||
from langchain_community.utilities import PythonREPL
|
||||
from langchain_core.language_models import BaseLanguageModel
|
||||
|
||||
from langchain_experimental.pal_chain.colored_object_prompt import COLORED_OBJECT_PROMPT
|
||||
|
@ -10,8 +10,8 @@ from langchain.chains.llm import LLMChain
|
||||
from langchain.chains.sql_database.prompt import DECIDER_PROMPT, PROMPT, SQL_PROMPTS
|
||||
from langchain.prompts.prompt import PromptTemplate
|
||||
from langchain.schema import BasePromptTemplate
|
||||
from langchain.utilities.sql_database import SQLDatabase
|
||||
from langchain_community.tools.sql_database.prompt import QUERY_CHECKER
|
||||
from langchain_community.utilities.sql_database import SQLDatabase
|
||||
from langchain_core.language_models import BaseLanguageModel
|
||||
|
||||
from langchain_experimental.pydantic_v1 import Extra, Field, root_validator
|
||||
|
@ -8,8 +8,8 @@ from langchain.chains.llm import LLMChain
|
||||
from langchain.chains.sql_database.prompt import PROMPT, SQL_PROMPTS
|
||||
from langchain.prompts.prompt import PromptTemplate
|
||||
from langchain.schema import BaseOutputParser, BasePromptTemplate
|
||||
from langchain.utilities.sql_database import SQLDatabase
|
||||
from langchain_community.tools.sql_database.prompt import QUERY_CHECKER
|
||||
from langchain_community.utilities.sql_database import SQLDatabase
|
||||
from langchain_core.embeddings import Embeddings
|
||||
from langchain_core.language_models import BaseLanguageModel
|
||||
|
||||
|
@ -1,6 +1,6 @@
|
||||
"""Test SQL Database Chain."""
|
||||
from langchain.utilities.sql_database import SQLDatabase
|
||||
from langchain_community.llms.openai import OpenAI
|
||||
from langchain_community.utilities.sql_database import SQLDatabase
|
||||
from sqlalchemy import Column, Integer, MetaData, String, Table, create_engine, insert
|
||||
|
||||
from langchain_experimental.sql.base import (
|
||||
|
@ -259,57 +259,73 @@ def __getattr__(name: str) -> Any:
|
||||
|
||||
return BasePromptTemplate
|
||||
elif name == "ArxivAPIWrapper":
|
||||
from langchain.utilities import ArxivAPIWrapper
|
||||
from langchain_community.utilities import ArxivAPIWrapper
|
||||
|
||||
_warn_on_import(name, replacement="langchain.utilities.ArxivAPIWrapper")
|
||||
_warn_on_import(
|
||||
name, replacement="langchain_community.utilities.ArxivAPIWrapper"
|
||||
)
|
||||
|
||||
return ArxivAPIWrapper
|
||||
elif name == "GoldenQueryAPIWrapper":
|
||||
from langchain.utilities import GoldenQueryAPIWrapper
|
||||
from langchain_community.utilities import GoldenQueryAPIWrapper
|
||||
|
||||
_warn_on_import(name, replacement="langchain.utilities.GoldenQueryAPIWrapper")
|
||||
_warn_on_import(
|
||||
name, replacement="langchain_community.utilities.GoldenQueryAPIWrapper"
|
||||
)
|
||||
|
||||
return GoldenQueryAPIWrapper
|
||||
elif name == "GoogleSearchAPIWrapper":
|
||||
from langchain.utilities import GoogleSearchAPIWrapper
|
||||
from langchain_community.utilities import GoogleSearchAPIWrapper
|
||||
|
||||
_warn_on_import(name, replacement="langchain.utilities.GoogleSearchAPIWrapper")
|
||||
_warn_on_import(
|
||||
name, replacement="langchain_community.utilities.GoogleSearchAPIWrapper"
|
||||
)
|
||||
|
||||
return GoogleSearchAPIWrapper
|
||||
elif name == "GoogleSerperAPIWrapper":
|
||||
from langchain.utilities import GoogleSerperAPIWrapper
|
||||
from langchain_community.utilities import GoogleSerperAPIWrapper
|
||||
|
||||
_warn_on_import(name, replacement="langchain.utilities.GoogleSerperAPIWrapper")
|
||||
_warn_on_import(
|
||||
name, replacement="langchain_community.utilities.GoogleSerperAPIWrapper"
|
||||
)
|
||||
|
||||
return GoogleSerperAPIWrapper
|
||||
elif name == "PowerBIDataset":
|
||||
from langchain.utilities import PowerBIDataset
|
||||
from langchain_community.utilities import PowerBIDataset
|
||||
|
||||
_warn_on_import(name, replacement="langchain.utilities.PowerBIDataset")
|
||||
_warn_on_import(
|
||||
name, replacement="langchain_community.utilities.PowerBIDataset"
|
||||
)
|
||||
|
||||
return PowerBIDataset
|
||||
elif name == "SearxSearchWrapper":
|
||||
from langchain.utilities import SearxSearchWrapper
|
||||
from langchain_community.utilities import SearxSearchWrapper
|
||||
|
||||
_warn_on_import(name, replacement="langchain.utilities.SearxSearchWrapper")
|
||||
_warn_on_import(
|
||||
name, replacement="langchain_community.utilities.SearxSearchWrapper"
|
||||
)
|
||||
|
||||
return SearxSearchWrapper
|
||||
elif name == "WikipediaAPIWrapper":
|
||||
from langchain.utilities import WikipediaAPIWrapper
|
||||
from langchain_community.utilities import WikipediaAPIWrapper
|
||||
|
||||
_warn_on_import(name, replacement="langchain.utilities.WikipediaAPIWrapper")
|
||||
_warn_on_import(
|
||||
name, replacement="langchain_community.utilities.WikipediaAPIWrapper"
|
||||
)
|
||||
|
||||
return WikipediaAPIWrapper
|
||||
elif name == "WolframAlphaAPIWrapper":
|
||||
from langchain.utilities import WolframAlphaAPIWrapper
|
||||
from langchain_community.utilities import WolframAlphaAPIWrapper
|
||||
|
||||
_warn_on_import(name, replacement="langchain.utilities.WolframAlphaAPIWrapper")
|
||||
_warn_on_import(
|
||||
name, replacement="langchain_community.utilities.WolframAlphaAPIWrapper"
|
||||
)
|
||||
|
||||
return WolframAlphaAPIWrapper
|
||||
elif name == "SQLDatabase":
|
||||
from langchain.utilities import SQLDatabase
|
||||
from langchain_community.utilities import SQLDatabase
|
||||
|
||||
_warn_on_import(name, replacement="langchain.utilities.SQLDatabase")
|
||||
_warn_on_import(name, replacement="langchain_community.utilities.SQLDatabase")
|
||||
|
||||
return SQLDatabase
|
||||
elif name == "FAISS":
|
||||
@ -328,9 +344,11 @@ def __getattr__(name: str) -> Any:
|
||||
return ElasticVectorSearch
|
||||
# For backwards compatibility
|
||||
elif name == "SerpAPIChain" or name == "SerpAPIWrapper":
|
||||
from langchain.utilities import SerpAPIWrapper
|
||||
from langchain_community.utilities import SerpAPIWrapper
|
||||
|
||||
_warn_on_import(name, replacement="langchain.utilities.SerpAPIWrapper")
|
||||
_warn_on_import(
|
||||
name, replacement="langchain_community.utilities.SerpAPIWrapper"
|
||||
)
|
||||
|
||||
return SerpAPIWrapper
|
||||
elif name == "verbose":
|
||||
|
@ -25,8 +25,8 @@ from langchain.callbacks.manager import Callbacks
|
||||
from langchain.chains.api import news_docs, open_meteo_docs, podcast_docs, tmdb_docs
|
||||
from langchain.chains.api.base import APIChain
|
||||
from langchain.chains.llm_math.base import LLMMathChain
|
||||
from langchain.utilities.dalle_image_generator import DallEAPIWrapper
|
||||
from langchain.utilities.requests import TextRequestsWrapper
|
||||
from langchain_community.utilities.dalle_image_generator import DallEAPIWrapper
|
||||
from langchain_community.utilities.requests import TextRequestsWrapper
|
||||
from langchain_community.tools.arxiv.tool import ArxivQueryRun
|
||||
from langchain_community.tools.golden_query.tool import GoldenQueryRun
|
||||
from langchain_community.tools.pubmed.tool import PubmedQueryRun
|
||||
@ -77,32 +77,32 @@ from langchain_community.tools.dataforseo_api_search import DataForSeoAPISearchR
|
||||
from langchain_community.tools.dataforseo_api_search import DataForSeoAPISearchResults
|
||||
from langchain_community.tools.memorize.tool import Memorize
|
||||
from langchain_community.tools.reddit_search.tool import RedditSearchRun
|
||||
from langchain.utilities.arxiv import ArxivAPIWrapper
|
||||
from langchain.utilities.golden_query import GoldenQueryAPIWrapper
|
||||
from langchain.utilities.pubmed import PubMedAPIWrapper
|
||||
from langchain.utilities.bing_search import BingSearchAPIWrapper
|
||||
from langchain.utilities.duckduckgo_search import DuckDuckGoSearchAPIWrapper
|
||||
from langchain.utilities.google_lens import GoogleLensAPIWrapper
|
||||
from langchain.utilities.google_jobs import GoogleJobsAPIWrapper
|
||||
from langchain.utilities.google_search import GoogleSearchAPIWrapper
|
||||
from langchain.utilities.google_serper import GoogleSerperAPIWrapper
|
||||
from langchain.utilities.google_scholar import GoogleScholarAPIWrapper
|
||||
from langchain.utilities.google_finance import GoogleFinanceAPIWrapper
|
||||
from langchain.utilities.google_trends import GoogleTrendsAPIWrapper
|
||||
from langchain.utilities.metaphor_search import MetaphorSearchAPIWrapper
|
||||
from langchain.utilities.awslambda import LambdaWrapper
|
||||
from langchain.utilities.graphql import GraphQLAPIWrapper
|
||||
from langchain.utilities.searchapi import SearchApiAPIWrapper
|
||||
from langchain.utilities.searx_search import SearxSearchWrapper
|
||||
from langchain.utilities.serpapi import SerpAPIWrapper
|
||||
from langchain.utilities.stackexchange import StackExchangeAPIWrapper
|
||||
from langchain.utilities.twilio import TwilioAPIWrapper
|
||||
from langchain.utilities.merriam_webster import MerriamWebsterAPIWrapper
|
||||
from langchain.utilities.wikipedia import WikipediaAPIWrapper
|
||||
from langchain.utilities.wolfram_alpha import WolframAlphaAPIWrapper
|
||||
from langchain.utilities.openweathermap import OpenWeatherMapAPIWrapper
|
||||
from langchain.utilities.dataforseo_api_search import DataForSeoAPIWrapper
|
||||
from langchain.utilities.reddit_search import RedditSearchAPIWrapper
|
||||
from langchain_community.utilities.arxiv import ArxivAPIWrapper
|
||||
from langchain_community.utilities.golden_query import GoldenQueryAPIWrapper
|
||||
from langchain_community.utilities.pubmed import PubMedAPIWrapper
|
||||
from langchain_community.utilities.bing_search import BingSearchAPIWrapper
|
||||
from langchain_community.utilities.duckduckgo_search import DuckDuckGoSearchAPIWrapper
|
||||
from langchain_community.utilities.google_lens import GoogleLensAPIWrapper
|
||||
from langchain_community.utilities.google_jobs import GoogleJobsAPIWrapper
|
||||
from langchain_community.utilities.google_search import GoogleSearchAPIWrapper
|
||||
from langchain_community.utilities.google_serper import GoogleSerperAPIWrapper
|
||||
from langchain_community.utilities.google_scholar import GoogleScholarAPIWrapper
|
||||
from langchain_community.utilities.google_finance import GoogleFinanceAPIWrapper
|
||||
from langchain_community.utilities.google_trends import GoogleTrendsAPIWrapper
|
||||
from langchain_community.utilities.metaphor_search import MetaphorSearchAPIWrapper
|
||||
from langchain_community.utilities.awslambda import LambdaWrapper
|
||||
from langchain_community.utilities.graphql import GraphQLAPIWrapper
|
||||
from langchain_community.utilities.searchapi import SearchApiAPIWrapper
|
||||
from langchain_community.utilities.searx_search import SearxSearchWrapper
|
||||
from langchain_community.utilities.serpapi import SerpAPIWrapper
|
||||
from langchain_community.utilities.stackexchange import StackExchangeAPIWrapper
|
||||
from langchain_community.utilities.twilio import TwilioAPIWrapper
|
||||
from langchain_community.utilities.merriam_webster import MerriamWebsterAPIWrapper
|
||||
from langchain_community.utilities.wikipedia import WikipediaAPIWrapper
|
||||
from langchain_community.utilities.wolfram_alpha import WolframAlphaAPIWrapper
|
||||
from langchain_community.utilities.openweathermap import OpenWeatherMapAPIWrapper
|
||||
from langchain_community.utilities.dataforseo_api_search import DataForSeoAPIWrapper
|
||||
from langchain_community.utilities.reddit_search import RedditSearchAPIWrapper
|
||||
|
||||
|
||||
def _get_tools_requests_get() -> BaseTool:
|
||||
|
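
The load_tools.py hunk only redirects the wrapper imports behind the built-in tool names; the registry of names is unchanged, so existing tool-loading code keeps working. A sketch, assuming load_tools is still exported from langchain.agents and SERPAPI_API_KEY is available:

    from langchain.agents import AgentType, initialize_agent, load_tools
    from langchain_community.llms import OpenAI

    llm = OpenAI(temperature=0)
    # "serpapi" and "wikipedia" now resolve to the SerpAPIWrapper / WikipediaAPIWrapper
    # imports that this hunk pulls from langchain_community.
    tools = load_tools(["serpapi", "wikipedia"], llm=llm)
    agent = initialize_agent(
        tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
    )
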
@ -6,12 +6,12 @@ from typing import Any, List, Optional, Union
|
||||
|
||||
import yaml
|
||||
from langchain_core.language_models import BaseLanguageModel
|
||||
from langchain_core.utils.loading import try_load_from_hub
|
||||
|
||||
from langchain.agents.agent import BaseMultiActionAgent, BaseSingleActionAgent
|
||||
from langchain.agents.tools import Tool
|
||||
from langchain.agents.types import AGENT_TO_CLASS
|
||||
from langchain.chains.loading import load_chain, load_chain_from_config
|
||||
from langchain.utilities.loading import try_load_from_hub
|
||||
|
||||
logger = logging.getLogger(__file__)
|
||||
|
||||
|
@ -1,6 +1,9 @@
|
||||
"""Chain that does self-ask with search."""
|
||||
from typing import Any, Sequence, Union
|
||||
|
||||
from langchain_community.utilities.google_serper import GoogleSerperAPIWrapper
|
||||
from langchain_community.utilities.searchapi import SearchApiAPIWrapper
|
||||
from langchain_community.utilities.serpapi import SerpAPIWrapper
|
||||
from langchain_core.language_models import BaseLanguageModel
|
||||
from langchain_core.prompts import BasePromptTemplate
|
||||
from langchain_core.pydantic_v1 import Field
|
||||
@ -14,9 +17,6 @@ from langchain.agents.self_ask_with_search.output_parser import SelfAskOutputPar
|
||||
from langchain.agents.self_ask_with_search.prompt import PROMPT
|
||||
from langchain.agents.tools import Tool
|
||||
from langchain.agents.utils import validate_tools_single_input
|
||||
from langchain.utilities.google_serper import GoogleSerperAPIWrapper
|
||||
from langchain.utilities.searchapi import SearchApiAPIWrapper
|
||||
from langchain.utilities.serpapi import SerpAPIWrapper
|
||||
|
||||
|
||||
class SelfAskWithSearchAgent(Agent):
|
||||
|
@ -4,6 +4,7 @@ from __future__ import annotations
|
||||
from typing import Any, Dict, List, Optional, Sequence, Tuple
|
||||
from urllib.parse import urlparse
|
||||
|
||||
from langchain_community.utilities.requests import TextRequestsWrapper
|
||||
from langchain_core.language_models import BaseLanguageModel
|
||||
from langchain_core.prompts import BasePromptTemplate
|
||||
from langchain_core.pydantic_v1 import Field, root_validator
|
||||
@ -15,7 +16,6 @@ from langchain.callbacks.manager import (
|
||||
from langchain.chains.api.prompt import API_RESPONSE_PROMPT, API_URL_PROMPT
|
||||
from langchain.chains.base import Chain
|
||||
from langchain.chains.llm import LLMChain
|
||||
from langchain.utilities.requests import TextRequestsWrapper
|
||||
|
||||
|
||||
def _extract_scheme_and_domain(url: str) -> Tuple[str, str]:
|
||||
|
@ -5,6 +5,7 @@ import json
|
||||
from typing import Any, Dict, List, NamedTuple, Optional, cast
|
||||
|
||||
from langchain_community.tools.openapi.utils.api_models import APIOperation
|
||||
from langchain_community.utilities.requests import Requests
|
||||
from langchain_core.language_models import BaseLanguageModel
|
||||
from langchain_core.pydantic_v1 import BaseModel, Field
|
||||
from requests import Response
|
||||
@ -14,7 +15,6 @@ from langchain.chains.api.openapi.requests_chain import APIRequesterChain
|
||||
from langchain.chains.api.openapi.response_chain import APIResponderChain
|
||||
from langchain.chains.base import Chain
|
||||
from langchain.chains.llm import LLMChain
|
||||
from langchain.utilities.requests import Requests
|
||||
|
||||
|
||||
class _ParamMapping(NamedTuple):
|
||||
|
@ -3,12 +3,12 @@ from __future__ import annotations
|
||||
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
from langchain_community.utilities.requests import TextRequestsWrapper
|
||||
from langchain_core.pydantic_v1 import Extra, Field, root_validator
|
||||
|
||||
from langchain.callbacks.manager import CallbackManagerForChainRun
|
||||
from langchain.chains import LLMChain
|
||||
from langchain.chains.base import Chain
|
||||
from langchain.utilities.requests import TextRequestsWrapper
|
||||
|
||||
DEFAULT_HEADERS = {
|
||||
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36" # noqa: E501
|
||||
|
@ -10,6 +10,7 @@ from langchain_core.prompts.loading import (
|
||||
load_prompt,
|
||||
load_prompt_from_config,
|
||||
)
|
||||
from langchain_core.utils.loading import try_load_from_hub
|
||||
|
||||
from langchain.chains import ReduceDocumentsChain
|
||||
from langchain.chains.api.base import APIChain
|
||||
@ -28,7 +29,6 @@ from langchain.chains.qa_with_sources.base import QAWithSourcesChain
|
||||
from langchain.chains.qa_with_sources.retrieval import RetrievalQAWithSourcesChain
|
||||
from langchain.chains.qa_with_sources.vector_db import VectorDBQAWithSourcesChain
|
||||
from langchain.chains.retrieval_qa.base import RetrievalQA, VectorDBQA
|
||||
from langchain.utilities.loading import try_load_from_hub
|
||||
|
||||
URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/chains/"
|
||||
|
||||
|
@ -7,6 +7,7 @@ from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Un
|
||||
|
||||
import requests
|
||||
from langchain_community.chat_models import ChatOpenAI
|
||||
from langchain_community.utilities.openapi import OpenAPISpec
|
||||
from langchain_core.language_models import BaseLanguageModel
|
||||
from langchain_core.prompts import BasePromptTemplate, ChatPromptTemplate
|
||||
from langchain_core.utils.input import get_colored_text
|
||||
@ -18,7 +19,6 @@ from langchain.chains.llm import LLMChain
|
||||
from langchain.chains.sequential import SequentialChain
|
||||
from langchain.output_parsers.openai_functions import JsonOutputFunctionsParser
|
||||
from langchain.tools import APIOperation
|
||||
from langchain.utilities.openapi import OpenAPISpec
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from openapi_pydantic import Parameter
|
||||
|
@ -1,12 +1,12 @@
|
||||
from typing import List, Optional, TypedDict, Union
|
||||
|
||||
from langchain_community.utilities.sql_database import SQLDatabase
|
||||
from langchain_core.language_models import BaseLanguageModel
|
||||
from langchain_core.output_parsers import StrOutputParser
|
||||
from langchain_core.prompts import BasePromptTemplate
|
||||
from langchain_core.runnables import Runnable, RunnableParallel
|
||||
|
||||
from langchain.chains.sql_database.prompt import PROMPT, SQL_PROMPTS
|
||||
from langchain.utilities.sql_database import SQLDatabase
|
||||
|
||||
|
||||
def _strip(text: str) -> str:
|
||||
|
@ -3,6 +3,7 @@ from abc import ABC, abstractmethod
|
||||
from itertools import islice
|
||||
from typing import Any, Dict, Iterable, List, Optional
|
||||
|
||||
from langchain_community.utilities.redis import get_client
|
||||
from langchain_core.language_models import BaseLanguageModel
|
||||
from langchain_core.messages import BaseMessage, get_buffer_string
|
||||
from langchain_core.prompts import BasePromptTemplate
|
||||
@ -15,7 +16,6 @@ from langchain.memory.prompt import (
|
||||
ENTITY_SUMMARIZATION_PROMPT,
|
||||
)
|
||||
from langchain.memory.utils import get_prompt_input_key
|
||||
from langchain.utilities.redis import get_client
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
"""For backwards compatibility."""
|
||||
from langchain.utilities.python import PythonREPL
|
||||
from langchain_community.utilities.python import PythonREPL
|
||||
|
||||
__all__ = ["PythonREPL"]
|
||||
|
@ -1,5 +1,5 @@
|
||||
"""DEPRECATED: Kept for backwards compatibility."""
|
||||
from langchain.utilities import Requests, RequestsWrapper, TextRequestsWrapper
|
||||
from langchain_community.utilities import Requests, RequestsWrapper, TextRequestsWrapper
|
||||
|
||||
__all__ = [
|
||||
"Requests",
|
||||
|
@ -5,6 +5,7 @@ from typing import List, Optional
|
||||
from langchain_community.document_loaders import AsyncHtmlLoader
|
||||
from langchain_community.document_transformers import Html2TextTransformer
|
||||
from langchain_community.llms import LlamaCpp
|
||||
from langchain_community.utilities import GoogleSearchAPIWrapper
|
||||
from langchain_core.documents import Document
|
||||
from langchain_core.language_models import BaseLLM
|
||||
from langchain_core.prompts import BasePromptTemplate, PromptTemplate
|
||||
@ -20,7 +21,6 @@ from langchain.chains import LLMChain
|
||||
from langchain.chains.prompt_selector import ConditionalPromptSelector
|
||||
from langchain.output_parsers.pydantic import PydanticOutputParser
|
||||
from langchain.text_splitter import RecursiveCharacterTextSplitter, TextSplitter
|
||||
from langchain.utilities import GoogleSearchAPIWrapper
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
"""For backwards compatibility."""
|
||||
from langchain.utilities.serpapi import SerpAPIWrapper
|
||||
from langchain_community.utilities.serpapi import SerpAPIWrapper
|
||||
|
||||
__all__ = ["SerpAPIWrapper"]
|
||||
|
@ -1,4 +1,4 @@
|
||||
"""Keep here for backwards compatibility."""
|
||||
from langchain.utilities.sql_database import SQLDatabase
|
||||
from langchain_community.utilities.sql_database import SQLDatabase
|
||||
|
||||
__all__ = ["SQLDatabase"]
|
||||
|
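
The final hunks are the thin backwards-compatibility modules, which now re-export from langchain_community instead of langchain.utilities. A quick sanity check that the old and new paths still resolve to the same class; the legacy module path below is an assumption about where the shim lives:

    from langchain_community.utilities.sql_database import SQLDatabase as NewSQLDatabase

    from langchain.sql_database import SQLDatabase as LegacySQLDatabase  # assumed legacy shim path

    # While the shim is in place, both imports point at the exact same class object.
    assert LegacySQLDatabase is NewSQLDatabase
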
Some files were not shown because too many files have changed in this diff.