Mirror of https://github.com/hwchase17/langchain.git (synced 2025-09-28 15:00:23 +00:00)
community[major], core[patch], langchain[patch], experimental[patch]: Create langchain-community (#14463)
Moved the following modules to the new package langchain-community in a backwards compatible fashion:

```
mv langchain/langchain/adapters community/langchain_community
mv langchain/langchain/callbacks community/langchain_community/callbacks
mv langchain/langchain/chat_loaders community/langchain_community
mv langchain/langchain/chat_models community/langchain_community
mv langchain/langchain/document_loaders community/langchain_community
mv langchain/langchain/docstore community/langchain_community
mv langchain/langchain/document_transformers community/langchain_community
mv langchain/langchain/embeddings community/langchain_community
mv langchain/langchain/graphs community/langchain_community
mv langchain/langchain/llms community/langchain_community
mv langchain/langchain/memory/chat_message_histories community/langchain_community
mv langchain/langchain/retrievers community/langchain_community
mv langchain/langchain/storage community/langchain_community
mv langchain/langchain/tools community/langchain_community
mv langchain/langchain/utilities community/langchain_community
mv langchain/langchain/vectorstores community/langchain_community
mv langchain/langchain/agents/agent_toolkits community/langchain_community
mv langchain/langchain/cache.py community/langchain_community
```

Moved the following to core:

```
mv langchain/langchain/utils/json_schema.py core/langchain_core/utils
mv langchain/langchain/utils/html.py core/langchain_core/utils
mv langchain/langchain/utils/strings.py core/langchain_core/utils
cat langchain/langchain/utils/env.py >> core/langchain_core/utils/env.py
rm langchain/langchain/utils/env.py
```

See .scripts/community_split/script_integrations.sh for all changes.
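Because the move is backwards compatible, existing `langchain.*` imports keep working while new code can target `langchain_community` directly. A minimal before/after sketch using one of the moved modules:

```python
# Old import path, still available for backwards compatibility:
from langchain.chat_models import ChatOpenAI  # noqa: F811

# New canonical import path after this PR:
from langchain_community.chat_models import ChatOpenAI

llm = ChatOpenAI(temperature=0)  # same class either way; only the package moved
```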
1125  libs/community/langchain_community/tools/__init__.py  Normal file
File diff suppressed because it is too large
102  libs/community/langchain_community/tools/ainetwork/app.py  Normal file
@@ -0,0 +1,102 @@
import builtins
import json
from enum import Enum
from typing import List, Optional, Type, Union

from langchain_core.callbacks import AsyncCallbackManagerForToolRun
from langchain_core.pydantic_v1 import BaseModel, Field

from langchain_community.tools.ainetwork.base import AINBaseTool


class AppOperationType(str, Enum):
    """Type of app operation as enumerator."""

    SET_ADMIN = "SET_ADMIN"
    GET_CONFIG = "GET_CONFIG"


class AppSchema(BaseModel):
    """Schema for app operations."""

    type: AppOperationType = Field(...)
    appName: str = Field(..., description="Name of the application on the blockchain")
    address: Optional[Union[str, List[str]]] = Field(
        None,
        description=(
            "A single address or a list of addresses. Default: current session's "
            "address"
        ),
    )


class AINAppOps(AINBaseTool):
    """Tool for app operations."""

    name: str = "AINappOps"
    description: str = """
Create an app in the AINetwork Blockchain database by creating the /apps/<appName> path.
An address set as `admin` can grant `owner` rights to other addresses (refer to `AINownerOps` for more details).
Also, `admin` is initialized to have all `owner` permissions and `rule` allowed for that path.

## appName Rule
- [a-z_0-9]+

## address Rules
- 0x[0-9a-fA-F]{40}
- Defaults to the current session's address
- Multiple addresses can be specified if needed

## SET_ADMIN Example 1
- type: SET_ADMIN
- appName: ain_project

### Result:
1. Path /apps/ain_project created.
2. Current session's address registered as admin.

## SET_ADMIN Example 2
- type: SET_ADMIN
- appName: test_project
- address: [<address1>, <address2>]

### Result:
1. Path /apps/test_project created.
2. <address1> and <address2> registered as admin.
"""  # noqa: E501
    args_schema: Type[BaseModel] = AppSchema

    async def _arun(
        self,
        type: AppOperationType,
        appName: str,
        address: Optional[Union[str, List[str]]] = None,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        from ain.types import ValueOnlyTransactionInput
        from ain.utils import getTimestamp

        try:
            if type is AppOperationType.SET_ADMIN:
                if address is None:
                    address = self.interface.wallet.defaultAccount.address
                if isinstance(address, str):
                    address = [address]

                res = await self.interface.db.ref(
                    f"/manage_app/{appName}/create/{getTimestamp()}"
                ).setValue(
                    transactionInput=ValueOnlyTransactionInput(
                        value={"admin": {address: True for address in address}}
                    )
                )
            elif type is AppOperationType.GET_CONFIG:
                res = await self.interface.db.ref(
                    f"/manage_app/{appName}/config"
                ).getValue()
            else:
                raise ValueError(f"Unsupported 'type': {type}.")
            return json.dumps(res, ensure_ascii=False)
        except Exception as e:
            return f"{builtins.type(e).__name__}: {str(e)}"
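A quick usage sketch for this tool, assuming `pip install ain-py` and the `AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY` environment variable consumed by `authenticate()` in utils.py below; the app name is illustrative:

```python
from langchain_community.tools.ainetwork.app import AINAppOps

tool = AINAppOps()  # builds an authenticated `Ain` interface via its default factory

# Create /apps/my_test_app and register the session's address as admin
print(tool.run({"type": "SET_ADMIN", "appName": "my_test_app"}))

# Read back the app config; errors come back as "<ErrorType>: <message>" strings
print(tool.run({"type": "GET_CONFIG", "appName": "my_test_app"}))
```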
73  libs/community/langchain_community/tools/ainetwork/base.py  Normal file
@@ -0,0 +1,73 @@
from __future__ import annotations

import asyncio
import threading
from enum import Enum
from typing import TYPE_CHECKING, Any, Optional

from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.pydantic_v1 import Field
from langchain_core.tools import BaseTool

from langchain_community.tools.ainetwork.utils import authenticate

if TYPE_CHECKING:
    from ain.ain import Ain


class OperationType(str, Enum):
    """Type of operation as enumerator."""

    SET = "SET"
    GET = "GET"


class AINBaseTool(BaseTool):
    """Base class for the AINetwork tools."""

    interface: Ain = Field(default_factory=authenticate)
    """The interface object for the AINetwork Blockchain."""

    def _run(
        self,
        *args: Any,
        run_manager: Optional[CallbackManagerForToolRun] = None,
        **kwargs: Any,
    ) -> str:
        try:
            loop = asyncio.get_event_loop()
        except RuntimeError:
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
        if loop.is_closed():
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)

        if loop.is_running():
            result_container = []

            def thread_target() -> None:
                nonlocal result_container
                new_loop = asyncio.new_event_loop()
                asyncio.set_event_loop(new_loop)
                try:
                    result_container.append(
                        new_loop.run_until_complete(self._arun(*args, **kwargs))
                    )
                except Exception as e:
                    result_container.append(e)
                finally:
                    new_loop.close()

            thread = threading.Thread(target=thread_target)
            thread.start()
            thread.join()
            result = result_container[0]
            if isinstance(result, Exception):
                raise result
            return result

        else:
            result = loop.run_until_complete(self._arun(*args, **kwargs))
            loop.close()
            return result
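`_run` above bridges the synchronous `BaseTool` entry point onto the async `_arun`: it reuses the current event loop when it is idle, and when a loop is already running it executes the coroutine on a fresh loop in a worker thread, shuttling the result (or exception) back. A standalone sketch of the same pattern, with illustrative names:

```python
import asyncio
import threading
from typing import Any


def run_coroutine_sync(coro: Any) -> Any:
    """Run a coroutine from sync code, even if an event loop is already running."""
    try:
        loop = asyncio.get_event_loop()
    except RuntimeError:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)

    if loop.is_running():
        # Can't block a running loop, so run the coroutine on its own loop
        # in a worker thread and hand the result (or exception) back.
        box: list = []

        def target() -> None:
            new_loop = asyncio.new_event_loop()
            try:
                box.append(new_loop.run_until_complete(coro))
            except Exception as e:
                box.append(e)
            finally:
                new_loop.close()

        t = threading.Thread(target=target)
        t.start()
        t.join()
        if isinstance(box[0], Exception):
            raise box[0]
        return box[0]

    return loop.run_until_complete(coro)


async def _demo() -> str:
    await asyncio.sleep(0.1)
    return "done"


print(run_coroutine_sync(_demo()))  # prints "done"
```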
115  libs/community/langchain_community/tools/ainetwork/owner.py  Normal file
@@ -0,0 +1,115 @@
import builtins
import json
from typing import List, Optional, Type, Union

from langchain_core.callbacks import AsyncCallbackManagerForToolRun
from langchain_core.pydantic_v1 import BaseModel, Field

from langchain_community.tools.ainetwork.base import AINBaseTool, OperationType


class RuleSchema(BaseModel):
    """Schema for owner operations."""

    type: OperationType = Field(...)
    path: str = Field(..., description="Blockchain reference path")
    address: Optional[Union[str, List[str]]] = Field(
        None, description="A single address or a list of addresses"
    )
    write_owner: Optional[bool] = Field(
        False, description="Authority to edit the `owner` property of the path"
    )
    write_rule: Optional[bool] = Field(
        False, description="Authority to edit `write rule` for the path"
    )
    write_function: Optional[bool] = Field(
        False, description="Authority to `set function` for the path"
    )
    branch_owner: Optional[bool] = Field(
        False, description="Authority to initialize `owner` of sub-paths"
    )


class AINOwnerOps(AINBaseTool):
    """Tool for owner operations."""

    name: str = "AINownerOps"
    description: str = """
Rules for `owner` in AINetwork Blockchain database.
An address set as `owner` can modify permissions according to its granted authorities

## Path Rule
- (/[a-zA-Z_0-9]+)+
- Permission checks ascend from the most specific (child) path to broader (parent) paths until an `owner` is located.

## Address Rules
- 0x[0-9a-fA-F]{40}: 40-digit hexadecimal address
- *: All addresses permitted
- Defaults to the current session's address

## SET
- `SET` alters permissions for specific addresses, while other addresses remain unaffected.
- When removing an address of `owner`, set all authorities for that address to false.
- message `write_owner permission evaluated false` if fail

### Example
- type: SET
- path: /apps/langchain
- address: [<address 1>, <address 2>]
- write_owner: True
- write_rule: True
- write_function: True
- branch_owner: True

## GET
- Provides all addresses with `owner` permissions and their authorities in the path.

### Example
- type: GET
- path: /apps/langchain
"""  # noqa: E501
    args_schema: Type[BaseModel] = RuleSchema

    async def _arun(
        self,
        type: OperationType,
        path: str,
        address: Optional[Union[str, List[str]]] = None,
        write_owner: Optional[bool] = None,
        write_rule: Optional[bool] = None,
        write_function: Optional[bool] = None,
        branch_owner: Optional[bool] = None,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        from ain.types import ValueOnlyTransactionInput

        try:
            if type is OperationType.SET:
                if address is None:
                    address = self.interface.wallet.defaultAccount.address
                if isinstance(address, str):
                    address = [address]
                res = await self.interface.db.ref(path).setOwner(
                    transactionInput=ValueOnlyTransactionInput(
                        value={
                            ".owner": {
                                "owners": {
                                    address: {
                                        "write_owner": write_owner or False,
                                        "write_rule": write_rule or False,
                                        "write_function": write_function or False,
                                        "branch_owner": branch_owner or False,
                                    }
                                    for address in address
                                }
                            }
                        }
                    )
                )
            elif type is OperationType.GET:
                res = await self.interface.db.ref(path).getOwner()
            else:
                raise ValueError(f"Unsupported 'type': {type}.")
            return json.dumps(res, ensure_ascii=False)
        except Exception as e:
            return f"{builtins.type(e).__name__}: {str(e)}"
82  libs/community/langchain_community/tools/ainetwork/rule.py  Normal file
@@ -0,0 +1,82 @@
import builtins
import json
from typing import Optional, Type

from langchain_core.callbacks import AsyncCallbackManagerForToolRun
from langchain_core.pydantic_v1 import BaseModel, Field

from langchain_community.tools.ainetwork.base import AINBaseTool, OperationType


class RuleSchema(BaseModel):
    """Schema for rule operations."""

    type: OperationType = Field(...)
    path: str = Field(..., description="Path on the blockchain where the rule applies")
    eval: Optional[str] = Field(None, description="eval string to determine permission")


class AINRuleOps(AINBaseTool):
    """Tool for rule operations."""

    name: str = "AINruleOps"
    description: str = """
Covers the write `rule` for the AINetwork Blockchain database. The SET type specifies write permissions using the `eval` variable as a JavaScript eval string.
In order to use AINvalueOps with SET at the path, the execution result of the `eval` string must be true.

## Path Rules
1. Allowed characters for directory: `[a-zA-Z_0-9]`
2. Use `$<key>` for template variables as directory.

## Eval String Special Variables
- auth.addr: Address of the writer for the path
- newData: New data for the path
- data: Current data for the path
- currentTime: Time in seconds
- lastBlockNumber: Latest processed block number

## Eval String Functions
- getValue(<path>)
- getRule(<path>)
- getOwner(<path>)
- getFunction(<path>)
- evalRule(<path>, <value to set>, auth, currentTime)
- evalOwner(<path>, 'write_owner', auth)

## SET Example
- type: SET
- path: /apps/langchain_project_1/$from/$to/$img
- eval: auth.addr===$from&&!getValue('/apps/image_db/'+$img)

## GET Example
- type: GET
- path: /apps/langchain_project_1
"""  # noqa: E501
    args_schema: Type[BaseModel] = RuleSchema

    async def _arun(
        self,
        type: OperationType,
        path: str,
        eval: Optional[str] = None,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        from ain.types import ValueOnlyTransactionInput

        try:
            if type is OperationType.SET:
                if eval is None:
                    raise ValueError("'eval' is required for SET operation.")

                res = await self.interface.db.ref(path).setRule(
                    transactionInput=ValueOnlyTransactionInput(
                        value={".rule": {"write": eval}}
                    )
                )
            elif type is OperationType.GET:
                res = await self.interface.db.ref(path).getRule()
            else:
                raise ValueError(f"Unsupported 'type': {type}.")
            return json.dumps(res, ensure_ascii=False)
        except Exception as e:
            return f"{builtins.type(e).__name__}: {str(e)}"
34  libs/community/langchain_community/tools/ainetwork/transfer.py  Normal file
@@ -0,0 +1,34 @@
import json
from typing import Optional, Type

from langchain_core.callbacks import AsyncCallbackManagerForToolRun
from langchain_core.pydantic_v1 import BaseModel, Field

from langchain_community.tools.ainetwork.base import AINBaseTool


class TransferSchema(BaseModel):
    """Schema for transfer operations."""

    address: str = Field(..., description="Address to transfer AIN to")
    amount: int = Field(..., description="Amount of AIN to transfer")


class AINTransfer(AINBaseTool):
    """Tool for transfer operations."""

    name: str = "AINtransfer"
    description: str = "Transfers AIN to a specified address"
    args_schema: Type[TransferSchema] = TransferSchema

    async def _arun(
        self,
        address: str,
        amount: int,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        try:
            res = await self.interface.wallet.transfer(address, amount, nonce=-1)
            return json.dumps(res, ensure_ascii=False)
        except Exception as e:
            return f"{type(e).__name__}: {str(e)}"
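A usage sketch for the transfer tool, assuming an authenticated session via `authenticate()` below and a funded testnet account; the recipient address is a placeholder:

```python
from langchain_community.tools.ainetwork.transfer import AINTransfer

tool = AINTransfer()
# Transfer 100 AIN; returns the transaction result as JSON,
# or an "<ErrorType>: <message>" string if the transfer fails.
print(tool.run({"address": "0x" + "ab" * 20, "amount": 100}))
```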
62  libs/community/langchain_community/tools/ainetwork/utils.py  Normal file
@@ -0,0 +1,62 @@
"""AINetwork Blockchain tool utils."""
from __future__ import annotations

import os
from typing import TYPE_CHECKING, Literal, Optional

if TYPE_CHECKING:
    from ain.ain import Ain


def authenticate(network: Optional[Literal["mainnet", "testnet"]] = "testnet") -> Ain:
    """Authenticate using the AIN Blockchain"""

    try:
        from ain.ain import Ain
    except ImportError as e:
        raise ImportError(
            "Cannot import ain-py related modules. Please install the package with "
            "`pip install ain-py`."
        ) from e

    if network == "mainnet":
        provider_url = "https://mainnet-api.ainetwork.ai/"
        chain_id = 1
        if "AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY" in os.environ:
            private_key = os.environ["AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY"]
        else:
            raise EnvironmentError(
                "Error: The AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY environmental variable "
                "has not been set."
            )
    elif network == "testnet":
        provider_url = "https://testnet-api.ainetwork.ai/"
        chain_id = 0
        if "AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY" in os.environ:
            private_key = os.environ["AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY"]
        else:
            raise EnvironmentError(
                "Error: The AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY environmental variable "
                "has not been set."
            )
    elif network is None:
        if (
            "AIN_BLOCKCHAIN_PROVIDER_URL" in os.environ
            and "AIN_BLOCKCHAIN_CHAIN_ID" in os.environ
            and "AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY" in os.environ
        ):
            provider_url = os.environ["AIN_BLOCKCHAIN_PROVIDER_URL"]
            chain_id = int(os.environ["AIN_BLOCKCHAIN_CHAIN_ID"])
            private_key = os.environ["AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY"]
        else:
            raise EnvironmentError(
                "Error: The AIN_BLOCKCHAIN_PROVIDER_URL, "
                "AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY, and AIN_BLOCKCHAIN_CHAIN_ID "
                "environmental variables have not been set."
            )
    else:
        raise ValueError(f"Unsupported 'network': {network}")

    ain = Ain(provider_url, chain_id)
    ain.wallet.addAndSetDefaultAccount(private_key)
    return ain
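A configuration sketch for `authenticate`; the key value is a placeholder:

```python
import os

# Required for both "mainnet" and "testnet":
os.environ["AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY"] = "<your-private-key>"

# With network=None, the provider is instead taken entirely from the environment:
os.environ["AIN_BLOCKCHAIN_PROVIDER_URL"] = "https://testnet-api.ainetwork.ai/"
os.environ["AIN_BLOCKCHAIN_CHAIN_ID"] = "0"

from langchain_community.tools.ainetwork.utils import authenticate

ain = authenticate(network="testnet")  # returns a configured `Ain` client
```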
85  libs/community/langchain_community/tools/ainetwork/value.py  Normal file
@@ -0,0 +1,85 @@
import builtins
import json
from typing import Optional, Type, Union

from langchain_core.callbacks import AsyncCallbackManagerForToolRun
from langchain_core.pydantic_v1 import BaseModel, Field

from langchain_community.tools.ainetwork.base import AINBaseTool, OperationType


class ValueSchema(BaseModel):
    """Schema for value operations."""

    type: OperationType = Field(...)
    path: str = Field(..., description="Blockchain reference path")
    value: Optional[Union[int, str, float, dict]] = Field(
        None, description="Value to be set at the path"
    )


class AINValueOps(AINBaseTool):
    """Tool for value operations."""

    name: str = "AINvalueOps"
    description: str = """
Covers the read and write value for the AINetwork Blockchain database.

## SET
- Set a value at a given path

### Example
- type: SET
- path: /apps/langchain_test_1/object
- value: {1: 2, "34": 56}

## GET
- Retrieve a value at a given path

### Example
- type: GET
- path: /apps/langchain_test_1/DB

## Special paths
- `/accounts/<address>/balance`: Account balance
- `/accounts/<address>/nonce`: Account nonce
- `/apps`: Applications
- `/consensus`: Consensus
- `/checkin`: Check-in
- `/deposit/<service id>/<address>/<deposit id>`: Deposit
- `/deposit_accounts/<service id>/<address>/<account id>`: Deposit accounts
- `/escrow`: Escrow
- `/payments`: Payment
- `/sharding`: Sharding
- `/token/name`: Token name
- `/token/symbol`: Token symbol
- `/token/total_supply`: Token total supply
- `/transfer/<address from>/<address to>/<key>/value`: Transfer
- `/withdraw/<service id>/<address>/<withdraw id>`: Withdraw
"""
    args_schema: Type[BaseModel] = ValueSchema

    async def _arun(
        self,
        type: OperationType,
        path: str,
        value: Optional[Union[int, str, float, dict]] = None,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        from ain.types import ValueOnlyTransactionInput

        try:
            if type is OperationType.SET:
                if value is None:
                    raise ValueError("'value' is required for SET operation.")

                res = await self.interface.db.ref(path).setValue(
                    transactionInput=ValueOnlyTransactionInput(value=value)
                )
            elif type is OperationType.GET:
                res = await self.interface.db.ref(path).getValue()
            else:
                raise ValueError(f"Unsupported 'type': {type}.")
            return json.dumps(res, ensure_ascii=False)
        except Exception as e:
            return f"{builtins.type(e).__name__}: {str(e)}"
9  libs/community/langchain_community/tools/amadeus/__init__.py  Normal file
@@ -0,0 +1,9 @@
"""Amadeus tools."""

from langchain_community.tools.amadeus.closest_airport import AmadeusClosestAirport
from langchain_community.tools.amadeus.flight_search import AmadeusFlightSearch

__all__ = [
    "AmadeusClosestAirport",
    "AmadeusFlightSearch",
]
18  libs/community/langchain_community/tools/amadeus/base.py  Normal file
@@ -0,0 +1,18 @@
"""Base class for Amadeus tools."""
from __future__ import annotations

from typing import TYPE_CHECKING

from langchain_core.pydantic_v1 import Field
from langchain_core.tools import BaseTool

from langchain_community.tools.amadeus.utils import authenticate

if TYPE_CHECKING:
    from amadeus import Client


class AmadeusBaseTool(BaseTool):
    """Base Tool for Amadeus."""

    client: Client = Field(default_factory=authenticate)
50  libs/community/langchain_community/tools/amadeus/closest_airport.py  Normal file
@@ -0,0 +1,50 @@
from typing import Optional, Type

from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.pydantic_v1 import BaseModel, Field

from langchain_community.chat_models import ChatOpenAI
from langchain_community.tools.amadeus.base import AmadeusBaseTool


class ClosestAirportSchema(BaseModel):
    """Schema for the AmadeusClosestAirport tool."""

    location: str = Field(
        description=(
            " The location for which you would like to find the nearest airport "
            " along with optional details such as country, state, region, or "
            " province, allowing for easy processing and identification of "
            " the closest airport. Examples of the format are the following:\n"
            " Cali, Colombia\n "
            " Lincoln, Nebraska, United States\n"
            " New York, United States\n"
            " Sydney, New South Wales, Australia\n"
            " Rome, Lazio, Italy\n"
            " Toronto, Ontario, Canada\n"
        )
    )


class AmadeusClosestAirport(AmadeusBaseTool):
    """Tool for finding the closest airport to a particular location."""

    name: str = "closest_airport"
    description: str = (
        "Use this tool to find the closest airport to a particular location."
    )
    args_schema: Type[ClosestAirportSchema] = ClosestAirportSchema

    def _run(
        self,
        location: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        content = (
            f" What is the nearest airport to {location}? Please respond with the "
            " airport's International Air Transport Association (IATA) Location "
            ' Identifier in the following JSON format. JSON: "iataCode": "IATA '
            ' Location Identifier" '
        )

        return ChatOpenAI(temperature=0).predict(content)
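Note that `_run` resolves the airport with an OpenAI chat model rather than the Amadeus API, so `OPENAI_API_KEY` must be set in addition to the Amadeus credentials consumed by the client's default factory. A usage sketch with placeholder credentials:

```python
import os

os.environ["OPENAI_API_KEY"] = "<your-openai-key>"          # placeholder
os.environ["AMADEUS_CLIENT_ID"] = "<your-amadeus-id>"       # placeholder
os.environ["AMADEUS_CLIENT_SECRET"] = "<your-amadeus-secret>"  # placeholder

from langchain_community.tools.amadeus.closest_airport import AmadeusClosestAirport

tool = AmadeusClosestAirport()
# Expected to return something like: "iataCode": "YYZ"
print(tool.run({"location": "Toronto, Ontario, Canada"}))
```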
152  libs/community/langchain_community/tools/amadeus/flight_search.py  Normal file
@@ -0,0 +1,152 @@
import logging
from datetime import datetime as dt
from typing import Dict, Optional, Type

from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.pydantic_v1 import BaseModel, Field

from langchain_community.tools.amadeus.base import AmadeusBaseTool

logger = logging.getLogger(__name__)


class FlightSearchSchema(BaseModel):
    """Schema for the AmadeusFlightSearch tool."""

    originLocationCode: str = Field(
        description=(
            " The three letter International Air Transport "
            " Association (IATA) Location Identifier for the "
            " search's origin airport. "
        )
    )
    destinationLocationCode: str = Field(
        description=(
            " The three letter International Air Transport "
            " Association (IATA) Location Identifier for the "
            " search's destination airport. "
        )
    )
    departureDateTimeEarliest: str = Field(
        description=(
            " The earliest departure datetime from the origin airport "
            " for the flight search in the following format: "
            ' "YYYY-MM-DDTHH:MM:SS", where "T" separates the date and time '
            ' components. For example: "2023-06-09T10:30:00" represents '
            " June 9th, 2023, at 10:30 AM. "
        )
    )
    departureDateTimeLatest: str = Field(
        description=(
            " The latest departure datetime from the origin airport "
            " for the flight search in the following format: "
            ' "YYYY-MM-DDTHH:MM:SS", where "T" separates the date and time '
            ' components. For example: "2023-06-09T10:30:00" represents '
            " June 9th, 2023, at 10:30 AM. "
        )
    )
    page_number: int = Field(
        default=1,
        description="The specific page number of flight results to retrieve",
    )


class AmadeusFlightSearch(AmadeusBaseTool):
    """Tool for searching for a single flight between two airports."""

    name: str = "single_flight_search"
    description: str = (
        " Use this tool to search for a single flight between the origin and "
        " destination airports at a departure between an earliest and "
        " latest datetime. "
    )
    args_schema: Type[FlightSearchSchema] = FlightSearchSchema

    def _run(
        self,
        originLocationCode: str,
        destinationLocationCode: str,
        departureDateTimeEarliest: str,
        departureDateTimeLatest: str,
        page_number: int = 1,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> list:
        try:
            from amadeus import ResponseError
        except ImportError as e:
            raise ImportError(
                "Unable to import amadeus, please install with `pip install amadeus`."
            ) from e

        RESULTS_PER_PAGE = 10

        # Authenticate and retrieve a client
        client = self.client

        # Check that earliest and latest dates are in the same day
        earliestDeparture = dt.strptime(departureDateTimeEarliest, "%Y-%m-%dT%H:%M:%S")
        latestDeparture = dt.strptime(departureDateTimeLatest, "%Y-%m-%dT%H:%M:%S")

        if earliestDeparture.date() != latestDeparture.date():
            logger.error(
                " Error: Earliest and latest departure dates need to be the "
                " same date. If you're trying to search for round-trip "
                " flights, call this function for the outbound flight first, "
                " and then call again for the return flight. "
            )
            return [None]

        # Collect all results from the Amadeus Flight Offers Search API
        try:
            response = client.shopping.flight_offers_search.get(
                originLocationCode=originLocationCode,
                destinationLocationCode=destinationLocationCode,
                departureDate=latestDeparture.strftime("%Y-%m-%d"),
                adults=1,
            )
        except ResponseError as error:
            logger.error(error)
            # Bail out here: `response` is undefined if the request failed
            return []

        # Generate output dictionary
        output = []

        for offer in response.data:
            itinerary: Dict = {}
            itinerary["price"] = {}
            itinerary["price"]["total"] = offer["price"]["total"]
            currency = offer["price"]["currency"]
            currency = response.result["dictionaries"]["currencies"][currency]
            itinerary["price"]["currency"] = currency

            segments = []
            for segment in offer["itineraries"][0]["segments"]:
                flight = {}
                flight["departure"] = segment["departure"]
                flight["arrival"] = segment["arrival"]
                flight["flightNumber"] = segment["number"]
                carrier = segment["carrierCode"]
                carrier = response.result["dictionaries"]["carriers"][carrier]
                flight["carrier"] = carrier

                segments.append(flight)

            itinerary["segments"] = segments

            output.append(itinerary)

        # Filter out flights after the latest departure time; a comprehension is
        # used because popping from the list while enumerating it skips elements
        output = [
            offer
            for offer in output
            if dt.strptime(offer["segments"][0]["departure"]["at"], "%Y-%m-%dT%H:%M:%S")
            <= latestDeparture
        ]

        # Return the paginated results
        startIndex = (page_number - 1) * RESULTS_PER_PAGE
        endIndex = startIndex + RESULTS_PER_PAGE

        return output[startIndex:endIndex]
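A usage sketch; the IATA codes and dates are illustrative, and the Amadeus credentials described in utils.py below must be set:

```python
from langchain_community.tools.amadeus.flight_search import AmadeusFlightSearch

tool = AmadeusFlightSearch()  # needs AMADEUS_CLIENT_ID / AMADEUS_CLIENT_SECRET
flights = tool.run(
    {
        "originLocationCode": "JFK",
        "destinationLocationCode": "LHR",
        "departureDateTimeEarliest": "2024-06-09T08:00:00",
        "departureDateTimeLatest": "2024-06-09T20:00:00",
    }
)
# Each result is a dict with "price" and "segments" keys
for itinerary in flights:
    print(itinerary["price"], len(itinerary["segments"]), "segment(s)")
```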
42  libs/community/langchain_community/tools/amadeus/utils.py  Normal file
@@ -0,0 +1,42 @@
"""Amadeus tool utils."""
from __future__ import annotations

import logging
import os
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from amadeus import Client

logger = logging.getLogger(__name__)


def authenticate() -> Client:
    """Authenticate using the Amadeus API"""
    try:
        from amadeus import Client
    except ImportError as e:
        raise ImportError(
            "Cannot import amadeus. Please install the package with "
            "`pip install amadeus`."
        ) from e

    if "AMADEUS_CLIENT_ID" in os.environ and "AMADEUS_CLIENT_SECRET" in os.environ:
        client_id = os.environ["AMADEUS_CLIENT_ID"]
        client_secret = os.environ["AMADEUS_CLIENT_SECRET"]
    else:
        logger.error(
            "Error: The AMADEUS_CLIENT_ID and AMADEUS_CLIENT_SECRET environmental "
            "variables have not been set. Visit the following link on how to "
            "acquire these authorization tokens: "
            "https://developers.amadeus.com/register"
        )
        return None

    hostname = "test"  # Default hostname
    if "AMADEUS_HOSTNAME" in os.environ:
        hostname = os.environ["AMADEUS_HOSTNAME"]

    client = Client(client_id=client_id, client_secret=client_secret, hostname=hostname)

    return client
1  libs/community/langchain_community/tools/arxiv/__init__.py  Normal file
@@ -0,0 +1 @@
"""Arxiv API toolkit."""
37  libs/community/langchain_community/tools/arxiv/tool.py  Normal file
@@ -0,0 +1,37 @@
"""Tool for the Arxiv API."""

from typing import Optional, Type

from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.tools import BaseTool

from langchain_community.utilities.arxiv import ArxivAPIWrapper


class ArxivInput(BaseModel):
    query: str = Field(description="search query to look up")


class ArxivQueryRun(BaseTool):
    """Tool that searches the Arxiv API."""

    name: str = "arxiv"
    description: str = (
        "A wrapper around Arxiv.org "
        "Useful for when you need to answer questions about Physics, Mathematics, "
        "Computer Science, Quantitative Biology, Quantitative Finance, Statistics, "
        "Electrical Engineering, and Economics "
        "from scientific articles on arxiv.org. "
        "Input should be a search query."
    )
    api_wrapper: ArxivAPIWrapper = Field(default_factory=ArxivAPIWrapper)
    args_schema: Type[BaseModel] = ArxivInput

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the Arxiv tool."""
        return self.api_wrapper.run(query)
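A usage sketch, assuming `pip install arxiv` for the underlying `ArxivAPIWrapper`:

```python
from langchain_community.tools.arxiv.tool import ArxivQueryRun

tool = ArxivQueryRun()
# Returns a string summarizing the top matching papers
# (publication date, title, authors, abstract)
print(tool.run("tree of thoughts"))
```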
25  libs/community/langchain_community/tools/azure_cognitive_services/__init__.py  Normal file
@@ -0,0 +1,25 @@
"""Azure Cognitive Services Tools."""

from langchain_community.tools.azure_cognitive_services.form_recognizer import (
    AzureCogsFormRecognizerTool,
)
from langchain_community.tools.azure_cognitive_services.image_analysis import (
    AzureCogsImageAnalysisTool,
)
from langchain_community.tools.azure_cognitive_services.speech2text import (
    AzureCogsSpeech2TextTool,
)
from langchain_community.tools.azure_cognitive_services.text2speech import (
    AzureCogsText2SpeechTool,
)
from langchain_community.tools.azure_cognitive_services.text_analytics_health import (
    AzureCogsTextAnalyticsHealthTool,
)

__all__ = [
    "AzureCogsImageAnalysisTool",
    "AzureCogsFormRecognizerTool",
    "AzureCogsSpeech2TextTool",
    "AzureCogsText2SpeechTool",
    "AzureCogsTextAnalyticsHealthTool",
]
143  libs/community/langchain_community/tools/azure_cognitive_services/form_recognizer.py  Normal file
@@ -0,0 +1,143 @@
from __future__ import annotations

import logging
from typing import Any, Dict, List, Optional

from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.pydantic_v1 import root_validator
from langchain_core.tools import BaseTool
from langchain_core.utils import get_from_dict_or_env

from langchain_community.tools.azure_cognitive_services.utils import (
    detect_file_src_type,
)

logger = logging.getLogger(__name__)


class AzureCogsFormRecognizerTool(BaseTool):
    """Tool that queries the Azure Cognitive Services Form Recognizer API.

    In order to set this up, follow instructions at:
    https://learn.microsoft.com/en-us/azure/applied-ai-services/form-recognizer/quickstarts/get-started-sdks-rest-api?view=form-recog-3.0.0&pivots=programming-language-python
    """

    azure_cogs_key: str = ""  #: :meta private:
    azure_cogs_endpoint: str = ""  #: :meta private:
    doc_analysis_client: Any  #: :meta private:

    name: str = "azure_cognitive_services_form_recognizer"
    description: str = (
        "A wrapper around Azure Cognitive Services Form Recognizer. "
        "Useful for when you need to "
        "extract text, tables, and key-value pairs from documents. "
        "Input should be a url to a document."
    )

    @root_validator(pre=True)
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and endpoint exists in environment."""
        azure_cogs_key = get_from_dict_or_env(
            values, "azure_cogs_key", "AZURE_COGS_KEY"
        )

        azure_cogs_endpoint = get_from_dict_or_env(
            values, "azure_cogs_endpoint", "AZURE_COGS_ENDPOINT"
        )

        try:
            from azure.ai.formrecognizer import DocumentAnalysisClient
            from azure.core.credentials import AzureKeyCredential

            values["doc_analysis_client"] = DocumentAnalysisClient(
                endpoint=azure_cogs_endpoint,
                credential=AzureKeyCredential(azure_cogs_key),
            )

        except ImportError:
            raise ImportError(
                "azure-ai-formrecognizer is not installed. "
                "Run `pip install azure-ai-formrecognizer` to install."
            )

        return values

    def _parse_tables(self, tables: List[Any]) -> List[Any]:
        result = []
        for table in tables:
            rc, cc = table.row_count, table.column_count
            _table = [["" for _ in range(cc)] for _ in range(rc)]
            for cell in table.cells:
                _table[cell.row_index][cell.column_index] = cell.content
            result.append(_table)
        return result

    def _parse_kv_pairs(self, kv_pairs: List[Any]) -> List[Any]:
        result = []
        for kv_pair in kv_pairs:
            key = kv_pair.key.content if kv_pair.key else ""
            value = kv_pair.value.content if kv_pair.value else ""
            result.append((key, value))
        return result

    def _document_analysis(self, document_path: str) -> Dict:
        document_src_type = detect_file_src_type(document_path)
        if document_src_type == "local":
            with open(document_path, "rb") as document:
                poller = self.doc_analysis_client.begin_analyze_document(
                    "prebuilt-document", document
                )
        elif document_src_type == "remote":
            poller = self.doc_analysis_client.begin_analyze_document_from_url(
                "prebuilt-document", document_path
            )
        else:
            raise ValueError(f"Invalid document path: {document_path}")

        result = poller.result()
        res_dict = {}

        if result.content is not None:
            res_dict["content"] = result.content

        if result.tables is not None:
            res_dict["tables"] = self._parse_tables(result.tables)

        if result.key_value_pairs is not None:
            res_dict["key_value_pairs"] = self._parse_kv_pairs(result.key_value_pairs)

        return res_dict

    def _format_document_analysis_result(self, document_analysis_result: Dict) -> str:
        formatted_result = []
        if "content" in document_analysis_result:
            formatted_result.append(
                f"Content: {document_analysis_result['content']}".replace("\n", " ")
            )

        if "tables" in document_analysis_result:
            for i, table in enumerate(document_analysis_result["tables"]):
                formatted_result.append(f"Table {i}: {table}".replace("\n", " "))

        if "key_value_pairs" in document_analysis_result:
            for kv_pair in document_analysis_result["key_value_pairs"]:
                formatted_result.append(
                    f"{kv_pair[0]}: {kv_pair[1]}".replace("\n", " ")
                )

        return "\n".join(formatted_result)

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        try:
            document_analysis_result = self._document_analysis(query)
            if not document_analysis_result:
                return "No good document analysis result was found"

            return self._format_document_analysis_result(document_analysis_result)
        except Exception as e:
            raise RuntimeError(f"Error while running AzureCogsFormRecognizerTool: {e}")
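A usage sketch with placeholder credentials and document URL:

```python
import os

os.environ["AZURE_COGS_KEY"] = "<your-azure-key>"       # placeholder
os.environ["AZURE_COGS_ENDPOINT"] = "<your-endpoint>"   # placeholder

from langchain_community.tools.azure_cognitive_services import (
    AzureCogsFormRecognizerTool,
)

tool = AzureCogsFormRecognizerTool()  # validates the env vars at construction time
# Accepts a local path or URL; returns extracted content, tables, key-value pairs
print(tool.run("https://example.com/invoice.pdf"))
```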
147  libs/community/langchain_community/tools/azure_cognitive_services/image_analysis.py  Normal file
@@ -0,0 +1,147 @@
from __future__ import annotations

import logging
from typing import Any, Dict, Optional

from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.pydantic_v1 import root_validator
from langchain_core.tools import BaseTool
from langchain_core.utils import get_from_dict_or_env

from langchain_community.tools.azure_cognitive_services.utils import (
    detect_file_src_type,
)

logger = logging.getLogger(__name__)


class AzureCogsImageAnalysisTool(BaseTool):
    """Tool that queries the Azure Cognitive Services Image Analysis API.

    In order to set this up, follow instructions at:
    https://learn.microsoft.com/en-us/azure/cognitive-services/computer-vision/quickstarts-sdk/image-analysis-client-library-40
    """

    azure_cogs_key: str = ""  #: :meta private:
    azure_cogs_endpoint: str = ""  #: :meta private:
    vision_service: Any  #: :meta private:
    analysis_options: Any  #: :meta private:

    name: str = "azure_cognitive_services_image_analysis"
    description: str = (
        "A wrapper around Azure Cognitive Services Image Analysis. "
        "Useful for when you need to analyze images. "
        "Input should be a url to an image."
    )

    @root_validator(pre=True)
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and endpoint exists in environment."""
        azure_cogs_key = get_from_dict_or_env(
            values, "azure_cogs_key", "AZURE_COGS_KEY"
        )

        azure_cogs_endpoint = get_from_dict_or_env(
            values, "azure_cogs_endpoint", "AZURE_COGS_ENDPOINT"
        )

        try:
            import azure.ai.vision as sdk

            values["vision_service"] = sdk.VisionServiceOptions(
                endpoint=azure_cogs_endpoint, key=azure_cogs_key
            )

            values["analysis_options"] = sdk.ImageAnalysisOptions()
            values["analysis_options"].features = (
                sdk.ImageAnalysisFeature.CAPTION
                | sdk.ImageAnalysisFeature.OBJECTS
                | sdk.ImageAnalysisFeature.TAGS
                | sdk.ImageAnalysisFeature.TEXT
            )
        except ImportError:
            raise ImportError(
                "azure-ai-vision is not installed. "
                "Run `pip install azure-ai-vision` to install."
            )

        return values

    def _image_analysis(self, image_path: str) -> Dict:
        try:
            import azure.ai.vision as sdk
        except ImportError:
            pass  # the import is validated in validate_environment

        image_src_type = detect_file_src_type(image_path)
        if image_src_type == "local":
            vision_source = sdk.VisionSource(filename=image_path)
        elif image_src_type == "remote":
            vision_source = sdk.VisionSource(url=image_path)
        else:
            raise ValueError(f"Invalid image path: {image_path}")

        image_analyzer = sdk.ImageAnalyzer(
            self.vision_service, vision_source, self.analysis_options
        )
        result = image_analyzer.analyze()

        res_dict = {}
        if result.reason == sdk.ImageAnalysisResultReason.ANALYZED:
            if result.caption is not None:
                res_dict["caption"] = result.caption.content

            if result.objects is not None:
                res_dict["objects"] = [obj.name for obj in result.objects]

            if result.tags is not None:
                res_dict["tags"] = [tag.name for tag in result.tags]

            if result.text is not None:
                res_dict["text"] = [line.content for line in result.text.lines]

        else:
            error_details = sdk.ImageAnalysisErrorDetails.from_result(result)
            raise RuntimeError(
                f"Image analysis failed.\n"
                f"Reason: {error_details.reason}\n"
                f"Details: {error_details.message}"
            )

        return res_dict

    def _format_image_analysis_result(self, image_analysis_result: Dict) -> str:
        formatted_result = []
        if "caption" in image_analysis_result:
            formatted_result.append("Caption: " + image_analysis_result["caption"])

        if (
            "objects" in image_analysis_result
            and len(image_analysis_result["objects"]) > 0
        ):
            formatted_result.append(
                "Objects: " + ", ".join(image_analysis_result["objects"])
            )

        if "tags" in image_analysis_result and len(image_analysis_result["tags"]) > 0:
            formatted_result.append("Tags: " + ", ".join(image_analysis_result["tags"]))

        if "text" in image_analysis_result and len(image_analysis_result["text"]) > 0:
            formatted_result.append("Text: " + ", ".join(image_analysis_result["text"]))

        return "\n".join(formatted_result)

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        try:
            image_analysis_result = self._image_analysis(query)
            if not image_analysis_result:
                return "No good image analysis result was found"

            return self._format_image_analysis_result(image_analysis_result)
        except Exception as e:
            raise RuntimeError(f"Error while running AzureCogsImageAnalysisTool: {e}")
120  libs/community/langchain_community/tools/azure_cognitive_services/speech2text.py  Normal file
@@ -0,0 +1,120 @@
from __future__ import annotations

import logging
import time
from typing import Any, Dict, Optional

from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.pydantic_v1 import root_validator
from langchain_core.tools import BaseTool
from langchain_core.utils import get_from_dict_or_env

from langchain_community.tools.azure_cognitive_services.utils import (
    detect_file_src_type,
    download_audio_from_url,
)

logger = logging.getLogger(__name__)


class AzureCogsSpeech2TextTool(BaseTool):
    """Tool that queries the Azure Cognitive Services Speech2Text API.

    In order to set this up, follow instructions at:
    https://learn.microsoft.com/en-us/azure/cognitive-services/speech-service/get-started-speech-to-text?pivots=programming-language-python
    """

    azure_cogs_key: str = ""  #: :meta private:
    azure_cogs_region: str = ""  #: :meta private:
    speech_language: str = "en-US"  #: :meta private:
    speech_config: Any  #: :meta private:

    name: str = "azure_cognitive_services_speech2text"
    description: str = (
        "A wrapper around Azure Cognitive Services Speech2Text. "
        "Useful for when you need to transcribe audio to text. "
        "Input should be a url to an audio file."
    )

    @root_validator(pre=True)
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and endpoint exists in environment."""
        azure_cogs_key = get_from_dict_or_env(
            values, "azure_cogs_key", "AZURE_COGS_KEY"
        )

        azure_cogs_region = get_from_dict_or_env(
            values, "azure_cogs_region", "AZURE_COGS_REGION"
        )

        try:
            import azure.cognitiveservices.speech as speechsdk

            values["speech_config"] = speechsdk.SpeechConfig(
                subscription=azure_cogs_key, region=azure_cogs_region
            )
        except ImportError:
            raise ImportError(
                "azure-cognitiveservices-speech is not installed. "
                "Run `pip install azure-cognitiveservices-speech` to install."
            )

        return values

    def _continuous_recognize(self, speech_recognizer: Any) -> str:
        done = False
        text = ""

        def stop_cb(evt: Any) -> None:
            """callback that stops continuous recognition"""
            speech_recognizer.stop_continuous_recognition_async()
            nonlocal done
            done = True

        def retrieve_cb(evt: Any) -> None:
            """callback that retrieves the intermediate recognition results"""
            nonlocal text
            text += evt.result.text

        # retrieve text on recognized events
        speech_recognizer.recognized.connect(retrieve_cb)
        # stop continuous recognition on either session stopped or canceled events
        speech_recognizer.session_stopped.connect(stop_cb)
        speech_recognizer.canceled.connect(stop_cb)

        # Start continuous speech recognition
        speech_recognizer.start_continuous_recognition_async()
        while not done:
            time.sleep(0.5)
        return text

    def _speech2text(self, audio_path: str, speech_language: str) -> str:
        try:
            import azure.cognitiveservices.speech as speechsdk
        except ImportError:
            pass  # the import is validated in validate_environment

        audio_src_type = detect_file_src_type(audio_path)
        if audio_src_type == "local":
            audio_config = speechsdk.AudioConfig(filename=audio_path)
        elif audio_src_type == "remote":
            tmp_audio_path = download_audio_from_url(audio_path)
            audio_config = speechsdk.AudioConfig(filename=tmp_audio_path)
        else:
            raise ValueError(f"Invalid audio path: {audio_path}")

        self.speech_config.speech_recognition_language = speech_language
        speech_recognizer = speechsdk.SpeechRecognizer(self.speech_config, audio_config)
        return self._continuous_recognize(speech_recognizer)

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        try:
            text = self._speech2text(query, self.speech_language)
            return text
        except Exception as e:
            raise RuntimeError(f"Error while running AzureCogsSpeech2TextTool: {e}")
102  libs/community/langchain_community/tools/azure_cognitive_services/text2speech.py  Normal file
@@ -0,0 +1,102 @@
from __future__ import annotations

import logging
import tempfile
from typing import Any, Dict, Optional

from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.pydantic_v1 import root_validator
from langchain_core.tools import BaseTool
from langchain_core.utils import get_from_dict_or_env

logger = logging.getLogger(__name__)


class AzureCogsText2SpeechTool(BaseTool):
    """Tool that queries the Azure Cognitive Services Text2Speech API.

    In order to set this up, follow instructions at:
    https://learn.microsoft.com/en-us/azure/cognitive-services/speech-service/get-started-text-to-speech?pivots=programming-language-python
    """

    azure_cogs_key: str = ""  #: :meta private:
    azure_cogs_region: str = ""  #: :meta private:
    speech_language: str = "en-US"  #: :meta private:
    speech_config: Any  #: :meta private:

    name: str = "azure_cognitive_services_text2speech"
    description: str = (
        "A wrapper around Azure Cognitive Services Text2Speech. "
        "Useful for when you need to convert text to speech. "
    )

    @root_validator(pre=True)
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and endpoint exists in environment."""
        azure_cogs_key = get_from_dict_or_env(
            values, "azure_cogs_key", "AZURE_COGS_KEY"
        )

        azure_cogs_region = get_from_dict_or_env(
            values, "azure_cogs_region", "AZURE_COGS_REGION"
        )

        try:
            import azure.cognitiveservices.speech as speechsdk

            values["speech_config"] = speechsdk.SpeechConfig(
                subscription=azure_cogs_key, region=azure_cogs_region
            )
        except ImportError:
            raise ImportError(
                "azure-cognitiveservices-speech is not installed. "
                "Run `pip install azure-cognitiveservices-speech` to install."
            )

        return values

    def _text2speech(self, text: str, speech_language: str) -> str:
        try:
            import azure.cognitiveservices.speech as speechsdk
        except ImportError:
            pass  # the import is validated in validate_environment

        self.speech_config.speech_synthesis_language = speech_language
        speech_synthesizer = speechsdk.SpeechSynthesizer(
            speech_config=self.speech_config, audio_config=None
        )
        result = speech_synthesizer.speak_text(text)

        if result.reason == speechsdk.ResultReason.SynthesizingAudioCompleted:
            stream = speechsdk.AudioDataStream(result)
            with tempfile.NamedTemporaryFile(
                mode="wb", suffix=".wav", delete=False
            ) as f:
                stream.save_to_wav_file(f.name)

            return f.name

        elif result.reason == speechsdk.ResultReason.Canceled:
            cancellation_details = result.cancellation_details
            logger.debug(f"Speech synthesis canceled: {cancellation_details.reason}")
            if cancellation_details.reason == speechsdk.CancellationReason.Error:
                raise RuntimeError(
                    f"Speech synthesis error: {cancellation_details.error_details}"
                )

            return "Speech synthesis canceled."

        else:
            return f"Speech synthesis failed: {result.reason}"

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        try:
            speech_file = self._text2speech(query, self.speech_language)
            return speech_file
        except Exception as e:
            raise RuntimeError(f"Error while running AzureCogsText2SpeechTool: {e}")
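A usage sketch, assuming `AZURE_COGS_KEY` and `AZURE_COGS_REGION` are set and `azure-cognitiveservices-speech` is installed:

```python
from langchain_community.tools.azure_cognitive_services import AzureCogsText2SpeechTool

tool = AzureCogsText2SpeechTool()
wav_path = tool.run("Hello from the langchain-community package.")
print(wav_path)  # path to a temporary .wav file with the synthesized speech
```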
@@ -0,0 +1,104 @@
from __future__ import annotations

import logging
from typing import Any, Dict, Optional

from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.pydantic_v1 import root_validator
from langchain_core.tools import BaseTool
from langchain_core.utils import get_from_dict_or_env

logger = logging.getLogger(__name__)


class AzureCogsTextAnalyticsHealthTool(BaseTool):
    """Tool that queries the Azure Cognitive Services Text Analytics for Health API.

    In order to set this up, follow instructions at:
    https://learn.microsoft.com/en-us/azure/ai-services/language-service/text-analytics-for-health/quickstart?tabs=windows&pivots=programming-language-python
    """

    azure_cogs_key: str = ""  #: :meta private:
    azure_cogs_endpoint: str = ""  #: :meta private:
    text_analytics_client: Any  #: :meta private:

    name: str = "azure_cognitive_services_text_analytics_health"
    description: str = (
        "A wrapper around Azure Cognitive Services Text Analytics for Health. "
        "Useful for when you need to identify entities in healthcare data. "
        "Input should be text."
    )

    @root_validator(pre=True)
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that the api key and endpoint exist in the environment."""
        azure_cogs_key = get_from_dict_or_env(
            values, "azure_cogs_key", "AZURE_COGS_KEY"
        )

        azure_cogs_endpoint = get_from_dict_or_env(
            values, "azure_cogs_endpoint", "AZURE_COGS_ENDPOINT"
        )

        try:
            import azure.ai.textanalytics as sdk
            from azure.core.credentials import AzureKeyCredential

            values["text_analytics_client"] = sdk.TextAnalyticsClient(
                endpoint=azure_cogs_endpoint,
                credential=AzureKeyCredential(azure_cogs_key),
            )

        except ImportError:
            raise ImportError(
                "azure-ai-textanalytics is not installed. "
                "Run `pip install azure-ai-textanalytics` to install."
            )

        return values

    def _text_analysis(self, text: str) -> Dict:
        poller = self.text_analytics_client.begin_analyze_healthcare_entities(
            [{"id": "1", "language": "en", "text": text}]
        )

        result = poller.result()

        res_dict = {}

        docs = [doc for doc in result if not doc.is_error]

        if docs:
            res_dict["entities"] = [
                f"{x.text} is a healthcare entity of type {x.category}"
                for y in docs
                for x in y.entities
            ]

        return res_dict

    def _format_text_analysis_result(self, text_analysis_result: Dict) -> str:
        formatted_result = []
        if "entities" in text_analysis_result:
            formatted_result.append(
                f"""The text contains the following healthcare entities: {
                    ', '.join(text_analysis_result['entities'])
                }""".replace("\n", " ")
            )

        return "\n".join(formatted_result)

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        try:
            text_analysis_result = self._text_analysis(query)

            return self._format_text_analysis_result(text_analysis_result)
        except Exception as e:
            raise RuntimeError(
                f"Error while running AzureCogsTextAnalyticsHealthTool: {e}"
            )
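A usage sketch, assuming `AZURE_COGS_KEY` and `AZURE_COGS_ENDPOINT` are set as the validator above expects:

```python
from langchain_community.tools.azure_cognitive_services import (
    AzureCogsTextAnalyticsHealthTool,
)

health_tool = AzureCogsTextAnalyticsHealthTool()
# Prints e.g. "The text contains the following healthcare entities: ..."
print(health_tool.run("The patient was prescribed 100mg of ibuprofen twice daily."))
```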
@@ -0,0 +1,29 @@
import os
import tempfile
from urllib.parse import urlparse

import requests


def detect_file_src_type(file_path: str) -> str:
    """Detect if the file is local or remote."""
    if os.path.isfile(file_path):
        return "local"

    parsed_url = urlparse(file_path)
    if parsed_url.scheme and parsed_url.netloc:
        return "remote"

    return "invalid"


def download_audio_from_url(audio_url: str) -> str:
    """Download audio from url to local."""
    ext = audio_url.split(".")[-1]
    response = requests.get(audio_url, stream=True)
    response.raise_for_status()
    with tempfile.NamedTemporaryFile(mode="wb", suffix=f".{ext}", delete=False) as f:
        for chunk in response.iter_content(chunk_size=8192):
            f.write(chunk)

        return f.name
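How the two helpers above compose, with a hypothetical URL (assuming this is the azure_cognitive_services `utils` module):

```python
audio_url = "https://example.com/sample.wav"  # hypothetical remote file

if detect_file_src_type(audio_url) == "remote":
    # Streams the download into a temp file and returns its local path.
    local_path = download_audio_from_url(audio_url)
```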
162
libs/community/langchain_community/tools/bearly/tool.py
Normal file
@@ -0,0 +1,162 @@
import base64
import itertools
import json
import re
from pathlib import Path
from typing import Dict, List, Type

import requests
from langchain_core.pydantic_v1 import BaseModel, Field

from langchain_community.tools import Tool


def strip_markdown_code(md_string: str) -> str:
    """Strip markdown code from a string."""
    stripped_string = re.sub(r"^`{1,3}.*?\n", "", md_string, flags=re.DOTALL)
    stripped_string = re.sub(r"`{1,3}$", "", stripped_string)
    return stripped_string


def head_file(path: str, n: int) -> List[str]:
    """Get the first n lines of a file."""
    try:
        with open(path, "r") as f:
            return [str(line) for line in itertools.islice(f, n)]
    except Exception:
        return []


def file_to_base64(path: str) -> str:
    """Convert a file to base64."""
    with open(path, "rb") as f:
        return base64.b64encode(f.read()).decode()


class BearlyInterpreterToolArguments(BaseModel):
    """Arguments for the BearlyInterpreterTool."""

    python_code: str = Field(
        ...,
        example="print('Hello World')",
        description=(
            "The pure python script to be evaluated. "
            "The contents will be in main.py. "
            "It should not be in markdown format."
        ),
    )


base_description = """Evaluates python code in a sandbox environment. \
The environment resets on every execution. \
You must send the whole script every time and print your outputs. \
Script should be pure python code that can be evaluated. \
It should be in python format NOT markdown. \
The code should NOT be wrapped in backticks. \
All python packages including requests, matplotlib, scipy, numpy, pandas, \
etc are available. \
If you output any files, write them to "output/" relative to the execution \
path. Output can only be read from the directory, stdout, and stdin. \
Do not use things like plot.show(), as they will not work; \
instead write files to `output/` and a link to each file will be returned. \
print() any output and results so you can capture the output."""


class FileInfo(BaseModel):
    """Information about a file to be uploaded."""

    source_path: str
    description: str
    target_path: str


class BearlyInterpreterTool:
    """Tool for evaluating python code in a sandbox environment."""

    api_key: str
    endpoint = "https://exec.bearly.ai/v1/interpreter"
    name = "bearly_interpreter"
    args_schema: Type[BaseModel] = BearlyInterpreterToolArguments
    files: Dict[str, FileInfo] = {}

    def __init__(self, api_key: str):
        self.api_key = api_key

    @property
    def file_description(self) -> str:
        if len(self.files) == 0:
            return ""
        lines = ["The following files are available in the evaluation environment:"]
        for target_path, file_info in self.files.items():
            peek_content = head_file(file_info.source_path, 4)
            lines.append(
                f"- path: `{target_path}` \n first four lines: {peek_content}"
                f" \n description: `{file_info.description}`"
            )
        return "\n".join(lines)

    @property
    def description(self) -> str:
        return (base_description + "\n\n" + self.file_description).strip()

    def make_input_files(self) -> List[dict]:
        files = []
        for target_path, file_info in self.files.items():
            files.append(
                {
                    "pathname": target_path,
                    "contentsBasesixtyfour": file_to_base64(file_info.source_path),
                }
            )
        return files

    def _run(self, python_code: str) -> dict:
        script = strip_markdown_code(python_code)
        resp = requests.post(
            "https://exec.bearly.ai/v1/interpreter",
            data=json.dumps(
                {
                    "fileContents": script,
                    "inputFiles": self.make_input_files(),
                    "outputDir": "output/",
                    "outputAsLinks": True,
                }
            ),
            headers={"Authorization": self.api_key},
        ).json()
        return {
            "stdout": base64.b64decode(resp["stdoutBasesixtyfour"]).decode()
            if resp["stdoutBasesixtyfour"]
            else "",
            "stderr": base64.b64decode(resp["stderrBasesixtyfour"]).decode()
            if resp["stderrBasesixtyfour"]
            else "",
            "fileLinks": resp["fileLinks"],
            "exitCode": resp["exitCode"],
        }

    async def _arun(self, query: str) -> str:
        """Use the tool asynchronously."""
        raise NotImplementedError("bearly_interpreter does not support async")

    def add_file(self, source_path: str, target_path: str, description: str) -> None:
        if target_path in self.files:
            raise ValueError("target_path already exists")
        if not Path(source_path).exists():
            raise ValueError("source_path does not exist")
        self.files[target_path] = FileInfo(
            target_path=target_path, source_path=source_path, description=description
        )

    def clear_files(self) -> None:
        self.files = {}

    # TODO: this is because we can't have a dynamic description
    # because of the base pydantic class
    def as_tool(self) -> Tool:
        return Tool.from_function(
            func=self._run,
            name=self.name,
            description=self.description,
            args_schema=self.args_schema,
        )
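A usage sketch; the API key is a placeholder and `data.csv` is a hypothetical local file:

```python
from langchain_community.tools.bearly.tool import BearlyInterpreterTool

bearly = BearlyInterpreterTool(api_key="bearly-api-key")
bearly.add_file("data.csv", target_path="data.csv", description="daily sales figures")

tool = bearly.as_tool()
# Returns a dict with "stdout", "stderr", "fileLinks" and "exitCode".
result = tool.run(
    {"python_code": "import pandas as pd\nprint(pd.read_csv('data.csv').head())"}
)
```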
@@ -0,0 +1,5 @@
"""Bing Search API toolkit."""

from langchain_community.tools.bing_search.tool import BingSearchResults, BingSearchRun

__all__ = ["BingSearchRun", "BingSearchResults"]
49
libs/community/langchain_community/tools/bing_search/tool.py
Normal file
@@ -0,0 +1,49 @@
"""Tool for the Bing search API."""

from typing import Optional

from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool

from langchain_community.utilities.bing_search import BingSearchAPIWrapper


class BingSearchRun(BaseTool):
    """Tool that queries the Bing search API."""

    name: str = "bing_search"
    description: str = (
        "A wrapper around Bing Search. "
        "Useful for when you need to answer questions about current events. "
        "Input should be a search query."
    )
    api_wrapper: BingSearchAPIWrapper

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        return self.api_wrapper.run(query)


class BingSearchResults(BaseTool):
    """Tool that queries the Bing Search API and gets back json."""

    name: str = "Bing Search Results JSON"
    description: str = (
        "A wrapper around Bing Search. "
        "Useful for when you need to answer questions about current events. "
        "Input should be a search query. Output is a JSON array of the query results"
    )
    num_results: int = 4
    api_wrapper: BingSearchAPIWrapper

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        return str(self.api_wrapper.results(query, self.num_results))
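A usage sketch, assuming the wrapper's Bing credentials (e.g. `BING_SUBSCRIPTION_KEY` and `BING_SEARCH_URL`) are configured in the environment:

```python
from langchain_community.utilities.bing_search import BingSearchAPIWrapper

search = BingSearchRun(api_wrapper=BingSearchAPIWrapper())
print(search.run("latest langchain release"))

results = BingSearchResults(api_wrapper=BingSearchAPIWrapper(), num_results=2)
print(results.run("latest langchain release"))  # stringified list of result dicts
```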
@@ -0,0 +1,45 @@
from __future__ import annotations

from typing import Any, Optional

from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool

from langchain_community.utilities.brave_search import BraveSearchWrapper


class BraveSearch(BaseTool):
    """Tool that queries the Brave search API."""

    name: str = "brave_search"
    description: str = (
        "a search engine. "
        "useful for when you need to answer questions about current events."
        " input should be a search query."
    )
    search_wrapper: BraveSearchWrapper

    @classmethod
    def from_api_key(
        cls, api_key: str, search_kwargs: Optional[dict] = None, **kwargs: Any
    ) -> BraveSearch:
        """Create a tool from an api key.

        Args:
            api_key: The api key to use.
            search_kwargs: Any additional kwargs to pass to the search wrapper.
            **kwargs: Any additional kwargs to pass to the tool.

        Returns:
            A tool.
        """
        wrapper = BraveSearchWrapper(api_key=api_key, search_kwargs=search_kwargs or {})
        return cls(search_wrapper=wrapper, **kwargs)

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        return self.search_wrapper.run(query)
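A usage sketch with a placeholder API key; `count` is assumed to be one of the search kwargs Brave's API accepts:

```python
from langchain_community.tools.brave_search.tool import BraveSearch

brave = BraveSearch.from_api_key(api_key="brave-api-key", search_kwargs={"count": 3})
print(brave.run("what is the hottest year on record?"))
```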
131
libs/community/langchain_community/tools/clickup/prompt.py
Normal file
@@ -0,0 +1,131 @@
# flake8: noqa
CLICKUP_TASK_CREATE_PROMPT = """
This tool is a wrapper around clickup's create_task API, useful when you need to create a CLICKUP task.
The input to this tool is a dictionary specifying the fields of the CLICKUP task, and will be passed into clickup's `create_task` function.
Only add fields described by the user.
Use the following mapping in order to map the user's priority to the clickup priority: {{
    Urgent = 1,
    High = 2,
    Normal = 3,
    Low = 4,
}}. If the user passes in "urgent", use the priority value 1.

Here are a few task descriptions and corresponding input examples:
Task: create a task called "Daily report"
Example Input: {{"name": "Daily report"}}
Task: Make an open task called "ClickUp toolkit refactor" with description "Refactor the clickup toolkit to use dataclasses for parsing", with status "open"
Example Input: {{"name": "ClickUp toolkit refactor", "description": "Refactor the clickup toolkit to use dataclasses for parsing", "status": "Open"}}
Task: create a task with priority 3 called "New Task Name" with description "New Task Description", with status "open"
Example Input: {{"name": "New Task Name", "description": "New Task Description", "status": "Open", "priority": 3}}
Task: Add a task called "Bob's task" and assign it to Bob (user id: 81928627)
Example Input: {{"name": "Bob's task", "description": "Task for Bob", "assignees": [81928627]}}
"""

CLICKUP_LIST_CREATE_PROMPT = """
This tool is a wrapper around clickup's create_list API, useful when you need to create a CLICKUP list.
The input to this tool is a dictionary specifying the fields of a clickup list, and will be passed to clickup's create_list function.
Only add fields described by the user.
Use the following mapping in order to map the user's priority to the clickup priority: {{
    Urgent = 1,
    High = 2,
    Normal = 3,
    Low = 4,
}}. If the user passes in "urgent", use the priority value 1.

Here are a few list descriptions and corresponding input examples:
Description: make a list with name "General List"
Example Input: {{"name": "General List"}}
Description: add a new list ("TODOs") with low priority
Example Input: {{"name": "TODOs", "priority": 4}}
Description: create a list with name "List name", content "List content", priority 2, and status "red"
Example Input: {{"name": "List name", "content": "List content", "priority": 2, "status": "red"}}
"""

CLICKUP_FOLDER_CREATE_PROMPT = """
This tool is a wrapper around clickup's create_folder API, useful when you need to create a CLICKUP folder.
The input to this tool is a dictionary specifying the fields of a clickup folder, and will be passed to clickup's create_folder function.
For example, to create a folder with name "Folder name" you would pass in the following dictionary:
{{
    "name": "Folder name"
}}
"""

CLICKUP_GET_TASK_PROMPT = """
This tool is a wrapper around clickup's API,
useful when you need to get a specific task for the user. Given the task id, you want to create a request similar to the following dictionary:
payload = {{"task_id": "86a0t44tq"}}
Do NOT use this to get a task-specific attribute; use the get task attribute tool instead.
"""

CLICKUP_GET_TASK_ATTRIBUTE_PROMPT = """
This tool is a wrapper around clickup's API,
useful when you need to get a specific attribute from a task. Given the task id and the desired attribute, create a request similar to the following dictionary:
payload = {{"task_id": "<task_id_to_update>", "attribute_name": "<attribute_name_to_update>"}}

Here are some example queries and their corresponding payloads:
Get the name of task 23jn23kjn -> {{"task_id": "23jn23kjn", "attribute_name": "name"}}
What is the priority of task 86a0t44tq? -> {{"task_id": "86a0t44tq", "attribute_name": "priority"}}
Output the description of task sdc9ds9jc -> {{"task_id": "sdc9ds9jc", "attribute_name": "description"}}
Who is assigned to task bgjfnbfg0 -> {{"task_id": "bgjfnbfg0", "attribute_name": "assignee"}}
What is the status of task kjnsdcjc? -> {{"task_id": "kjnsdcjc", "attribute_name": "status"}}
How long is the time estimate of task sjncsd999? -> {{"task_id": "sjncsd999", "attribute_name": "time_estimate"}}
Is task jnsd98sd archived? -> {{"task_id": "jnsd98sd", "attribute_name": "archive"}}
"""

CLICKUP_GET_ALL_TEAMS_PROMPT = """
This tool is a wrapper around clickup's API, useful when you need to get all teams that the user is a part of.
To get a list of all the teams, no request parameters are necessary.
"""

CLICKUP_GET_LIST_PROMPT = """
This tool is a wrapper around clickup's API,
useful when you need to get a specific list for the user. Given the list id, you want to create a request similar to the following dictionary:
payload = {{"list_id": "901300608424"}}
"""

CLICKUP_GET_FOLDERS_PROMPT = """
This tool is a wrapper around clickup's API,
useful when you need to get a specific folder for the user. Given the user's workspace id, you want to create a request similar to the following dictionary:
payload = {{"folder_id": "90130119692"}}
"""

CLICKUP_GET_SPACES_PROMPT = """
This tool is a wrapper around clickup's API,
useful when you need to get all the spaces available to a user. Given the user's workspace id, you want to create a request similar to the following dictionary:
payload = {{"team_id": "90130119692"}}
"""

CLICKUP_UPDATE_TASK_PROMPT = """
This tool is a wrapper around clickup's API,
useful when you need to update a specific attribute of a task. Given the task id, the desired attribute to change and the new value, you want to create a request similar to the following dictionary:
payload = {{"task_id": "<task_id_to_update>", "attribute_name": "<attribute_name_to_update>", "value": "<value_to_update_to>"}}

Here are some example queries and their corresponding payloads:
Change the name of task 23jn23kjn to new task name -> {{"task_id": "23jn23kjn", "attribute_name": "name", "value": "new task name"}}
Update the priority of task 86a0t44tq to 1 -> {{"task_id": "86a0t44tq", "attribute_name": "priority", "value": 1}}
Re-write the description of task sdc9ds9jc to 'a new task description' -> {{"task_id": "sdc9ds9jc", "attribute_name": "description", "value": "a new task description"}}
Forward the status of task kjnsdcjc to done -> {{"task_id": "kjnsdcjc", "attribute_name": "status", "value": "done"}}
Increase the time estimate of task sjncsd999 to 3h -> {{"task_id": "sjncsd999", "attribute_name": "time_estimate", "value": 10800000}}
Archive task jnsd98sd -> {{"task_id": "jnsd98sd", "attribute_name": "archive", "value": true}}
*IMPORTANT*: Pay attention to the exact syntax above and the correct use of quotes.
For changing priority and time estimates, we expect integers (int); time estimates are given in milliseconds.
For name, description and status we expect strings (str).
For archive, we expect a boolean (bool).
"""

CLICKUP_UPDATE_TASK_ASSIGNEE_PROMPT = """
This tool is a wrapper around clickup's API,
useful when you need to update the assignees of a task. Given the task id, the operation (add or rem(ove)) and the list of user ids, you want to create a request similar to the following dictionary:
payload = {{"task_id": "<task_id_to_update>", "operation": "<operation, (add or rem)>", "users": [<user_id_1>, <user_id_2>]}}

Here are some example queries and their corresponding payloads:
Add 81928627 and 3987234 as assignees to task 21hw21jn -> {{"task_id": "21hw21jn", "operation": "add", "users": [81928627, 3987234]}}
Remove 67823487 as assignee from task jin34ji4 -> {{"task_id": "jin34ji4", "operation": "rem", "users": [67823487]}}
*IMPORTANT*: User ids should always be ints.
"""
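The doubled braces in these prompts escape `str.format`, so downstream code can format the templates without losing the literal JSON braces; a quick sketch:

```python
from langchain_community.tools.clickup.prompt import CLICKUP_TASK_CREATE_PROMPT

# `{{` / `}}` render as literal `{` / `}` once formatted.
rendered = CLICKUP_TASK_CREATE_PROMPT.format()
assert '{"name": "Daily report"}' in rendered
```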
42
libs/community/langchain_community/tools/clickup/tool.py
Normal file
@@ -0,0 +1,42 @@
"""
This tool allows agents to interact with the clickup library
and operate on a Clickup instance.
To use this tool, you must first set as environment variables:
    client_secret
    client_id
    code

Below is a sample script that uses the Clickup tool:

```python
from langchain_community.agent_toolkits.clickup.toolkit import ClickupToolkit
from langchain_community.utilities.clickup import ClickupAPIWrapper

clickup = ClickupAPIWrapper()
toolkit = ClickupToolkit.from_clickup_api_wrapper(clickup)
```
"""
from typing import Optional

from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.pydantic_v1 import Field
from langchain_core.tools import BaseTool

from langchain_community.utilities.clickup import ClickupAPIWrapper


class ClickupAction(BaseTool):
    """Tool that queries the Clickup API."""

    api_wrapper: ClickupAPIWrapper = Field(default_factory=ClickupAPIWrapper)
    mode: str
    name: str = ""
    description: str = ""

    def _run(
        self,
        instructions: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the Clickup API to run an operation."""
        return self.api_wrapper.run(self.mode, instructions)
@@ -0,0 +1,4 @@
from langchain_community.tools.render import format_tool_to_openai_function

# For backwards compatibility
__all__ = ["format_tool_to_openai_function"]
@@ -0,0 +1,9 @@
"""DataForSeo API Toolkit."""

from langchain_community.tools.dataforseo_api_search.tool import (
    DataForSeoAPISearchResults,
    DataForSeoAPISearchRun,
)

__all__ = ["DataForSeoAPISearchRun", "DataForSeoAPISearchResults"]
@@ -0,0 +1,71 @@
"""Tool for the DataForSeo SERP API."""

from typing import Optional

from langchain_core.callbacks import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain_core.pydantic_v1 import Field
from langchain_core.tools import BaseTool

from langchain_community.utilities.dataforseo_api_search import DataForSeoAPIWrapper


class DataForSeoAPISearchRun(BaseTool):
    """Tool that queries the DataForSeo Google search API."""

    name: str = "dataforseo_api_search"
    description: str = (
        "A robust Google Search API provided by DataForSeo. "
        "This tool is handy when you need information about trending topics "
        "or current events."
    )
    api_wrapper: DataForSeoAPIWrapper

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        return str(self.api_wrapper.run(query))

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool asynchronously."""
        return str(await self.api_wrapper.arun(query))


class DataForSeoAPISearchResults(BaseTool):
    """Tool that queries the DataForSeo Google Search API
    and gets back json."""

    name: str = "DataForSeo-Results-JSON"
    description: str = (
        "A comprehensive Google Search API provided by DataForSeo. "
        "This tool is useful for obtaining real-time data on current events "
        "or popular searches. "
        "The input should be a search query and the output is a JSON object "
        "of the query results."
    )
    api_wrapper: DataForSeoAPIWrapper = Field(default_factory=DataForSeoAPIWrapper)

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        return str(self.api_wrapper.results(query))

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool asynchronously."""
        return str(await self.api_wrapper.aresults(query))
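A usage sketch; it assumes DataForSeo credentials (e.g. `DATAFORSEO_LOGIN` / `DATAFORSEO_PASSWORD`) are available to the default wrapper:

```python
json_tool = DataForSeoAPISearchResults()
print(json_tool.run("top seo trends"))  # stringified JSON of the query results
```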
@@ -0,0 +1,5 @@
"""DuckDuckGo Search API toolkit."""

from langchain_community.tools.ddg_search.tool import DuckDuckGoSearchRun

__all__ = ["DuckDuckGoSearchRun"]
83
libs/community/langchain_community/tools/ddg_search/tool.py
Normal file
@@ -0,0 +1,83 @@
"""Tool for the DuckDuckGo search API."""

import warnings
from typing import Any, Optional, Type

from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.tools import BaseTool

from langchain_community.utilities.duckduckgo_search import DuckDuckGoSearchAPIWrapper


class DDGInput(BaseModel):
    query: str = Field(description="search query to look up")


class DuckDuckGoSearchRun(BaseTool):
    """Tool that queries the DuckDuckGo search API."""

    name: str = "duckduckgo_search"
    description: str = (
        "A wrapper around DuckDuckGo Search. "
        "Useful for when you need to answer questions about current events. "
        "Input should be a search query."
    )
    api_wrapper: DuckDuckGoSearchAPIWrapper = Field(
        default_factory=DuckDuckGoSearchAPIWrapper
    )
    args_schema: Type[BaseModel] = DDGInput

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        return self.api_wrapper.run(query)


class DuckDuckGoSearchResults(BaseTool):
    """Tool that queries the DuckDuckGo search API and gets back json."""

    name: str = "DuckDuckGo Results JSON"
    description: str = (
        "A wrapper around Duck Duck Go Search. "
        "Useful for when you need to answer questions about current events. "
        "Input should be a search query. Output is a JSON array of the query results"
    )
    max_results: int = Field(alias="num_results", default=4)
    api_wrapper: DuckDuckGoSearchAPIWrapper = Field(
        default_factory=DuckDuckGoSearchAPIWrapper
    )
    backend: str = "text"
    args_schema: Type[BaseModel] = DDGInput

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        res = self.api_wrapper.results(query, self.max_results, source=self.backend)
        res_strs = [", ".join([f"{k}: {v}" for k, v in d.items()]) for d in res]
        return ", ".join([f"[{rs}]" for rs in res_strs])


def DuckDuckGoSearchTool(*args: Any, **kwargs: Any) -> DuckDuckGoSearchRun:
    """
    Deprecated. Use DuckDuckGoSearchRun instead.

    Args:
        *args:
        **kwargs:

    Returns:
        DuckDuckGoSearchRun
    """
    warnings.warn(
        "DuckDuckGoSearchTool will be deprecated in the future. "
        "Please use DuckDuckGoSearchRun instead.",
        DeprecationWarning,
    )
    return DuckDuckGoSearchRun(*args, **kwargs)
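A usage sketch; this needs the `duckduckgo-search` package but no API key:

```python
from langchain_community.tools.ddg_search.tool import DuckDuckGoSearchResults

# `num_results` is the constructor alias for `max_results` (see the Field above).
ddg = DuckDuckGoSearchResults(num_results=2)
print(ddg.run("python 3.12 release notes"))
```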
@@ -0,0 +1,243 @@
from __future__ import annotations

import ast
import json
import os
from io import StringIO
from sys import version_info
from typing import IO, TYPE_CHECKING, Any, Callable, List, Optional, Type

from langchain_core.callbacks import (
    AsyncCallbackManagerForToolRun,
    CallbackManager,
    CallbackManagerForToolRun,
)
from langchain_core.pydantic_v1 import BaseModel, Field, PrivateAttr

from langchain_community.tools import BaseTool, Tool
from langchain_community.tools.e2b_data_analysis.unparse import Unparser

if TYPE_CHECKING:
    from e2b import EnvVars
    from e2b.templates.data_analysis import Artifact

base_description = """Evaluates python code in a sandbox environment. \
The environment is long running and exists across multiple executions. \
You must send the whole script every time and print your outputs. \
Script should be pure python code that can be evaluated. \
It should be in python format NOT markdown. \
The code should NOT be wrapped in backticks. \
All python packages including requests, matplotlib, scipy, numpy, pandas, \
etc are available. Create and display charts using `plt.show()`."""


def _unparse(tree: ast.AST) -> str:
    """Unparse the AST."""
    if version_info.minor < 9:
        s = StringIO()
        Unparser(tree, file=s)
        source_code = s.getvalue()
        s.close()
    else:
        source_code = ast.unparse(tree)  # type: ignore[attr-defined]
    return source_code


def add_last_line_print(code: str) -> str:
    """Add print statement to the last line if it's missing.

    Sometimes, the LLM-generated code doesn't have `print(variable_name)`, instead the
    LLM tries to print the variable only by writing `variable_name` (as you would in a
    REPL, for example).

    This method checks the AST of the generated Python code and adds the print
    statement to the last line if it's missing.
    """
    tree = ast.parse(code)
    node = tree.body[-1]
    if isinstance(node, ast.Expr) and isinstance(node.value, ast.Call):
        if isinstance(node.value.func, ast.Name) and node.value.func.id == "print":
            return _unparse(tree)

    if isinstance(node, ast.Expr):
        tree.body[-1] = ast.Expr(
            value=ast.Call(
                func=ast.Name(id="print", ctx=ast.Load()),
                args=[node.value],
                keywords=[],
            )
        )

    return _unparse(tree)
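For example, on Python >= 3.9 the rewrite behaves like this:

```python
print(add_last_line_print("x = 1\nx"))
# x = 1
# print(x)

print(add_last_line_print("print('hi')"))  # already a print call; left unchanged
# print('hi')
```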


class UploadedFile(BaseModel):
    """Description of the uploaded path with its remote path."""

    name: str
    remote_path: str
    description: str


class E2BDataAnalysisToolArguments(BaseModel):
    """Arguments for the E2BDataAnalysisTool."""

    python_code: str = Field(
        ...,
        example="print('Hello World')",
        description=(
            "The python script to be evaluated. "
            "The contents will be in main.py. "
            "It should not be in markdown format."
        ),
    )


class E2BDataAnalysisTool(BaseTool):
    """Tool for running python code in a sandboxed environment for data analysis."""

    name = "e2b_data_analysis"
    args_schema: Type[BaseModel] = E2BDataAnalysisToolArguments
    session: Any
    description: str
    _uploaded_files: List[UploadedFile] = PrivateAttr(default_factory=list)

    def __init__(
        self,
        api_key: Optional[str] = None,
        cwd: Optional[str] = None,
        env_vars: Optional[EnvVars] = None,
        on_stdout: Optional[Callable[[str], Any]] = None,
        on_stderr: Optional[Callable[[str], Any]] = None,
        on_artifact: Optional[Callable[[Artifact], Any]] = None,
        on_exit: Optional[Callable[[int], Any]] = None,
        **kwargs: Any,
    ):
        try:
            from e2b import DataAnalysis
        except ImportError as e:
            raise ImportError(
                "Unable to import e2b, please install with `pip install e2b`."
            ) from e

        # If no API key is provided, E2B will try to read it from the environment
        # variable E2B_API_KEY
        super().__init__(description=base_description, **kwargs)
        self.session = DataAnalysis(
            api_key=api_key,
            cwd=cwd,
            env_vars=env_vars,
            on_stdout=on_stdout,
            on_stderr=on_stderr,
            on_exit=on_exit,
            on_artifact=on_artifact,
        )

    def close(self) -> None:
        """Close the cloud sandbox."""
        self._uploaded_files = []
        self.session.close()

    @property
    def uploaded_files_description(self) -> str:
        if len(self._uploaded_files) == 0:
            return ""
        lines = ["The following files are available in the sandbox:"]

        for f in self._uploaded_files:
            if f.description == "":
                lines.append(f"- path: `{f.remote_path}`")
            else:
                lines.append(
                    f"- path: `{f.remote_path}` \n description: `{f.description}`"
                )
        return "\n".join(lines)

    def _run(
        self,
        python_code: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
        callbacks: Optional[CallbackManager] = None,
    ) -> str:
        python_code = add_last_line_print(python_code)

        if callbacks is not None:
            on_artifact = getattr(callbacks.metadata, "on_artifact", None)
        else:
            on_artifact = None

        stdout, stderr, artifacts = self.session.run_python(
            python_code, on_artifact=on_artifact
        )

        out = {
            "stdout": stdout,
            "stderr": stderr,
            "artifacts": list(map(lambda artifact: artifact.name, artifacts)),
        }
        return json.dumps(out)

    async def _arun(
        self,
        python_code: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        raise NotImplementedError("e2b_data_analysis does not support async")

    def run_command(
        self,
        cmd: str,
    ) -> dict:
        """Run shell command in the sandbox."""
        proc = self.session.process.start(cmd)
        output = proc.wait()
        return {
            "stdout": output.stdout,
            "stderr": output.stderr,
            "exit_code": output.exit_code,
        }

    def install_python_packages(self, package_names: str | List[str]) -> None:
        """Install python packages in the sandbox."""
        self.session.install_python_packages(package_names)

    def install_system_packages(self, package_names: str | List[str]) -> None:
        """Install system packages (via apt) in the sandbox."""
        self.session.install_system_packages(package_names)

    def download_file(self, remote_path: str) -> bytes:
        """Download file from the sandbox."""
        return self.session.download_file(remote_path)

    def upload_file(self, file: IO, description: str) -> UploadedFile:
        """Upload file to the sandbox.

        The file is uploaded to the '/home/user/<filename>' path."""
        remote_path = self.session.upload_file(file)

        f = UploadedFile(
            name=os.path.basename(file.name),
            remote_path=remote_path,
            description=description,
        )
        self._uploaded_files.append(f)
        self.description = self.description + "\n" + self.uploaded_files_description
        return f

    def remove_uploaded_file(self, uploaded_file: UploadedFile) -> None:
        """Remove uploaded file from the sandbox."""
        self.session.filesystem.remove(uploaded_file.remote_path)
        self._uploaded_files = [
            f
            for f in self._uploaded_files
            if f.remote_path != uploaded_file.remote_path
        ]
        self.description = self.description + "\n" + self.uploaded_files_description

    def as_tool(self) -> Tool:
        return Tool.from_function(
            func=self._run,
            name=self.name,
            description=self.description,
            args_schema=self.args_schema,
        )
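A usage sketch, assuming `E2B_API_KEY` is set, the `e2b` package is installed, and a local `sales.csv` exists:

```python
from langchain_community.tools.e2b_data_analysis.tool import E2BDataAnalysisTool

tool = E2BDataAnalysisTool(on_stdout=print, on_stderr=print)
with open("sales.csv", "rb") as f:
    tool.upload_file(f, description="daily sales figures")

# Uploaded files land under /home/user/ inside the sandbox.
print(tool.run("import pandas as pd\nprint(pd.read_csv('/home/user/sales.csv').describe())"))
tool.close()
```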
@@ -0,0 +1,736 @@
# mypy: disable-error-code=no-untyped-def
# Because Python <3.9 doesn't support ast.unparse,
# we copied the unparse functionality from here:
# https://github.com/python/cpython/blob/3.8/Tools/parser/unparse.py
"Usage: unparse.py <path to source file>"
import ast
import io
import sys
import tokenize

# Large float and imaginary literals get turned into infinities in the AST.
# We unparse those infinities to INFSTR.
INFSTR = "1e" + repr(sys.float_info.max_10_exp + 1)


def interleave(inter, f, seq):
    """Call f on each item in seq, calling inter() in between."""
    seq = iter(seq)
    try:
        f(next(seq))
    except StopIteration:
        pass
    else:
        for x in seq:
            inter()
            f(x)


class Unparser:
    """Methods in this class recursively traverse an AST and
    output source code for the abstract syntax; original formatting
    is disregarded."""

    def __init__(self, tree, file=sys.stdout):
        """Unparser(tree, file=sys.stdout) -> None.
        Print the source for tree to file."""
        self.f = file
        self._indent = 0
        self.dispatch(tree)
        self.f.flush()

    def fill(self, text=""):
        "Indent a piece of text, according to the current indentation level"
        self.f.write("\n" + "    " * self._indent + text)

    def write(self, text):
        "Append a piece of text to the current line."
        self.f.write(text)

    def enter(self):
        "Print ':', and increase the indentation."
        self.write(":")
        self._indent += 1

    def leave(self):
        "Decrease the indentation level."
        self._indent -= 1

    def dispatch(self, tree):
        "Dispatcher function, dispatching tree type T to method _T."
        if isinstance(tree, list):
            for t in tree:
                self.dispatch(t)
            return
        meth = getattr(self, "_" + tree.__class__.__name__)
        meth(tree)

    ############### Unparsing methods ######################
    # There should be one method per concrete grammar type #
    # Constructors should be grouped by sum type. Ideally, #
    # this would follow the order in the grammar, but      #
    # currently doesn't.                                   #
    ########################################################

    def _Module(self, tree):
        for stmt in tree.body:
            self.dispatch(stmt)

    # stmt
    def _Expr(self, tree):
        self.fill()
        self.dispatch(tree.value)

    def _NamedExpr(self, tree):
        self.write("(")
        self.dispatch(tree.target)
        self.write(" := ")
        self.dispatch(tree.value)
        self.write(")")

    def _Import(self, t):
        self.fill("import ")
        interleave(lambda: self.write(", "), self.dispatch, t.names)

    def _ImportFrom(self, t):
        self.fill("from ")
        self.write("." * t.level)
        if t.module:
            self.write(t.module)
        self.write(" import ")
        interleave(lambda: self.write(", "), self.dispatch, t.names)

    def _Assign(self, t):
        self.fill()
        for target in t.targets:
            self.dispatch(target)
            self.write(" = ")
        self.dispatch(t.value)

    def _AugAssign(self, t):
        self.fill()
        self.dispatch(t.target)
        self.write(" " + self.binop[t.op.__class__.__name__] + "= ")
        self.dispatch(t.value)

    def _AnnAssign(self, t):
        self.fill()
        if not t.simple and isinstance(t.target, ast.Name):
            self.write("(")
        self.dispatch(t.target)
        if not t.simple and isinstance(t.target, ast.Name):
            self.write(")")
        self.write(": ")
        self.dispatch(t.annotation)
        if t.value:
            self.write(" = ")
            self.dispatch(t.value)

    def _Return(self, t):
        self.fill("return")
        if t.value:
            self.write(" ")
            self.dispatch(t.value)

    def _Pass(self, t):
        self.fill("pass")

    def _Break(self, t):
        self.fill("break")

    def _Continue(self, t):
        self.fill("continue")

    def _Delete(self, t):
        self.fill("del ")
        interleave(lambda: self.write(", "), self.dispatch, t.targets)

    def _Assert(self, t):
        self.fill("assert ")
        self.dispatch(t.test)
        if t.msg:
            self.write(", ")
            self.dispatch(t.msg)

    def _Global(self, t):
        self.fill("global ")
        interleave(lambda: self.write(", "), self.write, t.names)

    def _Nonlocal(self, t):
        self.fill("nonlocal ")
        interleave(lambda: self.write(", "), self.write, t.names)

    def _Await(self, t):
        self.write("(")
        self.write("await")
        if t.value:
            self.write(" ")
            self.dispatch(t.value)
        self.write(")")

    def _Yield(self, t):
        self.write("(")
        self.write("yield")
        if t.value:
            self.write(" ")
            self.dispatch(t.value)
        self.write(")")

    def _YieldFrom(self, t):
        self.write("(")
        self.write("yield from")
        if t.value:
            self.write(" ")
            self.dispatch(t.value)
        self.write(")")

    def _Raise(self, t):
        self.fill("raise")
        if not t.exc:
            assert not t.cause
            return
        self.write(" ")
        self.dispatch(t.exc)
        if t.cause:
            self.write(" from ")
            self.dispatch(t.cause)

    def _Try(self, t):
        self.fill("try")
        self.enter()
        self.dispatch(t.body)
        self.leave()
        for ex in t.handlers:
            self.dispatch(ex)
        if t.orelse:
            self.fill("else")
            self.enter()
            self.dispatch(t.orelse)
            self.leave()
        if t.finalbody:
            self.fill("finally")
            self.enter()
            self.dispatch(t.finalbody)
            self.leave()

    def _ExceptHandler(self, t):
        self.fill("except")
        if t.type:
            self.write(" ")
            self.dispatch(t.type)
        if t.name:
            self.write(" as ")
            self.write(t.name)
        self.enter()
        self.dispatch(t.body)
        self.leave()

    def _ClassDef(self, t):
        self.write("\n")
        for deco in t.decorator_list:
            self.fill("@")
            self.dispatch(deco)
        self.fill("class " + t.name)
        self.write("(")
        comma = False
        for e in t.bases:
            if comma:
                self.write(", ")
            else:
                comma = True
            self.dispatch(e)
        for e in t.keywords:
            if comma:
                self.write(", ")
            else:
                comma = True
            self.dispatch(e)
        self.write(")")

        self.enter()
        self.dispatch(t.body)
        self.leave()

    def _FunctionDef(self, t):
        self.__FunctionDef_helper(t, "def")

    def _AsyncFunctionDef(self, t):
        self.__FunctionDef_helper(t, "async def")

    def __FunctionDef_helper(self, t, fill_suffix):
        self.write("\n")
        for deco in t.decorator_list:
            self.fill("@")
            self.dispatch(deco)
        def_str = fill_suffix + " " + t.name + "("
        self.fill(def_str)
        self.dispatch(t.args)
        self.write(")")
        if t.returns:
            self.write(" -> ")
            self.dispatch(t.returns)
        self.enter()
        self.dispatch(t.body)
        self.leave()

    def _For(self, t):
        self.__For_helper("for ", t)

    def _AsyncFor(self, t):
        self.__For_helper("async for ", t)

    def __For_helper(self, fill, t):
        self.fill(fill)
        self.dispatch(t.target)
        self.write(" in ")
        self.dispatch(t.iter)
        self.enter()
        self.dispatch(t.body)
        self.leave()
        if t.orelse:
            self.fill("else")
            self.enter()
            self.dispatch(t.orelse)
            self.leave()

    def _If(self, t):
        self.fill("if ")
        self.dispatch(t.test)
        self.enter()
        self.dispatch(t.body)
        self.leave()
        # collapse nested ifs into equivalent elifs.
        while t.orelse and len(t.orelse) == 1 and isinstance(t.orelse[0], ast.If):
            t = t.orelse[0]
            self.fill("elif ")
            self.dispatch(t.test)
            self.enter()
            self.dispatch(t.body)
            self.leave()
        # final else
        if t.orelse:
            self.fill("else")
            self.enter()
            self.dispatch(t.orelse)
            self.leave()

    def _While(self, t):
        self.fill("while ")
        self.dispatch(t.test)
        self.enter()
        self.dispatch(t.body)
        self.leave()
        if t.orelse:
            self.fill("else")
            self.enter()
            self.dispatch(t.orelse)
            self.leave()

    def _With(self, t):
        self.fill("with ")
        interleave(lambda: self.write(", "), self.dispatch, t.items)
        self.enter()
        self.dispatch(t.body)
        self.leave()

    def _AsyncWith(self, t):
        self.fill("async with ")
        interleave(lambda: self.write(", "), self.dispatch, t.items)
        self.enter()
        self.dispatch(t.body)
        self.leave()

    # expr
    def _JoinedStr(self, t):
        self.write("f")
        string = io.StringIO()
        self._fstring_JoinedStr(t, string.write)
        self.write(repr(string.getvalue()))

    def _FormattedValue(self, t):
        self.write("f")
        string = io.StringIO()
        self._fstring_FormattedValue(t, string.write)
        self.write(repr(string.getvalue()))

    def _fstring_JoinedStr(self, t, write):
        for value in t.values:
            meth = getattr(self, "_fstring_" + type(value).__name__)
            meth(value, write)

    def _fstring_Constant(self, t, write):
        assert isinstance(t.value, str)
        value = t.value.replace("{", "{{").replace("}", "}}")
        write(value)

    def _fstring_FormattedValue(self, t, write):
        write("{")
        expr = io.StringIO()
        Unparser(t.value, expr)
        expr = expr.getvalue().rstrip("\n")
        if expr.startswith("{"):
            write(" ")  # Separate pair of opening brackets as "{ {"
        write(expr)
        if t.conversion != -1:
            conversion = chr(t.conversion)
            assert conversion in "sra"
            write(f"!{conversion}")
        if t.format_spec:
            write(":")
            meth = getattr(self, "_fstring_" + type(t.format_spec).__name__)
            meth(t.format_spec, write)
        write("}")

    def _Name(self, t):
        self.write(t.id)

    def _write_constant(self, value):
        if isinstance(value, (float, complex)):
            # Substitute overflowing decimal literal for AST infinities.
            self.write(repr(value).replace("inf", INFSTR))
        else:
            self.write(repr(value))

    def _Constant(self, t):
        value = t.value
        if isinstance(value, tuple):
            self.write("(")
            if len(value) == 1:
                self._write_constant(value[0])
                self.write(",")
            else:
                interleave(lambda: self.write(", "), self._write_constant, value)
            self.write(")")
        elif value is ...:
            self.write("...")
        else:
            if t.kind == "u":
                self.write("u")
            self._write_constant(t.value)

    def _List(self, t):
        self.write("[")
        interleave(lambda: self.write(", "), self.dispatch, t.elts)
        self.write("]")

    def _ListComp(self, t):
        self.write("[")
        self.dispatch(t.elt)
        for gen in t.generators:
            self.dispatch(gen)
        self.write("]")

    def _GeneratorExp(self, t):
        self.write("(")
        self.dispatch(t.elt)
        for gen in t.generators:
            self.dispatch(gen)
        self.write(")")

    def _SetComp(self, t):
        self.write("{")
        self.dispatch(t.elt)
        for gen in t.generators:
            self.dispatch(gen)
        self.write("}")

    def _DictComp(self, t):
        self.write("{")
        self.dispatch(t.key)
        self.write(": ")
        self.dispatch(t.value)
        for gen in t.generators:
            self.dispatch(gen)
        self.write("}")

    def _comprehension(self, t):
        if t.is_async:
            self.write(" async for ")
        else:
            self.write(" for ")
        self.dispatch(t.target)
        self.write(" in ")
        self.dispatch(t.iter)
        for if_clause in t.ifs:
            self.write(" if ")
            self.dispatch(if_clause)

    def _IfExp(self, t):
        self.write("(")
        self.dispatch(t.body)
        self.write(" if ")
        self.dispatch(t.test)
        self.write(" else ")
        self.dispatch(t.orelse)
        self.write(")")

    def _Set(self, t):
        assert t.elts  # should be at least one element
        self.write("{")
        interleave(lambda: self.write(", "), self.dispatch, t.elts)
        self.write("}")

    def _Dict(self, t):
        self.write("{")

        def write_key_value_pair(k, v):
            self.dispatch(k)
            self.write(": ")
            self.dispatch(v)

        def write_item(item):
            k, v = item
            if k is None:
                # for dictionary unpacking operator in dicts {**{'y': 2}}
                # see PEP 448 for details
                self.write("**")
                self.dispatch(v)
            else:
                write_key_value_pair(k, v)

        interleave(lambda: self.write(", "), write_item, zip(t.keys, t.values))
        self.write("}")

    def _Tuple(self, t):
        self.write("(")
        if len(t.elts) == 1:
            elt = t.elts[0]
            self.dispatch(elt)
            self.write(",")
        else:
            interleave(lambda: self.write(", "), self.dispatch, t.elts)
        self.write(")")

    unop = {"Invert": "~", "Not": "not", "UAdd": "+", "USub": "-"}

    def _UnaryOp(self, t):
        self.write("(")
        self.write(self.unop[t.op.__class__.__name__])
        self.write(" ")
        self.dispatch(t.operand)
        self.write(")")

    binop = {
        "Add": "+",
        "Sub": "-",
        "Mult": "*",
        "MatMult": "@",
        "Div": "/",
        "Mod": "%",
        "LShift": "<<",
        "RShift": ">>",
        "BitOr": "|",
        "BitXor": "^",
        "BitAnd": "&",
        "FloorDiv": "//",
        "Pow": "**",
    }

    def _BinOp(self, t):
        self.write("(")
        self.dispatch(t.left)
        self.write(" " + self.binop[t.op.__class__.__name__] + " ")
        self.dispatch(t.right)
        self.write(")")

    cmpops = {
        "Eq": "==",
        "NotEq": "!=",
        "Lt": "<",
        "LtE": "<=",
        "Gt": ">",
        "GtE": ">=",
        "Is": "is",
        "IsNot": "is not",
        "In": "in",
        "NotIn": "not in",
    }

    def _Compare(self, t):
        self.write("(")
        self.dispatch(t.left)
        for o, e in zip(t.ops, t.comparators):
            self.write(" " + self.cmpops[o.__class__.__name__] + " ")
            self.dispatch(e)
        self.write(")")

    boolops = {ast.And: "and", ast.Or: "or"}

    def _BoolOp(self, t):
        self.write("(")
        s = " %s " % self.boolops[t.op.__class__]
        interleave(lambda: self.write(s), self.dispatch, t.values)
        self.write(")")

    def _Attribute(self, t):
        self.dispatch(t.value)
        # Special case: 3.__abs__() is a syntax error, so if t.value
        # is an integer literal then we need to either parenthesize
        # it or add an extra space to get 3 .__abs__().
        if isinstance(t.value, ast.Constant) and isinstance(t.value.value, int):
            self.write(" ")
        self.write(".")
        self.write(t.attr)

    def _Call(self, t):
        self.dispatch(t.func)
        self.write("(")
        comma = False
        for e in t.args:
            if comma:
                self.write(", ")
            else:
                comma = True
            self.dispatch(e)
        for e in t.keywords:
            if comma:
                self.write(", ")
            else:
                comma = True
            self.dispatch(e)
        self.write(")")

    def _Subscript(self, t):
        self.dispatch(t.value)
        self.write("[")
        if (
            isinstance(t.slice, ast.Index)
            and isinstance(t.slice.value, ast.Tuple)
            and t.slice.value.elts
        ):
            if len(t.slice.value.elts) == 1:
                elt = t.slice.value.elts[0]
                self.dispatch(elt)
                self.write(",")
            else:
                interleave(lambda: self.write(", "), self.dispatch, t.slice.value.elts)
        else:
            self.dispatch(t.slice)
        self.write("]")

    def _Starred(self, t):
        self.write("*")
        self.dispatch(t.value)

    # slice
    def _Ellipsis(self, t):
        self.write("...")

    def _Index(self, t):
        self.dispatch(t.value)

    def _Slice(self, t):
        if t.lower:
            self.dispatch(t.lower)
        self.write(":")
        if t.upper:
            self.dispatch(t.upper)
        if t.step:
            self.write(":")
            self.dispatch(t.step)

    def _ExtSlice(self, t):
        if len(t.dims) == 1:
            elt = t.dims[0]
            self.dispatch(elt)
            self.write(",")
        else:
            interleave(lambda: self.write(", "), self.dispatch, t.dims)

    # argument
    def _arg(self, t):
        self.write(t.arg)
        if t.annotation:
            self.write(": ")
            self.dispatch(t.annotation)

    # others
    def _arguments(self, t):
        first = True
        # normal arguments
        all_args = t.posonlyargs + t.args
        defaults = [None] * (len(all_args) - len(t.defaults)) + t.defaults
        for index, elements in enumerate(zip(all_args, defaults), 1):
            a, d = elements
            if first:
                first = False
            else:
                self.write(", ")
            self.dispatch(a)
            if d:
                self.write("=")
                self.dispatch(d)
            if index == len(t.posonlyargs):
                self.write(", /")

        # varargs, or bare '*' if no varargs but keyword-only arguments present
        if t.vararg or t.kwonlyargs:
            if first:
                first = False
            else:
                self.write(", ")
            self.write("*")
            if t.vararg:
                self.write(t.vararg.arg)
                if t.vararg.annotation:
                    self.write(": ")
                    self.dispatch(t.vararg.annotation)

        # keyword-only arguments
        if t.kwonlyargs:
            for a, d in zip(t.kwonlyargs, t.kw_defaults):
                if first:
                    first = False
                else:
                    self.write(", ")
                self.dispatch(a)
                if d:
                    self.write("=")
                    self.dispatch(d)

        # kwargs
        if t.kwarg:
            if first:
                first = False
            else:
                self.write(", ")
            self.write("**" + t.kwarg.arg)
            if t.kwarg.annotation:
                self.write(": ")
                self.dispatch(t.kwarg.annotation)

    def _keyword(self, t):
        if t.arg is None:
            self.write("**")
        else:
            self.write(t.arg)
            self.write("=")
        self.dispatch(t.value)

    def _Lambda(self, t):
        self.write("(")
        self.write("lambda ")
        self.dispatch(t.args)
        self.write(": ")
        self.dispatch(t.body)
        self.write(")")

    def _alias(self, t):
        self.write(t.name)
        if t.asname:
            self.write(" as " + t.asname)

    def _withitem(self, t):
        self.dispatch(t.context_expr)
        if t.optional_vars:
            self.write(" as ")
            self.dispatch(t.optional_vars)


def roundtrip(filename, output=sys.stdout):
    with open(filename, "rb") as pyfile:
        encoding = tokenize.detect_encoding(pyfile.readline)[0]
    with open(filename, "r", encoding=encoding) as pyfile:
        source = pyfile.read()
    tree = compile(source, filename, "exec", ast.PyCF_ONLY_AST)
    Unparser(tree, output)
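A small round-trip check of the vendored unparser:

```python
import ast
import io

source = "def add(a, b):\n    return a + b\n"
buf = io.StringIO()
Unparser(ast.parse(source), file=buf)
print(buf.getvalue())  # regenerated source, formatting normalized
```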
34
libs/community/langchain_community/tools/edenai/__init__.py
Normal file
@@ -0,0 +1,34 @@
"""Edenai Tools."""
from langchain_community.tools.edenai.audio_speech_to_text import (
    EdenAiSpeechToTextTool,
)
from langchain_community.tools.edenai.audio_text_to_speech import (
    EdenAiTextToSpeechTool,
)
from langchain_community.tools.edenai.edenai_base_tool import EdenaiTool
from langchain_community.tools.edenai.image_explicitcontent import (
    EdenAiExplicitImageTool,
)
from langchain_community.tools.edenai.image_objectdetection import (
    EdenAiObjectDetectionTool,
)
from langchain_community.tools.edenai.ocr_identityparser import (
    EdenAiParsingIDTool,
)
from langchain_community.tools.edenai.ocr_invoiceparser import (
    EdenAiParsingInvoiceTool,
)
from langchain_community.tools.edenai.text_moderation import (
    EdenAiTextModerationTool,
)

__all__ = [
    "EdenAiExplicitImageTool",
    "EdenAiObjectDetectionTool",
    "EdenAiParsingIDTool",
    "EdenAiParsingInvoiceTool",
    "EdenAiTextToSpeechTool",
    "EdenAiSpeechToTextTool",
    "EdenAiTextModerationTool",
    "EdenaiTool",
]
@@ -0,0 +1,103 @@
from __future__ import annotations

import json
import logging
import time
from typing import List, Optional

import requests
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.pydantic_v1 import validator

from langchain_community.tools.edenai.edenai_base_tool import EdenaiTool

logger = logging.getLogger(__name__)


class EdenAiSpeechToTextTool(EdenaiTool):
    """Tool that queries the Eden AI Speech To Text API.

    For API reference, check the Eden AI documentation:
    https://app.edenai.run/bricks/speech/asynchronous-speech-to-text.

    To use, you should have
    the environment variable ``EDENAI_API_KEY`` set with your API token.
    You can find your token here: https://app.edenai.run/admin/account/settings

    """

    edenai_api_key: Optional[str] = None

    name = "edenai_speech_to_text"
    description = (
        "A wrapper around edenai Services speech to text. "
        "Useful for when you have to convert audio to text. "
        "Input should be a url to an audio file."
    )
    is_async = True

    language: Optional[str] = "en"
    speakers: Optional[int]
    profanity_filter: bool = False
    custom_vocabulary: Optional[List[str]]

    feature: str = "audio"
    subfeature: str = "speech_to_text_async"
    base_url = "https://api.edenai.run/v2/audio/speech_to_text_async/"

    @validator("providers")
    def check_only_one_provider_selected(cls, v: List[str]) -> List[str]:
        """
        This tool has no feature to combine providers' results,
        therefore we only allow one provider.
        """
        if len(v) > 1:
            raise ValueError(
                "Please select only one provider. "
                "The feature to combine providers results is not available "
                "for this tool."
            )
        return v

    def _wait_processing(self, url: str) -> requests.Response:
        for _ in range(10):
            time.sleep(1)
            audio_analysis_result = self._get_edenai(url)
            temp = audio_analysis_result.json()
            if temp["status"] == "finished":
                if temp["results"][self.providers[0]]["error"] is not None:
                    raise Exception(
                        f"""EdenAI returned an unexpected response
                        {temp['results'][self.providers[0]]['error']}"""
                    )
                else:
                    return audio_analysis_result

        raise Exception("Eden AI speech-to-text job processing timed out")

    def _parse_response(self, response: dict) -> str:
        return response["public_id"]

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        all_params = {
            "file_url": query,
            "language": self.language,
            "speakers": self.speakers,
            "profanity_filter": self.profanity_filter,
            "custom_vocabulary": self.custom_vocabulary,
        }

        # filter so we don't send a param to the API when its value is `None`
        query_params = {k: v for k, v in all_params.items() if v is not None}

        job_id = self._call_eden_ai(query_params)
        url = self.base_url + job_id
        audio_analysis_result = self._wait_processing(url)
        result = audio_analysis_result.text
        formatted_text = json.loads(result)
        return formatted_text["results"][self.providers[0]]["text"]
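A minimal usage sketch for the speech-to-text tool above, assuming `EDENAI_API_KEY` is exported; `"google"` is purely an illustrative provider name and the audio URL is a placeholder:

```python
from langchain_community.tools.edenai import EdenAiSpeechToTextTool

# Exactly one provider: the validator above rejects multi-provider lists.
tool = EdenAiSpeechToTextTool(providers=["google"], language="en")

# Blocks briefly while polling the async job, then returns the transcript.
transcript = tool.run("https://example.com/sample.wav")  # placeholder URL
print(transcript)
```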
@@ -0,0 +1,116 @@
from __future__ import annotations

import logging
from typing import Dict, List, Literal, Optional

import requests
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.pydantic_v1 import Field, root_validator, validator

from langchain_community.tools.edenai.edenai_base_tool import EdenaiTool

logger = logging.getLogger(__name__)


class EdenAiTextToSpeechTool(EdenaiTool):
    """Tool that queries the Eden AI Text to speech API.

    For API reference, check the Eden AI documentation:
    https://docs.edenai.co/reference/audio_text_to_speech_create.

    To use, you should have
    the environment variable ``EDENAI_API_KEY`` set with your API token.
    You can find your token here: https://app.edenai.run/admin/account/settings

    """

    name = "edenai_text_to_speech"
    description = (
        "A wrapper around edenai Services text to speech. "
        "Useful for when you need to convert text to speech. "
        """The output is a string representing the URL of the audio file,
        or the path to the downloaded wav file. """
    )

    language: Optional[str] = "en"
    """
    language of the text passed to the model.
    """

    # optional params; see api documentation for more info
    return_type: Literal["url", "wav"] = "url"
    rate: Optional[int]
    pitch: Optional[int]
    volume: Optional[int]
    audio_format: Optional[str]
    sampling_rate: Optional[int]
    voice_models: Dict[str, str] = Field(default_factory=dict)

    voice: Literal["MALE", "FEMALE"]
    """voice option: 'MALE' or 'FEMALE'"""

    feature: str = "audio"
    subfeature: str = "text_to_speech"

    @validator("providers")
    def check_only_one_provider_selected(cls, v: List[str]) -> List[str]:
        """
        This tool has no feature to combine providers' results,
        therefore we only allow one provider.
        """
        if len(v) > 1:
            raise ValueError(
                "Please select only one provider. "
                "The feature to combine providers results is not available "
                "for this tool."
            )
        return v

    @root_validator
    def check_voice_models_key_is_provider_name(cls, values: dict) -> dict:
        for key in values.get("voice_models", {}).keys():
            if key not in values.get("providers", []):
                raise ValueError(
                    "voice_model should be formatted like this "
                    "{<provider_name>: <its_voice_model>}"
                )
        return values

    def _download_wav(self, url: str, save_path: str) -> None:
        response = requests.get(url)
        if response.status_code == 200:
            with open(save_path, "wb") as f:
                f.write(response.content)
        else:
            raise ValueError("Error while downloading wav file")

    def _parse_response(self, response: list) -> str:
        result = response[0]
        if self.return_type == "url":
            return result["audio_resource_url"]
        else:
            self._download_wav(result["audio_resource_url"], "audio.wav")
            return "audio.wav"

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        all_params = {
            "text": query,
            "language": self.language,
            "option": self.voice,
            "return_type": self.return_type,
            "rate": self.rate,
            "pitch": self.pitch,
            "volume": self.volume,
            "audio_format": self.audio_format,
            "sampling_rate": self.sampling_rate,
            "settings": self.voice_models,
        }

        # filter so we don't send a param to the API when its value is `None`
        query_params = {k: v for k, v in all_params.items() if v is not None}

        return self._call_eden_ai(query_params)
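A usage sketch under the same assumptions (`EDENAI_API_KEY` set, `"google"` as an illustrative provider name); note that `voice` is a required field on the model above:

```python
from langchain_community.tools.edenai import EdenAiTextToSpeechTool

tts = EdenAiTextToSpeechTool(
    providers=["google"],  # illustrative provider name
    language="en",
    voice="FEMALE",
    return_type="url",  # "wav" would instead download audio.wav locally
)

audio_url = tts.run("Hello from Eden AI.")
```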
@@ -0,0 +1,159 @@
from __future__ import annotations

import logging
from abc import abstractmethod
from typing import Any, Dict, List, Optional

import requests
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.pydantic_v1 import root_validator
from langchain_core.tools import BaseTool
from langchain_core.utils import get_from_dict_or_env

logger = logging.getLogger(__name__)


class EdenaiTool(BaseTool):
    """
    The base tool for all the EdenAI tools.
    You should have
    the environment variable ``EDENAI_API_KEY`` set with your API token.
    You can find your token here: https://app.edenai.run/admin/account/settings
    """

    feature: str
    subfeature: str
    edenai_api_key: Optional[str] = None
    is_async: bool = False

    providers: List[str]
    """providers to use for the API call."""

    @root_validator(allow_reuse=True)
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key exists in environment."""
        values["edenai_api_key"] = get_from_dict_or_env(
            values, "edenai_api_key", "EDENAI_API_KEY"
        )
        return values

    @staticmethod
    def get_user_agent() -> str:
        from langchain_community import __version__

        return f"langchain/{__version__}"

    def _call_eden_ai(self, query_params: Dict[str, Any]) -> str:
        """
        Make an API call to the EdenAI service with the specified query parameters.

        Args:
            query_params (dict): The parameters to include in the API call.

        Returns:
            str: The parsed response from the EdenAI API call.

        """

        # make the API call

        headers = {
            "Authorization": f"Bearer {self.edenai_api_key}",
            "User-Agent": self.get_user_agent(),
        }

        url = f"https://api.edenai.run/v2/{self.feature}/{self.subfeature}"

        payload = {
            "providers": str(self.providers),
            "response_as_dict": False,
            "attributes_as_list": True,
            "show_original_response": False,
        }

        payload.update(query_params)

        response = requests.post(url, json=payload, headers=headers)

        self._raise_on_error(response)

        try:
            return self._parse_response(response.json())
        except Exception as e:
            raise RuntimeError(f"An error occurred while running tool: {e}")

    def _raise_on_error(self, response: requests.Response) -> None:
        if response.status_code >= 500:
            raise Exception(f"EdenAI Server: Error {response.status_code}")
        elif response.status_code >= 400:
            raise ValueError(f"EdenAI received an invalid payload: {response.text}")
        elif response.status_code != 200:
            raise Exception(
                f"EdenAI returned an unexpected response with status "
                f"{response.status_code}: {response.text}"
            )

        # case where the edenai call succeeded but the provider returned an error
        # (e.g. rate limit, server error, etc.)
        if self.is_async is False:
            # async calls are different and only return a job_id,
            # not the provider response directly
            provider_response = response.json()[0]
            if provider_response.get("status") == "fail":
                err_msg = provider_response["error"]["message"]
                raise ValueError(err_msg)

    @abstractmethod
    def _run(
        self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None
    ) -> str:
        pass

    @abstractmethod
    def _parse_response(self, response: Any) -> str:
        """Take a dict response and condense its data into a human-readable string."""
        pass

    def _get_edenai(self, url: str) -> requests.Response:
        headers = {
            "accept": "application/json",
            "authorization": f"Bearer {self.edenai_api_key}",
            "User-Agent": self.get_user_agent(),
        }

        response = requests.get(url, headers=headers)

        self._raise_on_error(response)

        return response

    def _parse_json_multilevel(
        self, extracted_data: dict, formatted_list: list, level: int = 0
    ) -> None:
        for section, subsections in extracted_data.items():
            indentation = " " * level
            if isinstance(subsections, str):
                subsections = subsections.replace("\n", ",")
                formatted_list.append(f"{indentation}{section} : {subsections}")

            elif isinstance(subsections, list):
                formatted_list.append(f"{indentation}{section} : ")
                self._list_handling(subsections, formatted_list, level + 1)

            elif isinstance(subsections, dict):
                formatted_list.append(f"{indentation}{section} : ")
                self._parse_json_multilevel(subsections, formatted_list, level + 1)

    def _list_handling(
        self, subsection_list: list, formatted_list: list, level: int
    ) -> None:
        for list_item in subsection_list:
            if isinstance(list_item, dict):
                self._parse_json_multilevel(list_item, formatted_list, level)

            elif isinstance(list_item, list):
                self._list_handling(list_item, formatted_list, level + 1)

            else:
                formatted_list.append(f"{' ' * level}{list_item}")
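The contract for subclasses is visible above: set `feature`/`subfeature`, then implement `_run` (build the query params and call `_call_eden_ai`) and `_parse_response` (condense the JSON payload). A hypothetical subclass sketch for illustration only — the `sentiment_analysis` subfeature name and the response keys are assumptions, not part of this PR:

```python
from typing import Any, Optional

from langchain_core.callbacks import CallbackManagerForToolRun

from langchain_community.tools.edenai.edenai_base_tool import EdenaiTool


class EdenAiSentimentTool(EdenaiTool):
    """Hypothetical Eden AI sentiment tool, for illustration only."""

    name = "edenai_sentiment_analysis"
    description = "Analyze the overall sentiment of a piece of text."

    feature: str = "text"
    subfeature: str = "sentiment_analysis"  # assumed subfeature name

    def _parse_response(self, response: Any) -> str:
        # Assumed payload shape; condense it into one readable line.
        result = response[0]
        return f"{result['general_sentiment']} ({result['general_sentiment_rate']})"

    def _run(
        self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None
    ) -> str:
        return self._call_eden_ai({"text": query, "language": "en"})
```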
@@ -0,0 +1,68 @@
from __future__ import annotations

import logging
from typing import Optional

from langchain_core.callbacks import CallbackManagerForToolRun

from langchain_community.tools.edenai.edenai_base_tool import EdenaiTool

logger = logging.getLogger(__name__)


class EdenAiExplicitImageTool(EdenaiTool):
    """Tool that queries the Eden AI Explicit image detection.

    For API reference, check the Eden AI documentation:
    https://docs.edenai.co/reference/image_explicit_content_create.

    To use, you should have
    the environment variable ``EDENAI_API_KEY`` set with your API token.
    You can find your token here: https://app.edenai.run/admin/account/settings

    """

    name = "edenai_image_explicit_content_detection"

    description = (
        "A wrapper around edenai Services Explicit image detection. "
        """Useful for when you have to extract explicit content from images.
        It detects adult-only content in images
        that is generally inappropriate for people under
        the age of 18 and includes nudity, sexual activity,
        pornography, violence, gore content, etc."""
        "Input should be the string url of the image."
    )

    combine_available = True
    feature = "image"
    subfeature = "explicit_content"

    def _parse_json(self, json_data: dict) -> str:
        result_str = f"nsfw_likelihood: {json_data['nsfw_likelihood']}\n"
        for idx, found_obj in enumerate(json_data["items"]):
            label = found_obj["label"].lower()
            likelihood = found_obj["likelihood"]
            result_str += f"{idx}: {label} likelihood {likelihood},\n"

        return result_str[:-2]

    def _parse_response(self, json_data: list) -> str:
        if len(json_data) == 1:
            result = self._parse_json(json_data[0])
        else:
            for entry in json_data:
                if entry.get("provider") == "eden-ai":
                    result = self._parse_json(entry)

        return result

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        query_params = {"file_url": query, "attributes_as_list": False}
        return self._call_eden_ai(query_params)
@@ -0,0 +1,76 @@
from __future__ import annotations

import logging
from typing import Optional

from langchain_core.callbacks import CallbackManagerForToolRun

from langchain_community.tools.edenai.edenai_base_tool import EdenaiTool

logger = logging.getLogger(__name__)


class EdenAiObjectDetectionTool(EdenaiTool):
    """Tool that queries the Eden AI Object detection API.

    For API reference, check the Eden AI documentation:
    https://docs.edenai.co/reference/image_object_detection_create.

    To use, you should have
    the environment variable ``EDENAI_API_KEY`` set with your API token.
    You can find your token here: https://app.edenai.run/admin/account/settings

    """

    name = "edenai_object_detection"

    description = (
        "A wrapper around edenai Services Object Detection. "
        """Useful for when you have to identify and locate
        (with bounding boxes) objects in an image. """
        "Input should be the string url of the image to identify."
    )

    show_positions: bool = False

    feature = "image"
    subfeature = "object_detection"

    def _parse_json(self, json_data: dict) -> str:
        result = []
        label_info = []

        for found_obj in json_data["items"]:
            label_str = f"{found_obj['label']} - Confidence {found_obj['confidence']}"
            x_min = found_obj.get("x_min")
            x_max = found_obj.get("x_max")
            y_min = found_obj.get("y_min")
            y_max = found_obj.get("y_max")
            if self.show_positions and all(
                [x_min, x_max, y_min, y_max]
            ):  # some providers don't return positions
                label_str += f""",at the position x_min: {x_min}, x_max: {x_max},
                y_min: {y_min}, y_max: {y_max}"""
            label_info.append(label_str)

        result.append("\n".join(label_info))
        return "\n\n".join(result)

    def _parse_response(self, response: list) -> str:
        if len(response) == 1:
            result = self._parse_json(response[0])
        else:
            for entry in response:
                if entry.get("provider") == "eden-ai":
                    result = self._parse_json(entry)

        return result

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        query_params = {"file_url": query, "attributes_as_list": False}
        return self._call_eden_ai(query_params)
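A short usage sketch (same `EDENAI_API_KEY` assumption; the provider name and image URL are placeholders):

```python
from langchain_community.tools.edenai import EdenAiObjectDetectionTool

detector = EdenAiObjectDetectionTool(
    providers=["google"],  # illustrative provider name
    show_positions=True,   # append bounding boxes when the provider returns them
)

print(detector.run("https://example.com/street.jpg"))  # placeholder URL
```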
@@ -0,0 +1,69 @@
from __future__ import annotations

import logging
from typing import Optional

from langchain_core.callbacks import CallbackManagerForToolRun

from langchain_community.tools.edenai.edenai_base_tool import EdenaiTool

logger = logging.getLogger(__name__)


class EdenAiParsingIDTool(EdenaiTool):
    """Tool that queries the Eden AI Identity parsing API.

    For API reference, check the Eden AI documentation:
    https://docs.edenai.co/reference/ocr_identity_parser_create.

    To use, you should have
    the environment variable ``EDENAI_API_KEY`` set with your API token.
    You can find your token here: https://app.edenai.run/admin/account/settings

    """

    name = "edenai_identity_parsing"

    description = (
        "A wrapper around edenai Services Identity parsing. "
        "Useful for when you have to extract information from an ID Document. "
        "Input should be the string url of the document to parse."
    )

    feature = "ocr"
    subfeature = "identity_parser"

    language: Optional[str] = None
    """
    language of the text passed to the model.
    """

    def _parse_response(self, response: list) -> str:
        formatted_list: list = []

        if len(response) == 1:
            self._parse_json_multilevel(
                response[0]["extracted_data"][0], formatted_list
            )
        else:
            for entry in response:
                if entry.get("provider") == "eden-ai":
                    self._parse_json_multilevel(
                        entry["extracted_data"][0], formatted_list
                    )

        return "\n".join(formatted_list)

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        query_params = {
            "file_url": query,
            "language": self.language,
            "attributes_as_list": False,
        }

        return self._call_eden_ai(query_params)
@@ -0,0 +1,73 @@
from __future__ import annotations

import logging
from typing import Optional

from langchain_core.callbacks import CallbackManagerForToolRun

from langchain_community.tools.edenai.edenai_base_tool import EdenaiTool

logger = logging.getLogger(__name__)


class EdenAiParsingInvoiceTool(EdenaiTool):
    """Tool that queries the Eden AI Invoice parsing API.

    For API reference, check the Eden AI documentation:
    https://docs.edenai.co/reference/ocr_invoice_parser_create.

    To use, you should have
    the environment variable ``EDENAI_API_KEY`` set with your API token.
    You can find your token here: https://app.edenai.run/admin/account/settings

    """

    name = "edenai_invoice_parsing"

    description = (
        "A wrapper around edenai Services invoice parsing. "
        """Useful for when you have to extract information from an image;
        it enables taking invoices
        in a variety of formats and returns the data they contain
        (items, prices, addresses, vendor name, etc.)
        in a structured format to automate invoice processing. """
        "Input should be the string url of the document to parse."
    )

    language: Optional[str] = None
    """
    language of the image passed to the model.
    """

    feature = "ocr"
    subfeature = "invoice_parser"

    def _parse_response(self, response: list) -> str:
        formatted_list: list = []

        if len(response) == 1:
            self._parse_json_multilevel(
                response[0]["extracted_data"][0], formatted_list
            )
        else:
            for entry in response:
                if entry.get("provider") == "eden-ai":
                    self._parse_json_multilevel(
                        entry["extracted_data"][0], formatted_list
                    )

        return "\n".join(formatted_list)

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        query_params = {
            "file_url": query,
            "language": self.language,
            "attributes_as_list": False,
        }

        return self._call_eden_ai(query_params)
@@ -0,0 +1,73 @@
from __future__ import annotations

import logging
from typing import Optional

from langchain_core.callbacks import CallbackManagerForToolRun

from langchain_community.tools.edenai.edenai_base_tool import EdenaiTool

logger = logging.getLogger(__name__)


class EdenAiTextModerationTool(EdenaiTool):
    """Tool that queries the Eden AI Explicit text detection.

    For API reference, check the Eden AI documentation:
    https://docs.edenai.co/reference/image_explicit_content_create.

    To use, you should have
    the environment variable ``EDENAI_API_KEY`` set with your API token.
    You can find your token here: https://app.edenai.run/admin/account/settings

    """

    name = "edenai_explicit_content_detection_text"

    description = (
        "A wrapper around edenai Services explicit content detection for text. "
        """Useful for when you have to scan text for offensive,
        sexually explicit or suggestive content;
        it also checks for any content of self-harm,
        violence, racist or hate speech."""
        """The structure of the output is:
        'the type of the explicit content : the likelihood of it being explicit'
        the likelihood is a number
        between 1 and 5, 1 being the lowest and 5 the highest.
        Something is explicit if the likelihood is equal to or higher than 3.
        For example:
        nsfw_likelihood: 1
        this is not explicit.
        For example:
        nsfw_likelihood: 3
        this is explicit.
        """
        "Input should be a string."
    )

    language: str

    feature: str = "text"
    subfeature: str = "moderation"

    def _parse_response(self, response: list) -> str:
        formatted_result = []
        for result in response:
            if "nsfw_likelihood" in result.keys():
                formatted_result.append(
                    "nsfw_likelihood: " + str(result["nsfw_likelihood"])
                )

            for label, likelihood in zip(result["label"], result["likelihood"]):
                formatted_result.append(f'"{label}": {str(likelihood)}')

        return "\n".join(formatted_result)

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        query_params = {"text": query, "language": self.language}
        return self._call_eden_ai(query_params)
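A usage sketch; the output format follows `_parse_response` above, with one label/likelihood line per category on a 1-5 scale (the provider name is a placeholder, and `EDENAI_API_KEY` is assumed to be set):

```python
from langchain_community.tools.edenai import EdenAiTextModerationTool

moderation = EdenAiTextModerationTool(providers=["openai"], language="en")

# Each line is '"<label>": <likelihood>'; a likelihood >= 3 is treated
# as explicit per the tool description above.
print(moderation.run("some user-submitted text"))
```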
@@ -0,0 +1,5 @@
"""Eleven Labs Services Tools."""

from langchain_community.tools.eleven_labs.text2speech import ElevenLabsText2SpeechTool

__all__ = ["ElevenLabsText2SpeechTool"]
@@ -0,0 +1,8 @@
from enum import Enum


class ElevenLabsModel(str, Enum):
    """Models available for Eleven Labs Text2Speech."""

    MULTI_LINGUAL = "eleven_multilingual_v1"
    MONO_LINGUAL = "eleven_monolingual_v1"
@@ -0,0 +1,80 @@
import tempfile
from enum import Enum
from typing import Any, Dict, Optional, Union

from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.pydantic_v1 import root_validator
from langchain_core.tools import BaseTool
from langchain_core.utils import get_from_dict_or_env


def _import_elevenlabs() -> Any:
    try:
        import elevenlabs
    except ImportError as e:
        raise ImportError(
            "Cannot import elevenlabs, please install `pip install elevenlabs`."
        ) from e
    return elevenlabs


class ElevenLabsModel(str, Enum):
    """Models available for Eleven Labs Text2Speech."""

    MULTI_LINGUAL = "eleven_multilingual_v1"
    MONO_LINGUAL = "eleven_monolingual_v1"


class ElevenLabsText2SpeechTool(BaseTool):
    """Tool that queries the Eleven Labs Text2Speech API.

    In order to set this up, follow instructions at:
    https://docs.elevenlabs.io/welcome/introduction
    """

    model: Union[ElevenLabsModel, str] = ElevenLabsModel.MULTI_LINGUAL

    name: str = "eleven_labs_text2speech"
    description: str = (
        "A wrapper around Eleven Labs Text2Speech. "
        "Useful for when you need to convert text to speech. "
        "It supports multiple languages, including English, German, Polish, "
        "Spanish, Italian, French, Portuguese, and Hindi. "
    )

    @root_validator(pre=True)
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key exists in environment."""
        _ = get_from_dict_or_env(values, "eleven_api_key", "ELEVEN_API_KEY")

        return values

    def _run(
        self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None
    ) -> str:
        """Use the tool."""
        elevenlabs = _import_elevenlabs()
        try:
            speech = elevenlabs.generate(text=query, model=self.model)
            with tempfile.NamedTemporaryFile(
                mode="bx", suffix=".wav", delete=False
            ) as f:
                f.write(speech)
            return f.name
        except Exception as e:
            raise RuntimeError(f"Error while running ElevenLabsText2SpeechTool: {e}")

    def play(self, speech_file: str) -> None:
        """Play the text as speech."""
        elevenlabs = _import_elevenlabs()
        with open(speech_file, mode="rb") as f:
            speech = f.read()

        elevenlabs.play(speech)

    def stream_speech(self, query: str) -> None:
        """Stream the text as speech as it is generated.

        Play the text in your speakers."""
        elevenlabs = _import_elevenlabs()
        speech_stream = elevenlabs.generate(text=query, model=self.model, stream=True)
        elevenlabs.stream(speech_stream)
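A minimal sketch of the three entry points above, assuming `ELEVEN_API_KEY` is exported and the `elevenlabs` package is installed:

```python
from langchain_community.tools.eleven_labs import ElevenLabsText2SpeechTool

tts = ElevenLabsText2SpeechTool()

speech_file = tts.run("Hello world!")  # synthesizes to a temp file, returns its path
tts.play(speech_file)                  # plays the saved audio

tts.stream_speech("This is played as it is generated.")
```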
@@ -0,0 +1,19 @@
"""File Management Tools."""

from langchain_community.tools.file_management.copy import CopyFileTool
from langchain_community.tools.file_management.delete import DeleteFileTool
from langchain_community.tools.file_management.file_search import FileSearchTool
from langchain_community.tools.file_management.list_dir import ListDirectoryTool
from langchain_community.tools.file_management.move import MoveFileTool
from langchain_community.tools.file_management.read import ReadFileTool
from langchain_community.tools.file_management.write import WriteFileTool

__all__ = [
    "CopyFileTool",
    "DeleteFileTool",
    "FileSearchTool",
    "MoveFileTool",
    "ReadFileTool",
    "WriteFileTool",
    "ListDirectoryTool",
]
@@ -0,0 +1,53 @@
import shutil
from typing import Optional, Type

from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.tools import BaseTool

from langchain_community.tools.file_management.utils import (
    INVALID_PATH_TEMPLATE,
    BaseFileToolMixin,
    FileValidationError,
)


class FileCopyInput(BaseModel):
    """Input for CopyFileTool."""

    source_path: str = Field(..., description="Path of the file to copy")
    destination_path: str = Field(..., description="Path to save the copied file")


class CopyFileTool(BaseFileToolMixin, BaseTool):
    """Tool that copies a file."""

    name: str = "copy_file"
    args_schema: Type[BaseModel] = FileCopyInput
    description: str = "Create a copy of a file in a specified location"

    def _run(
        self,
        source_path: str,
        destination_path: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        try:
            source_path_ = self.get_relative_path(source_path)
        except FileValidationError:
            return INVALID_PATH_TEMPLATE.format(
                arg_name="source_path", value=source_path
            )
        try:
            destination_path_ = self.get_relative_path(destination_path)
        except FileValidationError:
            return INVALID_PATH_TEMPLATE.format(
                arg_name="destination_path", value=destination_path
            )
        try:
            shutil.copy2(source_path_, destination_path_, follow_symlinks=False)
            return f"File copied successfully from {source_path} to {destination_path}."
        except Exception as e:
            return "Error: " + str(e)

    # TODO: Add aiofiles method
@@ -0,0 +1,45 @@
import os
from typing import Optional, Type

from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.tools import BaseTool

from langchain_community.tools.file_management.utils import (
    INVALID_PATH_TEMPLATE,
    BaseFileToolMixin,
    FileValidationError,
)


class FileDeleteInput(BaseModel):
    """Input for DeleteFileTool."""

    file_path: str = Field(..., description="Path of the file to delete")


class DeleteFileTool(BaseFileToolMixin, BaseTool):
    """Tool that deletes a file."""

    name: str = "file_delete"
    args_schema: Type[BaseModel] = FileDeleteInput
    description: str = "Delete a file"

    def _run(
        self,
        file_path: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        try:
            file_path_ = self.get_relative_path(file_path)
        except FileValidationError:
            return INVALID_PATH_TEMPLATE.format(arg_name="file_path", value=file_path)
        if not file_path_.exists():
            return f"Error: no such file or directory: {file_path}"
        try:
            os.remove(file_path_)
            return f"File deleted successfully: {file_path}."
        except Exception as e:
            return "Error: " + str(e)

    # TODO: Add aiofiles method
@@ -0,0 +1,62 @@
import fnmatch
import os
from typing import Optional, Type

from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.tools import BaseTool

from langchain_community.tools.file_management.utils import (
    INVALID_PATH_TEMPLATE,
    BaseFileToolMixin,
    FileValidationError,
)


class FileSearchInput(BaseModel):
    """Input for FileSearchTool."""

    dir_path: str = Field(
        default=".",
        description="Subdirectory to search in.",
    )
    pattern: str = Field(
        ...,
        description="Unix shell-style wildcard pattern, where * matches everything.",
    )


class FileSearchTool(BaseFileToolMixin, BaseTool):
    """Tool that searches for files in a subdirectory that match a wildcard pattern."""

    name: str = "file_search"
    args_schema: Type[BaseModel] = FileSearchInput
    description: str = (
        "Recursively search for files in a subdirectory that match the wildcard pattern"
    )

    def _run(
        self,
        pattern: str,
        dir_path: str = ".",
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        try:
            dir_path_ = self.get_relative_path(dir_path)
        except FileValidationError:
            return INVALID_PATH_TEMPLATE.format(arg_name="dir_path", value=dir_path)
        matches = []
        try:
            for root, _, filenames in os.walk(dir_path_):
                for filename in fnmatch.filter(filenames, pattern):
                    absolute_path = os.path.join(root, filename)
                    relative_path = os.path.relpath(absolute_path, dir_path_)
                    matches.append(relative_path)
            if matches:
                return "\n".join(matches)
            else:
                return f"No files found for pattern {pattern} in directory {dir_path}"
        except Exception as e:
            return "Error: " + str(e)

    # TODO: Add aiofiles method
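A usage sketch; the sandbox directory is a placeholder, and the pattern is an `fnmatch`-style wildcard rather than a regex:

```python
from langchain_community.tools.file_management import FileSearchTool

search = FileSearchTool(root_dir="/tmp/sandbox")  # placeholder sandbox root

# Multi-argument tools accept a dict input; * matches everything.
print(search.run({"pattern": "*.txt", "dir_path": "notes"}))
```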
@@ -0,0 +1,46 @@
import os
from typing import Optional, Type

from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.tools import BaseTool

from langchain_community.tools.file_management.utils import (
    INVALID_PATH_TEMPLATE,
    BaseFileToolMixin,
    FileValidationError,
)


class DirectoryListingInput(BaseModel):
    """Input for ListDirectoryTool."""

    dir_path: str = Field(default=".", description="Subdirectory to list.")


class ListDirectoryTool(BaseFileToolMixin, BaseTool):
    """Tool that lists files and directories in a specified folder."""

    name: str = "list_directory"
    args_schema: Type[BaseModel] = DirectoryListingInput
    description: str = "List files and directories in a specified folder"

    def _run(
        self,
        dir_path: str = ".",
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        try:
            dir_path_ = self.get_relative_path(dir_path)
        except FileValidationError:
            return INVALID_PATH_TEMPLATE.format(arg_name="dir_path", value=dir_path)
        try:
            entries = os.listdir(dir_path_)
            if entries:
                return "\n".join(entries)
            else:
                return f"No files found in directory {dir_path}"
        except Exception as e:
            return "Error: " + str(e)

    # TODO: Add aiofiles method
@@ -0,0 +1,56 @@
import shutil
from typing import Optional, Type

from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.tools import BaseTool

from langchain_community.tools.file_management.utils import (
    INVALID_PATH_TEMPLATE,
    BaseFileToolMixin,
    FileValidationError,
)


class FileMoveInput(BaseModel):
    """Input for MoveFileTool."""

    source_path: str = Field(..., description="Path of the file to move")
    destination_path: str = Field(..., description="New path for the moved file")


class MoveFileTool(BaseFileToolMixin, BaseTool):
    """Tool that moves a file."""

    name: str = "move_file"
    args_schema: Type[BaseModel] = FileMoveInput
    description: str = "Move or rename a file from one location to another"

    def _run(
        self,
        source_path: str,
        destination_path: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        try:
            source_path_ = self.get_relative_path(source_path)
        except FileValidationError:
            return INVALID_PATH_TEMPLATE.format(
                arg_name="source_path", value=source_path
            )
        try:
            destination_path_ = self.get_relative_path(destination_path)
        except FileValidationError:
            return INVALID_PATH_TEMPLATE.format(
                arg_name="destination_path", value=destination_path
            )
        if not source_path_.exists():
            return f"Error: no such file or directory {source_path}"
        try:
            # shutil.move expects str args in 3.8
            shutil.move(str(source_path_), destination_path_)
            return f"File moved successfully from {source_path} to {destination_path}."
        except Exception as e:
            return "Error: " + str(e)

    # TODO: Add aiofiles method
@@ -0,0 +1,45 @@
from typing import Optional, Type

from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.tools import BaseTool

from langchain_community.tools.file_management.utils import (
    INVALID_PATH_TEMPLATE,
    BaseFileToolMixin,
    FileValidationError,
)


class ReadFileInput(BaseModel):
    """Input for ReadFileTool."""

    file_path: str = Field(..., description="name of file")


class ReadFileTool(BaseFileToolMixin, BaseTool):
    """Tool that reads a file."""

    name: str = "read_file"
    args_schema: Type[BaseModel] = ReadFileInput
    description: str = "Read file from disk"

    def _run(
        self,
        file_path: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        try:
            read_path = self.get_relative_path(file_path)
        except FileValidationError:
            return INVALID_PATH_TEMPLATE.format(arg_name="file_path", value=file_path)
        if not read_path.exists():
            return f"Error: no such file or directory: {file_path}"
        try:
            with read_path.open("r", encoding="utf-8") as f:
                content = f.read()
            return content
        except Exception as e:
            return "Error: " + str(e)

    # TODO: Add aiofiles method
@@ -0,0 +1,54 @@
import sys
from pathlib import Path
from typing import Optional

from langchain_core.pydantic_v1 import BaseModel


def is_relative_to(path: Path, root: Path) -> bool:
    """Check if path is relative to root."""
    if sys.version_info >= (3, 9):
        # No need for a try/except block in Python 3.9+.
        return path.is_relative_to(root)
    try:
        path.relative_to(root)
        return True
    except ValueError:
        return False


INVALID_PATH_TEMPLATE = (
    "Error: Access denied to {arg_name}: {value}."
    " Permission granted exclusively to the current working directory"
)


class FileValidationError(ValueError):
    """Error for paths outside the root directory."""


class BaseFileToolMixin(BaseModel):
    """Mixin for file system tools."""

    root_dir: Optional[str] = None
    """The final path will be chosen relative to root_dir if specified."""

    def get_relative_path(self, file_path: str) -> Path:
        """Get the relative path, returning an error if unsupported."""
        if self.root_dir is None:
            return Path(file_path)
        return get_validated_relative_path(Path(self.root_dir), file_path)


def get_validated_relative_path(root: Path, user_path: str) -> Path:
    """Resolve a relative path, raising an error if not within the root directory."""
    # Note, this still permits symlinks from outside that point within the root.
    # Further validation would be needed if those are to be disallowed.
    root = root.resolve()
    full_path = (root / user_path).resolve()

    if not is_relative_to(full_path, root):
        raise FileValidationError(
            f"Path {user_path} is outside of the allowed directory {root}"
        )
    return full_path
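A sketch of the validation behavior above — a path that resolves inside the root is returned absolute, while a traversal attempt raises `FileValidationError` (the root directory is a placeholder):

```python
from pathlib import Path

from langchain_community.tools.file_management.utils import (
    FileValidationError,
    get_validated_relative_path,
)

root = Path("/tmp/sandbox")  # placeholder root

print(get_validated_relative_path(root, "notes/todo.txt"))  # /tmp/sandbox/notes/todo.txt

try:
    get_validated_relative_path(root, "../../etc/passwd")  # escapes the root
except FileValidationError as err:
    print(err)
```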
@@ -0,0 +1,51 @@
from typing import Optional, Type

from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.tools import BaseTool

from langchain_community.tools.file_management.utils import (
    INVALID_PATH_TEMPLATE,
    BaseFileToolMixin,
    FileValidationError,
)


class WriteFileInput(BaseModel):
    """Input for WriteFileTool."""

    file_path: str = Field(..., description="name of file")
    text: str = Field(..., description="text to write to file")
    append: bool = Field(
        default=False, description="Whether to append to an existing file."
    )


class WriteFileTool(BaseFileToolMixin, BaseTool):
    """Tool that writes a file to disk."""

    name: str = "write_file"
    args_schema: Type[BaseModel] = WriteFileInput
    description: str = "Write file to disk"

    def _run(
        self,
        file_path: str,
        text: str,
        append: bool = False,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        try:
            write_path = self.get_relative_path(file_path)
        except FileValidationError:
            return INVALID_PATH_TEMPLATE.format(arg_name="file_path", value=file_path)
        try:
            write_path.parent.mkdir(exist_ok=True, parents=False)
            mode = "a" if append else "w"
            with write_path.open(mode, encoding="utf-8") as f:
                f.write(text)
            return f"File written successfully to {file_path}."
        except Exception as e:
            return "Error: " + str(e)

    # TODO: Add aiofiles method
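A combined sketch of the write/read tools sharing one sandbox root (a placeholder path); every `file_path` is resolved inside `root_dir` by the mixin above:

```python
from langchain_community.tools.file_management import ReadFileTool, WriteFileTool

root = "/tmp/sandbox"  # placeholder; paths resolve inside this directory
write_tool = WriteFileTool(root_dir=root)
read_tool = ReadFileTool(root_dir=root)

write_tool.run({"file_path": "notes/todo.txt", "text": "ship the PR\n"})
write_tool.run({"file_path": "notes/todo.txt", "text": "write docs\n", "append": True})
print(read_tool.run({"file_path": "notes/todo.txt"}))
```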
@@ -0,0 +1 @@
""" GitHub Tool """
100
libs/community/langchain_community/tools/github/prompt.py
Normal file
@@ -0,0 +1,100 @@
# flake8: noqa
GET_ISSUES_PROMPT = """
This tool will fetch a list of the repository's issues. It will return the title, and issue number of 5 issues. It takes no input."""

GET_ISSUE_PROMPT = """
This tool will fetch the title, body, and comment thread of a specific issue. **VERY IMPORTANT**: You must specify the issue number as an integer."""

COMMENT_ON_ISSUE_PROMPT = """
This tool is useful when you need to comment on a GitHub issue. Simply pass in the issue number and the comment you would like to make. Please use this sparingly as we don't want to clutter the comment threads. **VERY IMPORTANT**: Your input to this tool MUST strictly follow these rules:

- First you must specify the issue number as an integer
- Then you must place two newlines
- Then you must specify your comment"""

CREATE_PULL_REQUEST_PROMPT = """
This tool is useful when you need to create a new pull request in a GitHub repository. **VERY IMPORTANT**: Your input to this tool MUST strictly follow these rules:

- First you must specify the title of the pull request
- Then you must place two newlines
- Then you must write the body or description of the pull request

When appropriate, always reference relevant issues in the body by using the syntax `closes #<issue_number>` like `closes #3, closes #6`.
For example, if you would like to create a pull request called "README updates" with contents "added contributors' names, closes #3", you would pass in the following string:

README updates

added contributors' names, closes #3"""

CREATE_FILE_PROMPT = """
This tool is a wrapper for the GitHub API, useful when you need to create a file in a GitHub repository. **VERY IMPORTANT**: Your input to this tool MUST strictly follow these rules:

- First you must specify which file to create by passing a full file path (**IMPORTANT**: the path must not start with a slash)
- Then you must specify the contents of the file

For example, if you would like to create a file called /test/test.txt with contents "test contents", you would pass in the following string:

test/test.txt

test contents"""

READ_FILE_PROMPT = """
This tool is a wrapper for the GitHub API, useful when you need to read the contents of a file. Simply pass in the full file path of the file you would like to read. **IMPORTANT**: the path must not start with a slash"""

UPDATE_FILE_PROMPT = """
This tool is a wrapper for the GitHub API, useful when you need to update the contents of a file in a GitHub repository. **VERY IMPORTANT**: Your input to this tool MUST strictly follow these rules:

- First you must specify which file to modify by passing a full file path (**IMPORTANT**: the path must not start with a slash)
- Then you must specify the old contents which you would like to replace wrapped in OLD <<<< and >>>> OLD
- Then you must specify the new contents which you would like to replace the old contents with wrapped in NEW <<<< and >>>> NEW

For example, if you would like to replace the contents of the file /test/test.txt from "old contents" to "new contents", you would pass in the following string:

test/test.txt

This is text that will not be changed
OLD <<<<
old contents
>>>> OLD
NEW <<<<
new contents
>>>> NEW"""

DELETE_FILE_PROMPT = """
This tool is a wrapper for the GitHub API, useful when you need to delete a file in a GitHub repository. Simply pass in the full file path of the file you would like to delete. **IMPORTANT**: the path must not start with a slash"""

GET_PR_PROMPT = """
This tool will fetch the title, body, comment thread and commit history of a specific Pull Request (by PR number). **VERY IMPORTANT**: You must specify the PR number as an integer."""

LIST_PRS_PROMPT = """
This tool will fetch a list of the repository's Pull Requests (PRs). It will return the title, and PR number of 5 PRs. It takes no input."""

LIST_PULL_REQUEST_FILES = """
This tool will fetch the full text of all files in a pull request (PR) given the PR number as an input. This is useful for understanding the code changes in a PR or contributing to it. **VERY IMPORTANT**: You must specify the PR number as an integer input parameter."""

OVERVIEW_EXISTING_FILES_IN_MAIN = """
This tool will provide an overview of all existing files in the main branch of the repository. It will list the file names, their respective paths, and a brief summary of their contents. This can be useful for understanding the structure and content of the repository, especially when navigating through large codebases. No input parameters are required."""

OVERVIEW_EXISTING_FILES_BOT_BRANCH = """
This tool will provide an overview of all files in your current working branch where you should implement changes. This is great for getting a high level overview of the structure of your code. No input parameters are required."""

SEARCH_ISSUES_AND_PRS_PROMPT = """
This tool will search for issues and pull requests in the repository. **VERY IMPORTANT**: You must specify the search query as a string input parameter."""

SEARCH_CODE_PROMPT = """
This tool will search for code in the repository. **VERY IMPORTANT**: You must specify the search query as a string input parameter."""

CREATE_REVIEW_REQUEST_PROMPT = """
This tool will create a review request on the open pull request that matches the current active branch. **VERY IMPORTANT**: You must specify the username of the person who is being requested as a string input parameter."""

LIST_BRANCHES_IN_REPO_PROMPT = """
This tool will fetch a list of all branches in the repository. It will return the name of each branch. No input parameters are required."""

SET_ACTIVE_BRANCH_PROMPT = """
This tool will set the active branch in the repository, similar to `git checkout <branch_name>` and `git switch -c <branch_name>`. **VERY IMPORTANT**: You must specify the name of the branch as a string input parameter."""

CREATE_BRANCH_PROMPT = """
This tool will create a new branch in the repository. **VERY IMPORTANT**: You must specify the name of the new branch as a string input parameter."""

GET_FILES_FROM_DIRECTORY_PROMPT = """
This tool will fetch a list of all files in a specified directory. **VERY IMPORTANT**: You must specify the path of the directory as a string input parameter."""
37
libs/community/langchain_community/tools/github/tool.py
Normal file
@@ -0,0 +1,37 @@
"""
This tool allows agents to interact with the pygithub library
and operate on a GitHub repository.

To use this tool, you must first set as environment variables:
    GITHUB_API_TOKEN
    GITHUB_REPOSITORY -> format: {owner}/{repo}

"""
from typing import Optional, Type

from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.tools import BaseTool

from langchain_community.utilities.github import GitHubAPIWrapper


class GitHubAction(BaseTool):
    """Tool for interacting with the GitHub API."""

    api_wrapper: GitHubAPIWrapper = Field(default_factory=GitHubAPIWrapper)
    mode: str
    name: str = ""
    description: str = ""
    args_schema: Optional[Type[BaseModel]] = None

    def _run(
        self,
        instructions: Optional[str] = "",
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the GitHub API to run an operation."""
        if not instructions or instructions == "{}":
            # Catch other forms of empty input that GPT-4 likes to send.
            instructions = ""
        return self.api_wrapper.run(self.mode, instructions)
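A construction sketch; it assumes the GitHub credentials named in the module docstring are already exported, and uses `"get_issues"` as an example mode dispatched to `GitHubAPIWrapper.run`:

```python
from langchain_community.tools.github.prompt import GET_ISSUES_PROMPT
from langchain_community.tools.github.tool import GitHubAction

# GitHubAPIWrapper validates the GitHub credentials at construction time.
tool = GitHubAction(
    mode="get_issues",  # example mode routed to api_wrapper.run(mode, instructions)
    name="Get Issues",
    description=GET_ISSUES_PROMPT,
)
print(tool.run(""))  # this mode takes no input
```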
@@ -0,0 +1 @@
""" GitLab Tool """
70
libs/community/langchain_community/tools/gitlab/prompt.py
Normal file
@@ -0,0 +1,70 @@
# flake8: noqa
GET_ISSUES_PROMPT = """
This tool will fetch a list of the repository's issues. It will return the title, and issue number of 5 issues. It takes no input.
"""

GET_ISSUE_PROMPT = """
This tool will fetch the title, body, and comment thread of a specific issue. **VERY IMPORTANT**: You must specify the issue number as an integer.
"""

COMMENT_ON_ISSUE_PROMPT = """
This tool is useful when you need to comment on a GitLab issue. Simply pass in the issue number and the comment you would like to make. Please use this sparingly as we don't want to clutter the comment threads. **VERY IMPORTANT**: Your input to this tool MUST strictly follow these rules:

- First you must specify the issue number as an integer
- Then you must place two newlines
- Then you must specify your comment
"""
CREATE_PULL_REQUEST_PROMPT = """
This tool is useful when you need to create a new pull request in a GitLab repository. **VERY IMPORTANT**: Your input to this tool MUST strictly follow these rules:

- First you must specify the title of the pull request
- Then you must place two newlines
- Then you must write the body or description of the pull request

To reference an issue in the body, put its issue number directly after a #.
For example, if you would like to create a pull request called "README updates" with contents "added contributors' names, closes issue #3", you would pass in the following string:

README updates

added contributors' names, closes issue #3
"""
CREATE_FILE_PROMPT = """
This tool is a wrapper for the GitLab API, useful when you need to create a file in a GitLab repository. **VERY IMPORTANT**: Your input to this tool MUST strictly follow these rules:

- First you must specify which file to create by passing a full file path (**IMPORTANT**: the path must not start with a slash)
- Then you must specify the contents of the file

For example, if you would like to create a file called /test/test.txt with contents "test contents", you would pass in the following string:

test/test.txt

test contents
"""

READ_FILE_PROMPT = """
This tool is a wrapper for the GitLab API, useful when you need to read the contents of a file in a GitLab repository. Simply pass in the full file path of the file you would like to read. **IMPORTANT**: the path must not start with a slash
"""

UPDATE_FILE_PROMPT = """
This tool is a wrapper for the GitLab API, useful when you need to update the contents of a file in a GitLab repository. **VERY IMPORTANT**: Your input to this tool MUST strictly follow these rules:

- First you must specify which file to modify by passing a full file path (**IMPORTANT**: the path must not start with a slash)
- Then you must specify the old contents which you would like to replace wrapped in OLD <<<< and >>>> OLD
- Then you must specify the new contents which you would like to replace the old contents with wrapped in NEW <<<< and >>>> NEW

For example, if you would like to replace the contents of the file /test/test.txt from "old contents" to "new contents", you would pass in the following string:

test/test.txt

This is text that will not be changed
OLD <<<<
old contents
>>>> OLD
NEW <<<<
new contents
>>>> NEW
"""

DELETE_FILE_PROMPT = """
This tool is a wrapper for the GitLab API, useful when you need to delete a file in a GitLab repository. Simply pass in the full file path of the file you would like to delete. **IMPORTANT**: the path must not start with a slash
"""
33
libs/community/langchain_community/tools/gitlab/tool.py
Normal file
@@ -0,0 +1,33 @@
"""
This tool allows agents to interact with the python-gitlab library
and operate on a GitLab repository.

To use this tool, you must first set as environment variables:
    GITLAB_PRIVATE_ACCESS_TOKEN
    GITLAB_REPOSITORY -> format: {owner}/{repo}

"""
from typing import Optional

from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.pydantic_v1 import Field
from langchain_core.tools import BaseTool

from langchain_community.utilities.gitlab import GitLabAPIWrapper


class GitLabAction(BaseTool):
    """Tool for interacting with the GitLab API."""

    api_wrapper: GitLabAPIWrapper = Field(default_factory=GitLabAPIWrapper)
    mode: str
    name: str = ""
    description: str = ""

    def _run(
        self,
        instructions: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the GitLab API to run an operation."""
        return self.api_wrapper.run(self.mode, instructions)
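A rough usage sketch for the class above. The environment variable names come from the module docstring; the "comment_on_issue" mode string and the prompt import are assumptions based on the surrounding modules in this commit, and the token, repository, and issue values are made up:

```
import os

from langchain_community.tools.gitlab.prompt import COMMENT_ON_ISSUE_PROMPT
from langchain_community.tools.gitlab.tool import GitLabAction

# Both variables are required by GitLabAPIWrapper (values here are placeholders).
os.environ["GITLAB_PRIVATE_ACCESS_TOKEN"] = "glpat-..."
os.environ["GITLAB_REPOSITORY"] = "my-org/my-repo"

# The instruction string follows COMMENT_ON_ISSUE_PROMPT:
# issue number, two newlines, then the comment.
tool = GitLabAction(
    mode="comment_on_issue",  # assumed mode string understood by the wrapper
    name="Comment on GitLab issue",
    description=COMMENT_ON_ISSUE_PROMPT,
)
print(tool.run("3\n\nLooks good to me!"))
```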
17
libs/community/langchain_community/tools/gmail/__init__.py
Normal file
@@ -0,0 +1,17 @@
"""Gmail tools."""

from langchain_community.tools.gmail.create_draft import GmailCreateDraft
from langchain_community.tools.gmail.get_message import GmailGetMessage
from langchain_community.tools.gmail.get_thread import GmailGetThread
from langchain_community.tools.gmail.search import GmailSearch
from langchain_community.tools.gmail.send_message import GmailSendMessage
from langchain_community.tools.gmail.utils import get_gmail_credentials

__all__ = [
    "GmailCreateDraft",
    "GmailSendMessage",
    "GmailSearch",
    "GmailGetMessage",
    "GmailGetThread",
    "get_gmail_credentials",
]
37
libs/community/langchain_community/tools/gmail/base.py
Normal file
@@ -0,0 +1,37 @@
"""Base class for Gmail tools."""
from __future__ import annotations

from typing import TYPE_CHECKING

from langchain_core.pydantic_v1 import Field
from langchain_core.tools import BaseTool

from langchain_community.tools.gmail.utils import build_resource_service

if TYPE_CHECKING:
    # This is for linting and IDE typehints
    from googleapiclient.discovery import Resource
else:
    try:
        # We do this so pydantic can resolve the types when instantiating
        from googleapiclient.discovery import Resource
    except ImportError:
        pass


class GmailBaseTool(BaseTool):
    """Base class for Gmail tools."""

    api_resource: Resource = Field(default_factory=build_resource_service)

    @classmethod
    def from_api_resource(cls, api_resource: Resource) -> "GmailBaseTool":
        """Create a tool from an api resource.

        Args:
            api_resource: The api resource to use.

        Returns:
            A tool.
        """
        return cls(api_resource=api_resource)
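Note that `from_api_resource` must forward the resource into the `api_resource` field (the only field this model declares); the diff originally passed it as `service=`, which pydantic would reject, and that is corrected above. A minimal construction sketch, shown with the `GmailSearch` subclass added later in this diff and assuming local credential files exist:

```
from langchain_community.tools.gmail.search import GmailSearch
from langchain_community.tools.gmail.utils import build_resource_service

api_resource = build_resource_service()  # builds a Gmail v1 Resource
tool = GmailSearch.from_api_resource(api_resource)
```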
@@ -0,0 +1,87 @@
import base64
from email.message import EmailMessage
from typing import List, Optional, Type

from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.pydantic_v1 import BaseModel, Field

from langchain_community.tools.gmail.base import GmailBaseTool


class CreateDraftSchema(BaseModel):
    """Input for CreateDraftTool."""

    message: str = Field(
        ...,
        description="The message to include in the draft.",
    )
    to: List[str] = Field(
        ...,
        description="The list of recipients.",
    )
    subject: str = Field(
        ...,
        description="The subject of the message.",
    )
    cc: Optional[List[str]] = Field(
        None,
        description="The list of CC recipients.",
    )
    bcc: Optional[List[str]] = Field(
        None,
        description="The list of BCC recipients.",
    )


class GmailCreateDraft(GmailBaseTool):
    """Tool that creates a draft email for Gmail."""

    name: str = "create_gmail_draft"
    description: str = (
        "Use this tool to create a draft email with the provided message fields."
    )
    args_schema: Type[CreateDraftSchema] = CreateDraftSchema

    def _prepare_draft_message(
        self,
        message: str,
        to: List[str],
        subject: str,
        cc: Optional[List[str]] = None,
        bcc: Optional[List[str]] = None,
    ) -> dict:
        draft_message = EmailMessage()
        draft_message.set_content(message)

        draft_message["To"] = ", ".join(to)
        draft_message["Subject"] = subject
        if cc is not None:
            draft_message["Cc"] = ", ".join(cc)

        if bcc is not None:
            draft_message["Bcc"] = ", ".join(bcc)

        encoded_message = base64.urlsafe_b64encode(draft_message.as_bytes()).decode()
        return {"message": {"raw": encoded_message}}

    def _run(
        self,
        message: str,
        to: List[str],
        subject: str,
        cc: Optional[List[str]] = None,
        bcc: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        try:
            create_message = self._prepare_draft_message(message, to, subject, cc, bcc)
            draft = (
                self.api_resource.users()
                .drafts()
                .create(userId="me", body=create_message)
                .execute()
            )
            output = f'Draft created. Draft Id: {draft["id"]}'
            return output
        except Exception as e:
            raise Exception(f"An error occurred: {e}")
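A hedged usage sketch for the draft tool; the recipient and text are made up, and default credential discovery via `credentials.json`/`token.json` is assumed:

```
from langchain_community.tools.gmail.create_draft import GmailCreateDraft

tool = GmailCreateDraft()  # api_resource defaults to build_resource_service()
result = tool.run(
    {
        "message": "Hello,\n\nThis is an automatically generated draft.",
        "to": ["example@example.com"],
        "subject": "Automated draft",
    }
)
print(result)  # e.g. 'Draft created. Draft Id: r-...'
```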
@@ -0,0 +1,70 @@
import base64
import email
from typing import Dict, Optional, Type

from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.pydantic_v1 import BaseModel, Field

from langchain_community.tools.gmail.base import GmailBaseTool
from langchain_community.tools.gmail.utils import clean_email_body


class SearchArgsSchema(BaseModel):
    """Input for GetMessageTool."""

    message_id: str = Field(
        ...,
        description="The unique ID of the email message, retrieved from a search.",
    )


class GmailGetMessage(GmailBaseTool):
    """Tool that gets a message by ID from Gmail."""

    name: str = "get_gmail_message"
    description: str = (
        "Use this tool to fetch an email by message ID."
        " Returns the thread ID, snippet, body, subject, and sender."
    )
    args_schema: Type[SearchArgsSchema] = SearchArgsSchema

    def _run(
        self,
        message_id: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> Dict:
        """Run the tool."""
        query = (
            self.api_resource.users()
            .messages()
            .get(userId="me", format="raw", id=message_id)
        )
        message_data = query.execute()
        raw_message = base64.urlsafe_b64decode(message_data["raw"])

        email_msg = email.message_from_bytes(raw_message)

        subject = email_msg["Subject"]
        sender = email_msg["From"]

        message_body = ""
        if email_msg.is_multipart():
            for part in email_msg.walk():
                ctype = part.get_content_type()
                cdispo = str(part.get("Content-Disposition"))
                if ctype == "text/plain" and "attachment" not in cdispo:
                    message_body = part.get_payload(decode=True).decode("utf-8")
                    break
        else:
            message_body = email_msg.get_payload(decode=True).decode("utf-8")

        body = clean_email_body(message_body)

        return {
            "id": message_id,
            "threadId": message_data["threadId"],
            "snippet": message_data["snippet"],
            "body": body,
            "subject": subject,
            "sender": sender,
        }
48
libs/community/langchain_community/tools/gmail/get_thread.py
Normal file
@@ -0,0 +1,48 @@
from typing import Dict, Optional, Type

from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.pydantic_v1 import BaseModel, Field

from langchain_community.tools.gmail.base import GmailBaseTool


class GetThreadSchema(BaseModel):
    """Input for GetThreadTool."""

    # From https://support.google.com/mail/answer/7190?hl=en
    thread_id: str = Field(
        ...,
        description="The thread ID.",
    )


class GmailGetThread(GmailBaseTool):
    """Tool that gets a thread by ID from Gmail."""

    name: str = "get_gmail_thread"
    description: str = (
        "Use this tool to fetch an entire email thread by thread ID."
        " The output is the thread data with the ID and snippet of each message."
    )
    args_schema: Type[GetThreadSchema] = GetThreadSchema

    def _run(
        self,
        thread_id: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> Dict:
        """Run the tool."""
        query = self.api_resource.users().threads().get(userId="me", id=thread_id)
        thread_data = query.execute()
        if not isinstance(thread_data, dict):
            raise ValueError("The output of the query must be a dict.")
        messages = thread_data["messages"]
        thread_data["messages"] = []
        keys_to_keep = ["id", "snippet"]
        # TODO: Parse body.
        for message in messages:
            thread_data["messages"].append(
                {k: message[k] for k in keys_to_keep if k in message}
            )
        return thread_data
140
libs/community/langchain_community/tools/gmail/search.py
Normal file
@@ -0,0 +1,140 @@
import base64
import email
from enum import Enum
from typing import Any, Dict, List, Optional, Type

from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.pydantic_v1 import BaseModel, Field

from langchain_community.tools.gmail.base import GmailBaseTool
from langchain_community.tools.gmail.utils import clean_email_body


class Resource(str, Enum):
    """Enumerator of Resources to search."""

    THREADS = "threads"
    MESSAGES = "messages"


class SearchArgsSchema(BaseModel):
    """Input for SearchGmailTool."""

    # From https://support.google.com/mail/answer/7190?hl=en
    query: str = Field(
        ...,
        description="The Gmail query. Example filters include from:sender,"
        " to:recipient, subject:subject, -filtered_term,"
        " in:folder, is:important|read|starred, after:year/mo/date, "
        "before:year/mo/date, label:label_name"
        ' "exact phrase".'
        " Search newer/older than using d (day), m (month), and y (year): "
        "newer_than:2d, older_than:1y."
        " Attachments with extension example: filename:pdf. Multiple term"
        " matching example: from:amy OR from:david.",
    )
    resource: Resource = Field(
        default=Resource.MESSAGES,
        description="Whether to search for threads or messages.",
    )
    max_results: int = Field(
        default=10,
        description="The maximum number of results to return.",
    )


class GmailSearch(GmailBaseTool):
    """Tool that searches for messages or threads in Gmail."""

    name: str = "search_gmail"
    description: str = (
        "Use this tool to search for email messages or threads."
        " The input must be a valid Gmail query."
        " The output is a JSON list of the requested resource."
    )
    args_schema: Type[SearchArgsSchema] = SearchArgsSchema

    def _parse_threads(self, threads: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        # Add the thread message snippets to the thread results
        results = []
        for thread in threads:
            thread_id = thread["id"]
            thread_data = (
                self.api_resource.users()
                .threads()
                .get(userId="me", id=thread_id)
                .execute()
            )
            messages = thread_data["messages"]
            thread["messages"] = []
            for message in messages:
                snippet = message["snippet"]
                thread["messages"].append({"snippet": snippet, "id": message["id"]})
            results.append(thread)

        return results

    def _parse_messages(self, messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        results = []
        for message in messages:
            message_id = message["id"]
            message_data = (
                self.api_resource.users()
                .messages()
                .get(userId="me", format="raw", id=message_id)
                .execute()
            )

            raw_message = base64.urlsafe_b64decode(message_data["raw"])

            email_msg = email.message_from_bytes(raw_message)

            subject = email_msg["Subject"]
            sender = email_msg["From"]

            message_body = ""
            if email_msg.is_multipart():
                for part in email_msg.walk():
                    ctype = part.get_content_type()
                    cdispo = str(part.get("Content-Disposition"))
                    if ctype == "text/plain" and "attachment" not in cdispo:
                        message_body = part.get_payload(decode=True).decode("utf-8")
                        break
            else:
                message_body = email_msg.get_payload(decode=True).decode("utf-8")

            body = clean_email_body(message_body)

            results.append(
                {
                    "id": message["id"],
                    "threadId": message_data["threadId"],
                    "snippet": message_data["snippet"],
                    "body": body,
                    "subject": subject,
                    "sender": sender,
                }
            )
        return results

    def _run(
        self,
        query: str,
        resource: Resource = Resource.MESSAGES,
        max_results: int = 10,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> List[Dict[str, Any]]:
        """Run the tool."""
        if resource == Resource.THREADS:
            collection = self.api_resource.users().threads()
        elif resource == Resource.MESSAGES:
            collection = self.api_resource.users().messages()
        else:
            raise NotImplementedError(f"Resource of type {resource} not implemented.")
        results = (
            collection.list(userId="me", q=query, maxResults=max_results)
            .execute()
            .get(resource.value, [])
        )
        if resource == Resource.THREADS:
            return self._parse_threads(results)
        return self._parse_messages(results)
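Note that `_run` above now lists from the collection matching the requested resource; the diff originally always called `messages().list(...)` and then read the `"threads"` key, which the messages list response never contains, so thread searches would silently return an empty list. A sketch of driving the search tool with the Gmail query operators documented in the schema (the query string is only an example, and default credential discovery is assumed):

```
from langchain_community.tools.gmail.search import GmailSearch

tool = GmailSearch()
emails = tool.run({"query": "from:me newer_than:7d", "max_results": 5})
for e in emails:
    print(e["subject"], "-", e["snippet"])
```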
@@ -0,0 +1,89 @@
"""Send Gmail messages."""
import base64
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from typing import Any, Dict, List, Optional, Union

from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.pydantic_v1 import BaseModel, Field

from langchain_community.tools.gmail.base import GmailBaseTool


class SendMessageSchema(BaseModel):
    """Input for SendMessageTool."""

    message: str = Field(
        ...,
        description="The message to send.",
    )
    to: Union[str, List[str]] = Field(
        ...,
        description="The list of recipients.",
    )
    subject: str = Field(
        ...,
        description="The subject of the message.",
    )
    cc: Optional[Union[str, List[str]]] = Field(
        None,
        description="The list of CC recipients.",
    )
    bcc: Optional[Union[str, List[str]]] = Field(
        None,
        description="The list of BCC recipients.",
    )


class GmailSendMessage(GmailBaseTool):
    """Tool that sends a message to Gmail."""

    name: str = "send_gmail_message"
    description: str = (
        "Use this tool to send email messages."
        " The input is the message, recipients, and subject."
    )

    def _prepare_message(
        self,
        message: str,
        to: Union[str, List[str]],
        subject: str,
        cc: Optional[Union[str, List[str]]] = None,
        bcc: Optional[Union[str, List[str]]] = None,
    ) -> Dict[str, Any]:
        """Create a message for an email."""
        mime_message = MIMEMultipart()
        mime_message.attach(MIMEText(message, "html"))

        mime_message["To"] = ", ".join(to if isinstance(to, list) else [to])
        mime_message["Subject"] = subject
        if cc is not None:
            mime_message["Cc"] = ", ".join(cc if isinstance(cc, list) else [cc])

        if bcc is not None:
            mime_message["Bcc"] = ", ".join(bcc if isinstance(bcc, list) else [bcc])

        encoded_message = base64.urlsafe_b64encode(mime_message.as_bytes()).decode()
        return {"raw": encoded_message}

    def _run(
        self,
        message: str,
        to: Union[str, List[str]],
        subject: str,
        cc: Optional[Union[str, List[str]]] = None,
        bcc: Optional[Union[str, List[str]]] = None,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Run the tool."""
        try:
            create_message = self._prepare_message(message, to, subject, cc=cc, bcc=bcc)
            send_message = (
                self.api_resource.users()
                .messages()
                .send(userId="me", body=create_message)
            )
            sent_message = send_message.execute()
            return f'Message sent. Message Id: {sent_message["id"]}'
        except Exception as error:
            raise Exception(f"An error occurred: {error}")
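Usage mirrors the draft tool; note the body is attached as HTML via `MIMEText(message, "html")`. A sketch with made-up values, again assuming default credential discovery:

```
from langchain_community.tools.gmail.send_message import GmailSendMessage

tool = GmailSendMessage()
print(
    tool.run(
        {
            "message": "<p>Hello from a tool run.</p>",
            "to": "example@example.com",
            "subject": "Test message",
        }
    )
)
```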
132
libs/community/langchain_community/tools/gmail/utils.py
Normal file
@@ -0,0 +1,132 @@
"""Gmail tool utils."""
from __future__ import annotations

import logging
import os
from typing import TYPE_CHECKING, List, Optional, Tuple

if TYPE_CHECKING:
    from google.auth.transport.requests import Request
    from google.oauth2.credentials import Credentials
    from google_auth_oauthlib.flow import InstalledAppFlow
    from googleapiclient.discovery import Resource
    from googleapiclient.discovery import build as build_resource

logger = logging.getLogger(__name__)


def import_google() -> Tuple[Request, Credentials]:
    """Import google libraries.

    Returns:
        Tuple[Request, Credentials]: Request and Credentials classes.
    """
    # google-auth-httplib2
    try:
        from google.auth.transport.requests import Request  # noqa: F401
        from google.oauth2.credentials import Credentials  # noqa: F401
    except ImportError:
        raise ImportError(
            "You need to install google-auth-httplib2 to use this toolkit. "
            "Try running pip install --upgrade google-auth-httplib2"
        )
    return Request, Credentials


def import_installed_app_flow() -> InstalledAppFlow:
    """Import InstalledAppFlow class.

    Returns:
        InstalledAppFlow: InstalledAppFlow class.
    """
    try:
        from google_auth_oauthlib.flow import InstalledAppFlow
    except ImportError:
        raise ImportError(
            "You need to install google-auth-oauthlib to use this toolkit. "
            "Try running pip install --upgrade google-auth-oauthlib"
        )
    return InstalledAppFlow


def import_googleapiclient_resource_builder() -> build_resource:
    """Import googleapiclient.discovery.build function.

    Returns:
        build_resource: googleapiclient.discovery.build function.
    """
    try:
        from googleapiclient.discovery import build
    except ImportError:
        raise ImportError(
            "You need to install googleapiclient to use this toolkit. "
            "Try running pip install --upgrade google-api-python-client"
        )
    return build


DEFAULT_SCOPES = ["https://mail.google.com/"]
DEFAULT_CREDS_TOKEN_FILE = "token.json"
DEFAULT_CLIENT_SECRETS_FILE = "credentials.json"


def get_gmail_credentials(
    token_file: Optional[str] = None,
    client_secrets_file: Optional[str] = None,
    scopes: Optional[List[str]] = None,
) -> Credentials:
    """Get credentials."""
    # From https://developers.google.com/gmail/api/quickstart/python
    Request, Credentials = import_google()
    InstalledAppFlow = import_installed_app_flow()
    creds = None
    scopes = scopes or DEFAULT_SCOPES
    token_file = token_file or DEFAULT_CREDS_TOKEN_FILE
    client_secrets_file = client_secrets_file or DEFAULT_CLIENT_SECRETS_FILE
    # The file token.json stores the user's access and refresh tokens, and is
    # created automatically when the authorization flow completes for the first
    # time.
    if os.path.exists(token_file):
        creds = Credentials.from_authorized_user_file(token_file, scopes)
    # If there are no (valid) credentials available, let the user log in.
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            # https://developers.google.com/gmail/api/quickstart/python#authorize_credentials_for_a_desktop_application  # noqa
            flow = InstalledAppFlow.from_client_secrets_file(
                client_secrets_file, scopes
            )
            creds = flow.run_local_server(port=0)
        # Save the credentials for the next run
        with open(token_file, "w") as token:
            token.write(creds.to_json())
    return creds


def build_resource_service(
    credentials: Optional[Credentials] = None,
    service_name: str = "gmail",
    service_version: str = "v1",
) -> Resource:
    """Build a Gmail service."""
    credentials = credentials or get_gmail_credentials()
    builder = import_googleapiclient_resource_builder()
    return builder(service_name, service_version, credentials=credentials)


def clean_email_body(body: str) -> str:
    """Clean email body."""
    try:
        from bs4 import BeautifulSoup

        try:
            soup = BeautifulSoup(str(body), "html.parser")
            body = soup.get_text()
            return str(body)
        except Exception as e:
            logger.error(e)
            return str(body)
    except ImportError:
        logger.warning("BeautifulSoup not installed. Skipping cleaning.")
        return str(body)
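The credential helpers above implement the standard Gmail quickstart flow: read a cached token if one exists, refresh it if expired, otherwise run the local OAuth consent flow and cache the result. A sketch with the default file names (the first run opens a browser consent window):

```
from langchain_community.tools.gmail.utils import (
    build_resource_service,
    get_gmail_credentials,
)

# Reads token.json if present, otherwise runs the OAuth flow against
# credentials.json and caches the resulting token for the next run.
credentials = get_gmail_credentials(
    token_file="token.json",
    client_secrets_file="credentials.json",
    scopes=["https://mail.google.com/"],
)
api_resource = build_resource_service(credentials=credentials)
```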
@@ -0,0 +1,8 @@
"""Golden API toolkit."""


from langchain_community.tools.golden_query.tool import GoldenQueryRun

__all__ = [
    "GoldenQueryRun",
]
@@ -0,0 +1,34 @@
"""Tool for the Golden API."""

from typing import Optional

from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool

from langchain_community.utilities.golden_query import GoldenQueryAPIWrapper


class GoldenQueryRun(BaseTool):
    """Tool that adds the capability to query using the Golden API and get back JSON."""

    name: str = "Golden-Query"
    description: str = (
        "A wrapper around Golden Query API."
        " Useful for getting entities that match"
        " a natural language query from Golden's Knowledge Base."
        "\nExample queries:"
        "\n- companies in nanotech"
        "\n- list of cloud providers starting in 2019"
        "\nInput should be the natural language query."
        "\nOutput is a paginated list of results or an error object"
        " in JSON format."
    )
    api_wrapper: GoldenQueryAPIWrapper

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the Golden tool."""
        return self.api_wrapper.run(query)
@@ -0,0 +1,7 @@
"""Google Cloud Tools."""

from langchain_community.tools.google_cloud.texttospeech import (
    GoogleCloudTextToSpeechTool,
)

__all__ = ["GoogleCloudTextToSpeechTool"]
@@ -0,0 +1,91 @@
from __future__ import annotations

import tempfile
from typing import TYPE_CHECKING, Any, Optional

from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool

from langchain_community.utilities.vertexai import get_client_info

if TYPE_CHECKING:
    from google.cloud import texttospeech


def _import_google_cloud_texttospeech() -> Any:
    try:
        from google.cloud import texttospeech
    except ImportError as e:
        raise ImportError(
            "Cannot import google.cloud.texttospeech, please install "
            "`pip install google-cloud-texttospeech`."
        ) from e
    return texttospeech


def _encoding_file_extension_map(encoding: texttospeech.AudioEncoding) -> Optional[str]:
    texttospeech = _import_google_cloud_texttospeech()

    ENCODING_FILE_EXTENSION_MAP = {
        texttospeech.AudioEncoding.LINEAR16: ".wav",
        texttospeech.AudioEncoding.MP3: ".mp3",
        texttospeech.AudioEncoding.OGG_OPUS: ".ogg",
        texttospeech.AudioEncoding.MULAW: ".wav",
        texttospeech.AudioEncoding.ALAW: ".wav",
    }
    return ENCODING_FILE_EXTENSION_MAP.get(encoding)


class GoogleCloudTextToSpeechTool(BaseTool):
    """Tool that queries the Google Cloud Text to Speech API.

    In order to set this up, follow instructions at:
    https://cloud.google.com/text-to-speech/docs/before-you-begin
    """

    name: str = "google_cloud_texttospeech"
    description: str = (
        "A wrapper around Google Cloud Text-to-Speech. "
        "Useful for when you need to synthesize audio from text. "
        "It supports multiple languages, including English, German, Polish, "
        "Spanish, Italian, French, Portuguese, and Hindi. "
    )

    _client: Any

    def __init__(self, **kwargs: Any) -> None:
        """Initializes private fields."""
        texttospeech = _import_google_cloud_texttospeech()

        super().__init__(**kwargs)

        self._client = texttospeech.TextToSpeechClient(
            client_info=get_client_info(module="text-to-speech")
        )

    def _run(
        self,
        input_text: str,
        language_code: str = "en-US",
        ssml_gender: Optional[texttospeech.SsmlVoiceGender] = None,
        audio_encoding: Optional[texttospeech.AudioEncoding] = None,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        texttospeech = _import_google_cloud_texttospeech()
        ssml_gender = ssml_gender or texttospeech.SsmlVoiceGender.NEUTRAL
        audio_encoding = audio_encoding or texttospeech.AudioEncoding.MP3

        response = self._client.synthesize_speech(
            input=texttospeech.SynthesisInput(text=input_text),
            voice=texttospeech.VoiceSelectionParams(
                language_code=language_code, ssml_gender=ssml_gender
            ),
            audio_config=texttospeech.AudioConfig(audio_encoding=audio_encoding),
        )

        suffix = _encoding_file_extension_map(audio_encoding)

        with tempfile.NamedTemporaryFile(mode="bx", suffix=suffix, delete=False) as f:
            f.write(response.audio_content)
        return f.name
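A usage sketch, assuming Google Cloud application-default credentials are configured per the link in the class docstring; the tool writes the synthesized audio to a temporary file and returns its path:

```
from langchain_community.tools.google_cloud import GoogleCloudTextToSpeechTool

tts = GoogleCloudTextToSpeechTool()
audio_path = tts.run("Hello, world!")
print(audio_path)  # path to a temporary .mp3 file with the synthesized speech
```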
@@ -0,0 +1,5 @@
"""Google Finance API Toolkit."""

from langchain_community.tools.google_finance.tool import GoogleFinanceQueryRun

__all__ = ["GoogleFinanceQueryRun"]
@@ -0,0 +1,29 @@
"""Tool for the Google Finance API."""

from typing import Optional

from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool

from langchain_community.utilities.google_finance import GoogleFinanceAPIWrapper


class GoogleFinanceQueryRun(BaseTool):
    """Tool that queries the Google Finance API."""

    name: str = "google_finance"
    description: str = (
        "A wrapper around Google Finance Search. "
        "Useful for when you need to get financial information "
        "from Google Finance. "
        "Input should be a search query."
    )
    api_wrapper: GoogleFinanceAPIWrapper

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        return self.api_wrapper.run(query)
@@ -0,0 +1,5 @@
"""Google Jobs API Toolkit."""

from langchain_community.tools.google_jobs.tool import GoogleJobsQueryRun

__all__ = ["GoogleJobsQueryRun"]
29
libs/community/langchain_community/tools/google_jobs/tool.py
Normal file
@@ -0,0 +1,29 @@
"""Tool for the Google Jobs API."""

from typing import Optional

from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool

from langchain_community.utilities.google_jobs import GoogleJobsAPIWrapper


class GoogleJobsQueryRun(BaseTool):
    """Tool that queries the Google Jobs API."""

    name: str = "google_jobs"
    description: str = (
        "A wrapper around Google Jobs Search. "
        "Useful for when you need to get job listings "
        "from Google Jobs. "
        "Input should be a search query."
    )
    api_wrapper: GoogleJobsAPIWrapper

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        return self.api_wrapper.run(query)
@@ -0,0 +1,5 @@
"""Google Lens API Toolkit."""

from langchain_community.tools.google_lens.tool import GoogleLensQueryRun

__all__ = ["GoogleLensQueryRun"]
29
libs/community/langchain_community/tools/google_lens/tool.py
Normal file
@@ -0,0 +1,29 @@
"""Tool for the Google Lens API."""

from typing import Optional

from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool

from langchain_community.utilities.google_lens import GoogleLensAPIWrapper


class GoogleLensQueryRun(BaseTool):
    """Tool that queries the Google Lens API."""

    name: str = "google_lens"
    description: str = (
        "A wrapper around Google Lens Search. "
        "Useful for when you need to get information related "
        "to an image from Google Lens. "
        "Input should be a URL to an image."
    )
    api_wrapper: GoogleLensAPIWrapper

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        return self.api_wrapper.run(query)
@@ -0,0 +1,5 @@
"""Google Places API Toolkit."""

from langchain_community.tools.google_places.tool import GooglePlacesTool

__all__ = ["GooglePlacesTool"]
@@ -0,0 +1,37 @@
"""Tool for the Google Places API."""

from typing import Optional, Type

from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.tools import BaseTool

from langchain_community.utilities.google_places_api import GooglePlacesAPIWrapper


class GooglePlacesSchema(BaseModel):
    """Input for GooglePlacesTool."""

    query: str = Field(..., description="Query for Google Maps")


class GooglePlacesTool(BaseTool):
    """Tool that queries the Google Places API."""

    name: str = "google_places"
    description: str = (
        "A wrapper around Google Places. "
        "Useful for when you need to validate or "
        "discover addresses from ambiguous text. "
        "Input should be a search query."
    )
    api_wrapper: GooglePlacesAPIWrapper = Field(default_factory=GooglePlacesAPIWrapper)
    args_schema: Type[BaseModel] = GooglePlacesSchema

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        return self.api_wrapper.run(query)
@@ -0,0 +1,5 @@
"""Google Scholar API Toolkit."""

from langchain_community.tools.google_scholar.tool import GoogleScholarQueryRun

__all__ = ["GoogleScholarQueryRun"]
@@ -0,0 +1,29 @@
"""Tool for the Google Scholar API."""

from typing import Optional

from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool

from langchain_community.utilities.google_scholar import GoogleScholarAPIWrapper


class GoogleScholarQueryRun(BaseTool):
    """Tool that queries the Google Scholar API."""

    name: str = "google_scholar"
    description: str = (
        "A wrapper around Google Scholar Search. "
        "Useful for when you need to get information about "
        "research papers from Google Scholar. "
        "Input should be a search query."
    )
    api_wrapper: GoogleScholarAPIWrapper

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        return self.api_wrapper.run(query)
|
@@ -0,0 +1,8 @@
|
||||
"""Google Search API Toolkit."""
|
||||
|
||||
from langchain_community.tools.google_search.tool import (
|
||||
GoogleSearchResults,
|
||||
GoogleSearchRun,
|
||||
)
|
||||
|
||||
__all__ = ["GoogleSearchRun", "GoogleSearchResults"]
|
@@ -0,0 +1,49 @@
"""Tool for the Google search API."""

from typing import Optional

from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool

from langchain_community.utilities.google_search import GoogleSearchAPIWrapper


class GoogleSearchRun(BaseTool):
    """Tool that queries the Google search API."""

    name: str = "google_search"
    description: str = (
        "A wrapper around Google Search. "
        "Useful for when you need to answer questions about current events. "
        "Input should be a search query."
    )
    api_wrapper: GoogleSearchAPIWrapper

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        return self.api_wrapper.run(query)


class GoogleSearchResults(BaseTool):
    """Tool that queries the Google Search API and gets back JSON."""

    name: str = "Google Search Results JSON"
    description: str = (
        "A wrapper around Google Search. "
        "Useful for when you need to answer questions about current events. "
        "Input should be a search query. Output is a JSON array of the query results."
    )
    num_results: int = 4
    api_wrapper: GoogleSearchAPIWrapper

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        return str(self.api_wrapper.results(query, self.num_results))
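A sketch of the JSON-results variant; `GoogleSearchAPIWrapper` reads `GOOGLE_API_KEY` and `GOOGLE_CSE_ID` from the environment, which are assumed to be set beforehand:

```
from langchain_community.tools.google_search.tool import GoogleSearchResults
from langchain_community.utilities.google_search import GoogleSearchAPIWrapper

search = GoogleSearchResults(api_wrapper=GoogleSearchAPIWrapper(), num_results=3)
print(search.run("latest Python release"))
```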
@@ -0,0 +1,9 @@
"""Google Serper API Toolkit.

Tool for the Serper.dev Google Search API.
"""

from langchain_community.tools.google_serper.tool import (
    GoogleSerperResults,
    GoogleSerperRun,
)

__all__ = ["GoogleSerperRun", "GoogleSerperResults"]
@@ -0,0 +1,70 @@
"""Tool for the Serper.dev Google Search API."""

from typing import Optional

from langchain_core.callbacks import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain_core.pydantic_v1 import Field
from langchain_core.tools import BaseTool

from langchain_community.utilities.google_serper import GoogleSerperAPIWrapper


class GoogleSerperRun(BaseTool):
    """Tool that queries the Serper.dev Google search API."""

    name: str = "google_serper"
    description: str = (
        "A low-cost Google Search API. "
        "Useful for when you need to answer questions about current events. "
        "Input should be a search query."
    )
    api_wrapper: GoogleSerperAPIWrapper

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        return str(self.api_wrapper.run(query))

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool asynchronously."""
        return str(await self.api_wrapper.arun(query))


class GoogleSerperResults(BaseTool):
    """Tool that queries the Serper.dev Google Search API
    and gets back JSON."""

    name: str = "google_serper_results_json"
    description: str = (
        "A low-cost Google Search API. "
        "Useful for when you need to answer questions about current events. "
        "Input should be a search query. Output is a JSON object of the query results."
    )
    api_wrapper: GoogleSerperAPIWrapper = Field(default_factory=GoogleSerperAPIWrapper)

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        return str(self.api_wrapper.results(query))

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool asynchronously."""
        return str(await self.api_wrapper.aresults(query))
@@ -0,0 +1,5 @@
"""Google Trends API Toolkit."""

from langchain_community.tools.google_trends.tool import GoogleTrendsQueryRun

__all__ = ["GoogleTrendsQueryRun"]
@@ -0,0 +1,29 @@
"""Tool for the Google Trends API."""

from typing import Optional

from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool

from langchain_community.utilities.google_trends import GoogleTrendsAPIWrapper


class GoogleTrendsQueryRun(BaseTool):
    """Tool that queries the Google Trends API."""

    name: str = "google_trends"
    description: str = (
        "A wrapper around Google Trends Search. "
        "Useful for when you need to get information about "
        "search trends from Google Trends. "
        "Input should be a search query."
    )
    api_wrapper: GoogleTrendsAPIWrapper

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        return self.api_wrapper.run(query)
@@ -0,0 +1 @@
"""Tools for interacting with a GraphQL API."""
36
libs/community/langchain_community/tools/graphql/tool.py
Normal file
@@ -0,0 +1,36 @@
import json
from typing import Optional

from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool

from langchain_community.utilities.graphql import GraphQLAPIWrapper


class BaseGraphQLTool(BaseTool):
    """Base tool for querying a GraphQL API."""

    graphql_wrapper: GraphQLAPIWrapper

    name: str = "query_graphql"
    description: str = """\
    Input to this tool is a detailed and correct GraphQL query, output is a result from the API.
    If the query is not correct, an error message will be returned.
    If an error is returned with 'Bad request' in it, rewrite the query and try again.
    If an error is returned with 'Unauthorized' in it, do not try again, but tell the user to change their authentication.

    Example Input: query {{ allUsers {{ id, name, email }} }}\
    """  # noqa: E501

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True

    def _run(
        self,
        tool_input: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        result = self.graphql_wrapper.run(tool_input)
        return json.dumps(result, indent=2)
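A rough usage sketch; the endpoint URL is made up, and the `graphql_endpoint` field name is an assumption about `GraphQLAPIWrapper` (which also needs the `gql` package installed):

```
from langchain_community.tools.graphql.tool import BaseGraphQLTool
from langchain_community.utilities.graphql import GraphQLAPIWrapper

# Hypothetical endpoint; the wrapper is assumed to take it via graphql_endpoint.
wrapper = GraphQLAPIWrapper(graphql_endpoint="https://api.example.com/graphql")
tool = BaseGraphQLTool(graphql_wrapper=wrapper)
print(tool.run("query { allUsers { id, name, email } }"))
```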
@@ -0,0 +1,5 @@
"""Tool for asking for human input."""

from langchain_community.tools.human.tool import HumanInputRun

__all__ = ["HumanInputRun"]
34
libs/community/langchain_community/tools/human/tool.py
Normal file
@@ -0,0 +1,34 @@
"""Tool for asking human input."""

from typing import Callable, Optional

from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.pydantic_v1 import Field
from langchain_core.tools import BaseTool


def _print_func(text: str) -> None:
    print("\n")
    print(text)


class HumanInputRun(BaseTool):
    """Tool that asks user for input."""

    name: str = "human"
    description: str = (
        "You can ask a human for guidance when you think you "
        "got stuck or you are not sure what to do next. "
        "The input should be a question for the human."
    )
    prompt_func: Callable[[str], None] = Field(default_factory=lambda: _print_func)
    input_func: Callable = Field(default_factory=lambda: input)

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the Human input tool."""
        self.prompt_func(query)
        return self.input_func()
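Both callables are swappable; a sketch that collects a multi-line answer instead of a single `input()` line (the final `run` call is left commented because it blocks on stdin):

```
from langchain_community.tools.human.tool import HumanInputRun


def get_input() -> str:
    print("Type your answer. Enter 'q' to end.")
    lines = []
    for line in iter(input, "q"):  # read lines until the sentinel "q"
        lines.append(line)
    return "\n".join(lines)


tool = HumanInputRun(input_func=get_input)
# answer = tool.run("Which environment should I deploy to?")
```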
60
libs/community/langchain_community/tools/ifttt.py
Normal file
@@ -0,0 +1,60 @@
"""From https://github.com/SidU/teams-langchain-js/wiki/Connecting-IFTTT-Services.

# Creating a webhook
- Go to https://ifttt.com/create

# Configuring the "If This"
- Click on the "If This" button in the IFTTT interface.
- Search for "Webhooks" in the search bar.
- Choose the first option for "Receive a web request with a JSON payload."
- Choose an Event Name that is specific to the service you plan to connect to.
This will make it easier for you to manage the webhook URL.
For example, if you're connecting to Spotify, you could use "Spotify" as your
Event Name.
- Click the "Create Trigger" button to save your settings and create your webhook.

# Configuring the "Then That"
- Tap on the "Then That" button in the IFTTT interface.
- Search for the service you want to connect, such as Spotify.
- Choose an action from the service, such as "Add track to a playlist".
- Configure the action by specifying the necessary details, such as the playlist name,
e.g., "Songs from AI".
- Reference the JSON Payload received by the Webhook in your action. For the Spotify
scenario, choose "{{JsonPayload}}" as your search query.
- Tap the "Create Action" button to save your action settings.
- Once you have finished configuring your action, click the "Finish" button to
complete the setup.
- Congratulations! You have successfully connected the Webhook to the desired
service, and you're ready to start receiving data and triggering actions 🎉

# Finishing up
- To get your webhook URL go to https://ifttt.com/maker_webhooks/settings
- Copy the IFTTT key value from there. The URL is of the form
https://maker.ifttt.com/use/YOUR_IFTTT_KEY. Grab the YOUR_IFTTT_KEY value.
"""
from typing import Optional

import requests
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool


class IFTTTWebhook(BaseTool):
    """IFTTT Webhook.

    Args:
        name: name of the tool
        description: description of the tool
        url: url to hit with the json event.
    """

    url: str

    def _run(
        self,
        tool_input: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        body = {"this": tool_input}
        response = requests.post(self.url, data=body)
        return response.text
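A usage sketch with placeholder values; the trigger-style URL shown is an assumption about IFTTT's maker-webhook endpoint, so substitute whatever URL your webhook settings page gives you:

```
from langchain_community.tools.ifttt import IFTTTWebhook

# Event name and key are placeholders; build the URL per the docstring above.
tool = IFTTTWebhook(
    name="Spotify",
    description="Add a song to a Spotify playlist",
    url="https://maker.ifttt.com/trigger/spotify/with/key/YOUR_IFTTT_KEY",
)
print(tool.run("taylor swift"))
```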
@@ -0,0 +1 @@
"""Tools for interacting with the user."""
Some files were not shown because too many files have changed in this diff.