mirror of https://github.com/csunny/DB-GPT.git
synced 2025-08-01 00:03:29 +00:00
fix
This commit is contained in:
parent
192db2236a
commit
c29c0c9b82
@@ -43,7 +43,7 @@ The Generated SQL is runnable.

# Dependencies

1. First, you need to install the Python requirements.

```
-python>=3.9
+python>=3.10
pip install -r requirements.txt
```

or, if you use a conda environment, you can use this command:
@@ -28,7 +28,6 @@ dependencies:
  - kiwisolver==1.4.4
  - matplotlib==3.7.0
  - multidict==6.0.4
  - openai==0.27.0
  - packaging==23.0
  - psutil==5.9.4
  - pycocotools==2.0.6
@@ -63,4 +62,7 @@ dependencies:
  - unstructured==0.6.3
  - pytesseract==0.3.10
  - markdown2
  - chromadb
+ - colorama
+ - playsound
+ - distro
@@ -1,199 +0,0 @@
"""Handles loading of plugins."""
from typing import Any, Dict, List, Optional, Tuple, TypedDict, TypeVar

from auto_gpt_plugin_template import AutoGPTPluginTemplate

PromptGenerator = TypeVar("PromptGenerator")


class Message(TypedDict):
    role: str
    content: str


class BaseOpenAIPlugin(AutoGPTPluginTemplate):
    """
    This is a BaseOpenAIPlugin class for generating Auto-GPT plugins.
    """

    def __init__(self, manifests_specs_clients: dict):
        # super().__init__()
        self._name = manifests_specs_clients["manifest"]["name_for_model"]
        self._version = manifests_specs_clients["manifest"]["schema_version"]
        self._description = manifests_specs_clients["manifest"]["description_for_model"]
        self._client = manifests_specs_clients["client"]
        self._manifest = manifests_specs_clients["manifest"]
        self._openapi_spec = manifests_specs_clients["openapi_spec"]

    def can_handle_on_response(self) -> bool:
        """This method is called to check that the plugin can
        handle the on_response method.
        Returns:
            bool: True if the plugin can handle the on_response method."""
        return False

    def on_response(self, response: str, *args, **kwargs) -> str:
        """This method is called when a response is received from the model."""
        return response

    def can_handle_post_prompt(self) -> bool:
        """This method is called to check that the plugin can
        handle the post_prompt method.
        Returns:
            bool: True if the plugin can handle the post_prompt method."""
        return False

    def post_prompt(self, prompt: PromptGenerator) -> PromptGenerator:
        """This method is called just after the generate_prompt is called,
        but actually before the prompt is generated.
        Args:
            prompt (PromptGenerator): The prompt generator.
        Returns:
            PromptGenerator: The prompt generator.
        """
        return prompt

    def can_handle_on_planning(self) -> bool:
        """This method is called to check that the plugin can
        handle the on_planning method.
        Returns:
            bool: True if the plugin can handle the on_planning method."""
        return False

    def on_planning(
        self, prompt: PromptGenerator, messages: List[Message]
    ) -> Optional[str]:
        """This method is called before the planning chat completion is done.
        Args:
            prompt (PromptGenerator): The prompt generator.
            messages (List[str]): The list of messages.
        """
        pass

    def can_handle_post_planning(self) -> bool:
        """This method is called to check that the plugin can
        handle the post_planning method.
        Returns:
            bool: True if the plugin can handle the post_planning method."""
        return False

    def post_planning(self, response: str) -> str:
        """This method is called after the planning chat completion is done.
        Args:
            response (str): The response.
        Returns:
            str: The resulting response.
        """
        return response

    def can_handle_pre_instruction(self) -> bool:
        """This method is called to check that the plugin can
        handle the pre_instruction method.
        Returns:
            bool: True if the plugin can handle the pre_instruction method."""
        return False

    def pre_instruction(self, messages: List[Message]) -> List[Message]:
        """This method is called before the instruction chat is done.
        Args:
            messages (List[Message]): The list of context messages.
        Returns:
            List[Message]: The resulting list of messages.
        """
        return messages

    def can_handle_on_instruction(self) -> bool:
        """This method is called to check that the plugin can
        handle the on_instruction method.
        Returns:
            bool: True if the plugin can handle the on_instruction method."""
        return False

    def on_instruction(self, messages: List[Message]) -> Optional[str]:
        """This method is called when the instruction chat is done.
        Args:
            messages (List[Message]): The list of context messages.
        Returns:
            Optional[str]: The resulting message.
        """
        pass

    def can_handle_post_instruction(self) -> bool:
        """This method is called to check that the plugin can
        handle the post_instruction method.
        Returns:
            bool: True if the plugin can handle the post_instruction method."""
        return False

    def post_instruction(self, response: str) -> str:
        """This method is called after the instruction chat is done.
        Args:
            response (str): The response.
        Returns:
            str: The resulting response.
        """
        return response

    def can_handle_pre_command(self) -> bool:
        """This method is called to check that the plugin can
        handle the pre_command method.
        Returns:
            bool: True if the plugin can handle the pre_command method."""
        return False

    def pre_command(
        self, command_name: str, arguments: Dict[str, Any]
    ) -> Tuple[str, Dict[str, Any]]:
        """This method is called before the command is executed.
        Args:
            command_name (str): The command name.
            arguments (Dict[str, Any]): The arguments.
        Returns:
            Tuple[str, Dict[str, Any]]: The command name and the arguments.
        """
        return command_name, arguments

    def can_handle_post_command(self) -> bool:
        """This method is called to check that the plugin can
        handle the post_command method.
        Returns:
            bool: True if the plugin can handle the post_command method."""
        return False

    def post_command(self, command_name: str, response: str) -> str:
        """This method is called after the command is executed.
        Args:
            command_name (str): The command name.
            response (str): The response.
        Returns:
            str: The resulting response.
        """
        return response

    def can_handle_chat_completion(
        self, messages: Dict[Any, Any], model: str, temperature: float, max_tokens: int
    ) -> bool:
        """This method is called to check that the plugin can
        handle the chat_completion method.
        Args:
            messages (List[Message]): The messages.
            model (str): The model name.
            temperature (float): The temperature.
            max_tokens (int): The max tokens.
        Returns:
            bool: True if the plugin can handle the chat_completion method."""
        return False

    def handle_chat_completion(
        self, messages: List[Message], model: str, temperature: float, max_tokens: int
    ) -> str:
        """This method is called when the chat completion is done.
        Args:
            messages (List[Message]): The messages.
            model (str): The model name.
            temperature (float): The temperature.
            max_tokens (int): The max tokens.
        Returns:
            str: The resulting response.
        """
        pass
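Since every hook above defaults to a no-op, a short hedged sketch may help show how this deleted wrapper was meant to be fed. The bundle mirrors the keys `__init__` actually reads; all values below are invented placeholders, not data from this commit.

```python
# Hypothetical usage of the deleted BaseOpenAIPlugin wrapper.
# All manifest values are invented placeholders; only the keys come from
# what __init__ actually reads.
manifests_specs_clients = {
    "manifest": {
        "name_for_model": "todo_plugin",
        "schema_version": "v1",
        "description_for_model": "Manage a simple todo list.",
    },
    "openapi_spec": {},  # parsed openapi.json would go here
    "client": None,      # generated OpenAPI client instance
}

plugin = BaseOpenAIPlugin(manifests_specs_clients)

# Every can_handle_* probe returns False, so the agent loop skips each hook
# unless a subclass opts in; the hooks themselves are passthroughs.
assert plugin.can_handle_on_response() is False
print(plugin.on_response("raw model output"))  # -> "raw model output"
```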
@@ -1,107 +0,0 @@
import json


def is_valid_int(value: str) -> bool:
    """Check if the value is a valid integer

    Args:
        value (str): The value to check

    Returns:
        bool: True if the value is a valid integer, False otherwise
    """
    try:
        int(value)
        return True
    except ValueError:
        return False


def get_command(response_json: Dict):
    """Parse the response and return the command name and arguments

    Args:
        response_json (json): The response from the AI

    Returns:
        tuple: The command name and arguments

    Raises:
        json.decoder.JSONDecodeError: If the response is not valid JSON

        Exception: If any other error occurs
    """
    try:
        if "command" not in response_json:
            return "Error:", "Missing 'command' object in JSON"

        if not isinstance(response_json, dict):
            return "Error:", f"'response_json' object is not dictionary {response_json}"

        command = response_json["command"]
        if not isinstance(command, dict):
            return "Error:", "'command' object is not a dictionary"

        if "name" not in command:
            return "Error:", "Missing 'name' field in 'command' object"

        command_name = command["name"]

        # Use an empty dictionary if 'args' field is not present in 'command' object
        arguments = command.get("args", {})

        return command_name, arguments
    except json.decoder.JSONDecodeError:
        return "Error:", "Invalid JSON"
    # All other errors, return "Error: + error message"
    except Exception as e:
        return "Error:", str(e)
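A quick worked example of the contract `get_command` implements; the payload shape follows the `command`/`name`/`args` keys the parser reads (the `memory_add` command also appears in `execute_command` below), with invented example values.

```python
# Worked example of get_command's parsing contract (payload values invented).
response_json = {
    "thoughts": {"reasoning": "need to remember the schema"},
    "command": {"name": "memory_add", "args": {"string": "users table has 12 columns"}},
}

name, args = get_command(response_json)
print(name, args)  # memory_add {'string': 'users table has 12 columns'}

# Error paths come back as an ("Error:", reason) tuple instead of raising:
print(get_command({}))  # ('Error:', "Missing 'command' object in JSON")
```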


def execute_command(
    command_registry: CommandRegistry,
    command_name: str,
    arguments,
    prompt: PromptGenerator,
):
    """Execute the command and return the result

    Args:
        command_name (str): The name of the command to execute
        arguments (dict): The arguments for the command

    Returns:
        str: The result of the command
    """
    try:
        cmd = command_registry.commands.get(command_name)

        # If the command is found, call it with the provided arguments
        if cmd:
            return cmd(**arguments)

        # TODO: Remove commands below after they are moved to the command registry.
        command_name = map_command_synonyms(command_name.lower())

        if command_name == "memory_add":
            return get_memory(CFG).add(arguments["string"])

        # TODO: Change these to take in a file rather than pasted code, if
        # non-file is given, return instructions "Input should be a python
        # filepath, write your code to file and try again
        else:
            for command in prompt.commands:
                if (
                    command_name == command["label"].lower()
                    or command_name == command["name"].lower()
                ):
                    return command["function"](**arguments)
            return (
                f"Unknown command '{command_name}'. Please refer to the 'COMMANDS'"
                " list for available commands and only respond in the specified JSON"
                " format."
            )
    except Exception as e:
        return f"Error: {str(e)}"
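To make the two-tier dispatch above concrete, here is a minimal self-contained sketch of the registry-first pattern; this `CommandRegistry` is a stand-in stub for illustration, not the project's actual class.

```python
# Stand-in sketch of the registry-first dispatch used by execute_command.
# This CommandRegistry stub is an assumption for illustration only.
class CommandRegistry:
    def __init__(self):
        self.commands = {}

    def register(self, name, fn):
        self.commands[name] = fn


registry = CommandRegistry()
registry.register("run_sql", lambda query: f"executed: {query}")

# Registry hit: the stored callable is invoked with the parsed arguments.
cmd = registry.commands.get("run_sql")
print(cmd(query="SELECT 1"))  # executed: SELECT 1

# Registry miss: execute_command then tries synonym mapping and the prompt's
# legacy command list before returning its "Unknown command" message.
print(registry.commands.get("no_such_command"))  # None -> fallback path
```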
@@ -1,6 +1,5 @@
from pilot.configs.config import Config
from pilot.prompts.generator import PromptGenerator
from __future__ import annotations
from typing import Any, Optional, Type
from pilot.prompts.prompt import build_default_prompt_generator
@@ -3,7 +3,6 @@ import io
import uuid
from base64 import b64decode

import openai
import requests
from PIL import Image

@@ -26,12 +25,9 @@ def generate_image(prompt: str, size: int = 256) -> str:
        str: The filename of the image
    """
    filename = f"{CFG.workspace_path}/{str(uuid.uuid4())}.jpg"

    # DALL-E
    if CFG.image_provider == "dalle":
        return generate_image_with_dalle(prompt, filename, size)

    # HuggingFace
-    elif CFG.image_provider == "huggingface":
+    if CFG.image_provider == "huggingface":
        return generate_image_with_hf(prompt, filename)
    # SD WebUI
    elif CFG.image_provider == "sdwebui":
@@ -76,45 +72,6 @@ def generate_image_with_hf(prompt: str, filename: str) -> str:

    return f"Saved to disk:{filename}"


def generate_image_with_dalle(prompt: str, filename: str, size: int) -> str:
    """Generate an image with DALL-E.

    Args:
        prompt (str): The prompt to use
        filename (str): The filename to save the image to
        size (int): The size of the image

    Returns:
        str: The filename of the image
    """

    # Check for supported image sizes
    if size not in [256, 512, 1024]:
        closest = min([256, 512, 1024], key=lambda x: abs(x - size))
        logger.info(
            f"DALL-E only supports image sizes of 256x256, 512x512, or 1024x1024. Setting to {closest}, was {size}."
        )
        size = closest

    response = openai.Image.create(
        prompt=prompt,
        n=1,
        size=f"{size}x{size}",
        response_format="b64_json",
        api_key=CFG.openai_api_key,
    )

    logger.info(f"Image Generated for prompt:{prompt}")

    image_data = b64decode(response["data"][0]["b64_json"])

    with open(filename, mode="wb") as png:
        png.write(image_data)

    return f"Saved to disk:{filename}"


def generate_image_with_sd_webui(
    prompt: str,
    filename: str,
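The size clamp in the deleted DALL-E helper deserves a worked example: taking `min` over the supported sizes, keyed by absolute distance, picks the nearest legal value.

```python
# Worked example of the nearest-supported-size clamp from the code above.
supported = [256, 512, 1024]

for requested in (300, 700, 2048):
    closest = min(supported, key=lambda x: abs(x - requested))
    print(requested, "->", closest)

# 300  -> 256   (|300-256| = 44 beats |300-512| = 212)
# 700  -> 512   (188 beats |700-1024| = 324)
# 2048 -> 1024  (largest supported size wins)
```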
@@ -37,7 +37,7 @@ ISDEBUG = False

DB_SETTINGS = {
    "user": "root",
-    "password": "root",
+    "password": "aa12345678",
    "host": "127.0.0.1",
    "port": 3306
}
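For orientation, a minimal sketch of how a settings dict like this is typically consumed. PyMySQL is an assumption here for illustration; the diff itself does not show which MySQL client the project uses.

```python
# Hypothetical consumer of DB_SETTINGS; PyMySQL is assumed for illustration.
import pymysql

DB_SETTINGS = {
    "user": "root",
    "password": "aa12345678",
    "host": "127.0.0.1",
    "port": 3306,
}

conn = pymysql.connect(
    host=DB_SETTINGS["host"],
    port=DB_SETTINGS["port"],
    user=DB_SETTINGS["user"],
    password=DB_SETTINGS["password"],
)
try:
    with conn.cursor() as cursor:
        cursor.execute("SHOW DATABASES")
        print(cursor.fetchall())
finally:
    conn.close()
```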
@@ -225,8 +225,7 @@ auto_dbgpt_one_shot = Conversation(
    ),
    offset=0,
    sep_style=SeparatorStyle.SINGLE,
-    sep=" ",
-    sep2="</s>",
+    sep="###",
)

auto_dbgpt_without_shot = Conversation(
pilot/plugins.py
@@ -9,14 +9,11 @@ from typing import List, Optional, Tuple
from urllib.parse import urlparse
from zipimport import zipimporter

import openapi_python_client
import requests
from auto_gpt_plugin_template import AutoGPTPluginTemplate
from openapi_python_client.cli import Config as OpenAPIConfig

from pilot.configs.config import Config
from pilot.logs import logger
from pilot.agent.base_open_ai_plugin import BaseOpenAIPlugin

def inspect_zip_for_modules(zip_path: str, debug: bool = False) -> list[str]:
    """
@@ -49,63 +46,6 @@ def write_dict_to_json_file(data: dict, file_path: str) -> None:
    with open(file_path, "w") as file:
        json.dump(data, file, indent=4)


def fetch_openai_plugins_manifest_and_spec(cfg: Config) -> dict:
    """
    Fetch the manifest for a list of OpenAI plugins.
    Args:
        urls (List): List of URLs to fetch.
    Returns:
        dict: per url dictionary of manifest and spec.
    """
    # TODO add directory scan
    manifests = {}
    for url in cfg.plugins_openai:
        openai_plugin_client_dir = f"{cfg.plugins_dir}/openai/{urlparse(url).netloc}"
        create_directory_if_not_exists(openai_plugin_client_dir)
        if not os.path.exists(f"{openai_plugin_client_dir}/ai-plugin.json"):
            try:
                response = requests.get(f"{url}/.well-known/ai-plugin.json")
                if response.status_code == 200:
                    manifest = response.json()
                    if manifest["schema_version"] != "v1":
                        logger.warn(
                            f"Unsupported manifest version: {manifest['schema_version']} for {url}"
                        )
                        continue
                    if manifest["api"]["type"] != "openapi":
                        logger.warn(
                            f"Unsupported API type: {manifest['api']['type']} for {url}"
                        )
                        continue
                    write_dict_to_json_file(
                        manifest, f"{openai_plugin_client_dir}/ai-plugin.json"
                    )
                else:
                    logger.warn(
                        f"Failed to fetch manifest for {url}: {response.status_code}"
                    )
            except requests.exceptions.RequestException as e:
                logger.warn(f"Error while requesting manifest from {url}: {e}")
        else:
            logger.info(f"Manifest for {url} already exists")
            manifest = json.load(open(f"{openai_plugin_client_dir}/ai-plugin.json"))
        if not os.path.exists(f"{openai_plugin_client_dir}/openapi.json"):
            openapi_spec = openapi_python_client._get_document(
                url=manifest["api"]["url"], path=None, timeout=5
            )
            write_dict_to_json_file(
                openapi_spec, f"{openai_plugin_client_dir}/openapi.json"
            )
        else:
            logger.info(f"OpenAPI spec for {url} already exists")
            openapi_spec = json.load(open(f"{openai_plugin_client_dir}/openapi.json"))
        manifests[url] = {"manifest": manifest, "openapi_spec": openapi_spec}
    return manifests
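For reference, the minimal `ai-plugin.json` shape the fetcher above accepts, reduced to the fields the code actually reads; the values are invented.

```python
# Minimal manifest shape accepted by fetch_openai_plugins_manifest_and_spec
# (example values invented; keys taken from the checks above).
manifest = {
    "schema_version": "v1",  # anything else is skipped with a warning
    "name_for_model": "todo_plugin",
    "description_for_model": "Manage a simple todo list.",
    "api": {
        "type": "openapi",  # only "openapi" is supported
        "url": "https://example.com/openapi.json",  # passed to _get_document
    },
}
```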

def create_directory_if_not_exists(directory_path: str) -> bool:
    """
    Create a directory if it does not exist.
@@ -126,76 +66,6 @@ def create_directory_if_not_exists(directory_path: str) -> bool:
    logger.info(f"Directory {directory_path} already exists")
    return True


def initialize_openai_plugins(
    manifests_specs: dict, cfg: Config, debug: bool = False
) -> dict:
    """
    Initialize OpenAI plugins.
    Args:
        manifests_specs (dict): per url dictionary of manifest and spec.
        cfg (Config): Config instance including plugins config
        debug (bool, optional): Enable debug logging. Defaults to False.
    Returns:
        dict: per url dictionary of manifest, spec and client.
    """
    openai_plugins_dir = f"{cfg.plugins_dir}/openai"
    if create_directory_if_not_exists(openai_plugins_dir):
        for url, manifest_spec in manifests_specs.items():
            openai_plugin_client_dir = f"{openai_plugins_dir}/{urlparse(url).hostname}"
            _meta_option = (openapi_python_client.MetaType.SETUP,)
            _config = OpenAPIConfig(
                **{
                    "project_name_override": "client",
                    "package_name_override": "client",
                }
            )
            prev_cwd = Path.cwd()
            os.chdir(openai_plugin_client_dir)
            Path("ai-plugin.json")
            if not os.path.exists("client"):
                client_results = openapi_python_client.create_new_client(
                    url=manifest_spec["manifest"]["api"]["url"],
                    path=None,
                    meta=_meta_option,
                    config=_config,
                )
                if client_results:
                    logger.warn(
                        f"Error creating OpenAPI client: {client_results[0].header} \n"
                        f" details: {client_results[0].detail}"
                    )
                    continue
            spec = importlib.util.spec_from_file_location(
                "client", "client/client/client.py"
            )
            module = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(module)
            client = module.Client(base_url=url)
            os.chdir(prev_cwd)
            manifest_spec["client"] = client
    return manifests_specs


def instantiate_openai_plugin_clients(
    manifests_specs_clients: dict, cfg: Config, debug: bool = False
) -> dict:
    """
    Instantiates BaseOpenAIPlugin instances for each OpenAI plugin.
    Args:
        manifests_specs_clients (dict): per url dictionary of manifest, spec and client.
        cfg (Config): Config instance including plugins config
        debug (bool, optional): Enable debug logging. Defaults to False.
    Returns:
        plugins (dict): per url dictionary of BaseOpenAIPlugin instances.
    """
    plugins = {}
    for url, manifest_spec_client in manifests_specs_clients.items():
        plugins[url] = BaseOpenAIPlugin(manifest_spec_client)
    return plugins

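Taken together, the three deleted helpers formed a small bootstrap pipeline. Below is a hedged sketch of how they chained, assuming a `Config` that exposes `plugins_openai` and `plugins_dir` as the code above reads them.

```python
# Hypothetical bootstrap chaining the three deleted helpers above.
# Assumes cfg provides plugins_openai and plugins_dir as read by the code.
cfg = Config()

manifests_specs = fetch_openai_plugins_manifest_and_spec(cfg)
if manifests_specs:
    manifests_specs_clients = initialize_openai_plugins(manifests_specs, cfg, debug=True)
    plugins = instantiate_openai_plugin_clients(manifests_specs_clients, cfg, debug=True)
    for url, plugin in plugins.items():
        print(url, "->", plugin._name)
```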
def scan_plugins(cfg: Config, debug: bool = False) -> List[AutoGPTPluginTemplate]:
    """Scan the plugins directory for plugins and loads them.

@@ -232,17 +102,6 @@ def scan_plugins(cfg: Config, debug: bool = False) -> List[AutoGPTPluginTemplate
                and denylist_allowlist_check(a_module.__name__, cfg)
            ):
                loaded_plugins.append(a_module())
    # OpenAI plugins
    if cfg.plugins_openai:
        manifests_specs = fetch_openai_plugins_manifest_and_spec(cfg)
        if manifests_specs.keys():
            manifests_specs_clients = initialize_openai_plugins(
                manifests_specs, cfg, debug
            )
            for url, openai_plugin_meta in manifests_specs_clients.items():
                if denylist_allowlist_check(url, cfg):
                    plugin = BaseOpenAIPlugin(openai_plugin_meta)
                    loaded_plugins.append(plugin)

    if loaded_plugins:
        logger.info(f"\nPlugins found: {len(loaded_plugins)}\n" "--------------------")
@@ -18,7 +18,6 @@ importlib-resources==5.12.0
kiwisolver==1.4.4
matplotlib==3.7.0
multidict==6.0.4
openai==0.27.0
packaging==23.0
psutil==5.9.4
pycocotools==2.0.6
@@ -59,9 +58,6 @@ mkdocs
requests
gTTS==2.3.1

-# OpenAI and Generic plugins import
-openapi-python-client==0.13.4

# Testing dependencies
pytest
asynctest
@@ -74,4 +70,6 @@ vcrpy
pytest-recording
chromadb
markdown2
+colorama
+playsound
+distro