tuyang.yhj
2023-05-12 23:53:21 +08:00
23 changed files with 97 additions and 566 deletions

2
.gitignore vendored

@@ -8,6 +8,8 @@ __pycache__/
.idea
.vscode
.idea
.chroma
# Distribution / packaging
.Python
build/

3
.idea/.gitignore generated vendored

@@ -1,3 +0,0 @@
# Default ignored files
/shelf/
/workspace.xml

12
.idea/DB-GPT.iml generated

@@ -1,12 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
<component name="NewModuleRootManager">
<content url="file://$MODULE_DIR$" />
<orderEntry type="jdk" jdkName="Python 3.10 (Auto-GPT)" jdkType="Python SDK" />
<orderEntry type="sourceFolder" forTests="false" />
</component>
<component name="PyDocumentationSettings">
<option name="format" value="GOOGLE" />
<option name="myDocStringFormat" value="Google" />
</component>
</module>


@@ -1,6 +0,0 @@
<component name="InspectionProjectProfileManager">
<settings>
<option name="USE_PROJECT_PROFILE" value="false" />
<version value="1.0" />
</settings>
</component>

8
.idea/modules.xml generated

@@ -1,8 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectModuleManager">
<modules>
<module fileurl="file://$PROJECT_DIR$/.idea/DB-GPT.iml" filepath="$PROJECT_DIR$/.idea/DB-GPT.iml" />
</modules>
</component>
</project>

6
.idea/vcs.xml generated

@@ -1,6 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="VcsDirectoryMappings">
<mapping directory="$PROJECT_DIR$" vcs="Git" />
</component>
</project>

25
.vscode/launch.json vendored

@@ -1,25 +0,0 @@
{
// Use IntelliSense to learn about possible attributes.
// Hover to view descriptions of existing attributes.
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0",
"configurations": [
{
"name": "Python: Current File",
"type": "python",
"request": "launch",
"program": "${file}",
"console": "integratedTerminal",
"justMyCode": true,
"env": {"PYTHONPATH": "${workspaceFolder}"},
"envFile": "${workspaceFolder}/.env"
},
{
"name": "Python: Module",
"type": "python",
"request": "launch",
"module": "pilot",
"justMyCode": true,
}
]
}


@@ -43,7 +43,7 @@ The Generated SQL is runable.
# Dependencies
1. First you need to install python requirements.
```
python>=3.9
python>=3.10
pip install -r requirements.txt
```
or if you use conda environment, you can use this command
@@ -68,7 +68,7 @@ The password just for test, you can change this if necessary
2. Run model server
```
cd pilot/server
python vicuna_server.py
python llmserver.py
```
3. Run gradio webui


@@ -28,7 +28,6 @@ dependencies:
- kiwisolver==1.4.4
- matplotlib==3.7.0
- multidict==6.0.4
- openai==0.27.0
- packaging==23.0
- psutil==5.9.4
- pycocotools==2.0.6
@@ -63,4 +62,7 @@ dependencies:
- unstructured==0.6.3
- pytesseract==0.3.10
- markdown2
- chromadb
- colorama
- playsound
- distro


@@ -5,6 +5,9 @@ from __future__ import annotations
from pilot.configs.config import Config
from pilot.singleton import Singleton
from pilot.configs.config import Config
from typing import List
from pilot.model.base import Message
class AgentManager(metaclass=Singleton):
@@ -14,6 +17,19 @@ class AgentManager(metaclass=Singleton):
self.next_key = 0
self.agents = {}  # key, (task, full_message_history, model)
self.cfg = Config()
"""Agent manager for managing DB-GPT agents
In order to compatible auto gpt plugins,
we use the same template with it.
Args: next_keys
agents
cfg
"""
def __init__(self) -> None:
self.next_key = 0
self.agents = {} #TODO need to define
self.cfg = Config()
# Create new GPT agent
# TODO: Centralise use of create_chat_completion() to globally enforce token limit
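Since AgentManager is built on the Singleton metaclass, repeated construction hands back the same agent registry. A minimal, self-contained sketch of that contract (the metaclass body below is illustrative and may differ from the actual pilot.singleton implementation):
```
# Illustrative only: a minimal Singleton metaclass with the same contract
# as pilot.singleton.Singleton, plus a registry shaped like self.agents.
class Singleton(type):
    _instances = {}

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            cls._instances[cls] = super().__call__(*args, **kwargs)
        return cls._instances[cls]


class AgentManager(metaclass=Singleton):
    def __init__(self) -> None:
        self.next_key = 0
        self.agents = {}  # key -> (task, full_message_history, model)


manager_a = AgentManager()
manager_b = AgentManager()
assert manager_a is manager_b  # both names point at the one shared registry
```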


@@ -1,199 +0,0 @@
"""Handles loading of plugins."""
from typing import Any, Dict, List, Optional, Tuple, TypedDict, TypeVar
from auto_gpt_plugin_template import AutoGPTPluginTemplate
PromptGenerator = TypeVar("PromptGenerator")
class Message(TypedDict):
role: str
content: str
class BaseOpenAIPlugin(AutoGPTPluginTemplate):
"""
This is a BaseOpenAIPlugin class for generating Auto-GPT plugins.
"""
def __init__(self, manifests_specs_clients: dict):
# super().__init__()
self._name = manifests_specs_clients["manifest"]["name_for_model"]
self._version = manifests_specs_clients["manifest"]["schema_version"]
self._description = manifests_specs_clients["manifest"]["description_for_model"]
self._client = manifests_specs_clients["client"]
self._manifest = manifests_specs_clients["manifest"]
self._openapi_spec = manifests_specs_clients["openapi_spec"]
def can_handle_on_response(self) -> bool:
"""This method is called to check that the plugin can
handle the on_response method.
Returns:
bool: True if the plugin can handle the on_response method."""
return False
def on_response(self, response: str, *args, **kwargs) -> str:
"""This method is called when a response is received from the model."""
return response
def can_handle_post_prompt(self) -> bool:
"""This method is called to check that the plugin can
handle the post_prompt method.
Returns:
bool: True if the plugin can handle the post_prompt method."""
return False
def post_prompt(self, prompt: PromptGenerator) -> PromptGenerator:
"""This method is called just after the generate_prompt is called,
but actually before the prompt is generated.
Args:
prompt (PromptGenerator): The prompt generator.
Returns:
PromptGenerator: The prompt generator.
"""
return prompt
def can_handle_on_planning(self) -> bool:
"""This method is called to check that the plugin can
handle the on_planning method.
Returns:
bool: True if the plugin can handle the on_planning method."""
return False
def on_planning(
self, prompt: PromptGenerator, messages: List[Message]
) -> Optional[str]:
"""This method is called before the planning chat completion is done.
Args:
prompt (PromptGenerator): The prompt generator.
messages (List[str]): The list of messages.
"""
pass
def can_handle_post_planning(self) -> bool:
"""This method is called to check that the plugin can
handle the post_planning method.
Returns:
bool: True if the plugin can handle the post_planning method."""
return False
def post_planning(self, response: str) -> str:
"""This method is called after the planning chat completion is done.
Args:
response (str): The response.
Returns:
str: The resulting response.
"""
return response
def can_handle_pre_instruction(self) -> bool:
"""This method is called to check that the plugin can
handle the pre_instruction method.
Returns:
bool: True if the plugin can handle the pre_instruction method."""
return False
def pre_instruction(self, messages: List[Message]) -> List[Message]:
"""This method is called before the instruction chat is done.
Args:
messages (List[Message]): The list of context messages.
Returns:
List[Message]: The resulting list of messages.
"""
return messages
def can_handle_on_instruction(self) -> bool:
"""This method is called to check that the plugin can
handle the on_instruction method.
Returns:
bool: True if the plugin can handle the on_instruction method."""
return False
def on_instruction(self, messages: List[Message]) -> Optional[str]:
"""This method is called when the instruction chat is done.
Args:
messages (List[Message]): The list of context messages.
Returns:
Optional[str]: The resulting message.
"""
pass
def can_handle_post_instruction(self) -> bool:
"""This method is called to check that the plugin can
handle the post_instruction method.
Returns:
bool: True if the plugin can handle the post_instruction method."""
return False
def post_instruction(self, response: str) -> str:
"""This method is called after the instruction chat is done.
Args:
response (str): The response.
Returns:
str: The resulting response.
"""
return response
def can_handle_pre_command(self) -> bool:
"""This method is called to check that the plugin can
handle the pre_command method.
Returns:
bool: True if the plugin can handle the pre_command method."""
return False
def pre_command(
self, command_name: str, arguments: Dict[str, Any]
) -> Tuple[str, Dict[str, Any]]:
"""This method is called before the command is executed.
Args:
command_name (str): The command name.
arguments (Dict[str, Any]): The arguments.
Returns:
Tuple[str, Dict[str, Any]]: The command name and the arguments.
"""
return command_name, arguments
def can_handle_post_command(self) -> bool:
"""This method is called to check that the plugin can
handle the post_command method.
Returns:
bool: True if the plugin can handle the post_command method."""
return False
def post_command(self, command_name: str, response: str) -> str:
"""This method is called after the command is executed.
Args:
command_name (str): The command name.
response (str): The response.
Returns:
str: The resulting response.
"""
return response
def can_handle_chat_completion(
self, messages: Dict[Any, Any], model: str, temperature: float, max_tokens: int
) -> bool:
"""This method is called to check that the plugin can
handle the chat_completion method.
Args:
messages (List[Message]): The messages.
model (str): The model name.
temperature (float): The temperature.
max_tokens (int): The max tokens.
Returns:
bool: True if the plugin can handle the chat_completion method."""
return False
def handle_chat_completion(
self, messages: List[Message], model: str, temperature: float, max_tokens: int
) -> str:
"""This method is called when the chat completion is done.
Args:
messages (List[Message]): The messages.
model (str): The model name.
temperature (float): The temperature.
max_tokens (int): The max tokens.
Returns:
str: The resulting response.
"""
pass


@@ -1,107 +0,0 @@
import json
def is_valid_int(value: str) -> bool:
"""Check if the value is a valid integer
Args:
value (str): The value to check
Returns:
bool: True if the value is a valid integer, False otherwise
"""
try:
int(value)
return True
except ValueError:
return False
def get_command(response_json: Dict):
"""Parse the response and return the command name and arguments
Args:
response_json (json): The response from the AI
Returns:
tuple: The command name and arguments
Raises:
json.decoder.JSONDecodeError: If the response is not valid JSON
Exception: If any other error occurs
"""
try:
if "command" not in response_json:
return "Error:", "Missing 'command' object in JSON"
if not isinstance(response_json, dict):
return "Error:", f"'response_json' object is not dictionary {response_json}"
command = response_json["command"]
if not isinstance(command, dict):
return "Error:", "'command' object is not a dictionary"
if "name" not in command:
return "Error:", "Missing 'name' field in 'command' object"
command_name = command["name"]
# Use an empty dictionary if 'args' field is not present in 'command' object
arguments = command.get("args", {})
return command_name, arguments
except json.decoder.JSONDecodeError:
return "Error:", "Invalid JSON"
# All other errors, return "Error: + error message"
except Exception as e:
return "Error:", str(e)
def execute_command(
command_registry: CommandRegistry,
command_name: str,
arguments,
prompt: PromptGenerator,
):
"""Execute the command and return the result
Args:
command_name (str): The name of the command to execute
arguments (dict): The arguments for the command
Returns:
str: The result of the command
"""
try:
cmd = command_registry.commands.get(command_name)
# If the command is found, call it with the provided arguments
if cmd:
return cmd(**arguments)
# TODO: Remove commands below after they are moved to the command registry.
command_name = map_command_synonyms(command_name.lower())
if command_name == "memory_add":
return get_memory(CFG).add(arguments["string"])
# TODO: Change these to take in a file rather than pasted code, if
# non-file is given, return instructions "Input should be a python
# filepath, write your code to file and try again
else:
for command in prompt.commands:
if (
command_name == command["label"].lower()
or command_name == command["name"].lower()
):
return command["function"](**arguments)
return (
f"Unknown command '{command_name}'. Please refer to the 'COMMANDS'"
" list for available commands and only respond in the specified JSON"
" format."
)
except Exception as e:
return f"Error: {str(e)}"


@@ -1,6 +1,5 @@
from pilot.configs.config import Config
from pilot.prompts.generator import PromptGenerator
from __future__ import annotations
from typing import Any, Optional, Type
from pilot.prompts.prompt import build_default_prompt_generator


@@ -3,7 +3,6 @@ import io
import uuid
from base64 import b64decode
import openai
import requests
from PIL import Image
@@ -26,12 +25,9 @@ def generate_image(prompt: str, size: int = 256) -> str:
str: The filename of the image
"""
filename = f"{CFG.workspace_path}/{str(uuid.uuid4())}.jpg"
# DALL-E
if CFG.image_provider == "dalle":
return generate_image_with_dalle(prompt, filename, size)
# HuggingFace
elif CFG.image_provider == "huggingface":
if CFG.image_provider == "huggingface":
return generate_image_with_hf(prompt, filename)
# SD WebUI
elif CFG.image_provider == "sdwebui":
@@ -76,45 +72,6 @@ def generate_image_with_hf(prompt: str, filename: str) -> str:
return f"Saved to disk:{filename}" return f"Saved to disk:{filename}"
def generate_image_with_dalle(prompt: str, filename: str, size: int) -> str:
"""Generate an image with DALL-E.
Args:
prompt (str): The prompt to use
filename (str): The filename to save the image to
size (int): The size of the image
Returns:
str: The filename of the image
"""
# Check for supported image sizes
if size not in [256, 512, 1024]:
closest = min([256, 512, 1024], key=lambda x: abs(x - size))
logger.info(
f"DALL-E only supports image sizes of 256x256, 512x512, or 1024x1024. Setting to {closest}, was {size}."
)
size = closest
response = openai.Image.create(
prompt=prompt,
n=1,
size=f"{size}x{size}",
response_format="b64_json",
api_key=CFG.openai_api_key,
)
logger.info(f"Image Generated for prompt:{prompt}")
image_data = b64decode(response["data"][0]["b64_json"])
with open(filename, mode="wb") as png:
png.write(image_data)
return f"Saved to disk:{filename}"
def generate_image_with_sd_webui(
prompt: str,
filename: str,


@@ -3,6 +3,7 @@
import os
from typing import List
from auto_gpt_plugin_template import AutoGPTPluginTemplate
from pilot.singleton import Singleton
@@ -18,7 +19,10 @@ class Config(metaclass=Singleton):
self.temperature = float(os.getenv("TEMPERATURE", 0.7))
# TODO change model_config there
self.execute_local_commands = (
os.getenv("EXECUTE_LOCAL_COMMANDS", "False") == "True"
)
# User agent header to use when making HTTP requests
# Some websites might just completely deny request with an error code if
# no user agent was found.
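Note that the added EXECUTE_LOCAL_COMMANDS flag only turns on when the environment variable is exactly the string "True". A standalone illustration of that pattern (not DB-GPT code):
```
import os

# Mirror of the pattern added to Config: only the literal string "True"
# enables the flag; "true", "1", or an unset variable leave it off.
os.environ["EXECUTE_LOCAL_COMMANDS"] = "true"
execute_local_commands = os.getenv("EXECUTE_LOCAL_COMMANDS", "False") == "True"
print(execute_local_commands)  # False, because the comparison is case-sensitive
```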


@@ -37,7 +37,7 @@ ISDEBUG = False
DB_SETTINGS = {
"user": "root",
"password": "root",
"password": "aa12345678",
"host": "127.0.0.1",
"port": 3306
}
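For reference, a dict shaped like DB_SETTINGS can be unpacked straight into a MySQL client; the use of pymysql below is an assumption for illustration, not something this commit adds:
```
import pymysql  # assumed driver; the commit itself only changes the password value

DB_SETTINGS = {
    "user": "root",
    "password": "aa12345678",
    "host": "127.0.0.1",
    "port": 3306,
}

# The dict keys match pymysql.connect() keyword arguments, so the settings
# can be unpacked directly when opening a connection.
conn = pymysql.connect(**DB_SETTINGS)
try:
    with conn.cursor() as cur:
        cur.execute("SELECT VERSION()")
        print(cur.fetchone())
finally:
    conn.close()
```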


@@ -225,8 +225,7 @@ auto_dbgpt_one_shot = Conversation(
),
offset=0,
sep_style=SeparatorStyle.SINGLE,
sep=" ",
sep="###",
sep2="</s>",
)
auto_dbgpt_without_shot = Conversation(

11
pilot/model/base.py Normal file

@@ -0,0 +1,11 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from typing import List, TypedDict
class Message(TypedDict):
"""LLM Message object containing usually like (role: content) """
role: str
content: str
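A minimal sketch of how this TypedDict might be constructed and consumed; the class is redefined locally here so the snippet is self-contained, and the example messages are illustrative only:
```
from typing import List, TypedDict

# Mirrors the Message TypedDict added in pilot/model/base.py; at runtime a
# TypedDict instance is just a plain dict with the declared keys.
class Message(TypedDict):
    role: str
    content: str


def build_history() -> List[Message]:
    system: Message = {"role": "system", "content": "You are DB-GPT."}
    user: Message = {"role": "user", "content": "How many orders were placed today?"}
    return [system, user]


print(build_history()[0]["content"])
```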

3
pilot/model/chat.py Normal file

@@ -0,0 +1,3 @@
#!/usr/bin/env python3
# -*- coding:utf-8 -*-

47
pilot/model/llm_utils.py Normal file

@@ -0,0 +1,47 @@
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
from typing import List, Optional
from pilot.model.base import Message
from pilot.configs.config import Config
from pilot.server.llmserver import generate_output
def create_chat_completion(
messages: List[Message], # type: ignore
model: Optional[str] = None,
temperature: float = None,
max_tokens: Optional[int] = None,
) -> str:
"""Create a chat completion using the vicuna local model
Args:
messages(List[Message]): The messages to send to the chat completion
model (str, optional): The model to use. Defaults to None.
temperature (float, optional): The temperature to use. Defaults to 0.7.
max_tokens (int, optional): The max tokens to use. Defaults to None
Returns:
str: The response from chat completion
"""
cfg = Config()
if temperature is None:
temperature = cfg.temperature
for plugin in cfg.plugins:
if plugin.can_handle_chat_completion(
messages=messages,
model=model,
temperature=temperature,
max_tokens=max_tokens,
):
message = plugin.handle_chat_completion(
messages=messages,
model=model,
temperature=temperature,
max_tokens=max_tokens,
)
if message is not None:
return message
response = None
# TODO impl this use vicuna server api
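As a rough usage sketch (not part of this commit), the new helper could be driven as below; the model name and prompt text are made up, the import assumes the pilot package is on the path, and the final fallback to the local vicuna server is still the TODO above:
```
from pilot.model.base import Message
from pilot.model.llm_utils import create_chat_completion

# Hypothetical call site: ask the locally served model one question.
messages: list[Message] = [
    {"role": "system", "content": "You translate questions into SQL."},
    {"role": "user", "content": "How many users signed up this week?"},
]

# Registered plugins get the first chance to answer; with none configured the
# call is expected to fall through to the local model server, which this
# commit still leaves unimplemented.
print(create_chat_completion(messages, model="vicuna-13b", temperature=0.7))
```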


@@ -9,14 +9,11 @@ from typing import List, Optional, Tuple
from urllib.parse import urlparse
from zipimport import zipimporter
import openapi_python_client
import requests
from auto_gpt_plugin_template import AutoGPTPluginTemplate
from openapi_python_client.cli import Config as OpenAPIConfig
from pilot.configs.config import Config
from pilot.logs import logger
from pilot.agent.base_open_ai_plugin import BaseOpenAIPlugin
def inspect_zip_for_modules(zip_path: str, debug: bool = False) -> list[str]:
"""
@@ -49,63 +46,6 @@ def write_dict_to_json_file(data: dict, file_path: str) -> None:
with open(file_path, "w") as file:
json.dump(data, file, indent=4)
def fetch_openai_plugins_manifest_and_spec(cfg: Config) -> dict:
"""
Fetch the manifest for a list of OpenAI plugins.
Args:
urls (List): List of URLs to fetch.
Returns:
dict: per url dictionary of manifest and spec.
"""
# TODO add directory scan
manifests = {}
for url in cfg.plugins_openai:
openai_plugin_client_dir = f"{cfg.plugins_dir}/openai/{urlparse(url).netloc}"
create_directory_if_not_exists(openai_plugin_client_dir)
if not os.path.exists(f"{openai_plugin_client_dir}/ai-plugin.json"):
try:
response = requests.get(f"{url}/.well-known/ai-plugin.json")
if response.status_code == 200:
manifest = response.json()
if manifest["schema_version"] != "v1":
logger.warn(
f"Unsupported manifest version: {manifest['schem_version']} for {url}"
)
continue
if manifest["api"]["type"] != "openapi":
logger.warn(
f"Unsupported API type: {manifest['api']['type']} for {url}"
)
continue
write_dict_to_json_file(
manifest, f"{openai_plugin_client_dir}/ai-plugin.json"
)
else:
logger.warn(
f"Failed to fetch manifest for {url}: {response.status_code}"
)
except requests.exceptions.RequestException as e:
logger.warn(f"Error while requesting manifest from {url}: {e}")
else:
logger.info(f"Manifest for {url} already exists")
manifest = json.load(open(f"{openai_plugin_client_dir}/ai-plugin.json"))
if not os.path.exists(f"{openai_plugin_client_dir}/openapi.json"):
openapi_spec = openapi_python_client._get_document(
url=manifest["api"]["url"], path=None, timeout=5
)
write_dict_to_json_file(
openapi_spec, f"{openai_plugin_client_dir}/openapi.json"
)
else:
logger.info(f"OpenAPI spec for {url} already exists")
openapi_spec = json.load(open(f"{openai_plugin_client_dir}/openapi.json"))
manifests[url] = {"manifest": manifest, "openapi_spec": openapi_spec}
return manifests
def create_directory_if_not_exists(directory_path: str) -> bool:
"""
Create a directory if it does not exist.
@@ -126,76 +66,6 @@ def create_directory_if_not_exists(directory_path: str) -> bool:
logger.info(f"Directory {directory_path} already exists") logger.info(f"Directory {directory_path} already exists")
return True return True
def initialize_openai_plugins(
manifests_specs: dict, cfg: Config, debug: bool = False
) -> dict:
"""
Initialize OpenAI plugins.
Args:
manifests_specs (dict): per url dictionary of manifest and spec.
cfg (Config): Config instance including plugins config
debug (bool, optional): Enable debug logging. Defaults to False.
Returns:
dict: per url dictionary of manifest, spec and client.
"""
openai_plugins_dir = f"{cfg.plugins_dir}/openai"
if create_directory_if_not_exists(openai_plugins_dir):
for url, manifest_spec in manifests_specs.items():
openai_plugin_client_dir = f"{openai_plugins_dir}/{urlparse(url).hostname}"
_meta_option = (openapi_python_client.MetaType.SETUP,)
_config = OpenAPIConfig(
**{
"project_name_override": "client",
"package_name_override": "client",
}
)
prev_cwd = Path.cwd()
os.chdir(openai_plugin_client_dir)
Path("ai-plugin.json")
if not os.path.exists("client"):
client_results = openapi_python_client.create_new_client(
url=manifest_spec["manifest"]["api"]["url"],
path=None,
meta=_meta_option,
config=_config,
)
if client_results:
logger.warn(
f"Error creating OpenAPI client: {client_results[0].header} \n"
f" details: {client_results[0].detail}"
)
continue
spec = importlib.util.spec_from_file_location(
"client", "client/client/client.py"
)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
client = module.Client(base_url=url)
os.chdir(prev_cwd)
manifest_spec["client"] = client
return manifests_specs
def instantiate_openai_plugin_clients(
manifests_specs_clients: dict, cfg: Config, debug: bool = False
) -> dict:
"""
Instantiates BaseOpenAIPlugin instances for each OpenAI plugin.
Args:
manifests_specs_clients (dict): per url dictionary of manifest, spec and client.
cfg (Config): Config instance including plugins config
debug (bool, optional): Enable debug logging. Defaults to False.
Returns:
plugins (dict): per url dictionary of BaseOpenAIPlugin instances.
"""
plugins = {}
for url, manifest_spec_client in manifests_specs_clients.items():
plugins[url] = BaseOpenAIPlugin(manifest_spec_client)
return plugins
def scan_plugins(cfg: Config, debug: bool = False) -> List[AutoGPTPluginTemplate]:
"""Scan the plugins directory for plugins and loads them.
@@ -234,17 +104,6 @@ def scan_plugins(cfg: Config, debug: bool = False) -> List[AutoGPTPluginTemplate
and denylist_allowlist_check(a_module.__name__, cfg)
):
loaded_plugins.append(a_module())
# OpenAI plugins
if cfg.plugins_openai:
manifests_specs = fetch_openai_plugins_manifest_and_spec(cfg)
if manifests_specs.keys():
manifests_specs_clients = initialize_openai_plugins(
manifests_specs, cfg, debug
)
for url, openai_plugin_meta in manifests_specs_clients.items():
if denylist_allowlist_check(url, cfg):
plugin = BaseOpenAIPlugin(openai_plugin_meta)
loaded_plugins.append(plugin)
if loaded_plugins:
logger.info(f"\nPlugins found: {len(loaded_plugins)}\n" "--------------------")


@@ -18,7 +18,6 @@ importlib-resources==5.12.0
kiwisolver==1.4.4
matplotlib==3.7.0
multidict==6.0.4
openai==0.27.0
packaging==23.0
psutil==5.9.4
pycocotools==2.0.6
@@ -59,9 +58,6 @@ mkdocs
requests
gTTS==2.3.1
# OpenAI and Generic plugins import
openapi-python-client==0.13.4
# Testing dependencies
pytest
asynctest
@@ -74,4 +70,6 @@ vcrpy
pytest-recording
chromadb
markdown2
colorama
playsound
distro