diff --git a/.gitignore b/.gitignore
index 78f55da35..c4c4a344e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -8,6 +8,7 @@ __pycache__/
.idea
.vscode
+.chroma
# Distribution / packaging
.Python
build/
diff --git a/.idea/.gitignore b/.idea/.gitignore
deleted file mode 100644
index 26d33521a..000000000
--- a/.idea/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-# Default ignored files
-/shelf/
-/workspace.xml
diff --git a/.idea/DB-GPT.iml b/.idea/DB-GPT.iml
deleted file mode 100644
index 9725c1b01..000000000
--- a/.idea/DB-GPT.iml
+++ /dev/null
@@ -1,12 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<module type="PYTHON_MODULE" version="4">
-  <component name="NewModuleRootManager">
-    <content url="file://$MODULE_DIR$" />
-    <orderEntry type="jdk" jdkName="Python 3.10" jdkType="Python SDK" />
-    <orderEntry type="sourceFolder" forTests="false" />
-  </component>
-  <component name="PyDocumentationSettings">
-    <option name="format" value="PLAIN" />
-    <option name="myDocStringFormat" value="Plain" />
-  </component>
-</module>
\ No newline at end of file
diff --git a/.idea/inspectionProfiles/profiles_settings.xml b/.idea/inspectionProfiles/profiles_settings.xml
deleted file mode 100644
index 105ce2da2..000000000
--- a/.idea/inspectionProfiles/profiles_settings.xml
+++ /dev/null
@@ -1,6 +0,0 @@
-<component name="InspectionProjectProfileManager">
-  <settings>
-    <option name="USE_PROJECT_PROFILE" value="false" />
-    <version value="1.0" />
-  </settings>
-</component>
\ No newline at end of file
diff --git a/.idea/modules.xml b/.idea/modules.xml
deleted file mode 100644
index a22f312c8..000000000
--- a/.idea/modules.xml
+++ /dev/null
@@ -1,8 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project version="4">
-  <component name="ProjectModuleManager">
-    <modules>
-      <module fileurl="file://$PROJECT_DIR$/.idea/DB-GPT.iml" filepath="$PROJECT_DIR$/.idea/DB-GPT.iml" />
-    </modules>
-  </component>
-</project>
\ No newline at end of file
diff --git a/.idea/vcs.xml b/.idea/vcs.xml
deleted file mode 100644
index 94a25f7f4..000000000
--- a/.idea/vcs.xml
+++ /dev/null
@@ -1,6 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project version="4">
-  <component name="VcsDirectoryMappings">
-    <mapping directory="$PROJECT_DIR$" vcs="Git" />
-  </component>
-</project>
\ No newline at end of file
diff --git a/.vscode/launch.json b/.vscode/launch.json
deleted file mode 100644
index 09a35ce9c..000000000
--- a/.vscode/launch.json
+++ /dev/null
@@ -1,25 +0,0 @@
-{
- // Use IntelliSense to learn about possible attributes.
- // Hover to view descriptions of existing attributes.
- // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
- "version": "0.2.0",
- "configurations": [
- {
- "name": "Python: Current File",
- "type": "python",
- "request": "launch",
- "program": "${file}",
- "console": "integratedTerminal",
- "justMyCode": true,
- "env": {"PYTHONPATH": "${workspaceFolder}"},
- "envFile": "${workspaceFolder}/.env"
- },
- {
- "name": "Python: Module",
- "type": "python",
- "request": "launch",
- "module": "pilot",
- "justMyCode": true,
- }
- ]
-}
\ No newline at end of file
diff --git a/README.md b/README.md
index 7378799d3..845e1ffa4 100644
--- a/README.md
+++ b/README.md
@@ -43,7 +43,7 @@ The Generated SQL is runable.
# Dependencies
1. First you need to install python requirements.
```
-python>=3.9
+python>=3.10
pip install -r requirements.txt
```
or if you use a conda environment, you can use this command
@@ -68,7 +68,7 @@ The password just for test, you can change this if necessary
2. Run model server
```
cd pilot/server
-python vicuna_server.py
+python llmserver.py
```
3. Run gradio webui
diff --git a/environment.yml b/environment.yml
index d5a68abb2..756f5f465 100644
--- a/environment.yml
+++ b/environment.yml
@@ -28,7 +28,6 @@ dependencies:
- kiwisolver==1.4.4
- matplotlib==3.7.0
- multidict==6.0.4
- - openai==0.27.0
- packaging==23.0
- psutil==5.9.4
- pycocotools==2.0.6
@@ -63,4 +62,7 @@ dependencies:
- unstructured==0.6.3
- pytesseract==0.3.10
- markdown2
- - chromadb
\ No newline at end of file
+ - chromadb
+ - colorama
+ - playsound
+ - distro
\ No newline at end of file
diff --git a/pilot/agent/agent_manager.py b/pilot/agent/agent_manager.py
index 1ceecec6f..89754bd1c 100644
--- a/pilot/agent/agent_manager.py
+++ b/pilot/agent/agent_manager.py
@@ -5,6 +5,8 @@ from __future__ import annotations
from pilot.configs.config import Config
from pilot.singleton import Singleton
+from typing import List
+from pilot.model.base import Message
class AgentManager(metaclass=Singleton):
@@ -14,6 +16,8 @@ class AgentManager(metaclass=Singleton):
-        self.next_key = 0
-        self.agents = {}  # key, (task, full_message_history, model)
-        self.cfg = Config()
+        # To stay compatible with Auto-GPT plugins, we reuse the same
+        # plugin template as Auto-GPT.
+        self.next_key = 0  # key to assign to the next created agent
+        self.agents = {}  # key -> (task, full_message_history, model)
+        self.cfg = Config()
# Create new GPT agent
# TODO: Centralise use of create_chat_completion() to globally enforce token limit
diff --git a/pilot/agent/base_open_ai_plugin.py b/pilot/agent/base_open_ai_plugin.py
deleted file mode 100644
index 046295c0d..000000000
--- a/pilot/agent/base_open_ai_plugin.py
+++ /dev/null
@@ -1,199 +0,0 @@
-"""Handles loading of plugins."""
-from typing import Any, Dict, List, Optional, Tuple, TypedDict, TypeVar
-
-from auto_gpt_plugin_template import AutoGPTPluginTemplate
-
-PromptGenerator = TypeVar("PromptGenerator")
-
-
-class Message(TypedDict):
- role: str
- content: str
-
-
-class BaseOpenAIPlugin(AutoGPTPluginTemplate):
- """
- This is a BaseOpenAIPlugin class for generating Auto-GPT plugins.
- """
-
- def __init__(self, manifests_specs_clients: dict):
- # super().__init__()
- self._name = manifests_specs_clients["manifest"]["name_for_model"]
- self._version = manifests_specs_clients["manifest"]["schema_version"]
- self._description = manifests_specs_clients["manifest"]["description_for_model"]
- self._client = manifests_specs_clients["client"]
- self._manifest = manifests_specs_clients["manifest"]
- self._openapi_spec = manifests_specs_clients["openapi_spec"]
-
- def can_handle_on_response(self) -> bool:
- """This method is called to check that the plugin can
- handle the on_response method.
- Returns:
- bool: True if the plugin can handle the on_response method."""
- return False
-
- def on_response(self, response: str, *args, **kwargs) -> str:
- """This method is called when a response is received from the model."""
- return response
-
- def can_handle_post_prompt(self) -> bool:
- """This method is called to check that the plugin can
- handle the post_prompt method.
- Returns:
- bool: True if the plugin can handle the post_prompt method."""
- return False
-
- def post_prompt(self, prompt: PromptGenerator) -> PromptGenerator:
- """This method is called just after the generate_prompt is called,
- but actually before the prompt is generated.
- Args:
- prompt (PromptGenerator): The prompt generator.
- Returns:
- PromptGenerator: The prompt generator.
- """
- return prompt
-
- def can_handle_on_planning(self) -> bool:
- """This method is called to check that the plugin can
- handle the on_planning method.
- Returns:
- bool: True if the plugin can handle the on_planning method."""
- return False
-
- def on_planning(
- self, prompt: PromptGenerator, messages: List[Message]
- ) -> Optional[str]:
- """This method is called before the planning chat completion is done.
- Args:
- prompt (PromptGenerator): The prompt generator.
- messages (List[str]): The list of messages.
- """
- pass
-
- def can_handle_post_planning(self) -> bool:
- """This method is called to check that the plugin can
- handle the post_planning method.
- Returns:
- bool: True if the plugin can handle the post_planning method."""
- return False
-
- def post_planning(self, response: str) -> str:
- """This method is called after the planning chat completion is done.
- Args:
- response (str): The response.
- Returns:
- str: The resulting response.
- """
- return response
-
- def can_handle_pre_instruction(self) -> bool:
- """This method is called to check that the plugin can
- handle the pre_instruction method.
- Returns:
- bool: True if the plugin can handle the pre_instruction method."""
- return False
-
- def pre_instruction(self, messages: List[Message]) -> List[Message]:
- """This method is called before the instruction chat is done.
- Args:
- messages (List[Message]): The list of context messages.
- Returns:
- List[Message]: The resulting list of messages.
- """
- return messages
-
- def can_handle_on_instruction(self) -> bool:
- """This method is called to check that the plugin can
- handle the on_instruction method.
- Returns:
- bool: True if the plugin can handle the on_instruction method."""
- return False
-
- def on_instruction(self, messages: List[Message]) -> Optional[str]:
- """This method is called when the instruction chat is done.
- Args:
- messages (List[Message]): The list of context messages.
- Returns:
- Optional[str]: The resulting message.
- """
- pass
-
- def can_handle_post_instruction(self) -> bool:
- """This method is called to check that the plugin can
- handle the post_instruction method.
- Returns:
- bool: True if the plugin can handle the post_instruction method."""
- return False
-
- def post_instruction(self, response: str) -> str:
- """This method is called after the instruction chat is done.
- Args:
- response (str): The response.
- Returns:
- str: The resulting response.
- """
- return response
-
- def can_handle_pre_command(self) -> bool:
- """This method is called to check that the plugin can
- handle the pre_command method.
- Returns:
- bool: True if the plugin can handle the pre_command method."""
- return False
-
- def pre_command(
- self, command_name: str, arguments: Dict[str, Any]
- ) -> Tuple[str, Dict[str, Any]]:
- """This method is called before the command is executed.
- Args:
- command_name (str): The command name.
- arguments (Dict[str, Any]): The arguments.
- Returns:
- Tuple[str, Dict[str, Any]]: The command name and the arguments.
- """
- return command_name, arguments
-
- def can_handle_post_command(self) -> bool:
- """This method is called to check that the plugin can
- handle the post_command method.
- Returns:
- bool: True if the plugin can handle the post_command method."""
- return False
-
- def post_command(self, command_name: str, response: str) -> str:
- """This method is called after the command is executed.
- Args:
- command_name (str): The command name.
- response (str): The response.
- Returns:
- str: The resulting response.
- """
- return response
-
- def can_handle_chat_completion(
- self, messages: Dict[Any, Any], model: str, temperature: float, max_tokens: int
- ) -> bool:
- """This method is called to check that the plugin can
- handle the chat_completion method.
- Args:
- messages (List[Message]): The messages.
- model (str): The model name.
- temperature (float): The temperature.
- max_tokens (int): The max tokens.
- Returns:
- bool: True if the plugin can handle the chat_completion method."""
- return False
-
- def handle_chat_completion(
- self, messages: List[Message], model: str, temperature: float, max_tokens: int
- ) -> str:
- """This method is called when the chat completion is done.
- Args:
- messages (List[Message]): The messages.
- model (str): The model name.
- temperature (float): The temperature.
- max_tokens (int): The max tokens.
- Returns:
- str: The resulting response.
- """
- pass
diff --git a/pilot/commands/commands.py b/pilot/commands/commands.py
deleted file mode 100644
index 7a0d9f478..000000000
--- a/pilot/commands/commands.py
+++ /dev/null
@@ -1,107 +0,0 @@
-import json
-
-
-def is_valid_int(value: str) -> bool:
- """Check if the value is a valid integer
-
- Args:
- value (str): The value to check
-
- Returns:
- bool: True if the value is a valid integer, False otherwise
- """
- try:
- int(value)
- return True
- except ValueError:
- return False
-
-
-def get_command(response_json: Dict):
- """Parse the response and return the command name and arguments
-
- Args:
- response_json (json): The response from the AI
-
- Returns:
- tuple: The command name and arguments
-
- Raises:
- json.decoder.JSONDecodeError: If the response is not valid JSON
-
- Exception: If any other error occurs
- """
- try:
- if "command" not in response_json:
- return "Error:", "Missing 'command' object in JSON"
-
- if not isinstance(response_json, dict):
- return "Error:", f"'response_json' object is not dictionary {response_json}"
-
- command = response_json["command"]
- if not isinstance(command, dict):
- return "Error:", "'command' object is not a dictionary"
-
- if "name" not in command:
- return "Error:", "Missing 'name' field in 'command' object"
-
- command_name = command["name"]
-
- # Use an empty dictionary if 'args' field is not present in 'command' object
- arguments = command.get("args", {})
-
- return command_name, arguments
- except json.decoder.JSONDecodeError:
- return "Error:", "Invalid JSON"
- # All other errors, return "Error: + error message"
- except Exception as e:
- return "Error:", str(e)
-
-
-
-
-def execute_command(
- command_registry: CommandRegistry,
- command_name: str,
- arguments,
- prompt: PromptGenerator,
-):
- """Execute the command and return the result
-
- Args:
- command_name (str): The name of the command to execute
- arguments (dict): The arguments for the command
-
- Returns:
- str: The result of the command
- """
- try:
- cmd = command_registry.commands.get(command_name)
-
- # If the command is found, call it with the provided arguments
- if cmd:
- return cmd(**arguments)
-
- # TODO: Remove commands below after they are moved to the command registry.
- command_name = map_command_synonyms(command_name.lower())
-
- if command_name == "memory_add":
- return get_memory(CFG).add(arguments["string"])
-
- # TODO: Change these to take in a file rather than pasted code, if
- # non-file is given, return instructions "Input should be a python
- # filepath, write your code to file and try again
- else:
- for command in prompt.commands:
- if (
- command_name == command["label"].lower()
- or command_name == command["name"].lower()
- ):
- return command["function"](**arguments)
- return (
- f"Unknown command '{command_name}'. Please refer to the 'COMMANDS'"
- " list for available commands and only respond in the specified JSON"
- " format."
- )
- except Exception as e:
- return f"Error: {str(e)}"
diff --git a/pilot/commands/commands_load.py b/pilot/commands/commands_load.py
index 795ea9f3e..b173eb03a 100644
--- a/pilot/commands/commands_load.py
+++ b/pilot/commands/commands_load.py
@@ -1,6 +1,5 @@
from pilot.configs.config import Config
from pilot.prompts.generator import PromptGenerator
-from __future__ import annotations
from typing import Any, Optional, Type
from pilot.prompts.prompt import build_default_prompt_generator
diff --git a/pilot/commands/image_gen.py b/pilot/commands/image_gen.py
index d1c56d8a7..25a6c80fd 100644
--- a/pilot/commands/image_gen.py
+++ b/pilot/commands/image_gen.py
@@ -3,7 +3,6 @@ import io
import uuid
from base64 import b64decode
-import openai
import requests
from PIL import Image
@@ -26,12 +25,11 @@ def generate_image(prompt: str, size: int = 256) -> str:
str: The filename of the image
"""
filename = f"{CFG.workspace_path}/{str(uuid.uuid4())}.jpg"
-
- # DALL-E
- if CFG.image_provider == "dalle":
- return generate_image_with_dalle(prompt, filename, size)
+
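+    # Dispatch on the configured image provider; DALL-E support was
+    # removed, leaving HuggingFace and SD WebUI.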
# HuggingFace
- elif CFG.image_provider == "huggingface":
+ if CFG.image_provider == "huggingface":
return generate_image_with_hf(prompt, filename)
# SD WebUI
elif CFG.image_provider == "sdwebui":
@@ -76,45 +72,6 @@ def generate_image_with_hf(prompt: str, filename: str) -> str:
return f"Saved to disk:{filename}"
-
-def generate_image_with_dalle(prompt: str, filename: str, size: int) -> str:
- """Generate an image with DALL-E.
-
- Args:
- prompt (str): The prompt to use
- filename (str): The filename to save the image to
- size (int): The size of the image
-
- Returns:
- str: The filename of the image
- """
-
- # Check for supported image sizes
- if size not in [256, 512, 1024]:
- closest = min([256, 512, 1024], key=lambda x: abs(x - size))
- logger.info(
- f"DALL-E only supports image sizes of 256x256, 512x512, or 1024x1024. Setting to {closest}, was {size}."
- )
- size = closest
-
- response = openai.Image.create(
- prompt=prompt,
- n=1,
- size=f"{size}x{size}",
- response_format="b64_json",
- api_key=CFG.openai_api_key,
- )
-
- logger.info(f"Image Generated for prompt:{prompt}")
-
- image_data = b64decode(response["data"][0]["b64_json"])
-
- with open(filename, mode="wb") as png:
- png.write(image_data)
-
- return f"Saved to disk:{filename}"
-
-
def generate_image_with_sd_webui(
prompt: str,
filename: str,
diff --git a/pilot/configs/config.py b/pilot/configs/config.py
index 57650fee2..bd1c67ac7 100644
--- a/pilot/configs/config.py
+++ b/pilot/configs/config.py
@@ -3,6 +3,7 @@
import os
from typing import List
+
from auto_gpt_plugin_template import AutoGPTPluginTemplate
from pilot.singleton import Singleton
@@ -18,7 +19,12 @@ class Config(metaclass=Singleton):
        self.temperature = float(os.getenv("TEMPERATURE", 0.7))
-
+        # TODO: move this setting into model_config
+        # Local command execution stays disabled unless the
+        # EXECUTE_LOCAL_COMMANDS env var is explicitly set to "True".
+        self.execute_local_commands = (
+            os.getenv("EXECUTE_LOCAL_COMMANDS", "False") == "True"
+        )
# User agent header to use when making HTTP requests
# Some websites might just completely deny request with an error code if
# no user agent was found.
diff --git a/pilot/configs/model_config.py b/pilot/configs/model_config.py
index 0db9845a7..fff4ad60d 100644
--- a/pilot/configs/model_config.py
+++ b/pilot/configs/model_config.py
@@ -37,7 +37,8 @@ ISDEBUG = False
DB_SETTINGS = {
"user": "root",
- "password": "root",
+ "password": "aa12345678",
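+    # Test-only credentials (see README); change these for your own MySQL instance.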
"host": "127.0.0.1",
"port": 3306
}
\ No newline at end of file
diff --git a/pilot/conversation.py b/pilot/conversation.py
index 28ea3d21c..5f76c814f 100644
--- a/pilot/conversation.py
+++ b/pilot/conversation.py
@@ -225,8 +225,9 @@ auto_dbgpt_one_shot = Conversation(
),
offset=0,
sep_style=SeparatorStyle.SINGLE,
- sep=" ",
- sep2="",
+ sep="###",
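+    # With SeparatorStyle.SINGLE, turns are joined by a single "###"
+    # separator (vicuna-style prompt format).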
)
auto_dbgpt_without_shot = Conversation(
diff --git a/pilot/model/base.py b/pilot/model/base.py
new file mode 100644
index 000000000..8199198eb
--- /dev/null
+++ b/pilot/model/base.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+from typing import TypedDict
+
+class Message(TypedDict):
+    """A single LLM chat message: a role (e.g. "user") and its content."""
+
+    role: str
+    content: str
+
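+# Usage sketch:
+#     msg: Message = {"role": "user", "content": "Hello"}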
diff --git a/pilot/model/chat.py b/pilot/model/chat.py
new file mode 100644
index 000000000..97206f2d5
--- /dev/null
+++ b/pilot/model/chat.py
@@ -0,0 +1,3 @@
+#!/usr/bin/env python3
+# -*- coding:utf-8 -*-
+
diff --git a/pilot/model/llm_utils.py b/pilot/model/llm_utils.py
new file mode 100644
index 000000000..196246118
--- /dev/null
+++ b/pilot/model/llm_utils.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python3
+# -*- coding:utf-8 -*-
+
+from typing import List, Optional
+from pilot.model.base import Message
+from pilot.configs.config import Config
+from pilot.server.llmserver import generate_output
+
+def create_chat_completion(
+ messages: List[Message], # type: ignore
+ model: Optional[str] = None,
+    temperature: Optional[float] = None,
+ max_tokens: Optional[int] = None,
+) -> str:
+ """Create a chat completion using the vicuna local model
+
+ Args:
+ messages(List[Message]): The messages to send to the chat completion
+ model (str, optional): The model to use. Defaults to None.
+        temperature (float, optional): The temperature to use. Defaults to the configured temperature.
+ max_tokens (int, optional): The max tokens to use. Defaults to None
+
+ Returns:
+ str: The response from chat completion
+ """
+ cfg = Config()
+ if temperature is None:
+ temperature = cfg.temperature
+
+ for plugin in cfg.plugins:
+ if plugin.can_handle_chat_completion(
+ messages=messages,
+ model=model,
+ temperature=temperature,
+ max_tokens=max_tokens,
+ ):
+ message = plugin.handle_chat_completion(
+ messages=messages,
+ model=model,
+ temperature=temperature,
+ max_tokens=max_tokens,
+ )
+ if message is not None:
+ return message
+
+    response = None
+    # TODO: implement this call against the vicuna server API.
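+    # Fallback sketch (assumption: generate_output accepts a flattened
+    # prompt string; the real vicuna server API may differ, hence the TODO):
+    # prompt = "".join(f"{m['role']}: {m['content']}\n" for m in messages)
+    # response = generate_output(prompt)
+    return response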
diff --git a/pilot/plugins.py b/pilot/plugins.py
index 5f99e34ff..196a68b22 100644
--- a/pilot/plugins.py
+++ b/pilot/plugins.py
@@ -9,14 +9,11 @@ from typing import List, Optional, Tuple
from urllib.parse import urlparse
from zipimport import zipimporter
-import openapi_python_client
import requests
from auto_gpt_plugin_template import AutoGPTPluginTemplate
-from openapi_python_client.cli import Config as OpenAPIConfig
from pilot.configs.config import Config
from pilot.logs import logger
-from pilot.agent.base_open_ai_plugin import BaseOpenAIPlugin
def inspect_zip_for_modules(zip_path: str, debug: bool = False) -> list[str]:
"""
@@ -49,63 +46,6 @@ def write_dict_to_json_file(data: dict, file_path: str) -> None:
with open(file_path, "w") as file:
json.dump(data, file, indent=4)
-
-
-def fetch_openai_plugins_manifest_and_spec(cfg: Config) -> dict:
- """
- Fetch the manifest for a list of OpenAI plugins.
- Args:
- urls (List): List of URLs to fetch.
- Returns:
- dict: per url dictionary of manifest and spec.
- """
- # TODO add directory scan
- manifests = {}
- for url in cfg.plugins_openai:
- openai_plugin_client_dir = f"{cfg.plugins_dir}/openai/{urlparse(url).netloc}"
- create_directory_if_not_exists(openai_plugin_client_dir)
- if not os.path.exists(f"{openai_plugin_client_dir}/ai-plugin.json"):
- try:
- response = requests.get(f"{url}/.well-known/ai-plugin.json")
- if response.status_code == 200:
- manifest = response.json()
- if manifest["schema_version"] != "v1":
- logger.warn(
- f"Unsupported manifest version: {manifest['schem_version']} for {url}"
- )
- continue
- if manifest["api"]["type"] != "openapi":
- logger.warn(
- f"Unsupported API type: {manifest['api']['type']} for {url}"
- )
- continue
- write_dict_to_json_file(
- manifest, f"{openai_plugin_client_dir}/ai-plugin.json"
- )
- else:
- logger.warn(
- f"Failed to fetch manifest for {url}: {response.status_code}"
- )
- except requests.exceptions.RequestException as e:
- logger.warn(f"Error while requesting manifest from {url}: {e}")
- else:
- logger.info(f"Manifest for {url} already exists")
- manifest = json.load(open(f"{openai_plugin_client_dir}/ai-plugin.json"))
- if not os.path.exists(f"{openai_plugin_client_dir}/openapi.json"):
- openapi_spec = openapi_python_client._get_document(
- url=manifest["api"]["url"], path=None, timeout=5
- )
- write_dict_to_json_file(
- openapi_spec, f"{openai_plugin_client_dir}/openapi.json"
- )
- else:
- logger.info(f"OpenAPI spec for {url} already exists")
- openapi_spec = json.load(open(f"{openai_plugin_client_dir}/openapi.json"))
- manifests[url] = {"manifest": manifest, "openapi_spec": openapi_spec}
- return manifests
-
-
-
def create_directory_if_not_exists(directory_path: str) -> bool:
"""
Create a directory if it does not exist.
@@ -126,76 +66,6 @@ def create_directory_if_not_exists(directory_path: str) -> bool:
logger.info(f"Directory {directory_path} already exists")
return True
-
-def initialize_openai_plugins(
- manifests_specs: dict, cfg: Config, debug: bool = False
-) -> dict:
- """
- Initialize OpenAI plugins.
- Args:
- manifests_specs (dict): per url dictionary of manifest and spec.
- cfg (Config): Config instance including plugins config
- debug (bool, optional): Enable debug logging. Defaults to False.
- Returns:
- dict: per url dictionary of manifest, spec and client.
- """
- openai_plugins_dir = f"{cfg.plugins_dir}/openai"
- if create_directory_if_not_exists(openai_plugins_dir):
- for url, manifest_spec in manifests_specs.items():
- openai_plugin_client_dir = f"{openai_plugins_dir}/{urlparse(url).hostname}"
- _meta_option = (openapi_python_client.MetaType.SETUP,)
- _config = OpenAPIConfig(
- **{
- "project_name_override": "client",
- "package_name_override": "client",
- }
- )
- prev_cwd = Path.cwd()
- os.chdir(openai_plugin_client_dir)
- Path("ai-plugin.json")
- if not os.path.exists("client"):
- client_results = openapi_python_client.create_new_client(
- url=manifest_spec["manifest"]["api"]["url"],
- path=None,
- meta=_meta_option,
- config=_config,
- )
- if client_results:
- logger.warn(
- f"Error creating OpenAPI client: {client_results[0].header} \n"
- f" details: {client_results[0].detail}"
- )
- continue
- spec = importlib.util.spec_from_file_location(
- "client", "client/client/client.py"
- )
- module = importlib.util.module_from_spec(spec)
- spec.loader.exec_module(module)
- client = module.Client(base_url=url)
- os.chdir(prev_cwd)
- manifest_spec["client"] = client
- return manifests_specs
-
-
-def instantiate_openai_plugin_clients(
- manifests_specs_clients: dict, cfg: Config, debug: bool = False
-) -> dict:
- """
- Instantiates BaseOpenAIPlugin instances for each OpenAI plugin.
- Args:
- manifests_specs_clients (dict): per url dictionary of manifest, spec and client.
- cfg (Config): Config instance including plugins config
- debug (bool, optional): Enable debug logging. Defaults to False.
- Returns:
- plugins (dict): per url dictionary of BaseOpenAIPlugin instances.
-
- """
- plugins = {}
- for url, manifest_spec_client in manifests_specs_clients.items():
- plugins[url] = BaseOpenAIPlugin(manifest_spec_client)
- return plugins
-
-
def scan_plugins(cfg: Config, debug: bool = False) -> List[AutoGPTPluginTemplate]:
"""Scan the plugins directory for plugins and loads them.
@@ -234,17 +104,6 @@ def scan_plugins(cfg: Config, debug: bool = False) -> List[AutoGPTPluginTemplate
and denylist_allowlist_check(a_module.__name__, cfg)
):
loaded_plugins.append(a_module())
- # OpenAI plugins
- if cfg.plugins_openai:
- manifests_specs = fetch_openai_plugins_manifest_and_spec(cfg)
- if manifests_specs.keys():
- manifests_specs_clients = initialize_openai_plugins(
- manifests_specs, cfg, debug
- )
- for url, openai_plugin_meta in manifests_specs_clients.items():
- if denylist_allowlist_check(url, cfg):
- plugin = BaseOpenAIPlugin(openai_plugin_meta)
- loaded_plugins.append(plugin)
if loaded_plugins:
logger.info(f"\nPlugins found: {len(loaded_plugins)}\n" "--------------------")
diff --git a/pilot/server/vicuna_server.py b/pilot/server/llmserver.py
similarity index 100%
rename from pilot/server/vicuna_server.py
rename to pilot/server/llmserver.py
diff --git a/requirements.txt b/requirements.txt
index 837df36a0..5654dba6f 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -18,7 +18,6 @@ importlib-resources==5.12.0
kiwisolver==1.4.4
matplotlib==3.7.0
multidict==6.0.4
-openai==0.27.0
packaging==23.0
psutil==5.9.4
pycocotools==2.0.6
@@ -59,9 +58,6 @@ mkdocs
requests
gTTS==2.3.1
-# OpenAI and Generic plugins import
-openapi-python-client==0.13.4
-
# Testing dependencies
pytest
asynctest
@@ -74,4 +70,6 @@ vcrpy
pytest-recording
chromadb
markdown2
-
+colorama
+playsound
+distro
\ No newline at end of file