diff --git a/pilot/configs/config.py b/pilot/configs/config.py
index e2583c7a3..7d5ee7eea 100644
--- a/pilot/configs/config.py
+++ b/pilot/configs/config.py
@@ -46,6 +46,8 @@ class Config(metaclass=Singleton):
         self.plugins: List[AutoGPTPluginTemplate] = []
         self.plugins_openai = []
 
+        self.command_registry = []
+
         self.huggingface_api_token = os.getenv("HUGGINGFACE_API_TOKEN")
         self.image_provider = os.getenv("IMAGE_PROVIDER")
         self.image_size = int(os.getenv("IMAGE_SIZE", 256))
diff --git a/pilot/configs/model_config.py b/pilot/configs/model_config.py
index 9e2de3048..0db9845a7 100644
--- a/pilot/configs/model_config.py
+++ b/pilot/configs/model_config.py
@@ -36,8 +36,8 @@ ISDEBUG = False
 
 
 DB_SETTINGS = {
-    "user": "puzzle@puzzle0_1954#dev_ipay10",
-    "password": "puzzle123",
-    "host": "obproxy.ocp2.alipay.net",
-    "port": 2883
+    "user": "root",
+    "password": "root",
+    "host": "127.0.0.1",
+    "port": 3306
 }
\ No newline at end of file
diff --git a/pilot/conversation.py b/pilot/conversation.py
index c0cd51dac..28ea3d21c 100644
--- a/pilot/conversation.py
+++ b/pilot/conversation.py
@@ -48,7 +48,6 @@ class Conversation:
                 else:
                     ret += role + ":"
             return ret
-
         else:
             raise ValueError(f"Invalid style: {self.sep_style}")
 
@@ -104,12 +103,12 @@ def gen_sqlgen_conversation(dbname):
 
 
 conv_one_shot = Conversation(
-    system="A chat between a curious human and an artificial intelligence assistant, who very familiar with database related knowledge. "
-           "The assistant gives helpful, detailed, professional and polite answers to the human's questions. ",
-    roles=("Human", "Assistant"),
+    system="A chat between a curious user and an artificial intelligence assistant, who very familiar with database related knowledge. "
+           "The assistant gives helpful, detailed, professional and polite answers to the user's questions. ",
+    roles=("USER", "Assistant"),
     messages=(
         (
-            "Human",
+            "USER",
             "What are the key differences between mysql and postgres?",
         ),
         (
@@ -155,12 +154,11 @@ auto_dbgpt_one_shot = Conversation(
     messages=(
         (
             "USER",
-            """ Answer how many users does hackernews have by query mysql database
+            """ Answer how many users does app_users have by query ob database
             Constraints:
-            1. ~4000 word limit for short term memory. Your short term memory is short, so immediately save important information to files.
-            2. If you are unsure how you previously did something or want to recall past events, thinking about similar events will help you remember.
-            3. No user assistance
-            4. Exclusively use the commands listed in double quotes e.g. "command name"
+            1. If you are unsure how you previously did something or want to recall past events, thinking about similar events will help you remember.
+            2. No user assistance
+            3. Exclusively use the commands listed in double quotes e.g. "command name"
 
             Commands:
             1. analyze_code: Analyze Code, args: "code": ""
@@ -170,13 +168,12 @@ auto_dbgpt_one_shot = Conversation(
             5. list_files: List Files in Directory, args: "directory": ""
             6. read_file: Read file, args: "filename": ""
             7. write_to_file: Write to file, args: "filename": "", "text": ""
-            8. tidb_sql_executor: "Execute SQL in TiDB Database.", args: "sql": ""
+            8. ob_sql_executor: "Execute SQL in OB Database.", args: "sql": ""
 
             Resources:
             1. Internet access for searches and information gathering.
             2. Long Term memory management.
             3. vicuna powered Agents for delegation of simple tasks.
-            4. File output.
 
             Performance Evaluation:
             1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities.
@@ -210,16 +207,16 @@ auto_dbgpt_one_shot = Conversation(
             """
             {
                 "thoughts": {
-                    "text": "thought",
-                    "reasoning": "reasoning",
-                    "plan": "- short bulleted\n- list that conveys\n- long-term plan",
-                    "criticism": "constructive self-criticism",
-                    "speak": "thoughts summary to say to user"
+                    "text": "To answer how many users by query database we need to write SQL query to get the count of the distinct users from the database. We can use ob_sql_executor command to execute the SQL query in database.",
+                    "reasoning": "We can use the sql_executor command to execute the SQL query for getting count of distinct users from the users database. We can select the count of the distinct users from the users table.",
+                    "plan": "- Write SQL query to get count of distinct users from users database\n- Use ob_sql_executor to execute the SQL query in OB database\n- Parse the SQL result to get the count\n- Respond with the count as the answer",
+                    "criticism": "None",
+                    "speak": "To get the number of users in users, I will execute an SQL query in OB database using the ob_sql_executor command and respond with the count."
                 },
                 "command": {
-                    "name": "command name",
+                    "name": "ob_sql_executor",
                     "args": {
-                        "arg name": "value"
+                        "sql": "SELECT COUNT(DISTINCT(*)) FROM users ;"
                     }
                 }
             }
@@ -227,18 +224,18 @@ auto_dbgpt_one_shot = Conversation(
         )
     ),
     offset=0,
-    sep_style=SeparatorStyle.THREE,
+    sep_style=SeparatorStyle.SINGLE,
     sep=" ",
     sep2="",
 )
 
 auto_dbgpt_without_shot = Conversation(
-    system="You are DB-GPT, an AI designed to answer questions about HackerNews by query `hackerbews` database in MySQL. "
+    system="You are DB-GPT, an AI designed to answer questions about users by query `users` database in MySQL. "
            "Your decisions must always be made independently without seeking user assistance. Play to your strengths as an LLM and pursue simple strategies with no legal complications.",
     roles=("USER", "ASSISTANT"),
     messages=(),
     offset=0,
-    sep_style=SeparatorStyle.FOUR,
+    sep_style=SeparatorStyle.SINGLE,
     sep=" ",
     sep2="",
 )
@@ -262,6 +259,7 @@ conversation_types = {
 conv_templates = {
     "conv_one_shot": conv_one_shot,
     "vicuna_v1": conv_vicuna_v1,
+    "auto_dbgpt_one_shot": auto_dbgpt_one_shot
 }
 
 if __name__ == "__main__":
diff --git a/pilot/prompts/first_conversation_prompt.py b/pilot/prompts/first_conversation_prompt.py
index 131d141c6..8f26bcc54 100644
--- a/pilot/prompts/first_conversation_prompt.py
+++ b/pilot/prompts/first_conversation_prompt.py
@@ -66,7 +66,7 @@ class FirstPrompt:
         prompt_generator.goals = fisrt_message
         prompt_generator.command_registry = command_registry
         # 加载插件中可用命令
-        cfg = Config
+        cfg = Config()
         for plugin in cfg.plugins:
             if not plugin.can_handle_post_prompt():
                 continue
diff --git a/pilot/prompts/prompt.py b/pilot/prompts/prompt.py
index 652bee5d7..1f8b54752 100644
--- a/pilot/prompts/prompt.py
+++ b/pilot/prompts/prompt.py
@@ -44,7 +44,7 @@ def build_default_prompt_generator() -> PromptGenerator:
     prompt_generator.add_resource(
         "GPT-3.5 powered Agents for delegation of simple tasks."
     )
-    prompt_generator.add_resource("File output.")
+    # prompt_generator.add_resource("File output.")
 
     # Add performance evaluations to the PromptGenerator object
     prompt_generator.add_performance_evaluation(
@@ -61,5 +61,5 @@ def build_default_prompt_generator() -> PromptGenerator:
         "Every command has a cost, so be smart and efficient. Aim to complete tasks in"
         " the least number of steps."
     )
-    prompt_generator.add_performance_evaluation("Write all code to a file.")
+    # prompt_generator.add_performance_evaluation("Write all code to a file.")
     return prompt_generator
diff --git a/pilot/server/webserver.py b/pilot/server/webserver.py
index 1b6fb3b6b..b5fcbf36e 100644
--- a/pilot/server/webserver.py
+++ b/pilot/server/webserver.py
@@ -161,6 +161,8 @@ def post_process_code(code):
 
 
 def http_bot(state, mode, db_selector, temperature, max_new_tokens, request: gr.Request):
+    # MOCK
+    autogpt = True
     print("是否是AUTO-GPT模式.", autogpt)
     start_tstamp = time.time()
     model_name = LLM_MODEL
@@ -175,25 +177,37 @@ def http_bot(state, mode, db_selector, temperature, max_new_tokens, request: gr.
 
     # TODO when tab mode is AUTO_GPT, Prompt need to rebuild.
     if len(state.messages) == state.offset + 2:
-        # 第一轮对话需要加入提示Prompt
-
-        template_name = "conv_one_shot"
-        new_state = conv_templates[template_name].copy()
-        new_state.conv_id = uuid.uuid4().hex
-
         query = state.messages[-2][1]
+        # The first round of conversation needs the prompt added
+        if(autogpt):
+            # In autogpt mode, the first round needs a dedicated prompt built for it
+            cfg = Config()
+            first_prompt = FirstPrompt()
+            first_prompt.command_registry = cfg.command_registry
 
-        # prompt 中添加上下文提示, 根据已有知识对话, 上下文提示是否也应该放在第一轮, 还是每一轮都添加上下文?
-        # 如果用户侧的问题跨度很大, 应该每一轮都加提示。
-        if db_selector:
-            new_state.append_message(new_state.roles[0], gen_sqlgen_conversation(dbname) + query)
-            new_state.append_message(new_state.roles[1], None)
-            state = new_state
+            system_prompt = first_prompt.construct_first_prompt(fisrt_message=[query])
+            logger.info("[TEST]:" + system_prompt)
+            template_name = "auto_dbgpt_one_shot"
+            new_state = conv_templates[template_name].copy()
+            new_state.append_message(role='USER', message=system_prompt)
         else:
-            new_state.append_message(new_state.roles[0], query)
-            new_state.append_message(new_state.roles[1], None)
-            state = new_state
+            template_name = "conv_one_shot"
+            new_state = conv_templates[template_name].copy()
+            new_state.conv_id = uuid.uuid4().hex
+
+        if not autogpt:
+            # Add a context hint to the prompt so the chat can draw on existing knowledge. Should the hint only go into the first round, or be added in every round?
+            # If the user's questions vary widely, the hint should probably be added every round.
+            if db_selector:
+                new_state.append_message(new_state.roles[0], gen_sqlgen_conversation(dbname) + query)
+                new_state.append_message(new_state.roles[1], None)
+                state = new_state
+            else:
+                new_state.append_message(new_state.roles[0], query)
+                new_state.append_message(new_state.roles[1], None)
+
+        state = new_state
 
     if mode == conversation_types["default_knownledge"] and not db_selector:
         query = state.messages[-2][1]
         knqa = KnownLedgeBaseQA()
@@ -457,8 +471,6 @@ if __name__ == "__main__":
 
     # 加载插件
     cfg = Config()
-    cfg.plugins_dir = "123"
-
     cfg.set_plugins(scan_plugins(cfg, cfg.debug_mode))
 
     # 加载插件可执行命令
@@ -474,15 +486,9 @@ if __name__ == "__main__":
     for command_category in command_categories:
         command_registry.import_commands(command_category)
+    cfg.command_registry = command_registry
 
-    first_prompt =FirstPrompt(cfg= cfg)
-    first_prompt.command_registry = command_registry
-
-    system_prompt = first_prompt.construct_first_prompt( fisrt_message=["this is a test goal"])
-
-    logger.info("[TEST]:" + system_prompt)
-
     logger.info(args)
     demo = build_webdemo()
     demo.queue(
diff --git a/plugins/Auto-GPT-TiDB-Serverless-Plugin.zip b/plugins/Auto-GPT-TiDB-Serverless-Plugin.zip
deleted file mode 100644
index 60e15e706..000000000
Binary files a/plugins/Auto-GPT-TiDB-Serverless-Plugin.zip and /dev/null differ
diff --git a/plugins/Db-GPT-SimpleChart-Plugin.zip b/plugins/Db-GPT-SimpleChart-Plugin.zip
deleted file mode 100644
index e71a40b7b..000000000
Binary files a/plugins/Db-GPT-SimpleChart-Plugin.zip and /dev/null differ