From e65cf4d83f1d33a85030254199d5c881ae8ec5e0 Mon Sep 17 00:00:00 2001 From: yhjun1026 <460342015@qq.com> Date: Mon, 21 Apr 2025 11:39:37 +0800 Subject: [PATCH] feat(agent):Optimize the agent architecture, support gptsapp to be bound as a sub-agent, and strip vis to become an extended component. --- assets/schema/dbgpt.sql | 19 + .../upgrade/v0_7_2/upgrade_to_v0.7.2.sql | 26 + assets/schema/upgrade/v0_7_2/v0.7.1.sql | 584 ++++++++++++++++++ configs/dbgpt-local-vllm.toml | 2 +- configs/dev.toml | 96 +++ examples/agents/sandbox_code_agent_example.py | 10 +- .../src/dbgpt_app/operators/datasource.py | 2 +- .../src/dbgpt/agent/core/action/base.py | 23 +- .../dbgpt-core/src/dbgpt/agent/core/agent.py | 87 ++- .../src/dbgpt/agent/core/base_agent.py | 144 +++-- .../src/dbgpt/agent/core/memory/gpts/base.py | 72 ++- .../core/memory/gpts/default_gpts_memory.py | 35 +- .../agent/core/memory/gpts/gpts_memory.py | 489 +++++---------- .../src/dbgpt/agent/core/plan/__init__.py | 13 +- .../dbgpt/agent/core/plan/auto/__init__.py | 0 .../agent/core/plan/{ => auto}/plan_action.py | 37 +- .../core/plan/{ => auto}/planner_agent.py | 26 +- .../core/plan/{ => auto}/team_auto_plan.py | 57 +- .../src/dbgpt/agent/core/plan/base.py | 40 ++ .../core/plan/react/final_report_agent.py | 149 +++++ .../agent/core/plan/react/plan_action.py | 165 +++++ .../agent/core/plan/react/planner_agent.py | 115 ++++ .../agent/core/plan/react/team_react_plan.py | 308 +++++++++ .../src/dbgpt/agent/core/profile/base.py | 19 + .../dbgpt-core/src/dbgpt/agent/core/role.py | 5 + .../agent/expand/actions/chart_action.py | 10 +- .../dbgpt/agent/expand/actions/code_action.py | 10 +- .../agent/expand/actions/dashboard_action.py | 10 +- .../agent/expand/actions/indicator_action.py | 11 +- .../dbgpt/agent/expand/actions/tool_action.py | 10 +- .../src/dbgpt/agent/util/llm/llm.py | 61 +- .../src/dbgpt/agent/util/llm/llm_client.py | 141 ++--- .../dbgpt/agent/util/llm/strategy/priority.py | 21 +- 
.../src/dbgpt/core/interface/llm.py | 13 + .../model/cluster/worker/default_worker.py | 5 +- packages/dbgpt-core/src/dbgpt/vis/__init__.py | 21 +- packages/dbgpt-core/src/dbgpt/vis/base.py | 7 + packages/dbgpt-core/src/dbgpt/vis/client.py | 61 -- packages/dbgpt-core/src/dbgpt/vis/schema.py | 112 ++++ .../dbgpt-core/src/dbgpt/vis/vis_converter.py | 196 ++++++ .../src/dbgpt_ext/vis/gpt_vis/__init__.py | 0 .../vis/gpt_vis/gpt_vis_converter.py | 161 +++++ .../vis/gpt_vis/gpt_vis_converter_v2.py | 268 ++++++++ .../dbgpt_ext/vis/gpt_vis}/tags/__init__.py | 0 .../vis/gpt_vis}/tags/vis_agent_message.py | 2 +- .../vis/gpt_vis}/tags/vis_agent_plans.py | 2 +- .../vis/gpt_vis}/tags/vis_api_response.py | 2 +- .../vis/gpt_vis}/tags/vis_app_link.py | 2 +- .../dbgpt_ext/vis/gpt_vis}/tags/vis_chart.py | 2 +- .../dbgpt_ext/vis/gpt_vis}/tags/vis_code.py | 2 +- .../vis/gpt_vis}/tags/vis_dashboard.py | 2 +- .../vis/gpt_vis}/tags/vis_gpts_execution.py | 2 +- .../vis/gpt_vis}/tags/vis_gpts_result.py | 2 +- .../dbgpt_ext/vis/gpt_vis}/tags/vis_plugin.py | 2 +- .../vis/gpt_vis}/tags/vis_thinking.py | 2 +- .../dbgpt_serve/agent/agents/controller.py | 257 +++++--- .../agent/agents/db_gpts_memory.py | 9 + .../agents/expand/actions/app_link_action.py | 13 +- .../agents/expand/actions/app_start_action.py | 9 +- .../actions/intent_recognition_action.py | 9 +- .../app_resource_start_assisant_agent.py | 10 +- .../src/dbgpt_serve/agent/db/gpts_app.py | 164 ++++- .../dbgpt_serve/agent/db/gpts_messages_db.py | 104 +++- .../src/dbgpt_serve/agent/db/gpts_plans_db.py | 41 +- .../conversation/service/service.py | 14 +- 65 files changed, 3463 insertions(+), 830 deletions(-) create mode 100644 assets/schema/upgrade/v0_7_2/upgrade_to_v0.7.2.sql create mode 100644 assets/schema/upgrade/v0_7_2/v0.7.1.sql create mode 100644 configs/dev.toml create mode 100644 packages/dbgpt-core/src/dbgpt/agent/core/plan/auto/__init__.py rename packages/dbgpt-core/src/dbgpt/agent/core/plan/{ => auto}/plan_action.py (79%) rename 
packages/dbgpt-core/src/dbgpt/agent/core/plan/{ => auto}/planner_agent.py (90%) rename packages/dbgpt-core/src/dbgpt/agent/core/plan/{ => auto}/team_auto_plan.py (86%) create mode 100644 packages/dbgpt-core/src/dbgpt/agent/core/plan/base.py create mode 100644 packages/dbgpt-core/src/dbgpt/agent/core/plan/react/final_report_agent.py create mode 100644 packages/dbgpt-core/src/dbgpt/agent/core/plan/react/plan_action.py create mode 100644 packages/dbgpt-core/src/dbgpt/agent/core/plan/react/planner_agent.py create mode 100644 packages/dbgpt-core/src/dbgpt/agent/core/plan/react/team_react_plan.py delete mode 100644 packages/dbgpt-core/src/dbgpt/vis/client.py create mode 100644 packages/dbgpt-core/src/dbgpt/vis/schema.py create mode 100644 packages/dbgpt-core/src/dbgpt/vis/vis_converter.py create mode 100644 packages/dbgpt-ext/src/dbgpt_ext/vis/gpt_vis/__init__.py create mode 100644 packages/dbgpt-ext/src/dbgpt_ext/vis/gpt_vis/gpt_vis_converter.py create mode 100644 packages/dbgpt-ext/src/dbgpt_ext/vis/gpt_vis/gpt_vis_converter_v2.py rename packages/{dbgpt-core/src/dbgpt/vis => dbgpt-ext/src/dbgpt_ext/vis/gpt_vis}/tags/__init__.py (100%) rename packages/{dbgpt-core/src/dbgpt/vis => dbgpt-ext/src/dbgpt_ext/vis/gpt_vis}/tags/vis_agent_message.py (87%) rename packages/{dbgpt-core/src/dbgpt/vis => dbgpt-ext/src/dbgpt_ext/vis/gpt_vis}/tags/vis_agent_plans.py (91%) rename packages/{dbgpt-core/src/dbgpt/vis => dbgpt-ext/src/dbgpt_ext/vis/gpt_vis}/tags/vis_api_response.py (87%) rename packages/{dbgpt-core/src/dbgpt/vis => dbgpt-ext/src/dbgpt_ext/vis/gpt_vis}/tags/vis_app_link.py (86%) rename packages/{dbgpt-core/src/dbgpt/vis => dbgpt-ext/src/dbgpt_ext/vis/gpt_vis}/tags/vis_chart.py (99%) rename packages/{dbgpt-core/src/dbgpt/vis => dbgpt-ext/src/dbgpt_ext/vis/gpt_vis}/tags/vis_code.py (85%) rename packages/{dbgpt-core/src/dbgpt/vis => dbgpt-ext/src/dbgpt_ext/vis/gpt_vis}/tags/vis_dashboard.py (98%) rename packages/{dbgpt-core/src/dbgpt/vis => 
dbgpt-ext/src/dbgpt_ext/vis/gpt_vis}/tags/vis_gpts_execution.py (86%) rename packages/{dbgpt-core/src/dbgpt/vis => dbgpt-ext/src/dbgpt_ext/vis/gpt_vis}/tags/vis_gpts_result.py (88%) rename packages/{dbgpt-core/src/dbgpt/vis => dbgpt-ext/src/dbgpt_ext/vis/gpt_vis}/tags/vis_plugin.py (85%) rename packages/{dbgpt-core/src/dbgpt/vis => dbgpt-ext/src/dbgpt_ext/vis/gpt_vis}/tags/vis_thinking.py (95%) diff --git a/assets/schema/dbgpt.sql b/assets/schema/dbgpt.sql index 8bdee4755..cc295e025 100644 --- a/assets/schema/dbgpt.sql +++ b/assets/schema/dbgpt.sql @@ -245,12 +245,19 @@ CREATE TABLE IF NOT EXISTS `gpts_instance` ( UNIQUE KEY `uk_gpts` (`gpts_name`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4 COMMENT="gpts instance"; + CREATE TABLE `gpts_messages` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'autoincrement id', `conv_id` varchar(255) NOT NULL COMMENT 'The unique id of the conversation record', + `message_id` varchar(255) NOT NULL DEFAULT '' COMMENT 'message id', `sender` varchar(255) NOT NULL COMMENT 'Who speaking in the current conversation turn', `receiver` varchar(255) NOT NULL COMMENT 'Who receive message in the current conversation turn', + `sender_name` varchar(255) NOT NULL DEFAULT '' COMMENT 'Who(name) speaking in the current conversation turn', + `receiver_name` varchar(255) NOT NULL DEFAULT '' COMMENT 'Who(name) receive message in the current conversation turn', + `avatar` varchar(255) DEFAULT '' COMMENT 'Who(avatar) send message in the current conversation turn', `model_name` varchar(255) DEFAULT NULL COMMENT 'message generate model', + `thinking` longtext DEFAULT NULL COMMENT 'llm thinking text', + `show_message` tinyint(4) DEFAULT NULL COMMENT 'Whether the current message needs to be displayed to the user', `rounds` int(11) NOT NULL COMMENT 'dialogue turns', `is_success` int(4) NULL DEFAULT 0 COMMENT 'agent message is success', `app_code` varchar(255) NOT NULL COMMENT 'Current AI assistant code',
@@ -285,6 +298,12 @@ CREATE TABLE `gpts_plans` ( `result` longtext COMMENT 'subtask result', `created_at` datetime DEFAULT NULL COMMENT 'create time', `updated_at` datetime DEFAULT NULL COMMENT 'last update time', + `conv_round` int(11) NOT NULL DEFAULT '0' COMMENT 'the current conversation turn number', + `sub_task_id` varchar(255) NOT NULL DEFAULT '' COMMENT 'the message task id', + `task_parent` varchar(255) DEFAULT '' COMMENT 'Subtask parent task id', + `action` text DEFAULT NULL COMMENT 'plan action', + `action_input` longtext DEFAULT NULL COMMENT 'plan action input', + `task_uid` varchar(255) NOT NULL DEFAULT '' COMMENT 'task uid', PRIMARY KEY (`id`), UNIQUE KEY `uk_sub_task` (`conv_id`,`sub_task_num`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4 COMMENT="gpt plan"; diff --git a/assets/schema/upgrade/v0_7_2/upgrade_to_v0.7.2.sql b/assets/schema/upgrade/v0_7_2/upgrade_to_v0.7.2.sql new file mode 100644 index 000000000..d24f569f7 --- /dev/null +++ b/assets/schema/upgrade/v0_7_2/upgrade_to_v0.7.2.sql @@ -0,0 
+1,26 @@ +-- From 0.7.1 to 0.7.2, we have the following changes: +USE dbgpt; + +-- Add message metadata columns (message_id, thinking, show_message, sender/receiver names, avatar) to gpts_messages table +ALTER TABLE `gpts_messages` + ADD COLUMN `message_id` varchar(255) NOT NULL DEFAULT '' COMMENT 'message id', + ADD COLUMN `thinking` longtext DEFAULT NULL COMMENT 'llm thinking text', + ADD COLUMN `show_message` tinyint(4) DEFAULT NULL COMMENT 'Whether the current message needs to be displayed to the user', + ADD COLUMN `sender_name` varchar(255) NOT NULL DEFAULT '' COMMENT 'Who(name) speaking in the current conversation turn', + ADD COLUMN `receiver_name` varchar(255) NOT NULL DEFAULT '' COMMENT 'Who(name) receive message in the current conversation turn', + ADD COLUMN `avatar` varchar(255) DEFAULT '' COMMENT 'Who(avatar) send message in the current conversation turn'; + + + +ALTER TABLE `gpts_plans` + ADD COLUMN conv_round int(11) NOT NULL DEFAULT '0' COMMENT 'the current conversation turn number', + ADD COLUMN sub_task_id varchar(255) NOT NULL DEFAULT '' COMMENT 'the message task id', + ADD COLUMN task_parent varchar(255) DEFAULT '' COMMENT 'Subtask parent task id', + ADD COLUMN `action` text DEFAULT NULL COMMENT 'plan action', + ADD COLUMN `action_input` longtext DEFAULT NULL COMMENT 'plan action input', + ADD COLUMN `task_uid` varchar(255) NOT NULL DEFAULT '' COMMENT 'task uid'; + +ALTER TABLE `gpts_app_detail` + ADD COLUMN `type` varchar(255) COLLATE utf8mb4_unicode_ci DEFAULT NULL COMMENT 'bind agent type, default agent'; + + diff --git a/assets/schema/upgrade/v0_7_2/v0.7.1.sql b/assets/schema/upgrade/v0_7_2/v0.7.1.sql new file mode 100644 index 000000000..5ef94fda6 --- /dev/null +++ b/assets/schema/upgrade/v0_7_2/v0.7.1.sql @@ -0,0 +1,584 @@ +-- You can change `dbgpt` to your actual metadata database name in your `.env` file +-- eg. 
`LOCAL_DB_NAME=dbgpt` + +CREATE +DATABASE IF NOT EXISTS dbgpt; +use dbgpt; + +-- For alembic migration tool +CREATE TABLE IF NOT EXISTS `alembic_version` +( + version_num VARCHAR(32) NOT NULL, + CONSTRAINT alembic_version_pkc PRIMARY KEY (version_num) +) DEFAULT CHARSET=utf8mb4 ; + +CREATE TABLE IF NOT EXISTS `knowledge_space` +( + `id` int NOT NULL AUTO_INCREMENT COMMENT 'auto increment id', + `name` varchar(100) NOT NULL COMMENT 'knowledge space name', + `vector_type` varchar(50) NOT NULL COMMENT 'vector type', + `domain_type` varchar(50) NOT NULL COMMENT 'domain type', + `desc` varchar(500) NOT NULL COMMENT 'description', + `owner` varchar(100) DEFAULT NULL COMMENT 'owner', + `context` TEXT DEFAULT NULL COMMENT 'context argument', + `gmt_created` TIMESTAMP DEFAULT CURRENT_TIMESTAMP COMMENT 'created time', + `gmt_modified` TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'update time', + PRIMARY KEY (`id`), + KEY `idx_name` (`name`) COMMENT 'index:idx_name' +) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4 COMMENT='knowledge space table'; + +CREATE TABLE IF NOT EXISTS `knowledge_document` +( + `id` int NOT NULL AUTO_INCREMENT COMMENT 'auto increment id', + `doc_name` varchar(100) NOT NULL COMMENT 'document path name', + `doc_type` varchar(50) NOT NULL COMMENT 'doc type', + `doc_token` varchar(100) NULL COMMENT 'doc token', + `space` varchar(50) NOT NULL COMMENT 'knowledge space', + `chunk_size` int NOT NULL COMMENT 'chunk size', + `last_sync` TIMESTAMP DEFAULT CURRENT_TIMESTAMP COMMENT 'last sync time', + `status` varchar(50) NOT NULL COMMENT 'status TODO,RUNNING,FAILED,FINISHED', + `content` LONGTEXT NOT NULL COMMENT 'knowledge embedding sync result', + `result` TEXT NULL COMMENT 'knowledge content', + `questions` TEXT NULL COMMENT 'document related questions', + `vector_ids` LONGTEXT NULL COMMENT 'vector_ids', + `summary` LONGTEXT NULL COMMENT 'knowledge summary', + `gmt_created` TIMESTAMP DEFAULT CURRENT_TIMESTAMP COMMENT 
'created time', + `gmt_modified` TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'update time', + PRIMARY KEY (`id`), + KEY `idx_doc_name` (`doc_name`) COMMENT 'index:idx_doc_name' +) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4 COMMENT='knowledge document table'; + +CREATE TABLE IF NOT EXISTS `document_chunk` +( + `id` int NOT NULL AUTO_INCREMENT COMMENT 'auto increment id', + `doc_name` varchar(100) NOT NULL COMMENT 'document path name', + `doc_type` varchar(50) NOT NULL COMMENT 'doc type', + `document_id` int NOT NULL COMMENT 'document parent id', + `content` longtext NOT NULL COMMENT 'chunk content', + `questions` text NULL COMMENT 'chunk related questions', + `meta_info` text NOT NULL COMMENT 'metadata info', + `gmt_created` timestamp NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'created time', + `gmt_modified` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'update time', + PRIMARY KEY (`id`), + KEY `idx_document_id` (`document_id`) COMMENT 'index:document_id' +) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4 COMMENT='knowledge document chunk detail'; + + +CREATE TABLE IF NOT EXISTS `connect_config` +( + `id` int NOT NULL AUTO_INCREMENT COMMENT 'autoincrement id', + `db_type` varchar(255) NOT NULL COMMENT 'db type', + `db_name` varchar(255) NOT NULL COMMENT 'db name', + `db_path` varchar(255) DEFAULT NULL COMMENT 'file db path', + `db_host` varchar(255) DEFAULT NULL COMMENT 'db connect host(not file db)', + `db_port` varchar(255) DEFAULT NULL COMMENT 'db cnnect port(not file db)', + `db_user` varchar(255) DEFAULT NULL COMMENT 'db user', + `db_pwd` varchar(255) DEFAULT NULL COMMENT 'db password', + `comment` text COMMENT 'db comment', + `sys_code` varchar(128) DEFAULT NULL COMMENT 'System code', + `user_name` varchar(255) DEFAULT NULL COMMENT 'user name', + `user_id` varchar(255) DEFAULT NULL COMMENT 'user id', + `gmt_created` datetime DEFAULT CURRENT_TIMESTAMP COMMENT 'Record creation time', + 
`gmt_modified` datetime DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'Record update time', + `ext_config` text COMMENT 'Extended configuration, json format', + PRIMARY KEY (`id`), + UNIQUE KEY `uk_db` (`db_name`), + KEY `idx_q_db_type` (`db_type`) +) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4 COMMENT 'Connection confi'; + +CREATE TABLE IF NOT EXISTS `chat_history` +( + `id` int NOT NULL AUTO_INCREMENT COMMENT 'autoincrement id', + `conv_uid` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL COMMENT 'Conversation record unique id', + `chat_mode` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL COMMENT 'Conversation scene mode', + `summary` longtext COLLATE utf8mb4_unicode_ci NOT NULL COMMENT 'Conversation record summary', + `user_name` varchar(255) COLLATE utf8mb4_unicode_ci DEFAULT NULL COMMENT 'interlocutor', + `messages` text COLLATE utf8mb4_unicode_ci COMMENT 'Conversation details', + `message_ids` text COLLATE utf8mb4_unicode_ci COMMENT 'Message id list, split by comma', + `app_code` varchar(255) COLLATE utf8mb4_unicode_ci DEFAULT NULL COMMENT 'App unique code', + `sys_code` varchar(128) DEFAULT NULL COMMENT 'System code', + `gmt_created` timestamp NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'created time', + `gmt_modified` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'update time', + UNIQUE KEY `conv_uid` (`conv_uid`), + PRIMARY KEY (`id`), + KEY `idx_chat_his_app_code` (`app_code`) +) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci COMMENT 'Chat history'; + +CREATE TABLE IF NOT EXISTS `chat_history_message` +( + `id` int NOT NULL AUTO_INCREMENT COMMENT 'autoincrement id', + `conv_uid` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL COMMENT 'Conversation record unique id', + `index` int NOT NULL COMMENT 'Message index', + `round_index` int NOT NULL COMMENT 'Round of conversation', + `message_detail` longtext COLLATE utf8mb4_unicode_ci COMMENT 'Message details, json 
format', + `gmt_created` timestamp NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'created time', + `gmt_modified` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'update time', + UNIQUE KEY `message_uid_index` (`conv_uid`, `index`), + PRIMARY KEY (`id`) +) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci COMMENT 'Chat history message'; + + +CREATE TABLE IF NOT EXISTS `chat_feed_back` +( + `id` bigint(20) NOT NULL AUTO_INCREMENT, + `conv_uid` varchar(128) DEFAULT NULL COMMENT 'Conversation ID', + `conv_index` int(4) DEFAULT NULL COMMENT 'Round of conversation', + `score` int(1) DEFAULT NULL COMMENT 'Score of user', + `ques_type` varchar(32) DEFAULT NULL COMMENT 'User question category', + `question` longtext DEFAULT NULL COMMENT 'User question', + `knowledge_space` varchar(128) DEFAULT NULL COMMENT 'Knowledge space name', + `messages` longtext DEFAULT NULL COMMENT 'The details of user feedback', + `message_id` varchar(255) NULL COMMENT 'Message id', + `feedback_type` varchar(50) NULL COMMENT 'Feedback type like or unlike', + `reason_types` varchar(255) NULL COMMENT 'Feedback reason categories', + `remark` text NULL COMMENT 'Feedback remark', + `user_code` varchar(128) NULL COMMENT 'User code', + `user_name` varchar(128) DEFAULT NULL COMMENT 'User name', + `gmt_created` timestamp NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'created time', + `gmt_modified` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'update time', + PRIMARY KEY (`id`), + UNIQUE KEY `uk_conv` (`conv_uid`,`conv_index`), + KEY `idx_conv` (`conv_uid`,`conv_index`) +) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4 COMMENT='User feedback table'; + + +CREATE TABLE IF NOT EXISTS `my_plugin` +( + `id` int NOT NULL AUTO_INCREMENT COMMENT 'autoincrement id', + `tenant` varchar(255) COLLATE utf8mb4_unicode_ci DEFAULT NULL COMMENT 'user tenant', + `user_code` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL COMMENT 'user 
code', + `user_name` varchar(255) COLLATE utf8mb4_unicode_ci DEFAULT NULL COMMENT 'user name', + `name` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL COMMENT 'plugin name', + `file_name` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL COMMENT 'plugin package file name', + `type` varchar(255) COLLATE utf8mb4_unicode_ci DEFAULT NULL COMMENT 'plugin type', + `version` varchar(255) COLLATE utf8mb4_unicode_ci DEFAULT NULL COMMENT 'plugin version', + `use_count` int DEFAULT NULL COMMENT 'plugin total use count', + `succ_count` int DEFAULT NULL COMMENT 'plugin total success count', + `sys_code` varchar(128) DEFAULT NULL COMMENT 'System code', + `gmt_created` TIMESTAMP DEFAULT CURRENT_TIMESTAMP COMMENT 'plugin install time', + PRIMARY KEY (`id`), + UNIQUE KEY `name` (`name`) +) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci COMMENT='User plugin table'; + +CREATE TABLE IF NOT EXISTS `plugin_hub` +( + `id` int NOT NULL AUTO_INCREMENT COMMENT 'autoincrement id', + `name` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL COMMENT 'plugin name', + `description` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL COMMENT 'plugin description', + `author` varchar(255) COLLATE utf8mb4_unicode_ci DEFAULT NULL COMMENT 'plugin author', + `email` varchar(255) COLLATE utf8mb4_unicode_ci DEFAULT NULL COMMENT 'plugin author email', + `type` varchar(255) COLLATE utf8mb4_unicode_ci DEFAULT NULL COMMENT 'plugin type', + `version` varchar(255) COLLATE utf8mb4_unicode_ci DEFAULT NULL COMMENT 'plugin version', + `storage_channel` varchar(255) COLLATE utf8mb4_unicode_ci DEFAULT NULL COMMENT 'plugin storage channel', + `storage_url` varchar(255) COLLATE utf8mb4_unicode_ci DEFAULT NULL COMMENT 'plugin download url', + `download_param` varchar(255) COLLATE utf8mb4_unicode_ci DEFAULT NULL COMMENT 'plugin download param', + `gmt_created` TIMESTAMP DEFAULT CURRENT_TIMESTAMP COMMENT 'plugin upload time', + `installed` int DEFAULT NULL COMMENT 'plugin already installed 
count', + PRIMARY KEY (`id`), + UNIQUE KEY `name` (`name`) +) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci COMMENT='Plugin Hub table'; + + +CREATE TABLE IF NOT EXISTS `prompt_manage` +( + `id` int(11) NOT NULL AUTO_INCREMENT, + `chat_scene` varchar(100) DEFAULT NULL COMMENT 'Chat scene', + `sub_chat_scene` varchar(100) DEFAULT NULL COMMENT 'Sub chat scene', + `prompt_type` varchar(100) DEFAULT NULL COMMENT 'Prompt type: common or private', + `prompt_name` varchar(256) DEFAULT NULL COMMENT 'prompt name', + `prompt_code` varchar(256) DEFAULT NULL COMMENT 'prompt code', + `content` longtext COMMENT 'Prompt content', + `input_variables` varchar(1024) DEFAULT NULL COMMENT 'Prompt input variables(split by comma))', + `response_schema` text DEFAULT NULL COMMENT 'Prompt response schema', + `model` varchar(128) DEFAULT NULL COMMENT 'Prompt model name(we can use different models for different prompt)', + `prompt_language` varchar(32) DEFAULT NULL COMMENT 'Prompt language(eg:en, zh-cn)', + `prompt_format` varchar(32) DEFAULT 'f-string' COMMENT 'Prompt format(eg: f-string, jinja2)', + `prompt_desc` varchar(512) DEFAULT NULL COMMENT 'Prompt description', + `user_code` varchar(128) DEFAULT NULL COMMENT 'User code', + `user_name` varchar(128) DEFAULT NULL COMMENT 'User name', + `sys_code` varchar(128) DEFAULT NULL COMMENT 'System code', + `gmt_created` timestamp NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'created time', + `gmt_modified` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'update time', + PRIMARY KEY (`id`), + UNIQUE KEY `prompt_name_uiq` (`prompt_name`, `sys_code`, `prompt_language`, `model`), + KEY `gmt_created_idx` (`gmt_created`) +) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci COMMENT='Prompt management table'; + + + + CREATE TABLE IF NOT EXISTS `gpts_conversations` ( + `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'autoincrement id', + `conv_id` varchar(255) NOT NULL 
COMMENT 'The unique id of the conversation record', + `user_goal` text NOT NULL COMMENT 'User''s goals content', + `gpts_name` varchar(255) NOT NULL COMMENT 'The gpts name', + `state` varchar(255) DEFAULT NULL COMMENT 'The gpts state', + `max_auto_reply_round` int(11) NOT NULL COMMENT 'max auto reply round', + `auto_reply_count` int(11) NOT NULL COMMENT 'auto reply count', + `user_code` varchar(255) DEFAULT NULL COMMENT 'user code', + `sys_code` varchar(255) DEFAULT NULL COMMENT 'system app ', + `created_at` datetime DEFAULT NULL COMMENT 'create time', + `updated_at` datetime DEFAULT NULL COMMENT 'last update time', + `team_mode` varchar(255) NULL COMMENT 'agent team work mode', + + PRIMARY KEY (`id`), + UNIQUE KEY `uk_gpts_conversations` (`conv_id`), + KEY `idx_gpts_name` (`gpts_name`) +) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4 COMMENT="gpt conversations"; + +CREATE TABLE IF NOT EXISTS `gpts_instance` ( + `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'autoincrement id', + `gpts_name` varchar(255) NOT NULL COMMENT 'Current AI assistant name', + `gpts_describe` varchar(2255) NOT NULL COMMENT 'Current AI assistant describe', + `resource_db` text COMMENT 'List of structured database names contained in the current gpts', + `resource_internet` text COMMENT 'Is it possible to retrieve information from the internet', + `resource_knowledge` text COMMENT 'List of unstructured database names contained in the current gpts', + `gpts_agents` varchar(1000) DEFAULT NULL COMMENT 'List of agents names contained in the current gpts', + `gpts_models` varchar(1000) DEFAULT NULL COMMENT 'List of llm model names contained in the current gpts', + `language` varchar(100) DEFAULT NULL COMMENT 'gpts language', + `user_code` varchar(255) NOT NULL COMMENT 'user code', + `sys_code` varchar(255) DEFAULT NULL COMMENT 'system app code', + `created_at` datetime DEFAULT NULL COMMENT 'create time', + `updated_at` datetime DEFAULT NULL COMMENT 'last update time', + `team_mode` 
varchar(255) NOT NULL COMMENT 'Team work mode', + `is_sustainable` tinyint(1) NOT NULL COMMENT 'Applications for sustainable dialogue', + PRIMARY KEY (`id`), + UNIQUE KEY `uk_gpts` (`gpts_name`) +) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4 COMMENT="gpts instance"; + +CREATE TABLE `gpts_messages` ( + `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'autoincrement id', + `conv_id` varchar(255) NOT NULL COMMENT 'The unique id of the conversation record', + `sender` varchar(255) NOT NULL COMMENT 'Who speaking in the current conversation turn', + `receiver` varchar(255) NOT NULL COMMENT 'Who receive message in the current conversation turn', + `model_name` varchar(255) DEFAULT NULL COMMENT 'message generate model', + `rounds` int(11) NOT NULL COMMENT 'dialogue turns', + `is_success` int(4) NULL DEFAULT 0 COMMENT 'agent message is success', + `app_code` varchar(255) NOT NULL COMMENT 'Current AI assistant code', + `app_name` varchar(255) NOT NULL COMMENT 'Current AI assistant name', + `content` text COMMENT 'Content of the speech', + `current_goal` text COMMENT 'The target corresponding to the current message', + `context` text COMMENT 'Current conversation context', + `review_info` text COMMENT 'Current conversation review info', + `action_report` longtext COMMENT 'Current conversation action report', + `resource_info` text DEFAULT NULL COMMENT 'Current conversation resource info', + `role` varchar(255) DEFAULT NULL COMMENT 'The role of the current message content', + `created_at` datetime DEFAULT NULL COMMENT 'create time', + `updated_at` datetime DEFAULT NULL COMMENT 'last update time', + PRIMARY KEY (`id`), + KEY `idx_q_messages` (`conv_id`,`rounds`,`sender`) +) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4 COMMENT="gpts message"; + + +CREATE TABLE `gpts_plans` ( + `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'autoincrement id', + `conv_id` varchar(255) NOT NULL COMMENT 'The unique id of the conversation record', + `sub_task_num` int(11) NOT NULL 
COMMENT 'Subtask number', + `sub_task_title` varchar(255) NOT NULL COMMENT 'subtask title', + `sub_task_content` text NOT NULL COMMENT 'subtask content', + `sub_task_agent` varchar(255) DEFAULT NULL COMMENT 'Available agents corresponding to subtasks', + `resource_name` varchar(255) DEFAULT NULL COMMENT 'resource name', + `rely` varchar(255) DEFAULT NULL COMMENT 'Subtask dependencies,like: 1,2,3', + `agent_model` varchar(255) DEFAULT NULL COMMENT 'LLM model used by subtask processing agents', + `retry_times` int(11) DEFAULT NULL COMMENT 'number of retries', + `max_retry_times` int(11) DEFAULT NULL COMMENT 'Maximum number of retries', + `state` varchar(255) DEFAULT NULL COMMENT 'subtask status', + `result` longtext COMMENT 'subtask result', + `created_at` datetime DEFAULT NULL COMMENT 'create time', + `updated_at` datetime DEFAULT NULL COMMENT 'last update time', + PRIMARY KEY (`id`), + UNIQUE KEY `uk_sub_task` (`conv_id`,`sub_task_num`) +) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4 COMMENT="gpt plan"; + +-- dbgpt.dbgpt_serve_flow definition +CREATE TABLE `dbgpt_serve_flow` ( + `id` int NOT NULL AUTO_INCREMENT COMMENT 'Auto increment id', + `uid` varchar(128) NOT NULL COMMENT 'Unique id', + `dag_id` varchar(128) DEFAULT NULL COMMENT 'DAG id', + `name` varchar(128) DEFAULT NULL COMMENT 'Flow name', + `flow_data` longtext COMMENT 'Flow data, JSON format', + `user_name` varchar(128) DEFAULT NULL COMMENT 'User name', + `sys_code` varchar(128) DEFAULT NULL COMMENT 'System code', + `gmt_created` datetime DEFAULT NULL COMMENT 'Record creation time', + `gmt_modified` datetime DEFAULT NULL COMMENT 'Record update time', + `flow_category` varchar(64) DEFAULT NULL COMMENT 'Flow category', + `description` varchar(512) DEFAULT NULL COMMENT 'Flow description', + `state` varchar(32) DEFAULT NULL COMMENT 'Flow state', + `error_message` varchar(512) NULL comment 'Error message', + `source` varchar(64) DEFAULT NULL COMMENT 'Flow source', + `source_url` varchar(512) DEFAULT 
NULL COMMENT 'Flow source url', + `version` varchar(32) DEFAULT NULL COMMENT 'Flow version', + `define_type` varchar(32) null comment 'Flow define type(json or python)', + `label` varchar(128) DEFAULT NULL COMMENT 'Flow label', + `editable` int DEFAULT NULL COMMENT 'Editable, 0: editable, 1: not editable', + `variables` text DEFAULT NULL COMMENT 'Flow variables, JSON format', + PRIMARY KEY (`id`), + UNIQUE KEY `uk_uid` (`uid`), + KEY `ix_dbgpt_serve_flow_sys_code` (`sys_code`), + KEY `ix_dbgpt_serve_flow_uid` (`uid`), + KEY `ix_dbgpt_serve_flow_dag_id` (`dag_id`), + KEY `ix_dbgpt_serve_flow_user_name` (`user_name`), + KEY `ix_dbgpt_serve_flow_name` (`name`) +) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; + +-- dbgpt.dbgpt_serve_file definition +CREATE TABLE `dbgpt_serve_file` ( + `id` int NOT NULL AUTO_INCREMENT COMMENT 'Auto increment id', + `bucket` varchar(255) NOT NULL COMMENT 'Bucket name', + `file_id` varchar(255) NOT NULL COMMENT 'File id', + `file_name` varchar(256) NOT NULL COMMENT 'File name', + `file_size` int DEFAULT NULL COMMENT 'File size', + `storage_type` varchar(32) NOT NULL COMMENT 'Storage type', + `storage_path` varchar(512) NOT NULL COMMENT 'Storage path', + `uri` varchar(512) NOT NULL COMMENT 'File URI', + `custom_metadata` text DEFAULT NULL COMMENT 'Custom metadata, JSON format', + `file_hash` varchar(128) DEFAULT NULL COMMENT 'File hash', + `user_name` varchar(128) DEFAULT NULL COMMENT 'User name', + `sys_code` varchar(128) DEFAULT NULL COMMENT 'System code', + `gmt_created` datetime DEFAULT CURRENT_TIMESTAMP COMMENT 'Record creation time', + `gmt_modified` datetime DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'Record update time', + PRIMARY KEY (`id`), + UNIQUE KEY `uk_bucket_file_id` (`bucket`, `file_id`) +) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; + +-- dbgpt.dbgpt_serve_variables definition +CREATE TABLE `dbgpt_serve_variables` ( + `id` int 
NOT NULL AUTO_INCREMENT COMMENT 'Auto increment id', + `key` varchar(128) NOT NULL COMMENT 'Variable key', + `name` varchar(128) DEFAULT NULL COMMENT 'Variable name', + `label` varchar(128) DEFAULT NULL COMMENT 'Variable label', + `value` text DEFAULT NULL COMMENT 'Variable value, JSON format', + `value_type` varchar(32) DEFAULT NULL COMMENT 'Variable value type(string, int, float, bool)', + `category` varchar(32) DEFAULT 'common' COMMENT 'Variable category(common or secret)', + `encryption_method` varchar(32) DEFAULT NULL COMMENT 'Variable encryption method(fernet, simple, rsa, aes)', + `salt` varchar(128) DEFAULT NULL COMMENT 'Variable salt', + `scope` varchar(32) DEFAULT 'global' COMMENT 'Variable scope(global,flow,app,agent,datasource,flow_priv,agent_priv, ""etc)', + `scope_key` varchar(256) DEFAULT NULL COMMENT 'Variable scope key, default is empty, for scope is "flow_priv", the scope_key is dag id of flow', + `enabled` int DEFAULT 1 COMMENT 'Variable enabled, 0: disabled, 1: enabled', + `description` text DEFAULT NULL COMMENT 'Variable description', + `user_name` varchar(128) DEFAULT NULL COMMENT 'User name', + `sys_code` varchar(128) DEFAULT NULL COMMENT 'System code', + `gmt_created` datetime DEFAULT CURRENT_TIMESTAMP COMMENT 'Record creation time', + `gmt_modified` datetime DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'Record update time', + PRIMARY KEY (`id`), + KEY `ix_your_table_name_key` (`key`), + KEY `ix_your_table_name_name` (`name`) +) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; + +CREATE TABLE `dbgpt_serve_model` ( + `id` int NOT NULL AUTO_INCREMENT COMMENT 'Auto increment id', + `host` varchar(255) NOT NULL COMMENT 'The model worker host', + `port` int NOT NULL COMMENT 'The model worker port', + `model` varchar(255) NOT NULL COMMENT 'The model name', + `provider` varchar(255) NOT NULL COMMENT 'The model provider', + `worker_type` varchar(255) NOT NULL COMMENT 'The worker type', + `params` 
text NOT NULL COMMENT 'The model parameters, JSON format', + `enabled` int DEFAULT 1 COMMENT 'Whether the model is enabled, if it is enabled, it will be started when the system starts, 1 is enabled, 0 is disabled', + `worker_name` varchar(255) DEFAULT NULL COMMENT 'The worker name', + `description` text DEFAULT NULL COMMENT 'The model description', + `user_name` varchar(128) DEFAULT NULL COMMENT 'User name', + `sys_code` varchar(128) DEFAULT NULL COMMENT 'System code', + `gmt_created` datetime DEFAULT CURRENT_TIMESTAMP COMMENT 'Record creation time', + `gmt_modified` datetime DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'Record update time', + PRIMARY KEY (`id`), + KEY `idx_user_name` (`user_name`), + KEY `idx_sys_code` (`sys_code`), + UNIQUE KEY `uk_model_provider_type` (`model`, `provider`, `worker_type`) +) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci COMMENT='Model persistence table'; + +-- dbgpt.gpts_app definition +CREATE TABLE `gpts_app` ( + `id` int NOT NULL AUTO_INCREMENT COMMENT 'autoincrement id', + `app_code` varchar(255) NOT NULL COMMENT 'Current AI assistant code', + `app_name` varchar(255) NOT NULL COMMENT 'Current AI assistant name', + `app_describe` varchar(2255) NOT NULL COMMENT 'Current AI assistant describe', + `language` varchar(100) NOT NULL COMMENT 'gpts language', + `team_mode` varchar(255) NOT NULL COMMENT 'Team work mode', + `team_context` text COMMENT 'The execution logic and team member content that teams with different working modes rely on', + `user_code` varchar(255) DEFAULT NULL COMMENT 'user code', + `sys_code` varchar(255) DEFAULT NULL COMMENT 'system app code', + `created_at` datetime DEFAULT NULL COMMENT 'create time', + `updated_at` datetime DEFAULT NULL COMMENT 'last update time', + `icon` varchar(1024) DEFAULT NULL COMMENT 'app icon, url', + `published` varchar(64) DEFAULT 'false' COMMENT 'Has it been published?', + `param_need` text DEFAULT NULL COMMENT 'Parameter 
information supported by the application', + `admins` text DEFAULT NULL COMMENT 'administrator', + PRIMARY KEY (`id`), + UNIQUE KEY `uk_gpts_app` (`app_name`) +) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; + +CREATE TABLE `gpts_app_collection` ( + `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'autoincrement id', + `app_code` varchar(255) NOT NULL COMMENT 'Current AI assistant code', + `user_code` int(11) NOT NULL COMMENT 'user code', + `sys_code` varchar(255) NULL COMMENT 'system app code', + `created_at` datetime DEFAULT NULL COMMENT 'create time', + `updated_at` datetime DEFAULT NULL COMMENT 'last update time', + PRIMARY KEY (`id`), + KEY `idx_app_code` (`app_code`), + KEY `idx_user_code` (`user_code`) +) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4 COMMENT="gpt collections"; + +-- dbgpt.gpts_app_detail definition +CREATE TABLE `gpts_app_detail` ( + `id` int NOT NULL AUTO_INCREMENT COMMENT 'autoincrement id', + `app_code` varchar(255) NOT NULL COMMENT 'Current AI assistant code', + `app_name` varchar(255) NOT NULL COMMENT 'Current AI assistant name', + `agent_name` varchar(255) NOT NULL COMMENT ' Agent name', + `node_id` varchar(255) NOT NULL COMMENT 'Current AI assistant Agent Node id', + `resources` text COMMENT 'Agent bind resource', + `prompt_template` text COMMENT 'Agent bind template', + `llm_strategy` varchar(25) DEFAULT NULL COMMENT 'Agent use llm strategy', + `llm_strategy_value` text COMMENT 'Agent use llm strategy value', + `type` varchar(255) DEFAULT NULL COMMENT 'bind agent type, default agent', + `created_at` datetime DEFAULT NULL COMMENT 'create time', + `updated_at` datetime DEFAULT NULL COMMENT 'last update time', + PRIMARY KEY (`id`), + UNIQUE KEY `uk_gpts_app_agent_node` (`app_name`,`agent_name`,`node_id`) +) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; + + +-- For deploy model cluster of DB-GPT(StorageModelRegistry) +CREATE TABLE IF NOT EXISTS 
`dbgpt_cluster_registry_instance` ( + `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'Auto increment id', + `model_name` varchar(128) NOT NULL COMMENT 'Model name', + `host` varchar(128) NOT NULL COMMENT 'Host of the model', + `port` int(11) NOT NULL COMMENT 'Port of the model', + `weight` float DEFAULT 1.0 COMMENT 'Weight of the model', + `check_healthy` tinyint(1) DEFAULT 1 COMMENT 'Whether to check the health of the model', + `healthy` tinyint(1) DEFAULT 0 COMMENT 'Whether the model is healthy', + `enabled` tinyint(1) DEFAULT 1 COMMENT 'Whether the model is enabled', + `prompt_template` varchar(128) DEFAULT NULL COMMENT 'Prompt template for the model instance', + `last_heartbeat` datetime DEFAULT NULL COMMENT 'Last heartbeat time of the model instance', + `user_name` varchar(128) DEFAULT NULL COMMENT 'User name', + `sys_code` varchar(128) DEFAULT NULL COMMENT 'System code', + `gmt_created` datetime DEFAULT CURRENT_TIMESTAMP COMMENT 'Record creation time', + `gmt_modified` datetime DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'Record update time', + PRIMARY KEY (`id`), + UNIQUE KEY `uk_model_instance` (`model_name`, `host`, `port`, `sys_code`) +) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4 COMMENT='Cluster model instance table, for registering and managing model instances'; + +-- dbgpt.recommend_question definition +CREATE TABLE `recommend_question` ( + `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'autoincrement id', + `gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'create time', + `gmt_modified` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'last update time', + `app_code` varchar(255) NOT NULL COMMENT 'Current AI assistant code', + `question` text DEFAULT NULL COMMENT 'question', + `user_code` varchar(255) NOT NULL COMMENT 'user code', + `sys_code` varchar(255) NULL COMMENT 'system app code', + `valid` varchar(10) DEFAULT 'true' COMMENT 'is it effective,true/false', + 
`chat_mode` varchar(255) DEFAULT NULL COMMENT 'Conversation scene mode,chat_knowledge...', + `params` text DEFAULT NULL COMMENT 'question param', + `is_hot_question` varchar(10) DEFAULT 'false' COMMENT 'Is it a popular recommendation question?', + PRIMARY KEY (`id`), + KEY `idx_rec_q_app_code` (`app_code`) +) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci COMMENT="AI application related recommendation issues"; + +-- dbgpt.user_recent_apps definition +CREATE TABLE `user_recent_apps` ( + `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'autoincrement id', + `gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'create time', + `gmt_modified` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'last update time', + `app_code` varchar(255) NOT NULL COMMENT 'AI assistant code', + `last_accessed` timestamp NULL DEFAULT NULL COMMENT 'User recent usage time', + `user_code` varchar(255) DEFAULT NULL COMMENT 'user code', + `sys_code` varchar(255) DEFAULT NULL COMMENT 'system app code', + PRIMARY KEY (`id`), + KEY `idx_user_r_app_code` (`app_code`), + KEY `idx_last_accessed` (`last_accessed`), + KEY `idx_user_code` (`user_code`) +) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci COMMENT='User recently used apps'; + +-- dbgpt.dbgpt_serve_dbgpts_my definition +CREATE TABLE `dbgpt_serve_dbgpts_my` ( + `id` int NOT NULL AUTO_INCREMENT COMMENT 'autoincrement id', + `name` varchar(255) NOT NULL COMMENT 'plugin name', + `user_code` varchar(255) DEFAULT NULL COMMENT 'user code', + `user_name` varchar(255) DEFAULT NULL COMMENT 'user name', + `file_name` varchar(255) NOT NULL COMMENT 'plugin package file name', + `type` varchar(255) DEFAULT NULL COMMENT 'plugin type', + `version` varchar(255) DEFAULT NULL COMMENT 'plugin version', + `use_count` int DEFAULT NULL COMMENT 'plugin total use count', + `succ_count` int DEFAULT NULL COMMENT 'plugin total success count', + 
`sys_code` varchar(128) DEFAULT NULL COMMENT 'System code', + `gmt_created` TIMESTAMP DEFAULT CURRENT_TIMESTAMP COMMENT 'plugin install time', + `gmt_modified` TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'update time', + PRIMARY KEY (`id`), + UNIQUE KEY `name` (`name`, `user_name`), + KEY `ix_my_plugin_sys_code` (`sys_code`) +) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; + +-- dbgpt.dbgpt_serve_dbgpts_hub definition +CREATE TABLE `dbgpt_serve_dbgpts_hub` ( + `id` int NOT NULL AUTO_INCREMENT COMMENT 'autoincrement id', + `name` varchar(255) NOT NULL COMMENT 'plugin name', + `description` varchar(255) NULL COMMENT 'plugin description', + `author` varchar(255) DEFAULT NULL COMMENT 'plugin author', + `email` varchar(255) DEFAULT NULL COMMENT 'plugin author email', + `type` varchar(255) DEFAULT NULL COMMENT 'plugin type', + `version` varchar(255) DEFAULT NULL COMMENT 'plugin version', + `storage_channel` varchar(255) DEFAULT NULL COMMENT 'plugin storage channel', + `storage_url` varchar(255) DEFAULT NULL COMMENT 'plugin download url', + `download_param` varchar(255) DEFAULT NULL COMMENT 'plugin download param', + `gmt_created` TIMESTAMP DEFAULT CURRENT_TIMESTAMP COMMENT 'plugin upload time', + `gmt_modified` TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'update time', + `installed` int DEFAULT NULL COMMENT 'plugin already installed count', + PRIMARY KEY (`id`), + UNIQUE KEY `name` (`name`) +) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; + + +CREATE +DATABASE IF NOT EXISTS EXAMPLE_1; +use EXAMPLE_1; +CREATE TABLE IF NOT EXISTS `users` +( + `id` int NOT NULL AUTO_INCREMENT, + `username` varchar(50) NOT NULL COMMENT '用户名', + `password` varchar(50) NOT NULL COMMENT '密码', + `email` varchar(50) NOT NULL COMMENT '邮箱', + `phone` varchar(20) DEFAULT NULL COMMENT '电话', + PRIMARY KEY (`id`), + KEY `idx_username` (`username`) COMMENT '索引:按用户名查询' +) 
ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4 COMMENT='聊天用户表'; + +INSERT INTO users (username, password, email, phone) +VALUES ('user_1', 'password_1', 'user_1@example.com', '12345678901'); +INSERT INTO users (username, password, email, phone) +VALUES ('user_2', 'password_2', 'user_2@example.com', '12345678902'); +INSERT INTO users (username, password, email, phone) +VALUES ('user_3', 'password_3', 'user_3@example.com', '12345678903'); +INSERT INTO users (username, password, email, phone) +VALUES ('user_4', 'password_4', 'user_4@example.com', '12345678904'); +INSERT INTO users (username, password, email, phone) +VALUES ('user_5', 'password_5', 'user_5@example.com', '12345678905'); +INSERT INTO users (username, password, email, phone) +VALUES ('user_6', 'password_6', 'user_6@example.com', '12345678906'); +INSERT INTO users (username, password, email, phone) +VALUES ('user_7', 'password_7', 'user_7@example.com', '12345678907'); +INSERT INTO users (username, password, email, phone) +VALUES ('user_8', 'password_8', 'user_8@example.com', '12345678908'); +INSERT INTO users (username, password, email, phone) +VALUES ('user_9', 'password_9', 'user_9@example.com', '12345678909'); +INSERT INTO users (username, password, email, phone) +VALUES ('user_10', 'password_10', 'user_10@example.com', '12345678900'); +INSERT INTO users (username, password, email, phone) +VALUES ('user_11', 'password_11', 'user_11@example.com', '12345678901'); +INSERT INTO users (username, password, email, phone) +VALUES ('user_12', 'password_12', 'user_12@example.com', '12345678902'); +INSERT INTO users (username, password, email, phone) +VALUES ('user_13', 'password_13', 'user_13@example.com', '12345678903'); +INSERT INTO users (username, password, email, phone) +VALUES ('user_14', 'password_14', 'user_14@example.com', '12345678904'); +INSERT INTO users (username, password, email, phone) +VALUES ('user_15', 'password_15', 'user_15@example.com', '12345678905'); +INSERT INTO users (username, 
password, email, phone) +VALUES ('user_16', 'password_16', 'user_16@example.com', '12345678906'); +INSERT INTO users (username, password, email, phone) +VALUES ('user_17', 'password_17', 'user_17@example.com', '12345678907'); +INSERT INTO users (username, password, email, phone) +VALUES ('user_18', 'password_18', 'user_18@example.com', '12345678908'); +INSERT INTO users (username, password, email, phone) +VALUES ('user_19', 'password_19', 'user_19@example.com', '12345678909'); +INSERT INTO users (username, password, email, phone) +VALUES ('user_20', 'password_20', 'user_20@example.com', '12345678900'); \ No newline at end of file diff --git a/configs/dbgpt-local-vllm.toml b/configs/dbgpt-local-vllm.toml index 3c7fa82a5..519e6ee68 100644 --- a/configs/dbgpt-local-vllm.toml +++ b/configs/dbgpt-local-vllm.toml @@ -7,7 +7,7 @@ encrypt_key = "your_secret_key" # Server Configurations [service.web] host = "0.0.0.0" -port = 5670 +port = "${env:DBGPT_WEBSERVER_PORT:-8080}" [service.web.database] type = "sqlite" diff --git a/configs/dev.toml b/configs/dev.toml new file mode 100644 index 000000000..4e1cbd469 --- /dev/null +++ b/configs/dev.toml @@ -0,0 +1,96 @@ +[system] +# Load language from environment variable(It is set by the hook) +language = "${env:DBGPT_LANG:-zh}" +log_level = "INFO" +api_keys = [] +encrypt_key = "your_secret_key" + +# Server Configurations +[service.web] +host = "127.0.0.1" +port = 5670 + +[service.web.database] +type = "sqlite" +path = "pilot/meta_data/dbgpt.db" +#[service.web.database] +# type = "mysql" +# host = "127.0.0.1" +# port = 3306 +# user = "root" +# database = "dbgpt" +# password = "aa12345678" + +[service.model.worker] +host = "127.0.0.1" + +[rag] +chunk_size=1000 +chunk_overlap=0 +similarity_top_k=5 +similarity_score_threshold=0.0 +max_chunks_once_load=10 +max_threads=1 +rerank_top_k=3 +graph_community_summary_enabled="True" +enable_summary=true + + +[rag.storage] +[rag.storage.vector] +type = "Chroma" +persist_path = "pilot/data" + 
+#type = "ElasticSearch" +#uri = "127.0.0.1" +#port = "19530" +#username="dbgpt" +#password=19530 +[rag.storage.graph] +type = "TuGraph" +host="127.0.0.1" +port=7687 +username="admin" +password="73@TuGraph" +enable_summary=true +enable_similarity_search=false + +[rag.storage.full_text] +type = "ElasticSearch" +host="127.0.0.1" +port=9200 + + + + +#GRAPH_COMMUNITY_SUMMARY_ENABLED=True # enable the graph community summary +#TRIPLET_GRAPH_ENABLED=True # enable the graph search for the triplets +#DOCUMENT_GRAPH_ENABLED=True # enable the graph search for documents and chunks +#KNOWLEDGE_GRAPH_EXTRACTION_BATCH_SIZE=20 # the batch size of triplet extraction from the text +#COMMUNITY_SUMMARY_BATCH_SIZE=20 # the batch size of parallel community summary process + + + + +# Model Configurations +[models] +[[models.llms]] +#name = "deepseek-chat" +#provider = "proxy/deepseek" +#provider = "proxy/vol-deepseek" +#api_key = "${env:DEEPSEEK_API_KEY}" +#name = "${env:LLM_MODEL_NAME:-deepseek-chat}" +#provider = "${env:LLM_MODEL_PROVIDER:-proxy/deepseek}" +#api_url = "https://zdfmng.alipay.com/chat/completions" +#api_key = "sk-adf80a1b814cf1193422fabcd34ccc0a" +#api_key = "sk-04bec639baf54da7b743016e8536a459" + +name = "Qwen2.5-72B-Instruct" +provider = "proxy/gitee" +api_key = "W7LRHBLWM0XMW0AGLDRKEITZNZCSUUHAVFOYWO1C" + +[[models.embeddings]] +name = "bge-large-zh-v1.5" +provider = "proxy/openai" +api_url = "https://ai.gitee.com/v1/embeddings" +api_key = "W7LRHBLWM0XMW0AGLDRKEITZNZCSUUHAVFOYWO1C" \ No newline at end of file diff --git a/examples/agents/sandbox_code_agent_example.py b/examples/agents/sandbox_code_agent_example.py index 122606268..b930c74f4 100644 --- a/examples/agents/sandbox_code_agent_example.py +++ b/examples/agents/sandbox_code_agent_example.py @@ -35,7 +35,7 @@ from dbgpt.core import ModelMessageRoleType from dbgpt.util.code_utils import UNKNOWN, extract_code, infer_lang from dbgpt.util.string_utils import str_to_bool from dbgpt.util.utils import colored -from 
dbgpt.vis.tags.vis_code import Vis, VisCode +from dbgpt.vis import SystemVisTag logger = logging.getLogger(__name__) @@ -46,13 +46,9 @@ class SandboxCodeAction(Action[None]): def __init__(self, **kwargs): """Code action init.""" super().__init__(**kwargs) - self._render_protocol = VisCode() self._code_execution_config = {} - - @property - def render_protocol(self) -> Optional[Vis]: - """Return the render protocol.""" - return self._render_protocol + ## this action out view vis tag name + self.action_view_tag: str = SystemVisTag.VisCode.value async def run( self, diff --git a/packages/dbgpt-app/src/dbgpt_app/operators/datasource.py b/packages/dbgpt-app/src/dbgpt_app/operators/datasource.py index 32554fbef..8f9235665 100644 --- a/packages/dbgpt-app/src/dbgpt_app/operators/datasource.py +++ b/packages/dbgpt-app/src/dbgpt_app/operators/datasource.py @@ -16,7 +16,7 @@ from dbgpt.core.awel.flow import ( ) from dbgpt.core.operators import BaseLLM from dbgpt.util.i18n_utils import _ -from dbgpt.vis.tags.vis_chart import default_chart_type_prompt +from dbgpt_ext.vis.gpt_vis.tags.vis_chart import default_chart_type_prompt from .llm import HOContextBody diff --git a/packages/dbgpt-core/src/dbgpt/agent/core/action/base.py b/packages/dbgpt-core/src/dbgpt/agent/core/action/base.py index 6cf401216..9055d833b 100644 --- a/packages/dbgpt-core/src/dbgpt/agent/core/action/base.py +++ b/packages/dbgpt-core/src/dbgpt/agent/core/action/base.py @@ -26,7 +26,9 @@ from ...._private.pydantic import ( model_validator, ) from ....util.json_utils import find_json_objects +from ....vis import VisProtocolConverter from ....vis.base import Vis +from ....vis.vis_converter import DefaultVisConverter from ...resource.base import AgentResource, Resource, ResourceType logger = logging.getLogger(__name__) @@ -94,6 +96,22 @@ class Action(ABC, Generic[T]): self.resource: Optional[Resource] = None self.language: str = language self._name = name + self.action_view_tag: Optional[str] = None + + 
self._render: Optional[VisProtocolConverter] = None + + def init_action(self, **kwargs): + self._render: VisProtocolConverter = kwargs.get( + "render_protocol", DefaultVisConverter() + ) + + @property + def render_protocol(self) -> Optional[Vis]: + """Return the render protocol.""" + if self.action_view_tag: + return self._render.vis_inst(self.action_view_tag) + else: + return None def init_resource(self, resource: Optional[Resource]): """Initialize the resource.""" @@ -119,11 +137,6 @@ class Action(ABC, Generic[T]): """Return the action description.""" return cls.__doc__ or "" - @property - def render_protocol(self) -> Optional[Vis]: - """Return the render protocol.""" - return None - def render_prompt(self) -> Optional[str]: """Return the render prompt.""" if self.render_protocol is None: diff --git a/packages/dbgpt-core/src/dbgpt/agent/core/agent.py b/packages/dbgpt-core/src/dbgpt/agent/core/agent.py index ef90cfa7e..215082eae 100644 --- a/packages/dbgpt-core/src/dbgpt/agent/core/agent.py +++ b/packages/dbgpt-core/src/dbgpt/agent/core/agent.py @@ -3,14 +3,18 @@ from __future__ import annotations import dataclasses +import json +import uuid from abc import ABC, abstractmethod from typing import Any, Dict, List, Optional, Tuple, Union from dbgpt.core import LLMClient from dbgpt.util.annotations import PublicAPI +from ...util.json_utils import serialize from .action.base import ActionOutput from .memory.agent_memory import AgentMemory +from .memory.gpts import GptsMessage class Agent(ABC): @@ -99,6 +103,7 @@ class Agent(ABC): async def thinking( self, messages: List[AgentMessage], + reply_message_id: str, sender: Optional[Agent] = None, prompt: Optional[str] = None, ) -> Tuple[Optional[str], Optional[str]]: @@ -181,6 +186,11 @@ class Agent(ABC): def name(self) -> str: """Return the name of the agent.""" + @property + @abstractmethod + def avatar(self) -> str: + """Return the avatar of the agent.""" + @property @abstractmethod def role(self) -> str: @@ -273,7 +283,9 
@@ class AgentReviewInfo: class AgentMessage: """Message object for agent communication.""" + message_id: Optional[str] = None content: Optional[str] = None + thinking: Optional[str] = None name: Optional[str] = None rounds: int = 0 context: Optional[MessageContextType] = None @@ -284,6 +296,7 @@ class AgentMessage: role: Optional[str] = None success: bool = True resource_info: Optional[ResourceReferType] = None + show_message: bool = True def to_dict(self) -> Dict: """Return a dictionary representation of the AgentMessage.""" @@ -297,16 +310,43 @@ class AgentMessage: def to_llm_message(self) -> Dict[str, Any]: """Return a dictionary representation of the AgentMessage.""" + content = self.content + action_report = self.action_report + if action_report: + content = action_report.content return { - "content": self.content, + "content": content, # use tool data as message "context": self.context, "role": self.role, } + @classmethod + def init_new( + cls, + content: Optional[str] = None, + current_goal: Optional[str] = None, + context: Optional[dict] = None, + rounds: Optional[int] = None, + name: Optional[str] = None, + role: Optional[str] = None, + show_message: bool = True, + ): + return cls( + message_id=uuid.uuid4().hex, + content=content, + current_goal=current_goal, + context=context, + rounds=rounds + 1, + name=name, + role=role, + show_message=show_message, + ) + @classmethod def from_llm_message(cls, message: Dict[str, Any]) -> AgentMessage: """Create an AgentMessage object from a dictionary.""" return cls( + message_id=uuid.uuid4().hex, content=message.get("content"), context=message.get("context"), role=message.get("role"), @@ -322,6 +362,7 @@ class AgentMessage: kwargs = { key: value for key, value in message.items() if key in field_names } + kwargs["message_id"] = uuid.uuid4().hex results.append(cls(**kwargs)) return results @@ -337,6 +378,7 @@ class AgentMessage: copied_review_info = self.review_info.copy() if self.review_info else None return 
AgentMessage( content=self.content, + thinking=self.thinking, name=self.name, context=copied_context, rounds=self.rounds, @@ -354,3 +396,46 @@ class AgentMessage: if isinstance(self.context, dict): return self.context return {} + + def to_gpts_message( + self, + sender: "ConversableAgent", # noqa + receiver: "ConversableAgent", # noqa + ) -> GptsMessage: + gpts_message: GptsMessage = GptsMessage( + conv_id=receiver.not_null_agent_context.conv_id, + message_id=self.message_id if self.message_id else uuid.uuid4().hex, + sender=sender.role, + sender_name=sender.name, + receiver=receiver.role, + receiver_name=receiver.name, + role=receiver.role, + rounds=self.rounds, + is_success=self.success, + app_code=sender.not_null_agent_context.gpts_app_code, + app_name=sender.not_null_agent_context.gpts_app_name, + current_goal=self.current_goal, + content=self.content if self.content else "", + thinking=self.thinking if self.thinking else "", + context=( + json.dumps(self.context, default=serialize, ensure_ascii=False) + if self.context + else None + ), + review_info=( + json.dumps(self.review_info.to_dict(), ensure_ascii=False) + if self.review_info + else None + ), + action_report=( + json.dumps(self.action_report.to_dict(), ensure_ascii=False) + if self.action_report + else None + ), + model_name=self.model_name, + resource_info=( + json.dumps(self.resource_info) if self.resource_info else None + ), + show_message=self.show_message, + ) + return gpts_message diff --git a/packages/dbgpt-core/src/dbgpt/agent/core/base_agent.py b/packages/dbgpt-core/src/dbgpt/agent/core/base_agent.py index be5e9a9c0..7a6665e3e 100644 --- a/packages/dbgpt-core/src/dbgpt/agent/core/base_agent.py +++ b/packages/dbgpt-core/src/dbgpt/agent/core/base_agent.py @@ -5,6 +5,7 @@ from __future__ import annotations import asyncio import json import logging +import uuid from concurrent.futures import Executor, ThreadPoolExecutor from datetime import datetime from typing import Any, Callable, Dict, List, 
Optional, Tuple, Type, final @@ -18,9 +19,10 @@ from dbgpt.util.executor_utils import blocking_func_to_async from dbgpt.util.tracer import SpanType, root_tracer from dbgpt.util.utils import colored +from ...util.json_utils import serialize from ..resource.base import Resource from ..util.conv_utils import parse_conv_id -from ..util.llm.llm import LLMConfig, LLMStrategyType +from ..util.llm.llm import LLMConfig, get_llm_strategy_cls from ..util.llm.llm_client import AIWrapper from .action.base import Action, ActionOutput from .agent import Agent, AgentContext, AgentMessage, AgentReviewInfo @@ -51,6 +53,9 @@ class ConversableAgent(Role, Agent): # 确认当前Agent是否需要进行参考资源展示 show_reference: bool = False + # 当前Agent消息是否显示 + show_message: bool = True + executor: Executor = Field( default_factory=lambda: ThreadPoolExecutor(max_workers=1), description="Executor for running tasks", @@ -139,9 +144,9 @@ class ConversableAgent(Role, Agent): async def preload_resource(self) -> None: """Preload resources before agent initialization.""" if self.resource: - await self.resource.preload_resource() + await self.blocking_func_to_async(self.resource.preload_resource) - async def build(self, is_retry_chat: bool = False) -> "ConversableAgent": + async def build(self) -> "ConversableAgent": """Build the agent.""" # Preload resources await self.preload_resource() @@ -175,6 +180,12 @@ class ConversableAgent(Role, Agent): real_conv_id, self.role ) await self.recovering_memory(action_outputs) + + for action in self.actions: + action.init_action( + language=self.language, + render_protocol=self.memory.gpts_memory.vis_converter, + ) return self def bind(self, target: Any) -> "ConversableAgent": @@ -417,21 +428,23 @@ class ConversableAgent(Role, Agent): }, ) as span: # 1.Think about how to do things - llm_reply, model_name = await self.thinking( - thinking_messages, sender + llm_thinking, llm_content, model_name = await self.thinking( + thinking_messages, reply_message.message_id, sender ) + 
reply_message.model_name = model_name - reply_message.content = llm_reply + reply_message.content = llm_content + reply_message.thinking = llm_thinking reply_message.resource_info = resource_info - span.metadata["llm_reply"] = llm_reply + span.metadata["llm_reply"] = llm_content span.metadata["model_name"] = model_name with root_tracer.start_span( "agent.generate_reply.review", - metadata={"llm_reply": llm_reply, "censored": self.name}, + metadata={"llm_reply": llm_content, "censored": self.name}, ) as span: # 2.Review whether what is being done is legal - approve, comments = await self.review(llm_reply, self) + approve, comments = await self.review(llm_content, self) reply_message.review_info = AgentReviewInfo( approve=approve, comments=comments, @@ -444,11 +457,12 @@ class ConversableAgent(Role, Agent): sender=sender, rely_messages=rely_messages, historical_dialogues=historical_dialogues, + reply_message=reply_message, ) with root_tracer.start_span( "agent.generate_reply.act", metadata={ - "llm_reply": llm_reply, + "llm_reply": llm_content, "sender": sender.name, "reviewer": reviewer.name if reviewer else None, "act_extent_param": act_extent_param, @@ -472,7 +486,7 @@ class ConversableAgent(Role, Agent): with root_tracer.start_span( "agent.generate_reply.verify", metadata={ - "llm_reply": llm_reply, + "llm_reply": llm_content, "sender": sender.name, "reviewer": reviewer.name if reviewer else None, }, @@ -486,7 +500,7 @@ class ConversableAgent(Role, Agent): span.metadata["reason"] = reason question: str = received_message.content or "" - ai_message: str = llm_reply or "" + ai_message: str = llm_content # 5.Optimize wrong answers myself if not check_pass: if not act_out.have_retry: @@ -541,9 +555,10 @@ class ConversableAgent(Role, Agent): async def thinking( self, messages: List[AgentMessage], + reply_message_id: str, sender: Optional[Agent] = None, prompt: Optional[str] = None, - ) -> Tuple[Optional[str], Optional[str]]: + ) -> Tuple[Optional[str], Optional[str], 
Optional[str]]: """Think and reason about the current task goal. Args: @@ -564,21 +579,53 @@ class ConversableAgent(Role, Agent): if not self.llm_client: raise ValueError("LLM client is not initialized!") - response = await self.llm_client.create( + + res_thinking = None + res_content = None + async for output in self.llm_client.create( context=llm_messages[-1].pop("context", None), messages=llm_messages, llm_model=llm_model, max_new_tokens=self.not_null_agent_context.max_new_tokens, temperature=self.not_null_agent_context.temperature, verbose=self.not_null_agent_context.verbose, - memory=self.memory.gpts_memory, - conv_id=self.not_null_agent_context.conv_id, - sender=sender.role if sender else "?", - stream_out=self.stream_out, - ) - return response, llm_model + ): + res_thinking, res_content = output + res_thinking = ( + res_thinking.strip().replace("\\n", "\n") + if res_thinking + else res_thinking + ) + res_content = ( + res_content.strip().replace("\\n", "\n") + if res_content + else res_content + ) + if self.stream_out: + temp_message = { + "uid": reply_message_id, + "type": "incr", + "sender": self.name or self.role, + "model": llm_model, + "thinking": res_thinking, + "content": res_content, + "avatar": self.avatar, + } + if not self.not_null_agent_context.output_process_message: + if self.is_final_role: + await self.memory.gpts_memory.push_message( + self.not_null_agent_context.conv_id, + stream_msg=temp_message, + ) + else: + await self.memory.gpts_memory.push_message( + self.not_null_agent_context.conv_id, + stream_msg=temp_message, + ) + + return res_thinking, res_content, llm_model except LLMChatError as e: - logger.error(f"model:{llm_model} generate Failed!{str(e)}") + logger.exception(f"model:{llm_model} generate Failed!{str(e)}") retry_count += 1 last_model = llm_model last_err = str(e) @@ -632,6 +679,8 @@ class ConversableAgent(Role, Agent): ai_message=message.content if message.content else "", resource=None, rely_action_out=last_out, + 
render_protocol=self.memory.gpts_memory.vis_converter, + message_id=message.message_id, **kwargs, ) span.metadata["action_out"] = last_out.to_dict() if last_out else None @@ -745,9 +794,13 @@ class ConversableAgent(Role, Agent): ) -> bool: gpts_message: GptsMessage = GptsMessage( conv_id=self.not_null_agent_context.conv_id, + message_id=message.message_id if message.message_id else uuid.uuid4().hex, sender=sender.role, + sender_name=sender.name, receiver=self.role, + receiver_name=self.name, role=role, + avatar=sender.avatar, rounds=message.rounds, is_success=message.success, app_code=( @@ -762,8 +815,9 @@ class ConversableAgent(Role, Agent): ), current_goal=message.current_goal, content=message.content if message.content else "", + thinking=message.thinking if message.thinking else "", context=( - json.dumps(message.context, ensure_ascii=False) + json.dumps(message.context, default=serialize, ensure_ascii=False) if message.context else None ), @@ -781,6 +835,7 @@ class ConversableAgent(Role, Agent): resource_info=( json.dumps(message.resource_info) if message.resource_info else None ), + show_message=message.show_message, ) with root_tracer.start_span( @@ -908,10 +963,11 @@ class ConversableAgent(Role, Agent): for item in gpts_messages: # Message conversion, priority is given to converting execution results, # and only model output results will be used if not. 
- content = item.content oai_messages.append( AgentMessage( - content=content, + message_id=item.message_id, + content=item.content, + thinking=item.thinking, context=( json.loads(item.context) if item.context is not None else None ), @@ -921,9 +977,11 @@ class ConversableAgent(Role, Agent): else None ), name=item.sender, + role=item.role, rounds=item.rounds, model_name=item.model_name, success=item.is_success, + show_message=item.show_message, ) ) return oai_messages @@ -933,23 +991,19 @@ class ConversableAgent(Role, Agent): ) -> str: logger.info(f"_a_select_llm_model:{excluded_models}") try: - all_models = await self.not_null_llm_client.models() - all_model_names = [item.model for item in all_models] - # TODO Currently only two strategies, priority and default, are implemented. - if self.not_null_llm_config.llm_strategy == LLMStrategyType.Priority: - priority: List[str] = [] - strategy_context = self.not_null_llm_config.strategy_context - if strategy_context is not None: - priority = json.loads(strategy_context) # type: ignore - can_uses = self._excluded_models( - all_model_names, priority, excluded_models + llm_strategy_cls = get_llm_strategy_cls( + self.not_null_llm_config.llm_strategy + ) + if not llm_strategy_cls: + raise ValueError( + f"Configured model policy not found {self.not_null_llm_config.llm_strategy}!" 
# noqa ) - else: - can_uses = self._excluded_models(all_model_names, None, excluded_models) - if can_uses and len(can_uses) > 0: - return can_uses[0] - else: - raise ValueError("No model service available!") + llm_strategy = llm_strategy_cls( + self.not_null_llm_config.llm_client, + self.not_null_llm_config.strategy_context, + ) + + return await llm_strategy.next_llm(excluded_models=excluded_models) except Exception as e: logger.error(f"{self.role} get next llm failed!{str(e)}") raise ValueError(f"Failed to allocate model service,{str(e)}!") @@ -969,11 +1023,14 @@ class ConversableAgent(Role, Agent): Returns: AgentMessage: A new message """ - return AgentMessage( + return AgentMessage.init_new( content=received_message.content, current_goal=received_message.current_goal, context=received_message.context, - rounds=received_message.rounds + 1, + rounds=received_message.rounds, + name=self.name, + role=self.role, + show_message=self.show_message, ) async def _a_init_reply_message( @@ -1076,6 +1133,7 @@ class ConversableAgent(Role, Agent): historical_dialogues: Optional[List[AgentMessage]] = None, context: Optional[Dict[str, Any]] = None, is_retry_chat: bool = False, + force_use_historical: bool = False, ) -> Tuple[List[AgentMessage], Optional[Dict]]: observation = received_message.content if not observation: @@ -1138,7 +1196,7 @@ class ConversableAgent(Role, Agent): role=ModelMessageRoleType.SYSTEM, ) ) - if historical_dialogues and not has_memories: + if (historical_dialogues and not has_memories) or force_use_historical: # If we can't read the memory, we need to rely on the historical dialogue for i in range(len(historical_dialogues)): if i % 2 == 0: diff --git a/packages/dbgpt-core/src/dbgpt/agent/core/memory/gpts/base.py b/packages/dbgpt-core/src/dbgpt/agent/core/memory/gpts/base.py index 0508f37d5..7596c376e 100644 --- a/packages/dbgpt-core/src/dbgpt/agent/core/memory/gpts/base.py +++ b/packages/dbgpt-core/src/dbgpt/agent/core/memory/gpts/base.py @@ -15,16 +15,22 
@@ class GptsPlan: """Gpts plan.""" conv_id: str + conv_round: int + sub_task_id: str sub_task_num: int + task_uid: str sub_task_content: Optional[str] + task_parent: Optional[str] = None + conv_round_id: Optional[str] = None sub_task_title: Optional[str] = None sub_task_agent: Optional[str] = None resource_name: Optional[str] = None - rely: Optional[str] = None agent_model: Optional[str] = None retry_times: int = 0 max_retry_times: int = 5 state: Optional[str] = Status.TODO.value + action: Optional[str] = None + action_input: Optional[str] = None result: Optional[str] = None @staticmethod @@ -32,11 +38,15 @@ class GptsPlan: """Create a GptsPlan object from a dictionary.""" return GptsPlan( conv_id=d["conv_id"], + conv_round=d["conv_id"], + task_uid=d["task_uid"], sub_task_num=d["sub_task_num"], + sub_task_id=d["sub_task_id"], + conv_round_id=d.get("conv_round_id"), + task_parent=d.get("task_parent"), sub_task_content=d["sub_task_content"], sub_task_agent=d["sub_task_agent"], resource_name=d["resource_name"], - rely=d["rely"], agent_model=d["agent_model"], retry_times=d["retry_times"], max_retry_times=d["max_retry_times"], @@ -55,12 +65,16 @@ class GptsMessage: conv_id: str sender: str - + sender_name: str + message_id: str receiver: str + receiver_name: str role: str content: str rounds: int = 0 is_success: bool = True + avatar: Optional[str] = None + thinking: Optional[str] = None app_code: Optional[str] = None app_name: Optional[str] = None current_goal: Optional[str] = None @@ -69,6 +83,7 @@ class GptsMessage: action_report: Optional[str] = None model_name: Optional[str] = None resource_info: Optional[str] = None + show_message: bool = True created_at: datetime = dataclasses.field(default_factory=datetime.utcnow) updated_at: datetime = dataclasses.field(default_factory=datetime.utcnow) @@ -77,9 +92,14 @@ class GptsMessage: """Create a GptsMessage object from a dictionary.""" return GptsMessage( conv_id=d["conv_id"], + message_id=d["message_id"], 
sender=d["sender"], + sender_name=d["sender_name"], receiver=d["receiver"], + receiver_name=d["receiver_name"], role=d["role"], + avatar=d.get("avatar"), + thinking=d["thinking"], content=d["content"], rounds=d["rounds"], is_success=d["is_success"], @@ -91,6 +111,7 @@ class GptsMessage: review_info=d["review_info"], action_report=d["action_report"], resource_info=d["resource_info"], + show_message=d["show_message"], created_at=d["created_at"], updated_at=d["updated_at"], ) @@ -125,13 +146,13 @@ class GptsPlansMemory(ABC): @abstractmethod def get_by_conv_id_and_num( - self, conv_id: str, task_nums: List[int] + self, conv_id: str, task_ids: List[str] ) -> List[GptsPlan]: """Get plans by conv_id and task number. Args: conv_id(str): conversation id - task_nums(List[int]): List of sequence numbers of plans in the same + task_ids(List[int]): List of sequence numbers of plans in the same conversation Returns: @@ -150,12 +171,23 @@ class GptsPlansMemory(ABC): """ @abstractmethod - def complete_task(self, conv_id: str, task_num: int, result: str) -> None: + def get_plans_by_msg_round(self, conv_id: str, rounds_id: str) -> List[GptsPlan]: + """Get unfinished planning steps. + + Args: + conv_id(str): Conversation id + rounds_id(str): rounds id + Returns: + List[GptsPlan]: List of planning steps + """ + + @abstractmethod + def complete_task(self, conv_id: str, task_id: str, result: str) -> None: """Set the planning step to complete. 
Args: conv_id(str): conversation id - task_num(int): Planning step num + task_id(str): Planning step id result(str): Plan step results """ @@ -163,7 +195,7 @@ class GptsPlansMemory(ABC): def update_task( self, conv_id: str, - task_num: int, + task_id: str, state: str, retry_times: int, agent: Optional[str] = None, @@ -174,7 +206,7 @@ class GptsPlansMemory(ABC): Args: conv_id(str): conversation id - task_num(int): Planning step num + task_id(str): Planning step num state(str): the status to update to retry_times(int): Latest number of retries agent(str): Agent's name @@ -202,6 +234,17 @@ class GptsMessageMemory(ABC): message(GptsMessage): Message object """ + @abstractmethod + def update(self, message: GptsMessage) -> None: + """Update message. + + Args: + message: + + Returns: + + """ + @abstractmethod def get_by_agent(self, conv_id: str, agent: str) -> Optional[List[GptsMessage]]: """Return all messages of the agent in the conversation. @@ -248,6 +291,17 @@ class GptsMessageMemory(ABC): List[GptsMessage]: List of messages """ + @abstractmethod + def get_by_message_id(self, message_id: str) -> Optional[GptsMessage]: + """Return one messages by message id. + + Args: + message_id: + + Returns: + + """ + @abstractmethod def get_last_message(self, conv_id: str) -> Optional[GptsMessage]: """Return the last message in the conversation. 
diff --git a/packages/dbgpt-core/src/dbgpt/agent/core/memory/gpts/default_gpts_memory.py b/packages/dbgpt-core/src/dbgpt/agent/core/memory/gpts/default_gpts_memory.py index 11758a1d9..cd93ec7a2 100644 --- a/packages/dbgpt-core/src/dbgpt/agent/core/memory/gpts/default_gpts_memory.py +++ b/packages/dbgpt-core/src/dbgpt/agent/core/memory/gpts/default_gpts_memory.py @@ -30,13 +30,24 @@ class DefaultGptsPlansMemory(GptsPlansMemory): plans.append(GptsPlan.from_dict(row_dict)) return plans + def get_plans_by_msg_round(self, conv_id: str, rounds_id: str) -> List[GptsPlan]: + """Get plans by conv_id and conv round.""" + result = self.df.query( # noqa + "conv_id==@conv_id and conv_round_id==@rounds_id" # noqa + ) + plans = [] + for row in result.itertuples(index=False, name=None): + row_dict = dict(zip(self.df.columns, row)) + plans.append(GptsPlan.from_dict(row_dict)) + return plans + def get_by_conv_id_and_num( - self, conv_id: str, task_nums: List[int] + self, conv_id: str, task_ids: List[str] ) -> List[GptsPlan]: """Get plans by conv_id and task number.""" - task_nums_int = [int(num) for num in task_nums] # noqa:F841 + task_nums_str = [str(num) for num in task_ids] # noqa:F841 result = self.df.query( # noqa - "conv_id==@conv_id and sub_task_num in @task_nums_int" # noqa + "conv_id==@conv_id and sub_task_id in @task_nums_str" # noqa ) plans = [] for row in result.itertuples(index=False, name=None): @@ -54,10 +65,10 @@ class DefaultGptsPlansMemory(GptsPlansMemory): plans.append(GptsPlan.from_dict(row_dict)) return plans - def complete_task(self, conv_id: str, task_num: int, result: str): + def complete_task(self, conv_id: str, task_id: str, result: str): """Set the planning step to complete.""" condition = (self.df["conv_id"] == conv_id) & ( - self.df["sub_task_num"] == task_num + self.df["sub_task_id"] == task_id ) self.df.loc[condition, "state"] = Status.COMPLETE.value self.df.loc[condition, "result"] = result @@ -65,7 +76,7 @@ class 
DefaultGptsPlansMemory(GptsPlansMemory): def update_task( self, conv_id: str, - task_num: int, + task_id: str, state: str, retry_times: int, agent: Optional[str] = None, @@ -74,7 +85,7 @@ class DefaultGptsPlansMemory(GptsPlansMemory): ): """Update the state of the planning step.""" condition = (self.df["conv_id"] == conv_id) & ( - self.df["sub_task_num"] == task_num + self.df["sub_task_id"] == task_id ) self.df.loc[condition, "state"] = state self.df.loc[condition, "retry_times"] = retry_times @@ -102,6 +113,9 @@ class DefaultGptsMessageMemory(GptsMessageMemory): """Append a message to the memory.""" self.df.loc[len(self.df)] = message.to_dict() + def update(self, message: GptsMessage) -> None: + pass + def get_by_agent(self, conv_id: str, agent: str) -> Optional[List[GptsMessage]]: """Get all messages sent or received by the agent in the conversation.""" result = self.df.query( @@ -144,6 +158,13 @@ class DefaultGptsMessageMemory(GptsMessageMemory): messages.append(GptsMessage.from_dict(row_dict)) return messages + def get_by_message_id(self, message_id: str) -> Optional[GptsMessage]: + result = self.df.query("message_id==@message_id") # noqa: F541 + for row in result.itertuples(index=False, name=None): + row_dict = dict(zip(self.df.columns, row)) + return GptsMessage.from_dict(row_dict) + return None + def get_last_message(self, conv_id: str) -> Optional[GptsMessage]: """Get the last message in the conversation.""" return None diff --git a/packages/dbgpt-core/src/dbgpt/agent/core/memory/gpts/gpts_memory.py b/packages/dbgpt-core/src/dbgpt/agent/core/memory/gpts/gpts_memory.py index c4b4c3fec..a5bcba664 100644 --- a/packages/dbgpt-core/src/dbgpt/agent/core/memory/gpts/gpts_memory.py +++ b/packages/dbgpt-core/src/dbgpt/agent/core/memory/gpts/gpts_memory.py @@ -9,15 +9,13 @@ from concurrent.futures import Executor, ThreadPoolExecutor from typing import Dict, List, Optional, Union from dbgpt.util.executor_utils import blocking_func_to_async -from dbgpt.vis.client 
import VisAgentMessages, VisAgentPlans, VisAppLink, vis_client +from .....util.json_utils import EnhancedJSONEncoder +from .....vis.vis_converter import DefaultVisConverter, VisProtocolConverter from ...action.base import ActionOutput -from ...schema import Status -from .base import GptsMessage, GptsMessageMemory, GptsPlansMemory +from .base import GptsMessage, GptsMessageMemory, GptsPlan, GptsPlansMemory from .default_gpts_memory import DefaultGptsMessageMemory, DefaultGptsPlansMemory -NONE_GOAL_PREFIX: str = "none_goal_count_" - logger = logging.getLogger(__name__) @@ -39,9 +37,16 @@ class GptsMemory: ) self._executor = executor or ThreadPoolExecutor(max_workers=2) self.messages_cache: defaultdict = defaultdict(list) + self.view_cache: defaultdict = defaultdict(list) + self.plans_cache: defaultdict = defaultdict(list) self.channels: defaultdict = defaultdict(Queue) - self.enable_vis_map: defaultdict = defaultdict(bool) self.start_round_map: defaultdict = defaultdict(int) + self._vis_converter: VisProtocolConverter = DefaultVisConverter() + + @property + def vis_converter(self): + """Return the vis converter""" + return self._vis_converter @property def plans_memory(self) -> GptsPlansMemory: @@ -56,19 +61,32 @@ class GptsMemory: def init( self, conv_id: str, - enable_vis_message: bool = True, history_messages: Optional[List[GptsMessage]] = None, + vis_converter: Optional[VisProtocolConverter] = None, start_round: int = 0, ): """Gpt memory init.""" self.channels[conv_id] = asyncio.Queue() - self.enable_vis_map[conv_id] = enable_vis_message self.messages_cache[conv_id] = history_messages if history_messages else [] self.start_round_map[conv_id] = start_round + if vis_converter: + self._vis_converter = vis_converter - def enable_vis_message(self, conv_id): - """Enable conversation message vis tag.""" - return self.enable_vis_map[conv_id] if conv_id in self.enable_vis_map else True + async def load_persistent_memory(self, conv_id: str): + """Load persistent memory.""" 
+ messages = self.messages_cache[conv_id] + if not messages: + messages = await blocking_func_to_async( + self._executor, self.message_memory.get_by_conv_id, conv_id + ) + self.messages_cache[conv_id] = messages + + plans = self.plans_cache[conv_id] + if not plans: + plans = await blocking_func_to_async( + self._executor, self.plans_memory.get_by_conv_id, conv_id + ) + self.plans_cache[conv_id] = plans def queue(self, conv_id: str): """Get conversation message queue.""" @@ -84,50 +102,152 @@ class GptsMemory: cache = self.messages_cache.pop(conv_id) # noqa del cache - # clear vis_enable_tag - vis_enable_tag = self.enable_vis_map.pop(conv_id) # noqa - del vis_enable_tag + # clear view cache + if self.view_cache.get(conv_id): + view_cache = self.view_cache.pop(conv_id) # noqa + del view_cache # clear start_roun start_round = self.start_round_map.pop(conv_id) # noqa del start_round - async def push_message(self, conv_id: str, temp_msg: Optional[str] = None): + async def push_stream_message( + self, conv_id: str, stream_msg: Optional[Union[Dict, str]] = None + ): + queue = self.queue(conv_id) + if not queue: + logger.warning(f"There is no message channel available for it!{conv_id}") + final_view = self.view_cache[conv_id] + await queue.put( + final_view + "\n" + self._vis_converter.visualization_stream(stream_msg) + ) + + async def push_message( + self, + conv_id: str, + gpt_msg: Optional[GptsMessage] = None, + stream_msg: Optional[Union[Dict, str]] = None, + ): """Push conversation message.""" queue = self.queue(conv_id) - enable_vis_tag = self.enable_vis_message(conv_id=conv_id) - if enable_vis_tag: - # 如果有临时消息内容需要push 拼接再最末尾,否则直接从短期记忆中发布最后消息 - message_view = await self.app_link_chat_message(conv_id) - if temp_msg: - temp_view = await self.agent_stream_message(temp_msg) - message_view = message_view + "\n" + temp_view - await queue.put(message_view) + if not queue: + logger.warning(f"There is no message channel available for it!{conv_id}") + final_view = await 
self.vis_messages(conv_id, gpt_msg, stream_msg) + self.view_cache[conv_id] = final_view + await queue.put(final_view) + async def vis_messages( + self, + conv_id: str, + gpt_msg: Optional[GptsMessage] = None, + stream_msg: Optional[Union[Dict, str]] = None, + ): + """Get all persistent messages that have been converted through the + visualization protocol(excluding the part that is currently being streamed.)""" # noqa: E501 + ## 消息数据流准备 + messages = [] + if conv_id in self.messages_cache: + messages_cache = self.messages_cache[conv_id] + if messages_cache and len(messages_cache) > 0: + start_round = ( + self.start_round_map[conv_id] + if conv_id in self.start_round_map + else 0 + ) + messages = messages_cache[start_round:] else: - # 非VIS消息模式,直接推送简单消息列表即可,不做任何处理 - message_views = await self.simple_message(conv_id) - if temp_msg: - temp_view = await self.agent_stream_message(temp_msg, False) - if temp_view and len(temp_view) > 0: - message_views.extend(temp_view) - await queue.put(message_views) + messages = await blocking_func_to_async( + self._executor, self.message_memory.get_by_conv_id, conv_id=conv_id + ) + messages = self._merge_messages(messages) + ## 消息可视化布局转换 + vis_view = await self._vis_converter.visualization( + messages=messages, + plans=await self.get_plans(conv_id=conv_id), + gpt_msg=gpt_msg, + stream_msg=stream_msg, + ) + return vis_view + + def _merge_messages(self, messages: List[GptsMessage]): + i = 0 + new_messages: List[GptsMessage] = [] + + while i < len(messages): + cu_item = messages[i] + from dbgpt.agent import UserProxyAgent + + # 屏蔽用户消息 + if cu_item.sender == UserProxyAgent().role: + i += 1 + continue + if not cu_item.show_message: + ## 接到消息的Agent不展示消息,消息直接往后传递展示 + if i + 1 < len(messages): + ne_item = messages[i + 1] + new_message = ne_item + new_message.sender = cu_item.sender + new_message.current_goal = ( + ne_item.current_goal or cu_item.current_goal + ) + new_message.resource_info = ( + ne_item.resource_info or cu_item.resource_info + 
) + new_messages.append(new_message) + i += 2 # 两个消息合并为一个 + continue + new_messages.append(cu_item) + i += 1 + + return new_messages + + async def chat_messages( + self, + conv_id: str, + ): + """Get chat messages.""" + while True: + queue = self.queue(conv_id) + if not queue: + break + item = await queue.get() + if item == "[DONE]": + queue.task_done() + break + else: + yield item + await asyncio.sleep(0.005) async def complete(self, conv_id: str): """Complete conversation message.""" queue = self.queue(conv_id) - - await queue.put("[DONE]") + if queue: + await queue.put("[DONE]") async def append_message(self, conv_id: str, message: GptsMessage): """Append message.""" - self.messages_cache[conv_id].append(message) + cache_idx = next( + ( + idx + for idx, c in enumerate(self.messages_cache[conv_id]) + if c.message_id == message.message_id + ), + -1, + ) + if cache_idx >= 0: + self.messages_cache[conv_id][cache_idx] = message + else: + self.messages_cache[conv_id].append(message) + await blocking_func_to_async( self._executor, self.message_memory.append, message ) + logger.info( + f"[memory.append_message]{json.dumps(message, cls=EnhancedJSONEncoder, ensure_ascii=False)}" # noqa: E501 + ) # 消息记忆后发布消息 - await self.push_message(conv_id) + await self.push_message(conv_id, message) async def get_messages(self, conv_id: str) -> List[GptsMessage]: """Get message by conv_id.""" @@ -138,14 +258,12 @@ class GptsMemory: ) return messages - async def get_agent_messages( - self, conv_id: str, agent_role: str - ) -> List[GptsMessage]: + async def get_agent_messages(self, conv_id: str, agent: str) -> List[GptsMessage]: """Get agent messages.""" gpt_messages = self.messages_cache[conv_id] result = [] for gpt_message in gpt_messages: - if gpt_message.sender == agent_role or gpt_messages.receiver == agent_role: + if gpt_message.sender == agent or gpt_messages.receiver == agent: result.append(gpt_message) return result @@ -178,287 +296,18 @@ class GptsMemory: # Just use the 
action_output now return [m["action_output"] for m in new_list if m["action_output"]] - async def _message_group_vis_build(self, message_group, vis_items: list): - num: int = 0 - if message_group: - last_goal = next(reversed(message_group)) - last_goal_message = None - if not last_goal.startswith(NONE_GOAL_PREFIX): - last_goal_messages = message_group[last_goal] - last_goal_message = last_goal_messages[-1] - - plan_temps: List[dict] = [] - need_show_singe_last_message = False - for key, value in message_group.items(): - num = num + 1 - if key.startswith(NONE_GOAL_PREFIX): - vis_items.append(await self._messages_to_plan_vis(plan_temps)) - plan_temps = [] - num = 0 - vis_items.append(await self._messages_to_agents_vis(value)) - else: - num += 1 - plan_temps.append( - { - "name": key, - "num": num, - "status": "complete", - "agent": value[0].receiver if value else "", - "markdown": await self._messages_to_agents_vis(value), - } - ) - need_show_singe_last_message = True - - if len(plan_temps) > 0: - vis_items.append(await self._messages_to_plan_vis(plan_temps)) - if need_show_singe_last_message and last_goal_message: - vis_items.append( - await self._messages_to_agents_vis([last_goal_message], True) - ) - return "\n".join(vis_items) - - async def agent_stream_message( - self, - message: Union[Dict, str], - enable_vis_message: bool = True, - ): - """Get agent stream message.""" - messages_view = [] - if isinstance(message, dict): - messages_view.append( - { - "sender": message["sender"], - "receiver": message["receiver"], - "model": message["model"], - "markdown": message["markdown"], - } - ) - else: - messages_view.append( - { - "sender": "?", - "receiver": "?", - "model": "?", - "markdown": message, - } - ) - if enable_vis_message: - return await vis_client.get(VisAgentMessages.vis_tag()).display( - content=messages_view - ) - else: - return messages_view - - async def _plan_vis_build(self, plan_group: dict[str, list]): - num: int = 0 - plan_items = [] - for key, 
value in plan_group.items(): - num = num + 1 - plan_items.append( - { - "name": key, - "num": num, - "status": "complete", - "agent": value[0].receiver if value else "", - "markdown": await self._messages_to_agents_vis(value), - } - ) - return await self._messages_to_plan_vis(plan_items) - - async def simple_message(self, conv_id: str): - """Get agent simple message.""" - messages_cache = self.messages_cache[conv_id] - if messages_cache and len(messages_cache) > 0: - messages = messages_cache - else: - messages = await blocking_func_to_async( - self._executor, self.message_memory.get_by_conv_id, conv_id=conv_id - ) - - simple_message_list = [] - for message in messages: - if message.sender == "Human": - continue - - action_report_str = message.action_report - view_info = message.content - action_out = None - if action_report_str and len(action_report_str) > 0: - action_out = ActionOutput.from_dict(json.loads(action_report_str)) - if action_out is not None: - view_info = action_out.content - - simple_message_list.append( - { - "sender": message.sender, - "receiver": message.receiver, - "model": message.model_name, - "markdown": view_info, - } - ) - - return simple_message_list - - async def app_link_chat_message(self, conv_id: str): - """Get app link chat message.""" - messages = [] - if conv_id in self.messages_cache: - messages_cache = self.messages_cache[conv_id] - if messages_cache and len(messages_cache) > 0: - start_round = ( - self.start_round_map[conv_id] - if conv_id in self.start_round_map - else 0 - ) - messages = messages_cache[start_round:] - else: - messages = await blocking_func_to_async( - self._executor, self.message_memory.get_by_conv_id, conv_id=conv_id - ) - - # VIS消息组装 - temp_group: Dict = {} - app_link_message: Optional[GptsMessage] = None - app_lanucher_message: Optional[GptsMessage] = None - - none_goal_count = 1 - for message in messages: - if message.sender in [ - "Intent Recognition Expert", - "App Link", - ] or message.receiver in 
["Intent Recognition Expert", "App Link"]: - if ( - message.sender in ["Intent Recognition Expert", "App Link"] - and message.receiver == "AppLauncher" - ): - app_link_message = message - if message.receiver != "Human": - continue - - if message.sender == "AppLauncher": - if message.receiver == "Human": - app_lanucher_message = message - continue - - current_gogal = message.current_goal - - last_goal = next(reversed(temp_group)) if temp_group else None - if last_goal: - last_goal_messages = temp_group[last_goal] - if current_gogal: - if current_gogal == last_goal: - last_goal_messages.append(message) - else: - temp_group[current_gogal] = [message] - else: - temp_group[f"{NONE_GOAL_PREFIX}{none_goal_count}"] = [message] - none_goal_count += 1 - else: - if current_gogal: - temp_group[current_gogal] = [message] - else: - temp_group[f"{NONE_GOAL_PREFIX}{none_goal_count}"] = [message] - none_goal_count += 1 - - vis_items: list = [] - if app_link_message: - vis_items.append( - await self._messages_to_app_link_vis( - app_link_message, app_lanucher_message - ) - ) - - return await self._message_group_vis_build(temp_group, vis_items) - - async def _messages_to_agents_vis( - self, messages: List[GptsMessage], is_last_message: bool = False - ): - if messages is None or len(messages) <= 0: - return "" - messages_view = [] - for message in messages: - action_report_str = message.action_report - view_info = message.content - if action_report_str and len(action_report_str) > 0: - action_out = ActionOutput.from_dict(json.loads(action_report_str)) - if action_out is not None: # noqa - if action_out.is_exe_success or is_last_message: # noqa - view = action_out.view - view_info = view if view else action_out.content - - messages_view.append( - { - "sender": message.sender, - "receiver": message.receiver, - "model": message.model_name, - "markdown": view_info, - "resource": ( - message.resource_info if message.resource_info else None - ), - } - ) - return await 
vis_client.get(VisAgentMessages.vis_tag()).display( - content=messages_view + async def append_plans(self, conv_id: str, plans: List[GptsPlan]): + """Append plans.""" + self.plans_cache[conv_id].extend(plans) + await blocking_func_to_async( + self._executor, self.plans_memory.batch_save, plans ) - async def _messages_to_plan_vis(self, messages: List[Dict]): - if messages is None or len(messages) <= 0: - return "" - return await vis_client.get(VisAgentPlans.vis_tag()).display(content=messages) - - async def _messages_to_app_link_vis( - self, link_message: GptsMessage, lanucher_message: Optional[GptsMessage] = None - ): - logger.info("app link vis build") - if link_message is None: - return "" - param = {} - link_report_str = link_message.action_report - if link_report_str and len(link_report_str) > 0: - action_out = ActionOutput.from_dict(json.loads(link_report_str)) - if action_out is not None: - if action_out.is_exe_success: - temp = json.loads(action_out.content) - - param["app_code"] = temp["app_code"] - param["app_name"] = temp["app_name"] - param["app_desc"] = temp.get("app_desc", "") - param["app_logo"] = "" - param["status"] = Status.RUNNING.value - - else: - param["status"] = Status.FAILED.value - param["msg"] = action_out.content - - if lanucher_message: - lanucher_report_str = lanucher_message.action_report - if lanucher_report_str and len(lanucher_report_str) > 0: - lanucher_action_out = ActionOutput.from_dict( - json.loads(lanucher_report_str) - ) - if lanucher_action_out is not None: - if lanucher_action_out.is_exe_success: - param["status"] = Status.COMPLETE.value - else: - param["status"] = Status.FAILED.value - param["msg"] = lanucher_action_out.content - else: - param["status"] = Status.COMPLETE.value - return await vis_client.get(VisAppLink.vis_tag()).display(content=param) - - async def chat_messages( - self, - conv_id: str, - ): - """Get chat messages.""" - while True: - queue = self.queue(conv_id) - if not queue: - break - item = await 
queue.get() - if item == "[DONE]": - queue.task_done() - break - else: - yield item - await asyncio.sleep(0.005) + async def get_plans(self, conv_id: str) -> List[GptsPlan]: + """Get plans by conv_id.""" + plans = self.plans_cache[conv_id] + if not plans: + plans = await blocking_func_to_async( + self._executor, self.plans_memory.get_by_conv_id, conv_id + ) + return plans diff --git a/packages/dbgpt-core/src/dbgpt/agent/core/plan/__init__.py b/packages/dbgpt-core/src/dbgpt/agent/core/plan/__init__.py index 95f8bb56f..de59d24bd 100644 --- a/packages/dbgpt-core/src/dbgpt/agent/core/plan/__init__.py +++ b/packages/dbgpt-core/src/dbgpt/agent/core/plan/__init__.py @@ -1,5 +1,8 @@ """Plan module for the agent.""" +from .auto.plan_action import PlanAction, PlanInput # noqa: F401 +from .auto.planner_agent import PlannerAgent # noqa: F401 +from .auto.team_auto_plan import AutoPlanChatManager # noqa: F401 from .awel.agent_operator import ( # noqa: F401 AgentDummyTrigger, AWELAgentOperator, @@ -15,15 +18,19 @@ from .awel.team_awel_layout import ( # noqa: F401 DefaultAWELLayoutManager, WrappedAWELLayoutManager, ) -from .plan_action import PlanAction, PlanInput # noqa: F401 -from .planner_agent import PlannerAgent # noqa: F401 -from .team_auto_plan import AutoPlanChatManager # noqa: F401 +from .react.plan_action import ReActAction, TaskParam +from .react.planner_agent import ReActPlannerAgent +from .react.team_react_plan import ReActPlanChatManager # noqa: F401 __all__ = [ "PlanAction", "PlanInput", "PlannerAgent", "AutoPlanChatManager", + "ReActPlanChatManager", + "ReActPlannerAgent", + "TaskParam", + "ReActAction", "AWELAgent", "AWELAgentConfig", "AWELAgentResource", diff --git a/packages/dbgpt-core/src/dbgpt/agent/core/plan/auto/__init__.py b/packages/dbgpt-core/src/dbgpt/agent/core/plan/auto/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/packages/dbgpt-core/src/dbgpt/agent/core/plan/plan_action.py 
b/packages/dbgpt-core/src/dbgpt/agent/core/plan/auto/plan_action.py similarity index 79% rename from packages/dbgpt-core/src/dbgpt/agent/core/plan/plan_action.py rename to packages/dbgpt-core/src/dbgpt/agent/core/plan/auto/plan_action.py index 4e6f1b1f1..905ed0d19 100644 --- a/packages/dbgpt-core/src/dbgpt/agent/core/plan/plan_action.py +++ b/packages/dbgpt-core/src/dbgpt/agent/core/plan/auto/plan_action.py @@ -4,14 +4,13 @@ import logging from typing import List, Optional from dbgpt._private.pydantic import BaseModel, Field -from dbgpt.vis.tags.vis_agent_plans import Vis, VisAgentPlans - -from ...resource.base import AgentResource -from ..action.base import Action, ActionOutput -from ..agent import AgentContext -from ..memory.gpts.base import GptsPlan -from ..memory.gpts.gpts_memory import GptsPlansMemory -from ..schema import Status +from dbgpt.agent.core.action.base import Action, ActionOutput +from dbgpt.agent.core.agent import AgentContext +from dbgpt.agent.core.memory.gpts.base import GptsPlan +from dbgpt.agent.core.memory.gpts.gpts_memory import GptsPlansMemory +from dbgpt.agent.core.schema import Status +from dbgpt.agent.resource.base import AgentResource +from dbgpt.vis import SystemVisTag logger = logging.getLogger(__name__) @@ -39,14 +38,11 @@ class PlanAction(Action[List[PlanInput]]): """Plan action class.""" def __init__(self, **kwargs): - """Create a plan action.""" + """Create a reasoning_engine action.""" super().__init__(**kwargs) - self._render_protocol = VisAgentPlans() - @property - def render_protocol(self) -> Optional[Vis]: - """Return the render protocol.""" - return self._render_protocol + ## this action out view vis tag name + self.action_view_tag: str = SystemVisTag.VisPlans.value @property def out_model_type(self): @@ -61,7 +57,7 @@ class PlanAction(Action[List[PlanInput]]): need_vis_render: bool = True, **kwargs, ) -> ActionOutput: - """Run the plan action.""" + """Run the reasoning_engine action.""" context: AgentContext = 
kwargs["context"] plans_memory: GptsPlansMemory = kwargs["plans_memory"] try: @@ -81,14 +77,15 @@ class PlanAction(Action[List[PlanInput]]): for item in param: plan = GptsPlan( conv_id=context.conv_id, - sub_task_num=item.serial_number, + sub_task_id=str(item.serial_number), sub_task_content=item.content, + conv_round=kwargs.get("round", 0), ) plan.resource_name = "" plan.max_retry_times = context.max_retry_round plan.sub_task_agent = item.agent plan.sub_task_title = item.content - plan.rely = item.rely + plan.task_parent = item.rely plan.retry_times = 0 plan.state = Status.TODO.value plan_objects.append(plan) @@ -99,9 +96,9 @@ class PlanAction(Action[List[PlanInput]]): except Exception as e: logger.exception(str(e)) fail_reason = ( - f"The generated plan cannot be stored, reason: {str(e)}." - f" Please check whether it is a problem with the plan content. " - f"If so, please regenerate the correct plan. If not, please return" + f"The generated reasoning_engine cannot be stored, reason: {str(e)}." # noqa: E501 + f" Please check whether it is a problem with the reasoning_engine content. " # noqa: E501 + f"If so, please regenerate the correct reasoning_engine. If not, please return" # noqa: E501 f" 'TERMINATE'." 
) response_success = False diff --git a/packages/dbgpt-core/src/dbgpt/agent/core/plan/planner_agent.py b/packages/dbgpt-core/src/dbgpt/agent/core/plan/auto/planner_agent.py similarity index 90% rename from packages/dbgpt-core/src/dbgpt/agent/core/plan/planner_agent.py rename to packages/dbgpt-core/src/dbgpt/agent/core/plan/auto/planner_agent.py index 8f7df8c24..49078d0aa 100644 --- a/packages/dbgpt-core/src/dbgpt/agent/core/plan/planner_agent.py +++ b/packages/dbgpt-core/src/dbgpt/agent/core/plan/auto/planner_agent.py @@ -3,12 +3,12 @@ from typing import Any, Dict, List, Optional from dbgpt._private.pydantic import Field +from dbgpt.agent.core.agent import Agent, AgentMessage +from dbgpt.agent.core.base_agent import ConversableAgent +from dbgpt.agent.core.profile import DynConfig, ProfileConfig +from dbgpt.agent.resource.pack import ResourcePack -from ...resource.pack import ResourcePack -from ..agent import Agent, AgentMessage -from ..base_agent import ConversableAgent -from ..plan.plan_action import PlanAction -from ..profile import DynConfig, ProfileConfig +from .plan_action import PlanAction class PlannerAgent(ConversableAgent): @@ -35,7 +35,7 @@ class PlannerAgent(ConversableAgent): "capabilities, using the provided resources, solve user problems by " "coordinating intelligent agents. 
Please utilize your LLM's knowledge " "and understanding ability to comprehend the intent and goals of the " - "user's problem, generating a task plan that can be completed through" + "user's problem, generating a task reasoning_engine that can be completed through" # noqa: E501 " the collaboration of intelligent agents without user assistance.", category="agent", key="dbgpt_agent_plan_planner_agent_profile_goal", @@ -47,11 +47,11 @@ class PlannerAgent(ConversableAgent): ), constraints=DynConfig( [ - "Every step of the task plan should exist to advance towards solving " + "Every step of the task reasoning_engine should exist to advance towards solving " # noqa: E501 "the user's goals. Do not generate meaningless task steps; ensure " "that each step has a clear goal and its content is complete.", "Pay attention to the dependencies and logic of each step in the task " - "plan. For the steps that are depended upon, consider the data they " + "reasoning_engine. For the steps that are depended upon, consider the data they " # noqa: E501 "depend on and whether it can be obtained based on the current goal. " "If it cannot be obtained, please indicate in the goal that the " "dependent data needs to be generated.", @@ -63,7 +63,7 @@ class PlannerAgent(ConversableAgent): "used, and you may use only the necessary parts of them. Allocate " "them to appropriate steps strictly based on their described " "capabilities and limitations. Each intelligent agent can be reused.", - "Utilize the provided resources to assist in generating the plan " + "Utilize the provided resources to assist in generating the reasoning_engine " # noqa: E501 "steps according to the actual needs of the user's goals. Do not use " "unnecessary resources.", "Each step should ideally use only one type of resource to accomplish " @@ -76,10 +76,10 @@ class PlannerAgent(ConversableAgent): "Try to merge continuous steps that have sequential dependencies. 
If " "the user's goal does not require splitting, you can create a " "single-step task with content that is the user's goal.", - "Carefully review the plan to ensure it comprehensively covers all " + "Carefully review the reasoning_engine to ensure it comprehensively covers all " # noqa: E501 "information involved in the user's problem and can ultimately " "achieve the goal. Confirm whether each step includes the necessary " - "resource information, such as URLs, resource names, etc.", + "resource information, such as URLs, resource names, etc.", # noqa: E501 ], category="agent", key="dbgpt_agent_plan_planner_agent_profile_constraints", @@ -144,12 +144,12 @@ assistants:[ super().__init__(**kwargs) self._init_actions([PlanAction]) - def _init_reply_message( + def init_reply_message( self, received_message: AgentMessage, rely_messages: Optional[List[AgentMessage]] = None, ) -> AgentMessage: - reply_message = super()._init_reply_message(received_message) + reply_message = super().init_reply_message(received_message) reply_message.context = { "agents": "\n".join([f"- {item.role}:{item.desc}" for item in self.agents]), } diff --git a/packages/dbgpt-core/src/dbgpt/agent/core/plan/team_auto_plan.py b/packages/dbgpt-core/src/dbgpt/agent/core/plan/auto/team_auto_plan.py similarity index 86% rename from packages/dbgpt-core/src/dbgpt/agent/core/plan/team_auto_plan.py rename to packages/dbgpt-core/src/dbgpt/agent/core/plan/auto/team_auto_plan.py index 20d5014ac..3bd2a6583 100644 --- a/packages/dbgpt-core/src/dbgpt/agent/core/plan/team_auto_plan.py +++ b/packages/dbgpt-core/src/dbgpt/agent/core/plan/auto/team_auto_plan.py @@ -1,19 +1,20 @@ -"""Auto plan chat manager agent.""" +"""Auto reasoning_engine chat manager agent.""" import logging +import uuid from typing import Dict, List, Optional, Tuple +from dbgpt.agent.core.action.base import ActionOutput +from dbgpt.agent.core.agent import Agent, AgentMessage +from dbgpt.agent.core.agent_manage import mentioned_agents, 
participant_roles +from dbgpt.agent.core.base_agent import ConversableAgent +from dbgpt.agent.core.base_team import ManagerAgent +from dbgpt.agent.core.memory.gpts.base import GptsPlan +from dbgpt.agent.core.profile import DynConfig, ProfileConfig +from dbgpt.agent.core.schema import Status from dbgpt.core.interface.message import ModelMessageRoleType -from ..action.base import ActionOutput -from ..agent import Agent, AgentMessage -from ..agent_manage import mentioned_agents, participant_roles -from ..base_agent import ConversableAgent -from ..base_team import ManagerAgent -from ..memory.gpts.base import GptsPlan -from ..plan.planner_agent import PlannerAgent -from ..profile import DynConfig, ProfileConfig -from ..schema import Status +from .planner_agent import PlannerAgent logger = logging.getLogger(__name__) @@ -33,14 +34,14 @@ class AutoPlanChatManager(ManagerAgent): key="dbgpt_agent_plan_team_auto_plan_profile_role", ), goal=DynConfig( - "Advance the task plan generated by the planning agent. If the plan " + "Advance the task reasoning_engine generated by the planning agent. 
If the reasoning_engine " # noqa: E501 "does not pre-allocate an agent, it needs to be coordinated with the " "appropriate agent to complete.", category="agent", key="dbgpt_agent_plan_team_auto_plan_profile_goal", ), desc=DynConfig( - "Advance the task plan generated by the planning agent.", + "Advance the task reasoning_engine generated by the planning agent.", category="agent", key="dbgpt_agent_plan_team_auto_plan_profile_desc", ), @@ -126,6 +127,7 @@ class AutoPlanChatManager(ManagerAgent): f"Please only return the role, such as: {agents[0].name}", ) ], + reply_message_id=uuid.uuid4().hex, prompt=self.select_speaker_msg(agents), ) if not fina_name: @@ -184,7 +186,7 @@ class AutoPlanChatManager(ManagerAgent): return ActionOutput( is_exe_success=False, content="Retrying 3 times based on current application " - "resources still fails to build a valid plan!", + "resources still fails to build a valid reasoning_engine!", ) planner: ConversableAgent = ( await PlannerAgent() @@ -213,7 +215,7 @@ class AutoPlanChatManager(ManagerAgent): if plan.state in [Status.TODO.value, Status.RETRYING.value] ] if not todo_plans or len(todo_plans) <= 0: - # The plan has been fully executed and a success message is sent + # The reasoning_engine has been fully executed and a success message is sent # noqa: E501 # to the user. 
async def thinking(
    self,
    messages: List[AgentMessage],
    reply_message_id: str,
    sender: Optional[Agent] = None,
    prompt: Optional[str] = None,
) -> Tuple[Optional[str], Optional[str], Optional[str]]:
    """Think and reason about the current task goal.

    TeamManager, which is based on processes and plans by default, only needs
    to ensure execution and does not require additional LLM thinking, so it
    simply echoes the last message.

    Args:
        messages: The conversation messages; only the last one is used.
        reply_message_id: Id of the reply message being built (unused here).
        sender: Optional sending agent (unused here).
        prompt: Optional extra prompt (unused here).

    Returns:
        A 3-tuple ``(thinking, content, extra)`` matching the declared return
        type. The base implementation has no thinking/extra, so those slots
        are ``None``.
    """
    if not messages:
        # BUG FIX: the original returned a 2-tuple here, while the signature
        # (and every caller) expects a 3-tuple — unpacking would crash.
        return None, None, None
    message = messages[-1]
    # Record the echoed message in the LLM dialogue history.
    self.messages.append(message.to_llm_message())
    return None, message.content, None
import AgentMessage +from ....core.action.blank_action import BlankAction +from ....core.base_agent import ConversableAgent +from ....core.profile import DynConfig, ProfileConfig + +logger = logging.getLogger(__name__) + + +class FinalReportAssistantAgent(ConversableAgent): + """Reporter Assistant Agent.""" + + profile: ProfileConfig = ProfileConfig( + name=DynConfig( + "final_report_agent", + category="agent", + key="dbgpt_agent_expand_summary_assistant_agent_profile_name", + ), + role=DynConfig( + "final_report_agent", + category="agent", + key="dbgpt_agent_expand_summary_assistant_agent_profile_role", + ), + goal=DynConfig( + "Summarize answer summaries based on user questions from provided " + "resource information or from historical conversation memories.", + category="agent", + key="dbgpt_agent_expand_summary_assistant_agent_profile_goal", + ), + constraints=DynConfig( + [ + "Prioritize the summary of answers to user questions from the improved " + "resource text. If no relevant information is found, summarize it from " + "the historical dialogue memory given. It is forbidden to make up your " + "own.", + "You need to first detect user's question that you need to answer with " + "your summarization.", + "Extract the provided text content used for summarization.", + "Then you need to summarize the extracted text content.", + "Output the content of summarization ONLY related to user's question. 
" + "The output language must be the same to user's question language.", + "If you think the provided text content is not related to user " + "questions at all, ONLY output 'Did not find the information you " + "want.'!!.", + ], + category="agent", + key="dbgpt_agent_expand_summary_assistant_agent_profile_constraints", + ), + desc=DynConfig( + "You can summarize provided text content according to user's questions" + " and output the summarization.", + category="agent", + key="dbgpt_agent_expand_summary_assistant_agent_profile_desc", + ), + ) + + def __init__(self, **kwargs): + """Create a new SummaryAssistantAgent instance.""" + super().__init__(**kwargs) + self._post_reranks = [RetrieverNameRanker(5)] + self._init_actions([BlankAction]) + + async def load_resource(self, question: str, is_retry_chat: bool = False): + """Load agent bind resource.""" + if self.resource: + if self.resource.is_pack: + sub_resources = self.resource.sub_resources + candidates_results: List = [] + resource_candidates_map = {} + info_map = {} + prompt_list = [] + for resource in sub_resources: + ( + candidates, + prompt_template, + resource_reference, + ) = await resource.get_resources(question=question) + resource_candidates_map[resource.name] = ( + candidates, + resource_reference, + prompt_template, + ) + candidates_results.extend(candidates) # type: ignore # noqa + new_candidates_map = self.post_filters(resource_candidates_map) + for resource, ( + candidates, + references, + prompt_template, + ) in new_candidates_map.items(): + content = "\n".join( + [ + f"--{i}--:" + chunk.content + for i, chunk in enumerate(candidates) # type: ignore # noqa + ] + ) + prompt_list.append( + prompt_template.format(name=resource, content=content) + ) + info_map.update(references) + return "\n".join(prompt_list), info_map + else: + resource_prompt, resource_reference = await self.resource.get_prompt( + lang=self.language, question=question + ) + return resource_prompt, resource_reference + return None, 
def post_filters(self, resource_candidates_map: Optional[Dict[str, Tuple]] = None):
    """Apply the configured re-rankers to each resource's candidate chunks.

    For every resource entry ``(candidates, references, prompt_template)``,
    the first re-ranker that yields a non-empty result replaces the
    candidates; resources where every re-ranker filters everything out are
    left with an empty candidate/reference pair. If no resource produced a
    re-ranked hit at all, the original map is returned unchanged.

    Args:
        resource_candidates_map: Mapping of resource name to a
            ``(candidates, references, prompt_template)`` tuple.

    Returns:
        The re-ranked map when at least one re-ranker hit, otherwise the
        input unchanged (including ``None``/empty input).
    """
    if not resource_candidates_map:
        return resource_candidates_map

    reranked = dict(resource_candidates_map)
    any_hit = False
    for name, (candidates, references, prompt_template) in resource_candidates_map.items():
        for ranker in self._post_reranks:
            survivors = ranker.rank(candidates)
            # Default to an emptied entry; overwritten below on a hit.
            reranked[name] = [], [], prompt_template
            if survivors:
                reranked[name] = (survivors, references, prompt_template)
                any_hit = True
                break

    if any_hit:
        logger.info("Post filters hit, use new candidates.")
        return reranked
    return resource_candidates_map
dbgpt.vis.schema import VisPlansContent, VisTaskContent + +logger = logging.getLogger(__name__) + + +class TaskParam(BaseModel): + """Plan input model.""" + + task_id: Any = Field( + ..., + description="任务编号", + ) + parent_id: Any = Field( + ..., + description="父任务编号", + ) + task_goal: str = Field( + ..., + description="任务目标内容", + ) + agent: str = Field(..., description="当前任务可交给那个代理完成") + assertion: str = Field( + None, + description="当目标的判断规则和标准", + ) + slots: dict = Field( + None, + description="提取到的用户输入的参数信息", + ) + + def to_dict(self): + """Convert the object to a dictionary.""" + return model_to_dict(self) + + +class ReActAction(Action[List[TaskParam]]): + """Plan action class.""" + + def __init__(self, **kwargs): + """Create a plan action.""" + super().__init__() + self.action_view_tag: str = SystemVisTag.VisPlans.value + + @property + def out_model_type(self): + """Return the output model type.""" + return List[TaskParam] + + def _create_example( + self, + model_type: Union[Type[BaseModel], List[Type[BaseModel]]], + ) -> Optional[Union[Dict[str, Any], List[Dict[str, Any]]]]: + return [ + { + "parent_id": "父任务编号,当前任务是根据之前的那个任务逻辑推导得出,标记任务逻辑血缘,默认0作为所有无血缘任务的父任务", # noqa: E501 + "task_id": "当前任务编号,如果存在父任务,确保当前任务编号大于父任务的最大子任务序号,同一个父任务下,任务序号不能重复 ", # noqa: E501 + "task_goal": "任务目标x", + "agent": "当前任务可交给那个代理完成", + "assertion": "当目标的判断规则和标准", + "slots": { + "参数名1": "当前任务关联的具体目标等参数值信息1", + "参数名2": "当前任务关联的具体目标等参数值信息2", + }, + } + ] + + async def run( + self, + ai_message: str, + resource: Optional[AgentResource] = None, + rely_action_out: Optional[ActionOutput] = None, + need_vis_render: bool = True, + **kwargs, + ) -> ActionOutput: + """Run the plan action.""" + try: + context: AgentContext = kwargs["context"] + plans_memory: GptsPlansMemory = kwargs["plans_memory"] + task_params: List[TaskParam] = self._input_convert( + ai_message, List[TaskParam] + ) + + plan_objects = [] + + tasks: List[VisTaskContent] = [] + + for item in task_params: + task_uid = 
uuid.uuid4().hex + plan = GptsPlan( + conv_id=context.conv_id, + task_uid=task_uid, + sub_task_num=0, + sub_task_id=item.task_id, + sub_task_title=item.task_goal, + sub_task_content=json.dumps(item.slots), + task_parent=item.parent_id, + conv_round=kwargs.get("round", 0), + conv_round_id=kwargs.get("round_id", None), + resource_name=None, + max_retry_times=context.max_retry_round, + sub_task_agent=item.agent, + retry_times=kwargs.get("retry_times", 0), + state=Status.TODO.value, + ) + + plan_objects.append(plan) + + tasks.append( + VisTaskContent( + task_uid=task_uid, + task_id=str(item.task_id), + task_title=item.task_goal, + task_name=item.task_goal, + task_content=json.dumps(item.slots), + task_parent=str(item.parent_id), + task_link=None, + agent_id=item.agent, + agent_name=item.agent, + agent_link="", + avatar="", + ) + ) + drsk_plan_content = VisPlansContent( + uid=uuid.uuid4().hex, type="all", tasks=tasks + ) + if self.render_protocol: + view = await self.render_protocol.display( + content=drsk_plan_content.to_dict() + ) + elif need_vis_render: + raise NotImplementedError("The render_protocol should be implemented.") + else: + view = None + + ## 任务规划记录,方便后续做进展跟踪 + # plans_memory.remove_by_conv_id(context.conv_id) + plans_memory.batch_save(plan_objects) + + return ActionOutput( + is_exe_success=True, + content=json.dumps( + [item.to_dict() for item in task_params], ensure_ascii=False + ), + view=view, + ) + except Exception as e: + logger.exception("React Plan Action Run Failed!") + return ActionOutput( + is_exe_success=False, content=f"React Plan action run failed!{str(e)}" + ) diff --git a/packages/dbgpt-core/src/dbgpt/agent/core/plan/react/planner_agent.py b/packages/dbgpt-core/src/dbgpt/agent/core/plan/react/planner_agent.py new file mode 100644 index 000000000..82158d7d9 --- /dev/null +++ b/packages/dbgpt-core/src/dbgpt/agent/core/plan/react/planner_agent.py @@ -0,0 +1,115 @@ +"""Planner Agent.""" + +import logging +from typing import Any, Dict, List, 
Optional + +from dbgpt._private.pydantic import Field +from dbgpt.agent.core.agent import Agent, AgentMessage +from dbgpt.agent.core.base_agent import ConversableAgent +from dbgpt.agent.core.profile import DynConfig, ProfileConfig + +from .plan_action import ReActAction + +logger = logging.getLogger(__name__) + + +class ReActPlannerAgent(ConversableAgent): + """Planner Agent. + + Planner agent, realizing task goal planning decomposition through LLM. + """ + + agents: List[ConversableAgent] = Field(default_factory=list) + profile: ProfileConfig = ProfileConfig( + name=DynConfig( + "Planner", + category="agent", + key="dbgpt_agent_plan_react_planner_agent_profile_name", + ), + role=DynConfig( + "ReActPlanner", + category="agent", + key="dbgpt_agent_plan_react_planner_agent_profile_role", + ), + goal=DynConfig( + """复杂软件系统的风险分析专家,你需要基于提供给你的背景知识、分析经验再结合历史消息里的分析处理进展和数据状态,对用户输入的异常 + 或问题进行专业的分析思考,寻找接下来可行的分析处理方法和路径,并将找到的下一步可行路径整理成一个目标单一明确信息完整的可执行任务, + 每次请只给出一个任务即可, 并将目标分配给合适的代理去完成.如果问题已经可以基于历史信息给出确切结论, + 把结论信息交给'final_report_agent'进行整理回复. 
def prepare_act_param(
    self,
    received_message: Optional[AgentMessage],
    sender: Agent,
    rely_messages: Optional[List[AgentMessage]] = None,
    **kwargs,
) -> Dict[str, Any]:
    """Prepare the parameters for the act method.

    Args:
        received_message: The message the agent received (unused here).
        sender: The agent that sent the message (unused here).
        rely_messages: Messages this reply relies on (unused here).
        **kwargs: Must contain ``reply_message``, the reply being built.

    Returns:
        The keyword arguments passed to the plan action's ``run``.

    Raises:
        ValueError: If ``reply_message`` was not supplied in ``kwargs``.
    """
    reply_message = kwargs.get("reply_message")
    if not reply_message:
        # BUG FIX: the original did `raise "..."` — raising a plain string is
        # a TypeError in Python 3; raise a proper exception instead.
        raise ValueError("planner agent need reply_message params!")
    return {
        "context": self.not_null_agent_context,
        "plans_memory": self.memory.plans_memory,
        "round": reply_message.rounds,
        "round_id": reply_message.round_id,
        "retry_times": self.current_retry_counter,
    }
"ReActManager", + category="agent", + key="dbgpt_agent_plan_team_react_plan_profile_role", + ), + goal=DynConfig( + "Advance the task reasoning_engine generated by the planning agent." + " If the reasoning_engine " + "does not pre-allocate an agent, it needs to be coordinated with the " + "appropriate agent to complete.", + category="agent", + key="dbgpt_agent_plan_team_auto_plan_profile_goal", + ), + desc=DynConfig( + "Advance the task reasoning_engine generated by the planning agent.", + category="agent", + key="dbgpt_agent_plan_team_auto_plan_profile_desc", + ), + ) + concurrency_limit: int = 6 + # 当前Agent消息是否显示 + show_message: bool = False + + def __init__(self, **kwargs): + """Create a new AutoPlanChatManager instance.""" + super().__init__(**kwargs) + + async def process_rely_message( + self, conv_id: str, now_plan: GptsPlan, speaker: Agent + ): + """Process the dependent message.""" + rely_prompt = None + rely_messages: List[Dict] = [] + + if now_plan.rely and len(now_plan.rely) > 0: + rely_tasks_list = now_plan.rely.split(",") + rely_tasks_list_int = [int(i) for i in rely_tasks_list] + rely_tasks = self.memory.plans_memory.get_by_conv_id_and_num( + conv_id, rely_tasks_list_int + ) + if rely_tasks: + rely_prompt = ( + "Read the result data of the dependent steps in the above" + " historical message to complete the current goal:" + ) + for rely_task in rely_tasks: + rely_messages.append( + { + "content": rely_task.sub_task_content, + "role": ModelMessageRoleType.HUMAN, + "name": rely_task.sub_task_agent, + } + ) + rely_messages.append( + { + "content": rely_task.result, + "role": ModelMessageRoleType.AI, + "name": rely_task.sub_task_agent, + } + ) + return rely_prompt, rely_messages + + async def act( + self, + message: Optional[AgentMessage], + sender: Optional[Agent] = None, + reviewer: Optional[Agent] = None, + **kwargs, + ) -> Optional[ActionOutput]: + """Perform an action based on the received message.""" + if not sender: + return ActionOutput( + 
is_exe_success=False, + content="The sender cannot be empty!", + ) + + try: + message_rounds = message.rounds + last_round_messages: List = [] + all_task_messages: List = [] + all_messages: List = [] + + for i in range(self.max_round): + if not self.memory: + return ActionOutput( + is_exe_success=False, + content="The memory cannot be empty!", + ) + + ## 读取新的背景知识 + + ## 读取历史消息进展 + + ## 内置报告Agent TODO 考虑外部绑定 + final_reporter: ConversableAgent = ( + await FinalReportAssistantAgent() + .bind(self.memory) + .bind(self.agent_context) + .bind(self.llm_config) + .build() + ) + self.agents.append(final_reporter) + ## 规划新的任务步骤 + planner: ConversableAgent = ( + await ReActPlannerAgent() + .bind(self.memory) + .bind(self.agent_context) + .bind(self.bind_prompt) + .bind(self.llm_config) + .bind_agents(self.agents) + .build() + ) + + last_round_messages.clear() + + plan_in_message = AgentMessage.init_new( + content=message.content, rounds=message_rounds + ) + + await self.send( + message=plan_in_message, recipient=planner, request_reply=False + ) + + plan_message = await planner.generate_reply( + received_message=plan_in_message, + sender=self, + reviewer=reviewer, + historical_dialogues=all_messages, + force_use_historical=True, + ) + + last_round_messages.append(plan_in_message) + all_messages.append(plan_in_message) + await planner.send( + message=plan_message, recipient=self, request_reply=False + ) + message_rounds = plan_message.rounds + + last_round_messages.append(plan_message) + all_messages.append(plan_message) + if plan_message.action_report.is_exe_success: + task_params = json.loads(plan_message.action_report.content) + else: + continue + + ## 执行新的任务步骤(排除需要用户代理处理的任务) + api_tasks = [] + + ask_user = None + agent_role_map = {agent.name: agent for agent in self.agents} + task_in_message_map = {} + for task in task_params: + agent_name = task.get("agent") + agent_goal = task.get("task_goal") + agent_goal_id = task.get("task_id") + if agent_name == "Human": + ask_user = 
agent_goal + continue + + if not agent_name: + raise + + task_agent: ConversableAgent = agent_role_map.get(agent_name) + if not task_agent: + logger.warning(f"agent{agent_name}没有找到具体agent!") + continue + agent_role_map.update({task_agent.name: task_agent}) + + task_in_message = AgentMessage.init_new( + content=f"任务目标:{agent_goal},相关参数:{json.dumps(task.get('slots'))}", + current_goal=agent_goal, + goal_id=agent_goal_id, + rounds=message_rounds, + ) + message_rounds = task_in_message.rounds + 1 + + task_in_message_map.update({agent_name: task_in_message}) + if task_agent.name == "final_report_agent": + api_tasks.append( + task_agent.generate_reply( + received_message=task_in_message, + sender=self, + reviewer=reviewer, + rely_messages=None, # 会变成单论消息 + # 会变成多轮历史消息 + historical_dialogues=all_task_messages, + ) + ) + + else: + task_agent.stream_out = False + api_tasks.append( + task_agent.generate_reply( + received_message=task_in_message, + sender=self, + reviewer=reviewer, + ) + ) + + from dbgpt.util.chat_util import run_async_tasks + + results: List[AgentMessage] = await run_async_tasks( + tasks=api_tasks, concurrency_limit=self.concurrency_limit + ) + + ## 检察所有计划任务输出,有效输出记录消息 + for result in results: + task_agent = agent_role_map.get(result.name) + ## 判断消息结果 + if result.name == "final_report_agent": + ## 处理终止代理任务,结束对话,告诉用户结论 + return result.action_report + else: + ## 如果没有用户任务,重新进入下一个阶段直到出现终止任务的Agent + + task_in_message = task_in_message_map.get(result.name) + logger.info(f"消息发送给:{task_agent},{result.name},{result}") + if task_agent: + await self.send( + message=task_in_message, + recipient=task_agent, + request_reply=False, + ) + last_round_messages.append(task_in_message) + all_task_messages.append(task_in_message) + all_messages.append(task_in_message) + await task_agent.send( + message=result, recipient=self, request_reply=False + ) + all_task_messages.append(result) + last_round_messages.append(result) + all_messages.append(result) + + ## 处理用户代理任务,终断循环,进入用户交互 + 
def get_avatar(self) -> Optional[str]:
    """Return the avatar of current agent.

    Base implementation has no avatar; subclasses (e.g. ``DefaultProfile``)
    override this to return the configured avatar.
    """
    # BUG FIX: docstring was copy-pasted from get_goal ("Return the goal...").
    return None
) @@ -385,6 +390,10 @@ class DefaultProfile(BaseModel, Profile): """Return the goal of current agent.""" return self.goal + def get_avatar(self) -> Optional[str]: + """Return the avatar of current agent.""" + return self.avatar + def get_retry_goal(self) -> Optional[str]: """Return the retry goal of current agent.""" return self.retry_goal @@ -440,6 +449,7 @@ class ProfileFactory: goal: Optional[str] = None, prefer_prompt_language: Optional[str] = None, prefer_model: Optional[str] = None, + avatar: Optional[str] = None, ) -> Optional[Profile]: """Create a profile.""" @@ -461,6 +471,7 @@ class LLMProfileFactory(ProfileFactory): goal: Optional[str] = None, prefer_prompt_language: Optional[str] = None, prefer_model: Optional[str] = None, + avatar: Optional[str] = None, ) -> Optional[Profile]: """Create a profile by LLM. @@ -487,6 +498,7 @@ class DatasetProfileFactory(ProfileFactory): goal: Optional[str] = None, prefer_prompt_language: Optional[str] = None, prefer_model: Optional[str] = None, + avatar: Optional[str] = None, ) -> Optional[Profile]: """Create a profile by dataset. @@ -510,6 +522,7 @@ class CompositeProfileFactory(ProfileFactory): goal: Optional[str] = None, prefer_prompt_language: Optional[str] = None, prefer_model: Optional[str] = None, + avatar: Optional[str] = None, ) -> Optional[Profile]: """Create a profile by combining multiple profile factories. 
@@ -532,6 +545,7 @@ class ProfileConfig(BaseModel): name: str | ConfigInfo | None = DynConfig(..., description="The name of the agent.") role: str | ConfigInfo | None = DynConfig(..., description="The role of the agent.") goal: str | ConfigInfo | None = DynConfig(None, description="The retry goal.") + avatar: str | ConfigInfo | None = DynConfig(None, description="The avatar.") retry_goal: str | ConfigInfo | None = DynConfig(None, description="The goal.") constraints: List[str] | ConfigInfo | None = DynConfig(None, is_list=True) retry_constraints: List[str] | ConfigInfo | None = DynConfig(None, is_list=True) @@ -584,6 +598,7 @@ class ProfileConfig(BaseModel): name = self.name role = self.role goal = self.goal + avatar = self.avatar retry_goal = self.retry_goal retry_constraints = self.retry_constraints constraints = self.constraints @@ -603,6 +618,8 @@ class ProfileConfig(BaseModel): role = role.query(**call_args) if isinstance(goal, ConfigInfo): goal = goal.query(**call_args) + if isinstance(avatar, ConfigInfo): + avatar = avatar.query(**call_args) if isinstance(retry_goal, ConfigInfo): retry_goal = retry_goal.query(**call_args) if isinstance(constraints, ConfigInfo): @@ -640,6 +657,7 @@ class ProfileConfig(BaseModel): goal, prefer_prompt_language, prefer_model, + avatar, ) if factory_profile is not None: @@ -648,6 +666,7 @@ class ProfileConfig(BaseModel): name=name, role=role, goal=goal, + avatar=avatar, retry_goal=retry_goal, constraints=constraints, retry_constraints=retry_constraints, diff --git a/packages/dbgpt-core/src/dbgpt/agent/core/role.py b/packages/dbgpt-core/src/dbgpt/agent/core/role.py index 6aa9fa3df..91f885831 100644 --- a/packages/dbgpt-core/src/dbgpt/agent/core/role.py +++ b/packages/dbgpt-core/src/dbgpt/agent/core/role.py @@ -153,6 +153,11 @@ class Role(ABC, BaseModel): """Return the goal of the role.""" return self.current_profile.get_goal() + @property + def avatar(self) -> Optional[str]: + """Return the goal of the role.""" + return 
self.current_profile.get_avatar() + @property def retry_goal(self) -> Optional[str]: """Return the retry goal of the role.""" diff --git a/packages/dbgpt-core/src/dbgpt/agent/expand/actions/chart_action.py b/packages/dbgpt-core/src/dbgpt/agent/expand/actions/chart_action.py index b2f9386a0..2b47a7cce 100644 --- a/packages/dbgpt-core/src/dbgpt/agent/expand/actions/chart_action.py +++ b/packages/dbgpt-core/src/dbgpt/agent/expand/actions/chart_action.py @@ -5,7 +5,7 @@ import logging from typing import List, Optional from dbgpt._private.pydantic import BaseModel, Field, model_to_json -from dbgpt.vis.tags.vis_chart import Vis, VisChart +from dbgpt.vis import SystemVisTag from ...core.action.base import Action, ActionOutput from ...resource.base import AgentResource, ResourceType @@ -34,18 +34,14 @@ class ChartAction(Action[SqlInput]): def __init__(self, **kwargs): """Chart action init.""" super().__init__(**kwargs) - self._render_protocol = VisChart() + ## this action out view vis tag name + self.action_view_tag: str = SystemVisTag.VisChart.value @property def resource_need(self) -> Optional[ResourceType]: """Return the resource type needed for the action.""" return ResourceType.DB - @property - def render_protocol(self) -> Optional[Vis]: - """Return the render protocol.""" - return self._render_protocol - @property def out_model_type(self): """Return the output model type.""" diff --git a/packages/dbgpt-core/src/dbgpt/agent/expand/actions/code_action.py b/packages/dbgpt-core/src/dbgpt/agent/expand/actions/code_action.py index 0213c85f9..c5f5db6b6 100644 --- a/packages/dbgpt-core/src/dbgpt/agent/expand/actions/code_action.py +++ b/packages/dbgpt-core/src/dbgpt/agent/expand/actions/code_action.py @@ -5,7 +5,7 @@ from typing import Optional, Union from dbgpt.util.code_utils import UNKNOWN, execute_code, extract_code, infer_lang from dbgpt.util.utils import colored -from dbgpt.vis.tags.vis_code import Vis, VisCode +from dbgpt.vis import SystemVisTag from 
...core.action.base import Action, ActionOutput from ...resource.base import AgentResource @@ -19,13 +19,9 @@ class CodeAction(Action[None]): def __init__(self, **kwargs): """Code action init.""" super().__init__(**kwargs) - self._render_protocol = VisCode() self._code_execution_config = {} - - @property - def render_protocol(self) -> Optional[Vis]: - """Return the render protocol.""" - return self._render_protocol + ## this action out view vis tag name + self.action_view_tag: str = SystemVisTag.VisCode.value async def run( self, diff --git a/packages/dbgpt-core/src/dbgpt/agent/expand/actions/dashboard_action.py b/packages/dbgpt-core/src/dbgpt/agent/expand/actions/dashboard_action.py index 3d0cb127b..10647a12e 100644 --- a/packages/dbgpt-core/src/dbgpt/agent/expand/actions/dashboard_action.py +++ b/packages/dbgpt-core/src/dbgpt/agent/expand/actions/dashboard_action.py @@ -5,7 +5,7 @@ import logging from typing import List, Optional from dbgpt._private.pydantic import BaseModel, Field, model_to_dict -from dbgpt.vis.tags.vis_dashboard import Vis, VisDashboard +from dbgpt.vis import SystemVisTag from ...core.action.base import Action, ActionOutput from ...resource.base import AgentResource, ResourceType @@ -42,18 +42,14 @@ class DashboardAction(Action[List[ChartItem]]): def __init__(self, **kwargs): """Dashboard action init.""" super().__init__(**kwargs) - self._render_protocol = VisDashboard() + ## this action out view vis tag name + self.action_view_tag: str = SystemVisTag.VisDashboard.value @property def resource_need(self) -> Optional[ResourceType]: """Return the resource type needed for the action.""" return ResourceType.DB - @property - def render_protocol(self) -> Optional[Vis]: - """Return the render protocol.""" - return self._render_protocol - @property def out_model_type(self): """Return the output model type.""" diff --git a/packages/dbgpt-core/src/dbgpt/agent/expand/actions/indicator_action.py 
b/packages/dbgpt-core/src/dbgpt/agent/expand/actions/indicator_action.py index 9156e3ff8..6d08a0b9d 100644 --- a/packages/dbgpt-core/src/dbgpt/agent/expand/actions/indicator_action.py +++ b/packages/dbgpt-core/src/dbgpt/agent/expand/actions/indicator_action.py @@ -5,8 +5,7 @@ import logging from typing import Optional from dbgpt._private.pydantic import BaseModel, Field -from dbgpt.vis.tags.vis_api_response import VisApiResponse -from dbgpt.vis.tags.vis_plugin import Vis +from dbgpt.vis import SystemVisTag from ...core.action.base import Action, ActionOutput from ...core.schema import Status @@ -45,18 +44,14 @@ class IndicatorAction(Action[IndicatorInput]): def __init__(self, **kwargs): """Init indicator action.""" super().__init__(**kwargs) - self._render_protocol = VisApiResponse() + ## this action out view vis tag name + self.action_view_tag: str = SystemVisTag.VisTool.value @property def resource_need(self) -> Optional[ResourceType]: """Return the resource type needed for the action.""" return ResourceType.Knowledge - @property - def render_protocol(self) -> Optional[Vis]: - """Return the render protocol.""" - return self._render_protocol - @property def out_model_type(self): """Return the output model type.""" diff --git a/packages/dbgpt-core/src/dbgpt/agent/expand/actions/tool_action.py b/packages/dbgpt-core/src/dbgpt/agent/expand/actions/tool_action.py index dc65cf6b9..e2e8ad924 100644 --- a/packages/dbgpt-core/src/dbgpt/agent/expand/actions/tool_action.py +++ b/packages/dbgpt-core/src/dbgpt/agent/expand/actions/tool_action.py @@ -5,7 +5,7 @@ import logging from typing import Optional from dbgpt._private.pydantic import BaseModel, Field -from dbgpt.vis.tags.vis_plugin import Vis, VisPlugin +from dbgpt.vis import SystemVisTag, Vis from ...core.action.base import Action, ActionOutput from ...core.schema import Status @@ -37,18 +37,14 @@ class ToolAction(Action[ToolInput]): def __init__(self, **kwargs): """Tool action init.""" super().__init__(**kwargs) - 
self._render_protocol = VisPlugin() + ## this action out view vis tag name + self.action_view_tag: str = SystemVisTag.VisTool.value @property def resource_need(self) -> Optional[ResourceType]: """Return the resource type needed for the action.""" return ResourceType.Tool - @property - def render_protocol(self) -> Optional[Vis]: - """Return the render protocol.""" - return self._render_protocol - @property def out_model_type(self): """Return the output model type.""" diff --git a/packages/dbgpt-core/src/dbgpt/agent/util/llm/llm.py b/packages/dbgpt-core/src/dbgpt/agent/util/llm/llm.py index 51773d887..f90a7fae6 100644 --- a/packages/dbgpt-core/src/dbgpt/agent/util/llm/llm.py +++ b/packages/dbgpt-core/src/dbgpt/agent/util/llm/llm.py @@ -1,12 +1,13 @@ """LLM module.""" import logging +from abc import ABC from collections import defaultdict from enum import Enum from typing import Any, Dict, List, Optional, Type from dbgpt._private.pydantic import BaseModel, ConfigDict, Field -from dbgpt.core import LLMClient, ModelMetadata, ModelRequest +from dbgpt.core import LLMClient, ModelRequest logger = logging.getLogger(__name__) @@ -67,31 +68,37 @@ class LLMStrategyType(Enum): } -class LLMStrategy: - """LLM strategy base class.""" - +class LLMStrategy(ABC): def __init__(self, llm_client: LLMClient, context: Optional[str] = None): - """Create an LLMStrategy instance.""" self._llm_client = llm_client self._context = context @property def type(self) -> LLMStrategyType: - """Return the strategy type.""" return LLMStrategyType.Default def _excluded_models( self, - all_models: List[ModelMetadata], - excluded_models: List[str], - need_uses: Optional[List[str]] = None, + all_models: List[str], + order_llms: Optional[List[str]] = None, + excluded_models: Optional[List[str]] = None, ): - if not need_uses: - need_uses = [] + if not order_llms: + order_llms = [] + if not excluded_models: + excluded_models = [] can_uses = [] - for item in all_models: - if item.model in need_uses and 
item.model not in excluded_models:
-                can_uses.append(item)
+        if order_llms and len(order_llms) > 0:
+            for llm_name in order_llms:
+                if llm_name in all_models and (
+                    not excluded_models or llm_name not in excluded_models
+                ):
+                    can_uses.append(llm_name)
+        else:
+            for llm_name in all_models:
+                if not excluded_models or llm_name not in excluded_models:
+                    can_uses.append(llm_name)
+
         return can_uses
 
     async def next_llm(self, excluded_models: Optional[List[str]] = None):
@@ -107,9 +114,11 @@
             excluded_models = []
         try:
             all_models = await self._llm_client.models()
-            available_llms = self._excluded_models(all_models, excluded_models, None)
-            if available_llms and len(available_llms) > 0:
-                return available_llms[0].model
+            all_model_names = [item.model for item in all_models]
+
+            can_uses = self._excluded_models(all_model_names, None, excluded_models)
+            if can_uses and len(can_uses) > 0:
+                return can_uses[0]
             else:
                 raise ValueError("No model service available!")
@@ -118,14 +127,26 @@
             raise ValueError(f"Failed to allocate model service,{str(e)}!")
 
-llm_strategies: Dict[LLMStrategyType, List[Type[LLMStrategy]]] = defaultdict(list)
+### Model selection strategy registration, built-in strategy registration by default
+# NOTE(review): values are single strategy classes now (last registration wins),
+# so use a plain dict — ``defaultdict(Type[LLMStrategy])`` is invalid because a
+# subscripted generic is not a callable default_factory.
+llm_strategies: Dict[LLMStrategyType, Type[LLMStrategy]] = {}
 
-def register_llm_strategy(
+def register_llm_strategy_cls(
     llm_strategy_type: LLMStrategyType, strategy: Type[LLMStrategy]
 ):
     """Register llm strategy."""
-    llm_strategies[llm_strategy_type].append(strategy)
+    llm_strategies[llm_strategy_type] = strategy
+
+
+def get_llm_strategy_cls(
+    llm_strategy_type: LLMStrategyType,
+) -> Optional[Type[LLMStrategy]]:
+    return llm_strategies.get(llm_strategy_type, None)
+
+
+register_llm_strategy_cls(LLMStrategyType.Default, LLMStrategy)
 
 class LLMConfig(BaseModel):
diff --git a/packages/dbgpt-core/src/dbgpt/agent/util/llm/llm_client.py
b/packages/dbgpt-core/src/dbgpt/agent/util/llm/llm_client.py index 73f70214d..190129468 100644 --- a/packages/dbgpt-core/src/dbgpt/agent/util/llm/llm_client.py +++ b/packages/dbgpt-core/src/dbgpt/agent/util/llm/llm_client.py @@ -1,14 +1,12 @@ -"""AIWrapper for LLM.""" - import json import logging -import traceback -from typing import Any, Callable, Dict, Optional, Union +from typing import Callable, Dict, Optional, Type, Union -from dbgpt.core import LLMClient, ModelOutput, ModelRequestContext +from dbgpt.core import LLMClient, ModelRequestContext from dbgpt.core.interface.output_parser import BaseOutputParser from dbgpt.util.error_types import LLMChatError from dbgpt.util.tracer import root_tracer +from dbgpt.vis import Vis from ..llm.llm import _build_model_request @@ -32,12 +30,16 @@ class AIWrapper: } def __init__( - self, llm_client: LLMClient, output_parser: Optional[BaseOutputParser] = None + self, + llm_client: LLMClient, + output_parser: Optional[BaseOutputParser] = None, + thinking_render: Optional[Type[Vis]] = None, ): """Create an AIWrapper instance.""" self.llm_echo = False self.model_cache_enable = False self._llm_client = llm_client + self._thinking_render = thinking_render self._output_parser = output_parser or BaseOutputParser(is_stream_out=False) @classmethod @@ -120,64 +122,14 @@ class AIWrapper: return json.dumps(config, sort_keys=True, ensure_ascii=False) async def create(self, verbose: bool = False, **config): - """Create llm client request.""" # merge the input config with the i-th config in the config list full_config = {**config} # separate the config into create_config and extra_kwargs create_config, extra_kwargs = self._separate_create_config(full_config) - - # construct the create params params = self._construct_create_params(create_config, extra_kwargs) - # get the cache_seed, filter_func and context - filter_func = extra_kwargs.get("filter_func") - context = extra_kwargs.get("context") llm_model = extra_kwargs.get("llm_model") - memory 
= extra_kwargs.get("memory", None) - conv_id = extra_kwargs.get("conv_id", None) - sender = extra_kwargs.get("sender", None) stream_out = extra_kwargs.get("stream_out", True) - try: - response = await self._completions_create( - llm_model, params, conv_id, sender, memory, stream_out, verbose - ) - except LLMChatError as e: - logger.debug(f"{llm_model} generate failed!{str(e)}") - raise e - else: - pass_filter = filter_func is None or filter_func( - context=context, response=response - ) - if pass_filter: - # Return the response if it passes the filter - return response - else: - return None - - def _get_span_metadata(self, payload: Dict) -> Dict: - metadata = {k: v for k, v in payload.items()} - - metadata["messages"] = list( - map(lambda m: m if isinstance(m, dict) else m.dict(), metadata["messages"]) - ) - return metadata - - def _llm_messages_convert(self, params): - gpts_messages = params["messages"] - # TODO - - return gpts_messages - - async def _completions_create( - self, - llm_model, - params, - conv_id: Optional[str] = None, - sender: Optional[str] = None, - memory: Optional[Any] = None, - stream_out: bool = True, - verbose: bool = False, - ): payload = { "model": llm_model, "prompt": params.get("prompt"), @@ -197,38 +149,57 @@ class AIWrapper: payload["context"] = ModelRequestContext(extra=params["context"]) try: model_request = _build_model_request(payload) - str_prompt = model_request.messages_to_string() - model_output: Optional[ModelOutput] = None - async for output in self._llm_client.generate_stream(model_request.copy()): # type: ignore # noqa - model_output = output - if memory and stream_out: - from ... 
import GptsMemory  # noqa: F401
+                from datetime import datetime
 
-                    temp_message = {
-                        "sender": sender,
-                        "receiver": "?",
-                        "model": llm_model,
-                        "markdown": model_output.gen_text_with_thinking(),
-                    }
-                    await memory.push_message(
-                        conv_id,
-                        temp_message,
-                    )
-            if not model_output:
-                raise ValueError("LLM generate stream is null!")
-            parsed_output = model_output.gen_text_with_thinking()
-            parsed_output = parsed_output.strip().replace("\\n", "\n")
+                start_time = datetime.now()
+                first_chunk = True
+                if stream_out:
+                    async for output in self._llm_client.generate_stream(
+                        model_request.copy()
+                    ):  # type: ignore
+                        model_output = output
+                        parsed_output = model_output.gen_text_and_thinking()
+
+                        think_blank = not parsed_output[0] or len(parsed_output[0]) <= 0
+                        content_blank = not parsed_output[1] or len(parsed_output[1]) <= 0
+                        if think_blank and content_blank:
+                            continue
+
+                        if first_chunk:
+                            first_chunk = False
+                            end_time = datetime.now()
+                            logger.info(
+                                f"LLM stream generate first token cost:"
+                                f"{end_time - start_time} "
+                                f"seconds. output is {parsed_output}"
+                            )
+                        yield parsed_output
+                else:
+                    model_output = await self._llm_client.generate(model_request.copy())  # type: ignore
+                    parsed_output = model_output.gen_text_and_thinking()
+                    end_time = datetime.now()
+                    logger.info(
+                        f"LLM no stream generate cost:{end_time - start_time} "
+                        f"seconds.
output is {parsed_output}" + ) + yield parsed_output - if verbose: - print("\n", "-" * 80, flush=True, sep="") - print(f"String Prompt[verbose]: \n{str_prompt}") - print(f"LLM Output[verbose]: \n{parsed_output}") - print("-" * 80, "\n", flush=True, sep="") - return parsed_output except Exception as e: - logger.error( - f"Call LLMClient error, {str(e)}, detail: {traceback.format_exc()}" - ) + logger.exception(f"Call LLMClient error, {str(e)}, detail: {str(e)}") raise LLMChatError(original_exception=e) from e finally: span.end() + + def _get_span_metadata(self, payload: Dict) -> Dict: + metadata = {k: v for k, v in payload.items()} + + metadata["messages"] = list( + map(lambda m: m if isinstance(m, dict) else m.dict(), metadata["messages"]) + ) + return metadata + + def _llm_messages_convert(self, params): + gpts_messages = params["messages"] + # TODO + + return gpts_messages diff --git a/packages/dbgpt-core/src/dbgpt/agent/util/llm/strategy/priority.py b/packages/dbgpt-core/src/dbgpt/agent/util/llm/strategy/priority.py index e696b5a36..6a0d49155 100644 --- a/packages/dbgpt-core/src/dbgpt/agent/util/llm/strategy/priority.py +++ b/packages/dbgpt-core/src/dbgpt/agent/util/llm/strategy/priority.py @@ -4,7 +4,7 @@ import json import logging from typing import List, Optional -from ..llm import LLMStrategy, LLMStrategyType +from ..llm import LLMStrategy, LLMStrategyType, register_llm_strategy_cls logger = logging.getLogger(__name__) @@ -20,18 +20,25 @@ class LLMStrategyPriority(LLMStrategy): async def next_llm(self, excluded_models: Optional[List[str]] = None) -> str: """Return next available llm model name.""" try: - if not excluded_models: - excluded_models = [] all_models = await self._llm_client.models() + all_model_names = [item.model for item in all_models] + if not self._context: raise ValueError("No context provided for priority strategy!") - priority: List[str] = json.loads(self._context) - can_uses = self._excluded_models(all_models, excluded_models, priority) + 
if isinstance(self._context, str): + priority = json.loads(self._context) + else: + priority = self._context + logger.info(f"Use {self.type} llm strategy! value:{self._context}") + can_uses = self._excluded_models(all_model_names, priority, excluded_models) + if can_uses and len(can_uses) > 0: - return can_uses[0].model + return can_uses[0] else: raise ValueError("No model service available!") - except Exception as e: logger.error(f"{self.type} get next llm failed!{str(e)}") raise ValueError(f"Failed to allocate model service,{str(e)}!") + + +register_llm_strategy_cls(LLMStrategyType.Priority, LLMStrategyPriority) diff --git a/packages/dbgpt-core/src/dbgpt/core/interface/llm.py b/packages/dbgpt-core/src/dbgpt/core/interface/llm.py index ea0f30072..a2fe9330b 100644 --- a/packages/dbgpt-core/src/dbgpt/core/interface/llm.py +++ b/packages/dbgpt-core/src/dbgpt/core/interface/llm.py @@ -295,6 +295,19 @@ class ModelOutput: msg += self.text or "" return msg + def gen_text_and_thinking(self, new_text: Optional[str] = None): + thinking_text = "" + content_text = "" + if self.has_thinking: + thinking_text = self.thinking_text + + if new_text: + content_text += new_text + elif self.has_text: + content_text = self.text or "" + + return thinking_text, content_text + @text.setter def text(self, value: str): """Set the generated text.""" diff --git a/packages/dbgpt-core/src/dbgpt/model/cluster/worker/default_worker.py b/packages/dbgpt-core/src/dbgpt/model/cluster/worker/default_worker.py index 61a1c75b9..aa6c0ccaa 100644 --- a/packages/dbgpt-core/src/dbgpt/model/cluster/worker/default_worker.py +++ b/packages/dbgpt-core/src/dbgpt/model/cluster/worker/default_worker.py @@ -634,5 +634,8 @@ def _try_import_torch(): import torch _torch_imported = True - except ImportError: + except ImportError as e: + logger.warning(f"_try_import_torch ImportError!{str(e)}") pass + except Exception as e: + logger.warning(f"_try_import_torch exception!{str(e)}") diff --git 
a/packages/dbgpt-core/src/dbgpt/vis/__init__.py b/packages/dbgpt-core/src/dbgpt/vis/__init__.py index 5c4195467..bb6dd71a1 100644 --- a/packages/dbgpt-core/src/dbgpt/vis/__init__.py +++ b/packages/dbgpt-core/src/dbgpt/vis/__init__.py @@ -1,25 +1,10 @@ """GPT-Vis Module.""" from .base import Vis # noqa: F401 -from .client import vis_client # noqa: F401 -from .tags.vis_agent_message import VisAgentMessages # noqa: F401 -from .tags.vis_agent_plans import VisAgentPlans # noqa: F401 -from .tags.vis_api_response import VisApiResponse # noqa: F401 -from .tags.vis_app_link import VisAppLink # noqa: F401 -from .tags.vis_chart import VisChart # noqa: F401 -from .tags.vis_code import VisCode # noqa: F401 -from .tags.vis_dashboard import VisDashboard # noqa: F401 -from .tags.vis_plugin import VisPlugin # noqa: F401 +from .vis_converter import SystemVisTag, VisProtocolConverter # noqa: F401 __ALL__ = [ "Vis", - "vis_client", - "VisAgentMessages", - "VisAgentPlans", - "VisChart", - "VisCode", - "VisDashboard", - "VisPlugin", - "VisAppLink", - "VisApiResponse", + "SystemVisTag", + "VisProtocolConverter", ] diff --git a/packages/dbgpt-core/src/dbgpt/vis/base.py b/packages/dbgpt-core/src/dbgpt/vis/base.py index fbe7d5b8f..e1bad8af6 100644 --- a/packages/dbgpt-core/src/dbgpt/vis/base.py +++ b/packages/dbgpt-core/src/dbgpt/vis/base.py @@ -9,6 +9,13 @@ from dbgpt.util.json_utils import serialize class Vis: """Vis protocol base class.""" + def __init__(self, **kwargs): + """ + vis init + Args: + **kwargs: + """ + def render_prompt(self) -> Optional[str]: """Return the prompt for the vis protocol.""" return None diff --git a/packages/dbgpt-core/src/dbgpt/vis/client.py b/packages/dbgpt-core/src/dbgpt/vis/client.py deleted file mode 100644 index 138b8aeae..000000000 --- a/packages/dbgpt-core/src/dbgpt/vis/client.py +++ /dev/null @@ -1,61 +0,0 @@ -"""Client for vis protocol.""" - -from typing import Dict, Type - -from .base import Vis -from .tags.vis_agent_message import VisAgentMessages 
-from .tags.vis_agent_plans import VisAgentPlans -from .tags.vis_api_response import VisApiResponse -from .tags.vis_app_link import VisAppLink -from .tags.vis_chart import VisChart -from .tags.vis_code import VisCode -from .tags.vis_dashboard import VisDashboard -from .tags.vis_plugin import VisPlugin -from .tags.vis_thinking import VisThinking - - -class VisClient: - """Client for vis protocol.""" - - def __init__(self): - """Client for vis protocol.""" - self._vis_tag: Dict[str, Vis] = {} - - def register(self, vis_cls: Type[Vis]): - """Register the vis protocol.""" - self._vis_tag[vis_cls.vis_tag()] = vis_cls() - - def get(self, tag_name): - """Get the vis protocol by tag name.""" - if tag_name not in self._vis_tag: - raise ValueError(f"Vis protocol tags not yet supported![{tag_name}]") - return self._vis_tag[tag_name] - - def tag_names(self): - """Return the tag names of the vis protocol.""" - self._vis_tag.keys() - - -vis_client = VisClient() - -vis_client.register(VisCode) -vis_client.register(VisChart) -vis_client.register(VisDashboard) -vis_client.register(VisAgentPlans) -vis_client.register(VisAgentMessages) -vis_client.register(VisPlugin) -vis_client.register(VisAppLink) -vis_client.register(VisApiResponse) -vis_client.register(VisThinking) - - -def vis_name_change(vis_message: str) -> str: - """Change vis tag name use new name.""" - replacements = { - "```vis-chart": "```vis-db-chart", - } - - for old_tag, new_tag in replacements.items(): - vis_message = vis_message.replace(old_tag, new_tag) - - return vis_message diff --git a/packages/dbgpt-core/src/dbgpt/vis/schema.py b/packages/dbgpt-core/src/dbgpt/vis/schema.py new file mode 100644 index 000000000..f9affe57a --- /dev/null +++ b/packages/dbgpt-core/src/dbgpt/vis/schema.py @@ -0,0 +1,112 @@ +from typing import Any, Dict, List, Optional + +from dbgpt._private.pydantic import ( + BaseModel, + Field, + model_to_dict, +) + + +class VisBase(BaseModel): + uid: str = Field(..., description="vis component 
uid") + type: str = Field(..., description="vis data update type") + message_id: Optional[str] = Field(None, description="vis component message id") + + def to_dict(self, **kwargs) -> Dict[str, Any]: + """Convert the model to a dictionary""" + return model_to_dict(self, **kwargs) + + +class VisTextContent(VisBase): + markdown: str = Field(..., description="vis message content") + + +class VisMessageContent(VisBase): + markdown: str = Field(..., description="vis msg content") + role: Optional[str] = Field( + default=None, description="vis message generate agent role" + ) + name: Optional[str] = Field( + default=None, description="vis message generate agent name" + ) + avatar: Optional[str] = Field( + default=None, description="vis message generate agent avatar" + ) + model: Optional[str] = Field( + default=None, description="vis message generate agent model" + ) + + +class VisTaskContent(BaseModel): + task_id: str = Field(default=None, description="vis task id") + task_uid: Optional[str] = Field(default=None, description="vis task uid") + task_content: Optional[str] = Field(default=None, description="vis task content") + task_link: Optional[str] = Field(default=None, description="vis task link") + agent_id: Optional[str] = Field(default=None, description="vis task agent id") + agent_name: Optional[str] = Field(default=None, description="vis task agent name") + agent_link: Optional[str] = Field(default=None, description="vis task agent link") + task_name: Optional[str] = Field(default=None, description="vis task name") + avatar: Optional[str] = Field(default=None, description="vis task avatar") + + def to_dict(self, **kwargs) -> Dict[str, Any]: + """Convert the model to a dictionary""" + return model_to_dict(self, **kwargs) + + +class VisPlansContent(VisBase): + tasks: List[VisTaskContent] = Field(default=[], description="vis plan tasks") + + def to_dict(self, **kwargs) -> Dict[str, Any]: + tasks_dict = [] + for step in self.tasks: + tasks_dict.append(step.to_dict()) 
+ dict_value = model_to_dict(self, exclude={"tasks"}) + dict_value["tasks"] = tasks_dict + return dict_value + + +class VisStepContent(VisBase): + avatar: Optional[str] = Field(default=None, description="vis task avatar") + status: Optional[str] = Field(default=None, description="vis task status") + tool_name: Optional[str] = Field(default=None, description="vis task tool name") + tool_args: Optional[str] = Field(default=None, description="vis task tool args") + tool_result: Optional[str] = Field(default=None, description="vis tool result") + + err_msg: Optional[str] = Field( + default=None, description="vis task tool error message" + ) + progress: Optional[int] = Field( + default=None, description="vis task tool exceute progress" + ) + tool_execute_link: Optional[str] = Field( + default=None, description="vis task tool exceute link" + ) + + +class StepInfo(BaseModel): + avatar: Optional[str] = Field(default=None, description="vis task avatar") + status: Optional[str] = Field(default=None, description="vis task status") + tool_name: Optional[str] = Field(default=None, description="vis task tool name") + tool_args: Optional[str] = Field(default=None, description="vis task tool args") + tool_result: Optional[str] = Field(default=None, description="vis tool result") + + err_msg: Optional[str] = Field( + default=None, description="vis task tool error message" + ) + progress: Optional[int] = Field( + default=None, description="vis task tool exceute progress" + ) + tool_execute_link: Optional[str] = Field( + default=None, description="vis task tool exceute link" + ) + + +class VisStepsContent(VisBase): + steps: Optional[List[StepInfo]] = Field( + default=None, description="vis task tools exceute info" + ) + + +class VisThinkingContent(VisBase): + markdown: str = Field(..., description="vis thinking content") + think_link: str = Field(None, description="vis thinking link") diff --git a/packages/dbgpt-core/src/dbgpt/vis/vis_converter.py 
b/packages/dbgpt-core/src/dbgpt/vis/vis_converter.py new file mode 100644 index 000000000..e6ec240a2 --- /dev/null +++ b/packages/dbgpt-core/src/dbgpt/vis/vis_converter.py @@ -0,0 +1,196 @@ +from __future__ import annotations + +import json +from abc import ABC +from collections import defaultdict +from enum import Enum +from importlib import util +from pathlib import Path +from typing import Dict, List, Optional, Tuple, Type, Union + +from dbgpt.vis import Vis + + +def scan_vis_tags(vis_tag_paths: List[str]): + """ + Scan the component path address specified in the current component package. + Args: + path: The component path address of the current component package + Returns: + + """ + from dbgpt.util.module_utils import ModelScanner, ScannerConfig + + from .base import Vis + + scanner = ModelScanner[Vis]() + for path in vis_tag_paths: + config = ScannerConfig( + module_path=path, + base_class=Vis, + recursive=True, + ) + scanner.scan_and_register(config) + return scanner.get_registered_items() + + +class SystemVisTag(Enum): + """System Vis Tags.""" + + VisMessage = "vis-message" + VisPlans = "vis-plans" + VisText = "vis-text" + VisThinking = "vis-thinking" + VisChart = "vis-chart" + VisCode = "vis-code" + VisTool = "vis-tool" + VisTools = "vis-tools" + VisDashboard = "vis-dashboard" + + +class VisProtocolConverter(ABC): + """The default Vis component that needs to exist as the basis for + organizing message structures can be overridden.If not overridden, + the default component will be used.""" + + SYSTEM_TAGS = [member.value for member in SystemVisTag] + + def __init__(self, paths: Optional[List[str]] = None): + """Create a new AgentManager.""" + self._owned_vis_tag: Dict[str, Tuple[Type[Vis], Vis]] = defaultdict() + self._paths = paths or [""] # TODO 取当前路径的.tags + if paths: + owned_tags = scan_vis_tags(self._paths) + for _, tag in owned_tags.items(): + self.register_vis_tag(tag) + + def system_vis_tag_map(self): + return { + SystemVisTag.VisMessage.value: 
SystemVisTag.VisMessage.value,
+            SystemVisTag.VisPlans.value: SystemVisTag.VisPlans.value,
+            SystemVisTag.VisText.value: SystemVisTag.VisText.value,
+            SystemVisTag.VisThinking.value: SystemVisTag.VisThinking.value,
+            SystemVisTag.VisChart.value: SystemVisTag.VisChart.value,
+            SystemVisTag.VisCode.value: SystemVisTag.VisCode.value,
+            SystemVisTag.VisTool.value: SystemVisTag.VisTool.value,
+            SystemVisTag.VisTools.value: SystemVisTag.VisTools.value,
+            SystemVisTag.VisDashboard.value: SystemVisTag.VisDashboard.value,
+        }
+
+    def vis(self, vis_tag):
+        ## check if a system vis tag
+        tag_name = vis_tag
+        if vis_tag in self.system_vis_tag_map():
+            tag_name = self.system_vis_tag_map()[vis_tag]
+        if tag_name in self._owned_vis_tag:
+            vis_cls, vis_inst = self._owned_vis_tag[tag_name]
+            return vis_cls
+        else:
+            return None
+
+    def vis_inst(self, vis_tag):
+        ## check if a system vis tag
+        tag_name = vis_tag
+        if vis_tag in self.system_vis_tag_map():
+            tag_name = self.system_vis_tag_map()[vis_tag]
+        if tag_name in self._owned_vis_tag:
+            vis_cls, vis_inst = self._owned_vis_tag[tag_name]
+            return vis_inst
+        else:
+            return None
+
+    def tag_config(self) -> Optional[dict]:
+        return None
+
+    def register_vis_tag(self, cls: Type[Vis], ignore_duplicate: bool = False) -> str:
+        """Register an vis tag."""
+        tag_config = self.tag_config()
+        inst = cls(**tag_config) if tag_config else cls()
+        tag_name = inst.vis_tag()
+        if tag_name in self._owned_vis_tag and (
+            not ignore_duplicate
+        ):
+            raise ValueError(f"Vis:{tag_name} already register!")
+        self._owned_vis_tag[tag_name] = (cls, inst)
+        return tag_name
+
+    async def visualization(
+        self,
+        messages: List["GptsMessage"],  # noqa
+        plans: Optional[List["GptsPlan"]] = None,  # noqa
+        gpt_msg: Optional["GptsMessage"] = None,  # noqa
+        stream_msg: Optional[Union[Dict, str]] = None,
+    ):
+        pass
+
+    async def visualization_stream(
+        self,
+        stream_msg: Optional[Union[Dict, str]] = None,
+    ):
+        pass
+
+    def
get_package_path_dynamic(self) -> str: + """动态解析模块的包路径""" + spec = util.find_spec(__name__) + if spec and spec.parent: + return str(Path(spec.origin).parent) + return str(Path(__file__).parent) + + +class DefaultVisConverter(VisProtocolConverter): + """None Vis Render, Just retrun message info""" + + async def visualization( + self, + messages: List["GptsMessage"], # noqa + plans: Optional[List["GptsPlan"]] = None, # noqa + gpt_msg: Optional["GptsMessage"] = None, # noqa + stream_msg: Optional[Union[Dict, str]] = None, + ): + from dbgpt.agent import ActionOutput + + simple_message_list = [] + for message in messages: + if message.sender == "Human": + continue + + action_report_str = message.action_report + view_info = message.content + action_out = None + if action_report_str and len(action_report_str) > 0: + action_out = ActionOutput.from_dict(json.loads(action_report_str)) + if action_out is not None: + view_info = action_out.content + + simple_message_list.append( + { + "sender": message.sender, + "receiver": message.receiver, + "model": message.model_name, + "markdown": view_info, + } + ) + if stream_msg: + simple_message_list.append(self._view_stream_message(stream_msg)) + + return simple_message_list + + async def _view_stream_message(self, message: Dict): + """Get agent stream message.""" + messages_view = [] + messages_view.append( + { + "sender": message["sender"], + "receiver": message["receiver"], + "model": message["model"], + "markdown": message["markdown"], + } + ) + + return messages_view + + async def visualization_stream( + self, + stream_msg: Optional[Union[Dict, str]] = None, + ): + return self._view_stream_message(stream_msg) diff --git a/packages/dbgpt-ext/src/dbgpt_ext/vis/gpt_vis/__init__.py b/packages/dbgpt-ext/src/dbgpt_ext/vis/gpt_vis/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/packages/dbgpt-ext/src/dbgpt_ext/vis/gpt_vis/gpt_vis_converter.py b/packages/dbgpt-ext/src/dbgpt_ext/vis/gpt_vis/gpt_vis_converter.py 
new file mode 100644 index 000000000..a3e443f1f --- /dev/null +++ b/packages/dbgpt-ext/src/dbgpt_ext/vis/gpt_vis/gpt_vis_converter.py @@ -0,0 +1,161 @@ +import json +import logging +from enum import Enum +from typing import Dict, List, Optional, Union + +from dbgpt.agent import ActionOutput, UserProxyAgent +from dbgpt.agent.core.memory.gpts import GptsMessage, GptsPlan +from dbgpt.vis.vis_converter import SystemVisTag, VisProtocolConverter + +NONE_GOAL_PREFIX: str = "none_goal_count_" +logger = logging.getLogger(__name__) + + +def vis_name_change(vis_message: str) -> str: + """Change vis tag name use new name.""" + replacements = { + "```vis-chart": "```vis-db-chart", + } + + for old_tag, new_tag in replacements.items(): + vis_message = vis_message.replace(old_tag, new_tag) + + return vis_message + + +class GptVisTagPackage(Enum): + """System Vis Tags.""" + + AgentMessage = "agent-messages" + AgentPlans = "agent-plans" + APIResponse = "vis-api-response" + AppLink = "vis-app-link" + Chart = "vis-db-chart" + Code = "vis-code" + Dashboard = "vis-dashboard" + Flow = "agent-flow" + Result = "agent-result" + Plugin = "vis-plugin" + Tools = "vis-tools" + Thinking = "vis-thinking" + Text = "vis-text" + + +class GptVisConverter(VisProtocolConverter): + def __init__(self, paths: Optional[str] = None): + default_tag_paths = ["dbgpt_ext.vis.gpt_vis.tags"] + super().__init__(paths if paths else default_tag_paths) + + def system_vis_tag_map(self): + return { + SystemVisTag.VisMessage.value: GptVisTagPackage.AgentMessage.value, + SystemVisTag.VisPlans.value: GptVisTagPackage.AgentPlans.value, + SystemVisTag.VisText.value: GptVisTagPackage.Text.value, + SystemVisTag.VisThinking.value: GptVisTagPackage.Thinking.value, + SystemVisTag.VisChart.value: GptVisTagPackage.Chart.value, + SystemVisTag.VisCode.value: GptVisTagPackage.Code.value, + SystemVisTag.VisTool.value: GptVisTagPackage.Plugin.value, + SystemVisTag.VisTools.value: GptVisTagPackage.Plugin.value, + 
SystemVisTag.VisDashboard.value: GptVisTagPackage.Dashboard.value, + } + + async def visualization_stream(self, stream_msg: Optional[Union[Dict, str]] = None): + return await self.agent_stream_message(stream_msg) + + async def visualization( + self, + messages: List[GptsMessage], + plans: Optional[List[GptsPlan]] = None, + gpt_msg: Optional[GptsMessage] = None, + stream_msg: Optional[Union[Dict, str]] = None, + ): + # VIS消息组装 + deal_messages: List[GptsMessage] = [] + for message in messages: + if not message.action_report and message.receiver != UserProxyAgent.role: + continue + deal_messages.append(message) + # last_message: Optional[GptsMessage] = ( + # deal_messages[-1] if len(deal_messages) > 0 else None + # ) + # if last_message and last_message.sender_name != message.sender_name: + # deal_messages.append(message) + # else: + # ## 直接替换最后一个 + # if len(deal_messages) > 0: + # deal_messages[-1] = message + # else: + # deal_messages.append(message) + + deal_messages = sorted(deal_messages, key=lambda _message: _message.rounds) + vis_items: List[str] = [] + for message in deal_messages: + vis_items.append(await self._messages_to_agents_vis(message)) + message_view = "\n".join(vis_items) + if stream_msg: + temp_view = await self.agent_stream_message(stream_msg) + message_view = message_view + "\n" + temp_view + return message_view + + async def agent_stream_message( + self, + message: Dict, + ): + """Get agent stream message.""" + messages_view = [] + thinking = message.get("thinking") + markdown = message.get("content") + vis_thinking = self.vis_inst(SystemVisTag.VisThinking.value) + msg_markdown = "" + if thinking: + vis_thinking = vis_thinking.sync_display(content=thinking) + msg_markdown = vis_thinking + if markdown: + msg_markdown = msg_markdown + "\n" + markdown + + messages_view.append( + { + "sender": message["sender"], + "model": message["model"], + "markdown": msg_markdown, + } + ) + + return await self.vis_inst(SystemVisTag.VisMessage.value).display( + 
content=messages_view + ) + + async def _messages_to_agents_vis( + self, message: GptsMessage, is_last_message: bool = False + ): + if message is None: + return "" + messages_view = [] + + action_report_str = message.action_report + view_info = message.content + if action_report_str and len(action_report_str) > 0: + action_out = ActionOutput.from_dict(json.loads(action_report_str)) + if action_out is not None: # noqa + if action_out.is_exe_success or is_last_message: # noqa + view = action_out.view + view_info = view if view else action_out.content + + thinking = message.thinking + vis_thinking = self.vis_inst(SystemVisTag.VisThinking.value) + if thinking: + vis_thinking = vis_thinking.sync_display(content=thinking) + view_info = vis_thinking + "\n" + view_info + + messages_view.append( + { + "sender": message.sender_name or message.sender, + "receiver": message.receiver_name or message.receiver, + "model": message.model_name, + "markdown": view_info, + "resource": (message.resource_info if message.resource_info else None), + } + ) + return await self.vis_inst(SystemVisTag.VisMessage.value).display( + content=messages_view + ) diff --git a/packages/dbgpt-ext/src/dbgpt_ext/vis/gpt_vis/gpt_vis_converter_v2.py b/packages/dbgpt-ext/src/dbgpt_ext/vis/gpt_vis/gpt_vis_converter_v2.py new file mode 100644 index 000000000..2beba5b12 --- /dev/null +++ b/packages/dbgpt-ext/src/dbgpt_ext/vis/gpt_vis/gpt_vis_converter_v2.py @@ -0,0 +1,268 @@ +import json +import logging +from enum import Enum +from typing import Dict, List, Optional, Union + +from dbgpt.agent import ActionOutput +from dbgpt.agent.core.memory.gpts import GptsMessage, GptsPlan +from dbgpt.agent.core.schema import Status +from dbgpt.vis.vis_converter import SystemVisTag, VisProtocolConverter + +from .tags.vis_app_link import VisAppLink + +NONE_GOAL_PREFIX: str = "none_goal_count_" +logger = logging.getLogger(__name__) + + +class GptVisTagPackage(Enum): + """System Vis Tags.""" + + AgentMessage = "agent-messages" + 
AgentPlans = "agent-plans" + APIResponse = "vis-api-response" + AppLink = "vis-app-link" + Chart = "vis-db-chart" + Code = "vis-code" + Dashboard = "vis-dashboard" + Flow = "agent-flow" + Result = "agent-result" + Plugin = "vis-plugin" + Tools = "vis-tools" + Thinking = "vis-thinking" + Text = "vis-text" + + +class GptVisConverterNew(VisProtocolConverter): + def __init__(self, paths: Optional[str] = None): + default_tag_paths = ["dbgpt_ext.vis.gpt_vis.tags"] + super().__init__(paths if paths else default_tag_paths) + + def system_vis_tag_map(self): + return { + SystemVisTag.VisMessage.value: GptVisTagPackage.AgentMessage.value, + SystemVisTag.VisPlans.value: GptVisTagPackage.AgentPlans.value, + SystemVisTag.VisText.value: GptVisTagPackage.Text.value, + SystemVisTag.VisThinking.value: GptVisTagPackage.Thinking.value, + SystemVisTag.VisChart.value: GptVisTagPackage.Chart.value, + SystemVisTag.VisCode.value: GptVisTagPackage.Code.value, + SystemVisTag.VisTool.value: GptVisTagPackage.Plugin.value, + SystemVisTag.VisTools.value: GptVisTagPackage.Plugin.value, + SystemVisTag.VisDashboard.value: GptVisTagPackage.Dashboard.value, + } + + async def visualization_stream(self, stream_msg: Optional[Union[Dict, str]] = None): + return await self.agent_stream_message(stream_msg) + + async def visualization( + self, + messages: List[GptsMessage], + plans: Optional[List[GptsPlan]] = None, + gpt_msg: Optional[GptsMessage] = None, + stream_msg: Optional[Union[Dict, str]] = None, + ): + message_view = await self.app_link_chat_message(messages) + if stream_msg: + temp_view = await self.agent_stream_message(stream_msg) + message_view = message_view + "\n" + temp_view + return message_view + + async def agent_stream_message( + self, + message: Dict, + ): + """Get agent stream message.""" + messages_view = [] + thinking = message.get("thinking") + markdown = message.get("content") + vis_thinking = self.vis_inst(SystemVisTag.VisThinking.value) + msg_markdown = "" + if thinking: + 
vis_thinking = vis_thinking.sync_display(content=thinking) + msg_markdown = vis_thinking + if markdown: + msg_markdown = msg_markdown + "\n" + markdown + + messages_view.append( + { + "sender": message["sender"], + "model": message["model"], + "markdown": msg_markdown, + } + ) + + return await self.vis_inst(SystemVisTag.VisMessage.value).display( + content=messages_view + ) + + async def app_link_chat_message(self, messages: list): + """Get app link chat message.""" + + # VIS消息组装 + temp_group: Dict = {} + app_link_message: Optional[GptsMessage] = None + app_lanucher_message: Optional[GptsMessage] = None + + none_goal_count = 1 + for message in messages: + if message.sender in [ + "Intent Recognition Expert", + "App Link", + ] or message.receiver in ["Intent Recognition Expert", "App Link"]: + if ( + message.sender in ["Intent Recognition Expert", "App Link"] + and message.receiver == "AppLauncher" + ): + app_link_message = message + if message.receiver != "Human": + continue + + if message.sender == "AppLauncher": + if message.receiver == "Human": + app_lanucher_message = message + continue + + current_gogal = message.current_goal + + last_goal = next(reversed(temp_group)) if temp_group else None + if last_goal: + last_goal_messages = temp_group[last_goal] + if current_gogal: + if current_gogal == last_goal: + last_goal_messages.append(message) + else: + temp_group[current_gogal] = [message] + else: + temp_group[f"{NONE_GOAL_PREFIX}{none_goal_count}"] = [message] + none_goal_count += 1 + else: + if current_gogal: + temp_group[current_gogal] = [message] + else: + temp_group[f"{NONE_GOAL_PREFIX}{none_goal_count}"] = [message] + none_goal_count += 1 + + vis_items: list = [] + if app_link_message: + vis_items.append( + await self._messages_to_app_link_vis( + app_link_message, app_lanucher_message + ) + ) + + return await self._message_group_vis_build(temp_group, vis_items) + + async def _messages_to_agents_vis( + self, messages: List[GptsMessage], is_last_message: bool 
= False + ): + if messages is None or len(messages) <= 0: + return "" + messages_view = [] + for message in messages: + action_report_str = message.action_report + view_info = message.content + if action_report_str and len(action_report_str) > 0: + action_out = ActionOutput.from_dict(json.loads(action_report_str)) + if action_out is not None: # noqa + if action_out.is_exe_success or is_last_message: # noqa + view = action_out.view + view_info = view if view else action_out.content + + messages_view.append( + { + "sender": message.sender, + "receiver": message.receiver, + "model": message.model_name, + "markdown": view_info, + "resource": ( + message.resource_info if message.resource_info else None + ), + } + ) + vis_msg = self.vis_inst(SystemVisTag.VisMessage.value) + return vis_msg.sync_display(content=messages_view) + + async def _messages_to_plan_vis(self, messages: List[Dict]): + if messages is None or len(messages) <= 0: + return "" + vis_msg = self.vis_inst(SystemVisTag.VisPlans.value) + return vis_msg.sync_display(content=messages) + + async def _messages_to_app_link_vis( + self, link_message: GptsMessage, lanucher_message: Optional[GptsMessage] = None + ): + logger.info("app link vis build") + if link_message is None: + return "" + param = {} + link_report_str = link_message.action_report + if link_report_str and len(link_report_str) > 0: + action_out = ActionOutput.from_dict(json.loads(link_report_str)) + if action_out is not None: + if action_out.is_exe_success: + temp = json.loads(action_out.content) + + param["app_code"] = temp["app_code"] + param["app_name"] = temp["app_name"] + param["app_desc"] = temp.get("app_desc", "") + param["app_logo"] = "" + param["status"] = Status.RUNNING.value + + else: + param["status"] = Status.FAILED.value + param["msg"] = action_out.content + + if lanucher_message: + lanucher_report_str = lanucher_message.action_report + if lanucher_report_str and len(lanucher_report_str) > 0: + lanucher_action_out = 
ActionOutput.from_dict( + json.loads(lanucher_report_str) + ) + if lanucher_action_out is not None: + if lanucher_action_out.is_exe_success: + param["status"] = Status.COMPLETE.value + else: + param["status"] = Status.FAILED.value + param["msg"] = lanucher_action_out.content + else: + param["status"] = Status.COMPLETE.value + vis_app_link = self.vis_inst(VisAppLink.vis_tag()) + return vis_app_link.sync_display(content=param) + + async def _message_group_vis_build(self, message_group, vis_items: list): + num: int = 0 + if message_group: + last_goal = next(reversed(message_group)) + last_goal_message = None + if not last_goal.startswith(NONE_GOAL_PREFIX): + last_goal_messages = message_group[last_goal] + last_goal_message = last_goal_messages[-1] + + plan_temps: List[dict] = [] + need_show_singe_last_message = False + for key, value in message_group.items(): + num = num + 1 + if key.startswith(NONE_GOAL_PREFIX): + vis_items.append(await self._messages_to_plan_vis(plan_temps)) + plan_temps = [] + num = 0 + vis_items.append(await self._messages_to_agents_vis(value)) + else: + num += 1 + plan_temps.append( + { + "name": key, + "num": num, + "status": "complete", + "agent": value[0].receiver if value else "", + "markdown": await self._messages_to_agents_vis(value), + } + ) + need_show_singe_last_message = True + + if len(plan_temps) > 0: + vis_items.append(await self._messages_to_plan_vis(plan_temps)) + if need_show_singe_last_message and last_goal_message: + vis_items.append( + await self._messages_to_agents_vis([last_goal_message], True) + ) + return "\n".join(vis_items) diff --git a/packages/dbgpt-core/src/dbgpt/vis/tags/__init__.py b/packages/dbgpt-ext/src/dbgpt_ext/vis/gpt_vis/tags/__init__.py similarity index 100% rename from packages/dbgpt-core/src/dbgpt/vis/tags/__init__.py rename to packages/dbgpt-ext/src/dbgpt_ext/vis/gpt_vis/tags/__init__.py diff --git a/packages/dbgpt-core/src/dbgpt/vis/tags/vis_agent_message.py 
b/packages/dbgpt-ext/src/dbgpt_ext/vis/gpt_vis/tags/vis_agent_message.py similarity index 87% rename from packages/dbgpt-core/src/dbgpt/vis/tags/vis_agent_message.py rename to packages/dbgpt-ext/src/dbgpt_ext/vis/gpt_vis/tags/vis_agent_message.py index fac953e10..b144a9b7f 100644 --- a/packages/dbgpt-core/src/dbgpt/vis/tags/vis_agent_message.py +++ b/packages/dbgpt-ext/src/dbgpt_ext/vis/gpt_vis/tags/vis_agent_message.py @@ -1,6 +1,6 @@ """Vis Agent Messages.""" -from ..base import Vis +from dbgpt.vis import Vis class VisAgentMessages(Vis): diff --git a/packages/dbgpt-core/src/dbgpt/vis/tags/vis_agent_plans.py b/packages/dbgpt-ext/src/dbgpt_ext/vis/gpt_vis/tags/vis_agent_plans.py similarity index 91% rename from packages/dbgpt-core/src/dbgpt/vis/tags/vis_agent_plans.py rename to packages/dbgpt-ext/src/dbgpt_ext/vis/gpt_vis/tags/vis_agent_plans.py index 027fce9b7..a3cb0fb08 100644 --- a/packages/dbgpt-core/src/dbgpt/vis/tags/vis_agent_plans.py +++ b/packages/dbgpt-ext/src/dbgpt_ext/vis/gpt_vis/tags/vis_agent_plans.py @@ -1,6 +1,6 @@ """Viss Agent Plans.""" -from ..base import Vis +from dbgpt.vis import Vis class VisAgentPlans(Vis): diff --git a/packages/dbgpt-core/src/dbgpt/vis/tags/vis_api_response.py b/packages/dbgpt-ext/src/dbgpt_ext/vis/gpt_vis/tags/vis_api_response.py similarity index 87% rename from packages/dbgpt-core/src/dbgpt/vis/tags/vis_api_response.py rename to packages/dbgpt-ext/src/dbgpt_ext/vis/gpt_vis/tags/vis_api_response.py index f6a1da619..7852f760c 100644 --- a/packages/dbgpt-core/src/dbgpt/vis/tags/vis_api_response.py +++ b/packages/dbgpt-ext/src/dbgpt_ext/vis/gpt_vis/tags/vis_api_response.py @@ -1,6 +1,6 @@ """Vis Api Response.""" -from ..base import Vis +from dbgpt.vis import Vis class VisApiResponse(Vis): diff --git a/packages/dbgpt-core/src/dbgpt/vis/tags/vis_app_link.py b/packages/dbgpt-ext/src/dbgpt_ext/vis/gpt_vis/tags/vis_app_link.py similarity index 86% rename from packages/dbgpt-core/src/dbgpt/vis/tags/vis_app_link.py rename to 
packages/dbgpt-ext/src/dbgpt_ext/vis/gpt_vis/tags/vis_app_link.py index 5b5702774..d6bc12098 100644 --- a/packages/dbgpt-core/src/dbgpt/vis/tags/vis_app_link.py +++ b/packages/dbgpt-ext/src/dbgpt_ext/vis/gpt_vis/tags/vis_app_link.py @@ -1,6 +1,6 @@ """Vis App Link.""" -from ..base import Vis +from dbgpt.vis import Vis class VisAppLink(Vis): diff --git a/packages/dbgpt-core/src/dbgpt/vis/tags/vis_chart.py b/packages/dbgpt-ext/src/dbgpt_ext/vis/gpt_vis/tags/vis_chart.py similarity index 99% rename from packages/dbgpt-core/src/dbgpt/vis/tags/vis_chart.py rename to packages/dbgpt-ext/src/dbgpt_ext/vis/gpt_vis/tags/vis_chart.py index 3980aef21..121ab959a 100644 --- a/packages/dbgpt-core/src/dbgpt/vis/tags/vis_chart.py +++ b/packages/dbgpt-ext/src/dbgpt_ext/vis/gpt_vis/tags/vis_chart.py @@ -3,7 +3,7 @@ import json from typing import Any, Dict, Optional -from ..base import Vis +from dbgpt.vis import Vis def default_chart_type_prompt() -> str: diff --git a/packages/dbgpt-core/src/dbgpt/vis/tags/vis_code.py b/packages/dbgpt-ext/src/dbgpt_ext/vis/gpt_vis/tags/vis_code.py similarity index 85% rename from packages/dbgpt-core/src/dbgpt/vis/tags/vis_code.py rename to packages/dbgpt-ext/src/dbgpt_ext/vis/gpt_vis/tags/vis_code.py index 4b9f1f050..7f16be8f1 100644 --- a/packages/dbgpt-core/src/dbgpt/vis/tags/vis_code.py +++ b/packages/dbgpt-ext/src/dbgpt_ext/vis/gpt_vis/tags/vis_code.py @@ -1,6 +1,6 @@ """Vis Code.""" -from ..base import Vis +from dbgpt.vis import Vis class VisCode(Vis): diff --git a/packages/dbgpt-core/src/dbgpt/vis/tags/vis_dashboard.py b/packages/dbgpt-ext/src/dbgpt_ext/vis/gpt_vis/tags/vis_dashboard.py similarity index 98% rename from packages/dbgpt-core/src/dbgpt/vis/tags/vis_dashboard.py rename to packages/dbgpt-ext/src/dbgpt_ext/vis/gpt_vis/tags/vis_dashboard.py index 104c6adaf..7c6f380f6 100644 --- a/packages/dbgpt-core/src/dbgpt/vis/tags/vis_dashboard.py +++ b/packages/dbgpt-ext/src/dbgpt_ext/vis/gpt_vis/tags/vis_dashboard.py @@ -4,7 +4,7 @@ import json 
import logging from typing import Any, Dict, Optional -from ..base import Vis +from dbgpt.vis import Vis logger = logging.getLogger(__name__) diff --git a/packages/dbgpt-core/src/dbgpt/vis/tags/vis_gpts_execution.py b/packages/dbgpt-ext/src/dbgpt_ext/vis/gpt_vis/tags/vis_gpts_execution.py similarity index 86% rename from packages/dbgpt-core/src/dbgpt/vis/tags/vis_gpts_execution.py rename to packages/dbgpt-ext/src/dbgpt_ext/vis/gpt_vis/tags/vis_gpts_execution.py index 821012e7a..860cffb82 100644 --- a/packages/dbgpt-core/src/dbgpt/vis/tags/vis_gpts_execution.py +++ b/packages/dbgpt-ext/src/dbgpt_ext/vis/gpt_vis/tags/vis_gpts_execution.py @@ -1,6 +1,6 @@ """VisDbgptsFlow.""" -from ..base import Vis +from dbgpt.vis import Vis class VisDbgptsFlow(Vis): diff --git a/packages/dbgpt-core/src/dbgpt/vis/tags/vis_gpts_result.py b/packages/dbgpt-ext/src/dbgpt_ext/vis/gpt_vis/tags/vis_gpts_result.py similarity index 88% rename from packages/dbgpt-core/src/dbgpt/vis/tags/vis_gpts_result.py rename to packages/dbgpt-ext/src/dbgpt_ext/vis/gpt_vis/tags/vis_gpts_result.py index 7c4fde482..d052c665a 100644 --- a/packages/dbgpt-core/src/dbgpt/vis/tags/vis_gpts_result.py +++ b/packages/dbgpt-ext/src/dbgpt_ext/vis/gpt_vis/tags/vis_gpts_result.py @@ -1,6 +1,6 @@ """VisDbgptsFlowResult.""" -from ..base import Vis +from dbgpt.vis import Vis class VisDbgptsFlowResult(Vis): diff --git a/packages/dbgpt-core/src/dbgpt/vis/tags/vis_plugin.py b/packages/dbgpt-ext/src/dbgpt_ext/vis/gpt_vis/tags/vis_plugin.py similarity index 85% rename from packages/dbgpt-core/src/dbgpt/vis/tags/vis_plugin.py rename to packages/dbgpt-ext/src/dbgpt_ext/vis/gpt_vis/tags/vis_plugin.py index 27c9d79ef..f8f46f26a 100644 --- a/packages/dbgpt-core/src/dbgpt/vis/tags/vis_plugin.py +++ b/packages/dbgpt-ext/src/dbgpt_ext/vis/gpt_vis/tags/vis_plugin.py @@ -1,6 +1,6 @@ """Vis Plugin.""" -from ..base import Vis +from dbgpt.vis import Vis class VisPlugin(Vis): diff --git a/packages/dbgpt-core/src/dbgpt/vis/tags/vis_thinking.py 
b/packages/dbgpt-ext/src/dbgpt_ext/vis/gpt_vis/tags/vis_thinking.py similarity index 95% rename from packages/dbgpt-core/src/dbgpt/vis/tags/vis_thinking.py rename to packages/dbgpt-ext/src/dbgpt_ext/vis/gpt_vis/tags/vis_thinking.py index 7b5407f04..4c5665e47 100644 --- a/packages/dbgpt-core/src/dbgpt/vis/tags/vis_thinking.py +++ b/packages/dbgpt-ext/src/dbgpt_ext/vis/gpt_vis/tags/vis_thinking.py @@ -1,4 +1,4 @@ -from ..base import Vis +from dbgpt.vis import Vis class VisThinking(Vis): diff --git a/packages/dbgpt-serve/src/dbgpt_serve/agent/agents/controller.py b/packages/dbgpt-serve/src/dbgpt_serve/agent/agents/controller.py index 02c97fb85..634d85a3f 100644 --- a/packages/dbgpt-serve/src/dbgpt_serve/agent/agents/controller.py +++ b/packages/dbgpt-serve/src/dbgpt_serve/agent/agents/controller.py @@ -3,18 +3,17 @@ import json import logging import time from abc import ABC +from copy import deepcopy from typing import Any, Dict, List, Optional, Type from fastapi import APIRouter from dbgpt._private.config import Config from dbgpt.agent import ( - Agent, AgentContext, AgentMemory, AutoPlanChatManager, ConversableAgent, - DefaultAWELLayoutManager, EnhancedShortTermMemory, GptsMemory, HybridMemory, @@ -23,9 +22,10 @@ from dbgpt.agent import ( UserProxyAgent, get_agent_manager, ) +from dbgpt.agent.core.base_team import ManagerAgent from dbgpt.agent.core.memory.gpts import GptsMessage from dbgpt.agent.core.schema import Status -from dbgpt.agent.resource import get_resource_manager +from dbgpt.agent.resource import ResourceManager, get_resource_manager from dbgpt.agent.util.llm.llm import LLMStrategyType from dbgpt.component import BaseComponent, ComponentType, SystemApp from dbgpt.core import PromptTemplate @@ -45,7 +45,7 @@ from dbgpt_serve.prompt.service import service as PromptService from ...rag.retriever.knowledge_space import KnowledgeSpaceRetriever from ..db import GptsMessagesDao -from ..db.gpts_app import GptsApp, GptsAppDao, GptsAppQuery +from ..db.gpts_app 
import GptsApp, GptsAppDao, GptsAppDetail, GptsAppQuery from ..db.gpts_conversations_db import GptsConversationsDao, GptsConversationsEntity from ..team.base import TeamMode from .db_gpts_memory import MetaDbGptsMessageMemory, MetaDbGptsPlansMemory @@ -309,11 +309,16 @@ class MultiAgents(BaseComponent, ABC): yield None, chunk, agent_conv_id else: # init gpts memory + vis_protocal = None + # if enable_verbose: + ## Defaul use gpt_vis ui component‘s package + # vis_protocal = GptVisConverter() + self.memory.init( agent_conv_id, - enable_vis_message=enable_verbose, history_messages=history_messages, start_round=history_message_count, + vis_converter=vis_protocal, ) # init agent memory agent_memory = self.get_or_build_agent_memory(conv_id, gpts_name) @@ -475,6 +480,179 @@ class MultiAgents(BaseComponent, ABC): current_message.end_current_round() current_message.save_to_storage() + async def _build_agent_by_gpts( + self, + context: AgentContext, + agent_memory: AgentMemory, + rm: ResourceManager, + app: GptsApp, + ) -> ConversableAgent: + """Build a dialogue target agent through gpts configuration""" + logger.info(f"_build_agent_by_gpts:{app.app_code},{app.app_name}") + employees: List[ConversableAgent] = [] + if app.details is not None and len(app.details) > 0: + employees: List[ConversableAgent] = await self._build_employees( + context, agent_memory, rm, [deepcopy(item) for item in app.details] + ) + team_mode = TeamMode(app.team_mode) + prompt_service: PromptService = get_service() + if team_mode == TeamMode.SINGLE_AGENT: + if employees is not None and len(employees) == 1: + recipient = employees[0] + else: + single_context = app.team_context + cls: Type[ConversableAgent] = self.agent_manage.get_by_name( + single_context.agent_name + ) + + llm_config = LLMConfig( + llm_client=self.llm_provider, + lm_strategy=LLMStrategyType(single_context.llm_strategy), + strategy_context=single_context.llm_strategy_value, + ) + prompt_template = None + if 
single_context.prompt_template: + prompt_template: PromptTemplate = prompt_service.get_template( + prompt_code=single_context.prompt_template + ) + depend_resource = await blocking_func_to_async( + CFG.SYSTEM_APP, rm.build_resource, single_context.resources + ) + + recipient = ( + await cls() + .bind(context) + .bind(agent_memory) + .bind(llm_config) + .bind(depend_resource) + .bind(prompt_template) + .build() + ) + recipient.profile.name = app.app_name + recipient.profile.desc = app.app_describe + recipient.profile.avatar = app.icon + return recipient + elif TeamMode.AUTO_PLAN == team_mode: + if app.team_context: + agent_manager = get_agent_manager() + auto_team_ctx = app.team_context + + manager_cls: Type[ConversableAgent] = agent_manager.get_by_name( + auto_team_ctx.teamleader + ) + manager = manager_cls() + if isinstance(manager, ManagerAgent) and len(employees) > 0: + manager.hire(employees) + + llm_config = LLMConfig( + llm_client=self.llm_provider, + llm_strategy=LLMStrategyType(auto_team_ctx.llm_strategy), + strategy_context=auto_team_ctx.llm_strategy_value, + ) + manager.bind(llm_config) + + if auto_team_ctx.prompt_template: + prompt_template: PromptTemplate = prompt_service.get_template( + prompt_code=auto_team_ctx.prompt_template + ) + manager.bind(prompt_template) + if auto_team_ctx.resources: + depend_resource = await blocking_func_to_async( + CFG.SYSTEM_APP, rm.build_resource, auto_team_ctx.resources + ) + manager.bind(depend_resource) + + manager = await manager.bind(context).bind(agent_memory).build() + else: + ## default + manager = AutoPlanChatManager() + llm_config = employees[0].llm_config + + if not employees or len(employees) < 0: + raise ValueError("APP exception no available agent!") + manager = ( + await manager.bind(context) + .bind(agent_memory) + .bind(llm_config) + .build() + ) + manager.hire(employees) + + manager.profile.name = app.app_name + manager.profile.desc = app.app_describe + manager.profile.avatar = app.icon + logger.info( + 
f"_build_agent_by_gpts return:{manager.profile.name},{manager.profile.desc},{id(manager)}" # noqa + ) + return manager + elif TeamMode.NATIVE_APP == team_mode: + raise ValueError("Native APP chat not supported!") + else: + raise ValueError(f"Unknown Agent Team Mode!{team_mode}") + + async def _build_employees( + self, + context: AgentContext, + agent_memory: AgentMemory, + rm: ResourceManager, + app_details: List[GptsAppDetail], + ) -> List[ConversableAgent]: + """Constructing dialogue members through gpts-related Agent or gpts app information.""" # noqa + logger.info( + f"_build_employees:{[item.agent_role + ',' + item.agent_name for item in app_details] if app_details else ''}" # noqa + ) + employees: List[ConversableAgent] = [] + prompt_service: PromptService = get_service() + for record in app_details: + logger.info(f"_build_employees循环:{record.agent_role},{record.agent_name}") + if record.type == "app": + gpt_app: GptsApp = deepcopy(self.gpts_app.app_detail(record.agent_role)) + if not gpt_app: + raise ValueError(f"Not found app {record.agent_role}!") + employee_agent = await self._build_agent_by_gpts( + context, agent_memory, rm, gpt_app + ) + logger.info( + f"append employee_agent:{employee_agent.profile.name},{employee_agent.profile.desc},{id(employee_agent)}" # noqa + ) + employees.append(employee_agent) + else: + cls: Type[ConversableAgent] = self.agent_manage.get_by_name( + record.agent_role + ) + llm_config = LLMConfig( + llm_client=self.llm_provider, + lm_strategy=LLMStrategyType(record.llm_strategy), + strategy_context=record.llm_strategy_value, + ) + prompt_template = None + if record.prompt_template: + prompt_template: PromptTemplate = prompt_service.get_template( + prompt_code=record.prompt_template + ) + depend_resource = await blocking_func_to_async( + CFG.SYSTEM_APP, rm.build_resource, record.resources + ) + agent = ( + await cls() + .bind(context) + .bind(agent_memory) + .bind(llm_config) + .bind(depend_resource) + .bind(prompt_template) + 
.build() + ) + if record.agent_describe: + temp_profile = agent.profile.copy() + temp_profile.desc = record.agent_describe + temp_profile.name = record.agent_name + agent.bind(temp_profile) + employees.append(agent) + logger.info( + f"_build_employees return:{[item.profile.name if item.profile.name else '' + ',' + str(id(item)) for item in employees]}" # noqa + ) + return employees + async def agent_team_chat_new( self, user_query: str, @@ -493,8 +671,6 @@ class MultiAgents(BaseComponent, ABC): ): gpts_status = Status.COMPLETE.value try: - employees: List[Agent] = [] - self.agent_manage = get_agent_manager() context: AgentContext = AgentContext( @@ -506,7 +682,6 @@ class MultiAgents(BaseComponent, ABC): enable_vis_message=enable_verbose, ) - prompt_service: PromptService = get_service() rm = get_resource_manager() # init llm provider @@ -518,66 +693,9 @@ class MultiAgents(BaseComponent, ABC): worker_manager, auto_convert_message=True ) - for record in gpts_app.details: - cls: Type[ConversableAgent] = self.agent_manage.get_by_name( - record.agent_name - ) - llm_config = LLMConfig( - llm_client=self.llm_provider, - llm_strategy=LLMStrategyType(record.llm_strategy), - strategy_context=record.llm_strategy_value, - ) - prompt_template = None - if record.prompt_template: - prompt_template: PromptTemplate = prompt_service.get_template( - prompt_code=record.prompt_template - ) - depend_resource = await blocking_func_to_async( - CFG.SYSTEM_APP, rm.build_resource, record.resources - ) - agent = ( - await cls() - .bind(context) - .bind(agent_memory) - .bind(llm_config) - .bind(depend_resource) - .bind(prompt_template) - .build(is_retry_chat=is_retry_chat) - ) - employees.append(agent) - - team_mode = TeamMode(gpts_app.team_mode) - if team_mode == TeamMode.SINGLE_AGENT: - recipient = employees[0] - else: - if TeamMode.AUTO_PLAN == team_mode: - if not gpts_app.details or len(gpts_app.details) < 0: - raise ValueError("APP exception no available agent!") - llm_config = 
employees[0].llm_config - manager = AutoPlanChatManager() - elif TeamMode.AWEL_LAYOUT == team_mode: - if not gpts_app.team_context: - raise ValueError( - "Your APP has not been developed yet, please bind Flow!" - ) - manager = DefaultAWELLayoutManager(dag=gpts_app.team_context) - llm_config = LLMConfig( - llm_client=self.llm_provider, - llm_strategy=LLMStrategyType.Priority, - strategy_context=json.dumps(["bailing_proxyllm"]), - ) # TODO - elif TeamMode.NATIVE_APP == team_mode: - raise ValueError("Native APP chat not supported!") - else: - raise ValueError(f"Unknown Agent Team Mode!{team_mode}") - manager = ( - await manager.bind(context) - .bind(agent_memory) - .bind(llm_config) - .build() - ) - manager.hire(employees) - recipient = manager + recipient = await self._build_agent_by_gpts( + context, agent_memory, rm, gpts_app + ) if is_retry_chat: # retry chat @@ -618,6 +736,7 @@ class MultiAgents(BaseComponent, ABC): except Exception as e: logger.error(f"chat abnormal termination!{str(e)}", e) self.gpts_conversations.update(conv_uid, Status.FAILED.value) + raise ValueError(f"The conversation is abnormal!{str(e)}") finally: if not app_link_start: await self.memory.complete(conv_uid) @@ -654,7 +773,7 @@ class MultiAgents(BaseComponent, ABC): else False ) if is_complete: - return await self.memory.app_link_chat_message(conv_id) + return await self.memory.vis_messages(conv_id) else: pass diff --git a/packages/dbgpt-serve/src/dbgpt_serve/agent/agents/db_gpts_memory.py b/packages/dbgpt-serve/src/dbgpt_serve/agent/agents/db_gpts_memory.py index 1169ded5e..8423e63a9 100644 --- a/packages/dbgpt-serve/src/dbgpt_serve/agent/agents/db_gpts_memory.py +++ b/packages/dbgpt-serve/src/dbgpt_serve/agent/agents/db_gpts_memory.py @@ -12,6 +12,9 @@ from ..db.gpts_plans_db import GptsPlansDao, GptsPlansEntity class MetaDbGptsPlansMemory(GptsPlansMemory): + def get_plans_by_msg_round(self, conv_id: str, rounds_id: str) -> List[GptsPlan]: + pass + def __init__(self): self.gpts_plan = 
GptsPlansDao() @@ -78,6 +81,12 @@ class MetaDbGptsMessageMemory(GptsMessageMemory): def __init__(self): self.gpts_message = GptsMessagesDao() + def get_by_message_id(self, message_id: str) -> Optional[GptsMessage]: + pass + + def update(self, message: GptsMessage) -> None: + pass + def append(self, message: GptsMessage): self.gpts_message.append(message.to_dict()) diff --git a/packages/dbgpt-serve/src/dbgpt_serve/agent/agents/expand/actions/app_link_action.py b/packages/dbgpt-serve/src/dbgpt_serve/agent/agents/expand/actions/app_link_action.py index f8108549d..159f8ba6e 100644 --- a/packages/dbgpt-serve/src/dbgpt_serve/agent/agents/expand/actions/app_link_action.py +++ b/packages/dbgpt-serve/src/dbgpt_serve/agent/agents/expand/actions/app_link_action.py @@ -4,7 +4,7 @@ from typing import Optional from dbgpt._private.pydantic import BaseModel, Field, model_to_dict from dbgpt.agent import Action, ActionOutput, AgentResource, ResourceType -from dbgpt.vis.tags.vis_app_link import Vis, VisAppLink +from dbgpt.vis import SystemVisTag logger = logging.getLogger(__name__) @@ -23,16 +23,13 @@ class LinkAppInput(BaseModel): class LinkAppAction(Action[LinkAppInput]): def __init__(self, **kwargs): super().__init__(**kwargs) - self._render_protocal = VisAppLink() + ## this action out view vis tag name + self.action_view_tag: str = SystemVisTag.VisCode.value @property def resource_need(self) -> Optional[ResourceType]: return ResourceType.Knowledge - @property - def render_protocal(self) -> Optional[Vis]: - return self._render_protocal - @property def out_model_type(self): return LinkAppInput @@ -68,7 +65,7 @@ class LinkAppAction(Action[LinkAppInput]): return ActionOutput( is_exe_success=True, content=json.dumps(app_link_param, ensure_ascii=False), - view=await self.render_protocal.display(content=app_link_param), + view=await self.render_protocol.display(content=app_link_param), next_speakers=[SummaryAssistantAgent().role], ) else: @@ -87,6 +84,6 @@ class 
LinkAppAction(Action[LinkAppInput]): return ActionOutput( is_exe_success=True, content=json.dumps(model_to_dict(param), ensure_ascii=False), - view=await self.render_protocal.display(content=app_link_param), + view=await self.render_protocol.display(content=app_link_param), next_speakers=[StartAppAssistantAgent().role], ) diff --git a/packages/dbgpt-serve/src/dbgpt_serve/agent/agents/expand/actions/app_start_action.py b/packages/dbgpt-serve/src/dbgpt_serve/agent/agents/expand/actions/app_start_action.py index 92459790e..46704a652 100644 --- a/packages/dbgpt-serve/src/dbgpt_serve/agent/agents/expand/actions/app_start_action.py +++ b/packages/dbgpt-serve/src/dbgpt_serve/agent/agents/expand/actions/app_start_action.py @@ -3,7 +3,7 @@ from typing import Optional from dbgpt._private.pydantic import BaseModel, Field from dbgpt.agent import Action, ActionOutput, AgentResource -from dbgpt.vis.tags.vis_plugin import Vis, VisPlugin +from dbgpt.vis import SystemVisTag from dbgpt_serve.agent.agents.expand.actions.intent_recognition_action import ( IntentRecognitionInput, ) @@ -31,11 +31,8 @@ class LinkAppInput(BaseModel): class StartAppAction(Action[LinkAppInput]): def __init__(self, **kwargs): super().__init__(**kwargs) - self._render_protocal = VisPlugin() - - @property - def render_protocal(self) -> Optional[Vis]: - return self._render_protocal + ## this action out view vis tag name + self.action_view_tag: str = SystemVisTag.VisTool.value @property def out_model_type(self): diff --git a/packages/dbgpt-serve/src/dbgpt_serve/agent/agents/expand/actions/intent_recognition_action.py b/packages/dbgpt-serve/src/dbgpt_serve/agent/agents/expand/actions/intent_recognition_action.py index b74df905b..7d79448c6 100644 --- a/packages/dbgpt-serve/src/dbgpt_serve/agent/agents/expand/actions/intent_recognition_action.py +++ b/packages/dbgpt-serve/src/dbgpt_serve/agent/agents/expand/actions/intent_recognition_action.py @@ -4,7 +4,7 @@ from typing import Optional from dbgpt._private.pydantic 
import BaseModel, Field, model_to_dict from dbgpt.agent import Action, ActionOutput, AgentResource, ResourceType -from dbgpt.vis.tags.vis_app_link import Vis, VisAppLink +from dbgpt.vis import SystemVisTag logger = logging.getLogger(__name__) @@ -39,16 +39,13 @@ class IntentRecognitionInput(BaseModel): class IntentRecognitionAction(Action[IntentRecognitionInput]): def __init__(self, **kwargs): super().__init__(**kwargs) - self._render_protocal = VisAppLink() + ## this action out view vis tag name + self.action_view_tag: str = SystemVisTag.VisTool.value @property def resource_need(self) -> Optional[ResourceType]: return ResourceType.Knowledge - @property - def render_protocal(self) -> Optional[Vis]: - return self._render_protocal - @property def out_model_type(self): return IntentRecognitionInput diff --git a/packages/dbgpt-serve/src/dbgpt_serve/agent/agents/expand/app_resource_start_assisant_agent.py b/packages/dbgpt-serve/src/dbgpt_serve/agent/agents/expand/app_resource_start_assisant_agent.py index 54b72bc7a..1cab76e52 100644 --- a/packages/dbgpt-serve/src/dbgpt_serve/agent/agents/expand/app_resource_start_assisant_agent.py +++ b/packages/dbgpt-serve/src/dbgpt_serve/agent/agents/expand/app_resource_start_assisant_agent.py @@ -16,7 +16,7 @@ from dbgpt.agent import ( ) from dbgpt.agent.core.profile import DynConfig, ProfileConfig from dbgpt.agent.resource.app import AppResource -from dbgpt.vis.tags.vis_plugin import Vis, VisPlugin +from dbgpt.vis import SystemVisTag logger = logging.getLogger(__name__) @@ -43,18 +43,14 @@ class AppResourceAction(Action[AppResourceInput]): def __init__(self, **kwargs): """App action init.""" super().__init__(**kwargs) - self._render_protocol = VisPlugin() + ## this action out view vis tag name + self.action_view_tag: str = SystemVisTag.VisTool.value @property def resource_need(self) -> Optional[ResourceType]: """Return the resource type needed for the action.""" return ResourceType.App - @property - def render_protocol(self) -> 
Optional[Vis]: - """Return the render protocol.""" - return self._render_protocol - @property def out_model_type(self): """Return the output model type.""" diff --git a/packages/dbgpt-serve/src/dbgpt_serve/agent/db/gpts_app.py b/packages/dbgpt-serve/src/dbgpt_serve/agent/db/gpts_app.py index 03d73115f..837d06007 100644 --- a/packages/dbgpt-serve/src/dbgpt_serve/agent/db/gpts_app.py +++ b/packages/dbgpt-serve/src/dbgpt_serve/agent/db/gpts_app.py @@ -25,6 +25,8 @@ from dbgpt._private.pydantic import ( model_validator, ) from dbgpt.agent.core.plan import AWELTeamContext +from dbgpt.agent.core.plan.base import SingleAgentContext +from dbgpt.agent.core.plan.react.team_react_plan import AutoTeamContext from dbgpt.agent.resource.base import AgentResource, ResourceType from dbgpt.storage.metadata import BaseDao, Model from dbgpt_app.openapi.api_view_model import ConversationVo @@ -42,17 +44,25 @@ logger = logging.getLogger(__name__) recommend_question_dao = RecommendQuestionDao() +class BindAppRequest(BaseModel): + team_app_code: str + bin_app_codes: List[str] + + class GptsAppDetail(BaseModel): model_config = ConfigDict(arbitrary_types_allowed=True) app_code: Optional[str] = None app_name: Optional[str] = None + type: Optional[str] = None agent_name: Optional[str] = None + agent_role: Optional[str] = None + agent_describe: Optional[str] = None node_id: Optional[str] = None resources: Optional[list[AgentResource]] = None prompt_template: Optional[str] = None llm_strategy: Optional[str] = None - llm_strategy_value: Optional[str] = None + llm_strategy_value: Union[Optional[str], Optional[List[Any]]] = None created_at: datetime = datetime.now() updated_at: datetime = datetime.now() @@ -81,7 +91,10 @@ class GptsAppDetail(BaseModel): return cls( app_code=d["app_code"], app_name=d["app_name"], + type=d["type"], agent_name=d["agent_name"], + agent_role=d["agent_role"], + agent_describe=d.get("agent_describe", None), node_id=d["node_id"], 
resources=AgentResource.from_json_list_str(d.get("resources", None)), prompt_template=d.get("prompt_template", None), @@ -97,7 +110,10 @@ class GptsAppDetail(BaseModel): return cls( app_code=entity.app_code, app_name=entity.app_name, + type=entity.type, agent_name=entity.agent_name, + agent_role=entity.agent_role, + agent_describe=entity.agent_describe, node_id=entity.node_id, resources=resources, prompt_template=entity.prompt_template, @@ -116,7 +132,11 @@ class GptsApp(BaseModel): app_describe: Optional[str] = None team_mode: Optional[str] = None language: Optional[str] = None - team_context: Optional[Union[str, AWELTeamContext, NativeTeamContext]] = None + team_context: Optional[ + Union[ + str, AutoTeamContext, SingleAgentContext, AWELTeamContext, NativeTeamContext + ] + ] = None user_code: Optional[str] = None sys_code: Optional[str] = None is_collected: Optional[str] = None @@ -162,6 +182,7 @@ class GptsApp(BaseModel): team_context=d.get("team_context", None), user_code=d.get("user_code", None), sys_code=d.get("sys_code", None), + icon=d.get("icon", None), is_collected=d.get("is_collected", None), created_at=d.get("created_at", None), updated_at=d.get("updated_at", None), @@ -285,9 +306,12 @@ class GptsAppCollectionEntity(Model): app_code = Column(String(255), nullable=False, comment="Current AI assistant code") user_code = Column(String(255), nullable=True, comment="user code") sys_code = Column(String(255), nullable=True, comment="system app code") - created_at = Column(DateTime, default=datetime.utcnow, comment="create time") + created_at = Column( + DateTime, name="gmt_create", default=datetime.utcnow, comment="create time" + ) updated_at = Column( DateTime, + name="gmt_modified", default=datetime.utcnow, onupdate=datetime.utcnow, comment="last update time", @@ -322,9 +346,12 @@ class GptsAppEntity(Model): comment="Parameters required for application", ) - created_at = Column(DateTime, default=datetime.utcnow, comment="create time") + created_at = Column( 
+ DateTime, name="gmt_create", default=datetime.utcnow, comment="create time" + ) updated_at = Column( DateTime, + name="gmt_modified", default=datetime.utcnow, onupdate=datetime.utcnow, comment="last update time", @@ -339,7 +366,14 @@ class GptsAppDetailEntity(Model): id = Column(Integer, primary_key=True, comment="autoincrement id") app_code = Column(String(255), nullable=False, comment="Current AI assistant code") app_name = Column(String(255), nullable=False, comment="Current AI assistant name") + type = Column( + String(255), + nullable=False, + comment="bind detail agent type. 'app' or 'agent', default 'agent'", + ) agent_name = Column(String(255), nullable=False, comment=" Agent name") + agent_role = Column(String(255), nullable=False, comment=" Agent role") + agent_describe = Column(Text, nullable=True, comment=" Agent describe") node_id = Column( String(255), nullable=False, comment="Current AI assistant Agent Node id" ) @@ -349,9 +383,12 @@ class GptsAppDetailEntity(Model): llm_strategy_value = Column( Text, nullable=True, comment="Agent use llm strategy value" ) - created_at = Column(DateTime, default=datetime.utcnow, comment="create time") + created_at = Column( + DateTime, name="gmt_create", default=datetime.utcnow, comment="create time" + ) updated_at = Column( DateTime, + name="gmt_modified", default=datetime.utcnow, onupdate=datetime.utcnow, comment="last update time", @@ -695,6 +732,7 @@ class GptsAppDao(BaseDao): app_info.team_mode, app_info.team_context ), "user_code": app_info.user_code, + "icon": app_info.icon, "sys_code": app_info.sys_code, "is_collected": "true" if app_info.app_code in app_collects else "false", "created_at": app_info.created_at, @@ -766,6 +804,45 @@ class GptsAppDao(BaseDao): else: return app_info + async def auto_team_bin_apps(self, team_app_code: str, bind_apps: List[str]): + logger.info(f"auto_team_bin_apps:{team_app_code},{bind_apps}") + ### 把应用转换为当前应用的子agent, + team_app: GptsApp = self.app_detail(team_app_code) + if not 
team_app: + raise ValueError(f"{team_app} is not a app!") + if team_app.team_mode != TeamMode.AUTO_PLAN.value: + raise ValueError(f"{team_app.app_name} is not a multi agents app!") + + gpt_apps: List[GptsApp] = [] + err_app_codes: List[str] = [] + for bind_app in bind_apps: + gpt_app: GptsApp = self.app_detail(bind_app) + if not gpt_app: + err_app_codes.append(bind_app) + gpt_apps.append(gpt_app) + if len(err_app_codes) > 0: + raise ValueError( + f"There is a problem with the app codes to be bound![{err_app_codes}]" + ) + for gpt_app in gpt_apps: + ## 暂时线只支持绑定单agent应用,多Agent应用绑定要把多Agent的子Agent资源提到绑定的TL Agent上,可能需要产品测来定义 #noqa + if gpt_app.team_mode == TeamMode.SINGLE_AGENT.value: + new_detail: GptsAppDetail = gpt_app.details[0].copy() + new_detail.app_name = team_app.app_name + new_detail.app_code = team_app.app_code + strategy_values = json.loads(gpt_app.details[0].llm_strategy_value) + # 恢复模拟前端的数据 + new_detail.llm_strategy_value = ",".join(strategy_values) + new_detail.agent_describe = gpt_app.app_describe + new_detail.agent_role = ( + new_detail.agent_role + if new_detail.agent_role + else new_detail.agent_name + ) + new_detail.agent_name = gpt_app.app_name + team_app.details.append(new_detail) + self.edit(team_app) + def app_detail(self, app_code: str, user_code: str = None, sys_code: str = None): with self.session() as session: app_qry = session.query(GptsAppEntity).filter( @@ -874,6 +951,9 @@ class GptsAppDao(BaseDao): app_code=app_entity.app_code, app_name=app_entity.app_name, agent_name=item.agent_name, + agent_role=item.agent_role + if item.agent_role + else item.agent_name, node_id=str(uuid.uuid1()), resources=json.dumps(resource_dicts, ensure_ascii=False), prompt_template=item.prompt_template, @@ -935,7 +1015,6 @@ class GptsAppDao(BaseDao): GptsAppDetailEntity.app_code == gpts_app.app_code ) old_details.delete() - session.commit() app_details = [] for item in gpts_app.details: @@ -945,6 +1024,11 @@ class GptsAppDao(BaseDao): app_code=gpts_app.app_code, 
app_name=gpts_app.app_name, agent_name=item.agent_name, + type=item.type, + agent_role=item.agent_role + if item.agent_role + else item.agent_name, + agent_describe=item.agent_describe, node_id=str(uuid.uuid1()), resources=json.dumps(resource_dicts, ensure_ascii=False), prompt_template=item.prompt_template, @@ -983,7 +1067,6 @@ class GptsAppDao(BaseDao): params=json.dumps(recommend_question.params), valid=recommend_question.valid, chat_mode=chat_scene, - user_code=gpts_app.user_code, ) ) session.add_all(recommend_questions) @@ -1147,18 +1230,6 @@ class GptsAppDao(BaseDao): app_describe=chat_normal_ctx.scene_describe, team_context=chat_normal_ctx, param_need=[ - { - "type": AppParamType.Resource.value, - "value": ResourceType.ImageFile.value, - }, - { - "type": AppParamType.Resource.value, - "value": ResourceType.AudioFile.value, - }, - { - "type": AppParamType.Resource.value, - "value": ResourceType.VideoFile.value, - }, {"type": AppParamType.Model.value, "value": None}, {"type": AppParamType.Temperature.value, "value": None}, {"type": AppParamType.MaxNewTokens.value, "value": None}, @@ -1269,13 +1340,20 @@ class GptsAppDao(BaseDao): def _parse_team_context( - team_context: Optional[Union[str, AWELTeamContext, NativeTeamContext]] = None, + team_context: Optional[ + Union[ + str, AutoTeamContext, SingleAgentContext, AWELTeamContext, NativeTeamContext + ] + ] = None, ): """ parse team_context to str """ - if isinstance(team_context, AWELTeamContext) or isinstance( - team_context, NativeTeamContext + if ( + isinstance(team_context, AWELTeamContext) + or isinstance(team_context, NativeTeamContext) + or isinstance(team_context, AutoTeamContext) + or isinstance(team_context, SingleAgentContext) ): return model_to_json(team_context) return team_context @@ -1283,12 +1361,29 @@ def _parse_team_context( def _load_team_context( team_mode: str = None, team_context: str = None -) -> Union[str, AWELTeamContext, NativeTeamContext]: +) -> Union[ + str, AWELTeamContext, 
SingleAgentContext, NativeTeamContext, AutoTeamContext +]: """ load team_context to str or AWELTeamContext """ if team_mode is not None: match team_mode: + case TeamMode.SINGLE_AGENT.value: + try: + if team_context: + single_agent_ctx = SingleAgentContext( + **json.loads(team_context) + ) + return single_agent_ctx + else: + return None + except Exception as ex: + logger.warning( + f"_load_team_context error, team_mode={team_mode}, " + f"team_context={team_context}, {ex}" + ) + return None case TeamMode.AWEL_LAYOUT.value: try: if team_context: @@ -1301,6 +1396,27 @@ def _load_team_context( f"_load_team_context error, team_mode={team_mode}, " f"team_context={team_context}, {ex}" ) + case TeamMode.AUTO_PLAN.value: + try: + if team_context: + context_obj = json.loads(team_context) + if "resources" in context_obj: + resource = context_obj["resources"] + if isinstance(resource, str): + resource_obj = json.loads(context_obj["resources"]) + else: + resource_obj = resource + context_obj["resources"] = resource_obj + + auto_team_ctx = AutoTeamContext(**context_obj) + return auto_team_ctx + else: + return None + except Exception as ex: + logger.exception( + f"_load_team_context error, team_mode={team_mode}, " + f"team_context={team_context}, {ex}" + ) case TeamMode.NATIVE_APP.value: try: if team_context: @@ -1403,7 +1519,7 @@ def adapt_native_app_model(dialogue: ConversationVo): ChatScene.ChatWithDbQA.value(), ChatScene.ChatWithDbExecute.value(), ChatScene.ChatDashboard.value(), - ChatScene.ChatNormal.value(), + ChatScene.ChatNormal.value, ]: return dialogue gpts_dao = GptsAppDao() diff --git a/packages/dbgpt-serve/src/dbgpt_serve/agent/db/gpts_messages_db.py b/packages/dbgpt-serve/src/dbgpt_serve/agent/db/gpts_messages_db.py index acad1b5aa..82e0df4c2 100644 --- a/packages/dbgpt-serve/src/dbgpt_serve/agent/db/gpts_messages_db.py +++ b/packages/dbgpt-serve/src/dbgpt_serve/agent/db/gpts_messages_db.py @@ -26,15 +26,28 @@ class GptsMessagesEntity(Model): conv_id = Column( 
String(255), nullable=False, comment="The unique id of the conversation record" ) + message_id = Column( + String(255), nullable=False, comment="The unique id of the messages" + ) sender = Column( String(255), nullable=False, - comment="Who speaking in the current conversation turn", + comment="Who(role) speaking in the current conversation turn", + ) + sender_name = Column( + String(255), + nullable=False, + comment="Who(name) speaking in the current conversation turn", ) receiver = Column( String(255), nullable=False, - comment="Who receive message in the current conversation turn", + comment="Who(role) receive message in the current conversation turn", + ) + receiver_name = Column( + String(255), + nullable=False, + comment="Who(name) receive message in the current conversation turn", ) model_name = Column(String(255), nullable=True, comment="message generate model") rounds = Column(Integer, nullable=False, comment="dialogue turns") @@ -49,9 +62,17 @@ class GptsMessagesEntity(Model): nullable=False, comment="The message in which app name", ) + thinking = Column( + Text(length=2**31 - 1), nullable=True, comment="Thinking of the speech" + ) content = Column( Text(length=2**31 - 1), nullable=True, comment="Content of the speech" ) + show_message = Column( + Boolean, + nullable=True, + comment="Whether the current message needs to be displayed to the user", + ) current_goal = Column( Text, nullable=True, comment="The target corresponding to the current message" ) @@ -72,7 +93,11 @@ class GptsMessagesEntity(Model): role = Column( String(255), nullable=True, comment="The role of the current message content" ) - + avatar = Column( + String(255), + nullable=True, + comment="The avatar of the agent who send current message content", + ) created_at = Column(DateTime, default=datetime.utcnow, comment="create time") updated_at = Column( DateTime, @@ -84,15 +109,19 @@ class GptsMessagesEntity(Model): class GptsMessagesDao(BaseDao): - def append(self, entity: dict): - session 
= self.get_raw_session() - message = GptsMessagesEntity( + def _dict_to_entity(self, entity: dict) -> GptsMessagesEntity: + return GptsMessagesEntity( conv_id=entity.get("conv_id"), + message_id=entity.get("message_id"), sender=entity.get("sender"), + sender_name=entity.get("sender_name"), receiver=entity.get("receiver"), + receiver_name=entity.get("receiver_name"), content=entity.get("content"), + thinking=entity.get("thinking"), is_success=entity.get("is_success", True), role=entity.get("role", None), + avatar=entity.get("avatar", None), model_name=entity.get("model_name", None), context=entity.get("context", None), rounds=entity.get("rounds", None), @@ -102,7 +131,46 @@ class GptsMessagesDao(BaseDao): review_info=entity.get("review_info", None), action_report=entity.get("action_report", None), resource_info=entity.get("resource_info", None), + show_message=entity.get("show_message", None), ) + + def update_message(self, entity: dict): + session = self.get_raw_session() + message_qry = session.query(GptsMessagesEntity) + message_qry = message_qry.filter( + GptsMessagesEntity.message_id == entity["message_id"] + ) + old_message: GptsMessagesEntity = message_qry.order_by( + GptsMessagesEntity.rounds + ).one_or_none() + + if old_message: + old_message.update( + { + GptsMessagesEntity.receiver: entity.get("receiver"), + GptsMessagesEntity.receiver_name: entity.get("receiver_name"), + GptsMessagesEntity.content: entity.get("content"), + GptsMessagesEntity.thinking: entity.get("thinking"), + GptsMessagesEntity.is_success: entity.get("is_success"), + GptsMessagesEntity.role: entity.get("role"), + GptsMessagesEntity.model_name: entity.get("model_name"), + GptsMessagesEntity.context: entity.get("context"), + GptsMessagesEntity.review_info: entity.get("review_info"), + GptsMessagesEntity.action_report: entity.get("action_report"), + GptsMessagesEntity.resource_info: entity.get("resource_info"), + }, + synchronize_session="fetch", + ) + else: + 
session.add(self._dict_to_entity(entity)) + + session.commit() + session.close() + return id + + def append(self, entity: dict): + session = self.get_raw_session() + message = self._dict_to_entity(entity) session.add(message) session.commit() id = message.id @@ -148,6 +216,30 @@ class GptsMessagesDao(BaseDao): session.close() return result + def delete_by_msg_id(self, message_id: str): + session = self.get_raw_session() + old_message_qry = session.query(GptsMessagesEntity) + + old_message_qry = old_message_qry.filter( + GptsMessagesEntity.message_id == message_id + ) + old_message = old_message_qry.order_by(GptsMessagesEntity.rounds).one_or_none() + if old_message: + session.delete(old_message) + session.commit() + session.close() + + def get_by_message_id(self, message_id: str) -> Optional[GptsMessagesEntity]: + session = self.get_raw_session() + gpts_messages = session.query(GptsMessagesEntity) + + gpts_messages = gpts_messages.filter( + GptsMessagesEntity.message_id == message_id + ) + result = gpts_messages.order_by(GptsMessagesEntity.rounds).one_or_none() + session.close() + return result + def get_between_agents( self, conv_id: str, diff --git a/packages/dbgpt-serve/src/dbgpt_serve/agent/db/gpts_plans_db.py b/packages/dbgpt-serve/src/dbgpt_serve/agent/db/gpts_plans_db.py index 7183ef16e..2cfc065a5 100644 --- a/packages/dbgpt-serve/src/dbgpt_serve/agent/db/gpts_plans_db.py +++ b/packages/dbgpt-serve/src/dbgpt_serve/agent/db/gpts_plans_db.py @@ -1,4 +1,5 @@ from datetime import datetime +from typing import Optional from sqlalchemy import Column, DateTime, Integer, String, Text, UniqueConstraint @@ -13,16 +14,19 @@ class GptsPlansEntity(Model): conv_id = Column( String(255), nullable=False, comment="The unique id of the conversation record" ) - sub_task_num = Column(Integer, nullable=False, comment="Subtask number") + task_uid = Column(String(255), nullable=False, comment="The uid of the plan task") + sub_task_num = Column(Integer, nullable=False, 
comment="Subtask id") + conv_round = Column(Integer, nullable=False, comment="The dialogue turns") + conv_round_id = Column(String(255), nullable=True, comment="The dialogue turns uid") + + sub_task_id = Column(String(255), nullable=False, comment="Subtask id") + task_parent = Column(String(255), nullable=True, comment="Subtask parent task id") sub_task_title = Column(String(255), nullable=False, comment="subtask title") sub_task_content = Column(Text, nullable=False, comment="subtask content") sub_task_agent = Column( String(255), nullable=True, comment="Available agents corresponding to subtasks" ) resource_name = Column(String(255), nullable=True, comment="resource name") - rely = Column( - String(255), nullable=True, comment="Subtask dependencies,like: 1,2,3" - ) agent_model = Column( String(255), @@ -43,7 +47,7 @@ class GptsPlansEntity(Model): onupdate=datetime.utcnow, comment="last update time", ) - __table_args__ = (UniqueConstraint("conv_id", "sub_task_num", name="uk_sub_task"),) + __table_args__ = (UniqueConstraint("conv_id", "sub_task_id", name="uk_sub_task"),) class GptsPlansDao(BaseDao): @@ -53,32 +57,38 @@ class GptsPlansDao(BaseDao): session.commit() session.close() - def get_by_conv_id(self, conv_id: str) -> list[GptsPlansEntity]: + def get_by_conv_id( + self, conv_id: str, conv_round_id: Optional[str] = None + ) -> list[GptsPlansEntity]: session = self.get_raw_session() gpts_plans = session.query(GptsPlansEntity) if conv_id: gpts_plans = gpts_plans.filter(GptsPlansEntity.conv_id == conv_id) + if conv_round_id: + gpts_plans = gpts_plans.filter( + GptsPlansEntity.conv_round_id == conv_round_id + ) result = gpts_plans.all() session.close() return result - def get_by_task_id(self, task_id: int) -> list[GptsPlansEntity]: + def get_by_task_id(self, task_id: str) -> list[GptsPlansEntity]: session = self.get_raw_session() gpts_plans = session.query(GptsPlansEntity) if task_id: - gpts_plans = gpts_plans.filter(GptsPlansEntity.id == task_id) + gpts_plans = 
gpts_plans.filter(GptsPlansEntity.sub_task_id == task_id) result = gpts_plans.first() session.close() return result def get_by_conv_id_and_num( - self, conv_id: str, task_nums: list + self, conv_id: str, task_ids: list ) -> list[GptsPlansEntity]: session = self.get_raw_session() gpts_plans = session.query(GptsPlansEntity) if conv_id: gpts_plans = gpts_plans.filter(GptsPlansEntity.conv_id == conv_id).filter( - GptsPlansEntity.sub_task_num.in_(task_nums) + GptsPlansEntity.sub_task_id.in_(task_ids) ) result = gpts_plans.all() session.close() @@ -92,15 +102,15 @@ class GptsPlansDao(BaseDao): gpts_plans = gpts_plans.filter(GptsPlansEntity.conv_id == conv_id).filter( GptsPlansEntity.state.in_([Status.TODO.value, Status.RETRYING.value]) ) - result = gpts_plans.order_by(GptsPlansEntity.sub_task_num).all() + result = gpts_plans.order_by(GptsPlansEntity.sub_task_id).all() session.close() return result - def complete_task(self, conv_id: str, task_num: int, result: str): + def complete_task(self, conv_id: str, task_id: str, result: str): session = self.get_raw_session() gpts_plans = session.query(GptsPlansEntity) gpts_plans = gpts_plans.filter(GptsPlansEntity.conv_id == conv_id).filter( - GptsPlansEntity.sub_task_num == task_num + GptsPlansEntity.sub_task_id == task_id ) gpts_plans.update( { @@ -115,7 +125,7 @@ class GptsPlansDao(BaseDao): def update_task( self, conv_id: str, - task_num: int, + task_id: str, state: str, retry_times: int, agent: str = None, @@ -125,10 +135,11 @@ class GptsPlansDao(BaseDao): session = self.get_raw_session() gpts_plans = session.query(GptsPlansEntity) gpts_plans = gpts_plans.filter(GptsPlansEntity.conv_id == conv_id).filter( - GptsPlansEntity.sub_task_num == task_num + GptsPlansEntity.sub_task_id == task_id ) update_param = {} update_param[GptsPlansEntity.state] = state + update_param[GptsPlansEntity.retry_times] = retry_times update_param[GptsPlansEntity.result] = result if agent: diff --git 
a/packages/dbgpt-serve/src/dbgpt_serve/conversation/service/service.py b/packages/dbgpt-serve/src/dbgpt_serve/conversation/service/service.py index 127325634..1dadf2703 100644 --- a/packages/dbgpt-serve/src/dbgpt_serve/conversation/service/service.py +++ b/packages/dbgpt-serve/src/dbgpt_serve/conversation/service/service.py @@ -9,7 +9,6 @@ from dbgpt.core import ( from dbgpt.core.interface.message import _append_view_messages from dbgpt.storage.metadata._base_dao import REQ, RES from dbgpt.util.pagination_utils import PaginationResult -from dbgpt.vis.client import vis_name_change from dbgpt_serve.core import BaseService from ...feedback.api.endpoints import get_service @@ -18,6 +17,19 @@ from ..config import SERVE_SERVICE_COMPONENT_NAME, ServeConfig from ..models.models import ServeDao, ServeEntity +## Compatible with historical old messages +def vis_name_change(vis_message: str) -> str: + """Change vis tag name use new name.""" + replacements = { + "```vis-chart": "```vis-db-chart", + } + + for old_tag, new_tag in replacements.items(): + vis_message = vis_message.replace(old_tag, new_tag) + + return vis_message + + class Service(BaseService[ServeEntity, ServeRequest, ServerResponse]): """The service class for Conversation"""