Mirror of https://github.com/csunny/DB-GPT.git (synced 2025-09-12 20:53:48 +00:00)
feat(agent): Multi agents v0.1 (#1044)
Co-authored-by: qidanrui <qidanrui@gmail.com>
Co-authored-by: csunny <cfqsunny@163.com>
Co-authored-by: Fangyin Cheng <staneyffer@gmail.com>
@@ -5,6 +5,7 @@ from typing import Any, Dict, List, Optional, Tuple, Union
from dbgpt.core import LLMClient
from dbgpt.core.interface.llm import ModelMetadata
from dbgpt.util.annotations import PublicAPI

+from ..memory.gpts_memory import GptsMemory
@@ -44,6 +45,10 @@ class Agent:
        """Get the name of the agent."""
        return self._describe

+   @property
+   def is_terminal_agent(self) -> bool:
+       return False
+
    async def a_send(
        self,
        message: Union[Dict, str],
@@ -88,6 +93,7 @@ class Agent:
        sender: Agent,
        reviewer: Agent,
        silent: Optional[bool] = False,
+       rely_messages: Optional[List[Dict]] = None,
        **kwargs,
    ) -> Union[str, Dict, None]:
        """(Abstract async method) Generate a reply based on the received messages.
@@ -102,10 +108,9 @@ class Agent:
    async def a_reasoning_reply(
        self, messages: Optional[List[Dict]]
    ) -> Union[str, Dict, None]:
-       """
-       Based on the requirements of the current agent, reason about the current task goal through LLM
+       """Based on the requirements of the current agent, reason about the current task goal through LLM
        Args:
-           message:
+           messages:

        Returns:
            str or dict or None: the generated reply. If None, no reply is generated.
@@ -187,3 +192,20 @@ class AgentContext:

    def to_dict(self) -> Dict[str, Any]:
        return dataclasses.asdict(self)
+
+
+@dataclasses.dataclass
+@PublicAPI(stability="beta")
+class AgentGenerateContext:
+    """A class to represent the input of an Agent."""
+
+    message: Optional[Dict]
+    sender: Agent
+    reviewer: Agent
+    silent: Optional[bool] = False
+
+    rely_messages: List[Dict] = dataclasses.field(default_factory=list)
+    final: Optional[bool] = True
+
+    def to_dict(self) -> Dict:
+        return dataclasses.asdict(self)
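Since `AgentGenerateContext` is a plain dataclass, `to_dict` is just `dataclasses.asdict`. A minimal standalone sketch (plain strings stand in for the `Agent`-typed fields, which are an illustration-only substitution):

import dataclasses
from typing import Dict, List, Optional

@dataclasses.dataclass
class AgentGenerateContext:
    # Illustration only: sender/reviewer are plain names here, not Agent instances.
    message: Optional[Dict]
    sender: str
    reviewer: str
    silent: Optional[bool] = False
    rely_messages: List[Dict] = dataclasses.field(default_factory=list)
    final: Optional[bool] = True

ctx = AgentGenerateContext({"content": "hello"}, sender="planner", reviewer="reviewer")
print(dataclasses.asdict(ctx))
# {'message': {'content': 'hello'}, 'sender': 'planner', 'reviewer': 'reviewer',
#  'silent': False, 'rely_messages': [], 'final': True}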
@@ -1,11 +1,17 @@
import logging
import re
from collections import defaultdict
-from typing import Optional, Type
+from typing import Dict, List, Optional, Type

from .agent import Agent
from .expand.code_assistant_agent import CodeAssistantAgent
from .expand.dashboard_assistant_agent import DashboardAssistantAgent
from .expand.data_scientist_agent import DataScientistAgent
from .expand.plugin_assistant_agent import PluginAssistantAgent
from .expand.sql_assistant_agent import SQLAssistantAgent
from .expand.summary_assistant_agent import SummaryAssistantAgent

logger = logging.getLogger(__name__)


def get_all_subclasses(cls):
@@ -18,6 +24,40 @@ def get_all_subclasses(cls):
    return all_subclasses


def participant_roles(agents: List[Agent] = None) -> str:
    # Default to all agents registered
    if agents is None:
        agents = agents

    roles = []
    for agent in agents:
        if agent.system_message.strip() == "":
            logger.warning(
                f"The agent '{agent.name}' has an empty system_message, and may not work well with GroupChat."
            )
        roles.append(f"{agent.name}: {agent.describe}")
    return "\n".join(roles)


def mentioned_agents(message_content: str, agents: List[Agent]) -> Dict:
    """
    Finds and counts agent mentions in the string message_content, taking word boundaries into account.

    Returns: A dictionary mapping agent names to mention counts (to be included, at least one mention must occur)
    """
    mentions = dict()
    for agent in agents:
        regex = (
            r"(?<=\W)" + re.escape(agent.name) + r"(?=\W)"
        )  # Finds agent mentions, taking word boundaries into account
        count = len(
            re.findall(regex, " " + message_content + " ")
        )  # Pad the message to help with matching
        if count > 0:
            mentions[agent.name] = count
    return mentions
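The mention-matching above can be exercised in isolation; a small sketch with hypothetical agent names:

import re

def count_mentions(message_content: str, names: list) -> dict:
    # Pad the message with spaces so the boundary lookarounds can match at the ends.
    padded = " " + message_content + " "
    mentions = {}
    for name in names:
        regex = r"(?<=\W)" + re.escape(name) + r"(?=\W)"
        count = len(re.findall(regex, padded))
        if count > 0:
            mentions[name] = count
    return mentions

print(count_mentions("Ask SQLAssistant, then SQLAssistant again.", ["SQLAssistant", "Planner"]))
# {'SQLAssistant': 2}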

class AgentsMange:
    def __init__(self):
        self._agents = defaultdict()
@@ -46,3 +86,5 @@ agent_mange.register_agent(CodeAssistantAgent)
agent_mange.register_agent(DashboardAssistantAgent)
agent_mange.register_agent(DataScientistAgent)
agent_mange.register_agent(SQLAssistantAgent)
agent_mange.register_agent(SummaryAssistantAgent)
+agent_mange.register_agent(PluginAssistantAgent)
@@ -33,6 +33,7 @@ class ConversableAgent(Agent):
        max_consecutive_auto_reply: Optional[int] = None,
        human_input_mode: Optional[str] = "TERMINATE",
        default_auto_reply: Optional[Union[str, Dict, None]] = "",
+       is_terminal_agent: bool = False,
    ):
        super().__init__(name, memory, describe)
@@ -57,8 +58,9 @@ class ConversableAgent(Agent):
            else self.MAX_CONSECUTIVE_AUTO_REPLY
        )
        self.consecutive_auto_reply_counter: int = 0

        self._current_retry_counter: int = 0
        self._max_retry_count: int = 5
+       self._is_terminal_agent = is_terminal_agent

        ## By default, the memory of 4 rounds of dialogue is retained.
        self.dialogue_memory_rounds = 5
@@ -91,6 +93,10 @@ class ConversableAgent(Agent):
            },
        )

+   @property
+   def is_terminal_agent(self):
+       return self._is_terminal_agent
+
    @property
    def system_message(self):
        """Return the system message."""
@@ -197,7 +203,6 @@ class ConversableAgent(Agent):
        """
        Put the received message content into the collective message memory
        Args:
            conv_id:
            message:
            role:
            sender:
@@ -381,17 +386,32 @@ class ConversableAgent(Agent):
        )
        return oai_messages

-   def process_now_message(self, sender, current_gogal: Optional[str] = None):
-       # Convert and tailor the information in collective memory into contextual memory available to the current Agent
+   def process_now_message(
+       self,
+       current_message: Optional[Dict],
+       sender,
+       rely_messages: Optional[List[Dict]] = None,
+   ):
+       current_gogal = current_message.get("current_gogal", None)
+       ### Convert and tailor the information in collective memory into contextual memory available to the current Agent
        current_gogal_messages = self._gpts_message_to_ai_message(
            self.memory.message_memory.get_between_agents(
                self.agent_context.conv_id, self.name, sender.name, current_gogal
            )
        )

-       # relay messages
+       if current_gogal_messages is None or len(current_gogal_messages) <= 0:
+           current_message["role"] = ModelMessageRoleType.HUMAN
+           current_gogal_messages = [current_message]
+       ### relay messages
        cut_messages = []
-       cut_messages.extend(self._rely_messages)
+       if rely_messages:
+           for rely_message in rely_messages:
+               action_report = rely_message.get("action_report", None)
+               if action_report:
+                   rely_message["content"] = action_report["content"]
+           cut_messages.extend(rely_messages)
+       else:
+           cut_messages.extend(self._rely_messages)

        if len(current_gogal_messages) < self.dialogue_memory_rounds:
            cut_messages.extend(current_gogal_messages)
@@ -409,8 +429,9 @@ class ConversableAgent(Agent):
        self,
        message: Optional[Dict],
        sender: Agent,
-       reviewer: "Agent",
+       reviewer: Agent,
        silent: Optional[bool] = False,
+       rely_messages: Optional[List[Dict]] = None,
    ):
        ## 0. New message build
        new_message = {}
@@ -420,11 +441,7 @@ class ConversableAgent(Agent):
        ## 1. LLM Reasoning
        await self.a_system_fill_param()
        await asyncio.sleep(5)  ## TODO Rate limit reached for gpt-3.5-turbo
-       current_messages = self.process_now_message(
-           sender, message.get("current_gogal", None)
-       )
-       if current_messages is None or len(current_messages) <= 0:
-           current_messages = [message]
+       current_messages = self.process_now_message(message, sender, rely_messages)
        ai_reply, model = await self.a_reasoning_reply(messages=current_messages)
        new_message["content"] = ai_reply
        new_message["model_name"] = model
@@ -466,6 +483,9 @@ class ConversableAgent(Agent):
        if request_reply is False or request_reply is None:
            logger.info("Messages that do not require a reply")
            return
+       if self._is_termination_msg(message) or sender.is_terminal_agent:
+           logger.info(f"TERMINATE!")
+           return

        verify_paas, reply = await self.a_generate_reply(
            message=message, sender=sender, reviewer=reviewer, silent=silent
@@ -476,14 +496,26 @@ class ConversableAgent(Agent):
                message=reply, recipient=sender, reviewer=reviewer, silent=silent
            )
        else:
-           self._current_retry_counter += 1
-           logger.info(
-               "The generated answer failed to verify, so send it to yourself for optimization."
-           )
-           # TODO: Exit after the maximum number of rounds of self-optimization
-           await sender.a_send(
-               message=reply, recipient=self, reviewer=reviewer, silent=silent
-           )
+           # Exit after the maximum number of rounds of self-optimization
+           if self._current_retry_counter >= self._max_retry_count:
+               # If the maximum number of retries is exceeded, the abnormal answer will be returned directly.
+               logger.warning(
+                   f"More than {self._current_retry_counter} times and still no valid answer is output."
+               )
+               reply[
+                   "content"
+               ] = f"After n optimizations, the following problems still exist:{reply['content']}"
+               await self.a_send(
+                   message=reply, recipient=sender, reviewer=reviewer, silent=silent
+               )
+           else:
+               self._current_retry_counter += 1
+               logger.info(
+                   "The generated answer failed to verify, so send it to yourself for optimization."
+               )
+               await sender.a_send(
+                   message=reply, recipient=self, reviewer=reviewer, silent=silent
+               )

    async def a_verify(self, message: Optional[Dict]):
        return True, message
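The verify-and-retry handshake above bounces a failed reply back for self-optimization until `_max_retry_count` is reached. A condensed synchronous sketch of that control flow (the `verify`/`regenerate` callables are hypothetical stand-ins for the async agent methods):

def reply_with_retry(message: dict, verify, regenerate, max_retry_count: int = 5) -> dict:
    """Bounded self-optimization loop: regenerate until the reply verifies."""
    reply = regenerate(message)
    retry_counter = 0
    while not verify(reply):
        if retry_counter >= max_retry_count:
            # Give up and surface the problematic answer directly.
            reply["content"] = (
                f"After {retry_counter} optimizations, the following problems still exist: "
                + reply["content"]
            )
            break
        retry_counter += 1
        reply = regenerate(reply)  # feed the failed answer back in
    return reply

result = reply_with_retry(
    {"content": "2+2?"},
    verify=lambda r: "4" in r["content"],
    regenerate=lambda m: {"content": m["content"] + " -> 4"},
)
print(result["content"])  # 2+2? -> 4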
@@ -547,7 +579,6 @@ class ConversableAgent(Agent):
    async def a_retry_chat(
        self,
        recipient: "ConversableAgent",
-       agent_map: dict,
        reviewer: "Agent" = None,
        clear_history: Optional[bool] = True,
        silent: Optional[bool] = False,
dbgpt/agent/agents/base_team.py (new file, 158 lines)
@@ -0,0 +1,158 @@
import logging
import sys
from typing import Dict, List, Optional, Union

from dbgpt.agent.agents.agent import Agent, AgentContext
from dbgpt.agent.agents.base_agent import ConversableAgent
from dbgpt.agent.memory.gpts_memory import GptsMemory

logger = logging.getLogger(__name__)


def content_str(content: Union[str, List, None]) -> str:
    """Converts `content` into a string format.

    This function processes content that may be a string, a list of mixed text and image URLs, or None,
    and converts it into a string. Text is directly appended to the result string, while image URLs are
    represented by a placeholder image token. If the content is None, an empty string is returned.

    Args:
        content (Union[str, List, None]): The content to be processed. Can be a string, a list of dictionaries
            representing text and image URLs, or None.

    Returns:
        str: A string representation of the input content. Image URLs are replaced with an image token.

    Note:
        - The function expects each dictionary in the list to have a "type" key that is either "text" or "image_url".
          For "text" type, the "text" key's value is appended to the result. For "image_url", an image token is appended.
        - This function is useful for handling content that may include both text and image references, especially
          in contexts where images need to be represented as placeholders.
    """
    if content is None:
        return ""
    if isinstance(content, str):
        return content
    if not isinstance(content, list):
        raise TypeError(f"content must be None, str, or list, but got {type(content)}")

    rst = ""
    for item in content:
        if not isinstance(item, dict):
            raise TypeError(
                "Wrong content format: every element should be dict if the content is a list."
            )
        assert (
            "type" in item
        ), "Wrong content format. Missing 'type' key in content's dict."
        if item["type"] == "text":
            rst += item["text"]
        elif item["type"] == "image_url":
            rst += "<image>"
        else:
            raise ValueError(
                f"Wrong content format: unknown type {item['type']} within the content"
            )
    return rst
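A quick usage sketch of `content_str` on mixed text/image content:

parts = [
    {"type": "text", "text": "Here is the chart: "},
    {"type": "image_url", "image_url": "https://example.com/chart.png"},
]
print(content_str(parts))    # "Here is the chart: <image>"
print(content_str(None))     # ""
print(content_str("plain"))  # "plain"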
class Team:
    def __init__(self):
        self.agents: List[Agent] = []
        self.messages: List[Dict] = []
        self.max_round: Optional[int] = 10

    def hire(self, agents: List[Agent]):
        """Hire roles to cooperate"""
        self.agents.extend(agents)

    @property
    def agent_names(self) -> List[str]:
        """Return the names of the agents in the group chat."""
        return [agent.name for agent in self.agents]

    def agent_by_name(self, name: str) -> Agent:
        """Returns the agent with a given name."""
        return self.agents[self.agent_names.index(name)]

    async def a_select_speaker(self, last_speaker: Agent, selector: Agent):
        pass

    def reset(self):
        """Reset the group chat."""
        self.messages.clear()

    def append(self, message: Dict):
        """Append a message to the group chat.

        We cast the content to str here so that it can be managed by a text-based
        model.
        """
        message["content"] = content_str(message["content"])
        self.messages.append(message)

    async def a_generate_speech_process(self, message: Optional[str]) -> None:
        """Build respective speech processes based on different team organizational models

        Args:
            message: Speech goal

        Returns:

        """

    async def a_run_chat(
        self,
        message: Optional[str] = None,
        sender: Optional[Agent] = None,
        reviewer: Agent = None,
    ):
        """Start the conversation using the current team organization method.

        Args:
            message:
            sender:
            reviewer:

        Returns:

        """
        pass
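How a `Team` is assembled, as a minimal sketch (a stub class stands in for real `Agent` instances):

class StubAgent:
    # Illustration only: the real Agent carries memory, context, etc.
    def __init__(self, name: str):
        self.name = name

team = Team()
team.hire([StubAgent("Planner"), StubAgent("DataScientist")])
print(team.agent_names)                    # ['Planner', 'DataScientist']
print(team.agent_by_name("Planner").name)  # 'Planner'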
class MangerAgent(ConversableAgent, Team):
    def __init__(
        self,
        name: str,
        memory: GptsMemory,
        agent_context: AgentContext,
        # unlimited consecutive auto reply by default
        max_consecutive_auto_reply: Optional[int] = sys.maxsize,
        human_input_mode: Optional[str] = "NEVER",
        describe: Optional[str] = "layout chat manager.",
        **kwargs,
    ):
        ConversableAgent.__init__(
            self,
            name=name,
            describe=describe,
            memory=memory,
            max_consecutive_auto_reply=max_consecutive_auto_reply,
            human_input_mode=human_input_mode,
            agent_context=agent_context,
            **kwargs,
        )
        Team.__init__(self)

    async def a_reasoning_reply(
        self, messages: Optional[List[Dict]] = None
    ) -> Union[str, Dict, None]:
        if messages is None or len(messages) <= 0:
            message = None
            return None, None
        else:
            message = messages[-1]
            self.messages.append(message)
            return message["content"], None

    async def a_verify_reply(
        self, message: Optional[Dict], sender: Agent, reviewer: Agent, **kwargs
    ) -> Union[str, Dict, None]:
        return True, message
@@ -2,15 +2,27 @@ import logging
from typing import Callable, Dict, Literal, Optional, Union

from dbgpt.util.json_utils import find_json_objects
from dbgpt.vis import VisPlugin, vis_client

from ...common.schema import Status
from ...memory.gpts_memory import GptsMemory
from ...plugin.commands.command_mange import execute_command
from ...plugin.loader import PluginLoader
from ..agent import Agent, AgentContext
from ..base_agent import ConversableAgent

try:
    from termcolor import colored
except ImportError:

    def colored(x, *args, **kwargs):
        return x


logger = logging.getLogger(__name__)

-class PluginAgent(ConversableAgent):
+class PluginAssistantAgent(ConversableAgent):
    """(In preview) Assistant agent, designed to solve a task with LLM.

    AssistantAgent is a subclass of ConversableAgent configured with a default system message.
@@ -32,19 +44,19 @@ class PluginAgent(ConversableAgent):
    user: Search for the latest hot financial news
    assistant: {{
        "tool_name": "google-search",
-       "args": "{{
+       "args": {{
            "query": "latest hot financial news",
-       }}",
+       }},
        "thought": "I will use the google-search tool to search for the latest hot financial news."
    }}

    Please think step by step and return it in the following json format
    {{
        "tool_name": "The name of the tool to be used",
-       "args": "{{
+       "args": {{
            "arg name1": "arg value1",
            "arg name2": "arg value2",
-       }}",
+       }},
        "thought": "Summary of thoughts to the user"
    }}
    Make sure the response is correct json and can be parsed by Python json.loads.
@@ -56,6 +68,7 @@ class PluginAgent(ConversableAgent):
        self,
        memory: GptsMemory,
        agent_context: AgentContext,
+       plugin_path: str,
        describe: Optional[str] = DEFAULT_DESCRIBE,
        is_termination_msg: Optional[Callable[[Dict], bool]] = None,
        max_consecutive_auto_reply: Optional[int] = None,
@@ -74,18 +87,20 @@ class PluginAgent(ConversableAgent):
            **kwargs,
        )

-       self.register_reply(Agent, PluginAgent.tool_call)
+       self.register_reply(Agent, PluginAssistantAgent.a_tool_call)
        self.agent_context = agent_context
+       self._plugin_loader = PluginLoader()
+       self.plugin_generator = self._plugin_loader.load_plugins(
+           plugin_path=plugin_path
+       )

    async def a_system_fill_param(self):
        # TODO: no db_connect attribute
        params = {
            "tool_infos": self.db_connect.get_table_info(),
            "dialect": self.db_connect.db_type,
            "tool_list": self.plugin_generator.generate_commands_string(),
        }
        self.update_system_message(self.DEFAULT_SYSTEM_MESSAGE.format(**params))

-   async def tool_call(
+   async def a_tool_call(
        self,
        message: Optional[str] = None,
        sender: Optional[Agent] = None,
@@ -95,22 +110,42 @@ class PluginAgent(ConversableAgent):
        """Generate a reply using code execution."""

        json_objects = find_json_objects(message)
        fail_reason = "The required json format answer was not generated."
        json_count = len(json_objects)
-       response_success = True
+       rensponse_succ = True
        view = None
-       content = None
+       tool_result = None
+       err_msg = None
        if json_count != 1:
-           # Answer failed, turn on automatic repair
-           response_success = False
+           ### Answer failed, turn on automatic repair
+           rensponse_succ = False
+           err_msg = "Your answer has multiple json contents, which is not the required return format."
        else:
            tool_name = json_objects[0].get("tool_name", None)
            args = json_objects[0].get("args", None)

            try:
-               view = ""
+               tool_result = execute_command(tool_name, args, self.plugin_generator)
+               status = Status.COMPLETE.value
            except Exception as e:
-               view = f"```vis-convert-error\n{content}\n```"
+               logger.exception(f"Tool [{tool_name}] execute failed!")
+               status = Status.FAILED.value
+               err_msg = f"Tool [{tool_name}] execute failed! {str(e)}"
+               rensponse_succ = False

+           plugin_param = {
+               "name": tool_name,
+               "args": args,
+               "status": status,
+               "logo": None,
+               "result": tool_result,
+               "err_msg": err_msg,
+           }
+           vis_tag = vis_client.get(VisPlugin.vis_tag())
+           view = await vis_tag.disply(**plugin_param)

        return True, {
-           "is_exe_success": response_success,
-           "content": content,
+           "is_exe_success": rensponse_succ,
+           "content": tool_result if rensponse_succ else err_msg,
            "view": view,
        }
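`a_tool_call` expects exactly one JSON object in the model output. A rough, hypothetical stand-in for `find_json_objects` based on a simple brace scan (the real helper lives in `dbgpt.util.json_utils` and is more robust, e.g. against braces inside strings):

import json

def find_json_objects(text: str) -> list:
    """Best-effort scan for top-level {...} JSON objects in free-form text."""
    objects, depth, start = [], 0, None
    for i, ch in enumerate(text):
        if ch == "{":
            if depth == 0:
                start = i
            depth += 1
        elif ch == "}" and depth > 0:
            depth -= 1
            if depth == 0:
                try:
                    objects.append(json.loads(text[start : i + 1]))
                except json.JSONDecodeError:
                    pass  # skip spans that are not valid JSON
    return objects

reply = 'Sure! {"tool_name": "google-search", "args": {"query": "news"}}'
print(find_json_objects(reply))
# [{'tool_name': 'google-search', 'args': {'query': 'news'}}]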
@@ -1,8 +1,6 @@
from typing import Callable, Dict, Literal, Optional, Union

-from dbgpt._private.config import Config
from dbgpt.agent.agents.base_agent import ConversableAgent
from dbgpt.agent.plugin.commands.command_mange import ApiCall

from ...memory.gpts_memory import GptsMemory
from ..agent import Agent, AgentContext
@@ -23,8 +21,8 @@ class SummaryAssistantAgent(ConversableAgent):
    Please complete this task step by step following instructions below:
    1. You need to first detect user's question that you need to answer with your summarization.
    2. Output the extracted user's question with the format - The User's Question: user's question.
-   3. Then you need to summarize the historical messages
-   4. Output the summarization only related to user's question with the format - The Summarization: the summarization.
+   3. Then you need to summarize the provided messages.
+   4. Output the content of summarization ONLY related to user's question. The output language must be the same as the user's question language.
    """

    DEFAULT_DESCRIBE = """Summarize provided text content according to user's questions and output the summarization."""
@@ -63,7 +61,7 @@ class SummaryAssistantAgent(ConversableAgent):
        config: Optional[Union[Dict, Literal[False]]] = None,
    ):
        """Generate a reply with summary."""
        fail_reason = None
        response_success = True
        view = None
        content = None
@@ -73,7 +71,6 @@ class SummaryAssistantAgent(ConversableAgent):
            response_success = False
        else:
            try:
                vis_client = ApiCall()
                content = message
                view = content
            except Exception as e:
@@ -1,449 +0,0 @@
import logging
import re
import sys
from dataclasses import dataclass
from typing import Dict, List, Optional, Union

from dbgpt.core.interface.message import ModelMessageRoleType

from ..common.schema import Status
from ..memory.base import GptsPlan
from ..memory.gpts_memory import GptsMemory
from .agent import Agent, AgentContext
from .base_agent import ConversableAgent

logger = logging.getLogger(__name__)

@dataclass
class PlanChat:
    """(In preview) A group chat class that contains the following data fields:
    - agents: a list of participating agents.
    - messages: a list of messages in the group chat.
    - max_round: the maximum number of rounds.
    - admin_name: the name of the admin agent if there is one. Default is "Admin".
      KeyboardInterrupt will make the admin agent take over.
    - func_call_filter: whether to enforce function call filter. Default is True.
      When set to True and when a message is a function call suggestion,
      the next speaker will be chosen from an agent which contains the corresponding function name
      in its `function_map`.
    - speaker_selection_method: the method for selecting the next speaker. Default is "auto".
      Could be any of the following (case insensitive), will raise ValueError if not recognized:
        - "auto": the next speaker is selected automatically by LLM.
        - "manual": the next speaker is selected manually by user input.
        - "random": the next speaker is selected randomly.
        - "round_robin": the next speaker is selected in a round robin fashion, i.e., iterating in the same order as provided in `agents`.
    - allow_repeat_speaker: whether to allow the same speaker to speak consecutively. Default is True.
    """

    agents: List[Agent]
    messages: List[Dict]
    max_round: int = 50
    admin_name: str = "Admin"
    func_call_filter: bool = True
    speaker_selection_method: str = "auto"
    allow_repeat_speaker: bool = True

    _VALID_SPEAKER_SELECTION_METHODS = ["auto", "manual", "random", "round_robin"]
    @property
    def agent_names(self) -> List[str]:
        """Return the names of the agents in the group chat."""
        return [agent.name for agent in self.agents]

    def reset(self):
        """Reset the group chat."""
        self.messages.clear()

    def agent_by_name(self, name: str) -> Agent:
        """Returns the agent with a given name."""
        return self.agents[self.agent_names.index(name)]

    # def select_speaker_msg(self, agents: List[Agent], task_context: str, models: Optional[List[dict]]):
    #     f"""Return the message for selecting the next speaker."""
    #     return f"""You are in a role play game. Read and understand the following tasks and assign the appropriate role to complete them.
    #     Task content: {task_context}
    #     You can fill the following roles: {[agent.name for agent in agents]},
    #     Please answer only the role name, such as: {agents[0].name}"""

    def select_speaker_msg(self, agents: List[Agent]):
        """Return the message for selecting the next speaker."""
        return f"""You are in a role play game. The following roles are available:
    {self._participant_roles(agents)}.
    Read the following conversation.
    Then select the next role from {[agent.name for agent in agents]} to play. The role can be selected repeatedly. Only return the role."""

    async def a_select_speaker(
        self,
        last_speaker: Agent,
        selector: ConversableAgent,
        now_plan_context: str,
        pre_allocated: str = None,
    ):
        """Select the next speaker."""
        if (
            self.speaker_selection_method.lower()
            not in self._VALID_SPEAKER_SELECTION_METHODS
        ):
            raise ValueError(
                f"GroupChat speaker_selection_method is set to '{self.speaker_selection_method}'. "
                f"It should be one of {self._VALID_SPEAKER_SELECTION_METHODS} (case insensitive). "
            )

        agents = self.agents
        n_agents = len(agents)
        # Warn if GroupChat is underpopulated
        if (
            n_agents <= 2
            and self.speaker_selection_method.lower() != "round_robin"
            and self.allow_repeat_speaker
        ):
            logger.warning(
                f"GroupChat is underpopulated with {n_agents} agents. "
                "It is recommended to set speaker_selection_method to 'round_robin' or allow_repeat_speaker to False. "
                "Or, use direct communication instead."
            )

        # remove the last speaker from the list to avoid selecting the same speaker if allow_repeat_speaker is False
        agents = (
            agents
            if self.allow_repeat_speaker
            else [agent for agent in agents if agent != last_speaker]
        )

        # if self.speaker_selection_method.lower() == "manual":
        #     selected_agent = self.manual_select_speaker(agents)
        #     if selected_agent:
        #         return selected_agent
        # elif self.speaker_selection_method.lower() == "round_robin":
        #     return self.next_agent(last_speaker, agents)
        # elif self.speaker_selection_method.lower() == "random":
        #     return random.choice(agents)

        if pre_allocated:
            # Preselect speakers
            logger.info(f"Preselect speakers:{pre_allocated}")
            name = pre_allocated
            model = None
        else:
            # auto speaker selection
            selector.update_system_message(self.select_speaker_msg(agents))
            final, name, model = await selector.a_generate_oai_reply(
                self.messages
                + [
                    {
                        "role": ModelMessageRoleType.HUMAN,
                        "content": f"""Read and understand the following task content and assign the appropriate role to complete the task.
    Task content: {now_plan_context}
    Select the role from: {[agent.name for agent in agents]},
    Please only return the role, such as: {agents[0].name}""",
                    }
                ]
            )
            if not final:
                # the LLM client is None, thus no reply is generated. Use round robin instead.
                return self.next_agent(last_speaker, agents), model

        # If exactly one agent is mentioned, use it. Otherwise, leave the OAI response unmodified
        mentions = self._mentioned_agents(name, agents)
        if len(mentions) == 1:
            name = next(iter(mentions))
        else:
            logger.warning(
                f"GroupChat select_speaker failed to resolve the next speaker's name. This is because the speaker selection OAI call returned:\n{name}"
            )

        # Return the result
        try:
            return self.agent_by_name(name), model
        except Exception as e:
            logger.warning(f"auto select speaker failed!{str(e)}")
            return self.next_agent(last_speaker, agents), model
    def _mentioned_agents(self, message_content: str, agents: List[Agent]) -> Dict:
        """
        Finds and counts agent mentions in the string message_content, taking word boundaries into account.

        Returns: A dictionary mapping agent names to mention counts (to be included, at least one mention must occur)
        """
        mentions = dict()
        for agent in agents:
            regex = (
                r"(?<=\W)" + re.escape(agent.name) + r"(?=\W)"
            )  # Finds agent mentions, taking word boundaries into account
            count = len(
                re.findall(regex, " " + message_content + " ")
            )  # Pad the message to help with matching
            if count > 0:
                mentions[agent.name] = count
        return mentions

    def _participant_roles(self, agents: List[Agent] = None) -> str:
        # Default to all agents registered
        if agents is None:
            agents = self.agents

        roles = []
        for agent in agents:
            if agent.system_message.strip() == "":
                logger.warning(
                    f"The agent '{agent.name}' has an empty system_message, and may not work well with GroupChat."
                )
            roles.append(f"{agent.name}: {agent.describe}")
        return "\n".join(roles)

    def agent_by_name(self, name: str) -> Agent:
        """Returns the agent with a given name."""
        return self.agents[self.agent_names.index(name)]

    def next_agent(self, agent: Agent, agents: List[Agent]) -> Agent:
        """Return the next agent in the list."""
        if agents == self.agents:
            return agents[(self.agent_names.index(agent.name) + 1) % len(agents)]
        else:
            offset = self.agent_names.index(agent.name) + 1
            for i in range(len(self.agents)):
                if self.agents[(offset + i) % len(self.agents)] in agents:
                    return self.agents[(offset + i) % len(self.agents)]
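The round-robin fallback in `next_agent` is plain modular arithmetic over the agent list; a tiny sketch:

names = ["Planner", "DataScientist", "Reporter"]

def next_name(current: str) -> str:
    # Wrap around to the first agent after the last one.
    return names[(names.index(current) + 1) % len(names)]

print(next_name("Reporter"))  # 'Planner'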
class PlanChatManager(ConversableAgent):
    """(In preview) A chat manager agent that can manage a group chat of multiple agents."""

    NAME = "plan_manager"

    def __init__(
        self,
        plan_chat: PlanChat,
        planner: Agent,
        memory: GptsMemory,
        agent_context: "AgentContext",
        # unlimited consecutive auto reply by default
        max_consecutive_auto_reply: Optional[int] = sys.maxsize,
        human_input_mode: Optional[str] = "NEVER",
        describe: Optional[str] = "Plan chat manager.",
        **kwargs,
    ):
        super().__init__(
            name=self.NAME,
            describe=describe,
            memory=memory,
            max_consecutive_auto_reply=max_consecutive_auto_reply,
            human_input_mode=human_input_mode,
            agent_context=agent_context,
            **kwargs,
        )
        # Order of register_reply is important.
        # Allow async chat if initiated using a_initiate_chat
        self.register_reply(
            Agent,
            PlanChatManager.a_run_chat,
            config=plan_chat,
            reset_config=PlanChat.reset,
        )
        self.plan_chat = plan_chat
        self.planner = planner

    async def a_reasoning_reply(
        self, messages: Optional[List[Dict]] = None
    ) -> Union[str, Dict, None]:
        if messages is None or len(messages) <= 0:
            message = None
            return None, None
        else:
            message = messages[-1]
            self.plan_chat.messages.append(message)
            return message["content"], None

    async def a_process_rely_message(
        self, conv_id: str, now_plan: GptsPlan, speaker: ConversableAgent
    ):
        rely_prompt = ""
        speaker.reset_rely_message()
        if now_plan.rely and len(now_plan.rely) > 0:
            rely_tasks_list = now_plan.rely.split(",")
            rely_tasks = self.memory.plans_memory.get_by_conv_id_and_num(
                conv_id, rely_tasks_list
            )
            if rely_tasks:
                rely_prompt = "Read the result data of the dependent steps in the above historical message to complete the current goal:"
                for rely_task in rely_tasks:
                    speaker.append_rely_message(
                        {"content": rely_task.sub_task_content},
                        ModelMessageRoleType.HUMAN,
                    )
                    speaker.append_rely_message(
                        {"content": rely_task.result}, ModelMessageRoleType.AI
                    )
        return rely_prompt
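The `rely` field is a comma-separated list of sub-task numbers. A minimal sketch of resolving it against completed results (hypothetical in-memory records instead of GptsPlan rows):

completed = {
    "1": "total sales by category ...",
    "2": "monthly sales trend ...",
}

def rely_context(rely: str) -> list:
    """Collect (task_num, result) pairs for each dependency listed in `rely`."""
    if not rely:
        return []
    return [(num, completed[num]) for num in rely.split(",") if num in completed]

print(rely_context("1,2"))
# [('1', 'total sales by category ...'), ('2', 'monthly sales trend ...')]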
    async def a_verify_reply(
        self, message: Optional[Dict], sender: "Agent", reviewer: "Agent", **kwargs
    ) -> Union[str, Dict, None]:
        return True, message
    async def a_run_chat(
        self,
        message: Optional[str] = None,
        sender: Optional[Agent] = None,
        reviewer: Agent = None,
        config: Optional[PlanChat] = None,
    ):
        """Run a group chat asynchronously."""
        speaker = sender
        groupchat = config

        final_message = None

        for i in range(groupchat.max_round):
            plans = self.memory.plans_memory.get_by_conv_id(self.agent_context.conv_id)
            if not plans or len(plans) <= 0:
                ### No plan yet: generate a new plan. TODO: init plan using the plan manager
                await self.a_send(
                    {"content": message, "current_gogal": message},
                    self.planner,
                    reviewer,
                    request_reply=False,
                )
                verify_pass, reply = await self.planner.a_generate_reply(
                    {"content": message, "current_gogal": message}, self, reviewer
                )

                await self.planner.a_send(
                    message=reply,
                    recipient=self,
                    reviewer=reviewer,
                    request_reply=False,
                )
                if not verify_pass:
                    final_message = reply
                    if i > 10:
                        break
            else:
                todo_plans = [
                    plan
                    for plan in plans
                    if plan.state in [Status.TODO.value, Status.RETRYING.value]
                ]
                if not todo_plans or len(todo_plans) <= 0:
                    ### The plan has been fully executed and a success message is sent to the user.
                    # complete
                    complete_message = {"content": f"TERMINATE", "is_exe_success": True}
                    return True, complete_message
                else:
                    now_plan: GptsPlan = todo_plans[0]

                    # There is no need to broadcast the message to other agents; it will be automatically obtained from the collective memory according to the dependency relationship.
                    try:
                        if Status.RETRYING.value == now_plan.state:
                            if now_plan.retry_times <= now_plan.max_retry_times:
                                current_goal_message = {
                                    "content": now_plan.result,
                                    "current_gogal": now_plan.sub_task_content,
                                    "context": {
                                        "plan_task": now_plan.sub_task_content,
                                        "plan_task_num": now_plan.sub_task_num,
                                    },
                                }
                            else:
                                self.memory.plans_memory.update_task(
                                    self.agent_context.conv_id,
                                    now_plan.sub_task_num,
                                    Status.FAILED.value,
                                    now_plan.retry_times + 1,
                                    speaker.name,
                                    "",
                                    plan_result,
                                )
                                faild_report = {
                                    "content": f"ReTask [{now_plan.sub_task_content}] was retried more than the maximum number of times and still failed.{now_plan.result}",
                                    "is_exe_success": False,
                                }
                                return True, faild_report
                        else:
                            current_goal_message = {
                                "content": now_plan.sub_task_content,
                                "current_gogal": now_plan.sub_task_content,
                                "context": {
                                    "plan_task": now_plan.sub_task_content,
                                    "plan_task_num": now_plan.sub_task_num,
                                },
                            }

                        # select the next speaker
                        speaker, model = await groupchat.a_select_speaker(
                            speaker,
                            self,
                            now_plan.sub_task_content,
                            now_plan.sub_task_agent,
                        )
                        # Tell the speaker the dependent history information
                        rely_prompt = await self.a_process_rely_message(
                            conv_id=self.agent_context.conv_id,
                            now_plan=now_plan,
                            speaker=speaker,
                        )

                        current_goal_message["content"] = (
                            rely_prompt + current_goal_message["content"]
                        )

                        is_recovery = False
                        if message == current_goal_message["content"]:
                            is_recovery = True
                        await self.a_send(
                            message=current_goal_message,
                            recipient=speaker,
                            reviewer=reviewer,
                            request_reply=False,
                            is_recovery=is_recovery,
                        )
                        verify_pass, reply = await speaker.a_generate_reply(
                            current_goal_message, self, reviewer
                        )

                        plan_result = ""

                        if verify_pass:
                            if reply:
                                action_report = reply.get("action_report", None)
                                if action_report:
                                    plan_result = action_report.get("content", "")
                            ### The current planned Agent generation verification is successful
                            ## Plan executed successfully
                            self.memory.plans_memory.complete_task(
                                self.agent_context.conv_id,
                                now_plan.sub_task_num,
                                plan_result,
                            )
                            await speaker.a_send(
                                reply, self, reviewer, request_reply=False
                            )
                        else:
                            plan_result = reply["content"]
                            self.memory.plans_memory.update_task(
                                self.agent_context.conv_id,
                                now_plan.sub_task_num,
                                Status.RETRYING.value,
                                now_plan.retry_times + 1,
                                speaker.name,
                                "",
                                plan_result,
                            )
                    except Exception as e:
                        logger.exception(
                            f"An exception was encountered during the execution of the current plan step.{str(e)}"
                        )
                        error_report = {
                            "content": f"An exception was encountered during the execution of the current plan step.{str(e)}",
                            "is_exe_success": False,
                        }
                        return True, error_report

        return True, {
            "content": f"Maximum number of dialogue rounds exceeded.{self.MAX_CONSECUTIVE_AUTO_REPLY}",
            "is_exe_success": False,
        }
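`a_run_chat` walks each plan through TODO -> COMPLETE, or RETRYING -> FAILED. A condensed sketch of that state machine (plain dicts stand in for GptsPlan rows; the speaker hand-off is elided):

TODO, RETRYING, COMPLETE, FAILED = "todo", "retrying", "complete", "failed"

def step(plans: list) -> str:
    """Execute the first pending plan; return a progress or terminal message."""
    todo = [p for p in plans if p["state"] in (TODO, RETRYING)]
    if not todo:
        return "TERMINATE"  # all sub-tasks finished
    plan = todo[0]
    if plan["state"] == RETRYING and plan["retry_times"] > plan["max_retry_times"]:
        plan["state"] = FAILED
        return f"Task {plan['num']} failed after retries"
    # ... hand plan["content"] to the selected speaker agent here ...
    plan["state"] = COMPLETE
    return f"Task {plan['num']} done"

plans = [{"num": 1, "state": TODO, "retry_times": 0, "max_retry_times": 3, "content": "q"}]
print(step(plans))  # Task 1 done
print(step(plans))  # TERMINATE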
@@ -1,192 +0,0 @@
from typing import Any, Callable, Dict, Optional, Tuple, Union

from dbgpt._private.config import Config
from dbgpt.agent.agents.plan_group_chat import PlanChat
from dbgpt.agent.common.schema import Status
from dbgpt.util.json_utils import find_json_objects

from ..memory.base import GptsPlan
from ..memory.gpts_memory import GptsMemory
from .agent import Agent, AgentContext
from .base_agent import ConversableAgent

# TODO: remove global config
CFG = Config()


class PlannerAgent(ConversableAgent):
    """Planner agent, realizing task goal planning decomposition through LLM"""
    DEFAULT_SYSTEM_MESSAGE = """
    You are a task planning expert! You need to understand each of the intelligent agents below and their capabilities, and make sure that, without the user's help, you answer the user's question by coordinating the available intelligent agents with the given resources.
    Use your knowledge and understanding as an LLM to grasp the intent and goal of the user's question, and generate a task plan that the intelligent agents can execute collaboratively to solve it.

    Available resources:
    {all_resources}

    Available intelligent agents:
    {agents}

    *** Important reminders ***
    - Fully understand the user's goal and then split it into the necessary steps. Keep the split logically ordered and concise, merging work that can be done together into a single step; each sub-task step will be a goal that an intelligent agent must complete independently, so make every sub-task goal concise and clear
    - Only use the intelligent agents mentioned above, and you may use just the subset you need. Assign them to suitable steps strictly according to their described capabilities and limitations; each intelligent agent can be reused
    - When assigning an intelligent agent to a sub-task, consider the overall plan and the relationship with preceding and following dependent steps, so that data can be passed along and used
    - Use the provided resources to help generate the plan steps according to the actual needs of the user's goal; do not use resources that are not needed
    - Each step should preferably use one resource to complete one sub-goal; if the current goal can be decomposed into multiple sub-tasks of the same type, you can generate mutually independent parallel tasks
    - Database resources only need the schema to generate SQL; data retrieval is handed to the user to execute
    - Merge consecutive identical steps with sequential dependencies where possible; if the user's goal does not need to be split, you can generate a single-step task whose content is the user's goal
    - Check the plan carefully to make sure it fully covers all the information involved in the user's question and can ultimately achieve the goal, and confirm that each step includes the resource information it needs, such as URLs and resource names.
    The generation of a concrete task plan can follow the example below:
    user: help me build a sales report summarizing our key metrics and trends
    assistant: [
        {{
            "serial_number": "1",
            "agent": "DataScientist",
            "content": "Retrieve total sales, average sales, and number of transactions grouped by 'product_category'.",
            "rely": ""
        }},
        {{
            "serial_number": "2",
            "agent": "DataScientist",
            "content": "Retrieve monthly sales and transaction number trends.",
            "rely": ""
        }},
        {{
            "serial_number": "3",
            "agent": "DataScientist",
            "content": "Count the number of transactions with 'pay_status' as 'paid' among all transactions to retrieve the sales conversion rate.",
            "rely": ""
        }},
        {{
            "serial_number": "4",
            "agent": "Reporter",
            "content": "Integrate analytical data into the format required to build sales reports.",
            "rely": "1,2,3"
        }}
    ]

    Please think step by step and return your action plan in the following json format:
    [{{
        "serial_number": "0",
        "agent": "the intelligent agent used to complete the current step",
        "content": "the task content of the current step; make sure it can be executed by the intelligent agent",
        "rely": "the serial_number of the other tasks this task depends on, e.g. 1,2,3; empty if there are no dependencies"
    }}]
    Make sure the returned json can be loaded and parsed by Python's json.loads function.
    """

    REPAIR_SYSTEM_MESSAGE = """
    You are a planning expert! Now you need to use your professional knowledge to carefully check the generated plan, re-evaluate and analyze it, and make sure every step of the plan is clear and complete and can be understood by the intelligent agents, resolving the problems in the current plan. Then return the new plan content as required.
    """
    NAME = "Planner"
    def __init__(
        self,
        memory: GptsMemory,
        plan_chat: PlanChat,
        agent_context: AgentContext,
        is_termination_msg: Optional[Callable[[Dict], bool]] = None,
        max_consecutive_auto_reply: Optional[int] = None,
        human_input_mode: Optional[str] = "NEVER",
        **kwargs,
    ):
        super().__init__(
            name=self.NAME,
            memory=memory,
            system_message=self.DEFAULT_SYSTEM_MESSAGE,
            is_termination_msg=is_termination_msg,
            max_consecutive_auto_reply=max_consecutive_auto_reply,
            human_input_mode=human_input_mode,
            agent_context=agent_context,
            **kwargs,
        )
        self.plan_chat = plan_chat
        ### register planning function
        self.register_reply(Agent, PlannerAgent._a_planning)

    def build_param(self, agent_context: AgentContext):
        resources = []
        if agent_context.resource_db is not None:
            db_connect = CFG.LOCAL_DB_MANAGE.get_connect(
                agent_context.resource_db.get("name")
            )

            resources.append(
                f"{agent_context.resource_db.get('type')}:{agent_context.resource_db.get('name')}\n{db_connect.get_table_info()}"
            )
        if agent_context.resource_knowledge is not None:
            resources.append(
                f"{agent_context.resource_knowledge.get('type')}:{agent_context.resource_knowledge.get('name')}\n{agent_context.resource_knowledge.get('introduce')}"
            )
        if agent_context.resource_internet is not None:
            resources.append(
                f"{agent_context.resource_internet.get('type')}:{agent_context.resource_internet.get('name')}\n{agent_context.resource_internet.get('introduce')}"
            )
        return {
            "all_resources": "\n".join([f"- {item}" for item in resources]),
            "agents": "\n".join(
                [f"- {item.name}:{item.describe}" for item in self.plan_chat.agents]
            ),
        }
    async def a_system_fill_param(self):
        params = self.build_param(self.agent_context)
        self.update_system_message(self.DEFAULT_SYSTEM_MESSAGE.format(**params))
    async def _a_planning(
        self,
        message: Optional[str] = None,
        sender: Optional[Agent] = None,
        reviewer: Optional[Agent] = None,
        config: Optional[Any] = None,
    ) -> Tuple[bool, Union[str, Dict, None]]:
        json_objects = find_json_objects(message)
        plan_objects = []
        fail_reason = (
            "Please recheck your answer, no usable plans generated in correct format,"
        )
        json_count = len(json_objects)
        rensponse_succ = True
        if json_count != 1:
            ### Answer failed, turn on automatic repair
            fail_reason += f"There are currently {json_count} json contents"
            rensponse_succ = False
        else:
            try:
                for item in json_objects[0]:
                    plan = GptsPlan(
                        conv_id=self.agent_context.conv_id,
                        sub_task_num=item.get("serial_number"),
                        sub_task_content=item.get("content"),
                    )
                    plan.resource_name = item.get("resource")
                    plan.max_retry_times = self.agent_context.max_retry_round
                    plan.sub_task_agent = item.get("agent")
                    plan.sub_task_title = item.get("content")
                    plan.rely = item.get("rely")
                    plan.retry_times = 0
                    plan.status = Status.TODO.value
                    plan_objects.append(plan)
            except Exception as e:
                fail_reason += f"Return json structure error and cannot be converted to a usable plan,{str(e)}"
                rensponse_succ = False

        if rensponse_succ:
            if len(plan_objects) > 0:
                ### Delete the old plan every time before saving it
                self.memory.plans_memory.remove_by_conv_id(self.agent_context.conv_id)
                self.memory.plans_memory.batch_save(plan_objects)

            content = "\n".join(
                [
                    "{},{}".format(index + 1, item.get("content"))
                    for index, item in enumerate(json_objects[0])
                ]
            )
        else:
            content = fail_reason
        return True, {
            "is_exe_success": rensponse_succ,
            "content": content,
            "view": content,
        }
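To illustrate the plan format the planner is asked to emit, here is a small parsing sketch over a hypothetical reply:

import json

plan_json = """[
  {"serial_number": "1", "agent": "DataScientist", "content": "Retrieve monthly sales.", "rely": ""},
  {"serial_number": "2", "agent": "Reporter", "content": "Build the report.", "rely": "1"}
]"""

plans = json.loads(plan_json)
for item in plans:
    # An empty "rely" means the step has no dependencies.
    deps = [n for n in item["rely"].split(",") if n]
    print(item["serial_number"], item["agent"], "depends on", deps or "nothing")
# 1 DataScientist depends on nothing
# 2 Reporter depends on ['1']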
@@ -79,8 +79,9 @@ def execute_command(
    Returns:
        str: The result of the command
    """
-   cmd = plugin_generator.command_registry.commands.get(command_name)
+   cmd = None
+   if plugin_generator.command_registry:
+       cmd = plugin_generator.command_registry.commands.get(command_name)

    # If the command is found, call it with the provided arguments
    if cmd:
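The added guard avoids an AttributeError when no command registry was loaded. The same pattern in isolation (the registry shape is a hypothetical stand-in):

class Registry:
    def __init__(self):
        self.commands = {"google-search": lambda args: f"searched: {args}"}

def lookup(registry, name):
    cmd = None
    if registry:  # registry may be None when no plugins were loaded
        cmd = registry.commands.get(name)
    return cmd

print(lookup(Registry(), "google-search")({"query": "news"}))  # searched: {'query': 'news'}
print(lookup(None, "google-search"))                           # None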
@@ -552,14 +552,14 @@ class ApiCall:
            except Exception as e:
                param["data"] = []
                param["err_msg"] = str(e)
-           chart_items.append(
-               f"```vis-chart-item\n{json.dumps(param, default=serialize, ensure_ascii=False)}\n```"
-           )
+           chart_items.append(param)

        dashboard_param = {
-           "markdown": "\n".join(chart_items),
+           "data": chart_items,
            "chart_count": len(chart_items),
            "title": title,
            "display_strategy": "default",
            "style": "default",
        }
        view_json_str = json.dumps(
            dashboard_param, default=serialize, ensure_ascii=False
dbgpt/agent/plugin/loader.py (new file, 33 lines)
@@ -0,0 +1,33 @@
import logging
from pathlib import Path
from typing import List, Optional

from .generator import PluginPromptGenerator
from .plugins_util import scan_plugins

logger = logging.getLogger(__name__)


class PluginLoader:
    def load_plugins(
        self, plugin_path: Optional[str], available_plugins: Optional[List[str]] = None
    ) -> PluginPromptGenerator:
        logger.info(
            f"load_plugin path:{plugin_path}, available:{available_plugins if available_plugins else ''}"
        )
        plugins = scan_plugins(plugin_path)

        generator: PluginPromptGenerator = PluginPromptGenerator()
        # load select plugin
        if available_plugins and len(available_plugins) > 0:
            for plugin in plugins:
                if plugin._name in available_plugins:
                    if not plugin.can_handle_post_prompt():
                        continue
                    generator = plugin.post_prompt(generator)
        else:
            for plugin in plugins:
                if not plugin.can_handle_post_prompt():
                    continue
                generator = plugin.post_prompt(generator)
        return generator
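A usage sketch for the new loader (the ./plugins path is an assumption; `generate_commands_string` is the accessor used by `a_system_fill_param` above):

loader = PluginLoader()
# Scan and load every plugin found under the directory; pass available_plugins
# to restrict loading to a named subset.
generator = loader.load_plugins(plugin_path="./plugins")
print(generator.generate_commands_string())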
@@ -1,20 +0,0 @@
import logging
from typing import List

from .generator import PluginPromptGenerator

logger = logging.getLogger(__name__)


class PluginLoader:
    def load_plugins(
        self, generator: PluginPromptGenerator, my_plugins: List[str]
    ) -> PluginPromptGenerator:
        logger.info(f"load_select_plugin:{my_plugins}")
        # load select plugin
        for plugin in self.plugins:
            if plugin._name in my_plugins:
                if not plugin.can_handle_post_prompt():
                    continue
                generator = plugin.post_prompt(generator)
        return generator