refactor(agent): Agent modular refactoring (#1487)

This commit is contained in:
Fangyin Cheng
2024-05-07 09:45:26 +08:00
committed by GitHub
parent 2a418f91e8
commit 863b5404dd
86 changed files with 4513 additions and 967 deletions


@@ -0,0 +1,36 @@
"""Plan module for the agent."""
from .awel.agent_operator import ( # noqa: F401
AgentDummyTrigger,
AWELAgentOperator,
WrappedAgentOperator,
)
from .awel.agent_operator_resource import ( # noqa: F401
AWELAgent,
AWELAgentConfig,
AWELAgentResource,
)
from .awel.team_awel_layout import ( # noqa: F401
AWELTeamContext,
DefaultAWELLayoutManager,
WrappedAWELLayoutManager,
)
from .plan_action import PlanAction, PlanInput # noqa: F401
from .planner_agent import PlannerAgent # noqa: F401
from .team_auto_plan import AutoPlanChatManager # noqa: F401
__all__ = [
"PlanAction",
"PlanInput",
"PlannerAgent",
"AutoPlanChatManager",
"AWELAgent",
"AWELAgentConfig",
"AWELAgentResource",
"AWELTeamContext",
"DefaultAWELLayoutManager",
"WrappedAWELLayoutManager",
"AgentDummyTrigger",
"AWELAgentOperator",
"WrappedAgentOperator",
]


@@ -0,0 +1,4 @@
"""External planner.
Use AWEL as the external planner.
"""


@@ -0,0 +1,311 @@
"""Agent Operator for AWEL."""
from abc import ABC
from typing import List, Optional, Type
from dbgpt.core.awel import MapOperator
from dbgpt.core.awel.flow import (
IOField,
OperatorCategory,
OperatorType,
Parameter,
ViewMetadata,
)
from dbgpt.core.awel.trigger.base import Trigger
from dbgpt.core.interface.message import ModelMessageRoleType
# TODO: Don't depend on MixinLLMOperator
from dbgpt.model.operators.llm_operator import MixinLLMOperator
from ....util.llm.llm import LLMConfig
from ...agent import Agent, AgentGenerateContext, AgentMessage
from ...agent_manage import get_agent_manager
from ...base_agent import ConversableAgent
from .agent_operator_resource import AWELAgent
class BaseAgentOperator:
"""The abstract operator for an Agent."""
SHARE_DATA_KEY_MODEL_NAME = "share_data_key_agent_name"
def __init__(self, agent: Optional[Agent] = None):
"""Create an AgentOperator."""
self._agent = agent
@property
def agent(self) -> Agent:
"""Return the Agent."""
if not self._agent:
raise ValueError("agent is not set")
return self._agent
class WrappedAgentOperator(
BaseAgentOperator, MapOperator[AgentGenerateContext, AgentGenerateContext], ABC
):
"""The Agent operator.
Wrap the agent and trigger the agent to generate a reply.
"""
def __init__(self, agent: Agent, **kwargs):
"""Create an WrappedAgentOperator."""
super().__init__(agent=agent)
MapOperator.__init__(self, **kwargs)
async def map(self, input_value: AgentGenerateContext) -> AgentGenerateContext:
"""Trigger agent to generate a reply."""
now_rely_messages: List[AgentMessage] = []
if not input_value.message:
raise ValueError("The message is empty.")
input_message = input_value.message.copy()
# Isolate the message delivery mechanism and pass it to the operator
_goal = self.agent.name if self.agent.name else self.agent.role
current_goal = f"[{_goal}]:"
if input_message.content:
current_goal += input_message.content
input_message.current_goal = current_goal
        # Record the received message as a human message
human_message = input_message.copy()
human_message.role = ModelMessageRoleType.HUMAN
now_rely_messages.append(human_message)
# Send a message (no reply required) and pass the message content
now_message = input_message
if input_value.rely_messages and len(input_value.rely_messages) > 0:
now_message = input_value.rely_messages[-1]
if not input_value.sender:
raise ValueError("The sender is empty.")
await input_value.sender.send(
now_message, self.agent, input_value.reviewer, False
)
agent_reply_message = await self.agent.generate_reply(
received_message=input_message,
sender=input_value.sender,
reviewer=input_value.reviewer,
rely_messages=input_value.rely_messages,
)
is_success = agent_reply_message.success
if not is_success:
raise ValueError(
f"The task failed at step {self.agent.role} and the attempt "
f"to repair it failed. The final reason for "
f"failure:{agent_reply_message.content}!"
)
        # Record the generated reply as an AI message
ai_message = agent_reply_message.copy()
ai_message.role = ModelMessageRoleType.AI
now_rely_messages.append(ai_message)
# Handle user goals and outcome dependencies
return AgentGenerateContext(
message=input_message,
sender=self.agent,
reviewer=input_value.reviewer,
# Default single step transfer of information
rely_messages=now_rely_messages,
silent=input_value.silent,
)
class AWELAgentOperator(
MixinLLMOperator, MapOperator[AgentGenerateContext, AgentGenerateContext]
):
"""The Agent operator for AWEL."""
metadata = ViewMetadata(
label="AWEL Agent Operator",
name="agent_operator",
category=OperatorCategory.AGENT,
description="The Agent operator.",
parameters=[
Parameter.build_from(
"Agent",
"awel_agent",
AWELAgent,
description="The dbgpt agent.",
),
],
inputs=[
IOField.build_from(
"Agent Operator Request",
"agent_operator_request",
AgentGenerateContext,
"The Agent Operator request.",
)
],
outputs=[
IOField.build_from(
"Agent Operator Output",
"agent_operator_output",
AgentGenerateContext,
description="The Agent Operator output.",
)
],
)
def __init__(self, awel_agent: AWELAgent, **kwargs):
"""Create an AgentOperator."""
MixinLLMOperator.__init__(self)
MapOperator.__init__(self, **kwargs)
self.awel_agent = awel_agent
async def map(
self,
input_value: AgentGenerateContext,
) -> AgentGenerateContext:
"""Trigger agent to generate a reply."""
if not input_value.message:
raise ValueError("The message is empty.")
input_message = input_value.message.copy()
agent = await self.get_agent(input_value)
if agent.fixed_subgoal and len(agent.fixed_subgoal) > 0:
# Isolate the message delivery mechanism and pass it to the operator
current_goal = f"[{agent.name if agent.name else agent.role}]:"
if agent.fixed_subgoal:
current_goal += agent.fixed_subgoal
input_message.current_goal = current_goal
input_message.content = agent.fixed_subgoal
else:
# Isolate the message delivery mechanism and pass it to the operator
current_goal = f"[{agent.name if agent.name else agent.role}]:"
if input_message.content:
current_goal += input_message.content
input_message.current_goal = current_goal
now_rely_messages: List[AgentMessage] = []
        # Record the received message as a human message
human_message = input_message.copy()
human_message.role = ModelMessageRoleType.HUMAN
now_rely_messages.append(human_message)
# Send a message (no reply required) and pass the message content
now_message = input_message
if input_value.rely_messages and len(input_value.rely_messages) > 0:
now_message = input_value.rely_messages[-1]
sender = input_value.sender
if not sender:
raise ValueError("The sender is empty.")
await sender.send(now_message, agent, input_value.reviewer, False)
agent_reply_message = await agent.generate_reply(
received_message=input_message,
sender=sender,
reviewer=input_value.reviewer,
rely_messages=input_value.rely_messages,
)
is_success = agent_reply_message.success
if not is_success:
raise ValueError(
f"The task failed at step {agent.role} and the attempt to "
f"repair it failed. The final reason for "
f"failure:{agent_reply_message.content}!"
)
        # Record the generated reply as an AI message
ai_message: AgentMessage = agent_reply_message.copy()
ai_message.role = ModelMessageRoleType.AI
now_rely_messages.append(ai_message)
# Handle user goals and outcome dependencies
return AgentGenerateContext(
message=input_message,
sender=agent,
reviewer=input_value.reviewer,
# Default single step transfer of information
rely_messages=now_rely_messages,
silent=input_value.silent,
memory=input_value.memory.structure_clone() if input_value.memory else None,
agent_context=input_value.agent_context,
resource_loader=input_value.resource_loader,
llm_client=input_value.llm_client,
round_index=agent.consecutive_auto_reply_counter,
)
async def get_agent(
self,
input_value: AgentGenerateContext,
) -> ConversableAgent:
"""Build the agent."""
# agent build
agent_cls: Type[ConversableAgent] = get_agent_manager().get_by_name(
self.awel_agent.agent_profile
)
llm_config = self.awel_agent.llm_config
if not llm_config:
if input_value.llm_client:
llm_config = LLMConfig(llm_client=input_value.llm_client)
else:
llm_config = LLMConfig(llm_client=self.llm_client)
else:
if not llm_config.llm_client:
if input_value.llm_client:
llm_config.llm_client = input_value.llm_client
else:
llm_config.llm_client = self.llm_client
kwargs = {}
if self.awel_agent.role_name:
kwargs["name"] = self.awel_agent.role_name
if self.awel_agent.fixed_subgoal:
kwargs["fixed_subgoal"] = self.awel_agent.fixed_subgoal
agent = (
await agent_cls(**kwargs)
.bind(input_value.memory)
.bind(llm_config)
.bind(input_value.agent_context)
.bind(self.awel_agent.resources)
.bind(input_value.resource_loader)
.build()
)
return agent
class AgentDummyTrigger(Trigger):
"""Http trigger for AWEL.
Http trigger is used to trigger a DAG by http request.
"""
metadata = ViewMetadata(
label="Agent Trigger",
name="agent_trigger",
category=OperatorCategory.AGENT,
operator_type=OperatorType.INPUT,
description="Trigger your workflow by agent",
inputs=[],
parameters=[],
outputs=[
IOField.build_from(
"Agent Operator Context",
"agent_operator_context",
AgentGenerateContext,
description="The Agent Operator output.",
)
],
)
def __init__(
self,
**kwargs,
) -> None:
"""Initialize a HttpTrigger."""
super().__init__(**kwargs)
async def trigger(self, **kwargs) -> None:
"""Trigger the DAG. Not used in HttpTrigger."""
raise NotImplementedError("Dummy trigger does not support trigger.")
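
Below is a hypothetical wiring sketch (not part of this commit) showing how the operators in this file could be composed into an AWEL DAG; the agent profile names ("DataScientist", "Reporter") and the prepared AgentGenerateContext are assumptions.

from dbgpt.core.awel import DAG

with DAG("awel_agent_operator_example") as example_dag:
    # Each operator wraps one agent profile registered with the agent manager.
    first = AWELAgentOperator(awel_agent=AWELAgent(agent_profile="DataScientist"))
    second = AWELAgentOperator(awel_agent=AWELAgent(agent_profile="Reporter"))
    first >> second  # the output AgentGenerateContext of one agent feeds the next

async def run_once(start_context: AgentGenerateContext) -> AgentGenerateContext:
    # Calling the leaf node runs the whole chain, mirroring AWELBaseManager.act()
    # in team_awel_layout.py.
    return await second.call(call_data=start_context)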


@@ -0,0 +1,209 @@
"""The AWEL Agent Operator Resource."""
from typing import Any, Dict, List, Optional
from dbgpt._private.pydantic import BaseModel, ConfigDict, Field, model_validator
from dbgpt.core import LLMClient
from dbgpt.core.awel.flow import (
FunctionDynamicOptions,
OptionValue,
Parameter,
ResourceCategory,
register_resource,
)
from ....resource.resource_api import AgentResource, ResourceType
from ....util.llm.llm import LLMConfig, LLMStrategyType
from ...agent_manage import get_agent_manager
@register_resource(
label="AWEL Agent Resource",
name="agent_operator_resource",
description="The Agent Resource.",
category=ResourceCategory.AGENT,
parameters=[
Parameter.build_from(
label="Agent Resource Type",
name="agent_resource_type",
type=str,
optional=True,
default=None,
options=[
OptionValue(label=item.name, name=item.value, value=item.value)
for item in ResourceType
],
),
Parameter.build_from(
label="Agent Resource Name",
name="agent_resource_name",
type=str,
optional=True,
default=None,
description="The agent resource name.",
),
Parameter.build_from(
label="Agent Resource Value",
name="agent_resource_value",
type=str,
optional=True,
default=None,
description="The agent resource value.",
),
],
alias=[
"dbgpt.serve.agent.team.layout.agent_operator_resource.AwelAgentResource",
"dbgpt.agent.plan.awel.agent_operator_resource.AWELAgentResource",
],
)
class AWELAgentResource(AgentResource):
"""AWEL Agent Resource."""
@model_validator(mode="before")
@classmethod
def pre_fill(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Pre fill the agent ResourceType."""
if not isinstance(values, dict):
return values
        name = values.pop("agent_resource_name")
        resource_type = values.pop("agent_resource_type")
        value = values.pop("agent_resource_value")
        values["name"] = name
        values["type"] = ResourceType(resource_type)
        values["value"] = value
return values
@register_resource(
label="AWEL Agent LLM Config",
name="agent_operator_llm_config",
description="The Agent LLM Config.",
category=ResourceCategory.AGENT,
parameters=[
Parameter.build_from(
"LLM Client",
"llm_client",
LLMClient,
optional=True,
default=None,
description="The LLM Client.",
),
Parameter.build_from(
label="Agent LLM Strategy",
name="llm_strategy",
type=str,
optional=True,
default=None,
options=[
OptionValue(label=item.name, name=item.value, value=item.value)
for item in LLMStrategyType
],
description="The Agent LLM Strategy.",
),
Parameter.build_from(
label="Agent LLM Strategy Value",
name="strategy_context",
type=str,
optional=True,
default=None,
description="The agent LLM Strategy Value.",
),
],
alias=[
"dbgpt.serve.agent.team.layout.agent_operator_resource.AwelAgentConfig",
"dbgpt.agent.plan.awel.agent_operator_resource.AWELAgentConfig",
],
)
class AWELAgentConfig(LLMConfig):
"""AWEL Agent Config."""
pass
def _agent_resource_option_values() -> List[OptionValue]:
return [
OptionValue(label=item["name"], name=item["name"], value=item["name"])
for item in get_agent_manager().list_agents()
]
@register_resource(
label="AWEL Layout Agent",
name="agent_operator_agent",
description="The Agent to build the Agent Operator.",
category=ResourceCategory.AGENT,
parameters=[
Parameter.build_from(
label="Agent Profile",
name="agent_profile",
type=str,
description="Which agent want use.",
options=FunctionDynamicOptions(func=_agent_resource_option_values),
),
Parameter.build_from(
label="Role Name",
name="role_name",
type=str,
optional=True,
default=None,
description="The agent role name.",
),
Parameter.build_from(
label="Fixed Gogal",
name="fixed_subgoal",
type=str,
optional=True,
default=None,
description="The agent fixed gogal.",
),
Parameter.build_from(
label="Agent Resource",
name="agent_resource",
type=AWELAgentResource,
optional=True,
default=None,
description="The agent resource.",
),
Parameter.build_from(
label="Agent LLM Config",
name="agent_llm_Config",
type=AWELAgentConfig,
optional=True,
default=None,
description="The agent llm config.",
),
],
alias=[
"dbgpt.serve.agent.team.layout.agent_operator_resource.AwelAgent",
"dbgpt.agent.plan.awel.agent_operator_resource.AWELAgent",
],
)
class AWELAgent(BaseModel):
"""AWEL Agent."""
model_config = ConfigDict(arbitrary_types_allowed=True)
agent_profile: str
role_name: Optional[str] = None
llm_config: Optional[LLMConfig] = None
resources: List[AgentResource] = Field(default_factory=list)
fixed_subgoal: Optional[str] = None
@model_validator(mode="before")
@classmethod
def pre_fill(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Pre fill the agent ResourceType."""
if not isinstance(values, dict):
return values
        resource = values.pop("agent_resource", None)
        llm_config = values.pop("agent_llm_Config", None)
if resource is not None:
values["resources"] = [resource]
if llm_config is not None:
values["llm_config"] = llm_config
return values
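
An illustrative sketch (values are made up) of how the pre_fill validators above map the flow parameter names onto the underlying model fields:

# Illustrative only: the resource name/value and the agent profile are invented, and the
# ResourceType member used here is an assumption; any valid member works.
resource = AWELAgentResource(
    agent_resource_name="sales_db",
    agent_resource_type=ResourceType.DB.value,
    agent_resource_value="dbgpt_test",
)
# pre_fill maps this to: name="sales_db", type=ResourceType.DB, value="dbgpt_test"

agent = AWELAgent(
    agent_profile="DataScientist",
    agent_resource=resource,   # becomes resources=[resource]
    agent_llm_Config=None,     # becomes llm_config=None
)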


@@ -0,0 +1,268 @@
"""The manager of the team for the AWEL layout."""
import logging
from abc import ABC, abstractmethod
from typing import Optional, cast
from dbgpt._private.config import Config
from dbgpt._private.pydantic import (
BaseModel,
ConfigDict,
Field,
model_to_dict,
validator,
)
from dbgpt.core.awel import DAG
from dbgpt.core.awel.dag.dag_manager import DAGManager
from ...action.base import ActionOutput
from ...agent import Agent, AgentGenerateContext, AgentMessage
from ...base_team import ManagerAgent
from ...profile import DynConfig, ProfileConfig
from .agent_operator import AWELAgentOperator, WrappedAgentOperator
logger = logging.getLogger(__name__)
class AWELTeamContext(BaseModel):
"""The context of the team for the AWEL layout."""
dag_id: str = Field(
...,
description="The unique id of dag",
examples=["flow_dag_testflow_66d8e9d6-f32e-4540-a5bd-ea0648145d0e"],
)
uid: str = Field(
default=None,
description="The unique id of flow",
examples=["66d8e9d6-f32e-4540-a5bd-ea0648145d0e"],
)
name: Optional[str] = Field(
default=None,
description="The name of dag",
)
label: Optional[str] = Field(
default=None,
description="The label of dag",
)
version: Optional[str] = Field(
default=None,
description="The version of dag",
)
description: Optional[str] = Field(
default=None,
description="The description of dag",
)
editable: bool = Field(
default=False,
description="is the dag is editable",
examples=[True, False],
)
state: Optional[str] = Field(
default=None,
description="The state of dag",
)
user_name: Optional[str] = Field(
default=None,
description="The owner of current dag",
)
sys_code: Optional[str] = Field(
default=None,
description="The system code of current dag",
)
flow_category: Optional[str] = Field(
default="common",
description="The flow category of current dag",
)
def to_dict(self):
"""Convert the object to a dictionary."""
return model_to_dict(self)
class AWELBaseManager(ManagerAgent, ABC):
"""AWEL base manager."""
model_config = ConfigDict(arbitrary_types_allowed=True)
profile: ProfileConfig = ProfileConfig(
name="AWELBaseManager",
role=DynConfig(
"PlanManager", category="agent", key="dbgpt_agent_plan_awel_profile_name"
),
goal=DynConfig(
"Promote and solve user problems according to the process arranged "
"by AWEL.",
category="agent",
key="dbgpt_agent_plan_awel_profile_goal",
),
desc=DynConfig(
"Promote and solve user problems according to the process arranged "
"by AWEL.",
category="agent",
key="dbgpt_agent_plan_awel_profile_desc",
),
)
async def _a_process_received_message(self, message: AgentMessage, sender: Agent):
"""Process the received message."""
pass
@abstractmethod
def get_dag(self) -> DAG:
"""Get the DAG of the manager."""
async def act(
self,
message: Optional[str],
sender: Optional[Agent] = None,
reviewer: Optional[Agent] = None,
**kwargs,
) -> Optional[ActionOutput]:
"""Perform the action."""
try:
agent_dag = self.get_dag()
last_node: AWELAgentOperator = cast(
AWELAgentOperator, agent_dag.leaf_nodes[0]
)
start_message_context: AgentGenerateContext = AgentGenerateContext(
message=AgentMessage(content=message, current_goal=message),
sender=sender,
reviewer=reviewer,
memory=self.memory.structure_clone(),
agent_context=self.agent_context,
resource_loader=self.resource_loader,
llm_client=self.not_null_llm_config.llm_client,
)
final_generate_context: AgentGenerateContext = await last_node.call(
call_data=start_message_context
)
last_message = final_generate_context.rely_messages[-1]
last_agent = await last_node.get_agent(final_generate_context)
if final_generate_context.round_index is not None:
last_agent.consecutive_auto_reply_counter = (
final_generate_context.round_index
)
if not sender:
raise ValueError("sender is required!")
await last_agent.send(
last_message, sender, start_message_context.reviewer, False
)
view_message: Optional[str] = None
if last_message.action_report:
view_message = last_message.action_report.get("view", None)
return ActionOutput(
content=last_message.content,
view=view_message,
)
except Exception as e:
logger.exception(f"DAG run failed!{str(e)}")
return ActionOutput(
is_exe_success=False,
content=f"Failed to complete goal! {str(e)}",
)
class WrappedAWELLayoutManager(AWELBaseManager):
"""The manager of the team for the AWEL layout.
Receives a DAG or builds a DAG from the agents.
"""
model_config = ConfigDict(arbitrary_types_allowed=True)
dag: Optional[DAG] = Field(None, description="The DAG of the manager")
def get_dag(self) -> DAG:
"""Get the DAG of the manager."""
if self.dag:
return self.dag
conv_id = self.not_null_agent_context.conv_id
last_node: Optional[WrappedAgentOperator] = None
with DAG(
f"layout_agents_{self.not_null_agent_context.gpts_app_name}_{conv_id}"
) as dag:
for agent in self.agents:
now_node = WrappedAgentOperator(agent=agent)
if not last_node:
last_node = now_node
else:
last_node >> now_node
last_node = now_node
self.dag = dag
return dag
async def act(
self,
message: Optional[str],
sender: Optional[Agent] = None,
reviewer: Optional[Agent] = None,
**kwargs,
) -> Optional[ActionOutput]:
"""Perform the action."""
try:
dag = self.get_dag()
last_node: WrappedAgentOperator = cast(
WrappedAgentOperator, dag.leaf_nodes[0]
)
start_message_context: AgentGenerateContext = AgentGenerateContext(
message=AgentMessage(content=message, current_goal=message),
sender=self,
reviewer=reviewer,
)
final_generate_context: AgentGenerateContext = await last_node.call(
call_data=start_message_context
)
last_message = final_generate_context.rely_messages[-1]
last_agent = last_node.agent
await last_agent.send(
last_message,
self,
start_message_context.reviewer,
False,
)
view_message: Optional[str] = None
if last_message.action_report:
view_message = last_message.action_report.get("view", None)
return ActionOutput(
content=last_message.content,
view=view_message,
)
except Exception as e:
logger.exception(f"DAG run failed!{str(e)}")
return ActionOutput(
is_exe_success=False,
content=f"Failed to complete goal! {str(e)}",
)
class DefaultAWELLayoutManager(AWELBaseManager):
"""The manager of the team for the AWEL layout."""
model_config = ConfigDict(arbitrary_types_allowed=True)
dag: AWELTeamContext = Field(...)
@validator("dag")
def check_dag(cls, value):
"""Check the DAG of the manager."""
assert value is not None and value != "", "dag must not be empty"
return value
def get_dag(self) -> DAG:
"""Get the DAG of the manager."""
cfg = Config()
_dag_manager = DAGManager.get_instance(cfg.SYSTEM_APP) # type: ignore
agent_dag: Optional[DAG] = _dag_manager.get_dag(alias_name=self.dag.uid)
if agent_dag is None:
raise ValueError(f"The configured flow cannot be found![{self.dag.name}]")
return agent_dag
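
A hypothetical usage sketch (not part of this commit) of the wrapped layout manager; memory, llm_config, agent_context, resource_loader and the sub-agents are assumed to be prepared elsewhere, and hire() is the ManagerAgent helper defined outside this diff.

async def run_layout_team(
    user_proxy: Agent,
    data_scientist: Agent,
    reporter: Agent,
    memory,           # AgentMemory prepared by the caller
    llm_config,       # LLMConfig with a bound LLM client
    agent_context,    # AgentContext of the current conversation
    resource_loader,  # ResourceLoader
) -> Optional[ActionOutput]:
    manager = (
        await WrappedAWELLayoutManager()
        .bind(memory)
        .bind(llm_config)
        .bind(agent_context)
        .bind(resource_loader)
        .build()
    )
    manager.hire([data_scientist, reporter])  # hire order becomes the node order in get_dag()
    return await manager.act(
        "Build a sales report summarizing key metrics and trends",
        sender=user_proxy,
        reviewer=None,
    )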


@@ -0,0 +1,139 @@
"""Plan Action."""
import logging
from typing import List, Optional
from dbgpt._private.pydantic import BaseModel, Field
from dbgpt.vis.tags.vis_agent_plans import Vis, VisAgentPlans
from ...resource.resource_api import AgentResource
from ..action.base import Action, ActionOutput
from ..agent import AgentContext
from ..memory.gpts.base import GptsPlan
from ..memory.gpts.gpts_memory import GptsPlansMemory
from ..schema import Status
logger = logging.getLogger(__name__)
class PlanInput(BaseModel):
"""Plan input model."""
serial_number: int = Field(
0,
description="Number of sub-tasks",
)
agent: str = Field(..., description="The agent name to complete current task")
content: str = Field(
...,
description="The task content of current step, make sure it can by executed by"
" agent",
)
rely: str = Field(
...,
description="The rely task number(serial_number), e.g. 1,2,3, empty if no rely",
)
class PlanAction(Action[List[PlanInput]]):
"""Plan action class."""
def __init__(self, **kwargs):
"""Create a plan action."""
super().__init__()
self._render_protocol = VisAgentPlans()
@property
def render_protocol(self) -> Optional[Vis]:
"""Return the render protocol."""
return self._render_protocol
@property
def out_model_type(self):
"""Output model type."""
return List[PlanInput]
async def run(
self,
ai_message: str,
resource: Optional[AgentResource] = None,
rely_action_out: Optional[ActionOutput] = None,
need_vis_render: bool = True,
**kwargs,
) -> ActionOutput:
"""Run the plan action."""
context: AgentContext = kwargs["context"]
plans_memory: GptsPlansMemory = kwargs["plans_memory"]
try:
param: List[PlanInput] = self._input_convert(ai_message, List[PlanInput])
except Exception as e:
            logger.exception(str(e))
return ActionOutput(
is_exe_success=False,
content="The requested correctly structured answer could not be found.",
)
fail_reason = ""
try:
response_success = True
plan_objects = []
try:
for item in param:
plan = GptsPlan(
conv_id=context.conv_id,
sub_task_num=item.serial_number,
sub_task_content=item.content,
)
plan.resource_name = ""
plan.max_retry_times = context.max_retry_round
plan.sub_task_agent = item.agent
plan.sub_task_title = item.content
plan.rely = item.rely
plan.retry_times = 0
plan.state = Status.TODO.value
plan_objects.append(plan)
plans_memory.remove_by_conv_id(context.conv_id)
plans_memory.batch_save(plan_objects)
except Exception as e:
logger.exception(str(e))
fail_reason = (
f"The generated plan cannot be stored, reason: {str(e)}."
f" Please check whether it is a problem with the plan content. "
f"If so, please regenerate the correct plan. If not, please return"
f" 'TERMINATE'."
)
response_success = False
if response_success:
plan_content = []
mk_plans = []
for item in param:
plan_content.append(
{
"name": item.content,
"num": item.serial_number,
"status": Status.TODO.value,
"agent": item.agent,
"rely": item.rely,
"markdown": "",
}
)
mk_plans.append(
f"- {item.serial_number}.{item.content}[{item.agent}]"
)
view = "\n".join(mk_plans)
return ActionOutput(
is_exe_success=True,
content=ai_message,
view=view,
)
else:
raise ValueError(fail_reason)
except Exception as e:
logger.exception("Plan Action Run Failed")
return ActionOutput(
is_exe_success=False, content=f"Plan action run failed!{str(e)}"
)
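
For reference, a sketch of the kind of ai_message payload PlanAction expects to parse into List[PlanInput] (shape taken from the PlannerAgent examples in planner_agent.py); running it needs the kwargs supplied by PlannerAgent.prepare_act_param().

example_ai_message = """[
    {"serial_number": "1", "agent": "DataScientist",
     "content": "Retrieve monthly sales and transaction number trends.", "rely": ""},
    {"serial_number": "2", "agent": "Reporter",
     "content": "Integrate analytical data into a sales report.", "rely": "1"}
]"""
# Hypothetical invocation; agent_context and plans_memory come from the manager:
# output = await PlanAction().run(
#     example_ai_message, context=agent_context, plans_memory=plans_memory
# )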


@@ -0,0 +1,165 @@
"""Planner Agent."""
from typing import Any, Dict, List
from dbgpt._private.pydantic import Field
from ..agent import AgentMessage
from ..base_agent import ConversableAgent
from ..plan.plan_action import PlanAction
from ..profile import DynConfig, ProfileConfig
class PlannerAgent(ConversableAgent):
"""Planner Agent.
    A planner agent that uses the LLM to decompose the task goal into an executable plan.
"""
agents: List[ConversableAgent] = Field(default_factory=list)
profile: ProfileConfig = ProfileConfig(
name=DynConfig(
"Planner",
category="agent",
key="dbgpt_agent_plan_planner_agent_profile_name",
),
role=DynConfig(
"Planner",
category="agent",
key="dbgpt_agent_plan_planner_agent_profile_role",
),
goal=DynConfig(
"Understand each of the following intelligent agents and their "
"capabilities, using the provided resources, solve user problems by "
"coordinating intelligent agents. Please utilize your LLM's knowledge "
"and understanding ability to comprehend the intent and goals of the "
"user's problem, generating a task plan that can be completed through"
" the collaboration of intelligent agents without user assistance.",
category="agent",
key="dbgpt_agent_plan_planner_agent_profile_goal",
),
expand_prompt=DynConfig(
"Available Intelligent Agents:\n {{ agents }}",
category="agent",
key="dbgpt_agent_plan_planner_agent_profile_expand_prompt",
),
constraints=DynConfig(
[
"Every step of the task plan should exist to advance towards solving "
"the user's goals. Do not generate meaningless task steps; ensure "
"that each step has a clear goal and its content is complete.",
"Pay attention to the dependencies and logic of each step in the task "
"plan. For the steps that are depended upon, consider the data they "
"depend on and whether it can be obtained based on the current goal. "
"If it cannot be obtained, please indicate in the goal that the "
"dependent data needs to be generated.",
"Each step must be an independently achievable goal. Ensure that the "
"logic and information are complete. Avoid steps with unclear "
"objectives, like 'Analyze the retrieved issues data,' where it's "
"unclear what specific content needs to be analyzed.",
"Please ensure that only the intelligent agents mentioned above are "
"used, and you may use only the necessary parts of them. Allocate "
"them to appropriate steps strictly based on their described "
"capabilities and limitations. Each intelligent agent can be reused.",
"Utilize the provided resources to assist in generating the plan "
"steps according to the actual needs of the user's goals. Do not use "
"unnecessary resources.",
"Each step should ideally use only one type of resource to accomplish "
"a sub-goal. If the current goal can be broken down into multiple "
"subtasks of the same type, you can create mutually independent "
"parallel tasks.",
"Data resources can be loaded and utilized by the appropriate "
"intelligent agents without the need to consider the issues related "
"to data loading links.",
"Try to merge continuous steps that have sequential dependencies. If "
"the user's goal does not require splitting, you can create a "
"single-step task with content that is the user's goal.",
"Carefully review the plan to ensure it comprehensively covers all "
"information involved in the user's problem and can ultimately "
"achieve the goal. Confirm whether each step includes the necessary "
"resource information, such as URLs, resource names, etc.",
],
category="agent",
key="dbgpt_agent_plan_planner_agent_profile_constraints",
),
desc=DynConfig(
"You are a task planning expert! You can coordinate intelligent agents"
" and allocate resources to achieve complex task goals.",
category="agent",
key="dbgpt_agent_plan_planner_agent_profile_desc",
),
examples=DynConfig(
"""
user:help me build a sales report summarizing our key metrics and trends
assistants:[
{{
"serial_number": "1",
"agent": "DataScientist",
"content": "Retrieve total sales, average sales, and number of transactions grouped by "product_category"'.",
"rely": ""
}},
{{
"serial_number": "2",
"agent": "DataScientist",
"content": "Retrieve monthly sales and transaction number trends.",
"rely": ""
}},
{{
"serial_number": "3",
"agent": "Reporter",
"content": "Integrate analytical data into the format required to build sales reports.",
"rely": "1,2"
}}
]""", # noqa: E501
category="agent",
key="dbgpt_agent_plan_planner_agent_profile_examples",
),
)
_goal_zh: str = (
"理解下面每个智能体(agent)和他们的能力,使用给出的资源,通过协调智能体来解决"
"用户问题。 请发挥你LLM的知识和理解能力理解用户问题的意图和目标生成一个可以在没有用户帮助"
"下,由智能体协作完成目标的任务计划。"
)
_expand_prompt_zh: str = "可用智能体(agent):\n {{ agents }}"
_constraints_zh: List[str] = [
"任务计划的每个步骤都应该是为了推进解决用户目标而存在,不要生成无意义的任务步骤,确保每个步骤内目标明确内容完整。",
"关注任务计划每个步骤的依赖关系和逻辑,被依赖步骤要考虑被依赖的数据,是否能基于当前目标得到,如果不能请在目标中提示要生成被依赖数据。",
"每个步骤都是一个独立可完成的目标,一定要确保逻辑和信息完整,不要出现类似:"
"'Analyze the retrieved issues data'这样目标不明确,不知道具体要分析啥内容的步骤",
"请确保只使用上面提到的智能体,并且可以只使用其中需要的部分,严格根据描述能力和限制分配给合适的步骤,每个智能体都可以重复使用。",
"根据用户目标的实际需要使用提供的资源来协助生成计划步骤,不要使用不需要的资源。",
"每个步骤最好只使用一种资源完成一个子目标,如果当前目标可以分解为同类型的多个子任务,可以生成相互不依赖的并行任务。",
"数据资源可以被合适的智能体加载使用,不用考虑数据资源的加载链接问题",
"尽量合并有顺序依赖的连续相同步骤,如果用户目标无拆分必要,可以生成内容为用户目标的单步任务。",
"仔细检查计划,确保计划完整的包含了用户问题所涉及的所有信息,并且最终能完成目标,确认每个步骤是否包含了需要用到的资源信息,如URL、资源名等. ",
]
_desc_zh: str = "你是一个任务规划专家!可以协调智能体,分配资源完成复杂的任务目标。"
def __init__(self, **kwargs):
"""Create a new PlannerAgent instance."""
super().__init__(**kwargs)
self._init_actions([PlanAction])
def _init_reply_message(self, received_message: AgentMessage):
reply_message = super()._init_reply_message(received_message)
reply_message.context = {
"agents": "\n".join([f"- {item.role}:{item.desc}" for item in self.agents]),
}
return reply_message
def bind_agents(self, agents: List[ConversableAgent]) -> ConversableAgent:
"""Bind the agents to the planner agent."""
self.agents = agents
for agent in self.agents:
if agent.resources and len(agent.resources) > 0:
self.resources.extend(agent.resources)
return self
def prepare_act_param(self) -> Dict[str, Any]:
"""Prepare the parameters for the act method."""
return {
"context": self.not_null_agent_context,
"plans_memory": self.memory.plans_memory,
}
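
A short sketch mirroring how AutoPlanChatManager.act() (in team_auto_plan.py) builds and queries this planner; the bound memory, agent_context, llm_config and resource_loader are assumed to be prepared by the caller.

async def make_plan(manager, team_agents, user_goal,
                    memory, agent_context, llm_config, resource_loader):
    planner = (
        await PlannerAgent()
        .bind(memory)
        .bind(agent_context)
        .bind(llm_config)
        .bind(resource_loader)
        .bind_agents(team_agents)  # fills {{ agents }} in the expand prompt
        .build()
    )
    # Ask the planner for a task plan; its PlanAction persists it into plans_memory.
    return await planner.generate_reply(
        received_message=AgentMessage(content=user_goal, current_goal=user_goal),
        sender=manager,
    )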


@@ -0,0 +1,312 @@
"""Auto plan chat manager agent."""
import logging
from typing import Dict, List, Optional, Tuple
from dbgpt.core.interface.message import ModelMessageRoleType
from ..action.base import ActionOutput
from ..agent import Agent, AgentMessage
from ..agent_manage import mentioned_agents, participant_roles
from ..base_agent import ConversableAgent
from ..base_team import ManagerAgent
from ..memory.gpts.base import GptsPlan
from ..plan.planner_agent import PlannerAgent
from ..profile import DynConfig, ProfileConfig
from ..schema import Status
logger = logging.getLogger(__name__)
class AutoPlanChatManager(ManagerAgent):
"""A chat manager agent that can manage a team chat of multiple agents."""
profile: ProfileConfig = ProfileConfig(
name=DynConfig(
"AutoPlanChatManager",
category="agent",
key="dbgpt_agent_plan_team_auto_plan_profile_name",
),
role=DynConfig(
"PlanManager",
category="agent",
key="dbgpt_agent_plan_team_auto_plan_profile_role",
),
goal=DynConfig(
"Advance the task plan generated by the planning agent. If the plan "
"does not pre-allocate an agent, it needs to be coordinated with the "
"appropriate agent to complete.",
category="agent",
key="dbgpt_agent_plan_team_auto_plan_profile_goal",
),
desc=DynConfig(
"Advance the task plan generated by the planning agent.",
category="agent",
key="dbgpt_agent_plan_team_auto_plan_profile_desc",
),
)
def __init__(self, **kwargs):
"""Create a new AutoPlanChatManager instance."""
super().__init__(**kwargs)
async def process_rely_message(
self, conv_id: str, now_plan: GptsPlan, speaker: Agent
):
"""Process the dependent message."""
rely_prompt = None
rely_messages: List[Dict] = []
if now_plan.rely and len(now_plan.rely) > 0:
rely_tasks_list = now_plan.rely.split(",")
rely_tasks_list_int = [int(i) for i in rely_tasks_list]
rely_tasks = self.memory.plans_memory.get_by_conv_id_and_num(
conv_id, rely_tasks_list_int
)
if rely_tasks:
rely_prompt = (
"Read the result data of the dependent steps in the above"
" historical message to complete the current goal:"
)
for rely_task in rely_tasks:
rely_messages.append(
{
"content": rely_task.sub_task_content,
"role": ModelMessageRoleType.HUMAN,
"name": rely_task.sub_task_agent,
}
)
rely_messages.append(
{
"content": rely_task.result,
"role": ModelMessageRoleType.AI,
"name": rely_task.sub_task_agent,
}
)
return rely_prompt, rely_messages
def select_speaker_msg(self, agents: List[Agent]) -> str:
"""Return the message for selecting the next speaker."""
agent_names = [agent.name for agent in agents]
return (
"You are in a role play game. The following roles are available:\n"
f" {participant_roles(agents)}.\n"
" Read the following conversation.\n"
f" Then select the next role from {agent_names} to play.\n"
" The role can be selected repeatedly.Only return the role."
)
async def select_speaker(
self,
last_speaker: Agent,
selector: Agent,
now_goal_context: Optional[str] = None,
pre_allocated: Optional[str] = None,
) -> Tuple[Agent, Optional[str]]:
"""Select the next speaker."""
agents = self.agents
if pre_allocated:
            # Use the pre-allocated speaker
            logger.info(f"Pre-allocated speaker: {pre_allocated}")
name = pre_allocated
model = None
else:
# auto speaker selection
            # TODO: The selector's a_thinking has been overwritten and cannot be used here.
agent_names = [agent.name for agent in agents]
            final_name, model = await selector.thinking(
messages=[
AgentMessage(
role=ModelMessageRoleType.HUMAN,
content="Read and understand the following task content and"
" assign the appropriate role to complete the task.\n"
f"Task content: {now_goal_context},\n"
f"Select the role from: {agent_names},\n"
f"Please only return the role, such as: {agents[0].name}",
)
],
prompt=self.select_speaker_msg(agents),
)
            if not final_name:
                raise ValueError("Unable to select next speaker!")
            name = final_name
# If exactly one agent is mentioned, use it. Otherwise, leave the OAI response
# unmodified
mentions = mentioned_agents(name, agents)
if len(mentions) == 1:
name = next(iter(mentions))
else:
logger.warning(
"GroupChat select_speaker failed to resolve the next speaker's name. "
f"This is because the speaker selection OAI call returned:\n{name}"
)
# Return the result
try:
return self.agent_by_name(name), model
except Exception as e:
logger.exception(f"auto select speaker failed!{str(e)}")
raise ValueError("Unable to select next speaker!")
async def act(
self,
message: Optional[str],
sender: Optional[Agent] = None,
reviewer: Optional[Agent] = None,
**kwargs,
) -> Optional[ActionOutput]:
"""Perform an action based on the received message."""
if not sender:
return ActionOutput(
is_exe_success=False,
content="The sender cannot be empty!",
)
speaker: Agent = sender
final_message = message
for i in range(self.max_round):
if not self.memory:
return ActionOutput(
is_exe_success=False,
content="The memory cannot be empty!",
)
plans = self.memory.plans_memory.get_by_conv_id(
self.not_null_agent_context.conv_id
)
if not plans or len(plans) <= 0:
if i > 3:
return ActionOutput(
is_exe_success=False,
content="Retrying 3 times based on current application "
"resources still fails to build a valid plan",
)
planner: ConversableAgent = (
await PlannerAgent()
.bind(self.memory)
.bind(self.agent_context)
.bind(self.llm_config)
.bind(self.resource_loader)
.bind_agents(self.agents)
.build()
)
plan_message = await planner.generate_reply(
received_message=AgentMessage.from_llm_message(
{"content": message}
),
sender=self,
reviewer=reviewer,
)
await planner.send(
message=plan_message, recipient=self, request_reply=False
)
else:
todo_plans = [
plan
for plan in plans
if plan.state in [Status.TODO.value, Status.RETRYING.value]
]
if not todo_plans or len(todo_plans) <= 0:
# The plan has been fully executed and a success message is sent
# to the user.
# complete
return ActionOutput(
is_exe_success=True,
content=final_message, # work results message
)
else:
try:
now_plan: GptsPlan = todo_plans[0]
current_goal_message = AgentMessage(
content=now_plan.sub_task_content,
current_goal=now_plan.sub_task_content,
context={
"plan_task": now_plan.sub_task_content,
"plan_task_num": now_plan.sub_task_num,
},
)
# select the next speaker
speaker, model = await self.select_speaker(
speaker,
self,
now_plan.sub_task_content,
now_plan.sub_task_agent,
)
# Tell the speaker the dependent history information
rely_prompt, rely_messages = await self.process_rely_message(
conv_id=self.not_null_agent_context.conv_id,
now_plan=now_plan,
speaker=speaker,
)
if rely_prompt:
current_goal_message.content = (
rely_prompt + current_goal_message.content
)
await self.send(
message=current_goal_message,
recipient=speaker,
reviewer=reviewer,
request_reply=False,
)
agent_reply_message = await speaker.generate_reply(
received_message=current_goal_message,
sender=self,
reviewer=reviewer,
rely_messages=AgentMessage.from_messages(rely_messages),
)
is_success = agent_reply_message.success
reply_message = agent_reply_message.to_llm_message()
await speaker.send(
agent_reply_message, self, reviewer, request_reply=False
)
plan_result = ""
final_message = reply_message["content"]
if is_success:
if reply_message:
action_report = agent_reply_message.action_report
if action_report:
plan_result = action_report.get("content", "")
final_message = action_report["view"]
# The current planned Agent generation verification is
# successful
# Plan executed successfully
self.memory.plans_memory.complete_task(
self.not_null_agent_context.conv_id,
now_plan.sub_task_num,
plan_result,
)
else:
plan_result = reply_message["content"]
self.memory.plans_memory.update_task(
self.not_null_agent_context.conv_id,
now_plan.sub_task_num,
Status.FAILED.value,
now_plan.retry_times + 1,
speaker.name,
"",
plan_result,
)
return ActionOutput(
is_exe_success=False, content=plan_result
)
except Exception as e:
logger.exception(
f"An exception was encountered during the execution of the"
f" current plan step.{str(e)}"
)
return ActionOutput(
is_exe_success=False,
content=f"An exception was encountered during the execution"
f" of the current plan step.{str(e)}",
)
return ActionOutput(
is_exe_success=False,
content=f"Maximum number of dialogue rounds exceeded.{self.max_round}",
)
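
Finally, a hypothetical end-to-end sketch (not part of this commit): the manager first asks PlannerAgent for a plan, then advances it step by step via select_speaker(); hire() and the prepared context objects come from outside this diff.

async def run_auto_plan_team(
    user_proxy, data_scientist, reporter,
    memory, llm_config, agent_context, resource_loader,
):
    manager = (
        await AutoPlanChatManager()
        .bind(memory)
        .bind(llm_config)
        .bind(agent_context)
        .bind(resource_loader)
        .build()
    )
    manager.hire([data_scientist, reporter])
    return await manager.act(
        "Help me build a sales report summarizing our key metrics and trends",
        sender=user_proxy,
        reviewer=None,
    )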