feat: (0.6)New UI (#1855)

Co-authored-by: 夏姜 <wenfengjiang.jwf@digital-engine.com>
Co-authored-by: aries_ckt <916701291@qq.com>
Co-authored-by: wb-lh513319 <wb-lh513319@alibaba-inc.com>
Co-authored-by: csunny <cfqsunny@163.com>
This commit is contained in:
明天
2024-08-21 17:37:45 +08:00
committed by GitHub
parent 3fc82693ba
commit b124ecc10b
824 changed files with 93371 additions and 2515 deletions

View File

@@ -1,9 +1,15 @@
"""Agent Operator for AWEL."""
import logging
from abc import ABC
from typing import List, Optional, Type
from typing import Dict, List, Optional, Type
from dbgpt.core.awel import MapOperator
from dbgpt.core.awel import (
BranchFunc,
BranchJoinOperator,
BranchOperator,
BranchTaskType,
MapOperator,
)
from dbgpt.core.awel.flow import (
IOField,
OperatorCategory,
@@ -16,7 +22,10 @@ from dbgpt.core.interface.message import ModelMessageRoleType
# TODO: Don't dependent on MixinLLMOperator
from dbgpt.model.operators.llm_operator import MixinLLMOperator
from dbgpt.serve.prompt.api.endpoints import get_service
from dbgpt.util.i18n_utils import _
from .... import ActionOutput
from ....resource.manage import get_resource_manager
from ....util.llm.llm import LLMConfig
from ...agent import Agent, AgentGenerateContext, AgentMessage
@@ -24,6 +33,8 @@ from ...agent_manage import get_agent_manager
from ...base_agent import ConversableAgent
from .agent_operator_resource import AWELAgent
logger = logging.getLogger()
class BaseAgentOperator:
"""The abstract operator for an Agent."""
@@ -164,10 +175,23 @@ class AWELAgentOperator(
input_value: AgentGenerateContext,
) -> AgentGenerateContext:
"""Trigger agent to generate a reply."""
if input_value.already_failed:
return input_value
if not input_value.message:
raise ValueError("The message is empty.")
input_message = input_value.message.copy()
input_message.rounds = input_message.rounds + 1
agent = await self.get_agent(input_value)
is_retry_chat = False
# 检测awel flow的启动位置如果还没启动当前不执行要匹配到启动点才开始执行如果已经启动则当前需要执行
if input_value.begin_agent and not input_value.already_started:
if agent.role != input_value.begin_agent:
return input_value
else:
is_retry_chat = True
if agent.fixed_subgoal and len(agent.fixed_subgoal) > 0:
# Isolate the message delivery mechanism and pass it to the operator
current_goal = f"[{agent.name if agent.name else agent.role}]:"
@@ -200,19 +224,19 @@ class AWELAgentOperator(
agent_reply_message = await agent.generate_reply(
received_message=input_message,
sender=sender,
sender=input_value.sender,
reviewer=input_value.reviewer,
rely_messages=input_value.rely_messages,
is_retry_chat=is_retry_chat,
last_speaker_name=input_value.begin_agent,
)
if not isinstance(agent_reply_message, AgentMessage):
raise ValueError(agent_reply_message)
already_failed = False
is_success = agent_reply_message.success
if not is_success:
raise ValueError(
f"The task failed at step {agent.role} and the attempt to "
f"repair it failed. The final reason for "
f"failure:{agent_reply_message.content}!"
)
already_failed = True
# What is sent is an AI message
ai_message: AgentMessage = agent_reply_message.copy()
@@ -230,7 +254,10 @@ class AWELAgentOperator(
memory=input_value.memory.structure_clone() if input_value.memory else None,
agent_context=input_value.agent_context,
llm_client=input_value.llm_client,
round_index=agent.consecutive_auto_reply_counter,
begin_agent=None,
already_failed=already_failed,
last_speaker=agent,
already_started=True,
)
async def get_agent(
@@ -262,6 +289,13 @@ class AWELAgentOperator(
if self.awel_agent.fixed_subgoal:
kwargs["fixed_subgoal"] = self.awel_agent.fixed_subgoal
prompt_template = None
if self.awel_agent.agent_prompt:
prompt_service = get_service()
prompt_template = prompt_service.get_template(
self.awel_agent.agent_prompt.code
)
resource = get_resource_manager().build_resource(self.awel_agent.resources)
agent = (
await agent_cls(**kwargs)
@@ -269,7 +303,8 @@ class AWELAgentOperator(
.bind(llm_config)
.bind(input_value.agent_context)
.bind(resource)
.build()
.bind(prompt_template)
.build(is_retry_chat=bool(input_value.begin_agent))
)
return agent
@@ -309,3 +344,101 @@ class AgentDummyTrigger(Trigger):
async def trigger(self, **kwargs) -> None:
"""Trigger the DAG. Not used in HttpTrigger."""
raise NotImplementedError("Dummy trigger does not support trigger.")
class AgentBranchOperator(BranchOperator[AgentGenerateContext, AgentGenerateContext]):
    """Branch operator that routes an agent request to the next speaker's task.

    The branch decision is based on the ``next_speakers`` field of the action
    report attached to the last relied message of the incoming context: only
    downstream ``AWELAgentOperator`` nodes whose agent profile is listed there
    receive the request.
    """

    metadata = ViewMetadata(
        label=_("Agent Branch Operator"),
        name="agent_branch_operator",
        category=OperatorCategory.AGENT,
        operator_type=OperatorType.BRANCH,
        description=_(
            "Branch the workflow based on the agent action report next "
            "speakers of the request."
        ),
        parameters=[],
        inputs=[
            IOField.build_from(
                _("Agent Request"),
                "input_value",
                AgentGenerateContext,
                description=_("The input value of the operator."),
            ),
        ],
        outputs=[
            IOField.build_from(
                _("Agent Request"),
                "output_value",
                AgentGenerateContext,
                description=_("The agent request to agent Operator."),
            ),
        ],
    )

    def __init__(self, **kwargs):
        """Create the agent branch operator."""
        super().__init__(**kwargs)

    async def branches(
        self,
    ) -> Dict[BranchFunc[AgentGenerateContext], BranchTaskType]:
        """Return one branch predicate per downstream agent task.

        Each predicate answers: is this task's agent profile among the
        ``next_speakers`` of the last relied message's action report?
        """
        agent_nodes: List[AWELAgentOperator] = [
            node for node in self.downstream if isinstance(node, AWELAgentOperator)
        ]
        branch_func_map = {}
        for task_node in set(agent_nodes):
            agent_name = task_node.awel_agent.agent_profile

            # Bind agent_name via a default argument so each closure keeps its
            # own value (late-binding closures would all see the last name).
            def check(r: AgentGenerateContext, outer_task_name=agent_name):
                # Guard against an empty rely chain instead of raising IndexError.
                if not r.rely_messages:
                    return False
                last_message = r.rely_messages[-1]
                action_output: Optional[ActionOutput] = last_message.action_report
                if not action_output or not action_output.next_speakers:
                    return False
                return outer_task_name in action_output.next_speakers

            branch_func_map[check] = task_node.node_name
        return branch_func_map  # type: ignore
class AgentBranchJoinOperator(BranchJoinOperator[AgentGenerateContext]):
    """The Agent Branch Join Operator.

    Collects the outputs of the agent branch tasks and keeps the first
    non-empty ``AgentGenerateContext``.
    """

    metadata = ViewMetadata(
        label=_("Agent Branch Join Operator"),
        name="agent_branch_join_operator",
        category=OperatorCategory.AGENT,
        operator_type=OperatorType.JOIN,
        description=_("Just keep the first non-empty output."),
        parameters=[],
        inputs=[
            IOField.build_from(
                _("Agent Output"),
                "agent_output",
                AgentGenerateContext,
                description=_("The Agent output."),
            ),
        ],
        outputs=[
            IOField.build_from(
                _("Branch Output"),
                "agent_output_value",
                AgentGenerateContext,
                description=_("The output value of the operator."),
            ),
        ],
    )

    def __init__(self, **kwargs):
        """Create a new agent branch join operator."""
        super().__init__(**kwargs)

View File

@@ -1,5 +1,5 @@
"""The AWEL Agent Operator Resource."""
"""Agent operator define."""
import json
from typing import Any, Dict, List, Optional
from dbgpt._private.pydantic import BaseModel, ConfigDict, Field, model_validator
@@ -11,13 +11,24 @@ from dbgpt.core.awel.flow import (
ResourceCategory,
register_resource,
)
from dbgpt.serve.prompt.api.endpoints import get_service
from ....resource.base import AgentResource
from ....resource.base import AgentResource, ResourceType
from ....resource.manage import get_resource_manager
from ....util.llm.llm import LLMConfig, LLMStrategyType
from ...agent_manage import get_agent_manager
def _agent_resource_prompt_values() -> List[OptionValue]:
    """Build the selectable prompt options from the prompt service.

    Prompts without a ``prompt_code`` are skipped.
    """
    service = get_service()
    options: List[OptionValue] = []
    for prompt in service.get_target_prompt():
        if prompt.prompt_code:
            options.append(
                OptionValue(
                    label=prompt.prompt_name,
                    name=prompt.prompt_code,
                    value=prompt.prompt_code,
                )
            )
    return options
def _load_resource_types():
    """Return option values for all supported agent resource types."""
    resources = get_resource_manager().get_supported_resources()
    # Iterating the mapping directly yields its keys; `.keys()` was redundant.
    return [OptionValue(label=item, name=item, value=item) for item in resources]
@@ -56,7 +67,7 @@ def _load_resource_types():
],
alias=[
"dbgpt.serve.agent.team.layout.agent_operator_resource.AwelAgentResource",
"dbgpt.agent.plan.awel.agent_operator_resource.AWELAgentResource",
"dbgpt.agent.core.plan.awel.agent_operator_resource.AWELAgentResource",
],
)
class AWELAgentResource(AgentResource):
@@ -73,12 +84,121 @@ class AWELAgentResource(AgentResource):
value = values.pop("agent_resource_value")
values["name"] = name
values["type"] = type
values["type"] = ResourceType(type)
values["value"] = value
return values
def _agent_resource_knowledge_values(
    user_id: Optional[str] = None,
) -> List[OptionValue]:
    """List knowledge-space options, filtered by ``user_id`` when given."""
    # Imported locally to avoid a module-level dependency on the app layer.
    from dbgpt.app.knowledge.api import knowledge_space_service
    from dbgpt.app.knowledge.request.request import KnowledgeSpaceRequest

    spaces = knowledge_space_service.get_knowledge_space(
        KnowledgeSpaceRequest(user_id=user_id)
    )
    options = []
    for space in spaces:
        space_id = str(space.id)
        options.append(OptionValue(label=space.name, name=space_id, value=space_id))
    return options
@register_resource(
    label="Awel Agent Resource Knowledge",
    name="agent_resource_knowledge",
    description="The Agent Resource Knowledge.",
    category=ResourceCategory.AGENT,
    parameters=[
        Parameter.build_from(
            label="Agent Resource Name",
            name="agent_resource_name",
            type=str,
            optional=True,
            default=None,
            description="The agent resource name.",
        ),
        Parameter.build_from(
            label="Agent Resource Value",
            name="agent_resource_value",
            type=str,
            optional=True,
            default=None,
            description="The agent resource value.",
            options=FunctionDynamicOptions(func=_agent_resource_knowledge_values),
        ),
        Parameter.build_from(
            label="Agent Knowledge Resource Recall TopK",
            name="agent_resource_recall_topk",
            # NOTE(review): declared type is str but the default is int 3 —
            # confirm the flow layer coerces this consistently.
            type=str,
            default=3,
            optional=True,
            description="The agent resource recall topk.",
        ),
    ],
    alias=[
        "dbgpt.serve.agent.team.layout.agent_operator_resource.AwelAgentKnowledgeResource",  # noqa
        "dbgpt.agent.core.plan.awel.agent_operator_resource.AWELAgentKnowledgeResource",
    ],
)
class AWELAgentKnowledgeResource(AgentResource):
    """Agent resource bound to a knowledge space."""

    @model_validator(mode="before")
    def pre_fill(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Map the flow-parameter keys onto the AgentResource fields.

        Falls back to already-canonical ``name``/``value`` keys so that
        re-validating an instance built from canonical fields does not raise
        ``KeyError``.
        """
        value = values.pop("agent_resource_value", values.get("value"))
        name = values.pop("agent_resource_name", values.get("name"))
        # pop with default replaces the membership-test-then-pop pattern.
        top_k = values.pop("agent_resource_recall_topk", None)
        values["type"] = ResourceType.Knowledge
        values["value"] = value
        values["name"] = name
        if top_k:
            values["context"] = {"top_k": int(top_k)}
        return values
@register_resource(
    label="Awel Agent Prompt",
    name="agent_prompt",
    description="The Agent Prompt.",
    category=ResourceCategory.AGENT,
    parameters=[
        Parameter.build_from(
            label="Agent Prompt Code",
            name="agent_prompt_code",
            type=str,
            optional=True,
            default=None,
            description="The agent prompt code.",
            options=FunctionDynamicOptions(func=_agent_resource_prompt_values),
        ),
    ],
    alias=[
        "dbgpt.serve.agent.team.layout.agent_operator_resource.AgentPrompt",
        "dbgpt.agent.core.plan.awel.agent_operator_resource.AgentPrompt",
    ],
)
class AgentPrompt(BaseModel):
    """Agent Prompt resource.

    Wraps the code of a prompt template managed by the prompt service.
    """

    # The prompt template code used to look up the template.
    code: str

    @model_validator(mode="before")
    def pre_fill(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Map the flow-parameter key ``agent_prompt_code`` onto ``code``.

        Falls back to an already-set ``code`` key so that constructing the
        model with its canonical field name does not raise ``KeyError``.
        """
        values["code"] = values.pop("agent_prompt_code", values.get("code"))
        return values
@register_resource(
label="AWEL Agent LLM Config",
name="agent_operator_llm_config",
@@ -116,13 +236,21 @@ class AWELAgentResource(AgentResource):
],
alias=[
"dbgpt.serve.agent.team.layout.agent_operator_resource.AwelAgentConfig",
"dbgpt.agent.plan.awel.agent_operator_resource.AWELAgentConfig",
"dbgpt.agent.core.plan.awel.agent_operator_resource.AWELAgentConfig",
],
)
class AWELAgentConfig(LLMConfig):
"""AWEL Agent Config."""
pass
@model_validator(mode="before")
def pre_fill(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Prefill the agent ResourceType."""
strategy_context = values.pop("strategy_context")
if strategy_context is not None:
values["strategy_context"] = json.dumps(strategy_context.split(","))
return values
def _agent_resource_option_values() -> List[OptionValue]:
@@ -164,7 +292,7 @@ def _agent_resource_option_values() -> List[OptionValue]:
Parameter.build_from(
label="Agent Resource",
name="agent_resource",
type=AWELAgentResource,
type=AgentResource,
optional=True,
default=None,
description="The agent resource.",
@@ -177,10 +305,18 @@ def _agent_resource_option_values() -> List[OptionValue]:
default=None,
description="The agent llm config.",
),
Parameter.build_from(
label="Agent Prompt",
name="agent_prompt",
type=AgentPrompt,
optional=True,
default=None,
description="The agent prompt.",
),
],
alias=[
"dbgpt.serve.agent.team.layout.agent_operator_resource.AwelAgent",
"dbgpt.agent.plan.awel.agent_operator_resource.AWELAgent",
"dbgpt.agent.core.plan.awel.agent_operator_resource.AWELAgent",
],
)
class AWELAgent(BaseModel):
@@ -191,6 +327,7 @@ class AWELAgent(BaseModel):
agent_profile: str
role_name: Optional[str] = None
llm_config: Optional[LLMConfig] = None
agent_prompt: Optional[AgentPrompt] = None
resources: List[AgentResource] = Field(default_factory=list)
fixed_subgoal: Optional[str] = None
@@ -202,6 +339,10 @@ class AWELAgent(BaseModel):
return values
resource = values.pop("agent_resource")
llm_config = values.pop("agent_llm_Config")
if "agent_prompt" in values:
agent_prompt = values.pop("agent_prompt")
else:
agent_prompt = None
if resource is not None:
values["resources"] = [resource]
@@ -209,4 +350,6 @@ class AWELAgent(BaseModel):
if llm_config is not None:
values["llm_config"] = llm_config
if agent_prompt is not None:
values["agent_prompt"] = agent_prompt
return values

View File

@@ -104,9 +104,32 @@ class AWELBaseManager(ManagerAgent, ABC):
),
)
    async def _a_process_received_message(self, message: AgentMessage, sender: Agent):
        """Process the received message.

        Deliberate no-op: this AWEL manager does not record incoming messages
        here — reply generation is triggered from ``receive`` instead.
        """
        pass
async def receive(
self,
message: AgentMessage,
sender: Agent,
reviewer: Optional[Agent] = None,
request_reply: Optional[bool] = None,
silent: Optional[bool] = False,
is_recovery: Optional[bool] = False,
is_retry_chat: bool = False,
last_speaker_name: Optional[str] = None,
) -> None:
"""Recive message by base team."""
if request_reply is False or request_reply is None:
return
if not self.is_human:
await self.generate_reply(
received_message=message,
sender=sender,
reviewer=reviewer,
is_retry_chat=is_retry_chat,
last_speaker_name=last_speaker_name,
)
# if reply is not None:
# await self.a_send(reply, sender)
@abstractmethod
def get_dag(self) -> DAG:
@@ -114,11 +137,13 @@ class AWELBaseManager(ManagerAgent, ABC):
async def act(
self,
message: Optional[str],
sender: Optional[Agent] = None,
message: AgentMessage,
sender: Agent,
reviewer: Optional[Agent] = None,
is_retry_chat: bool = False,
last_speaker_name: Optional[str] = None,
**kwargs,
) -> Optional[ActionOutput]:
) -> ActionOutput:
"""Perform the action."""
try:
agent_dag = self.get_dag()
@@ -127,44 +152,62 @@ class AWELBaseManager(ManagerAgent, ABC):
)
start_message_context: AgentGenerateContext = AgentGenerateContext(
message=AgentMessage(content=message, current_goal=message),
message=message,
sender=sender,
reviewer=reviewer,
memory=self.memory.structure_clone(),
agent_context=self.agent_context,
begin_agent=last_speaker_name if is_retry_chat else None,
llm_client=self.not_null_llm_config.llm_client,
)
final_generate_context: AgentGenerateContext = await last_node.call(
call_data=start_message_context
)
last_message = final_generate_context.rely_messages[-1]
last_agent = await last_node.get_agent(final_generate_context)
if final_generate_context.round_index is not None:
last_agent.consecutive_auto_reply_counter = (
final_generate_context.round_index
)
if not sender:
raise ValueError("sender is required!")
await last_agent.send(
last_message, sender, start_message_context.reviewer, False
last_message.rounds = last_message.rounds + 1
if not final_generate_context.last_speaker:
raise ValueError("Dont have last speaker agent!")
await final_generate_context.last_speaker.send(
last_message,
sender,
start_message_context.reviewer,
False,
is_retry_chat=is_retry_chat,
last_speaker_name=last_speaker_name,
)
view_message: Optional[str] = None
view_message = None
if last_message.action_report:
view_message = last_message.action_report.get("view", None)
if last_message.action_report.view:
view_message = last_message.action_report.view
else:
view_message = last_message.action_report.content
return ActionOutput(
content=last_message.content,
view=view_message,
)
except Exception as e:
logger.exception(f"DAG run failed!{str(e)}")
return ActionOutput(
failed_out = ActionOutput(
is_exe_success=False,
content=f"Failed to complete goal! {str(e)}",
content=f"{str(e)}",
have_retry=False,
)
failed_message = AgentMessage.from_llm_message(
{
"content": f"{str(e)}",
"rounds": 999,
}
)
await self.send(
failed_message,
sender,
reviewer,
False,
is_retry_chat=is_retry_chat,
last_speaker_name=last_speaker_name,
)
return failed_out
class WrappedAWELLayoutManager(AWELBaseManager):
@@ -198,11 +241,13 @@ class WrappedAWELLayoutManager(AWELBaseManager):
async def act(
self,
message: Optional[str],
sender: Optional[Agent] = None,
message: AgentMessage,
sender: Agent,
reviewer: Optional[Agent] = None,
is_retry_chat: bool = False,
last_speaker_name: Optional[str] = None,
**kwargs,
) -> Optional[ActionOutput]:
) -> ActionOutput:
"""Perform the action."""
try:
dag = self.get_dag()
@@ -210,27 +255,37 @@ class WrappedAWELLayoutManager(AWELBaseManager):
WrappedAgentOperator, dag.leaf_nodes[0]
)
start_message_context: AgentGenerateContext = AgentGenerateContext(
message=AgentMessage(content=message, current_goal=message),
sender=self,
message=message,
sender=sender,
reviewer=reviewer,
memory=self.memory,
agent_context=self.agent_context,
begin_agent=last_speaker_name if is_retry_chat else None,
llm_client=self.not_null_llm_client,
)
final_generate_context: AgentGenerateContext = await last_node.call(
call_data=start_message_context
)
last_message = final_generate_context.rely_messages[-1]
last_agent = last_node.agent
await last_agent.send(
last_message = final_generate_context.rely_messages[-1]
last_message.rounds = last_message.rounds + 1
if not final_generate_context.last_speaker:
raise ValueError("Not have last speaker!")
await final_generate_context.last_speaker.send(
last_message,
self,
sender,
start_message_context.reviewer,
False,
is_retry_chat=is_retry_chat,
last_speaker_name=last_speaker_name,
)
view_message: Optional[str] = None
view_message = None
if last_message.action_report:
view_message = last_message.action_report.get("view", None)
if last_message.action_report.view:
view_message = last_message.action_report.view
else:
view_message = last_message.action_report.content
return ActionOutput(
content=last_message.content,
view=view_message,
@@ -238,10 +293,26 @@ class WrappedAWELLayoutManager(AWELBaseManager):
except Exception as e:
logger.exception(f"DAG run failed!{str(e)}")
return ActionOutput(
failed_out = ActionOutput(
is_exe_success=False,
content=f"Failed to complete goal! {str(e)}",
content=f"{str(e)}",
have_retry=False,
)
failed_message = AgentMessage.from_llm_message(
{
"content": f"{str(e)}",
"rounds": 999,
}
)
await self.send(
failed_message,
sender,
reviewer,
False,
is_retry_chat=is_retry_chat,
last_speaker_name=last_speaker_name,
)
return failed_out
class DefaultAWELLayoutManager(AWELBaseManager):

View File

@@ -5,7 +5,7 @@ from typing import Any, Dict, List, Optional
from dbgpt._private.pydantic import Field
from ...resource.pack import ResourcePack
from ..agent import AgentMessage
from ..agent import Agent, AgentMessage
from ..base_agent import ConversableAgent
from ..plan.plan_action import PlanAction
from ..profile import DynConfig, ProfileConfig
@@ -143,7 +143,11 @@ assistants:[
super().__init__(**kwargs)
self._init_actions([PlanAction])
def _init_reply_message(self, received_message: AgentMessage):
def _init_reply_message(
self,
received_message: AgentMessage,
rely_messages: Optional[List[AgentMessage]] = None,
) -> AgentMessage:
reply_message = super()._init_reply_message(received_message)
reply_message.context = {
"agents": "\n".join([f"- {item.role}:{item.desc}" for item in self.agents]),
@@ -160,16 +164,12 @@ assistants:[
self.resource = ResourcePack(resources)
return self
async def generate_resource_variables(
self, question: Optional[str] = None
def prepare_act_param(
self,
received_message: Optional[AgentMessage],
sender: Agent,
rely_messages: Optional[List[AgentMessage]] = None,
) -> Dict[str, Any]:
"""Generate the resource variables."""
out_schema: Optional[str] = None
if self.actions and len(self.actions) > 0:
out_schema = self.actions[0].ai_out_schema
return {"out_schema": out_schema}
def prepare_act_param(self) -> Dict[str, Any]:
"""Prepare the parameters for the act method."""
return {
"context": self.not_null_agent_context,

View File

@@ -153,11 +153,13 @@ class AutoPlanChatManager(ManagerAgent):
async def act(
self,
message: Optional[str],
sender: Optional[Agent] = None,
message: AgentMessage,
sender: Agent,
reviewer: Optional[Agent] = None,
is_retry_chat: bool = False,
last_speaker_name: Optional[str] = None,
**kwargs,
) -> Optional[ActionOutput]:
) -> ActionOutput:
"""Perform an action based on the received message."""
if not sender:
return ActionOutput(
@@ -165,7 +167,7 @@ class AutoPlanChatManager(ManagerAgent):
content="The sender cannot be empty!",
)
speaker: Agent = sender
final_message = message
final_message = message.content
for i in range(self.max_round):
if not self.memory:
return ActionOutput(
@@ -194,7 +196,7 @@ class AutoPlanChatManager(ManagerAgent):
plan_message = await planner.generate_reply(
received_message=AgentMessage.from_llm_message(
{"content": message}
{"content": message.content}
),
sender=self,
reviewer=reviewer,
@@ -269,8 +271,8 @@ class AutoPlanChatManager(ManagerAgent):
if reply_message:
action_report = agent_reply_message.action_report
if action_report:
plan_result = action_report.get("content", "")
final_message = action_report["view"]
plan_result = action_report.content
final_message = action_report.view
# The current planned Agent generation verification is
# successful