feat(agent): Release agent SDK (#1396)

dbgpt/agent/core/__init__.py  (new file, 1 line)
@@ -0,0 +1 @@
"""Core Module for the Agent."""

dbgpt/agent/core/agent.py  (new file, 304 lines)
@@ -0,0 +1,304 @@
"""Agent Interface."""

from __future__ import annotations

import dataclasses
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional, Tuple, Union

from dbgpt.core import LLMClient
from dbgpt.util.annotations import PublicAPI

from ..actions.action import ActionOutput
from ..memory.gpts_memory import GptsMemory
from ..resource.resource_loader import ResourceLoader


class Agent(ABC):
    """Agent Interface."""

    @abstractmethod
    async def send(
        self,
        message: AgentMessage,
        recipient: Agent,
        reviewer: Optional[Agent] = None,
        request_reply: Optional[bool] = True,
        is_recovery: Optional[bool] = False,
    ) -> None:
        """Send a message to the recipient agent.

        Args:
            message(AgentMessage): the message to be sent.
            recipient(Agent): the recipient agent.
            reviewer(Agent): the reviewer agent.
            request_reply(bool): whether to request a reply.
            is_recovery(bool): whether the message is a recovery message.

        Returns:
            None
        """

    @abstractmethod
    async def receive(
        self,
        message: AgentMessage,
        sender: Agent,
        reviewer: Optional[Agent] = None,
        request_reply: Optional[bool] = None,
        silent: Optional[bool] = False,
        is_recovery: Optional[bool] = False,
    ) -> None:
        """Receive a message from another agent.

        Args:
            message(AgentMessage): the received message.
            sender(Agent): the sender agent.
            reviewer(Agent): the reviewer agent.
            request_reply(bool): whether to request a reply.
            silent(bool): whether to be silent.
            is_recovery(bool): whether the message is a recovery message.

        Returns:
            None
        """

    @abstractmethod
    async def generate_reply(
        self,
        received_message: AgentMessage,
        sender: Agent,
        reviewer: Optional[Agent] = None,
        rely_messages: Optional[List[AgentMessage]] = None,
        **kwargs,
    ) -> AgentMessage:
        """Generate a reply based on the received messages.

        Args:
            received_message(AgentMessage): the received message.
            sender: the sender Agent instance.
            reviewer: the reviewer Agent instance.
            rely_messages: a list of messages the reply relies on.

        Returns:
            AgentMessage: the generated reply. If None, no reply is generated.
        """

    @abstractmethod
    async def thinking(
        self, messages: List[AgentMessage], prompt: Optional[str] = None
    ) -> Tuple[Optional[str], Optional[str]]:
        """Think about and reason over the current task goal.

        Based on the requirements of the current agent, reason about the current
        task goal through the LLM.

        Args:
            messages(List[AgentMessage]): the messages to reason over
            prompt(str): the prompt to reason with

        Returns:
            Tuple[Optional[str], Optional[str]]: The first element is the generated
                reply. If None, no reply is generated. The second element is the
                model name used for the current task.
        """

    @abstractmethod
    async def review(self, message: Optional[str], censored: Agent) -> Tuple[bool, Any]:
        """Review the message.

        Args:
            message: the message to be reviewed
            censored: the censoring agent

        Returns:
            bool: whether the message passes review
            Any: the censored message
        """

    @abstractmethod
    async def act(
        self,
        message: Optional[str],
        sender: Optional[Agent] = None,
        reviewer: Optional[Agent] = None,
        **kwargs,
    ) -> Optional[ActionOutput]:
        """Act based on the LLM inference results.

        Parse the inference results for the current target and execute them using
        the current agent's executor.

        Args:
            message: the message to be executed
            sender: the sender Agent instance.
            reviewer: the reviewer Agent instance.
            **kwargs:

        Returns:
            ActionOutput: the action output of the agent.
        """

    @abstractmethod
    async def verify(
        self,
        message: AgentMessage,
        sender: Agent,
        reviewer: Optional[Agent] = None,
        **kwargs,
    ) -> Tuple[bool, Optional[str]]:
        """Verify whether the current execution results meet the target expectations.

        Args:
            message: the message to be verified
            sender: the sender Agent instance.
            reviewer: the reviewer Agent instance.
            **kwargs:

        Returns:
            Tuple[bool, Optional[str]]: whether the verification succeeded and the
                verification result.
        """

    @abstractmethod
    def get_name(self) -> str:
        """Return the name of the agent."""

    @abstractmethod
    def get_profile(self) -> str:
        """Return the profile of the agent."""

    @abstractmethod
    def get_describe(self) -> str:
        """Return the description of the agent."""


@dataclasses.dataclass
class AgentContext:
    """A class to represent the context of an Agent."""

    conv_id: str
    gpts_app_name: Optional[str] = None
    language: Optional[str] = None
    max_chat_round: int = 100
    max_retry_round: int = 10
    max_new_tokens: int = 1024
    temperature: float = 0.5
    allow_format_str_template: Optional[bool] = False

    def to_dict(self) -> Dict[str, Any]:
        """Return a dictionary representation of the AgentContext."""
        return dataclasses.asdict(self)


@dataclasses.dataclass
@PublicAPI(stability="beta")
class AgentGenerateContext:
    """A class to represent the input of an Agent."""

    message: Optional[AgentMessage]
    sender: Optional[Agent] = None
    reviewer: Optional[Agent] = None
    silent: Optional[bool] = False

    rely_messages: List[AgentMessage] = dataclasses.field(default_factory=list)
    final: Optional[bool] = True

    memory: Optional[GptsMemory] = None
    agent_context: Optional[AgentContext] = None
    resource_loader: Optional[ResourceLoader] = None
    llm_client: Optional[LLMClient] = None

    round_index: Optional[int] = None

    def to_dict(self) -> Dict:
        """Return a dictionary representation of the AgentGenerateContext."""
        return dataclasses.asdict(self)


ActionReportType = Dict[str, Any]
MessageContextType = Union[str, Dict[str, Any]]


@dataclasses.dataclass
@PublicAPI(stability="beta")
class AgentReviewInfo:
    """Review info for agent communication."""

    approve: bool = False
    comments: Optional[str] = None

    def copy(self) -> "AgentReviewInfo":
        """Return a copy of the current AgentReviewInfo."""
        return AgentReviewInfo(approve=self.approve, comments=self.comments)


@dataclasses.dataclass
@PublicAPI(stability="beta")
class AgentMessage:
    """Message object for agent communication."""

    content: Optional[str] = None
    name: Optional[str] = None
    context: Optional[MessageContextType] = None
    action_report: Optional[ActionReportType] = None
    review_info: Optional[AgentReviewInfo] = None
    current_goal: Optional[str] = None
    model_name: Optional[str] = None
    role: Optional[str] = None
    success: Optional[bool] = None

    def to_dict(self) -> Dict:
        """Return a dictionary representation of the AgentMessage."""
        return dataclasses.asdict(self)

    def to_llm_message(self) -> Dict[str, Any]:
        """Return a dictionary representation in the LLM message format."""
        return {
            "content": self.content,
            "context": self.context,
            "role": self.role,
        }

    @classmethod
    def from_llm_message(cls, message: Dict[str, Any]) -> AgentMessage:
        """Create an AgentMessage object from a dictionary."""
        return cls(
            content=message.get("content"),
            context=message.get("context"),
            role=message.get("role"),
        )

    @classmethod
    def from_messages(cls, messages: List[Dict[str, Any]]) -> List[AgentMessage]:
        """Create a list of AgentMessage objects from a list of dictionaries."""
        results = []
        field_names = [f.name for f in dataclasses.fields(cls)]
        for message in messages:
            kwargs = {
                key: value for key, value in message.items() if key in field_names
            }
            results.append(cls(**kwargs))
        return results

    def copy(self) -> "AgentMessage":
        """Return a copy of the current AgentMessage."""
        copied_context: Optional[MessageContextType] = None
        if self.context:
            if isinstance(self.context, dict):
                copied_context = self.context.copy()
            else:
                copied_context = self.context
        copied_action_report = self.action_report.copy() if self.action_report else None
        copied_review_info = self.review_info.copy() if self.review_info else None
        return AgentMessage(
            content=self.content,
            name=self.name,
            context=copied_context,
            action_report=copied_action_report,
            review_info=copied_review_info,
            current_goal=self.current_goal,
            model_name=self.model_name,
            role=self.role,
            success=self.success,
        )
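
A short usage sketch (not part of the commit) showing how the AgentMessage helpers above round-trip between the agent layer and the plain LLM message format; all field values are illustrative.

# Illustrative only: round-tripping an AgentMessage through the LLM dict format.
msg = AgentMessage(content="Count the users table", role="human", current_goal="user stats")

llm_dict = msg.to_llm_message()  # {"content": ..., "context": None, "role": "human"}
restored = AgentMessage.from_llm_message(llm_dict)

# from_messages filters by dataclass field names, so unknown keys are dropped.
batch = AgentMessage.from_messages([{"content": "hi", "role": "human", "extra": 1}])

copied = msg.copy()  # dict context, action_report, and review_info are copied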

dbgpt/agent/core/agent_manage.py  (new file, 107 lines)
@@ -0,0 +1,107 @@
"""Manages the registration and retrieval of agents."""

import logging
import re
from collections import defaultdict
from typing import Dict, List, Type

from ..expand.code_assistant_agent import CodeAssistantAgent
from ..expand.dashboard_assistant_agent import DashboardAssistantAgent
from ..expand.data_scientist_agent import DataScientistAgent
from ..expand.plugin_assistant_agent import PluginAssistantAgent
from ..expand.summary_assistant_agent import SummaryAssistantAgent
from .agent import Agent

logger = logging.getLogger(__name__)


def participant_roles(agents: List[Agent]) -> str:
    """Return a string listing the roles of the agents."""
    # Default to all agents registered
    roles = []
    for agent in agents:
        roles.append(f"{agent.get_name()}: {agent.get_describe()}")
    return "\n".join(roles)


def mentioned_agents(message_content: str, agents: List[Agent]) -> Dict:
    """Return a dictionary mapping agent names to mention counts.

    Finds and counts agent mentions in the string message_content, taking word
    boundaries into account.

    Returns: A dictionary mapping agent names to mention counts (to be included,
        at least one mention must occur).
    """
    mentions = dict()
    for agent in agents:
        regex = (
            r"(?<=\W)" + re.escape(agent.get_name()) + r"(?=\W)"
        )  # Finds agent mentions, taking word boundaries into account
        count = len(
            re.findall(regex, " " + message_content + " ")
        )  # Pad the message to help with matching
        if count > 0:
            mentions[agent.get_name()] = count
    return mentions


class AgentManager:
    """Manages the registration and retrieval of agents."""

    def __init__(self):
        """Create a new AgentManager."""
        self._agents = defaultdict()

    def register_agent(self, cls):
        """Register an agent."""
        self._agents[cls().profile] = cls

    def get_by_name(self, name: str) -> Type[Agent]:
        """Return an agent class by name.

        Args:
            name (str): The name of the agent to retrieve.

        Returns:
            Type[Agent]: The agent class with the given name.

        Raises:
            ValueError: If no agent with the given name is registered.
        """
        if name not in self._agents:
            raise ValueError(f"Agent:{name} not registered!")
        return self._agents[name]

    def get_describe_by_name(self, name: str) -> str:
        """Return the description of an agent by name."""
        return self._agents[name].desc

    def all_agents(self):
        """Return a dictionary of all registered agents and their descriptions."""
        result = {}
        for name, cls in self._agents.items():
            result[name] = cls.desc
        return result

    def list_agents(self):
        """Return a list of all registered agents and their descriptions."""
        result = []
        for name, cls in self._agents.items():
            instance = cls()
            result.append(
                {
                    "name": instance.profile,
                    "desc": instance.goal,
                }
            )
        return result


agent_manager = AgentManager()

agent_manager.register_agent(CodeAssistantAgent)
agent_manager.register_agent(DashboardAssistantAgent)
agent_manager.register_agent(DataScientistAgent)
agent_manager.register_agent(SummaryAssistantAgent)
agent_manager.register_agent(PluginAssistantAgent)
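
A hypothetical registration sketch (not part of the commit): register_agent instantiates the class once to read its profile, so a registered class must be constructible with no arguments. MyAssistantAgent below is invented for illustration.

from dbgpt.agent.core.base_agent import ConversableAgent

class MyAssistantAgent(ConversableAgent):
    """Invented example agent; not part of this commit."""

    profile: str = "MyAssistant"
    goal: str = "answer domain-specific questions"
    desc: str = "A custom domain assistant."

agent_manager.register_agent(MyAssistantAgent)
assert agent_manager.get_by_name("MyAssistant") is MyAssistantAgent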

dbgpt/agent/core/base_agent.py  (new file, 765 lines)
@@ -0,0 +1,765 @@
"""Base agent class for conversable agents."""

import asyncio
import json
import logging
from typing import Any, Dict, List, Optional, Tuple, Type, Union, cast

from dbgpt._private.pydantic import Field
from dbgpt.core import LLMClient, ModelMessageRoleType
from dbgpt.util.error_types import LLMChatError
from dbgpt.util.utils import colored

from ..actions.action import Action, ActionOutput
from ..memory.base import GptsMessage
from ..memory.gpts_memory import GptsMemory
from ..resource.resource_api import AgentResource, ResourceClient
from ..resource.resource_loader import ResourceLoader
from .agent import Agent, AgentContext, AgentMessage, AgentReviewInfo
from .llm.llm import LLMConfig, LLMStrategyType
from .llm.llm_client import AIWrapper
from .role import Role

logger = logging.getLogger(__name__)


class ConversableAgent(Role, Agent):
    """ConversableAgent is an agent that can communicate with other agents."""

    agent_context: Optional[AgentContext] = Field(None, description="Agent context")
    actions: List[Action] = Field(default_factory=list)
    resources: List[AgentResource] = Field(default_factory=list)
    llm_config: Optional[LLMConfig] = None
    memory: GptsMemory = Field(default_factory=GptsMemory)
    resource_loader: Optional[ResourceLoader] = None
    max_retry_count: int = 3
    consecutive_auto_reply_counter: int = 0
    llm_client: Optional[AIWrapper] = None
    oai_system_message: List[Dict] = Field(default_factory=list)

    class Config:
        """Pydantic configuration."""

        arbitrary_types_allowed = True

    def __init__(self, **kwargs):
        """Create a new agent."""
        Role.__init__(self, **kwargs)
        Agent.__init__(self)

    def init_system_message(self) -> None:
        """Initialize the system message."""
        content = self.prompt_template()
        # TODO: Don't modify the original data, needs to be optimized
        self.oai_system_message = [
            {"content": content, "role": ModelMessageRoleType.SYSTEM}
        ]

    def check_available(self) -> None:
        """Check if the agent is available.

        Raises:
            ValueError: If the agent is not available.
        """
        self.identity_check()
        # check run context
        if self.agent_context is None:
            raise ValueError(
                f"{self.name}[{self.profile}] Missing context in which agent is "
                f"running!"
            )

        # resource check
        for resource in self.resources:
            if (
                self.resource_loader is None
                or self.resource_loader.get_resource_api(
                    resource.type, check_instance=False
                )
                is None
            ):
                raise ValueError(
                    f"Resource {resource.type}:{resource.value} is missing a resource"
                    f" loader implementation; unable to read resources!"
                )

        # action check
        if self.actions and len(self.actions) > 0:
            have_resource_types = [item.type for item in self.resources]
            for action in self.actions:
                if (
                    action.resource_need
                    and action.resource_need not in have_resource_types
                ):
                    raise ValueError(
                        f"{self.name}[{self.profile}] Missing resources required for "
                        "runtime!"
                    )
        else:
            if not self.is_human and not self.is_team:
                raise ValueError(
                    f"This agent {self.name}[{self.profile}] is missing action modules."
                )
        # llm check
        if not self.is_human and (
            self.llm_config is None or self.llm_config.llm_client is None
        ):
            raise ValueError(
                f"{self.name}[{self.profile}] Model configuration is missing or model "
                "service is unavailable!"
            )

    @property
    def not_null_agent_context(self) -> AgentContext:
        """Get the agent context.

        Returns:
            AgentContext: The agent context.

        Raises:
            ValueError: If the agent context is not initialized.
        """
        if not self.agent_context:
            raise ValueError("Agent context is not initialized!")
        return self.agent_context

    @property
    def not_null_resource_loader(self) -> ResourceLoader:
        """Get the resource loader."""
        if not self.resource_loader:
            raise ValueError("Resource loader is not initialized!")
        return self.resource_loader

    @property
    def not_null_llm_config(self) -> LLMConfig:
        """Get the LLM config."""
        if not self.llm_config:
            raise ValueError("LLM config is not initialized!")
        return self.llm_config

    @property
    def not_null_llm_client(self) -> LLMClient:
        """Get the LLM client."""
        llm_client = self.not_null_llm_config.llm_client
        if not llm_client:
            raise ValueError("LLM client is not initialized!")
        return llm_client

    async def preload_resource(self) -> None:
        """Preload resources before agent initialization."""
        pass

    async def build(self) -> "ConversableAgent":
        """Build the agent."""
        # Check if agent is available
        self.check_available()
        _language = self.not_null_agent_context.language
        if _language:
            self.language = _language

        # Preload resources
        await self.preload_resource()
        # Initialize resource loader
        for action in self.actions:
            action.init_resource_loader(self.resource_loader)

        # Initialize system messages
        self.init_system_message()

        # Initialize LLM Server
        if not self.is_human:
            if not self.llm_config or not self.llm_config.llm_client:
                raise ValueError("LLM client is not initialized!")
            self.llm_client = AIWrapper(llm_client=self.llm_config.llm_client)
        return self

    def bind(self, target: Any) -> "ConversableAgent":
        """Bind the resources to the agent."""
        if isinstance(target, LLMConfig):
            self.llm_config = target
        elif isinstance(target, GptsMemory):
            self.memory = target
        elif isinstance(target, AgentContext):
            self.agent_context = target
        elif isinstance(target, ResourceLoader):
            self.resource_loader = target
        elif isinstance(target, list) and target and len(target) > 0:
            if _is_list_of_type(target, Action):
                self.actions.extend(target)
            elif _is_list_of_type(target, AgentResource):
                self.resources = target
        return self

    async def send(
        self,
        message: AgentMessage,
        recipient: Agent,
        reviewer: Optional[Agent] = None,
        request_reply: Optional[bool] = True,
        is_recovery: Optional[bool] = False,
    ) -> None:
        """Send a message to the recipient agent."""
        await recipient.receive(
            message=message,
            sender=self,
            reviewer=reviewer,
            request_reply=request_reply,
            is_recovery=is_recovery,
        )

    async def receive(
        self,
        message: AgentMessage,
        sender: Agent,
        reviewer: Optional[Agent] = None,
        request_reply: Optional[bool] = None,
        silent: Optional[bool] = False,
        is_recovery: Optional[bool] = False,
    ) -> None:
        """Receive a message from another agent."""
        await self._a_process_received_message(message, sender)
        if request_reply is False or request_reply is None:
            return

        if not self.is_human:
            reply = await self.generate_reply(
                received_message=message, sender=sender, reviewer=reviewer
            )
            if reply is not None:
                await self.send(reply, sender)

    def prepare_act_param(self) -> Dict[str, Any]:
        """Prepare the parameters for the act method."""
        return {}

    async def generate_reply(
        self,
        received_message: AgentMessage,
        sender: Agent,
        reviewer: Optional[Agent] = None,
        rely_messages: Optional[List[AgentMessage]] = None,
        **kwargs,
    ) -> AgentMessage:
        """Generate a reply based on the received messages."""
        logger.info(
            f"Generate agent reply! sender={sender}, "
            f"rely_messages_len={len(rely_messages) if rely_messages else 0}"
        )
        try:
            reply_message: AgentMessage = self._init_reply_message(
                received_message=received_message
            )
            await self._system_message_assembly(
                received_message.content, reply_message.context
            )

            fail_reason = None
            current_retry_counter = 0
            is_success = True
            while current_retry_counter < self.max_retry_count:
                if current_retry_counter > 0:
                    retry_message = self._init_reply_message(
                        received_message=received_message
                    )
                    retry_message.content = fail_reason
                    retry_message.current_goal = received_message.current_goal
                    # The current message is a self-optimized message that needs to be
                    # recorded.
                    # It is temporarily set to be initiated by the originating end to
                    # facilitate the organization of historical memory context.
                    await sender.send(
                        retry_message, self, reviewer, request_reply=False
                    )

                # 1. Think about how to do things
                llm_reply, model_name = await self.thinking(
                    self._load_thinking_messages(
                        received_message, sender, rely_messages
                    )
                )
                reply_message.model_name = model_name
                reply_message.content = llm_reply

                # 2. Review whether what is being done is legal
                approve, comments = await self.review(llm_reply, self)
                reply_message.review_info = AgentReviewInfo(
                    approve=approve,
                    comments=comments,
                )

                # 3. Act based on the results of the thinking
                act_extent_param = self.prepare_act_param()
                act_out: Optional[ActionOutput] = await self.act(
                    message=llm_reply,
                    sender=sender,
                    reviewer=reviewer,
                    **act_extent_param,
                )
                if act_out:
                    reply_message.action_report = act_out.dict()

                # 4. Verify the reply information
                check_pass, reason = await self.verify(reply_message, sender, reviewer)
                is_success = check_pass
                # 5. Self-optimize wrong answers
                if not check_pass:
                    current_retry_counter += 1
                    # Send error messages and issue new problem-solving instructions
                    if current_retry_counter < self.max_retry_count:
                        await self.send(
                            reply_message, sender, reviewer, request_reply=False
                        )
                    fail_reason = reason
                else:
                    break
            reply_message.success = is_success
            return reply_message

        except Exception as e:
            logger.exception("Generate reply exception!")
            err_message = AgentMessage(content=str(e))
            err_message.success = False
            return err_message

    async def thinking(
        self, messages: List[AgentMessage], prompt: Optional[str] = None
    ) -> Tuple[Optional[str], Optional[str]]:
        """Think about and reason over the current task goal.

        Args:
            messages(List[AgentMessage]): the messages to reason over
            prompt(str): the prompt to reason with
        """
        last_model = None
        last_err = None
        retry_count = 0
        llm_messages = [message.to_llm_message() for message in messages]
        # LLM inference automatically retries 3 times to reduce interruptions
        # caused by rate limits and network instability
        while retry_count < 3:
            # Exclude the model that just failed, if any
            llm_model = await self._a_select_llm_model(
                [last_model] if last_model else None
            )
            try:
                # Rebuild the final message list on every attempt so that the
                # system message is not prepended repeatedly across retries.
                if prompt:
                    final_messages = _new_system_message(prompt) + llm_messages
                else:
                    final_messages = self.oai_system_message + llm_messages

                if not self.llm_client:
                    raise ValueError("LLM client is not initialized!")
                response = await self.llm_client.create(
                    context=final_messages[-1].pop("context", None),
                    messages=final_messages,
                    llm_model=llm_model,
                    max_new_tokens=self.not_null_agent_context.max_new_tokens,
                    temperature=self.not_null_agent_context.temperature,
                )
                return response, llm_model
            except LLMChatError as e:
                logger.error(f"model:{llm_model} generation failed! {str(e)}")
                retry_count += 1
                last_model = llm_model
                last_err = str(e)
                await asyncio.sleep(10)

        if last_err:
            raise ValueError(last_err)
        else:
            raise ValueError("LLM model inference failed!")

    async def review(self, message: Optional[str], censored: Agent) -> Tuple[bool, Any]:
        """Review the message."""
        return True, None

    async def act(
        self,
        message: Optional[str],
        sender: Optional[Agent] = None,
        reviewer: Optional[Agent] = None,
        **kwargs,
    ) -> Optional[ActionOutput]:
        """Perform actions."""
        last_out: Optional[ActionOutput] = None
        for action in self.actions:
            # Select the resources required by the action
            need_resource = None
            if self.resources and len(self.resources) > 0:
                for item in self.resources:
                    if item.type == action.resource_need:
                        need_resource = item
                        break

            if not message:
                raise ValueError("The message content is empty!")

            last_out = await action.run(
                ai_message=message,
                resource=need_resource,
                rely_action_out=last_out,
                **kwargs,
            )
        return last_out

    async def correctness_check(
        self, message: AgentMessage
    ) -> Tuple[bool, Optional[str]]:
        """Verify the correctness of the results."""
        return True, None

    async def verify(
        self,
        message: AgentMessage,
        sender: Agent,
        reviewer: Optional[Agent] = None,
        **kwargs,
    ) -> Tuple[bool, Optional[str]]:
        """Verify the current execution results."""
        # Check approval results
        if message.review_info and not message.review_info.approve:
            return False, message.review_info.comments

        # Check action run results
        action_output: Optional[ActionOutput] = ActionOutput.from_dict(
            message.action_report
        )
        if action_output:
            if not action_output.is_exe_success:
                return False, action_output.content
            elif not action_output.content or len(action_output.content.strip()) < 1:
                return (
                    False,
                    "The current execution result is empty. Please rethink the "
                    "question and background and generate a new answer. ",
                )

        # agent output correctness check
        return await self.correctness_check(message)

    async def initiate_chat(
        self,
        recipient: Agent,
        reviewer: Optional[Agent] = None,
        message: Optional[str] = None,
    ):
        """Initiate a chat with another agent.

        Args:
            recipient (Agent): The recipient agent.
            reviewer (Agent): The reviewer agent.
            message (str): The message to send.
        """
        await self.send(
            AgentMessage(content=message, current_goal=message),
            recipient,
            reviewer,
            request_reply=True,
        )

    #######################################################################
    # Private Function Begin
    #######################################################################

    def _init_actions(self, actions: List[Type[Action]]):
        self.actions = []
        for idx, action in enumerate(actions):
            if issubclass(action, Action):
                self.actions.append(action())

    async def _a_append_message(
        self, message: AgentMessage, role, sender: Agent
    ) -> bool:
        new_sender = cast(ConversableAgent, sender)
        self.consecutive_auto_reply_counter = (
            new_sender.consecutive_auto_reply_counter + 1
        )
        message_dict = message.to_dict()
        oai_message = {
            k: message_dict[k]
            for k in (
                "content",
                "function_call",
                "name",
                "context",
                "action_report",
                "review_info",
                "current_goal",
                "model_name",
            )
            if k in message_dict
        }

        gpts_message: GptsMessage = GptsMessage(
            conv_id=self.not_null_agent_context.conv_id,
            sender=sender.get_profile(),
            receiver=self.profile,
            role=role,
            rounds=self.consecutive_auto_reply_counter,
            current_goal=oai_message.get("current_goal", None),
            content=oai_message.get("content", None),
            context=json.dumps(oai_message["context"], ensure_ascii=False)
            if "context" in oai_message
            else None,
            review_info=json.dumps(oai_message["review_info"], ensure_ascii=False)
            if "review_info" in oai_message
            else None,
            action_report=json.dumps(oai_message["action_report"], ensure_ascii=False)
            if "action_report" in oai_message
            else None,
            model_name=oai_message.get("model_name", None),
        )

        self.memory.message_memory.append(gpts_message)
        return True

    def _print_received_message(self, message: AgentMessage, sender: Agent):
        # print the message received
        print("\n", "-" * 80, flush=True, sep="")
        _print_name = self.name if self.name else self.profile
        print(
            colored(
                sender.get_name() if sender.get_name() else sender.get_profile(),
                "yellow",
            ),
            "(to",
            f"{_print_name})-[{message.model_name or ''}]:\n",
            flush=True,
        )

        content = json.dumps(message.content, ensure_ascii=False)
        if content is not None:
            print(content, flush=True)

        review_info = message.review_info
        if review_info:
            name = sender.get_name() if sender.get_name() else sender.get_profile()
            pass_msg = "Pass" if review_info.approve else "Reject"
            review_msg = f"{pass_msg}({review_info.comments})"
            approve_print = f">>>>>>>>{name} Review info: \n{review_msg}"
            print(colored(approve_print, "green"), flush=True)

        action_report = message.action_report
        if action_report:
            name = sender.get_name() if sender.get_name() else sender.get_profile()
            action_msg = (
                "execution succeeded"
                if action_report["is_exe_success"]
                else "execution failed"
            )
            action_report_msg = f"{action_msg},\n{action_report['content']}"
            action_print = f">>>>>>>>{name} Action report: \n{action_report_msg}"
            print(colored(action_print, "blue"), flush=True)

        print("\n", "-" * 80, flush=True, sep="")

    async def _a_process_received_message(self, message: AgentMessage, sender: Agent):
        valid = await self._a_append_message(message, None, sender)
        if not valid:
            raise ValueError(
                "Received message can't be converted into a valid ChatCompletion"
                " message. Either content or function_call must be provided."
            )

        self._print_received_message(message, sender)

    async def _system_message_assembly(
        self, question: Optional[str], context: Optional[Union[str, Dict]] = None
    ):
        # system message
        self.init_system_message()
        if len(self.oai_system_message) > 0:
            resource_prompt_list = []
            for item in self.resources:
                resource_client = self.not_null_resource_loader.get_resource_api(
                    item.type, ResourceClient
                )
                if not resource_client:
                    raise ValueError(
                        f"Resource {item.type}:{item.value} is missing a resource"
                        f" loader implementation; unable to read resources!"
                    )
                resource_prompt_list.append(
                    await resource_client.get_resource_prompt(item, question)
                )
            if context is None or not isinstance(context, dict):
                context = {}

            resource_prompt = ""
            if len(resource_prompt_list) > 0:
                resource_prompt = "RESOURCES:" + "\n".join(resource_prompt_list)

            out_schema: Optional[str] = ""
            if self.actions and len(self.actions) > 0:
                out_schema = self.actions[0].ai_out_schema
            for message in self.oai_system_message:
                new_content = message["content"].format(
                    resource_prompt=resource_prompt,
                    out_schema=out_schema,
                    **context,
                )
                message["content"] = new_content

    def _excluded_models(
        self,
        all_models: List[str],
        order_llms: Optional[List[str]] = None,
        excluded_models: Optional[List[str]] = None,
    ):
        if not order_llms:
            order_llms = []
        if not excluded_models:
            excluded_models = []
        can_uses = []
        if order_llms and len(order_llms) > 0:
            for llm_name in order_llms:
                if llm_name in all_models and (
                    not excluded_models or llm_name not in excluded_models
                ):
                    can_uses.append(llm_name)
        else:
            for llm_name in all_models:
                if not excluded_models or llm_name not in excluded_models:
                    can_uses.append(llm_name)

        return can_uses

    async def _a_select_llm_model(
        self, excluded_models: Optional[List[str]] = None
    ) -> str:
        logger.info(f"_a_select_llm_model:{excluded_models}")
        try:
            all_models = await self.not_null_llm_client.models()
            all_model_names = [item.model for item in all_models]
            # TODO: Currently only two strategies, priority and default,
            #  are implemented.
            if self.not_null_llm_config.llm_strategy == LLMStrategyType.Priority:
                priority: List[str] = []
                strategy_context = self.not_null_llm_config.strategy_context
                if strategy_context is not None:
                    priority = json.loads(strategy_context)  # type: ignore
                can_uses = self._excluded_models(
                    all_model_names, priority, excluded_models
                )
            else:
                can_uses = self._excluded_models(all_model_names, None, excluded_models)
            if can_uses and len(can_uses) > 0:
                return can_uses[0]
            else:
                raise ValueError("No model service available!")
        except Exception as e:
            logger.error(f"{self.profile} get next llm failed! {str(e)}")
            raise ValueError(f"Failed to allocate model service, {str(e)}!")

    def _init_reply_message(self, received_message: AgentMessage) -> AgentMessage:
        """Create a new message from the received message.

        Initialize a new message from the received message.

        Args:
            received_message(AgentMessage): The received message

        Returns:
            AgentMessage: A new message
        """
        return AgentMessage(
            content=received_message.content,
            current_goal=received_message.current_goal,
        )

    def _convert_to_ai_message(
        self, gpts_messages: List[GptsMessage]
    ) -> List[AgentMessage]:
        oai_messages: List[AgentMessage] = []
        # From the perspective of the current agent, all messages received are user
        # messages, and all messages sent are assistant messages.
        for item in gpts_messages:
            if item.role:
                role = item.role
            else:
                if item.receiver == self.profile:
                    role = ModelMessageRoleType.HUMAN
                elif item.sender == self.profile:
                    role = ModelMessageRoleType.AI
                else:
                    continue

            # Message conversion: priority is given to converting execution results,
            # and the model output is used only when there are none.
            content = item.content
            if item.action_report:
                action_out = ActionOutput.from_dict(json.loads(item.action_report))
                if (
                    action_out is not None
                    and action_out.is_exe_success
                    and action_out.content is not None
                ):
                    content = action_out.content
            oai_messages.append(
                AgentMessage(
                    content=content,
                    role=role,
                    context=json.loads(item.context)
                    if item.context is not None
                    else None,
                )
            )
        return oai_messages

    def _load_thinking_messages(
        self,
        received_message: AgentMessage,
        sender: Agent,
        rely_messages: Optional[List[AgentMessage]] = None,
    ) -> List[AgentMessage]:
        current_goal = received_message.current_goal

        # Convert and tailor the information in collective memory into contextual
        # memory available to the current agent
        current_goal_messages = self._convert_to_ai_message(
            self.memory.message_memory.get_between_agents(
                self.not_null_agent_context.conv_id,
                self.profile,
                sender.get_profile(),
                current_goal,
            )
        )

        # When there is no target and context, the current received message is used
        # as the target problem
        if current_goal_messages is None or len(current_goal_messages) <= 0:
            received_message.role = ModelMessageRoleType.HUMAN
            current_goal_messages = [received_message]

        # relay messages
        cut_messages = []
        if rely_messages:
            # When directly relying on historical messages, use the execution result
            # content as the dependency
            for rely_message in rely_messages:
                action_report: Optional[ActionOutput] = ActionOutput.from_dict(
                    rely_message.action_report
                )
                if action_report:
                    # TODO: Modifies in place, needs to be optimized
                    rely_message.content = action_report.content

            cut_messages.extend(rely_messages)

        # TODO: allocate historical information based on a token budget
        if len(current_goal_messages) < 5:
            cut_messages.extend(current_goal_messages)
        else:
            # For the time being, the smallest window of historical message records
            # is used by default.
            # Use the first two rounds of messages to understand the initial goals
            cut_messages.extend(current_goal_messages[:2])
            # Use information from the last three rounds of communication to ensure
            # that current thinking knows what happened and what to do in the last
            # communication
            cut_messages.extend(current_goal_messages[-3:])
        return cut_messages


def _new_system_message(content):
    """Return the system message."""
    return [{"content": content, "role": ModelMessageRoleType.SYSTEM}]


def _is_list_of_type(lst: List[Any], type_cls: type) -> bool:
    return all(isinstance(item, type_cls) for item in lst)
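
A minimal wiring sketch (not part of the commit), assuming a concrete LLMClient instance; the CodeAssistantAgent import path is inferred from the relative imports above, and the bare human-proxy agent simply relies on is_human=True skipping the action and LLM checks.

from dbgpt.agent.expand.code_assistant_agent import CodeAssistantAgent  # path assumed

async def run_chat(my_llm_client: LLMClient):
    context = AgentContext(conv_id="conv_1")
    default_memory = GptsMemory()

    coder = (
        await CodeAssistantAgent()
        .bind(context)
        .bind(LLMConfig(llm_client=my_llm_client))
        .bind(default_memory)
        .build()
    )
    # A bare human-proxy agent: is_human=True skips the action and LLM checks.
    user = await ConversableAgent(
        profile="User", name="User", is_human=True
    ).bind(context).bind(default_memory).build()

    # send() -> receive() -> generate_reply() drives the think/review/act/verify
    # loop implemented above.
    await user.initiate_chat(recipient=coder, message="Calculate the product of 10 and 99.")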

dbgpt/agent/core/base_team.py  (new file, 166 lines)
@@ -0,0 +1,166 @@
"""Base classes for managing a group of agents in a team chat."""

import logging
from typing import Dict, List, Optional, Tuple, Union

from dbgpt._private.pydantic import BaseModel, Field

from ..actions.action import ActionOutput
from .agent import Agent, AgentMessage
from .base_agent import ConversableAgent

logger = logging.getLogger(__name__)


def _content_str(content: Union[str, List, None]) -> str:
    """Convert content into a string format.

    This function processes content that may be a string, a list of mixed text and
    image URLs, or None, and converts it into a string. Text is directly appended to
    the result string, while image URLs are represented by a placeholder image token.
    If the content is None, an empty string is returned.

    Args:
        content (Union[str, List, None]): The content to be processed. Can be a
            string, a list of dictionaries representing text and image URLs, or None.

    Returns:
        str: A string representation of the input content. Image URLs are replaced
            with an image token.

    Note:
        - The function expects each dictionary in the list to have a "type" key that
          is either "text" or "image_url".
          For the "text" type, the "text" key's value is appended to the result.
          For "image_url", an image token is appended.
        - This function is useful for handling content that may include both text
          and image references, especially in contexts where images need to be
          represented as placeholders.
    """
    if content is None:
        return ""
    if isinstance(content, str):
        return content
    if not isinstance(content, list):
        raise TypeError(f"content must be None, str, or list, but got {type(content)}")

    rst = ""
    for item in content:
        if not isinstance(item, dict):
            raise TypeError(
                "Wrong content format: every element should be dict if the content is "
                "a list."
            )
        assert (
            "type" in item
        ), "Wrong content format. Missing 'type' key in content's dict."
        if item["type"] == "text":
            rst += item["text"]
        elif item["type"] == "image_url":
            rst += "<image>"
        else:
            raise ValueError(
                f"Wrong content format: unknown type {item['type']} within the content"
            )
    return rst


class Team(BaseModel):
    """Team class for managing a group of agents in a team chat."""

    agents: List[Agent] = Field(default_factory=list)
    messages: List[Dict] = Field(default_factory=list)
    max_round: int = 100
    is_team: bool = True

    class Config:
        """Pydantic model configuration."""

        arbitrary_types_allowed = True

    def __init__(self, **kwargs):
        """Create a new Team instance."""
        super().__init__(**kwargs)

    def hire(self, agents: List[Agent]):
        """Hire roles to cooperate."""
        self.agents.extend(agents)

    @property
    def agent_names(self) -> List[str]:
        """Return the names of the agents in the group chat."""
        return [agent.get_profile() for agent in self.agents]

    def agent_by_name(self, name: str) -> Agent:
        """Return the agent with a given name."""
        return self.agents[self.agent_names.index(name)]

    async def select_speaker(
        self,
        last_speaker: Agent,
        selector: Agent,
        now_goal_context: Optional[str] = None,
        pre_allocated: Optional[str] = None,
    ) -> Tuple[Agent, Optional[str]]:
        """Select the next speaker in the group chat."""
        raise NotImplementedError

    def reset(self):
        """Reset the group chat."""
        self.messages.clear()

    def append(self, message: Dict):
        """Append a message to the group chat.

        We cast the content to str here so that it can be managed by a text-based
        model.
        """
        message["content"] = _content_str(message["content"])
        self.messages.append(message)


class ManagerAgent(ConversableAgent, Team):
    """Manager Agent class."""

    profile: str = "TeamManager"
    goal: str = "manage all hired intelligent agents to complete mission objectives"
    constraints: List[str] = []
    desc: str = goal
    is_team: bool = True

    # The management agent does not need to retry on exceptions; the actual
    # execution of the agent has already been retried.
    max_retry_count: int = 1

    class Config:
        """Pydantic model configuration."""

        arbitrary_types_allowed = True

    def __init__(self, **kwargs):
        """Create a new ManagerAgent instance."""
        ConversableAgent.__init__(self, **kwargs)
        Team.__init__(self, **kwargs)

    async def thinking(
        self, messages: List[AgentMessage], prompt: Optional[str] = None
    ) -> Tuple[Optional[str], Optional[str]]:
        """Think about and reason over the current task goal."""
        # A TeamManager, which works from processes and plans by default, only
        # needs to ensure execution and does not require additional thinking.
        if messages is None or len(messages) <= 0:
            return None, None
        else:
            message = messages[-1]
            self.messages.append(message.to_llm_message())
            return message.content, None

    async def act(
        self,
        message: Optional[str],
        sender: Optional[Agent] = None,
        reviewer: Optional[Agent] = None,
        **kwargs,
    ) -> Optional[ActionOutput]:
        """Perform actions based on the received message."""
        return None
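
Illustrative only (not part of the commit): how _content_str flattens mixed text and image content before Team.append stores it in the shared message list.

mixed = [
    {"type": "text", "text": "Here is the chart: "},
    {"type": "image_url", "image_url": "https://example.com/chart.png"},
]
assert _content_str(mixed) == "Here is the chart: <image>"
assert _content_str(None) == ""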

dbgpt/agent/core/llm/__init__.py  (new file, 1 line)
@@ -0,0 +1 @@
"""LLM for agents."""

dbgpt/agent/core/llm/llm.py  (new file, 116 lines)
@@ -0,0 +1,116 @@
"""LLM module."""
import logging
from collections import defaultdict
from enum import Enum
from typing import Any, Dict, List, Optional, Type

from dbgpt._private.pydantic import BaseModel, Field
from dbgpt.core import LLMClient, ModelMetadata, ModelRequest

logger = logging.getLogger(__name__)


def _build_model_request(input_value: Dict) -> ModelRequest:
    """Build a model request from the input value.

    Args:
        input_value(Dict): the input value

    Returns:
        ModelRequest: the model request to pass to the llm client
    """
    params = {
        "model": input_value.get("model"),
        "messages": input_value.get("messages"),
        "temperature": input_value.get("temperature", None),
        "max_new_tokens": input_value.get("max_new_tokens", None),
        "stop": input_value.get("stop", None),
        "stop_token_ids": input_value.get("stop_token_ids", None),
        "context_len": input_value.get("context_len", None),
        "echo": input_value.get("echo", None),
        "span_id": input_value.get("span_id", None),
    }

    return ModelRequest(**params)


class LLMStrategyType(Enum):
    """LLM strategy type."""

    Priority = "priority"
    Auto = "auto"
    Default = "default"


class LLMStrategy:
    """LLM strategy base class."""

    def __init__(self, llm_client: LLMClient, context: Optional[str] = None):
        """Create an LLMStrategy instance."""
        self._llm_client = llm_client
        self._context = context

    @property
    def type(self) -> LLMStrategyType:
        """Return the strategy type."""
        return LLMStrategyType.Default

    def _excluded_models(
        self,
        all_models: List[ModelMetadata],
        excluded_models: List[str],
        need_uses: Optional[List[str]] = None,
    ):
        # An empty need_uses list means no restriction on candidate models;
        # otherwise only the listed models are considered.
        can_uses = []
        for item in all_models:
            if item.model in excluded_models:
                continue
            if need_uses and item.model not in need_uses:
                continue
            can_uses.append(item)
        return can_uses

    async def next_llm(self, excluded_models: Optional[List[str]] = None):
        """Return the next available llm model name.

        Args:
            excluded_models(List[str]): excluded models

        Returns:
            str: the next available llm model name
        """
        if not excluded_models:
            excluded_models = []
        try:
            all_models = await self._llm_client.models()
            available_llms = self._excluded_models(all_models, excluded_models, None)
            if available_llms and len(available_llms) > 0:
                return available_llms[0].model
            else:
                raise ValueError("No model service available!")

        except Exception as e:
            logger.error(f"{self.type} get next llm failed! {str(e)}")
            raise ValueError(f"Failed to allocate model service, {str(e)}!")


llm_strategies: Dict[LLMStrategyType, List[Type[LLMStrategy]]] = defaultdict(list)


def register_llm_strategy(
    llm_strategy_type: LLMStrategyType, strategy: Type[LLMStrategy]
):
    """Register an llm strategy."""
    llm_strategies[llm_strategy_type].append(strategy)


class LLMConfig(BaseModel):
    """LLM configuration."""

    llm_client: Optional[LLMClient] = Field(default_factory=LLMClient)
    llm_strategy: LLMStrategyType = Field(default=LLMStrategyType.Default)
    strategy_context: Optional[Any] = None

    class Config:
        """Pydantic model config."""

        arbitrary_types_allowed = True
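
A hypothetical sketch (not part of the commit) of plugging a new strategy into the register_llm_strategy hook above; the LLMStrategyAuto class and its selection logic are invented for illustration.

class LLMStrategyAuto(LLMStrategy):
    """Invented example; a real implementation might weigh load or cost."""

    @property
    def type(self) -> LLMStrategyType:
        return LLMStrategyType.Auto

    async def next_llm(self, excluded_models: Optional[List[str]] = None):
        # Defer to the base selection here; only the registration hook matters.
        return await super().next_llm(excluded_models)


register_llm_strategy(LLMStrategyType.Auto, LLMStrategyAuto)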

dbgpt/agent/core/llm/llm_client.py  (new file, 183 lines)
@@ -0,0 +1,183 @@
|
||||
"""AIWrapper for LLM."""
|
||||
import json
|
||||
import logging
|
||||
import traceback
|
||||
from typing import Callable, Dict, Optional, Union
|
||||
|
||||
from dbgpt.core import LLMClient
|
||||
from dbgpt.core.interface.output_parser import BaseOutputParser
|
||||
from dbgpt.util.error_types import LLMChatError
|
||||
from dbgpt.util.tracer import root_tracer
|
||||
|
||||
from .llm import _build_model_request
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class AIWrapper:
|
||||
"""AIWrapper for LLM."""
|
||||
|
||||
cache_path_root: str = ".cache"
|
||||
extra_kwargs = {
|
||||
"cache_seed",
|
||||
"filter_func",
|
||||
"allow_format_str_template",
|
||||
"context",
|
||||
"llm_model",
|
||||
}
|
||||
|
||||
def __init__(
|
||||
self, llm_client: LLMClient, output_parser: Optional[BaseOutputParser] = None
|
||||
):
|
||||
"""Create an AIWrapper instance."""
|
||||
self.llm_echo = False
|
||||
self.model_cache_enable = False
|
||||
self._llm_client = llm_client
|
||||
self._output_parser = output_parser or BaseOutputParser(is_stream_out=False)
|
||||
|
||||
@classmethod
|
||||
def instantiate(
|
||||
cls,
|
||||
template: Optional[Union[str, Callable]] = None,
|
||||
context: Optional[Dict] = None,
|
||||
allow_format_str_template: Optional[bool] = False,
|
||||
):
|
||||
"""Instantiate the template with the context."""
|
||||
if not context or template is None:
|
||||
return template
|
||||
if isinstance(template, str):
|
||||
return template.format(**context) if allow_format_str_template else template
|
||||
return template(context)
|
||||
|
||||
def _construct_create_params(self, create_config: Dict, extra_kwargs: Dict) -> Dict:
|
||||
"""Prime the create_config with additional_kwargs."""
|
||||
# Validate the config
|
||||
prompt = create_config.get("prompt")
|
||||
messages = create_config.get("messages")
|
||||
if prompt is None and messages is None:
|
||||
raise ValueError(
|
||||
"Either prompt or messages should be in create config but not both."
|
||||
)
|
||||
|
||||
context = extra_kwargs.get("context")
|
||||
if context is None:
|
||||
# No need to instantiate if no context is provided.
|
||||
return create_config
|
||||
# Instantiate the prompt or messages
|
||||
allow_format_str_template = extra_kwargs.get("allow_format_str_template", False)
|
||||
# Make a copy of the config
|
||||
params = create_config.copy()
|
||||
if prompt is not None:
|
||||
# Instantiate the prompt
|
||||
params["prompt"] = self.instantiate(
|
||||
prompt, context, allow_format_str_template
|
||||
)
|
||||
elif context and messages and isinstance(messages, list):
|
||||
# Instantiate the messages
|
||||
params["messages"] = [
|
||||
{
|
||||
**m,
|
||||
"content": self.instantiate(
|
||||
m["content"], context, allow_format_str_template
|
||||
),
|
||||
}
|
||||
if m.get("content")
|
||||
else m
|
||||
for m in messages
|
||||
]
|
||||
return params
|
||||
|
||||
def _separate_create_config(self, config):
|
||||
"""Separate the config into create_config and extra_kwargs."""
|
||||
create_config = {k: v for k, v in config.items() if k not in self.extra_kwargs}
|
||||
extra_kwargs = {k: v for k, v in config.items() if k in self.extra_kwargs}
|
||||
return create_config, extra_kwargs
|
||||
|
||||
def _get_key(self, config):
|
||||
"""Get a unique identifier of a configuration.
|
||||
|
||||
Args:
|
||||
config (dict or list): A configuration.
|
||||
|
||||
Returns:
|
||||
tuple: A unique identifier which can be used as a key for a dict.
|
||||
"""
|
||||
non_cache_key = ["api_key", "base_url", "api_type", "api_version"]
|
||||
copied = False
|
||||
for key in non_cache_key:
|
||||
if key in config:
|
||||
config, copied = config.copy() if not copied else config, True
|
||||
config.pop(key)
|
||||
return json.dumps(config, sort_keys=True, ensure_ascii=False)
|
||||
|
||||
async def create(self, **config) -> Optional[str]:
|
||||
"""Create a response from the input config."""
|
||||
# merge the input config with the i-th config in the config list
|
||||
full_config = {**config}
|
||||
# separate the config into create_config and extra_kwargs
|
||||
create_config, extra_kwargs = self._separate_create_config(full_config)
|
||||
|
||||
# construct the create params
|
||||
params = self._construct_create_params(create_config, extra_kwargs)
|
||||
filter_func = extra_kwargs.get("filter_func")
|
||||
context = extra_kwargs.get("context")
|
||||
llm_model = extra_kwargs.get("llm_model")
|
||||
try:
|
||||
response = await self._completions_create(llm_model, params)
|
||||
except LLMChatError as e:
|
||||
logger.debug(f"{llm_model} generate failed!{str(e)}")
|
||||
raise e
|
||||
else:
|
||||
pass_filter = filter_func is None or filter_func(
|
||||
context=context, response=response
|
||||
)
|
||||
if pass_filter:
|
||||
# Return the response if it passes the filter
|
||||
return response
|
||||
else:
|
||||
return None
|
||||
|
||||
def _get_span_metadata(self, payload: Dict) -> Dict:
|
||||
metadata = {k: v for k, v in payload.items()}
|
||||
|
||||
metadata["messages"] = list(
|
||||
map(lambda m: m if isinstance(m, dict) else m.dict(), metadata["messages"])
|
||||
)
|
||||
return metadata
|
||||
|
||||
def _llm_messages_convert(self, params):
|
||||
gpts_messages = params["messages"]
|
||||
# TODO
|
||||
|
||||
return gpts_messages

    async def _completions_create(self, llm_model, params) -> str:
        payload = {
            "model": llm_model,
            "prompt": params.get("prompt"),
            "messages": self._llm_messages_convert(params),
            # params is expected to carry "temperature" and "max_new_tokens"
            "temperature": float(params.get("temperature")),
            "max_new_tokens": int(params.get("max_new_tokens")),
            "echo": self.llm_echo,
        }
        logger.info(f"Request: \n{payload}")
        span = root_tracer.start_span(
            "Agent.llm_client.no_streaming_call",
            metadata=self._get_span_metadata(payload),
        )
        payload["span_id"] = span.span_id
        payload["model_cache_enable"] = self.model_cache_enable
        try:
            model_request = _build_model_request(payload)
            model_output = await self._llm_client.generate(model_request.copy())
            parsed_output = self._output_parser.parse_model_nostream_resp(
                model_output, "#########################"
            )
            return parsed_output
        except Exception as e:
            logger.error(
                f"Call LLMClient error, {str(e)}, detail: {traceback.format_exc()}"
            )
            raise LLMChatError(original_exception=e) from e
        finally:
            span.end()
1
dbgpt/agent/core/llm/strategy/__init__.py
Normal file
@@ -0,0 +1 @@
"""LLM strategy module."""
37
dbgpt/agent/core/llm/strategy/priority.py
Normal file
@@ -0,0 +1,37 @@
"""Priority strategy for LLM."""

import json
import logging
from typing import List, Optional

from ..llm import LLMStrategy, LLMStrategyType

logger = logging.getLogger(__name__)


class LLMStrategyPriority(LLMStrategy):
    """Priority strategy for llm model service."""

    @property
    def type(self) -> LLMStrategyType:
        """Return the strategy type."""
        return LLMStrategyType.Priority

    async def next_llm(self, excluded_models: Optional[List[str]] = None) -> str:
        """Return the next available llm model name."""
        try:
            if not excluded_models:
                excluded_models = []
            all_models = await self._llm_client.models()
            if not self._context:
                raise ValueError("No context provided for priority strategy!")
            priority: List[str] = json.loads(self._context)
            can_uses = self._excluded_models(all_models, excluded_models, priority)
            if can_uses:
                return can_uses[0].model
            else:
                raise ValueError("No model service available!")
        except Exception as e:
            logger.error(f"{self.type} get next llm failed! {str(e)}")
            raise ValueError(f"Failed to allocate model service, {str(e)}!") from e
128
dbgpt/agent/core/role.py
Normal file
@@ -0,0 +1,128 @@
"""Role class for role-based conversation."""

from abc import ABC
from typing import List, Optional

from dbgpt._private.pydantic import BaseModel


class Role(ABC, BaseModel):
    """Role class for role-based conversation."""

    profile: str = ""
    name: str = ""
    resource_introduction: str = ""
    goal: str = ""

    expand_prompt: str = ""

    fixed_subgoal: Optional[str] = None

    constraints: List[str] = []
    examples: str = ""
    desc: str = ""
    language: str = "en"
    is_human: bool = False
    is_team: bool = False

    class Config:
        """Pydantic config."""

        arbitrary_types_allowed = True

    def prompt_template(
        self,
        specified_prompt: Optional[str] = None,
    ) -> str:
        """Return the prompt template for the role.

        Args:
            specified_prompt (str, optional): The specified prompt. Defaults to None.

        Returns:
            str: The prompt template.
        """
        if specified_prompt:
            return specified_prompt

        expand_prompt = self.expand_prompt if len(self.expand_prompt) > 0 else ""
        examples_prompt = (
            "You can refer to the following examples:\n"
            if len(self.examples) > 0
            else ""
        )
        examples = self.examples if len(self.examples) > 0 else ""
        template = (
            f"{self.role_prompt}\n"
            "Please think step by step to achieve the goal. You can use the resources "
            "given below. At the same time, please strictly abide by the constraints "
            "and specifications in IMPORTANT REMINDER.\n\n"
            f"{{resource_prompt}}\n\n"
            f"{expand_prompt}\n\n"
            "*** IMPORTANT REMINDER ***\n"
            f"{self.language_require_prompt}\n"
            f"{self.constraints_prompt}\n"
            f"{examples_prompt}{examples}\n\n"
            f"{{out_schema}}"
        )
        return template
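
Note the design choice: `{{resource_prompt}}` and `{{out_schema}}` are escaped in the f-string, so the returned template still contains literal `{resource_prompt}` and `{out_schema}` placeholders for the caller to fill. A hedged sketch of that final step (field values and substitutions are invented):

role = Role(profile="Data Analyst", name="Ana", goal="answer data questions")
template = role.prompt_template()
prompt = template.format(
    resource_prompt="Resources: a read-only MySQL connection.",
    out_schema="Please respond in JSON.",
)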

    @property
    def role_prompt(self) -> str:
        """Return the role prompt.

        You are a {self.profile}, named {self.name}, your goal is {self.goal}.

        Returns:
            str: The role prompt.
        """
        profile_prompt = f"You are a {self.profile},"
        name_prompt = f" named {self.name}," if len(self.name) > 0 else ""
        goal_prompt = f" your goal is {self.goal}"
        prompt = f"{profile_prompt}{name_prompt}{goal_prompt}"
        return prompt

    @property
    def constraints_prompt(self) -> str:
        """Return the constraints prompt.

        Returns:
            str: The constraints prompt.
        """
        if len(self.constraints) > 0:
            return "\n".join(
                f"{i + 1}. {item}" for i, item in enumerate(self.constraints)
            )
        return ""

    @property
    def language_require_prompt(self) -> str:
        """Return the language requirement prompt.

        Returns:
            str: The language requirement prompt.
        """
        if self.language == "zh":
            return "Please answer in simplified Chinese."
        else:
            return "Please answer in English."

    @property
    def introduce(self) -> str:
        """Introduce the role."""
        return self.desc

    def identity_check(self) -> None:
        """Check the identity of the role."""
        pass

    def get_name(self) -> str:
        """Get the name of the role."""
        return self.name

    def get_profile(self) -> str:
        """Get the profile of the role."""
        return self.profile

    def get_describe(self) -> str:
        """Get the description of the role."""
        return self.desc
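
A minimal sketch of defining a concrete role; the subclass and all field values are invented for illustration:

class SQLAssistant(Role):
    profile: str = "SQL Assistant"
    name: str = "Sage"
    goal: str = "translate questions into correct SQL"
    constraints: List[str] = ["Only read data, never modify it."]
    desc: str = "Translates natural-language questions into SQL."

role = SQLAssistant()
print(role.role_prompt)
# e.g. "You are a SQL Assistant, named Sage, your goal is translate questions into correct SQL"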
27
dbgpt/agent/core/schema.py
Normal file
@@ -0,0 +1,27 @@
"""Schema definition for the agent."""

from enum import Enum


class PluginStorageType(Enum):
    """Plugin storage type."""

    Git = "git"
    Oss = "oss"


class ApiTagType(Enum):
    """API tag type."""

    API_VIEW = "dbgpt_view"
    API_CALL = "dbgpt_call"


class Status(Enum):
    """Status of a task."""

    TODO = "todo"
    RUNNING = "running"
    WAITING = "waiting"
    RETRYING = "retrying"
    FAILED = "failed"
    COMPLETE = "complete"
19
dbgpt/agent/core/user_proxy_agent.py
Normal file
@@ -0,0 +1,19 @@
"""A proxy agent for the user."""

from .base_agent import ConversableAgent


class UserProxyAgent(ConversableAgent):
    """A proxy agent for the user.

    It can execute code and provide feedback to the other agents.
    """

    name: str = "User"
    profile: str = "Human"

    desc: str = (
        "A human admin. Interact with the planner to discuss the plan. "
        "Plan execution needs to be approved by this admin."
    )

    is_human: bool = True