feat(agent): Agent supports conversation context (#2230)

Co-authored-by: Fangyin Cheng <staneyffer@gmail.com>
This commit is contained in:
明天 2024-12-20 16:50:08 +08:00 committed by GitHub
parent 16c5233a6d
commit 2b4597e6a7
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
13 changed files with 164 additions and 18 deletions

View File

@ -333,4 +333,9 @@ DBGPT_LOG_LEVEL=INFO
# FIN_REPORT_MODEL=/app/models/bge-large-zh
## Turn off the notebook display Python flow, which is enabled by default
NOTE_BOOK_ENABLE=False
NOTE_BOOK_ENABLE=False
## The agent historical message retention configuration defaults to the last two rounds.
# MESSAGES_KEEP_START_ROUNDS=0
# MESSAGES_KEEP_END_ROUNDS=2

View File

@ -361,6 +361,13 @@ class Config(metaclass=Singleton):
)
self.NOTE_BOOK_ROOT: str = os.getenv("NOTE_BOOK_ROOT", os.path.expanduser("~"))
self.MESSAGES_KEEP_START_ROUNDS: int = int(
os.getenv("MESSAGES_KEEP_START_ROUNDS", 0)
)
self.MESSAGES_KEEP_END_ROUNDS: int = int(
os.getenv("MESSAGES_KEEP_END_ROUNDS", 2)
)
@property
def local_db_manager(self) -> "ConnectorManager":
from dbgpt.datasource.manages import ConnectorManager

View File

@ -27,6 +27,8 @@ class Agent(ABC):
silent: Optional[bool] = False,
is_retry_chat: bool = False,
last_speaker_name: Optional[str] = None,
rely_messages: Optional[List[AgentMessage]] = None,
historical_dialogues: Optional[List[AgentMessage]] = None,
) -> None:
"""Send a message to recipient agent.
@ -52,6 +54,8 @@ class Agent(ABC):
is_recovery: Optional[bool] = False,
is_retry_chat: bool = False,
last_speaker_name: Optional[str] = None,
historical_dialogues: Optional[List[AgentMessage]] = None,
rely_messages: Optional[List[AgentMessage]] = None,
) -> None:
"""Receive a message from another agent.
@ -74,6 +78,7 @@ class Agent(ABC):
sender: Agent,
reviewer: Optional[Agent] = None,
rely_messages: Optional[List[AgentMessage]] = None,
historical_dialogues: Optional[List[AgentMessage]] = None,
is_retry_chat: bool = False,
last_speaker_name: Optional[str] = None,
**kwargs,

View File

@ -209,6 +209,8 @@ class ConversableAgent(Role, Agent):
silent: Optional[bool] = False,
is_retry_chat: bool = False,
last_speaker_name: Optional[str] = None,
rely_messages: Optional[List[AgentMessage]] = None,
historical_dialogues: Optional[List[AgentMessage]] = None,
) -> None:
"""Send a message to recipient agent."""
with root_tracer.start_span(
@ -232,6 +234,8 @@ class ConversableAgent(Role, Agent):
silent=silent,
is_retry_chat=is_retry_chat,
last_speaker_name=last_speaker_name,
historical_dialogues=historical_dialogues,
rely_messages=rely_messages,
)
async def receive(
@ -244,6 +248,8 @@ class ConversableAgent(Role, Agent):
is_recovery: Optional[bool] = False,
is_retry_chat: bool = False,
last_speaker_name: Optional[str] = None,
historical_dialogues: Optional[List[AgentMessage]] = None,
rely_messages: Optional[List[AgentMessage]] = None,
) -> None:
"""Receive a message from another agent."""
with root_tracer.start_span(
@ -272,6 +278,8 @@ class ConversableAgent(Role, Agent):
reviewer=reviewer,
is_retry_chat=is_retry_chat,
last_speaker_name=last_speaker_name,
historical_dialogues=historical_dialogues,
rely_messages=rely_messages,
)
else:
reply = await self.generate_reply(
@ -279,6 +287,8 @@ class ConversableAgent(Role, Agent):
sender=sender,
reviewer=reviewer,
is_retry_chat=is_retry_chat,
historical_dialogues=historical_dialogues,
rely_messages=rely_messages,
)
if reply is not None:
@ -289,6 +299,7 @@ class ConversableAgent(Role, Agent):
received_message: Optional[AgentMessage],
sender: Agent,
rely_messages: Optional[List[AgentMessage]] = None,
**kwargs,
) -> Dict[str, Any]:
"""Prepare the parameters for the act method."""
return {}
@ -300,6 +311,7 @@ class ConversableAgent(Role, Agent):
sender: Agent,
reviewer: Optional[Agent] = None,
rely_messages: Optional[List[AgentMessage]] = None,
historical_dialogues: Optional[List[AgentMessage]] = None,
is_retry_chat: bool = False,
last_speaker_name: Optional[str] = None,
**kwargs,
@ -361,9 +373,10 @@ class ConversableAgent(Role, Agent):
f"Depends on the number of historical messages:{len(rely_messages) if rely_messages else 0}" # noqa
)
thinking_messages, resource_info = await self._load_thinking_messages(
received_message,
sender,
rely_messages,
received_message=received_message,
sender=sender,
rely_messages=rely_messages,
historical_dialogues=historical_dialogues,
context=reply_message.get_dict_context(),
is_retry_chat=is_retry_chat,
)
@ -400,7 +413,10 @@ class ConversableAgent(Role, Agent):
span.metadata["comments"] = comments
act_extent_param = self.prepare_act_param(
received_message, sender, rely_messages
received_message=received_message,
sender=sender,
rely_messages=rely_messages,
historical_dialogues=historical_dialogues,
)
with root_tracer.start_span(
"agent.generate_reply.act",
@ -620,6 +636,8 @@ class ConversableAgent(Role, Agent):
is_retry_chat: bool = False,
last_speaker_name: Optional[str] = None,
message_rounds: int = 0,
historical_dialogues: Optional[List[AgentMessage]] = None,
rely_messages: Optional[List[AgentMessage]] = None,
**context,
):
"""Initiate a chat with another agent.
@ -652,6 +670,8 @@ class ConversableAgent(Role, Agent):
agent_message,
recipient,
reviewer,
historical_dialogues=historical_dialogues,
rely_messages=rely_messages,
request_reply=request_reply,
is_retry_chat=is_retry_chat,
last_speaker_name=last_speaker_name,
@ -825,6 +845,38 @@ class ConversableAgent(Role, Agent):
return can_uses
def convert_to_agent_message(
    self,
    gpts_messages: List[GptsMessage],
    is_rery_chat: bool = False,
) -> Optional[List[AgentMessage]]:
    """Convert stored ``GptsMessage`` records into ``AgentMessage`` objects.

    From the viewpoint of the current agent, every message received is
    treated as user input and every message sent as assistant output.

    Args:
        gpts_messages: Persisted conversation messages to convert.
        is_rery_chat: Unused flag.
            NOTE(review): the name looks like a typo for ``is_retry_chat``;
            kept as-is so keyword callers do not break — confirm before
            renaming.

    Returns:
        The converted messages, or ``None`` when ``gpts_messages`` is empty.
    """
    if not gpts_messages:
        return None

    converted: List[AgentMessage] = []
    for record in gpts_messages:
        # JSON-decode the optional context and action-report payloads;
        # both may be absent on a stored message.
        decoded_context = (
            json.loads(record.context) if record.context is not None else None
        )
        decoded_action = (
            ActionOutput.from_dict(json.loads(record.action_report))
            if record.action_report
            else None
        )
        converted.append(
            AgentMessage(
                content=record.content,
                context=decoded_context,
                action_report=decoded_action,
                name=record.sender,
                rounds=record.rounds,
                model_name=record.model_name,
                success=record.is_success,
            )
        )
    return converted
async def _a_select_llm_model(
self, excluded_models: Optional[List[str]] = None
) -> str:
@ -959,6 +1011,7 @@ class ConversableAgent(Role, Agent):
received_message: AgentMessage,
sender: Agent,
rely_messages: Optional[List[AgentMessage]] = None,
historical_dialogues: Optional[List[AgentMessage]] = None,
context: Optional[Dict[str, Any]] = None,
is_retry_chat: bool = False,
) -> Tuple[List[AgentMessage], Optional[Dict]]:
@ -1020,13 +1073,27 @@ class ConversableAgent(Role, Agent):
role=ModelMessageRoleType.SYSTEM,
)
)
if user_prompt:
agent_messages.append(
AgentMessage(
content=user_prompt,
role=ModelMessageRoleType.HUMAN,
)
# Historical dialogue messages associated with the conversation context
if historical_dialogues:
for i in range(len(historical_dialogues)):
if i % 2 == 0:
# Even indexes (starting at 0) are user messages
message = historical_dialogues[i]
message.role = ModelMessageRoleType.HUMAN
agent_messages.append(message)
else:
# Odd indexes are AI messages
message = historical_dialogues[i]
message.role = ModelMessageRoleType.AI
agent_messages.append(message)
# The current user input message
agent_messages.append(
AgentMessage(
content=user_prompt,
role=ModelMessageRoleType.HUMAN,
)
)
return agent_messages, resource_references

View File

@ -161,8 +161,9 @@ class ManagerAgent(ConversableAgent, Team):
received_message: AgentMessage,
sender: Agent,
rely_messages: Optional[List[AgentMessage]] = None,
historical_dialogues: Optional[List[AgentMessage]] = None,
context: Optional[Dict[str, Any]] = None,
is_retry_chat: Optional[bool] = False,
is_retry_chat: bool = False,
) -> Tuple[List[AgentMessage], Optional[Dict]]:
"""Load messages for thinking."""
return [AgentMessage(content=received_message.content)], None

View File

@ -58,7 +58,7 @@ class GptsMessage:
receiver: str
role: str
content: str
rounds: Optional[int]
rounds: int = 0
is_success: bool = True
app_code: Optional[str] = None
app_name: Optional[str] = None

View File

@ -127,8 +127,11 @@ class GptsMemory:
await self.push_message(conv_id)
async def get_messages(self, conv_id: str) -> List[GptsMessage]:
    """Return the messages of the conversation identified by ``conv_id``.

    Reads the in-memory cache first; when the cache holds no messages for
    this conversation, falls back to the persistent message memory.

    Args:
        conv_id: The conversation identifier.

    Returns:
        The conversation's messages (possibly empty).
    """
    messages = self.messages_cache[conv_id]
    if not messages:
        # Cache miss (empty) — load from the persistent store.
        # NOTE(review): assumes `messages_cache[conv_id]` cannot raise
        # KeyError for an unseen conv_id (e.g. a defaultdict) — confirm.
        messages = self.message_memory.get_by_conv_id(conv_id)
    return messages
async def get_agent_messages(
self, conv_id: str, agent_role: str

View File

@ -2,7 +2,7 @@
import logging
from abc import ABC, abstractmethod
from typing import Optional, cast
from typing import List, Optional, cast
from dbgpt._private.config import Config
from dbgpt._private.pydantic import (
@ -114,6 +114,8 @@ class AWELBaseManager(ManagerAgent, ABC):
is_recovery: Optional[bool] = False,
is_retry_chat: bool = False,
last_speaker_name: Optional[str] = None,
historical_dialogues: Optional[List[AgentMessage]] = None,
rely_messages: Optional[List[AgentMessage]] = None,
) -> None:
"""Recive message by base team."""
if request_reply is False or request_reply is None:

View File

@ -169,6 +169,7 @@ assistants:[
received_message: Optional[AgentMessage],
sender: Agent,
rely_messages: Optional[List[AgentMessage]] = None,
**kwargs,
) -> Dict[str, Any]:
"""Prepare the parameters for the act method."""
return {

View File

@ -1,5 +1,5 @@
"""A proxy agent for the user."""
from typing import Optional
from typing import List, Optional
from .. import ActionOutput, Agent, AgentMessage
from .base_agent import ConversableAgent
@ -39,6 +39,8 @@ class UserProxyAgent(ConversableAgent):
is_recovery: Optional[bool] = False,
is_retry_chat: bool = False,
last_speaker_name: Optional[str] = None,
historical_dialogues: Optional[List[AgentMessage]] = None,
rely_messages: Optional[List[AgentMessage]] = None,
) -> None:
"""Receive a message from another agent."""
if not silent:

View File

@ -206,11 +206,44 @@ class MultiAgents(BaseComponent, ABC):
if not gpt_app:
raise ValueError(f"Not found app {gpts_name}!")
historical_dialogues: List[GptsMessage] = []
if not is_retry_chat:
# Create a new gpts conversation record
gpt_app: GptsApp = self.gpts_app.app_detail(gpts_name)
if not gpt_app:
raise ValueError(f"Not found app {gpts_name}!")
## When creating a new gpts conversation record, determine whether to include the history of previous topics according to the application definition.
## TODO BEGIN
# Temporarily use system configuration management, and subsequently use application configuration management
if CFG.MESSAGES_KEEP_START_ROUNDS and CFG.MESSAGES_KEEP_START_ROUNDS > 0:
gpt_app.keep_start_rounds = CFG.MESSAGES_KEEP_START_ROUNDS
if CFG.MESSAGES_KEEP_END_ROUNDS and CFG.MESSAGES_KEEP_END_ROUNDS > 0:
gpt_app.keep_end_rounds = CFG.MESSAGES_KEEP_END_ROUNDS
## TODO END
if gpt_app.keep_start_rounds > 0 or gpt_app.keep_end_rounds > 0:
if gpts_conversations and len(gpts_conversations) > 0:
rely_conversations = []
if gpt_app.keep_start_rounds + gpt_app.keep_end_rounds < len(
gpts_conversations
):
if gpt_app.keep_start_rounds > 0:
front = gpts_conversations[gpt_app.keep_start_rounds :]
rely_conversations.extend(front)
if gpt_app.keep_end_rounds > 0:
back = gpts_conversations[-gpt_app.keep_end_rounds :]
rely_conversations.extend(back)
else:
rely_conversations = gpts_conversations
for gpts_conversation in rely_conversations:
temps: List[GptsMessage] = await self.memory.get_messages(
gpts_conversation.conv_id
)
if temps and len(temps) > 1:
historical_dialogues.append(temps[0])
historical_dialogues.append(temps[-1])
self.gpts_conversations.add(
GptsConversationsEntity(
conv_id=agent_conv_id,
@ -277,6 +310,8 @@ class MultiAgents(BaseComponent, ABC):
is_retry_chat,
last_speaker_name=last_speaker_name,
init_message_rounds=message_round,
enable_verbose=enable_verbose,
historical_dialogues=historical_dialogues,
**ext_info,
)
)
@ -418,6 +453,8 @@ class MultiAgents(BaseComponent, ABC):
link_sender: ConversableAgent = None,
app_link_start: bool = False,
enable_verbose: bool = True,
historical_dialogues: Optional[List[GptsMessage]] = None,
rely_messages: Optional[List[GptsMessage]] = None,
**ext_info,
):
gpts_status = Status.COMPLETE.value
@ -529,6 +566,10 @@ class MultiAgents(BaseComponent, ABC):
is_retry_chat=is_retry_chat,
last_speaker_name=last_speaker_name,
message_rounds=init_message_rounds,
historical_dialogues=user_proxy.convert_to_agent_message(
historical_dialogues
),
rely_messages=rely_messages,
**ext_info,
)

View File

@ -93,6 +93,8 @@ class StartAppAssistantAgent(ConversableAgent):
is_recovery: Optional[bool] = False,
is_retry_chat: bool = False,
last_speaker_name: str = None,
historical_dialogues: Optional[List[AgentMessage]] = None,
rely_messages: Optional[List[AgentMessage]] = None,
) -> None:
await self._a_process_received_message(message, sender)
if request_reply is False or request_reply is None:

View File

@ -135,6 +135,10 @@ class GptsApp(BaseModel):
recommend_questions: Optional[List[RecommendQuestion]] = []
admins: List[str] = Field(default_factory=list)
# Rounds of prior conversation records kept as context.
# NOTE(review): the field defaults below are 0; the "last two rounds"
# default only applies via `from_dict` (keep_end_rounds falls back to 2).
keep_start_rounds: int = 0
keep_end_rounds: int = 0
def to_dict(self):
return {k: self._serialize(v) for k, v in self.__dict__.items()}
@ -170,6 +174,8 @@ class GptsApp(BaseModel):
owner_avatar_url=d.get("owner_avatar_url", None),
recommend_questions=d.get("recommend_questions", []),
admins=d.get("admins", []),
keep_start_rounds=d.get("keep_start_rounds", 0),
keep_end_rounds=d.get("keep_end_rounds", 2),
)
@model_validator(mode="before")
@ -547,6 +553,8 @@ class GptsAppDao(BaseDao):
"published": app_info.published,
"details": [],
"admins": [],
# "keep_start_rounds": app_info.keep_start_rounds,
# "keep_end_rounds": app_info.keep_end_rounds,
}
)
for app_info in app_entities
@ -918,6 +926,8 @@ class GptsAppDao(BaseDao):
app_entity.icon = gpts_app.icon
app_entity.team_context = _parse_team_context(gpts_app.team_context)
app_entity.param_need = json.dumps(gpts_app.param_need)
app_entity.keep_start_rounds = gpts_app.keep_start_rounds
app_entity.keep_end_rounds = gpts_app.keep_end_rounds
session.merge(app_entity)
old_details = session.query(GptsAppDetailEntity).filter(