Mirror of https://github.com/csunny/DB-GPT.git (synced 2025-09-07 12:00:46 +00:00)

feat: (0.6)New UI (#1855)

Co-authored-by: 夏姜 <wenfengjiang.jwf@digital-engine.com>
Co-authored-by: aries_ckt <916701291@qq.com>
Co-authored-by: wb-lh513319 <wb-lh513319@alibaba-inc.com>
Co-authored-by: csunny <cfqsunny@163.com>
@@ -1,13 +1,17 @@
 """Base agent class for conversable agents."""
 from __future__ import annotations

 import asyncio
 import json
 import logging
 from concurrent.futures import Executor, ThreadPoolExecutor
-from typing import Any, Callable, Dict, List, Optional, Tuple, Type, cast
+from datetime import datetime
+from typing import Any, Callable, Dict, List, Optional, Tuple, Type, final

+from jinja2 import Template

 from dbgpt._private.pydantic import ConfigDict, Field
-from dbgpt.core import LLMClient, ModelMessageRoleType
+from dbgpt.core import LLMClient, ModelMessageRoleType, PromptTemplate
 from dbgpt.util.error_types import LLMChatError
 from dbgpt.util.executor_utils import blocking_func_to_async
 from dbgpt.util.tracer import SpanType, root_tracer
@@ -36,9 +40,14 @@ class ConversableAgent(Role, Agent):
     actions: List[Action] = Field(default_factory=list)
     resource: Optional[Resource] = Field(None, description="Resource")
     llm_config: Optional[LLMConfig] = None
+    bind_prompt: Optional[PromptTemplate] = None
     max_retry_count: int = 3
     consecutive_auto_reply_counter: int = 0
     llm_client: Optional[AIWrapper] = None
+    # Whether the current agent should stream its output
+    stream_out: bool = True
+    # Whether the current agent should display its reference resources
+    show_reference: bool = False

     executor: Executor = Field(
         default_factory=lambda: ThreadPoolExecutor(max_workers=1),
         description="Executor for running tasks",
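Reviewer note: the two new switches follow the same pydantic field pattern as the existing `executor` field. A minimal standalone sketch of that pattern (illustrative names, not the DB-GPT API):

from concurrent.futures import Executor, ThreadPoolExecutor

from pydantic import BaseModel, ConfigDict, Field


class AgentSettings(BaseModel):
    # Executor is not a pydantic type, so arbitrary types must be allowed
    model_config = ConfigDict(arbitrary_types_allowed=True)

    # Whether the agent streams its LLM output
    stream_out: bool = True
    # Whether the agent renders the resources it consulted as references
    show_reference: bool = False
    executor: Executor = Field(
        default_factory=lambda: ThreadPoolExecutor(max_workers=1),
        description="Executor for running tasks",
    )


print(AgentSettings().stream_out)  # True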
@@ -130,7 +139,7 @@ class ConversableAgent(Role, Agent):
         if self.resource:
             await self.blocking_func_to_async(self.resource.preload_resource)

-    async def build(self) -> "ConversableAgent":
+    async def build(self, is_retry_chat: bool = False) -> "ConversableAgent":
         """Build the agent."""
         # Preload resources
         await self.preload_resource()
@@ -157,6 +166,16 @@ class ConversableAgent(Role, Agent):
         )
         # Clone the memory structure
         self.memory = self.memory.structure_clone()
+        # init agent memory
+        if is_retry_chat:
+            # recover agent memory message
+            agent_history_memories = (
+                await self.memory.gpts_memory.get_agent_history_memory(
+                    self.not_null_agent_context.conv_id, self.role
+                )
+            )
+            for agent_history_memory in agent_history_memories:
+                await self.write_memories(**agent_history_memory)
         return self

     def bind(self, target: Any) -> "ConversableAgent":
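The retry path replays persisted per-agent history through `write_memories`, so a rebuilt agent resumes with its prior context. A toy reproduction of that flow (class and storage here are hypothetical stand-ins, not the DB-GPT memory API):

import asyncio
from typing import Any, Dict, List


class ToyAgent:
    def __init__(self, history: List[Dict[str, Any]]):
        self._history = history  # previously persisted memory entries
        self.memories: List[Dict[str, Any]] = []

    async def write_memories(self, **memory: Any) -> None:
        self.memories.append(memory)

    async def build(self, is_retry_chat: bool = False) -> "ToyAgent":
        if is_retry_chat:
            # recover agent memory messages, as in the hunk above
            for entry in self._history:
                await self.write_memories(**entry)
        return self


agent = asyncio.run(ToyAgent([{"observation": "q1"}]).build(is_retry_chat=True))
print(agent.memories)  # [{'observation': 'q1'}]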
@@ -164,7 +183,7 @@ class ConversableAgent(Role, Agent):
         if isinstance(target, LLMConfig):
             self.llm_config = target
         elif isinstance(target, GptsMemory):
-            raise ValueError("GptsMemory is not supported!")
+            raise ValueError("GptsMemory is not supported! Please use AgentMemory")
         elif isinstance(target, AgentContext):
             self.agent_context = target
         elif isinstance(target, Resource):
@@ -175,6 +194,9 @@ class ConversableAgent(Role, Agent):
             self.profile = target
         elif isinstance(target, type) and issubclass(target, Action):
             self.actions.append(target())
+        elif isinstance(target, PromptTemplate):
+            self.bind_prompt = target

         return self

     async def send(
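`bind` dispatches on the target's type and returns `self`, so bindings chain. A sketch of how a bound `PromptTemplate` is attached (stand-in class, not the dbgpt.core import):

from typing import Any, Optional


class PromptTemplate:  # stand-in for dbgpt.core.PromptTemplate
    def __init__(self, template: str, template_format: str = "f-string"):
        self.template = template
        self.template_format = template_format


class ToyAgent:
    def __init__(self) -> None:
        self.bind_prompt: Optional[PromptTemplate] = None

    def bind(self, target: Any) -> "ToyAgent":
        # type dispatch, mirroring the elif chain above
        if isinstance(target, PromptTemplate):
            self.bind_prompt = target
        return self


agent = ToyAgent().bind(PromptTemplate("You are {role}."))
print(agent.bind_prompt.template)  # You are {role}.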
@@ -184,6 +206,9 @@ class ConversableAgent(Role, Agent):
         reviewer: Optional[Agent] = None,
         request_reply: Optional[bool] = True,
         is_recovery: Optional[bool] = False,
         silent: Optional[bool] = False,
+        is_retry_chat: bool = False,
+        last_speaker_name: Optional[str] = None,
     ) -> None:
         """Send a message to recipient agent."""
         with root_tracer.start_span(
@@ -204,6 +229,9 @@ class ConversableAgent(Role, Agent):
             reviewer=reviewer,
             request_reply=request_reply,
             is_recovery=is_recovery,
             silent=silent,
+            is_retry_chat=is_retry_chat,
+            last_speaker_name=last_speaker_name,
         )

     async def receive(
@@ -214,6 +242,8 @@ class ConversableAgent(Role, Agent):
         request_reply: Optional[bool] = None,
         silent: Optional[bool] = False,
         is_recovery: Optional[bool] = False,
+        is_retry_chat: bool = False,
+        last_speaker_name: Optional[str] = None,
     ) -> None:
         """Receive a message from another agent."""
         with root_tracer.start_span(
@@ -233,37 +263,45 @@ class ConversableAgent(Role, Agent):
         await self._a_process_received_message(message, sender)
         if request_reply is False or request_reply is None:
             return
-        if self.is_human:
-            # Not generating a reply for human agents now
-            return

-        if (
-            self.consecutive_auto_reply_counter
-            <= self.not_null_agent_context.max_chat_round
-        ):
-            # If reply count is less than the maximum chat round, generate a reply
-            reply = await self.generate_reply(
-                received_message=message, sender=sender, reviewer=reviewer
-            )
+        if not self.is_human:
+            if isinstance(sender, ConversableAgent) and sender.is_human:
+                reply = await self.generate_reply(
+                    received_message=message,
+                    sender=sender,
+                    reviewer=reviewer,
+                    is_retry_chat=is_retry_chat,
+                    last_speaker_name=last_speaker_name,
+                )
+            else:
+                reply = await self.generate_reply(
+                    received_message=message,
+                    sender=sender,
+                    reviewer=reviewer,
+                    is_retry_chat=is_retry_chat,
+                )

             if reply is not None:
                 await self.send(reply, sender)
-        else:
-            logger.info(
-                f"Current round {self.consecutive_auto_reply_counter} "
-                f"exceeds the maximum chat round "
-                f"{self.not_null_agent_context.max_chat_round}!"
-            )

-    def prepare_act_param(self) -> Dict[str, Any]:
+    def prepare_act_param(
+        self,
+        received_message: Optional[AgentMessage],
+        sender: Agent,
+        rely_messages: Optional[List[AgentMessage]] = None,
+    ) -> Dict[str, Any]:
         """Prepare the parameters for the act method."""
         return {}

+    @final
     async def generate_reply(
         self,
         received_message: AgentMessage,
         sender: Agent,
         reviewer: Optional[Agent] = None,
         rely_messages: Optional[List[AgentMessage]] = None,
+        is_retry_chat: bool = False,
+        last_speaker_name: Optional[str] = None,
+        **kwargs,
     ) -> AgentMessage:
         """Generate a reply based on the received messages."""
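With `generate_reply` now marked `@final`, subclasses customize behavior through hooks such as `prepare_act_param`. A hypothetical override showing the new signature:

from typing import Any, Dict, List, Optional


class MyAgent:  # hypothetical subclass, hook only
    def prepare_act_param(
        self,
        received_message: Optional[Any],
        sender: Any,
        rely_messages: Optional[List[Any]] = None,
    ) -> Dict[str, Any]:
        # Derive extra act() parameters from the triggering message
        return {"rely_count": len(rely_messages or [])}


print(MyAgent().prepare_act_param(None, sender=None))  # {'rely_count': 0}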
@@ -300,10 +338,12 @@ class ConversableAgent(Role, Agent):
             while current_retry_counter < self.max_retry_count:
                 if current_retry_counter > 0:
                     retry_message = self._init_reply_message(
-                        received_message=received_message
+                        received_message=received_message,
+                        rely_messages=rely_messages,
                     )
                     retry_message.content = fail_reason
                     retry_message.current_goal = received_message.current_goal

                     # The current message is a self-optimized message that needs to be
                     # recorded.
                     # It is temporarily set to be initiated by the originating end to
@@ -312,11 +352,16 @@ class ConversableAgent(Role, Agent):
                     retry_message, self, reviewer, request_reply=False
                 )

-            thinking_messages = await self._load_thinking_messages(
+            # In manual retry mode, load all messages of the last speaker as dependent messages  # noqa
+            logger.info(
+                f"Depends on the number of historical messages:{len(rely_messages) if rely_messages else 0}!"  # noqa
+            )
+            thinking_messages, resource_info = await self._load_thinking_messages(
                 received_message,
                 sender,
                 rely_messages,
                 context=reply_message.get_dict_context(),
+                is_retry_chat=is_retry_chat,
             )
             with root_tracer.start_span(
                 "agent.generate_reply.thinking",
@@ -328,9 +373,12 @@ class ConversableAgent(Role, Agent):
                 },
             ) as span:
                 # 1.Think about how to do things
-                llm_reply, model_name = await self.thinking(thinking_messages)
+                llm_reply, model_name = await self.thinking(
+                    thinking_messages, sender
+                )
                 reply_message.model_name = model_name
                 reply_message.content = llm_reply
+                reply_message.resource_info = resource_info
                 span.metadata["llm_reply"] = llm_reply
                 span.metadata["model_name"] = model_name
@@ -347,7 +395,9 @@ class ConversableAgent(Role, Agent):
                 span.metadata["approve"] = approve
                 span.metadata["comments"] = comments

-            act_extent_param = self.prepare_act_param()
+            act_extent_param = self.prepare_act_param(
+                received_message, sender, rely_messages
+            )
             with root_tracer.start_span(
                 "agent.generate_reply.act",
                 metadata={
@@ -358,14 +408,16 @@ class ConversableAgent(Role, Agent):
                 },
             ) as span:
                 # 3.Act based on the results of your thinking
-                act_out: Optional[ActionOutput] = await self.act(
-                    message=llm_reply,
+                act_out: ActionOutput = await self.act(
+                    message=reply_message,
                     sender=sender,
                     reviewer=reviewer,
+                    is_retry_chat=is_retry_chat,
+                    last_speaker_name=last_speaker_name,
                     **act_extent_param,
                 )
                 if act_out:
-                    reply_message.action_report = act_out.to_dict()
+                    reply_message.action_report = act_out
                 span.metadata["action_report"] = (
                     act_out.to_dict() if act_out else None
                 )
@@ -390,6 +442,8 @@ class ConversableAgent(Role, Agent):
                 ai_message: str = llm_reply or ""
                 # 5.Optimize wrong answers myself
                 if not check_pass:
+                    if not act_out.have_retry:
+                        break
                     current_retry_counter += 1
                     # Send error messages and issue new problem-solving instructions
                     if current_retry_counter < self.max_retry_count:
@@ -413,6 +467,8 @@ class ConversableAgent(Role, Agent):
                     )
                     break
             reply_message.success = is_success
+            # 6.final message adjustment
+            await self.adjust_final_message(is_success, reply_message)
             return reply_message

         except Exception as e:
@@ -425,7 +481,10 @@ class ConversableAgent(Role, Agent):
             root_span.end()

     async def thinking(
-        self, messages: List[AgentMessage], prompt: Optional[str] = None
+        self,
+        messages: List[AgentMessage],
+        sender: Optional[Agent] = None,
+        prompt: Optional[str] = None,
     ) -> Tuple[Optional[str], Optional[str]]:
         """Think and reason about the current task goal.
@@ -454,6 +513,10 @@ class ConversableAgent(Role, Agent):
                 max_new_tokens=self.not_null_agent_context.max_new_tokens,
                 temperature=self.not_null_agent_context.temperature,
                 verbose=self.not_null_agent_context.verbose,
+                memory=self.memory.gpts_memory,
+                conv_id=self.not_null_agent_context.conv_id,
+                sender=sender.role if sender else "?",
+                stream_out=self.stream_out,
             )
             return response, llm_model
         except LLMChatError as e:
@@ -474,22 +537,16 @@ class ConversableAgent(Role, Agent):

     async def act(
         self,
-        message: Optional[str],
-        sender: Optional[Agent] = None,
+        message: AgentMessage,
+        sender: Agent,
         reviewer: Optional[Agent] = None,
+        is_retry_chat: bool = False,
+        last_speaker_name: Optional[str] = None,
         **kwargs,
-    ) -> Optional[ActionOutput]:
+    ) -> ActionOutput:
         """Perform actions."""
         last_out: Optional[ActionOutput] = None
         for i, action in enumerate(self.actions):
-            # Select the resources required by the action
-            if action.resource_need and self.resource:
-                need_resources = self.resource.get_resource_by_type(
-                    action.resource_need
-                )
-            else:
-                need_resources = []
-
             if not message:
                 raise ValueError("The message content is empty!")

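The loop semantics are unchanged: each action's output feeds the next via `rely_action_out`, and an empty output now raises instead of silently returning `None`. A toy version of that chain:

import asyncio
from typing import List, Optional


class ToyAction:  # stand-in for dbgpt Action subclasses
    def __init__(self, name: str):
        self.name = name

    async def run(self, ai_message: str, rely_action_out: Optional[str] = None) -> str:
        return f"{self.name}({rely_action_out or ai_message})"


async def act(message: str, actions: List[ToyAction]) -> str:
    last_out: Optional[str] = None
    for action in actions:
        # feed the previous action's output into the next action
        last_out = await action.run(ai_message=message, rely_action_out=last_out)
        if not last_out:
            raise ValueError("Action should return value!")
    return last_out


print(asyncio.run(act("plan", [ToyAction("sql"), ToyAction("chart")])))
# chart(sql(plan))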
@@ -500,7 +557,6 @@ class ConversableAgent(Role, Agent):
                     "sender": sender.name if sender else None,
                     "recipient": self.name,
                     "reviewer": reviewer.name if reviewer else None,
-                    "need_resource": need_resources[0].name if need_resources else None,
                     "rely_action_out": last_out.to_dict() if last_out else None,
                     "conv_uid": self.not_null_agent_context.conv_id,
                     "action_index": i,
@@ -508,12 +564,14 @@ class ConversableAgent(Role, Agent):
                 },
             ) as span:
                 last_out = await action.run(
-                    ai_message=message,
+                    ai_message=message.content if message.content else "",
                     resource=None,
                     rely_action_out=last_out,
                     **kwargs,
                 )
                 span.metadata["action_out"] = last_out.to_dict() if last_out else None
+                if not last_out:
+                    raise ValueError("Action should return value!")
         return last_out

     async def correctness_check(
@@ -535,9 +593,7 @@ class ConversableAgent(Role, Agent):
             return False, message.review_info.comments

         # Check action run results
-        action_output: Optional[ActionOutput] = ActionOutput.from_dict(
-            message.action_report
-        )
+        action_output: Optional[ActionOutput] = message.action_report
         if action_output:
             if not action_output.is_exe_success:
                 return False, action_output.content
@@ -556,6 +612,11 @@ class ConversableAgent(Role, Agent):
         recipient: Agent,
         reviewer: Optional[Agent] = None,
         message: Optional[str] = None,
+        request_reply: bool = True,
+        is_retry_chat: bool = False,
+        last_speaker_name: Optional[str] = None,
+        message_rounds: int = 0,
+        **context,
     ):
         """Initiate a chat with another agent.
@@ -564,7 +625,12 @@ class ConversableAgent(Role, Agent):
             reviewer (Agent): The reviewer agent.
             message (str): The message to send.
         """
-        agent_message = AgentMessage(content=message, current_goal=message)
+        agent_message = AgentMessage(
+            content=message,
+            current_goal=message,
+            rounds=message_rounds,
+            context=context,
+        )
         with root_tracer.start_span(
             "agent.initiate_chat",
             span_type=SpanType.AGENT,
@@ -582,9 +648,19 @@ class ConversableAgent(Role, Agent):
             agent_message,
             recipient,
             reviewer,
-            request_reply=True,
+            request_reply=request_reply,
+            is_retry_chat=is_retry_chat,
+            last_speaker_name=last_speaker_name,
         )

+    async def adjust_final_message(
+        self,
+        is_success: bool,
+        reply_message: AgentMessage,
+    ):
+        """Adjust final message after agent reply."""
+        return is_success, reply_message
+
     #######################################################################
     # Private Function Begin
     #######################################################################
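`adjust_final_message` is a no-op hook by default; subclasses can post-process the reply once the retry loop finishes. A hypothetical override (a plain dict stands in for AgentMessage to keep the sketch self-contained):

import asyncio


class MyAgent:  # hypothetical subclass, hook only
    async def adjust_final_message(self, is_success: bool, reply_message: dict):
        if not is_success:
            reply_message["content"] = "[FAILED] " + reply_message["content"]
        return is_success, reply_message


ok, msg = asyncio.run(MyAgent().adjust_final_message(False, {"content": "no rows"}))
print(ok, msg["content"])  # False [FAILED] no rows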
@@ -598,50 +674,40 @@ class ConversableAgent(Role, Agent):
     async def _a_append_message(
         self, message: AgentMessage, role, sender: Agent
     ) -> bool:
-        new_sender = cast(ConversableAgent, sender)
-        self.consecutive_auto_reply_counter = (
-            new_sender.consecutive_auto_reply_counter + 1
-        )
-        message_dict = message.to_dict()
-        oai_message = {
-            k: message_dict[k]
-            for k in (
-                "content",
-                "function_call",
-                "name",
-                "context",
-                "action_report",
-                "review_info",
-                "current_goal",
-                "model_name",
-            )
-            if k in message_dict
-        }

         gpts_message: GptsMessage = GptsMessage(
             conv_id=self.not_null_agent_context.conv_id,
             sender=sender.role,
             receiver=self.role,
             role=role,
-            rounds=self.consecutive_auto_reply_counter,
-            current_goal=oai_message.get("current_goal", None),
-            content=oai_message.get("content", None),
+            rounds=message.rounds,
+            is_success=message.success,
+            app_code=sender.not_null_agent_context.gpts_app_code
+            if isinstance(sender, ConversableAgent)
+            else None,
+            app_name=sender.not_null_agent_context.gpts_app_name
+            if isinstance(sender, ConversableAgent)
+            else None,
+            current_goal=message.current_goal,
+            content=message.content if message.content else "",
             context=(
-                json.dumps(oai_message["context"], ensure_ascii=False)
-                if "context" in oai_message
+                json.dumps(message.context, ensure_ascii=False)
+                if message.context
                 else None
             ),
             review_info=(
-                json.dumps(oai_message["review_info"], ensure_ascii=False)
-                if "review_info" in oai_message
+                json.dumps(message.review_info.to_dict(), ensure_ascii=False)
+                if message.review_info
                 else None
             ),
             action_report=(
-                json.dumps(oai_message["action_report"], ensure_ascii=False)
-                if "action_report" in oai_message
+                json.dumps(message.action_report.to_dict(), ensure_ascii=False)
+                if message.action_report
                 else None
             ),
-            model_name=oai_message.get("model_name", None),
+            model_name=message.model_name,
+            resource_info=(
+                json.dumps(message.resource_info) if message.resource_info else None
+            ),
         )

         with root_tracer.start_span(
@@ -651,7 +717,9 @@ class ConversableAgent(Role, Agent):
                 "conv_uid": self.not_null_agent_context.conv_id,
             },
         ):
-            self.memory.message_memory.append(gpts_message)
+            await self.memory.gpts_memory.append_message(
+                self.not_null_agent_context.conv_id, gpts_message
+            )
             return True

     def _print_received_message(self, message: AgentMessage, sender: Agent):
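A side note on the `ensure_ascii=False` used when persisting these fields: it keeps non-ASCII goal text human-readable in the store instead of escaping it.

import json

context = {"goal": "查询用户表"}
print(json.dumps(context))                      # {"goal": "\u67e5\u8be2\u7528\u6237\u8868"}
print(json.dumps(context, ensure_ascii=False))  # {"goal": "查询用户表"}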
@@ -685,10 +753,10 @@ class ConversableAgent(Role, Agent):
             name = sender.name if sender.name else sender.role
             action_msg = (
                 "execution succeeded"
-                if action_report["is_exe_success"]
+                if action_report.is_exe_success
                 else "execution failed"
             )
-            action_report_msg = f"{action_msg},\n{action_report['content']}"
+            action_report_msg = f"{action_msg},\n{action_report.content}"
             action_print = f">>>>>>>>{name} Action report: \n{action_report_msg}"
             print(colored(action_print, "blue"), flush=True)
@@ -704,20 +772,30 @@ class ConversableAgent(Role, Agent):
         self._print_received_message(message, sender)

-    async def generate_resource_variables(
-        self, question: Optional[str] = None
-    ) -> Dict[str, Any]:
-        """Generate the resource variables."""
-        resource_prompt = None
+    async def load_resource(self, question: str, is_retry_chat: bool = False):
+        """Load agent bind resource."""
         if self.resource:
-            resource_prompt = await self.resource.get_prompt(
+            resource_prompt, resource_reference = await self.resource.get_prompt(
                 lang=self.language, question=question
             )
+            return resource_prompt, resource_reference
+        return None, None

+    async def generate_resource_variables(
+        self, resource_prompt: Optional[str] = None
+    ) -> Dict[str, Any]:
+        """Generate the resource variables."""
         out_schema: Optional[str] = ""
         if self.actions and len(self.actions) > 0:
             out_schema = self.actions[0].ai_out_schema
-        return {"resource_prompt": resource_prompt, "out_schema": out_schema}
+        if not resource_prompt:
+            resource_prompt = ""
+        now_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+        return {
+            "resource_prompt": resource_prompt,
+            "out_schema": out_schema,
+            "now_time": now_time,
+        }

     def _excluded_models(
         self,
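The resource variables now also carry the wall-clock time under `now_time`; the formatting is reproducible standalone:

from datetime import datetime

now_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print({"resource_prompt": "", "out_schema": "", "now_time": now_time})
# e.g. {'resource_prompt': '', 'out_schema': '', 'now_time': '2024-08-12 09:30:00'}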
@@ -769,7 +847,11 @@ class ConversableAgent(Role, Agent):
             logger.error(f"{self.role} get next llm failed!{str(e)}")
             raise ValueError(f"Failed to allocate model service,{str(e)}!")

-    def _init_reply_message(self, received_message: AgentMessage) -> AgentMessage:
+    def _init_reply_message(
+        self,
+        received_message: AgentMessage,
+        rely_messages: Optional[List[AgentMessage]] = None,
+    ) -> AgentMessage:
         """Create a new message from the received message.

         Initialize a new message from the received message
@@ -783,10 +865,14 @@ class ConversableAgent(Role, Agent):
         return AgentMessage(
             content=received_message.content,
             current_goal=received_message.current_goal,
+            context=received_message.context,
+            rounds=received_message.rounds + 1,
         )

     def _convert_to_ai_message(
-        self, gpts_messages: List[GptsMessage]
+        self,
+        gpts_messages: List[GptsMessage],
+        is_retry_chat: bool = False,
     ) -> List[AgentMessage]:
         oai_messages: List[AgentMessage] = []
         # Based on the current agent, all messages received are user, and all messages
@@ -807,12 +893,16 @@ class ConversableAgent(Role, Agent):
                 content = item.content
                 if item.action_report:
                     action_out = ActionOutput.from_dict(json.loads(item.action_report))
-                    if (
-                        action_out is not None
-                        and action_out.is_exe_success
-                        and action_out.content is not None
-                    ):
-                        content = action_out.content
+                    if is_retry_chat:
+                        if action_out is not None and action_out.content:
+                            content = action_out.content
+                    else:
+                        if (
+                            action_out is not None
+                            and action_out.is_exe_success
+                            and action_out.content is not None
+                        ):
+                            content = action_out.content
                 oai_messages.append(
                     AgentMessage(
                         content=content,
@@ -824,13 +914,50 @@ class ConversableAgent(Role, Agent):
             )
         return oai_messages

+    async def build_system_prompt(
+        self,
+        question: Optional[str] = None,
+        most_recent_memories: Optional[str] = None,
+        resource_vars: Optional[Dict] = None,
+        context: Optional[Dict[str, Any]] = None,
+        is_retry_chat: bool = False,
+    ):
+        """Build system prompt."""
+        system_prompt = None
+        if self.bind_prompt:
+            prompt_param = {}
+            if resource_vars:
+                prompt_param.update(resource_vars)
+            if context:
+                prompt_param.update(context)
+            if self.bind_prompt.template_format == "f-string":
+                system_prompt = self.bind_prompt.template.format(
+                    **prompt_param,
+                )
+            elif self.bind_prompt.template_format == "jinja2":
+                system_prompt = Template(self.bind_prompt.template).render(prompt_param)
+            else:
+                logger.warning("Bind prompt template not exist or format not supported!")
+        if not system_prompt:
+            param: Dict = context if context else {}
+            system_prompt = await self.build_prompt(
+                question=question,
+                is_system=True,
+                most_recent_memories=most_recent_memories,
+                resource_vars=resource_vars,
+                is_retry_chat=is_retry_chat,
+                **param,
+            )
+        return system_prompt
+
     async def _load_thinking_messages(
         self,
         received_message: AgentMessage,
         sender: Agent,
         rely_messages: Optional[List[AgentMessage]] = None,
         context: Optional[Dict[str, Any]] = None,
-    ) -> List[AgentMessage]:
+        is_retry_chat: bool = False,
+    ) -> Tuple[List[AgentMessage], Optional[Dict]]:
         observation = received_message.content
         if not observation:
             raise ValueError("The received message content is empty!")
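The two template formats accepted by `build_system_prompt`, reproduced standalone: f-string templates render via `str.format`, jinja2 templates via `jinja2.Template.render` (sample parameters are illustrative):

from jinja2 import Template

prompt_param = {"resource_prompt": "db: users", "out_schema": "json"}

fstring_tpl = "Resources:\n{resource_prompt}\nReply as {out_schema}."
print(fstring_tpl.format(**prompt_param))

jinja_tpl = "Resources:\n{{ resource_prompt }}\nReply as {{ out_schema }}."
print(Template(jinja_tpl).render(prompt_param))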
@@ -843,9 +970,7 @@ class ConversableAgent(Role, Agent):
             # When directly relying on historical messages, use the execution result
             # content as a dependency
             for message in copied_rely_messages:
-                action_report: Optional[ActionOutput] = ActionOutput.from_dict(
-                    message.action_report
-                )
+                action_report: Optional[ActionOutput] = message.action_report
                 if action_report:
                     # TODO: Modify in-place, need to be optimized
                     message.content = action_report.content
@@ -858,17 +983,28 @@ class ConversableAgent(Role, Agent):
                 reply_message_str += f"Observation: {message.content}\n"
         if reply_message_str:
             memories += "\n" + reply_message_str
+        try:
+            resource_prompt_str, resource_references = await self.load_resource(
+                observation, is_retry_chat=is_retry_chat
+            )
+        except Exception as e:
+            logger.exception(f"Load resource error!{str(e)}")
+            raise ValueError(f"Load resource error!{str(e)}")

-        system_prompt = await self.build_prompt(
+        resource_vars = await self.generate_resource_variables(resource_prompt_str)
+
+        system_prompt = await self.build_system_prompt(
             question=observation,
-            is_system=True,
             most_recent_memories=memories,
-            **context,
+            resource_vars=resource_vars,
+            context=context,
+            is_retry_chat=is_retry_chat,
         )
         user_prompt = await self.build_prompt(
             question=observation,
             is_system=False,
             most_recent_memories=memories,
+            resource_vars=resource_vars,
             **context,
         )
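The net effect of the new flow is a two-part prompt: a system message built from resources, memories, and context, plus a human message carrying the observation. A shape-only sketch (field names illustrative, not the exact AgentMessage schema):

system_prompt = "Resources:\ndb: users\nMost recent memories: ..."
user_prompt = "Observation: how many users signed up today?"
agent_messages = [
    {"role": "system", "content": system_prompt},
    {"role": "human", "content": user_prompt},
]
print(agent_messages)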
@@ -888,74 +1024,7 @@ class ConversableAgent(Role, Agent):
                 )
             )

-        return agent_messages
-
-    def _old_load_thinking_messages(
-        self,
-        received_message: AgentMessage,
-        sender: Agent,
-        rely_messages: Optional[List[AgentMessage]] = None,
-    ) -> List[AgentMessage]:
-        current_goal = received_message.current_goal
-
-        # Convert and tailor the information in collective memory into contextual
-        # memory available to the current Agent
-
-        with root_tracer.start_span(
-            "agent._load_thinking_messages",
-            metadata={
-                "sender": sender.name,
-                "recipient": self.name,
-                "conv_uid": self.not_null_agent_context.conv_id,
-                "current_goal": current_goal,
-            },
-        ) as span:
-            # Get historical information from the memory
-            memory_messages = self.memory.message_memory.get_between_agents(
-                self.not_null_agent_context.conv_id,
-                self.role,
-                sender.role,
-                current_goal,
-            )
-            span.metadata["memory_messages"] = [
-                message.to_dict() for message in memory_messages
-            ]
-        current_goal_messages = self._convert_to_ai_message(memory_messages)
-
-        # When there is no target and context, the current received message is used as
-        # the target problem
-        if current_goal_messages is None or len(current_goal_messages) <= 0:
-            received_message.role = ModelMessageRoleType.HUMAN
-            current_goal_messages = [received_message]
-
-        # relay messages
-        cut_messages = []
-        if rely_messages:
-            # When directly relying on historical messages, use the execution result
-            # content as a dependency
-            for rely_message in rely_messages:
-                action_report: Optional[ActionOutput] = ActionOutput.from_dict(
-                    rely_message.action_report
-                )
-                if action_report:
-                    # TODO: Modify in-place, need to be optimized
-                    rely_message.content = action_report.content
-
-            cut_messages.extend(rely_messages)
-
-        # TODO: allocate historical information based on token budget
-        if len(current_goal_messages) < 5:
-            cut_messages.extend(current_goal_messages)
-        else:
-            # For the time being, the smallest size of historical message records will
-            # be used by default.
-            # Use the first two rounds of messages to understand the initial goals
-            cut_messages.extend(current_goal_messages[:2])
-            # Use information from the last three rounds of communication to ensure
-            # that current thinking knows what happened and what to do in the last
-            # communication
-            cut_messages.extend(current_goal_messages[-3:])
-        return cut_messages
+        return agent_messages, resource_references


 def _new_system_message(content):