Mirror of https://github.com/csunny/DB-GPT.git (synced 2025-08-16 07:24:05 +00:00)

Gpts app v0.4 (#1170)

parent 63ab612e75
commit c78bd22fda
@@ -197,6 +197,8 @@ CREATE TABLE IF NOT EXISTS `prompt_manage`
`sys_code` varchar(255) DEFAULT NULL COMMENT 'system app ',
`created_at` datetime DEFAULT NULL COMMENT 'create time',
`updated_at` datetime DEFAULT NULL COMMENT 'last update time',
`team_mode` varchar(255) NULL COMMENT 'agent team work mode',
PRIMARY KEY (`id`),
UNIQUE KEY `uk_gpts_conversations` (`conv_id`),
KEY `idx_gpts_name` (`gpts_name`)

@@ -230,7 +232,7 @@ CREATE TABLE `gpts_messages` (
`model_name` varchar(255) DEFAULT NULL COMMENT 'message generate model',
`rounds` int(11) NOT NULL COMMENT 'dialogue turns',
`content` text COMMENT 'Content of the speech',
- `current_gogal` text COMMENT 'The target corresponding to the current message',
+ `current_goal` text COMMENT 'The target corresponding to the current message',
`context` text COMMENT 'Current conversation context',
`review_info` text COMMENT 'Current conversation review info',
`action_report` text COMMENT 'Current conversation action report',
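The schema hunk above renames the misspelled `current_gogal` column to `current_goal`, but the hunks shown here do not include a data migration for existing deployments. A minimal one-off sketch for a MySQL backend might look like the following (the connection string is a placeholder, not part of the project):

    from sqlalchemy import create_engine, text

    # Placeholder DSN; point it at the actual MySQL instance backing DB-GPT.
    engine = create_engine("mysql+pymysql://user:password@localhost:3306/dbgpt")
    with engine.begin() as conn:
        # CHANGE COLUMN keeps existing rows while renaming the column (MySQL 5.7+).
        conn.execute(text(
            "ALTER TABLE gpts_messages "
            "CHANGE COLUMN current_gogal current_goal text "
            "COMMENT 'The target corresponding to the current message'"
        ))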
@@ -220,7 +220,7 @@ class ConversableAgent(Agent):
"context",
"action_report",
"review_info",
- "current_gogal",
+ "current_goal",
"model_name",
)
if k in message

@@ -246,7 +246,7 @@ class ConversableAgent(Agent):
receiver=self.name,
role=role,
rounds=self.consecutive_auto_reply_counter,
- current_gogal=oai_message.get("current_gogal", None),
+ current_goal=oai_message.get("current_goal", None),
content=oai_message.get("content", None),
context=json.dumps(oai_message["context"], ensure_ascii=False)
if "context" in oai_message

@@ -458,16 +458,16 @@ class ConversableAgent(Agent):
sender,
rely_messages: Optional[List[Dict]] = None,
):
- current_gogal = current_message.get("current_gogal", None)
+ current_goal = current_message.get("current_goal", None)
### Convert and tailor the information in collective memory into contextual memory available to the current Agent
- current_gogal_messages = self._gpts_message_to_ai_message(
+ current_goal_messages = self._gpts_message_to_ai_message(
self.memory.message_memory.get_between_agents(
- self.agent_context.conv_id, self.name, sender.name, current_gogal
+ self.agent_context.conv_id, self.name, sender.name, current_goal
)
)
- if current_gogal_messages is None or len(current_gogal_messages) <= 0:
+ if current_goal_messages is None or len(current_goal_messages) <= 0:
current_message["role"] = ModelMessageRoleType.HUMAN
- current_gogal_messages = [current_message]
+ current_goal_messages = [current_message]
### relay messages
cut_messages = []
if rely_messages:
@@ -479,13 +479,13 @@ class ConversableAgent(Agent):
else:
cut_messages.extend(self._rely_messages)

- if len(current_gogal_messages) < self.dialogue_memory_rounds:
- cut_messages.extend(current_gogal_messages)
+ if len(current_goal_messages) < self.dialogue_memory_rounds:
+ cut_messages.extend(current_goal_messages)
else:
# TODO: allocate historical information based on token budget
- cut_messages.extend(current_gogal_messages[:2])
+ cut_messages.extend(current_goal_messages[:2])
# end_round = self.dialogue_memory_rounds - 2
- cut_messages.extend(current_gogal_messages[-3:])
+ cut_messages.extend(current_goal_messages[-3:])
return cut_messages

async def a_system_fill_param(self):

@@ -502,7 +502,7 @@ class ConversableAgent(Agent):
## 0.New message build
new_message = {}
new_message["context"] = message.get("context", None)
- new_message["current_gogal"] = message.get("current_gogal", None)
+ new_message["current_goal"] = message.get("current_goal", None)

## 1.LLM Reasonging
await self.a_system_fill_param()

@@ -576,7 +576,7 @@ class ConversableAgent(Agent):
## Send error messages to yourself for retrieval optimization and increase the number of retrievals
retry_message = {}
retry_message["context"] = message.get("context", None)
- retry_message["current_gogal"] = message.get("current_gogal", None)
+ retry_message["current_goal"] = message.get("current_goal", None)
retry_message["model_name"] = message.get("model_name", None)
retry_message["content"] = fail_reason
## Use the original sender to send the retry message to yourself

@@ -603,7 +603,7 @@ class ConversableAgent(Agent):
"context": json.loads(last_message.context)
if last_message.context
else None,
- "current_gogal": last_message.current_gogal,
+ "current_goal": last_message.current_goal,
"review_info": json.loads(last_message.review_info)
if last_message.review_info
else None,
@@ -323,7 +323,7 @@ class ConversableAgent(Role, Agent):
await self.a_send(
{
"content": context["message"],
- "current_gogal": context["message"],
+ "current_goal": context["message"],
},
recipient,
reviewer,

@@ -352,7 +352,7 @@ class ConversableAgent(Role, Agent):
"context",
"action_report",
"review_info",
- "current_gogal",
+ "current_goal",
"model_name",
)
if k in message

@@ -364,7 +364,7 @@ class ConversableAgent(Role, Agent):
receiver=self.profile,
role=role,
rounds=self.consecutive_auto_reply_counter,
- current_gogal=oai_message.get("current_gogal", None),
+ current_goal=oai_message.get("current_goal", None),
content=oai_message.get("content", None),
context=json.dumps(oai_message["context"], ensure_ascii=False)
if "context" in oai_message

@@ -501,7 +501,7 @@ class ConversableAgent(Role, Agent):
"""
new_message = {}
new_message["context"] = recive_message.get("context", None)
- new_message["current_gogal"] = recive_message.get("current_gogal", None)
+ new_message["current_goal"] = recive_message.get("current_goal", None)
return new_message

def _convert_to_ai_message(

@@ -544,19 +544,19 @@ class ConversableAgent(Role, Agent):
sender,
rely_messages: Optional[List[Dict]] = None,
) -> Optional[List[Dict]]:
- current_gogal = receive_message.get("current_gogal", None)
+ current_goal = receive_message.get("current_goal", None)

### Convert and tailor the information in collective memory into contextual memory available to the current Agent
- current_gogal_messages = self._convert_to_ai_message(
+ current_goal_messages = self._convert_to_ai_message(
self.memory.message_memory.get_between_agents(
- self.agent_context.conv_id, self.profile, sender.profile, current_gogal
+ self.agent_context.conv_id, self.profile, sender.profile, current_goal
)
)

# When there is no target and context, the current received message is used as the target problem
- if current_gogal_messages is None or len(current_gogal_messages) <= 0:
+ if current_goal_messages is None or len(current_goal_messages) <= 0:
receive_message["role"] = ModelMessageRoleType.HUMAN
- current_gogal_messages = [receive_message]
+ current_goal_messages = [receive_message]

### relay messages
cut_messages = []
@@ -572,14 +572,14 @@ class ConversableAgent(Role, Agent):
cut_messages.extend(rely_messages)

# TODO: allocate historical information based on token budget
- if len(current_gogal_messages) < 5:
- cut_messages.extend(current_gogal_messages)
+ if len(current_goal_messages) < 5:
+ cut_messages.extend(current_goal_messages)
else:
# For the time being, the smallest size of historical message records will be used by default.
# Use the first two rounds of messages to understand the initial goals
- cut_messages.extend(current_gogal_messages[:2])
+ cut_messages.extend(current_goal_messages[:2])
# Use information from the last three rounds of communication to ensure that current thinking knows what happened and what to do in the last communication
- cut_messages.extend(current_gogal_messages[-3:])
+ cut_messages.extend(current_goal_messages[-3:])
return cut_messages

def _new_system_message(self, content):
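The two hunks above carry the same history-trimming heuristic over to the renamed `current_goal` messages: when the per-goal history is short it is passed through whole, otherwise only the first two and the last three messages are kept. A minimal standalone sketch of that heuristic, with simplified names rather than the project's API:

    from typing import Dict, List

    def trim_goal_messages(goal_messages: List[Dict], max_rounds: int = 5) -> List[Dict]:
        """Keep everything when short; otherwise keep the first 2 and last 3 messages."""
        if len(goal_messages) < max_rounds:
            return list(goal_messages)
        # The first two messages preserve the initial goal; the last three keep the
        # most recent exchange so the agent knows what just happened.
        return goal_messages[:2] + goal_messages[-3:]

    # Example: eight messages collapse to five (indices 0, 1, 5, 6, 7).
    messages = [{"content": f"m{i}"} for i in range(8)]
    print([m["content"] for m in trim_goal_messages(messages)])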
@@ -42,7 +42,7 @@ class CodeAssistantAgent(ConversableAgent):
self._init_actions([CodeAction])

async def a_correctness_check(self, message: Optional[Dict]):
- task_gogal = message.get("current_gogal", None)
+ task_gogal = message.get("current_goal", None)
action_report = message.get("action_report", None)
task_result = ""
if action_report:

@@ -196,7 +196,7 @@ class RetrieveSummaryAssistantAgent(ConversableAgent):
## New message build
new_message = {}
new_message["context"] = current_messages[-1].get("context", None)
- new_message["current_gogal"] = current_messages[-1].get("current_gogal", None)
+ new_message["current_goal"] = current_messages[-1].get("current_goal", None)
new_message["role"] = "assistant"
new_message["content"] = user_question
new_message["model_name"] = model

@@ -206,7 +206,7 @@ class RetrieveSummaryAssistantAgent(ConversableAgent):
## Summary message build
summary_message = {}
summary_message["context"] = message.get("context", None)
- summary_message["current_gogal"] = message.get("current_gogal", None)
+ summary_message["current_goal"] = message.get("current_goal", None)

summaries = ""
count = 0

@@ -262,7 +262,7 @@ class RetrieveSummaryAssistantAgent(ConversableAgent):

async def a_verify(self, message: Optional[Dict]):
self.update_system_message(self.CHECK_RESULT_SYSTEM_MESSAGE)
- current_goal = message.get("current_gogal", None)
+ current_goal = message.get("current_goal", None)
action_report = message.get("action_report", None)
task_result = ""
if action_report:
@@ -35,7 +35,7 @@ class SummaryAssistantAgent(ConversableAgent):
self._init_actions([BlankAction])

# async def a_correctness_check(self, message: Optional[Dict]):
- # current_goal = message.get("current_gogal", None)
+ # current_goal = message.get("current_goal", None)
# action_report = message.get("action_report", None)
# task_result = ""
# if action_report:

@@ -43,7 +43,7 @@ class GptsMessage:
role: str
content: str
rounds: Optional[int]
- current_gogal: str = None
+ current_goal: str = None
context: Optional[str] = None
review_info: Optional[str] = None
action_report: Optional[str] = None

@@ -61,7 +61,7 @@ class GptsMessage:
content=d["content"],
rounds=d["rounds"],
model_name=d["model_name"],
- current_gogal=d["current_gogal"],
+ current_goal=d["current_goal"],
context=d["context"],
review_info=d["review_info"],
action_report=d["action_report"],

@@ -57,7 +57,7 @@ class GptsMessage:
role: str
content: str
rounds: Optional[int]
- current_gogal: str = None
+ current_goal: str = None
context: Optional[str] = None
review_info: Optional[str] = None
action_report: Optional[str] = None

@@ -75,7 +75,7 @@ class GptsMessage:
content=d["content"],
rounds=d["rounds"],
model_name=d["model_name"],
- current_gogal=d["current_gogal"],
+ current_goal=d["current_goal"],
context=d["context"],
review_info=d["review_info"],
action_report=d["action_report"],

@@ -203,7 +203,7 @@ class GptsMessageMemory(ABC):
conv_id: str,
agent1: str,
agent2: str,
- current_gogal: Optional[str] = None,
+ current_goal: Optional[str] = None,
) -> Optional[List[GptsMessage]]:
"""
Query information related to an agent

@@ -100,11 +100,11 @@ class DefaultGptsMessageMemory(GptsMessageMemory):
conv_id: str,
agent1: str,
agent2: str,
- current_gogal: Optional[str] = None,
+ current_goal: Optional[str] = None,
) -> Optional[List[GptsMessage]]:
- if current_gogal:
+ if current_goal:
result = self.df.query(
- f"conv_id==@conv_id and ((sender==@agent1 and receiver==@agent2) or (sender==@agent2 and receiver==@agent1)) and current_gogal==@current_gogal"
+ f"conv_id==@conv_id and ((sender==@agent1 and receiver==@agent2) or (sender==@agent2 and receiver==@agent1)) and current_goal==@current_goal"
)
else:
result = self.df.query(
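For readers unfamiliar with the `@name` syntax in the hunk above, `DataFrame.query()` resolves `@`-prefixed names against local Python variables. A self-contained sketch of the same filter, using toy data and simplified columns (not the project's real memory frame):

    import pandas as pd

    df = pd.DataFrame([
        {"conv_id": "c1", "sender": "planner", "receiver": "coder", "current_goal": "write sql"},
        {"conv_id": "c1", "sender": "coder", "receiver": "planner", "current_goal": "write sql"},
        {"conv_id": "c1", "sender": "planner", "receiver": "reviewer", "current_goal": "check sql"},
    ])

    conv_id, agent1, agent2, current_goal = "c1", "planner", "coder", "write sql"
    result = df.query(
        "conv_id==@conv_id and ((sender==@agent1 and receiver==@agent2) "
        "or (sender==@agent2 and receiver==@agent1)) and current_goal==@current_goal"
    )
    print(len(result))  # 2: both directions between planner and coder for this goal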
@@ -58,7 +58,7 @@ class GptsMemory:
count = count + 1
if count == 1:
continue
- if not message.current_gogal or len(message.current_gogal) <= 0:
+ if not message.current_goal or len(message.current_goal) <= 0:
if len(temp_group) > 0:
vis_items.append(await self._plan_vis_build(temp_group))
temp_group.clear()

@@ -69,7 +69,7 @@ class GptsMemory:
vis_items.append(await self._messages_to_agents_vis(temp_messages))
temp_messages.clear()

- last_gogal = message.current_gogal
+ last_gogal = message.current_goal
temp_group[last_gogal].append(message)

if len(temp_group) > 0:
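The hunk above only changes which attribute the grouping key is read from; the surrounding logic buckets messages by their current goal (the `temp_group[last_gogal].append(...)` pattern suggests a dict of lists). A toy sketch of that bucketing, with assumed simplified message dicts rather than the project's GptsMessage objects:

    from collections import defaultdict

    messages = [
        {"current_goal": "plan step 1", "content": "draft query"},
        {"current_goal": "plan step 1", "content": "run query"},
        {"current_goal": "plan step 2", "content": "summarize"},
    ]

    temp_group = defaultdict(list)
    for message in messages:
        last_goal = message["current_goal"]
        temp_group[last_goal].append(message)

    print({goal: len(items) for goal, items in temp_group.items()})
    # {'plan step 1': 2, 'plan step 2': 1}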
@@ -184,7 +184,7 @@ class GptsMessageStorage(StorageItem):
role: str
content: str
rounds: Optional[int]
- current_gogal: str = None
+ current_goal: str = None
context: Optional[str] = None
review_info: Optional[str] = None
action_report: Optional[str] = None

@@ -204,7 +204,7 @@ class GptsMessageStorage(StorageItem):
content=d["content"],
rounds=d["rounds"],
model_name=d["model_name"],
- current_gogal=d["current_gogal"],
+ current_goal=d["current_goal"],
context=d["context"],
review_info=d["review_info"],
action_report=d["action_report"],

@@ -239,7 +239,7 @@ class GptsMessageStorage(StorageItem):
role=self.role,
content=self.content,
rounds=self.rounds,
- current_gogal=self.current_gogal,
+ current_goal=self.current_goal,
context=self.context,
review_info=self.review_info,
action_report=self.action_report,

@@ -258,7 +258,7 @@ class GptsMessageStorage(StorageItem):
role=gpts_message.role,
content=gpts_message.content,
rounds=gpts_message.rounds,
- current_gogal=gpts_message.current_gogal,
+ current_goal=gpts_message.current_goal,
context=gpts_message.context,
review_info=gpts_message.review_info,
action_report=gpts_message.action_report,

@@ -344,9 +344,9 @@ class GptsMessageManager(GptsMessageMemory):
conv_id: str,
agent1: str,
agent2: str,
- current_gogal: Optional[str] = None,
+ current_goal: Optional[str] = None,
) -> Optional[List[GptsMessage]]:
- return super().get_between_agents(conv_id, agent1, agent2, current_gogal)
+ return super().get_between_agents(conv_id, agent1, agent2, current_goal)

def get_by_conv_id(self, conv_id: str) -> Optional[List[GptsMessage]]:
return super().get_by_conv_id(conv_id)

@@ -40,7 +40,7 @@ class ChatAgent(BaseChat):

# load select plugin
agent_module = CFG.SYSTEM_APP.get_component(
- ComponentType.AGENT_HUB, ModulePlugin
+ ComponentType.PLUGIN_HUB, ModulePlugin
)
self.plugins_prompt_generator = agent_module.load_select_plugin(
self.plugins_prompt_generator, self.select_plugins

@@ -94,10 +94,10 @@ class MetaDbGptsMessageMemory(GptsMessageMemory):
conv_id: str,
agent1: str,
agent2: str,
- current_gogal: Optional[str] = None,
+ current_goal: Optional[str] = None,
) -> Optional[List[GptsMessage]]:
db_results = self.gpts_message.get_between_agents(
- conv_id, agent1, agent2, current_gogal
+ conv_id, agent1, agent2, current_goal
)
results = []
db_results = sorted(db_results, key=lambda x: x.rounds)
@@ -39,7 +39,7 @@ class GptsMessagesEntity(Model):
content = Column(
Text(length=2**31 - 1), nullable=True, comment="Content of the speech"
)
- current_gogal = Column(
+ current_goal = Column(
Text, nullable=True, comment="The target corresponding to the current message"
)
context = Column(Text, nullable=True, comment="Current conversation context")

@@ -78,7 +78,7 @@ class GptsMessagesDao(BaseDao):
model_name=entity.get("model_name", None),
context=entity.get("context", None),
rounds=entity.get("rounds", None),
- current_gogal=entity.get("current_gogal", None),
+ current_goal=entity.get("current_goal", None),
review_info=entity.get("review_info", None),
action_report=entity.get("action_report", None),
)

@@ -120,7 +120,7 @@ class GptsMessagesDao(BaseDao):
conv_id: str,
agent1: str,
agent2: str,
- current_gogal: Optional[str] = None,
+ current_goal: Optional[str] = None,
) -> Optional[List[GptsMessagesEntity]]:
session = self.get_raw_session()
gpts_messages = session.query(GptsMessagesEntity)

@@ -139,9 +139,9 @@ class GptsMessagesDao(BaseDao):
),
)
)
- if current_gogal:
+ if current_goal:
gpts_messages = gpts_messages.filter(
- GptsMessagesEntity.current_gogal == current_gogal
+ GptsMessagesEntity.current_goal == current_goal
)
result = gpts_messages.order_by(GptsMessagesEntity.rounds).all()
session.close()
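Taken together, the DAO hunks above implement a lookup that always filters by conversation and agent pair and only narrows by goal when one is supplied. A self-contained sketch of that pattern with an assumed, simplified stand-in model (SQLAlchemy 1.4+; names are illustrative, not the project's):

    from typing import List, Optional

    from sqlalchemy import Column, Integer, String, Text, and_, or_
    from sqlalchemy.orm import Session, declarative_base

    Base = declarative_base()

    class Message(Base):  # simplified stand-in for GptsMessagesEntity
        __tablename__ = "messages"
        id = Column(Integer, primary_key=True)
        conv_id = Column(String(255))
        sender = Column(String(255))
        receiver = Column(String(255))
        rounds = Column(Integer)
        current_goal = Column(Text)

    def get_between_agents(
        session: Session,
        conv_id: str,
        agent1: str,
        agent2: str,
        current_goal: Optional[str] = None,
    ) -> List[Message]:
        query = session.query(Message).filter(
            Message.conv_id == conv_id,
            or_(
                and_(Message.sender == agent1, Message.receiver == agent2),
                and_(Message.sender == agent2, Message.receiver == agent1),
            ),
        )
        if current_goal:
            # Optional narrowing: only messages tied to this goal.
            query = query.filter(Message.current_goal == current_goal)
        return query.order_by(Message.rounds).all()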
@@ -48,7 +48,7 @@ class AgentOperator(
now_rely_messages: List[Dict] = []

# Isolate the message delivery mechanism and pass it to the operator
- input_value.message["current_gogal"] = (
+ input_value.message["current_goal"] = (
f"[{self._agent.name if self._agent.name else self._agent.profile}]:"
+ input_value.message["content"]
)

@@ -139,14 +139,14 @@ class AwelAgentOperator(
agent = await self.get_agent(input_value)
if agent.fixed_subgoal and len(agent.fixed_subgoal) > 0:
# Isolate the message delivery mechanism and pass it to the operator
- input_value.message["current_gogal"] = (
+ input_value.message["current_goal"] = (
f"[{agent.name if agent.name else agent.profile}]:"
+ agent.fixed_subgoal
)
now_message["content"] = agent.fixed_subgoal
else:
# Isolate the message delivery mechanism and pass it to the operator
- input_value.message["current_gogal"] = (
+ input_value.message["current_goal"] = (
f"[{agent.name if agent.name else agent.profile}]:"
+ input_value.message["content"]
)

@@ -45,7 +45,7 @@ class AwelLayoutChatManager(ManagerAgent):
start_message_context: AgentGenerateContext = AgentGenerateContext(
message={
"content": message,
- "current_gogal": message,
+ "current_goal": message,
},
sender=self,
reviewer=reviewer,

@@ -58,7 +58,7 @@ class AwelLayoutChatNewManager(ManagerAgent):
start_message_context: AgentGenerateContext = AgentGenerateContext(
message={
"content": message,
- "current_gogal": message,
+ "current_goal": message,
},
sender=self,
reviewer=reviewer,

@@ -161,7 +161,7 @@ class AutoPlanChatManager(ManagerAgent):
now_plan: GptsPlan = todo_plans[0]
current_goal_message = {
"content": now_plan.sub_task_content,
- "current_gogal": now_plan.sub_task_content,
+ "current_goal": now_plan.sub_task_content,
"context": {
"plan_task": now_plan.sub_task_content,
"plan_task_num": now_plan.sub_task_num,