Gpts app v0.4 (#1169)
@@ -75,11 +75,13 @@ class DashboardAction(Action[List[ChartItem]]):
                 sql_df = await resource_db_client.a_query_to_df(
                     resource.value, chart_item.sql
                 )
-                chart_item["data"] = sql_df
+                chart_dict = chart_item.dict()
+
+                chart_dict["data"] = sql_df
             except Exception as e:
                 logger.warn(f"Sql excute Failed!{str(e)}")
-                chart_item["err_msg"] = str(e)
-            chart_params.append(chart_item)
+                chart_dict["err_msg"] = str(e)
+            chart_params.append(chart_dict)
         view = await self.render_protocal.disply(charts=chart_params)
         return ActionOutput(
             is_exe_success=True,
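
A side note on the hunk above: item-style assignment on a Pydantic model (chart_item["data"] = ...) does not behave like a dict, so the change copies the declared fields into a plain dict first and attaches the query result there. A minimal sketch of that pattern, assuming a Pydantic v1-style model with .dict(); the ChartItem fields shown are illustrative only:

import pandas as pd
from pydantic import BaseModel


class ChartItem(BaseModel):  # hypothetical shape, only for illustration
    title: str
    sql: str


item = ChartItem(title="orders per day", sql="SELECT 1 AS v")
chart_dict = item.dict()                        # plain dict copy of the declared fields
chart_dict["data"] = pd.DataFrame({"v": [1]})   # a dict accepts arbitrary objects such as a DataFrame
chart_dict["err_msg"] = None                    # extra keys are fine on the dict copy
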
@@ -17,6 +17,7 @@ class DashboardAssistantAgent(ConversableAgent):
     constraints: List[str] = [
         "You are only responsible for collecting and sorting out the analysis SQL that already exists in historical messages, and do not generate any analysis sql yourself.",
         "In order to build a report with rich display types, you can appropriately adjust the display type of the charts you collect so that you can build a better report. Of course, you can choose from the following available display types: {display_type}",
+        "Please read and completely collect all analysis sql in the historical conversation, and do not omit or modify the content of the analysis sql.",
     ]
     desc: str = "Observe and organize various analysis results and construct professional reports"
@@ -12,6 +12,8 @@ class Role(ABC, BaseModel):
 
     expand_prompt: str = ""
 
+    fixed_subgoal: Optional[str] = None
+
     constraints: List[str] = []
     examples: str = ""
     desc: str = ""
@@ -19,8 +21,8 @@ class Role(ABC, BaseModel):
     is_human: bool = False
     is_team: bool = False
 
-    def __init__(self, **kwargs):
-        super().__init__(**kwargs)
+    class Config:
+        arbitrary_types_allowed = True
 
     def prompt_template(
         self,
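
The dropped __init__ only forwarded **kwargs to BaseModel, which Pydantic already does; the inner Config with arbitrary_types_allowed is what lets the model declare fields whose types Pydantic cannot validate on its own. A minimal sketch, assuming Pydantic v1 semantics; every field except fixed_subgoal is invented for illustration:

from typing import Optional
from pydantic import BaseModel


class SomeClient:  # hypothetical non-Pydantic type
    pass


class DemoRole(BaseModel):
    profile: str = ""
    fixed_subgoal: Optional[str] = None
    client: Optional[SomeClient] = None  # rejected by validation without the Config below

    class Config:
        arbitrary_types_allowed = True


role = DemoRole(profile="Reporter", client=SomeClient())  # BaseModel.__init__ already handles **kwargs
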
@@ -32,16 +34,16 @@ class Role(ABC, BaseModel):
         template = f"""
         {self.role_prompt}
         Please think step by step to achieve the goal. You can use the resources given below. At the same time, please strictly abide by the constraints and specifications in IMPORTANT REMINDER.
 
         {{resource_prompt}}
 
-        {self.expand_prompt if len(self.expand_prompt)>0 else ""}
+        {self.expand_prompt if len(self.expand_prompt) > 0 else ""}
 
         *** IMPORTANT REMINDER ***
         {self.language_require_prompt}
         {self.constraints_prompt}
 
-        {'You can refer to the following examples:' if len(self.examples) > 0 else ""}
+        {'You can refer to the following examples:' if len(self.examples) > 0 else ""}
         {self.examples if len(self.examples) > 0 else ""}
 
         {{out_schema}}
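
For context on the template lines above: the f-string fills the role-specific pieces immediately, optional sections collapse to empty strings, and doubled braces survive as literal {resource_prompt} / {out_schema} placeholders for a later formatting pass (shown here with str.format). A small self-contained sketch of that pattern; the sample values are made up:

expand_prompt = ""
examples = "Q: total users?\nA: SELECT COUNT(*) FROM users"

template = f"""
Please think step by step to achieve the goal.

{{resource_prompt}}

{expand_prompt if len(expand_prompt) > 0 else ""}

{'You can refer to the following examples:' if len(examples) > 0 else ""}
{examples if len(examples) > 0 else ""}

{{out_schema}}
"""

print(template.format(resource_prompt="Resources: sales_db", out_schema="Reply in JSON."))
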
@@ -61,7 +63,7 @@ class Role(ABC, BaseModel):
     def constraints_prompt(self):
         if len(self.constraints) > 0:
             return "\n".join(
-                f"{i+1}. {item}" for i, item in enumerate(self.constraints)
+                f"{i + 1}. {item}" for i, item in enumerate(self.constraints)
            )
 
     @property
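
The only change in this hunk is whitespace inside the f-string; the helper itself renders the constraints as a 1-based numbered list. A tiny standalone illustration:

constraints = [
    "Only collect analysis SQL that already exists in historical messages.",
    "Do not omit or modify the content of the analysis sql.",
]

numbered = "\n".join(f"{i + 1}. {item}" for i, item in enumerate(constraints))
print(numbered)
# 1. Only collect analysis SQL that already exists in historical messages.
# 2. Do not omit or modify the content of the analysis sql.
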
@@ -135,22 +135,30 @@ class AwelAgentOperator(
         self,
         input_value: AgentGenerateContext,
     ) -> AgentGenerateContext:
+        now_rely_messages: List[Dict] = []
+
+        now_message = input_value.message
         agent = await self.get_agent(input_value)
+        if agent.fixed_subgoal and len(agent.fixed_subgoal) > 0:
+            # Isolate the message delivery mechanism and pass it to the operator
+            input_value.message["current_gogal"] = (
+                f"[{agent.name if agent.name else agent.profile}]:"
+                + agent.fixed_subgoal
+            )
+            now_message["content"] = agent.fixed_subgoal
+        else:
+            # Isolate the message delivery mechanism and pass it to the operator
+            input_value.message["current_gogal"] = (
+                f"[{agent.name if agent.name else agent.profile}]:"
+                + input_value.message["content"]
+            )
 
-        # Isolate the message delivery mechanism and pass it to the operator
-        input_value.message["current_gogal"] = (
-            f"[{agent.name if agent.name else agent.profile}]:"
-            + input_value.message["content"]
-        )
-        now_rely_messages: List[Dict] = []
         ###What was received was the User message
         human_message = input_value.message.copy()
         human_message["role"] = ModelMessageRoleType.HUMAN
         now_rely_messages.append(human_message)
 
         ###Send a message (no reply required) and pass the message content
-        now_message = input_value.message
 
         if input_value.rely_messages and len(input_value.rely_messages) > 0:
             now_message = input_value.rely_messages[-1]
         await input_value.sender.a_send(now_message, agent, input_value.reviewer, False)
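
To make the branching above easier to follow: when the resolved agent carries a fixed_subgoal, that text becomes both the current goal and the outgoing content; otherwise the incoming content is kept and only prefixed with the agent's name or profile. A hedged sketch of just that routing decision, treating the message as a plain dict (the key current_gogal is spelled as in the diff):

from typing import Optional


def route_goal(message: dict, name: str, profile: str, fixed_subgoal: Optional[str]) -> dict:
    prefix = f"[{name if name else profile}]:"
    if fixed_subgoal and len(fixed_subgoal) > 0:
        message["current_gogal"] = prefix + fixed_subgoal  # fixed goal overrides the incoming content
        message["content"] = fixed_subgoal
    else:
        message["current_gogal"] = prefix + message["content"]  # keep the incoming content as the goal
    return message


print(route_goal({"content": "analyse sales"}, "Reporter", "Dashboard", None))
print(route_goal({"content": "analyse sales"}, "", "Dashboard", "Summarize yesterday's sales"))
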
@@ -200,9 +208,13 @@ class AwelAgentOperator(
             llm_config = LLMConfig(llm_client=input_value.llm_client)
         else:
             llm_config = LLMConfig(llm_client=self.llm_client)
 
+        kwargs = {}
+        if self.awel_agent.role_name:
+            kwargs["name"] = self.awel_agent.role_name
+        if self.awel_agent.fixed_subgoal:
+            kwargs["fixed_subgoal"] = self.awel_agent.fixed_subgoal
         agent = (
-            await agent_cls(name=self.awel_agent.role_name)
+            await agent_cls(**kwargs)
             .bind(input_value.memory)
             .bind(llm_config)
             .bind(input_value.agent_context)
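
The replacement of agent_cls(name=...) with agent_cls(**kwargs) follows the usual conditional-kwargs pattern: only pass the constructor arguments that are actually configured, so class defaults apply otherwise. A short sketch with a stand-in agent class (DemoAgent is invented for illustration):

from typing import Any, Dict, Optional


class DemoAgent:  # stand-in for the real agent class
    def __init__(self, name: str = "agent", fixed_subgoal: Optional[str] = None):
        self.name = name
        self.fixed_subgoal = fixed_subgoal


role_name: Optional[str] = None
fixed_subgoal: Optional[str] = "Summarize yesterday's sales"

kwargs: Dict[str, Any] = {}
if role_name:
    kwargs["name"] = role_name
if fixed_subgoal:
    kwargs["fixed_subgoal"] = fixed_subgoal

agent = DemoAgent(**kwargs)  # name falls back to its default because role_name was not set
print(agent.name, agent.fixed_subgoal)
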
@@ -139,6 +139,14 @@ class AwelAgentConfig(LLMConfig):
             default=None,
             description="The agent role name.",
         ),
+        Parameter.build_from(
+            label="Fixed Gogal",
+            name="fixed_subgoal",
+            type=str,
+            optional=True,
+            default=None,
+            description="The agent fixed gogal.",
+        ),
         Parameter.build_from(
             label="Agent Resource",
             name="agent_resource",
@@ -162,6 +170,7 @@ class AwelAgent(BaseModel):
     role_name: Optional[str] = None
     llm_config: Optional[LLMConfig] = None
     resources: List[AgentResource] = Field(default_factory=list)
+    fixed_subgoal: Optional[str] = None
 
     class Config:
         arbitrary_types_allowed = True
@@ -117,6 +117,7 @@ class AutoPlanChatManager(ManagerAgent):
         reviewer: Optional[ConversableAgent] = None,
     ) -> Optional[ActionOutput]:
         speaker = sender
+        final_message = message
         for i in range(self.max_round):
             plans = self.memory.plans_memory.get_by_conv_id(self.agent_context.conv_id)
 
@@ -153,7 +154,7 @@ class AutoPlanChatManager(ManagerAgent):
                     # complete
                     return ActionOutput(
                         is_exe_success=True,
-                        content=f"{plans[-1].result}",  # work results message
+                        content=final_message,  # work results message
                     )
                 else:
                     try:
@@ -201,11 +202,14 @@ class AutoPlanChatManager(ManagerAgent):
                         )
 
                         plan_result = ""
+                        final_message = reply_message["content"]
                         if is_success:
                             if reply_message:
                                 action_report = reply_message.get("action_report", None)
                                 if action_report:
                                     plan_result = action_report.get("content", "")
+                                    final_message = action_report["view"]
+
                             ### The current planned Agent generation verification is successful
                             ##Plan executed successfully
                             self.memory.plans_memory.complete_task(
@@ -213,7 +217,6 @@ class AutoPlanChatManager(ManagerAgent):
                                 now_plan.sub_task_num,
                                 plan_result,
                             )
-
                         else:
                             plan_result = reply_message["content"]
                             self.memory.plans_memory.update_task(
@@ -228,6 +231,7 @@ class AutoPlanChatManager(ManagerAgent):
                             return ActionOutput(
                                 is_exe_success=False, content=plan_result
                             )
+
                 except Exception as e:
                     logger.exception(
                         f"An exception was encountered during the execution of the current plan step.{str(e)}"
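
The thread through these three AutoPlanChatManager hunks is that the manager now tracks a final_message alongside plan_result: the raw reply content is the fallback, and when the step produced an action_report, its rendered view becomes the message returned at the end instead of the last plan's result string. A minimal sketch of that extraction, assuming the reply and action_report are plain dicts shaped as in the diff:

from typing import Optional, Tuple


def extract_result(reply_message: dict) -> Tuple[str, str]:
    plan_result = ""
    final_message = reply_message["content"]              # fallback: the raw reply text
    action_report: Optional[dict] = reply_message.get("action_report", None)
    if action_report:
        plan_result = action_report.get("content", "")    # stored as the plan's result
        final_message = action_report["view"]             # rendered view wins for the final answer
    return plan_result, final_message


print(extract_result({"content": "raw text"}))
print(extract_result({"content": "raw text", "action_report": {"content": "rows", "view": "<chart/>"}}))
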
@@ -1,11 +1,14 @@
 import json
+import logging
 from typing import Optional
 
 from ..base import Vis
 
+logger = logging.getLogger(__name__)
+
 
 class VisDashboard(Vis):
-    async def generate_content(self, **kwargs) -> Optional[str]:
+    async def generate_param(self, **kwargs) -> Optional[str]:
         charts = kwargs.get("charts", None)
         title = kwargs.get("title", None)
         if not charts:
@@ -24,14 +27,14 @@ class VisDashboard(Vis):
             try:
                 df = chart.get("data", None)
                 err_msg = chart.get("err_msg", None)
-                if not df:
+                if df is None:
                     param["err_msg"] = err_msg
                 else:
                     param["data"] = json.loads(
                         df.to_json(orient="records", date_format="iso", date_unit="s")
                     )
             except Exception as e:
                 logger.exception("dashboard chart build faild!")
                 param["data"] = []
                 param["err_msg"] = str(e)
             chart_items.append(param)
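
The switch from "if not df:" to "if df is None:" matters because truth-testing a pandas DataFrame raises ValueError (its truth value is ambiguous), and an empty DataFrame is still a valid result that should be serialized rather than reported as an error. A small demonstration of the difference:

import pandas as pd

df = pd.DataFrame({"v": []})  # empty, but not None

try:
    if not df:  # old check: pandas refuses to truth-test a DataFrame
        print("treated as missing")
except ValueError as e:
    print("truth test failed:", e)

if df is None:  # new check: only a genuinely missing result is an error
    print("missing result")
else:
    print(df.to_json(orient="records", date_format="iso", date_unit="s"))  # prints []
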