refactor:merge Agent_Hub branch

This commit is contained in:
aries_ckt 2023-11-20 16:35:42 +08:00
commit 7b3682ae19
8 changed files with 54 additions and 163 deletions

View File

@ -370,7 +370,8 @@ class ApiCall:
return html
else:
api_call_element = ET.Element("chart-view")
api_call_element.set("content", self.__to_antv_vis_param(api_status))
api_call_element.attrib["content"] = self.__to_antv_vis_param(api_status)
# api_call_element.set("content", self.__to_antv_vis_param(api_status))
# api_call_element.text = self.__to_antv_vis_param(api_status)
result = ET.tostring(api_call_element, encoding="utf-8")
return result.decode("utf-8")

View File

@ -158,8 +158,15 @@ class LLMModelAdaper:
else:
raise ValueError(f"Unknown role: {role}")
can_use_system = ""
can_use_systems:[] = []
if system_messages:
if len(system_messages) > 1:
## Compatible with dbgpt complex scenarios, the last system will protect more complete information entered by the current user
user_messages[-1] = system_messages[-1]
can_use_systems = system_messages[:-1]
else:
can_use_systems = system_messages
for i in range(len(user_messages)):
# TODO vicuna 兼容 测试完放弃
user_messages[-1] = system_messages[-1]
if len(system_messages) > 1:
@ -171,8 +178,10 @@ class LLMModelAdaper:
conv.append_message(conv.roles[1], ai_messages[i])
if isinstance(conv, Conversation):
conv.set_system_message(can_use_system)
conv.set_system_message("".join(can_use_systems))
else:
conv.update_system_message("".join(can_use_systems))
conv.update_system_message(can_use_system)
# Add a blank message for the assistant.

View File

@ -58,6 +58,42 @@ def _initialize_openai(params: ProxyModelParameters):
return openai_params
def __convert_2_gpt_messages(messages: List[ModelMessage]):
    """Convert a ModelMessage history into the OpenAI GPT chat format.

    Walks the history pairing each HUMAN message with the AI reply that
    follows it, collects SYSTEM messages separately, then appends one final
    "user" turn built from the system / trailing-human content.

    Args:
        messages: Ordered conversation history (HUMAN / SYSTEM / AI roles).

    Returns:
        Tuple ``(gpt_messages, system_messages)`` where ``gpt_messages`` is a
        list of ``{"role": ..., "content": ...}`` dicts and
        ``system_messages`` is the list of raw SYSTEM message contents.
    """
    gpt_messages = []
    last_usr_message = ""
    system_messages = []
    for message in messages:
        if message.role == ModelMessageRoleType.HUMAN:
            last_usr_message = message.content
        elif message.role == ModelMessageRoleType.SYSTEM:
            system_messages.append(message.content)
        elif message.role == ModelMessageRoleType.AI:
            last_ai_message = message.content
            gpt_messages.append({"role": "user", "content": last_usr_message})
            gpt_messages.append({"role": "assistant", "content": last_ai_message})
    # Build the last user message.
    if system_messages:
        if len(system_messages) > 1:
            # Presumably the last system message carries the effective user
            # request in multi-system scenarios — TODO confirm with callers.
            end_message = system_messages[-1]
        else:
            last_message = messages[-1]
            if last_message.role == ModelMessageRoleType.HUMAN:
                # Single system message: prepend it to the trailing human turn.
                end_message = system_messages[-1] + "\n" + last_message.content
            else:
                end_message = system_messages[-1]
    else:
        last_message = messages[-1]
        end_message = last_message.content
    gpt_messages.append({"role": "user", "content": end_message})
    return gpt_messages, system_messages
def _initialize_openai_v1(params: ProxyModelParameters):
try:
from openai import OpenAI

View File

@ -1,64 +0,0 @@
import os

import autogen
from autogen import oai, AssistantAgent, UserProxyAgent, config_list_from_json

# Demo script: one assistant agent plus a human-in-the-loop user proxy.
#
# NOTE(review): an OpenAI API key was previously hard-coded here. A secret
# committed to source control must be treated as leaked and rotated; read it
# from the environment instead.
config_list = [
    {
        "model": "gpt-3.5-turbo",
        "api_base": "http://43.156.9.162:3001/api/openai/v1",
        "api_type": "open_ai",
        "api_key": os.environ.get("OPENAI_API_KEY", ""),
    }
]

llm_config = {
    "request_timeout": 600,
    "seed": 45,  # change the seed for different trials
    "config_list": config_list,
    "temperature": 0,
    "max_tokens": 3000,
}

assistant = autogen.AssistantAgent(
    name="assistant",
    llm_config=llm_config,
    # `content` may be None (e.g. pure function-call messages), so guard
    # before the `in` test to avoid a TypeError.
    is_termination_msg=lambda x: "TERMINATE" in (x.get("content") or ""),
)

# Create a user-proxy agent named "user_proxy", configured here so a human
# can intervene on termination.
user_proxy = autogen.UserProxyAgent(
    name="user_proxy",
    human_input_mode="TERMINATE",
    max_consecutive_auto_reply=1,
    # Same None-content guard as above.
    is_termination_msg=lambda x: (x.get("content") or "").rstrip().endswith("TERMINATE"),
    code_execution_config={"work_dir": "web"},
    llm_config=llm_config,
    system_message="""Reply TERMINATE if the task has been solved at full satisfaction.
Otherwise, reply CONTINUE, or the reason why the task is not solved yet.""",
)

task1 = """今天是星期几?,还有几天周末?请告诉我答案。"""

if __name__ == "__main__":
    user_proxy.initiate_chat(assistant, message=task1)
    # NOTE(review): removed a commented-out `oai.Completion.create` experiment
    # that embedded a second hard-coded API key.

View File

@ -1,94 +0,0 @@
import os

import autogen

# NOTE(review): an OpenAI API key was previously hard-coded here. A secret
# committed to source control must be treated as leaked and rotated; read it
# from the environment instead.
config_list = [
    {
        "model": "gpt-3.5-turbo",
        "api_base": "http://43.156.9.162:3001/api/openai/v1",
        "api_type": "open_ai",
        "api_key": os.environ.get("OPENAI_API_KEY", ""),
    }
]

# Function-calling schema advertised to the model: a Python cell runner and a
# shell-script runner (implemented below as exec_python / exec_sh).
llm_config = {
    "functions": [
        {
            "name": "python",
            "description": "run cell in ipython and return the execution result.",
            "parameters": {
                "type": "object",
                "properties": {
                    "cell": {
                        "type": "string",
                        "description": "Valid Python cell to execute.",
                    }
                },
                "required": ["cell"],
            },
        },
        {
            "name": "sh",
            "description": "run a shell script and return the execution result.",
            "parameters": {
                "type": "object",
                "properties": {
                    "script": {
                        "type": "string",
                        "description": "Valid shell script to execute.",
                    }
                },
                "required": ["script"],
            },
        },
    ],
    "config_list": config_list,
    "request_timeout": 120,
    "max_tokens": 3000,
}

chatbot = autogen.AssistantAgent(
    name="chatbot",
    system_message="For coding tasks, only use the functions you have been provided with. Reply TERMINATE when the task is done.",
    llm_config=llm_config,
)

# create a UserProxyAgent instance named "user_proxy"
user_proxy = autogen.UserProxyAgent(
    name="user_proxy",
    # The `and` short-circuits when content is empty/None, so this is safe.
    is_termination_msg=lambda x: x.get("content", "")
    and x.get("content", "").rstrip().endswith("TERMINATE"),
    human_input_mode="NEVER",
    max_consecutive_auto_reply=10,
    code_execution_config={"work_dir": "coding"},
)

# define functions according to the function description
from IPython import get_ipython
def exec_python(cell):
    """Run *cell* in the active IPython shell and return a textual log.

    The log starts with the cell's result; any pre-execution or in-execution
    error is appended on its own line.
    """
    shell = get_ipython()
    outcome = shell.run_cell(cell)
    log = str(outcome.result)
    # Append errors in the same order the original checked them.
    for err in (outcome.error_before_exec, outcome.error_in_exec):
        if err is not None:
            log += f"\n{err}"
    return log
def exec_sh(script):
    """Execute *script* as a shell code block via the user proxy agent."""
    blocks = [("sh", script)]
    return user_proxy.execute_code_blocks(blocks)
# Wire the callable implementations to the function names advertised in
# llm_config, then start the conversation when run as a script.
_function_map = {
    "python": exec_python,
    "sh": exec_sh,
}
user_proxy.register_function(function_map=_function_map)

if __name__ == "__main__":
    # start the conversation
    user_proxy.initiate_chat(
        chatbot,
        message="Draw two agents chatting with each other with an example dialog. Don't add plt.show().",
    )

View File

@ -19,7 +19,7 @@ Constraint:
2.Please choose the best one from the display methods given below for data rendering, and put the type name into the name parameter value that returns the required format. If you cannot find the most suitable one, use 'Table' as the display method. The available data display methods are as follows: {disply_type}
3.The table name that needs to be used in SQL is: {table_name}. Please check the sql you generated and do not use column names that are not in the data structure.
4.Give priority to answering using data analysis. If the user's question does not involve data analysis, you can answer according to your understanding.
5.The <api-call></api-call> part of the required output format needs to be parsed by the code. Please ensure that this part of the content is output as required.
5.All analysis sql content is converted to: <api-call><name>[data display method]</name><args><sql>[correct duckdb data analysis sql]</sql></args></api-call> content like this, and answer in the following format.
Please respond in the following format:
thoughts summary to say to user.<api-call><name>[Data display method]</name><args><sql>[Correct duckdb data analysis sql]</sql></args></api-call>
@ -36,7 +36,8 @@ _DEFAULT_TEMPLATE_ZH = """
2.请从如下给出的展示方式种选择最优的一种用以进行数据渲染将类型名称放入返回要求格式的name参数值种如果找不到最合适的则使用'Table'作为展示方式可用数据展示方式如下: {disply_type}
3.SQL中需要使用的表名是: {table_name},请检查你生成的sql不要使用没在数据结构中的列名
4.优先使用数据分析的方式回答如果用户问题不涉及数据分析内容你可以按你的理解进行回答
5.要求的输出格式中<api-call></api-call>部分需要被代码解析执行请确保这部分内容按要求输出不要参考历史信息的返回格式请按下面要求返回
5.所有分析sql内容都转换为<api-call><name>[数据展示方式]</name><args><sql>[正确的duckdb数据分析sql]</sql></args></api-call>这样的内容, 并按下面要求格式回答
请确保你的输出内容格式如下:
对用户说的想法摘要.<api-call><name>[数据展示方式]</name><args><sql>[正确的duckdb数据分析sql]</sql></args></api-call>

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long