mirror of
https://github.com/csunny/DB-GPT.git
synced 2025-07-31 15:47:05 +00:00
feat(ChatDB): ChatDB English Prompt fix
1.ChatDB English Promopt fix
This commit is contained in:
parent
d38c9c7cb1
commit
8708a11561
@ -158,24 +158,23 @@ class LLMModelAdaper:
|
||||
else:
|
||||
raise ValueError(f"Unknown role: {role}")
|
||||
|
||||
can_use_system = ""
|
||||
can_use_systems:[] = []
|
||||
if system_messages:
|
||||
# TODO vicuna 兼容 测试完放弃
|
||||
user_messages[-1] = system_messages[-1]
|
||||
if len(system_messages) > 1:
|
||||
can_use_system = system_messages[0]
|
||||
|
||||
## Compatible with dbgpt complex scenarios, the last system will protect more complete information entered by the current user
|
||||
user_messages[-1] = system_messages[-1]
|
||||
can_use_systems = system_messages[:-1]
|
||||
else:
|
||||
can_use_systems = system_messages
|
||||
for i in range(len(user_messages)):
|
||||
conv.append_message(conv.roles[0], user_messages[i])
|
||||
if i < len(ai_messages):
|
||||
conv.append_message(conv.roles[1], ai_messages[i])
|
||||
|
||||
if isinstance(conv, Conversation):
|
||||
conv.set_system_message(can_use_system)
|
||||
conv.set_system_message("".join(can_use_systems))
|
||||
else:
|
||||
conv.update_system_message(can_use_system)
|
||||
|
||||
|
||||
conv.update_system_message("".join(can_use_systems))
|
||||
|
||||
# Add a blank message for the assistant.
|
||||
conv.append_message(conv.roles[1], None)
|
||||
|
@ -57,6 +57,42 @@ def _initialize_openai(params: ProxyModelParameters):
|
||||
return openai_params
|
||||
|
||||
|
||||
def __convert_2_gpt_messages(messages: List[ModelMessage]):
    """Convert a ModelMessage history into OpenAI-style chat messages.

    Walks the history pairing each HUMAN message with the AI message that
    follows it, collects SYSTEM messages separately, and finally appends a
    closing "user" message built from the trailing system/human content.

    Args:
        messages: Conversation history with HUMAN / SYSTEM / AI roles.

    Returns:
        Tuple ``(gpt_messages, system_messages)``: ``gpt_messages`` is a list
        of ``{"role": ..., "content": ...}`` dicts ready for the GPT API, and
        ``system_messages`` is the raw list of system message contents.
    """
    gpt_messages = []
    last_usr_message = ""
    system_messages = []

    for message in messages:
        if message.role == ModelMessageRoleType.HUMAN:
            # Remember the latest user turn; it is emitted when the paired
            # AI answer arrives (or folded into the final message below).
            last_usr_message = message.content
        elif message.role == ModelMessageRoleType.SYSTEM:
            system_messages.append(message.content)
        elif message.role == ModelMessageRoleType.AI:
            # A completed round: emit the user/assistant pair together.
            gpt_messages.append({"role": "user", "content": last_usr_message})
            gpt_messages.append({"role": "assistant", "content": message.content})

    # Build the last user message.
    if system_messages:
        if len(system_messages) > 1:
            # Multiple system messages: the last one closes the prompt alone.
            end_message = system_messages[-1]
        else:
            # Single system message: prepend it to a trailing human message
            # when one exists, otherwise send it by itself.
            last_message = messages[-1]
            if last_message.role == ModelMessageRoleType.HUMAN:
                end_message = system_messages[-1] + "\n" + last_message.content
            else:
                end_message = system_messages[-1]
    else:
        # No system messages: the final history entry closes the prompt.
        # NOTE(review): assumes `messages` is non-empty — an empty list
        # raises IndexError here, same as the original; confirm callers
        # never pass an empty history.
        last_message = messages[-1]
        end_message = last_message.content
    gpt_messages.append({"role": "user", "content": end_message})
    return gpt_messages, system_messages
|
||||
|
||||
|
||||
def _build_request(model: ProxyModel, params):
|
||||
history = []
|
||||
|
||||
|
@ -19,7 +19,7 @@ Constraint:
|
||||
2.Please choose the best one from the display methods given below for data rendering, and put the type name into the name parameter value that returns the required format. If you cannot find the most suitable one, use 'Table' as the display method. The available data display methods are as follows: {disply_type}
|
||||
3.The table name that needs to be used in SQL is: {table_name}. Please check the sql you generated and do not use column names that are not in the data structure.
|
||||
4.Give priority to answering using data analysis. If the user's question does not involve data analysis, you can answer according to your understanding.
|
||||
5.The <api-call></api-call> part of the required output format needs to be parsed by the code. Please ensure that this part of the content is output as required.
|
||||
6.All analysis sql content is converted to: <api-call><name>[data display method]</name><args><sql>[correct duckdb data analysis sql]</sql></args></api-call> content like this, and answer in the following format.
|
||||
|
||||
Please respond in the following format:
|
||||
thoughts summary to say to user.<api-call><name>[Data display method]</name><args><sql>[Correct duckdb data analysis sql]</sql></args></api-call>
|
||||
@ -36,7 +36,8 @@ _DEFAULT_TEMPLATE_ZH = """
|
||||
2.请从如下给出的展示方式中选择最优的一种用以进行数据渲染,将类型名称放入返回要求格式的name参数值中,如果找不到最合适的则使用'Table'作为展示方式,可用数据展示方式如下: {disply_type}
|
||||
3.SQL中需要使用的表名是: {table_name},请检查你生成的sql,不要使用没在数据结构中的列名。
|
||||
4.优先使用数据分析的方式回答,如果用户问题不涉及数据分析内容,你可以按你的理解进行回答
|
||||
5.要求的输出格式中<api-call></api-call>部分需要被代码解析执行,请确保这部分内容按要求输出,不要参考历史信息的返回格式,请按下面要求返回
|
||||
6.所有分析sql内容,都转换为:<api-call><name>[数据展示方式]</name><args><sql>[正确的duckdb数据分析sql]</sql></args></api-call>这样的内容, 并按下面要求格式回答
|
||||
|
||||
请确保你的输出内容格式如下:
|
||||
对用户说的想法摘要.<api-call><name>[数据展示方式]</name><args><sql>[正确的duckdb数据分析sql]</sql></args></api-call>
|
||||
|
||||
|
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@ -1 +1 @@
|
||||
self.__BUILD_MANIFEST=function(s,c,a,e,t,d,n,b,f,k,h,i){return{__rewrites:{beforeFiles:[],afterFiles:[],fallback:[]},"/":["static/chunks/29107295-90b90cb30c825230.js",s,c,a,d,n,b,"static/chunks/539-dcd22f1f6b99ebee.js","static/chunks/pages/index-b1c8f59fe7e5d7df.js"],"/_error":["static/chunks/pages/_error-dee72aff9b2e2c12.js"],"/agent":[s,c,e,f,t,d,"static/chunks/pages/agent-762425b419303d9d.js"],"/chat":["static/chunks/pages/chat-b1a399159cf29d92.js"],"/chat/[scene]/[id]":["static/chunks/pages/chat/[scene]/[id]-760bb41f43cab145.js"],"/database":[s,c,a,e,t,n,k,"static/chunks/643-d8f53f40dd3c5b40.js","static/chunks/pages/database-b4f32916b9d484a7.js"],"/knowledge":[h,s,c,e,f,t,d,n,"static/chunks/109-0dace28dd2667396.js","static/chunks/pages/knowledge-dc52dba3c1c37db4.js"],"/knowledge/chunk":[e,t,"static/chunks/pages/knowledge/chunk-765a4b202d79ac28.js"],"/models":[h,s,c,a,i,k,"static/chunks/pages/models-fa9ad76f43cc7a5f.js"],"/prompt":[s,c,a,i,"static/chunks/837-e6d4d1eb9e057050.js",b,"static/chunks/607-b224c640f6907e4b.js","static/chunks/pages/prompt-7f839dfd56bc4c20.js"],sortedPages:["/","/_app","/_error","/agent","/chat","/chat/[scene]/[id]","/database","/knowledge","/knowledge/chunk","/models","/prompt"]}}("static/chunks/64-91b49d45b9846775.js","static/chunks/479-b20198841f9a6a1e.js","static/chunks/9-bb2c54d5c06ba4bf.js","static/chunks/442-197e6cbc1e54109a.js","static/chunks/813-cce9482e33f2430c.js","static/chunks/924-ba8e16df4d61ff5c.js","static/chunks/411-d9eba2657c72f766.js","static/chunks/270-2f094a936d056513.js","static/chunks/365-a224ec0807392b35.js","static/chunks/928-74244889bd7f2699.js","static/chunks/75fc9c18-a784766a129ec5fb.js","static/chunks/947-5980a3ff49069ddd.js"),self.__BUILD_MANIFEST_CB&&self.__BUILD_MANIFEST_CB();
|
||||
self.__BUILD_MANIFEST=function(s,c,a,e,t,d,n,b,f,k,h,i){return{__rewrites:{beforeFiles:[],afterFiles:[],fallback:[]},"/":["static/chunks/29107295-90b90cb30c825230.js",s,c,a,d,n,b,"static/chunks/539-dcd22f1f6b99ebee.js","static/chunks/pages/index-b1c8f59fe7e5d7df.js"],"/_error":["static/chunks/pages/_error-dee72aff9b2e2c12.js"],"/agent":[s,c,e,f,t,d,"static/chunks/pages/agent-762425b419303d9d.js"],"/chat":["static/chunks/pages/chat-b1a399159cf29d92.js"],"/chat/[scene]/[id]":["static/chunks/pages/chat/[scene]/[id]-760bb41f43cab145.js"],"/database":[s,c,a,e,t,n,k,"static/chunks/643-d8f53f40dd3c5b40.js","static/chunks/pages/database-b4f32916b9d484a7.js"],"/knowledge":[h,s,c,e,f,t,d,n,"static/chunks/109-0dace28dd2667396.js","static/chunks/pages/knowledge-258ebd9530cbbd39.js"],"/knowledge/chunk":[e,t,"static/chunks/pages/knowledge/chunk-9f117a5ed799edd3.js"],"/models":[h,s,c,a,i,k,"static/chunks/pages/models-fa9ad76f43cc7a5f.js"],"/prompt":[s,c,a,i,"static/chunks/837-e6d4d1eb9e057050.js",b,"static/chunks/607-b224c640f6907e4b.js","static/chunks/pages/prompt-7f839dfd56bc4c20.js"],sortedPages:["/","/_app","/_error","/agent","/chat","/chat/[scene]/[id]","/database","/knowledge","/knowledge/chunk","/models","/prompt"]}}("static/chunks/64-91b49d45b9846775.js","static/chunks/479-b20198841f9a6a1e.js","static/chunks/9-bb2c54d5c06ba4bf.js","static/chunks/442-197e6cbc1e54109a.js","static/chunks/813-cce9482e33f2430c.js","static/chunks/924-ba8e16df4d61ff5c.js","static/chunks/411-d9eba2657c72f766.js","static/chunks/270-2f094a936d056513.js","static/chunks/365-a224ec0807392b35.js","static/chunks/928-74244889bd7f2699.js","static/chunks/75fc9c18-a784766a129ec5fb.js","static/chunks/947-5980a3ff49069ddd.js"),self.__BUILD_MANIFEST_CB&&self.__BUILD_MANIFEST_CB();
|
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
Loading…
Reference in New Issue
Block a user