Mirror of https://github.com/csunny/DB-GPT.git, synced 2025-09-02 09:37:03 +00:00
feat(agent): Multi agent sdk (#976)
Co-authored-by: xtyuns <xtyuns@163.com>
Co-authored-by: Fangyin Cheng <staneyffer@gmail.com>
Co-authored-by: csunny <cfqsunny@163.com>
Co-authored-by: qidanrui <qidanrui@gmail.com>
@@ -91,34 +91,43 @@ def _initialize_openai_v1(params: ProxyModelParameters):
     return openai_params, api_type, api_version, proxies
 
 
-def _build_request(model: ProxyModel, params):
-    history = []
+def __convert_2_gpt_messages(messages: List[ModelMessage]):
+    chat_round = 0
+    gpt_messages = []
+    last_usr_message = ""
+    system_messages = []
+
+    for message in messages:
+        if message.role == ModelMessageRoleType.HUMAN or message.role == "user":
+            last_usr_message = message.content
+        elif message.role == ModelMessageRoleType.SYSTEM:
+            system_messages.append(message.content)
+        elif message.role == ModelMessageRoleType.AI or message.role == "assistant":
+            last_ai_message = message.content
+            gpt_messages.append({"role": "user", "content": last_usr_message})
+            gpt_messages.append({"role": "assistant", "content": last_ai_message})
+
+    if len(system_messages) > 0:
+        if len(system_messages) < 2:
+            gpt_messages.insert(0, {"role": "system", "content": system_messages[0]})
+            gpt_messages.append({"role": "user", "content": last_usr_message})
+        else:
+            gpt_messages.append({"role": "user", "content": system_messages[1]})
+    else:
+        last_message = messages[-1]
+        if last_message.role == ModelMessageRoleType.HUMAN:
+            gpt_messages.append({"role": "user", "content": last_message.content})
+
+    return gpt_messages
+
+
+def _build_request(model: ProxyModel, params):
     model_params = model.get_params()
     logger.info(f"Model: {model}, model_params: {model_params}")
 
     messages: List[ModelMessage] = params["messages"]
-    # Add history conversation
-    for message in messages:
-        if message.role == ModelMessageRoleType.HUMAN:
-            history.append({"role": "user", "content": message.content})
-        elif message.role == ModelMessageRoleType.SYSTEM:
-            history.append({"role": "system", "content": message.content})
-        elif message.role == ModelMessageRoleType.AI:
-            history.append({"role": "assistant", "content": message.content})
-        else:
-            pass
-
-    # Move the last user's information to the end
-    last_user_input_index = None
-    for i in range(len(history) - 1, -1, -1):
-        if history[i]["role"] == "user":
-            last_user_input_index = i
-            break
-    if last_user_input_index:
-        last_user_input = history.pop(last_user_input_index)
-        history.append(last_user_input)
 
+    history = __convert_2_gpt_messages(messages)
     payloads = {
         "temperature": params.get("temperature"),
         "max_tokens": params.get("max_new_tokens"),
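To make the new conversion path concrete, here is a minimal, self-contained sketch of what the added __convert_2_gpt_messages helper produces for a short conversation. The ModelMessage and ModelMessageRoleType classes below are simplified stand-ins for DB-GPT's own types (an assumption for illustration), and convert_to_gpt_messages mirrors the logic added above rather than being the project's API.

    from dataclasses import dataclass
    from typing import Dict, List


    # Simplified stand-ins for DB-GPT's ModelMessage / ModelMessageRoleType,
    # used here only to illustrate the conversion.
    @dataclass
    class ModelMessage:
        role: str
        content: str


    class ModelMessageRoleType:
        SYSTEM = "system"
        HUMAN = "human"
        AI = "ai"


    def convert_to_gpt_messages(messages: List[ModelMessage]) -> List[Dict[str, str]]:
        # Pair each human turn with the AI reply that follows it, collect system
        # prompts, and re-append the pending user input so the request ends with
        # a user turn (same behavior as the helper added in this commit).
        gpt_messages = []
        last_usr_message = ""
        system_messages = []

        for message in messages:
            if message.role in (ModelMessageRoleType.HUMAN, "user"):
                last_usr_message = message.content
            elif message.role == ModelMessageRoleType.SYSTEM:
                system_messages.append(message.content)
            elif message.role in (ModelMessageRoleType.AI, "assistant"):
                gpt_messages.append({"role": "user", "content": last_usr_message})
                gpt_messages.append({"role": "assistant", "content": message.content})

        if system_messages:
            if len(system_messages) < 2:
                gpt_messages.insert(0, {"role": "system", "content": system_messages[0]})
                gpt_messages.append({"role": "user", "content": last_usr_message})
            else:
                gpt_messages.append({"role": "user", "content": system_messages[1]})
        elif messages and messages[-1].role == ModelMessageRoleType.HUMAN:
            gpt_messages.append({"role": "user", "content": messages[-1].content})

        return gpt_messages


    if __name__ == "__main__":
        history = [
            ModelMessage(ModelMessageRoleType.SYSTEM, "You are a helpful assistant."),
            ModelMessage(ModelMessageRoleType.HUMAN, "Hello"),
            ModelMessage(ModelMessageRoleType.AI, "Hi, how can I help?"),
            ModelMessage(ModelMessageRoleType.HUMAN, "Summarize our chat."),
        ]
        for m in convert_to_gpt_messages(history):
            print(m)
        # -> the system prompt first, one user/assistant pair, and the pending
        #    user turn re-appended at the end.

With a single system prompt in the history, the output begins with the system message, keeps each user/assistant pair in order, and finishes with the pending user turn.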
@@ -25,6 +25,7 @@ def __convert_2_tongyi_messages(messages: List[ModelMessage]):
     if len(system_messages) > 0:
         if len(system_messages) < 2:
             tongyi_messages.insert(0, {"role": "system", "content": system_messages[0]})
+            tongyi_messages.append({"role": "user", "content": last_usr_message})
         else:
             tongyi_messages.append({"role": "user", "content": system_messages[1]})
     else:
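The Tongyi hunk applies the same fix as the OpenAI helper: when exactly one system prompt is present, the pending user message is appended again so the request still ends with a user turn. A minimal sketch of that branch, run against hypothetical sample values rather than DB-GPT objects:

    # Illustrative only: the system-prompt branch in __convert_2_tongyi_messages
    # after this change, with hypothetical sample values.
    tongyi_messages = [
        {"role": "user", "content": "Hello"},
        {"role": "assistant", "content": "Hi, how can I help?"},
    ]
    system_messages = ["You are a helpful assistant."]
    last_usr_message = "Summarize our chat."

    if len(system_messages) > 0:
        if len(system_messages) < 2:
            tongyi_messages.insert(0, {"role": "system", "content": system_messages[0]})
            # The newly added line: re-append the pending user input.
            tongyi_messages.append({"role": "user", "content": last_usr_message})
        else:
            tongyi_messages.append({"role": "user", "content": system_messages[1]})

    print(tongyi_messages)
    # -> system prompt first, the prior exchange, then the pending user turn last.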