mirror of
https://github.com/csunny/DB-GPT.git
synced 2025-09-12 20:53:48 +00:00
feat(core): Support multi round conversation operator (#986)
This commit is contained in:
438
examples/awel/data_analyst_assistant.py
Normal file
438
examples/awel/data_analyst_assistant.py
Normal file
@@ -0,0 +1,438 @@
|
||||
"""AWEL: Data analyst assistant.
|
||||
|
||||
DB-GPT will automatically load and execute the current file after startup.
|
||||
|
||||
Examples:
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
# Run this file in your terminal with dev mode.
|
||||
# First terminal
|
||||
export OPENAI_API_KEY=xxx
|
||||
export OPENAI_API_BASE=https://api.openai.com/v1
|
||||
python examples/awel/data_analyst_assistant.py
|
||||
|
||||
|
||||
Code fix command, return no streaming response
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
# Open a new terminal
|
||||
# Second terminal
|
||||
|
||||
DBGPT_SERVER="http://127.0.0.1:5555"
|
||||
MODEL="gpt-3.5-turbo"
|
||||
# First round
|
||||
curl -X POST $DBGPT_SERVER/api/v1/awel/trigger/examples/data_analyst/copilot \
|
||||
-H "Content-Type: application/json" -d '{
|
||||
"command": "dbgpt_awel_data_analyst_code_fix",
|
||||
"model": "gpt-3.5-turbo",
|
||||
"stream": false,
|
||||
"context": {
|
||||
"conv_uid": "uuid_conv_copilot_1234",
|
||||
"chat_mode": "chat_with_code"
|
||||
},
|
||||
"messages": "SELECT * FRM orders WHERE order_amount > 500;"
|
||||
}'
|
||||
|
||||
"""
|
||||
import logging
|
||||
from functools import cache
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
from dbgpt._private.pydantic import BaseModel, Field
|
||||
from dbgpt.core import (
|
||||
InMemoryStorage,
|
||||
LLMClient,
|
||||
MessageStorageItem,
|
||||
ModelMessage,
|
||||
ModelMessageRoleType,
|
||||
PromptManager,
|
||||
PromptTemplate,
|
||||
StorageConversation,
|
||||
StorageInterface,
|
||||
)
|
||||
from dbgpt.core.awel import DAG, HttpTrigger, JoinOperator, MapOperator
|
||||
from dbgpt.core.operator import (
|
||||
BufferedConversationMapperOperator,
|
||||
LLMBranchOperator,
|
||||
LLMOperator,
|
||||
PostConversationOperator,
|
||||
PostStreamingConversationOperator,
|
||||
PreConversationOperator,
|
||||
RequestBuildOperator,
|
||||
StreamingLLMOperator,
|
||||
)
|
||||
from dbgpt.model import MixinLLMOperator, OpenAIStreamingOperator
|
||||
from dbgpt.util.utils import colored
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
CODE_FIX = "dbgpt_awel_data_analyst_code_fix"
|
||||
CODE_PERF = "dbgpt_awel_data_analyst_code_perf"
|
||||
CODE_EXPLAIN = "dbgpt_awel_data_analyst_code_explain"
|
||||
CODE_COMMENT = "dbgpt_awel_data_analyst_code_comment"
|
||||
CODE_TRANSLATE = "dbgpt_awel_data_analyst_code_translate"
|
||||
|
||||
CODE_FIX_TEMPLATE_ZH = """作为一名经验丰富的数据仓库开发者和数据分析师,
|
||||
这里有一段 {language} 代码。请按照最佳实践检查代码,找出并修复所有错误。请给出修复后的代码,并且提供对您所做的每一行更正的逐行解释,请使用和用户相同的语言进行回答。"""
|
||||
CODE_FIX_TEMPLATE_EN = """As an experienced data warehouse developer and data analyst,
|
||||
here is a snippet of code of {language}. Please review the code following best practices to identify and fix all errors.
|
||||
Provide the corrected code and include a line-by-line explanation of all the fixes you've made, please use the same language as the user."""
|
||||
|
||||
CODE_PERF_TEMPLATE_ZH = """作为一名经验丰富的数据仓库开发者和数据分析师,这里有一段 {language} 代码。
|
||||
请你按照最佳实践来优化这段代码。请在代码中加入注释点明所做的更改,并解释每项优化的原因,以便提高代码的维护性和性能,请使用和用户相同的语言进行回答。"""
|
||||
CODE_PERF_TEMPLATE_EN = """As an experienced data warehouse developer and data analyst,
|
||||
you are provided with a snippet of code of {language}. Please optimize the code according to best practices.
|
||||
Include comments to highlight the changes made and explain the reasons for each optimization for better maintenance and performance,
|
||||
please use the same language as the user."""
|
||||
CODE_EXPLAIN_TEMPLATE_ZH = """作为一名经验丰富的数据仓库开发者和数据分析师,
|
||||
现在给你的是一份 {language} 代码。请你逐行解释代码的含义,请使用和用户相同的语言进行回答。"""
|
||||
|
||||
CODE_EXPLAIN_TEMPLATE_EN = """As an experienced data warehouse developer and data analyst,
|
||||
you are provided with a snippet of code of {language}. Please explain the meaning of the code line by line,
|
||||
please use the same language as the user."""
|
||||
|
||||
CODE_COMMENT_TEMPLATE_ZH = """作为一名经验丰富的数据仓库开发者和数据分析师,现在给你的是一份 {language} 代码。
|
||||
请你为每一行代码添加注释,解释每个部分的作用,请使用和用户相同的语言进行回答。"""
|
||||
|
||||
CODE_COMMENT_TEMPLATE_EN = """As an experienced Data Warehouse Developer and Data Analyst.
|
||||
Below is a snippet of code written in {language}.
|
||||
Please provide line-by-line comments explaining what each section of the code does, please use the same language as the user."""
|
||||
|
||||
CODE_TRANSLATE_TEMPLATE_ZH = """作为一名经验丰富的数据仓库开发者和数据分析师,现在手头有一份用{source_language}语言编写的代码片段。
|
||||
请你将这段代码准确无误地翻译成{target_language}语言,确保语法和功能在翻译后的代码中得到正确体现,请使用和用户相同的语言进行回答。"""
|
||||
CODE_TRANSLATE_TEMPLATE_EN = """As an experienced data warehouse developer and data analyst,
|
||||
you're presented with a snippet of code written in {source_language}.
|
||||
Please translate this code into {target_language} ensuring that the syntax and functionalities are accurately reflected in the translated code,
|
||||
please use the same language as the user."""
|
||||
|
||||
|
||||
class ReqContext(BaseModel):
    """Optional per-request context: caller identity and conversation metadata.

    Parsed from the ``context`` field of :class:`TriggerReqBody` and passed
    through to the model request (see ``CopilotOperator.map``).
    """

    # Name of the end user issuing the request (optional).
    user_name: Optional[str] = Field(
        None, description="The user name of the model request."
    )

    # Identifier of the calling system (optional).
    sys_code: Optional[str] = Field(
        None, description="The system code of the model request."
    )
    # Conversation uid; requests sharing a uid share chat history.
    conv_uid: Optional[str] = Field(
        None, description="The conversation uid of the model request."
    )
    # Chat mode; defaults to the code-chat scene used by this example.
    chat_mode: Optional[str] = Field(
        "chat_with_code", description="The chat mode of the model request."
    )
|
||||
|
||||
|
||||
class TriggerReqBody(BaseModel):
    """Request body accepted by the HTTP trigger of this DAG.

    ``command`` selects one of the registered prompt templates (fix / perf /
    explain / comment / translate); ``language`` fills the template's
    ``{language}`` (or ``{source_language}``) variable.
    """

    messages: str = Field(..., description="User input messages")
    command: Optional[str] = Field(default="fix", description="Command name")
    model: Optional[str] = Field(default="gpt-3.5-turbo", description="Model name")
    stream: Optional[bool] = Field(default=False, description="Whether return stream")
    language: Optional[str] = Field(default="hive", description="Language")
    # Only consulted by the translate command (see CopilotOperator.map).
    target_language: Optional[str] = Field(
        default="hive", description="Target language, use in translate"
    )
    context: Optional[ReqContext] = Field(
        default=None, description="The context of the model request."
    )
|
||||
|
||||
|
||||
@cache
def load_or_save_prompt_template(pm: PromptManager):
    """Register all copilot prompt templates with the prompt manager.

    For every command (fix / perf / explain / comment / translate) a Chinese
    and an English template variant is registered via ``pm.query_or_save``,
    all under the same ``chat_with_code`` / ``data_analyst`` scene.

    Cached per ``PromptManager`` instance (keyed by identity through
    ``functools.cache``) so repeated calls from the request path are cheap.

    Args:
        pm: The prompt manager to load from or save into.
    """
    ext_params = {
        "chat_scene": "chat_with_code",
        "sub_chat_scene": "data_analyst",
        "prompt_type": "common",
    }
    # (prompt_name, input_variables, {prompt_language: template}) per command.
    # zh is listed before en for each command, matching the original
    # registration order.
    template_specs = [
        (
            CODE_FIX,
            ["language"],
            {"zh": CODE_FIX_TEMPLATE_ZH, "en": CODE_FIX_TEMPLATE_EN},
        ),
        (
            CODE_PERF,
            ["language"],
            {"zh": CODE_PERF_TEMPLATE_ZH, "en": CODE_PERF_TEMPLATE_EN},
        ),
        (
            CODE_EXPLAIN,
            ["language"],
            {"zh": CODE_EXPLAIN_TEMPLATE_ZH, "en": CODE_EXPLAIN_TEMPLATE_EN},
        ),
        (
            CODE_COMMENT,
            ["language"],
            {"zh": CODE_COMMENT_TEMPLATE_ZH, "en": CODE_COMMENT_TEMPLATE_EN},
        ),
        (
            # Translate is the only command with two template variables.
            CODE_TRANSLATE,
            ["source_language", "target_language"],
            {"zh": CODE_TRANSLATE_TEMPLATE_ZH, "en": CODE_TRANSLATE_TEMPLATE_EN},
        ),
    ]
    for prompt_name, input_variables, language_templates in template_specs:
        for prompt_language, template in language_templates.items():
            pm.query_or_save(
                PromptTemplate(
                    input_variables=input_variables,
                    template=template,
                ),
                prompt_name=prompt_name,
                prompt_language=prompt_language,
                **ext_params,
            )
|
||||
|
||||
|
||||
class CopilotOperator(MapOperator[TriggerReqBody, Dict[str, Any]]):
    """Turn a copilot HTTP request into a model-request dict.

    Looks up the prompt template registered for ``input_value.command``
    (preferring the server's configured language), formats it into a system
    message, and pairs it with the user's input as a human message.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Fallback manager for when the prompt serve component is not running.
        self._default_prompt_manager = PromptManager()

    async def map(self, input_value: TriggerReqBody) -> Dict[str, Any]:
        """Build the request payload consumed by ``RequestBuildOperator``."""
        from dbgpt.serve.prompt.serve import SERVE_APP_NAME as PROMPT_SERVE_APP_NAME
        from dbgpt.serve.prompt.serve import Serve as PromptServe

        # Prefer the app-level prompt manager; fall back to our local one.
        prompt_serve = self.system_app.get_component(
            PROMPT_SERVE_APP_NAME, PromptServe, default_component=None
        )
        prompt_manager = (
            prompt_serve.prompt_manager
            if prompt_serve
            else self._default_prompt_manager
        )
        # Idempotent (cached) registration of all copilot templates.
        load_or_save_prompt_template(prompt_manager)

        user_language = self.system_app.config.get_current_lang(default="en")

        prompt_list = prompt_manager.prefer_query(
            input_value.command, prefer_prompt_language=user_language
        )
        if not prompt_list:
            error_msg = f"Prompt not found for command {input_value.command}, user_language: {user_language}"
            logger.error(error_msg)
            raise ValueError(error_msg)
        prompt = prompt_list[0].to_prompt_template()

        # Translate takes two template variables; all other commands take one.
        if input_value.command == CODE_TRANSLATE:
            format_params = {
                "source_language": input_value.language,
                "target_language": input_value.target_language,
            }
        else:
            format_params = {"language": input_value.language}
        system_message = prompt.format(**format_params)

        request_messages = [
            ModelMessage(role=ModelMessageRoleType.SYSTEM, content=system_message),
            ModelMessage(role=ModelMessageRoleType.HUMAN, content=input_value.messages),
        ]
        request_context = input_value.context.dict() if input_value.context else {}
        return {
            "messages": request_messages,
            "stream": input_value.stream,
            "model": input_value.model,
            "context": request_context,
        }
|
||||
|
||||
|
||||
class MyConversationOperator(PreConversationOperator):
    """Pre-conversation operator that resolves its storage lazily.

    Instead of requiring storage at construction time, the ``storage`` and
    ``message_storage`` properties first try the conversation serve component
    registered in the system app, then fall back to in-memory storage.
    """

    def __init__(
        self,
        storage: Optional[StorageInterface[StorageConversation, Any]] = None,
        message_storage: Optional[StorageInterface[MessageStorageItem, Any]] = None,
        **kwargs,
    ):
        super().__init__(storage, message_storage, **kwargs)

    def _get_conversion_serve(self):
        """Fetch the conversation serve component, or None if absent."""
        from dbgpt.serve.conversation.serve import (
            SERVE_APP_NAME as CONVERSATION_SERVE_APP_NAME,
        )
        from dbgpt.serve.conversation.serve import Serve as ConversationServe

        conversation_serve: ConversationServe = self.system_app.get_component(
            CONVERSATION_SERVE_APP_NAME, ConversationServe, default_component=None
        )
        return conversation_serve

    @property
    def storage(self):
        """Conversation storage: explicit > serve component > in-memory."""
        if self._storage:
            return self._storage
        serve = self._get_conversion_serve()
        if serve:
            return serve.conv_storage
        logger.info("Conversation storage not found, use InMemoryStorage default")
        self._storage = InMemoryStorage()
        return self._storage

    @property
    def message_storage(self):
        """Message storage: explicit > serve component > in-memory."""
        if self._message_storage:
            return self._message_storage
        serve = self._get_conversion_serve()
        if serve:
            return serve.message_storage
        logger.info("Message storage not found, use InMemoryStorage default")
        self._message_storage = InMemoryStorage()
        return self._message_storage
|
||||
|
||||
|
||||
class MyLLMOperator(MixinLLMOperator, LLMOperator):
    """Non-streaming LLM operator with a default client from the mixin.

    NOTE(review): the explicit ``LLMOperator.__init__`` call below suggests
    the mixin does not forward ``**kwargs`` through the cooperative MRO
    chain — confirm against ``MixinLLMOperator`` before simplifying.
    """

    def __init__(self, llm_client: Optional[LLMClient] = None, **kwargs):
        # Initialize the mixin first (resolves/holds the llm_client).
        super().__init__(llm_client)
        # Explicitly initialize LLMOperator so operator kwargs
        # (e.g. task_name) are applied.
        LLMOperator.__init__(self, llm_client, **kwargs)
|
||||
|
||||
|
||||
class MyStreamingLLMOperator(MixinLLMOperator, StreamingLLMOperator):
    """Streaming LLM operator with a default client from the mixin.

    NOTE(review): mirrors ``MyLLMOperator`` — the explicit base-class
    ``__init__`` call suggests the mixin does not forward ``**kwargs``;
    confirm before simplifying.
    """

    def __init__(self, llm_client: Optional[LLMClient] = None, **kwargs):
        # Initialize the mixin first (resolves/holds the llm_client).
        super().__init__(llm_client)
        # Explicitly initialize StreamingLLMOperator so operator kwargs
        # (e.g. task_name) are applied.
        StreamingLLMOperator.__init__(self, llm_client, **kwargs)
|
||||
|
||||
|
||||
def history_message_mapper(
    messages_by_round: List[List[ModelMessage]],
) -> List[ModelMessage]:
    """Flatten multi-round history into a single message list.

    All non-system messages are kept in order; of the system messages,
    only the first one encountered is kept (later rounds repeat it).
    """
    result: List[ModelMessage] = []
    seen_system = False
    for round_messages in messages_by_round:
        for msg in round_messages:
            is_system = msg.role == ModelMessageRoleType.SYSTEM
            # Drop duplicate system messages from later rounds.
            if is_system and seen_system:
                continue
            if is_system:
                seen_system = True
            result.append(msg)
    # Debug output so the effective prompt is visible in the server log.
    print("history_message_mapper start:" + "=" * 70)
    print(colored(ModelMessage.get_printable_message(result), "green"))
    print("history_message_mapper end:" + "=" * 72)
    return result
|
||||
|
||||
|
||||
# DAG wiring: one HTTP trigger fans into a streaming and a non-streaming
# branch; both branches re-join so the trigger gets exactly one result.
with DAG("dbgpt_awel_data_analyst_assistant") as dag:
    # HTTP entry point; `stream` in the request body picks the branch.
    trigger = HttpTrigger(
        "/examples/data_analyst/copilot",
        request_body=TriggerReqBody,
        methods="POST",
        streaming_predict_func=lambda x: x.stream,
    )

    # Build messages from the selected prompt template + user input.
    copilot_task = CopilotOperator()
    request_handle_task = RequestBuildOperator()

    # Pre-process conversation
    pre_conversation_task = MyConversationOperator()
    # Keep last k round conversation.
    history_conversation_task = BufferedConversationMapperOperator(
        last_k_round=5, message_mapper=history_message_mapper
    )

    # Save conversation to storage.
    post_conversation_task = PostConversationOperator()
    # Save streaming conversation to storage.
    post_streaming_conversation_task = PostStreamingConversationOperator()

    # Use LLMOperator to generate response.
    llm_task = MyLLMOperator(task_name="llm_task")
    streaming_llm_task = MyStreamingLLMOperator(task_name="streaming_llm_task")
    # Routes to one of the two tasks above by the request's `stream` flag.
    branch_task = LLMBranchOperator(
        stream_task_name="streaming_llm_task", no_stream_task_name="llm_task"
    )
    model_parse_task = MapOperator(lambda out: out.to_dict())
    openai_format_stream_task = OpenAIStreamingOperator()
    # Only one branch runs, so `or` picks whichever output is non-empty.
    result_join_task = JoinOperator(
        combine_function=lambda not_stream_out, stream_out: not_stream_out or stream_out
    )

    # Shared prefix: request -> prompt -> conversation pre-processing -> branch.
    (
        trigger
        >> copilot_task
        >> request_handle_task
        >> pre_conversation_task
        >> history_conversation_task
        >> branch_task
    )
    # The branch of no streaming response.
    (
        branch_task
        >> llm_task
        >> post_conversation_task
        >> model_parse_task
        >> result_join_task
    )
    # The branch of streaming response.
    (
        branch_task
        >> streaming_llm_task
        >> post_streaming_conversation_task
        >> openai_format_stream_task
        >> result_join_task
    )
|
||||
|
||||
if __name__ == "__main__":
    if dag.leaf_nodes[0].dev_mode:
        # Dev mode: serve only this DAG with a local development webserver.
        from dbgpt.core.awel import setup_dev_environment

        setup_dev_environment([dag])
    else:
        # Production: DB-GPT discovers and loads this file itself, so there
        # is nothing to do when executed directly.
        pass
|
@@ -33,12 +33,15 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"is_executing": true
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"os.environ[\"OPENAI_API_KEY\"] = \"sk-ElhG3036tcvECTOYO9nHqAkUIWtqT55JXs1cTjODz1bnUQDz\"\n",
|
||||
"os.environ[\"OPENAI_API_BASE\"] = \"https://api.chatanywhere.tech/v1\""
|
||||
"# Set your api key and api base url\n",
|
||||
"# os.environ[\"OPENAI_API_KEY\"] = \"Your API\"\n",
|
||||
"# os.environ[\"OPENAI_API_BASE\"] = \"https://api.openai.com/v1\""
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -50,12 +53,12 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\u001b[33mUser\u001b[0m (to plan_manager)-[]:\n",
|
||||
"\u001B[33mUser\u001B[0m (to plan_manager)-[]:\n",
|
||||
"\n",
|
||||
"\"Obtain simple information about issues in the repository 'eosphoros-ai/DB-GPT' in the past three days and analyze the data. Create a Markdown table grouped by day and status.\"\n",
|
||||
"\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"\u001b[33mplan_manager\u001b[0m (to Planner)-[]:\n",
|
||||
"\u001B[33mplan_manager\u001B[0m (to Planner)-[]:\n",
|
||||
"\n",
|
||||
"\"Obtain simple information about issues in the repository 'eosphoros-ai/DB-GPT' in the past three days and analyze the data. Create a Markdown table grouped by day and status.\"\n",
|
||||
"\n",
|
||||
@@ -72,15 +75,15 @@
|
||||
" \"content\": \"Write a Python script to process the retrieved issues data to group them by creation date and status. Then, format the grouped data into a Markdown table.\",\n",
|
||||
" \"rely\": \"1\"\n",
|
||||
"}]\n",
|
||||
"\u001b[33mPlanner\u001b[0m (to plan_manager)-[gpt-4-vision-preview]:\n",
|
||||
"\u001B[33mPlanner\u001B[0m (to plan_manager)-[gpt-4-vision-preview]:\n",
|
||||
"\n",
|
||||
"\"[{\\n \\\"serial_number\\\": \\\"1\\\",\\n \\\"agent\\\": \\\"CodeEngineer\\\",\\n \\\"content\\\": \\\"Write a Python script to use the GitHub API to retrieve issues from the 'eosphoros-ai/DB-GPT' repository that were created in the past three days. The script should extract the issue's creation date and status.\\\",\\n \\\"rely\\\": \\\"\\\"\\n},\\n{\\n \\\"serial_number\\\": \\\"2\\\",\\n \\\"agent\\\": \\\"CodeEngineer\\\",\\n \\\"content\\\": \\\"Write a Python script to process the retrieved issues data to group them by creation date and status. Then, format the grouped data into a Markdown table.\\\",\\n \\\"rely\\\": \\\"1\\\"\\n}]\"\n",
|
||||
"\u001b[32m>>>>>>>>Planner Review info: \n",
|
||||
" Pass.None\u001b[0m\n",
|
||||
"\u001b[34m>>>>>>>>Planner Action report: \n",
|
||||
"\u001B[32m>>>>>>>>Planner Review info: \n",
|
||||
" Pass.None\u001B[0m\n",
|
||||
"\u001B[34m>>>>>>>>Planner Action report: \n",
|
||||
"execution succeeded,\n",
|
||||
"1,Write a Python script to use the GitHub API to retrieve issues from the 'eosphoros-ai/DB-GPT' repository that were created in the past three days. The script should extract the issue's creation date and status.\n",
|
||||
"2,Write a Python script to process the retrieved issues data to group them by creation date and status. Then, format the grouped data into a Markdown table.\u001b[0m\n",
|
||||
"2,Write a Python script to process the retrieved issues data to group them by creation date and status. Then, format the grouped data into a Markdown table.\u001B[0m\n",
|
||||
"\n",
|
||||
"--------------------------------------------------------------------------------\n"
|
||||
]
|
||||
@@ -96,7 +99,7 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\u001b[33mplan_manager\u001b[0m (to CodeEngineer)-[]:\n",
|
||||
"\u001B[33mplan_manager\u001B[0m (to CodeEngineer)-[]:\n",
|
||||
"\n",
|
||||
"\"Write a Python script to use the GitHub API to retrieve issues from the 'eosphoros-ai/DB-GPT' repository that were created in the past three days. The script should extract the issue's creation date and status.\"\n",
|
||||
"\n",
|
||||
@@ -141,14 +144,14 @@
|
||||
"```\n",
|
||||
"\n",
|
||||
"When you run this script, it will print out the creation date and status of issues from the 'eosphoros-ai/DB-GPT' repository that were created in the past three days. Make sure to have internet access while running the script as it needs to make requests to the GitHub API.\n",
|
||||
"\u001b[31m\n",
|
||||
">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\u001b[0m\n",
|
||||
"\u001b[33mCodeEngineer\u001b[0m (to plan_manager)-[gpt-4-vision-preview]:\n",
|
||||
"\u001B[31m\n",
|
||||
">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\u001B[0m\n",
|
||||
"\u001B[33mCodeEngineer\u001B[0m (to plan_manager)-[gpt-4-vision-preview]:\n",
|
||||
"\n",
|
||||
"\"```python\\n# filename: github_issues.py\\nimport requests\\nfrom datetime import datetime, timedelta\\n\\n# Function to get issues created in the past three days from a GitHub repository\\ndef get_recent_issues(owner, repo):\\n # Calculate the date for three days ago\\n three_days_ago = datetime.utcnow() - timedelta(days=3)\\n # Format the date in ISO 8601 format\\n since = three_days_ago.isoformat()\\n\\n # GitHub API URL for issues of the given repository\\n issues_url = f\\\"https://api.github.com/repos/{owner}/{repo}/issues\\\"\\n \\n # Parameters to get issues created in the past three days\\n params = {\\n 'since': since,\\n 'state': 'all', # Get both open and closed issues\\n }\\n \\n # Send a request to the GitHub API\\n response = requests.get(issues_url, params=params)\\n # Raise an exception if the request was unsuccessful\\n response.raise_for_status()\\n \\n # Parse the JSON response\\n issues = response.json()\\n \\n # Extract and print the creation date and status of each issue\\n for issue in issues:\\n created_at = issue['created_at']\\n state = issue['state']\\n print(f\\\"Issue created at: {created_at}, Status: {state}\\\")\\n\\n# Replace 'owner' and 'repo' with the actual owner and repository name\\nget_recent_issues('eosphoros-ai', 'DB-GPT')\\n```\\n\\nWhen you run this script, it will print out the creation date and status of issues from the 'eosphoros-ai/DB-GPT' repository that were created in the past three days. Make sure to have internet access while running the script as it needs to make requests to the GitHub API.\"\n",
|
||||
"\u001b[32m>>>>>>>>CodeEngineer Review info: \n",
|
||||
" Pass.None\u001b[0m\n",
|
||||
"\u001b[34m>>>>>>>>CodeEngineer Action report: \n",
|
||||
"\u001B[32m>>>>>>>>CodeEngineer Review info: \n",
|
||||
" Pass.None\u001B[0m\n",
|
||||
"\u001B[34m>>>>>>>>CodeEngineer Action report: \n",
|
||||
"execution failed,\n",
|
||||
"exitcode: 1 (execution failed)\n",
|
||||
" \n",
|
||||
@@ -156,7 +159,7 @@
|
||||
" File \"github_issues.py\", line 2, in <module>\n",
|
||||
" import requests\n",
|
||||
"ModuleNotFoundError: No module named 'requests'\n",
|
||||
"\u001b[0m\n",
|
||||
"\u001B[0m\n",
|
||||
"\n",
|
||||
"--------------------------------------------------------------------------------\n"
|
||||
]
|
||||
@@ -172,7 +175,7 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\u001b[33mplan_manager\u001b[0m (to CodeEngineer)-[]:\n",
|
||||
"\u001B[33mplan_manager\u001B[0m (to CodeEngineer)-[]:\n",
|
||||
"\n",
|
||||
"\"exitcode: 1 (execution failed)\\n \\nTraceback (most recent call last):\\n File \\\"github_issues.py\\\", line 2, in <module>\\n import requests\\nModuleNotFoundError: No module named 'requests'\\n\"\n",
|
||||
"\n",
|
||||
@@ -225,17 +228,17 @@
|
||||
"```\n",
|
||||
"\n",
|
||||
"Please run the above Python script after you have installed the `requests` module.\n",
|
||||
"\u001b[31m\n",
|
||||
">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is sh)...\u001b[0m\n",
|
||||
"\u001b[31m\n",
|
||||
">>>>>>>> EXECUTING CODE BLOCK 1 (inferred language is python)...\u001b[0m\n",
|
||||
"\u001B[31m\n",
|
||||
">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is sh)...\u001B[0m\n",
|
||||
"\u001B[31m\n",
|
||||
">>>>>>>> EXECUTING CODE BLOCK 1 (inferred language is python)...\u001B[0m\n",
|
||||
"un_stream ai response: False\n",
|
||||
"\u001b[33mCodeEngineer\u001b[0m (to plan_manager)-[gpt-4-vision-preview]:\n",
|
||||
"\u001B[33mCodeEngineer\u001B[0m (to plan_manager)-[gpt-4-vision-preview]:\n",
|
||||
"\n",
|
||||
"\"It appears that the `requests` module is not installed on your system. You can install it using `pip`, the Python package manager. Here's the command to install the `requests` module:\\n\\n```sh\\npip install requests\\n```\\n\\nAfter installing the `requests` module, you can try running the script again. Here is the complete script including the installation of the `requests` module:\\n\\n```python\\n# filename: github_issues.py\\nimport requests\\nfrom datetime import datetime, timedelta\\n\\n# Function to get issues created in the past three days from a GitHub repository\\ndef get_recent_issues(owner, repo):\\n # Calculate the date for three days ago\\n three_days_ago = datetime.utcnow() - timedelta(days=3)\\n # Format the date in ISO 8601 format\\n since = three_days_ago.isoformat()\\n\\n # GitHub API URL for issues of the given repository\\n issues_url = f\\\"https://api.github.com/repos/{owner}/{repo}/issues\\\"\\n \\n # Parameters to get issues created in the past three days\\n params = {\\n 'since': since,\\n 'state': 'all', # Get both open and closed issues\\n }\\n \\n # Send a request to the GitHub API\\n response = requests.get(issues_url, params=params)\\n # Raise an exception if the request was unsuccessful\\n response.raise_for_status()\\n \\n # Parse the JSON response\\n issues = response.json()\\n \\n # Extract and print the creation date and status of each issue\\n for issue in issues:\\n created_at = issue['created_at']\\n state = issue['state']\\n print(f\\\"Issue created at: {created_at}, Status: {state}\\\")\\n\\n# Replace 'owner' and 'repo' with the actual owner and repository name\\nget_recent_issues('eosphoros-ai', 'DB-GPT')\\n```\\n\\nPlease run the above Python script after you have installed the `requests` module.\"\n",
|
||||
"\u001b[32m>>>>>>>>CodeEngineer Review info: \n",
|
||||
" Pass.None\u001b[0m\n",
|
||||
"\u001b[34m>>>>>>>>CodeEngineer Action report: \n",
|
||||
"\u001B[32m>>>>>>>>CodeEngineer Review info: \n",
|
||||
" Pass.None\u001B[0m\n",
|
||||
"\u001B[34m>>>>>>>>CodeEngineer Action report: \n",
|
||||
"execution succeeded,\n",
|
||||
"\n",
|
||||
"Collecting requests\n",
|
||||
@@ -302,7 +305,7 @@
|
||||
"Issue created at: 2023-10-26T01:45:37Z, Status: open\n",
|
||||
"Issue created at: 2023-08-30T06:02:32Z, Status: closed\n",
|
||||
"Issue created at: 2023-06-18T01:11:22Z, Status: open\n",
|
||||
"\u001b[0m\n",
|
||||
"\u001B[0m\n",
|
||||
"\n",
|
||||
"--------------------------------------------------------------------------------\n"
|
||||
]
|
||||
@@ -318,7 +321,7 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\u001b[33mplan_manager\u001b[0m (to CodeEngineer)-[]:\n",
|
||||
"\u001B[33mplan_manager\u001B[0m (to CodeEngineer)-[]:\n",
|
||||
"\n",
|
||||
"\"The execution result of the code you wrote is judged as not answering the task question. Please re-understand and complete the task.\"\n",
|
||||
"\n",
|
||||
@@ -376,15 +379,15 @@
|
||||
"```\n",
|
||||
"\n",
|
||||
"When you run this script, it will check for the `requests` module and install it if necessary. Then it will retrieve the issues from the 'eosphoros-ai/DB-GPT' repository created in the past three days and print their creation date and status.\n",
|
||||
"\u001b[31m\n",
|
||||
">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\u001b[0m\n",
|
||||
"\u001B[31m\n",
|
||||
">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\u001B[0m\n",
|
||||
"un_stream ai response: False\n",
|
||||
"\u001b[33mCodeEngineer\u001b[0m (to plan_manager)-[gpt-4-vision-preview]:\n",
|
||||
"\u001B[33mCodeEngineer\u001B[0m (to plan_manager)-[gpt-4-vision-preview]:\n",
|
||||
"\n",
|
||||
"\"I apologize for the inconvenience. Let me provide you with a Python script that includes a check to ensure the `requests` module is installed. If it's not installed, the script will attempt to install it using `pip`. Please run the following Python script:\\n\\n```python\\n# filename: github_issues.py\\nimport subprocess\\nimport pkg_resources\\nfrom datetime import datetime, timedelta\\n\\n# Ensure the 'requests' package is installed\\nrequired_packages = {'requests'}\\ninstalled_packages = {pkg.key for pkg in pkg_resources.working_set}\\nmissing_packages = required_packages - installed_packages\\n\\nif missing_packages:\\n subprocess.check_call(['pip', 'install', *missing_packages])\\n\\nimport requests\\n\\n# Function to get issues created in the past three days from a GitHub repository\\ndef get_recent_issues(owner, repo):\\n # Calculate the date for three days ago\\n three_days_ago = datetime.utcnow() - timedelta(days=3)\\n # Format the date in ISO 8601 format\\n since = three_days_ago.isoformat()\\n\\n # GitHub API URL for issues of the given repository\\n issues_url = f\\\"https://api.github.com/repos/{owner}/{repo}/issues\\\"\\n \\n # Parameters to get issues created in the past three days\\n params = {\\n 'since': since,\\n 'state': 'all', # Get both open and closed issues\\n }\\n \\n # Send a request to the GitHub API\\n response = requests.get(issues_url, params=params)\\n # Raise an exception if the request was unsuccessful\\n response.raise_for_status()\\n \\n # Parse the JSON response\\n issues = response.json()\\n \\n # Extract and print the creation date and status of each issue\\n for issue in issues:\\n created_at = issue['created_at']\\n state = issue['state']\\n print(f\\\"Issue created at: {created_at}, Status: {state}\\\")\\n\\n# Replace 'owner' and 'repo' with the actual owner and repository name\\nget_recent_issues('eosphoros-ai', 'DB-GPT')\\n```\\n\\nWhen you run this script, it will check for the `requests` module and install it if 
necessary. Then it will retrieve the issues from the 'eosphoros-ai/DB-GPT' repository created in the past three days and print their creation date and status.\"\n",
|
||||
"\u001b[32m>>>>>>>>CodeEngineer Review info: \n",
|
||||
" Pass.None\u001b[0m\n",
|
||||
"\u001b[34m>>>>>>>>CodeEngineer Action report: \n",
|
||||
"\u001B[32m>>>>>>>>CodeEngineer Review info: \n",
|
||||
" Pass.None\u001B[0m\n",
|
||||
"\u001B[34m>>>>>>>>CodeEngineer Action report: \n",
|
||||
"execution succeeded,\n",
|
||||
"\n",
|
||||
"/workspace/github_issues.py:3: DeprecationWarning: pkg_resources is deprecated as an API. See https://setuptools.pypa.io/en/latest/pkg_resources.html\n",
|
||||
@@ -421,7 +424,7 @@
|
||||
"Issue created at: 2023-10-26T01:45:37Z, Status: open\n",
|
||||
"Issue created at: 2023-08-30T06:02:32Z, Status: closed\n",
|
||||
"Issue created at: 2023-06-18T01:11:22Z, Status: open\n",
|
||||
"\u001b[0m\n",
|
||||
"\u001B[0m\n",
|
||||
"\n",
|
||||
"--------------------------------------------------------------------------------\n"
|
||||
]
|
||||
@@ -437,7 +440,7 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\u001b[33mplan_manager\u001b[0m (to CodeEngineer)-[]:\n",
|
||||
"\u001B[33mplan_manager\u001B[0m (to CodeEngineer)-[]:\n",
|
||||
"\n",
|
||||
"\"The execution result of the code you wrote is judged as not answering the task question. Please re-understand and complete the task.\"\n",
|
||||
"\n",
|
||||
@@ -484,15 +487,15 @@
|
||||
"```\n",
|
||||
"\n",
|
||||
"This script retrieves all issues from the specified repository and then filters them based on the creation date to find the ones that were created in the past three days. It then prints the creation date and status of each issue that meets the criteria. Please execute this updated script.\n",
|
||||
"\u001b[31m\n",
|
||||
">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\u001b[0m\n",
|
||||
"\u001B[31m\n",
|
||||
">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\u001B[0m\n",
|
||||
"un_stream ai response: True\n",
|
||||
"\u001b[33mCodeEngineer\u001b[0m (to plan_manager)-[gpt-4-vision-preview]:\n",
|
||||
"\u001B[33mCodeEngineer\u001B[0m (to plan_manager)-[gpt-4-vision-preview]:\n",
|
||||
"\n",
|
||||
"\"I apologize for the oversight. Let's try a different approach to ensure we get the issues created in the past three days from the 'eosphoros-ai/DB-GPT' repository. I will provide you with an updated Python script that correctly retrieves and filters the issues based on their creation date.\\n\\n```python\\n# filename: github_issues.py\\nimport requests\\nfrom datetime import datetime, timedelta\\n\\n# Function to get issues created in the past three days from a GitHub repository\\ndef get_recent_issues(owner, repo):\\n # Calculate the date for three days ago\\n three_days_ago = datetime.utcnow() - timedelta(days=3)\\n # Format the date in ISO 8601 format\\n since = three_days_ago.isoformat()\\n\\n # GitHub API URL for issues of the given repository\\n issues_url = f\\\"https://api.github.com/repos/{owner}/{repo}/issues\\\"\\n\\n # Parameters to get all issues, GitHub API does not allow filtering by creation date directly\\n params = {\\n 'state': 'all', # Get both open and closed issues\\n }\\n\\n # Send a request to the GitHub API\\n response = requests.get(issues_url, params=params)\\n # Raise an exception if the request was unsuccessful\\n response.raise_for_status()\\n\\n # Parse the JSON response\\n issues = response.json()\\n\\n # Filter and print the creation date and status of each issue created in the past three days\\n for issue in issues:\\n created_at = datetime.strptime(issue['created_at'], \\\"%Y-%m-%dT%H:%M:%SZ\\\")\\n if created_at > three_days_ago:\\n state = issue['state']\\n print(f\\\"Issue created at: {created_at}, Status: {state}\\\")\\n\\n# Replace 'owner' and 'repo' with the actual owner and repository name\\nget_recent_issues('eosphoros-ai', 'DB-GPT')\\n```\\n\\nThis script retrieves all issues from the specified repository and then filters them based on the creation date to find the ones that were created in the past three days. It then prints the creation date and status of each issue that meets the criteria. 
Please execute this updated script.\"\n",
|
||||
"\u001b[32m>>>>>>>>CodeEngineer Review info: \n",
|
||||
" Pass.None\u001b[0m\n",
|
||||
"\u001b[34m>>>>>>>>CodeEngineer Action report: \n",
|
||||
"\u001B[32m>>>>>>>>CodeEngineer Review info: \n",
|
||||
" Pass.None\u001B[0m\n",
|
||||
"\u001B[34m>>>>>>>>CodeEngineer Action report: \n",
|
||||
"execution succeeded,\n",
|
||||
"\n",
|
||||
"/workspace/github_issues.py:8: DeprecationWarning: datetime.datetime.utcnow() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.now(datetime.UTC).\n",
|
||||
@@ -510,7 +513,7 @@
|
||||
"Issue created at: 2023-12-25 08:35:48, Status: open\n",
|
||||
"Issue created at: 2023-12-25 08:34:13, Status: closed\n",
|
||||
"Issue created at: 2023-12-25 06:33:37, Status: closed\n",
|
||||
"\u001b[0m\n",
|
||||
"\u001B[0m\n",
|
||||
"\n",
|
||||
"--------------------------------------------------------------------------------\n"
|
||||
]
|
||||
@@ -526,7 +529,7 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\u001b[33mplan_manager\u001b[0m (to CodeEngineer)-[]:\n",
|
||||
"\u001B[33mplan_manager\u001B[0m (to CodeEngineer)-[]:\n",
|
||||
"\n",
|
||||
"\"Read the result data of the dependent steps in the above historical message to complete the current goal:Write a Python script to process the retrieved issues data to group them by creation date and status. Then, format the grouped data into a Markdown table.\"\n",
|
||||
"\n",
|
||||
@@ -573,15 +576,15 @@
|
||||
"```\n",
|
||||
"\n",
|
||||
"When you run this script, it will print out a Markdown table with the number of open and closed issues grouped by their creation dates.\n",
|
||||
"\u001b[31m\n",
|
||||
">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\u001b[0m\n",
|
||||
"\u001B[31m\n",
|
||||
">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\u001B[0m\n",
|
||||
"un_stream ai response: True\n",
|
||||
"\u001b[33mCodeEngineer\u001b[0m (to plan_manager)-[gpt-4-vision-preview]:\n",
|
||||
"\u001B[33mCodeEngineer\u001B[0m (to plan_manager)-[gpt-4-vision-preview]:\n",
|
||||
"\n",
|
||||
"\"```python\\n# Let's process the retrieved issues data to group them by creation date and status.\\n# Then, we will format the grouped data into a Markdown table.\\n\\n# The data from the previous step\\nissues_data = [\\n {\\\"created_at\\\": \\\"2023-12-27 03:20:12\\\", \\\"status\\\": \\\"open\\\"},\\n {\\\"created_at\\\": \\\"2023-12-26 16:13:42\\\", \\\"status\\\": \\\"open\\\"},\\n {\\\"created_at\\\": \\\"2023-12-26 10:23:43\\\", \\\"status\\\": \\\"open\\\"},\\n {\\\"created_at\\\": \\\"2023-12-26 07:25:37\\\", \\\"status\\\": \\\"closed\\\"},\\n {\\\"created_at\\\": \\\"2023-12-26 05:01:26\\\", \\\"status\\\": \\\"closed\\\"},\\n {\\\"created_at\\\": \\\"2023-12-26 03:54:15\\\", \\\"status\\\": \\\"open\\\"},\\n {\\\"created_at\\\": \\\"2023-12-26 02:19:48\\\", \\\"status\\\": \\\"open\\\"},\\n {\\\"created_at\\\": \\\"2023-12-25 12:26:40\\\", \\\"status\\\": \\\"open\\\"},\\n {\\\"created_at\\\": \\\"2023-12-25 12:00:52\\\", \\\"status\\\": \\\"open\\\"},\\n {\\\"created_at\\\": \\\"2023-12-25 08:36:56\\\", \\\"status\\\": \\\"open\\\"},\\n {\\\"created_at\\\": \\\"2023-12-25 08:35:48\\\", \\\"status\\\": \\\"open\\\"},\\n {\\\"created_at\\\": \\\"2023-12-25 08:34:13\\\", \\\"status\\\": \\\"closed\\\"},\\n {\\\"created_at\\\": \\\"2023-12-25 06:33:37\\\", \\\"status\\\": \\\"closed\\\"},\\n]\\n\\n# Grouping the issues by creation date and status\\nfrom collections import defaultdict\\ngrouped_issues = defaultdict(lambda: defaultdict(list))\\n\\nfor issue in issues_data:\\n date = issue[\\\"created_at\\\"].split(' ')[0] # Extract just the date part\\n grouped_issues[date][issue[\\\"status\\\"]].append(issue)\\n\\n# Format the grouped data into a Markdown table\\nmarkdown_table = \\\"Date | Open Issues | Closed Issues\\\\n\\\"\\nmarkdown_table += \\\"--- | --- | ---\\\\n\\\"\\n\\nfor date, statuses in sorted(grouped_issues.items()):\\n open_issues = len(statuses[\\\"open\\\"]) if \\\"open\\\" in statuses else 0\\n closed_issues = 
len(statuses[\\\"closed\\\"]) if \\\"closed\\\" in statuses else 0\\n markdown_table += f\\\"{date} | {open_issues} | {closed_issues}\\\\n\\\"\\n\\nprint(markdown_table)\\n```\\n\\nWhen you run this script, it will print out a Markdown table with the number of open and closed issues grouped by their creation dates.\"\n",
|
||||
"\u001b[32m>>>>>>>>CodeEngineer Review info: \n",
|
||||
" Pass.None\u001b[0m\n",
|
||||
"\u001b[34m>>>>>>>>CodeEngineer Action report: \n",
|
||||
"\u001B[32m>>>>>>>>CodeEngineer Review info: \n",
|
||||
" Pass.None\u001B[0m\n",
|
||||
"\u001B[34m>>>>>>>>CodeEngineer Action report: \n",
|
||||
"execution succeeded,\n",
|
||||
"\n",
|
||||
"Date | Open Issues | Closed Issues\n",
|
||||
@@ -590,17 +593,17 @@
|
||||
"2023-12-26 | 4 | 2\n",
|
||||
"2023-12-27 | 1 | 0\n",
|
||||
"\n",
|
||||
"\u001b[0m\n",
|
||||
"\u001B[0m\n",
|
||||
"\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"\u001b[33mplan_manager\u001b[0m (to User)-[None]:\n",
|
||||
"\u001B[33mplan_manager\u001B[0m (to User)-[None]:\n",
|
||||
"\n",
|
||||
"\"Obtain simple information about issues in the repository 'eosphoros-ai/DB-GPT' in the past three days and analyze the data. Create a Markdown table grouped by day and status.\"\n",
|
||||
"\u001b[32m>>>>>>>>plan_manager Review info: \n",
|
||||
" Pass.None\u001b[0m\n",
|
||||
"\u001b[34m>>>>>>>>plan_manager Action report: \n",
|
||||
"\u001B[32m>>>>>>>>plan_manager Review info: \n",
|
||||
" Pass.None\u001B[0m\n",
|
||||
"\u001B[34m>>>>>>>>plan_manager Action report: \n",
|
||||
"execution succeeded,\n",
|
||||
"TERMINATE\u001b[0m\n",
|
||||
"TERMINATE\u001B[0m\n",
|
||||
"\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"```agent-messages\n",
|
||||
|
Reference in New Issue
Block a user