Mirror of https://github.com/csunny/DB-GPT.git (synced 2025-07-21 11:29:15 +00:00)

feat(agent): Add trace for agent (#1407)

This commit is contained in:
  parent 7d6dfd9ea8
  commit aea575e0b4
@@ -8,6 +8,7 @@ from typing import Any, Dict, List, Optional, Tuple, Type, Union, cast
 from dbgpt._private.pydantic import Field
 from dbgpt.core import LLMClient, ModelMessageRoleType
 from dbgpt.util.error_types import LLMChatError
+from dbgpt.util.tracer import SpanType, root_tracer
 from dbgpt.util.utils import colored
 
 from ..actions.action import Action, ActionOutput

@@ -199,13 +200,25 @@ class ConversableAgent(Role, Agent):
         is_recovery: Optional[bool] = False,
     ) -> None:
         """Send a message to recipient agent."""
-        await recipient.receive(
-            message=message,
-            sender=self,
-            reviewer=reviewer,
-            request_reply=request_reply,
-            is_recovery=is_recovery,
-        )
+        with root_tracer.start_span(
+            "agent.send",
+            metadata={
+                "sender": self.get_name(),
+                "recipient": recipient.get_name(),
+                "reviewer": reviewer.get_name() if reviewer else None,
+                "agent_message": message.to_dict(),
+                "request_reply": request_reply,
+                "is_recovery": is_recovery,
+                "conv_uid": self.not_null_agent_context.conv_id,
+            },
+        ):
+            await recipient.receive(
+                message=message,
+                sender=self,
+                reviewer=reviewer,
+                request_reply=request_reply,
+                is_recovery=is_recovery,
+            )
 
     async def receive(
         self,

@@ -217,16 +230,30 @@ class ConversableAgent(Role, Agent):
         is_recovery: Optional[bool] = False,
     ) -> None:
         """Receive a message from another agent."""
-        await self._a_process_received_message(message, sender)
-        if request_reply is False or request_reply is None:
-            return
+        with root_tracer.start_span(
+            "agent.receive",
+            metadata={
+                "sender": sender.get_name(),
+                "recipient": self.get_name(),
+                "reviewer": reviewer.get_name() if reviewer else None,
+                "agent_message": message.to_dict(),
+                "request_reply": request_reply,
+                "silent": silent,
+                "is_recovery": is_recovery,
+                "conv_uid": self.not_null_agent_context.conv_id,
+                "is_human": self.is_human,
+            },
+        ):
+            await self._a_process_received_message(message, sender)
+            if request_reply is False or request_reply is None:
+                return
 
-        if not self.is_human:
-            reply = await self.generate_reply(
-                received_message=message, sender=sender, reviewer=reviewer
-            )
-            if reply is not None:
-                await self.send(reply, sender)
+            if not self.is_human:
+                reply = await self.generate_reply(
+                    received_message=message, sender=sender, reviewer=reviewer
+                )
+                if reply is not None:
+                    await self.send(reply, sender)
 
     def prepare_act_param(self) -> Dict[str, Any]:
         """Prepare the parameters for the act method."""

@@ -244,13 +271,44 @@ class ConversableAgent(Role, Agent):
         logger.info(
             f"generate agent reply!sender={sender}, rely_messages_len={rely_messages}"
         )
+        root_span = root_tracer.start_span(
+            "agent.generate_reply",
+            metadata={
+                "sender": sender.get_name(),
+                "recipient": self.get_name(),
+                "reviewer": reviewer.get_name() if reviewer else None,
+                "received_message": received_message.to_dict(),
+                "conv_uid": self.not_null_agent_context.conv_id,
+                "rely_messages": [msg.to_dict() for msg in rely_messages]
+                if rely_messages
+                else None,
+            },
+        )
+
         try:
-            reply_message: AgentMessage = self._init_reply_message(
-                received_message=received_message
-            )
-            await self._system_message_assembly(
-                received_message.content, reply_message.context
-            )
+            with root_tracer.start_span(
+                "agent.generate_reply._init_reply_message",
+                metadata={
+                    "received_message": received_message.to_dict(),
+                },
+            ) as span:
+                # initialize reply message
+                reply_message: AgentMessage = self._init_reply_message(
+                    received_message=received_message
+                )
+                span.metadata["reply_message"] = reply_message.to_dict()
+
+            with root_tracer.start_span(
+                "agent.generate_reply._system_message_assembly",
+                metadata={
+                    "reply_message": reply_message.to_dict(),
+                },
+            ) as span:
+                # assemble system message
+                await self._system_message_assembly(
+                    received_message.content, reply_message.context
+                )
+                span.metadata["assembled_system_messages"] = self.oai_system_message
 
             fail_reason = None
             current_retry_counter = 0

@@ -270,36 +328,73 @@ class ConversableAgent(Role, Agent):
                         retry_message, self, reviewer, request_reply=False
                     )
 
-                # 1.Think about how to do things
-                llm_reply, model_name = await self.thinking(
-                    self._load_thinking_messages(
-                        received_message, sender, rely_messages
+                thinking_messages = self._load_thinking_messages(
+                    received_message, sender, rely_messages
+                )
+                with root_tracer.start_span(
+                    "agent.generate_reply.thinking",
+                    metadata={
+                        "thinking_messages": [
+                            msg.to_dict() for msg in thinking_messages
+                        ],
+                    },
+                ) as span:
+                    # 1.Think about how to do things
+                    llm_reply, model_name = await self.thinking(thinking_messages)
+                    reply_message.model_name = model_name
+                    reply_message.content = llm_reply
+                    span.metadata["llm_reply"] = llm_reply
+                    span.metadata["model_name"] = model_name
+
+                with root_tracer.start_span(
+                    "agent.generate_reply.review",
+                    metadata={"llm_reply": llm_reply, "censored": self.get_name()},
+                ) as span:
+                    # 2.Review whether what is being done is legal
+                    approve, comments = await self.review(llm_reply, self)
+                    reply_message.review_info = AgentReviewInfo(
+                        approve=approve,
+                        comments=comments,
                     )
-                )
-                reply_message.model_name = model_name
-                reply_message.content = llm_reply
+                    span.metadata["approve"] = approve
+                    span.metadata["comments"] = comments
 
-                # 2.Review whether what is being done is legal
-                approve, comments = await self.review(llm_reply, self)
-                reply_message.review_info = AgentReviewInfo(
-                    approve=approve,
-                    comments=comments,
-                )
-
-                # 3.Act based on the results of your thinking
                 act_extent_param = self.prepare_act_param()
-                act_out: Optional[ActionOutput] = await self.act(
-                    message=llm_reply,
-                    sender=sender,
-                    reviewer=reviewer,
-                    **act_extent_param,
-                )
-                if act_out:
-                    reply_message.action_report = act_out.dict()
+                with root_tracer.start_span(
+                    "agent.generate_reply.act",
+                    metadata={
+                        "llm_reply": llm_reply,
+                        "sender": sender.get_name(),
+                        "reviewer": reviewer.get_name() if reviewer else None,
+                        "act_extent_param": act_extent_param,
+                    },
+                ) as span:
+                    # 3.Act based on the results of your thinking
+                    act_out: Optional[ActionOutput] = await self.act(
+                        message=llm_reply,
+                        sender=sender,
+                        reviewer=reviewer,
+                        **act_extent_param,
+                    )
+                    if act_out:
+                        reply_message.action_report = act_out.dict()
+                    span.metadata["action_report"] = act_out.dict() if act_out else None
 
-                # 4.Reply information verification
-                check_pass, reason = await self.verify(reply_message, sender, reviewer)
-                is_success = check_pass
+                with root_tracer.start_span(
+                    "agent.generate_reply.verify",
+                    metadata={
+                        "llm_reply": llm_reply,
+                        "sender": sender.get_name(),
+                        "reviewer": reviewer.get_name() if reviewer else None,
+                    },
+                ) as span:
+                    # 4.Reply information verification
+                    check_pass, reason = await self.verify(
+                        reply_message, sender, reviewer
+                    )
+                    is_success = check_pass
+                    span.metadata["check_pass"] = check_pass
+                    span.metadata["reason"] = reason
                 # 5.Optimize wrong answers myself
                 if not check_pass:
                     current_retry_counter += 1

@@ -319,6 +414,9 @@ class ConversableAgent(Role, Agent):
             err_message = AgentMessage(content=str(e))
             err_message.success = False
             return err_message
+        finally:
+            root_span.metadata["reply_message"] = reply_message.to_dict()
+            root_span.end()
 
     async def thinking(
         self, messages: List[AgentMessage], prompt: Optional[str] = None

@@ -378,7 +476,7 @@ class ConversableAgent(Role, Agent):
     ) -> Optional[ActionOutput]:
         """Perform actions."""
         last_out: Optional[ActionOutput] = None
-        for action in self.actions:
+        for i, action in enumerate(self.actions):
             # Select the resources required by acton
             need_resource = None
             if self.resources and len(self.resources) > 0:

@@ -390,12 +488,27 @@ class ConversableAgent(Role, Agent):
             if not message:
                 raise ValueError("The message content is empty!")
 
-            last_out = await action.run(
-                ai_message=message,
-                resource=need_resource,
-                rely_action_out=last_out,
-                **kwargs,
-            )
+            with root_tracer.start_span(
+                "agent.act.run",
+                metadata={
+                    "message": message,
+                    "sender": sender.get_name() if sender else None,
+                    "recipient": self.get_name(),
+                    "reviewer": reviewer.get_name() if reviewer else None,
+                    "need_resource": need_resource.to_dict() if need_resource else None,
+                    "rely_action_out": last_out.dict() if last_out else None,
+                    "conv_uid": self.not_null_agent_context.conv_id,
+                    "action_index": i,
+                    "total_action": len(self.actions),
+                },
+            ) as span:
+                last_out = await action.run(
+                    ai_message=message,
+                    resource=need_resource,
+                    rely_action_out=last_out,
+                    **kwargs,
+                )
+                span.metadata["action_out"] = last_out.dict() if last_out else None
 
         return last_out
 
     async def correctness_check(

@@ -446,12 +559,24 @@ class ConversableAgent(Role, Agent):
             reviewer (Agent): The reviewer agent.
             message (str): The message to send.
         """
-        await self.send(
-            AgentMessage(content=message, current_goal=message),
-            recipient,
-            reviewer,
-            request_reply=True,
-        )
+        agent_message = AgentMessage(content=message, current_goal=message)
+        with root_tracer.start_span(
+            "agent.initiate_chat",
+            span_type=SpanType.AGENT,
+            metadata={
+                "sender": self.get_name(),
+                "recipient": recipient.get_name(),
+                "reviewer": reviewer.get_name() if reviewer else None,
+                "agent_message": agent_message.to_dict(),
+                "conv_uid": self.not_null_agent_context.conv_id,
+            },
+        ):
+            await self.send(
+                agent_message,
+                recipient,
+                reviewer,
+                request_reply=True,
+            )
 
     #######################################################################
     # Private Function Begin

@@ -506,8 +631,15 @@ class ConversableAgent(Role, Agent):
             model_name=oai_message.get("model_name", None),
         )
 
-        self.memory.message_memory.append(gpts_message)
-        return True
+        with root_tracer.start_span(
+            "agent.save_message_to_memory",
+            metadata={
+                "gpts_message": gpts_message.to_dict(),
+                "conv_uid": self.not_null_agent_context.conv_id,
+            },
+        ):
+            self.memory.message_memory.append(gpts_message)
+            return True
 
     def _print_received_message(self, message: AgentMessage, sender: Agent):
         # print the message received

@@ -711,14 +843,27 @@ class ConversableAgent(Role, Agent):
 
         # Convert and tailor the information in collective memory into contextual
         # memory available to the current Agent
-        current_goal_messages = self._convert_to_ai_message(
-            self.memory.message_memory.get_between_agents(
+
+        with root_tracer.start_span(
+            "agent._load_thinking_messages",
+            metadata={
+                "sender": sender.get_name(),
+                "recipient": self.get_name(),
+                "conv_uid": self.not_null_agent_context.conv_id,
+                "current_goal": current_goal,
+            },
+        ) as span:
+            # Get historical information from the memory
+            memory_messages = self.memory.message_memory.get_between_agents(
                 self.not_null_agent_context.conv_id,
                 self.profile,
                 sender.get_profile(),
                 current_goal,
             )
-        )
+            span.metadata["memory_messages"] = [
+                message.to_dict() for message in memory_messages
+            ]
+        current_goal_messages = self._convert_to_ai_message(memory_messages)
 
         # When there is no target and context, the current received message is used as
         # the target problem
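The same pattern recurs throughout this file: wrap an existing step in `root_tracer.start_span(...)`, record the inputs as span metadata up front, and write the outputs into `span.metadata` before the context manager closes the span. A minimal sketch of that pattern, with the operation name and bodies being illustrative rather than taken from the commit:

```python
from dbgpt.util.tracer import root_tracer


async def traced_step(message: dict) -> dict:
    # Inputs go into metadata at span creation, so they are recorded
    # even if the wrapped step raises.
    with root_tracer.start_span(
        "agent.example_step",
        metadata={"input_message": message},
    ) as span:
        result = {"ok": True}  # placeholder for the real work
        # Outputs are attached after the work, before the span ends.
        span.metadata["result"] = result
        return result
```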
@@ -25,7 +25,17 @@ CHECK_RESULT_SYSTEM_MESSAGE = (
     "such as: True.\n"
     " If it is determined to be a failure, return false and the reason, "
     "such as: False. There are no numbers in the execution results that answer the "
-    "computational goals of the mission."
+    "computational goals of the mission.\n"
+    "You can refer to the following examples:\n"
+    "user: Please understand the following task objectives and results and give your "
+    "judgment:\nTask goal: Calculate the result of 1 + 2 using Python code.\n"
+    "Execution Result: 3\n"
+    "assistant: True\n\n"
+    "user: Please understand the following task objectives and results and give your "
+    "judgment:\nTask goal: Calculate the result of 100 * 10 using Python code.\n"
+    "Execution Result: 'you can get the result by multiplying 100 by 10'\n"
+    "assistant: False. There are no numbers in the execution results that answer the "
+    "computational goals of the mission.\n"
 )
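The few-shot examples added here steer the checking model toward the exact `True` / `False. <reason>` output format that the verification step parses. This system prompt is consumed as the `prompt` argument of a `thinking` call; a sketch of that usage, mirroring the pattern shown in the custom-agent docs added later in this commit (the `check` wrapper itself is hypothetical):

```python
from dbgpt.agent import AgentMessage
from dbgpt.core import ModelMessageRoleType


async def check(self, task_goal: str, execution_result: str):
    # CHECK_RESULT_SYSTEM_MESSAGE is the prompt extended by this hunk.
    check_result, model = await self.thinking(
        messages=[
            AgentMessage(
                role=ModelMessageRoleType.HUMAN,
                content=(
                    "Please understand the following task objectives and results"
                    " and give your judgment:\n"
                    f"Task goal: {task_goal}\n"
                    f"Execution Result: {execution_result}"
                ),
            )
        ],
        prompt=CHECK_RESULT_SYSTEM_MESSAGE,
    )
    # The few-shot examples make the first token reliably True/False.
    return check_result.lower().startswith("true"), check_result
```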
@@ -1,7 +1,7 @@
 """Database resource client API."""
 import logging
 from contextlib import contextmanager
-from typing import Iterator, List, Optional, Union
+from typing import TYPE_CHECKING, Iterator, List, Optional, Union
 
 from .resource_api import AgentResource, ResourceClient, ResourceType
 

@@ -48,7 +48,8 @@ class ResourceDbClient(ResourceClient):
 class SqliteLoadClient(ResourceDbClient):
     """SQLite resource client."""
 
-    from sqlalchemy.orm.session import Session
+    if TYPE_CHECKING:
+        from sqlalchemy.orm.session import Session
 
     def __init__(self):
         """Create a SQLite resource client."""

@@ -59,7 +60,7 @@ class SqliteLoadClient(ResourceDbClient):
         return "sqlite"
 
     @contextmanager
-    def connect(self, db) -> Iterator[Session]:
+    def connect(self, db) -> Iterator["Session"]:
         """Connect to the database."""
         from sqlalchemy import create_engine
         from sqlalchemy.orm import sessionmaker
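Moving the SQLAlchemy import under `typing.TYPE_CHECKING` (and quoting the annotation) keeps SQLAlchemy off the runtime import path while type checkers still resolve `Session`. A generic sketch of the idiom, not the client's actual body:

```python
from contextlib import contextmanager
from typing import TYPE_CHECKING, Iterator

if TYPE_CHECKING:
    # Seen only by type checkers; never executed at runtime, so the
    # dependency stays optional for users who never touch this client.
    from sqlalchemy.orm.session import Session


@contextmanager
def connect(db_url: str) -> Iterator["Session"]:
    # The real import happens lazily, inside the function that needs it.
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker

    engine = create_engine(db_url)
    session = sessionmaker(bind=engine)()
    try:
        yield session
    finally:
        session.close()
```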
@@ -1 +1,4 @@
 """Utils for the agent module."""
+from .cmp import cmp_string_equal  # noqa: F401
+
+__ALL__ = ["cmp_string_equal"]
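`cmp_string_equal` is now part of the public agent-util surface; the custom-agent docs added below use it for fuzzy comparison of model output. Hypothetical usage, with the keyword arguments as they appear later in this diff:

```python
from dbgpt.agent.util import cmp_string_equal

# Compare model output against a canned message while tolerating
# case, punctuation, and whitespace differences.
same = cmp_string_equal(
    "Did not find the information you want!",
    "did not find the information you want",
    ignore_case=True,
    ignore_punctuation=True,
    ignore_whitespace=True,
)
assert same
```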
@@ -223,8 +223,8 @@ def run_webserver(param: WebServerParameters = None):
     if not param:
         param = _get_webserver_params()
     initialize_tracer(
-        system_app,
         os.path.join(LOGDIR, param.tracer_file),
+        system_app=system_app,
         tracer_storage_cls=param.tracer_storage_cls,
     )
@@ -5,8 +5,10 @@ Manages the lifecycle and registration of components.
 from __future__ import annotations
 
 import asyncio
+import atexit
 import logging
 import sys
+import threading
 from abc import ABC, abstractmethod
 from enum import Enum
 from typing import TYPE_CHECKING, Dict, Optional, Type, TypeVar, Union

@@ -162,6 +164,8 @@ class SystemApp(LifeCycle):
         ] = {}  # Dictionary to store registered components.
         self._asgi_app = asgi_app
         self._app_config = app_config or AppConfig()
+        self._stop_event = threading.Event()
+        self._stop_event.clear()
         self._build()
 
     @property

@@ -273,11 +277,14 @@ class SystemApp(LifeCycle):
 
     def before_stop(self):
         """Invoke the before_stop hooks for all registered components."""
+        if self._stop_event.is_set():
+            return
         for _, v in self.components.items():
             try:
                 v.before_stop()
             except Exception as e:
                 pass
+        self._stop_event.set()
 
     async def async_before_stop(self):
         """Asynchronously invoke the before_stop hooks for all registered components."""

@@ -287,6 +294,7 @@ class SystemApp(LifeCycle):
     def _build(self):
         """Integrate lifecycle events with the internal ASGI app if available."""
         if not self.app:
+            self._register_exit_handler()
             return
 
         @self.app.on_event("startup")

@@ -308,3 +316,7 @@ class SystemApp(LifeCycle):
         """ASGI app shutdown event handler."""
         await self.async_before_stop()
         self.before_stop()
+
+    def _register_exit_handler(self):
+        """Register an exit handler to stop the system app."""
+        atexit.register(self.before_stop)
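The `threading.Event` plus `atexit` combination makes shutdown idempotent: `before_stop` runs at most once, whether it is triggered by the ASGI shutdown hook or by interpreter exit. A standalone sketch of the same idiom (the class and comments are illustrative):

```python
import atexit
import threading


class Stoppable:
    def __init__(self):
        self._stop_event = threading.Event()
        # Without an ASGI app there is no shutdown hook, so fall back to atexit.
        atexit.register(self.before_stop)

    def before_stop(self):
        if self._stop_event.is_set():
            return  # already stopped via another path
        # ... release resources here ...
        self._stop_event.set()
```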
@@ -1126,8 +1126,8 @@ def run_worker_manager(
 
     system_app = SystemApp(app)
     initialize_tracer(
-        system_app,
         os.path.join(LOGDIR, worker_params.tracer_file),
+        system_app=system_app,
         root_operation_name="DB-GPT-WorkerManager-Entry",
         tracer_storage_cls=worker_params.tracer_storage_cls,
     )
@@ -15,6 +15,7 @@ class SpanType(str, Enum):
     BASE = "base"
     RUN = "run"
     CHAT = "chat"
+    AGENT = "agent"
 
 
 class SpanTypeRunName(str, Enum):

@@ -99,6 +100,21 @@ class Span:
             "metadata": _clean_for_json(self.metadata),
         }
 
+    def copy(self) -> Span:
+        """Create a copy of this span."""
+        metadata = self.metadata.copy() if self.metadata else None
+        span = Span(
+            self.trace_id,
+            self.span_id,
+            self.span_type,
+            self.parent_span_id,
+            self.operation_name,
+            metadata=metadata,
+        )
+        span.start_time = self.start_time
+        span.end_time = self.end_time
+        return span
+
 
 class SpanStorageType(str, Enum):
     ON_CREATE = "on_create"

@@ -191,7 +207,7 @@ class TracerContext:
 
 
 def _clean_for_json(data: Optional[str, Any] = None):
-    if not data:
+    if data is None:
         return None
     if isinstance(data, dict):
         cleaned_dict = {}
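`Span.copy` exists because spans stay mutable after creation: callers keep writing into `span.metadata` inside a `with` block while storage may serialize the span on a background thread. Storing a snapshot avoids that race, and `DefaultTracer.append_span` below switches to `span.copy()` for exactly this reason. A sketch of the hazard:

```python
# Without copying, storage and caller share one mutable object:
span = root_tracer.start_span("agent.example", metadata={"k": "v"})
# storage.append_span(span)          # storage would hold the same dict...
span.metadata["late_field"] = 1      # ...so later writes leak into stored data
# storage.append_span(span.copy())   # snapshot: later mutation stays invisible
```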
@@ -49,7 +49,9 @@ class SpanStorageContainer(SpanStorage):
         self.flush_thread = threading.Thread(
             target=self._flush_to_storages, daemon=True
         )
+        self._stop_event = threading.Event()
         self.flush_thread.start()
+        self._stop_event.clear()
 
     def append_storage(self, storage: SpanStorage):
         """Append sotrage to container

@@ -68,7 +70,7 @@ class SpanStorageContainer(SpanStorage):
             pass  # If the signal queue is full, it's okay. The flush thread will handle it.
 
     def _flush_to_storages(self):
-        while True:
+        while not self._stop_event.is_set():
             interval = time.time() - self.last_flush_time
             if interval < self.flush_interval:
                 try:

@@ -90,13 +92,24 @@ class SpanStorageContainer(SpanStorage):
                 try:
                     storage.append_span_batch(spans_to_write)
                 except Exception as e:
-                    logger.warn(
+                    logger.warning(
                         f"Append spans to storage {str(storage)} failed: {str(e)}, span_data: {spans_to_write}"
                     )
 
-            self.executor.submit(append_and_ignore_error, s, spans_to_write)
+            try:
+                self.executor.submit(append_and_ignore_error, s, spans_to_write)
+            except RuntimeError:
+                append_and_ignore_error(s, spans_to_write)
             self.last_flush_time = time.time()
 
     def before_stop(self):
         try:
             self.flush_signal_queue.put(True)
+            self._stop_event.set()
+            self.flush_thread.join()
         except Exception:
             pass
 
 
 class FileSpanStorage(SpanStorage):
     def __init__(self, filename: str):
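Two shutdown problems are handled here: the flush loop now exits once `_stop_event` is set (instead of `while True`), and `executor.submit` is guarded because a `ThreadPoolExecutor` raises `RuntimeError` after it has been shut down at interpreter exit; in that case the flush runs synchronously. The guard in isolation (function names are illustrative):

```python
from concurrent.futures import ThreadPoolExecutor


def flush(executor: ThreadPoolExecutor, write, storage, spans):
    try:
        # Normal path: hand the batch to a worker thread.
        executor.submit(write, storage, spans)
    except RuntimeError:
        # Executor already shut down (e.g. during atexit): write inline
        # so the last batch of spans is not silently dropped.
        write(storage, spans)
```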
@@ -52,7 +52,7 @@ def test_start_and_end_span(tracer: Tracer):
     assert span.end_time is not None
 
     stored_span = tracer._get_current_storage().spans[0]
-    assert stored_span == span
+    assert stored_span.span_id == span.span_id
 
 
 def test_start_and_end_span_with_tracer_manager(tracer_manager: TracerManager):

@@ -76,8 +76,12 @@ def test_parent_child_span_relation(tracer: Tracer):
     tracer.end_span(child_span)
     tracer.end_span(parent_span)
 
-    assert parent_span in tracer._get_current_storage().spans
-    assert child_span in tracer._get_current_storage().spans
+    assert parent_span.operation_name in [
+        s.operation_name for s in tracer._get_current_storage().spans
+    ]
+    assert child_span.operation_name in [
+        s.operation_name for s in tracer._get_current_storage().spans
+    ]
 
 
 @pytest.mark.parametrize(
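These test changes follow from the next file: `DefaultTracer.append_span` now stores `span.copy()`, so the stored object is no longer the same object the test holds, and the asserts are relaxed to identity-stable fields. Roughly, assuming the fixtures of this test module:

```python
span = tracer.start_span("op")
tracer.end_span(span)
stored = tracer._get_current_storage().spans[0]
assert stored is not span                       # a snapshot was stored
assert stored.span_id == span.span_id           # stable fields still line up
assert stored.operation_name == span.operation_name
```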
@@ -36,7 +36,7 @@ class DefaultTracer(Tracer):
         self._span_storage_type = span_storage_type
 
     def append_span(self, span: Span):
-        self._get_current_storage().append_span(span)
+        self._get_current_storage().append_span(span.copy())
 
     def start_span(
         self,

@@ -130,9 +130,13 @@ class TracerManager:
         """
         tracer = self._get_tracer()
         if not tracer:
-            return Span("empty_span", "empty_span")
+            return Span(
+                "empty_span", "empty_span", span_type=span_type, metadata=metadata
+            )
         if not parent_span_id:
             parent_span_id = self.get_current_span_id()
+        if not span_type and parent_span_id:
+            span_type = self._get_current_span_type()
         return tracer.start_span(
             operation_name, parent_span_id, span_type=span_type, metadata=metadata
         )

@@ -156,6 +160,10 @@ class TracerManager:
         ctx = self._trace_context_var.get()
         return ctx.span_id if ctx else None
 
+    def _get_current_span_type(self) -> Optional[SpanType]:
+        current_span = self.get_current_span()
+        return current_span.span_type if current_span else None
+
 
 root_tracer: TracerManager = TracerManager()

@@ -197,14 +205,19 @@ def _parse_operation_name(func, *args):
 
 
 def initialize_tracer(
-    system_app: SystemApp,
     tracer_filename: str,
     root_operation_name: str = "DB-GPT-Web-Entry",
-    tracer_storage_cls: str = None,
+    system_app: Optional[SystemApp] = None,
+    tracer_storage_cls: Optional[str] = None,
+    create_system_app: bool = False,
 ):
     """Initialize the tracer with the given filename and system app."""
-    from dbgpt.util.tracer.span_storage import FileSpanStorage, SpanStorageContainer
-
+    if not system_app and create_system_app:
+        system_app = SystemApp()
     if not system_app:
         return
+    from dbgpt.util.tracer.span_storage import FileSpanStorage, SpanStorageContainer
 
     trace_context_var = ContextVar(
         "trace_context",
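With `system_app` now optional, standalone scripts can bootstrap tracing in one line; the example files below use exactly this form. Typical calls, both taken from this diff:

```python
from dbgpt.util.tracer import initialize_tracer

# Standalone script: let the function create its own SystemApp.
initialize_tracer("/tmp/agent_trace.jsonl", create_system_app=True)

# Inside a server that already has a SystemApp (as in run_webserver above):
# initialize_tracer(trace_file, system_app=system_app,
#                   tracer_storage_cls=param.tracer_storage_cls)
```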
@@ -1,5 +1,6 @@
 """GPT-Vis Module."""
 
+from .base import Vis  # noqa: F401
 from .client import vis_client  # noqa: F401
 from .tags.vis_agent_message import VisAgentMessages  # noqa: F401
 from .tags.vis_agent_plans import VisAgentPlans  # noqa: F401

@@ -9,6 +10,7 @@ from .tags.vis_dashboard import VisDashboard  # noqa: F401
 from .tags.vis_plugin import VisPlugin  # noqa: F401
 
 __ALL__ = [
+    "Vis",
     "vis_client",
     "VisAgentMessages",
     "VisAgentPlans",
docs/docs/agents/cookbook/calculator_with_agents.md (new file, 91 lines)
@@ -0,0 +1,91 @@

# Calculator With Agents

In this example, we will show you how to use an agent as your calculator.

## Installations

Install the required packages by running the following command:

```bash
pip install "dbgpt[agent]>=0.5.4rc0" -U
pip install openai
```

## Code

Create a new Python file and add the following code:

```python
import asyncio

from dbgpt.agent import AgentContext, GptsMemory, LLMConfig, UserProxyAgent
from dbgpt.agent.expand.code_assistant_agent import CodeAssistantAgent
from dbgpt.model.proxy import OpenAILLMClient


async def main():
    llm_client = OpenAILLMClient(model_alias="gpt-3.5-turbo")
    context: AgentContext = AgentContext(conv_id="test123")
    default_memory: GptsMemory = GptsMemory()

    # Create a code assistant agent
    coder = (
        await CodeAssistantAgent()
        .bind(context)
        .bind(LLMConfig(llm_client=llm_client))
        .bind(default_memory)
        .build()
    )

    # Create a user proxy agent
    user_proxy = await UserProxyAgent().bind(context).bind(default_memory).build()

    # Initiate a chat with the user proxy agent
    await user_proxy.initiate_chat(
        recipient=coder,
        reviewer=user_proxy,
        message="calculate the result of 321 * 123"
    )
    # Obtain conversation history messages between agents
    print(await default_memory.one_chat_completions("test123"))


if __name__ == "__main__":
    asyncio.run(main())
```

You will see the following output:

````bash
--------------------------------------------------------------------------------
User (to Turing)-[]:

"calculate the result of 321 * 123"

--------------------------------------------------------------------------------
un_stream ai response: ```python
# Calculate the result of 321 * 123
result = 321 * 123
print(result)
```

>>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...
execute_code was called without specifying a value for use_docker. Since the python docker package is not available, code will be run natively. Note: this fallback behavior is subject to change
un_stream ai response: True

--------------------------------------------------------------------------------
Turing (to User)-[gpt-3.5-turbo]:

"```python\n# Calculate the result of 321 * 123\nresult = 321 * 123\nprint(result)\n```"
>>>>>>>>Turing Review info:
Pass(None)
>>>>>>>>Turing Action report:
execution succeeded,

39483


--------------------------------------------------------------------------------
```agent-plans
[{"name": "calculate the result of 321 * 123", "num": 1, "status": "complete", "agent": "Human", "markdown": "```agent-messages\n[{\"sender\": \"CodeEngineer\", \"receiver\": \"Human\", \"model\": \"gpt-3.5-turbo\", \"markdown\": \"```vis-code\\n{\\\"exit_success\\\": true, \\\"language\\\": \\\"python\\\", \\\"code\\\": [[\\\"python\\\", \\\"# Calculate the result of 321 * 123\\\\nresult = 321 * 123\\\\nprint(result)\\\"]], \\\"log\\\": \\\"\\\\n39483\\\\n\\\"}\\n```\"}]\n```"}]
```
(dbgpt-agents-py3.11) (base) ➜  dbgpt-agents
````
docs/docs/agents/custom_agents.md (new file, 705 lines)
@@ -0,0 +1,705 @@

# Write Your Custom Agent

## Introduction

In this example, we will show you how to create a custom agent that can be used as a summarizer.

## Installations

Install the required packages by running the following command:

```bash
pip install "dbgpt[agent]>=0.5.4rc0" -U
pip install openai
```

## Create a Custom Agent

### Initialize The Agent

In most cases, you just need to inherit a basic agent and override the corresponding methods.

```python
from dbgpt.agent import ConversableAgent


class MySummarizerAgent(ConversableAgent):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
```

### Define the profile

Before designing each agent, it is necessary to define its role, identity, and functional role. The specific definitions are as follows:

```python
from dbgpt.agent import ConversableAgent


class MySummarizerAgent(ConversableAgent):
    # The name of the agent
    name = "Aristotle"
    # The profile of the agent
    profile: str = "Summarizer"
    # The core functional goals of the agent tell LLM what it can do with it.
    goal: str = (
        "Summarize answer summaries based on user questions from provided "
        "resource information or from historical conversation memories."
    )
    # Introduction and description of the agent, used for task assignment and display.
    # If it is empty, the goal content will be used.
    desc: str = (
        "You can summarize provided text content according to user's questions"
        " and output the summarization."
    )

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
```

### Supplementary Prompt Constraints

The agent's prompt is assembled from a fixed template by default (an external template can be bound if there are special requirements), which mainly includes:
1. Identity definition (automatically constructed)
2. Resource information (automatically constructed)
3. Constraint logic
4. Reference case (optional)
5. Output format templates and constraints (automatically constructed)

So, we can define the constraints of the agent's prompt as follows:

```python
from dbgpt.agent import ConversableAgent


class MySummarizerAgent(ConversableAgent):
    # The name of the agent
    name = "Aristotle"
    # The profile of the agent
    profile: str = "Summarizer"
    # The core functional goals of the agent tell LLM what it can do with it.
    goal: str = (
        "Summarize answer summaries based on user questions from provided "
        "resource information or from historical conversation memories."
    )
    # Introduction and description of the agent, used for task assignment and display.
    # If it is empty, the goal content will be used.
    desc: str = (
        "You can summarize provided text content according to user's questions"
        " and output the summarization."
    )
    # Refer to the following. It can contain multiple constraints and reasoning
    # restriction logic, and supports the use of parameter template {param_name}.
    constraints: list[str] = [
        "Prioritize the summary of answers to user questions from the improved resource"
        " text. If no relevant information is found, summarize it from the historical "
        "dialogue memory given. It is forbidden to make up your own.",
        "You need to first detect user's question that you need to answer with your"
        " summarization.",
        "Extract the provided text content used for summarization.",
        "Then you need to summarize the extracted text content.",
        "Output the content of summarization ONLY related to user's question. The "
        "output language must be the same to user's question language.",
        "If you think the provided text content is not related to user questions at "
        "all, ONLY output '{not_related_message}'!!.",
    ]

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
```

### Prompt Template Format

If dynamic parameters are used in the prompt, the actual dialogue process is required to assemble the values, and the following interface (`_init_reply_message`) needs to be overridden and implemented:

```python
from dbgpt.agent import ConversableAgent, AgentMessage

NOT_RELATED_MESSAGE = "Did not find the information you want."


class MySummarizerAgent(ConversableAgent):
    # The name of the agent
    name = "Aristotle"
    # The profile of the agent
    profile: str = "Summarizer"
    # The core functional goals of the agent tell LLM what it can do with it.
    goal: str = (
        "Summarize answer summaries based on user questions from provided "
        "resource information or from historical conversation memories."
    )
    # Introduction and description of the agent, used for task assignment and display.
    # If it is empty, the goal content will be used.
    desc: str = (
        "You can summarize provided text content according to user's questions"
        " and output the summarization."
    )
    # Refer to the following. It can contain multiple constraints and reasoning
    # restriction logic, and supports the use of parameter template {param_name}.
    constraints: list[str] = [
        "Prioritize the summary of answers to user questions from the improved resource"
        " text. If no relevant information is found, summarize it from the historical "
        "dialogue memory given. It is forbidden to make up your own.",
        "You need to first detect user's question that you need to answer with your"
        " summarization.",
        "Extract the provided text content used for summarization.",
        "Then you need to summarize the extracted text content.",
        "Output the content of summarization ONLY related to user's question. The "
        "output language must be the same to user's question language.",
        "If you think the provided text content is not related to user questions at "
        "all, ONLY output '{not_related_message}'!!.",
    ]

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def _init_reply_message(self, received_message: AgentMessage) -> AgentMessage:
        reply_message = super()._init_reply_message(received_message)
        # Fill in the dynamic parameters in the prompt template
        reply_message.context = {
            "not_related_message": NOT_RELATED_MESSAGE
        }
        return reply_message
```

### Resource Preloading (Optional)

If there are some specific resources, the bound resources must be loaded in advance when the agent is initialized. You can refer to the following implementation. Whether this is needed depends on the actual resources; in most cases it is not necessary.

```python
from dbgpt.agent import ConversableAgent, AgentMessage


class MySummarizerAgent(ConversableAgent):
    # ... other code
    async def preload_resource(self) -> None:
        # Load the required resources
        for resource in self.resources:
            # Load your resource, please write your own code here
            pass
```

### Result Checking (Optional)

If the action execution results need to be strictly verified, there are two modes: code-logic verification and LLM verification. Verification is not mandatory, and the default implementation passes everything. Here is an example using LLM verification:

```python
from typing import Tuple, Optional

from dbgpt.agent import ConversableAgent, AgentMessage
from dbgpt.core import ModelMessageRoleType

CHECK_RESULT_SYSTEM_MESSAGE = (
    "You are an expert in analyzing the results of a summary task."
    "Your responsibility is to check whether the summary results can summarize the "
    "input provided by the user, and then make a judgment. You need to answer "
    "according to the following rules:\n"
    "    Rule 1: If you think the summary results can summarize the input provided"
    " by the user, only return True.\n"
    "    Rule 2: If you think the summary results can NOT summarize the input "
    "provided by the user, return False and the reason, split by | and ended "
    "by TERMINATE. For instance: False|Some important concepts in the input are "
    "not summarized. TERMINATE"
)


class MySummarizerAgent(ConversableAgent):
    # ... other code
    async def correctness_check(
        self, message: AgentMessage
    ) -> Tuple[bool, Optional[str]]:
        current_goal = message.current_goal
        action_report = message.action_report
        task_result = ""
        if action_report:
            task_result = action_report.get("content", "")

        check_result, model = await self.thinking(
            messages=[
                AgentMessage(
                    role=ModelMessageRoleType.HUMAN,
                    content=(
                        "Please understand the following user input and summary results"
                        " and give your judgment:\n"
                        f"User Input: {current_goal}\n"
                        f"Summary Results: {task_result}"
                    ),
                )
            ],
            prompt=CHECK_RESULT_SYSTEM_MESSAGE,
        )

        fail_reason = ""
        if check_result and (
            "true" in check_result.lower() or "yes" in check_result.lower()
        ):
            success = True
        else:
            success = False
            try:
                _, fail_reason = check_result.split("|")
                fail_reason = (
                    "The summary results cannot summarize the user input due"
                    f" to: {fail_reason}. Please re-understand and complete the summary"
                    " task."
                )
            except Exception:
                fail_reason = (
                    "The summary results cannot summarize the user input. "
                    "Please re-understand and complete the summary task."
                )
        return success, fail_reason
```

## Create A Custom Action

### Initialize The Action

All of an agent's operations on the external environment and the real world are implemented through `Action`. An action defines the agent's output content structure and actually performs the corresponding operation. A concrete `Action` implementation inherits the `Action` base class, as follows:

```python
from typing import Optional

from pydantic import BaseModel, Field

from dbgpt.vis import Vis
from dbgpt.agent import Action, ActionOutput, AgentResource, ResourceType
from dbgpt.agent.util import cmp_string_equal

NOT_RELATED_MESSAGE = "Did not find the information you want."


# The parameter object that the action executed by the current agent needs the LLM to output.
class SummaryActionInput(BaseModel):
    summary: str = Field(
        ...,
        description="The summary content",
    )


class SummaryAction(Action[SummaryActionInput]):
    def __init__(self):
        super().__init__()

    @property
    def resource_need(self) -> Optional[ResourceType]:
        # The resource type that the current Agent needs to use
        # here we do not need to use resources, just return None
        return None

    @property
    def render_protocol(self) -> Optional[Vis]:
        # The visualization rendering protocol that the current Agent needs to use
        # here we do not need to use visualization rendering, just return None
        return None

    @property
    def out_model_type(self):
        return SummaryActionInput

    async def run(
        self,
        ai_message: str,
        resource: Optional[AgentResource] = None,
        rely_action_out: Optional[ActionOutput] = None,
        need_vis_render: bool = True,
        **kwargs,
    ) -> ActionOutput:
        """Perform the action.

        The entry point for actual execution of Action. Action execution will be
        automatically initiated after model inference.
        """
        try:
            # Parse the input message
            param: SummaryActionInput = self._input_convert(
                ai_message, SummaryActionInput
            )
        except Exception:
            return ActionOutput(
                is_exe_success=False,
                content="The requested correctly structured answer could not be found, "
                f"ai message: {ai_message}",
            )
        # Check if the summary content is not related to user questions
        if param.summary and cmp_string_equal(
            param.summary,
            NOT_RELATED_MESSAGE,
            ignore_case=True,
            ignore_punctuation=True,
            ignore_whitespace=True,
        ):
            return ActionOutput(
                is_exe_success=False,
                content="the provided text content is not related to user questions at all."
                f"ai message: {ai_message}",
            )
        else:
            return ActionOutput(
                is_exe_success=True,
                content=param.summary,
            )
```

### Binding Action to Agent

After the agent and action have been developed and defined, bind the action to the corresponding agent.

```python
from pydantic import BaseModel

from dbgpt.agent import Action, ConversableAgent


class SummaryActionInput(BaseModel):
    ...


class SummaryAction(Action[SummaryActionInput]):
    ...


class MySummarizerAgent(ConversableAgent):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self._init_actions([SummaryAction])
```

### Action Extended Parameter Processing

If the agent needs to hand extra runtime parameters to its action, override `prepare_act_param` on the agent and read them from `kwargs` inside `Action.run`:

```python
from typing import Optional, Dict, Any

from pydantic import BaseModel

from dbgpt.agent import Action, ActionOutput, AgentResource, ConversableAgent


class SummaryActionInput(BaseModel):
    ...


class SummaryAction(Action[SummaryActionInput]):
    ...

    async def run(
        self,
        ai_message: str,
        resource: Optional[AgentResource] = None,
        rely_action_out: Optional[ActionOutput] = None,
        need_vis_render: bool = True,
        **kwargs,
    ) -> ActionOutput:
        # Read the extended parameters passed in by the agent
        extra_param = kwargs.get("action_extra_param_key", None)
        pass


class MySummarizerAgent(ConversableAgent):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self._init_actions([SummaryAction])

    def prepare_act_param(self) -> Dict[str, Any]:
        return {"action_extra_param_key": "this is extra param"}
```

## Use Your Custom Agent

After the custom agent is created, you can use it in the following way:

```python
import asyncio

from dbgpt.agent import AgentContext, ConversableAgent, GptsMemory, LLMConfig, UserProxyAgent
from dbgpt.model.proxy import OpenAILLMClient


class MySummarizerAgent(ConversableAgent):
    ...


async def main():
    llm_client = OpenAILLMClient(model_alias="gpt-3.5-turbo")
    context: AgentContext = AgentContext(conv_id="summarize")

    default_memory: GptsMemory = GptsMemory()

    summarizer = (
        await MySummarizerAgent()
        .bind(context)
        .bind(LLMConfig(llm_client=llm_client))
        .bind(default_memory)
        .build()
    )

    user_proxy = await UserProxyAgent().bind(default_memory).bind(context).build()

    await user_proxy.initiate_chat(
        recipient=summarizer,
        reviewer=user_proxy,
        message="""I want to summarize advantages of Nuclear Power according to the following content.
        Nuclear power in space is the use of nuclear power in outer space, typically either small fission systems or radioactive decay for electricity or heat. Another use is for scientific observation, as in a Mössbauer spectrometer. The most common type is a radioisotope thermoelectric generator, which has been used on many space probes and on crewed lunar missions. Small fission reactors for Earth observation satellites, such as the TOPAZ nuclear reactor, have also been flown.[1] A radioisotope heater unit is powered by radioactive decay and can keep components from becoming too cold to function, potentially over a span of decades.[2]
        The United States tested the SNAP-10A nuclear reactor in space for 43 days in 1965,[3] with the next test of a nuclear reactor power system intended for space use occurring on 13 September 2012 with the Demonstration Using Flattop Fission (DUFF) test of the Kilopower reactor.[4]
        After a ground-based test of the experimental 1965 Romashka reactor, which used uranium and direct thermoelectric conversion to electricity,[5] the USSR sent about 40 nuclear-electric satellites into space, mostly powered by the BES-5 reactor. The more powerful TOPAZ-II reactor produced 10 kilowatts of electricity.[3]
        Examples of concepts that use nuclear power for space propulsion systems include the nuclear electric rocket (nuclear powered ion thruster(s)), the radioisotope rocket, and radioisotope electric propulsion (REP).[6] One of the more explored concepts is the nuclear thermal rocket, which was ground tested in the NERVA program. Nuclear pulse propulsion was the subject of Project Orion.[7]
        Regulation and hazard prevention[edit]
        After the ban of nuclear weapons in space by the Outer Space Treaty in 1967, nuclear power has been discussed at least since 1972 as a sensitive issue by states.[8] Particularly its potential hazards to Earth's environment and thus also humans has prompted states to adopt in the U.N. General Assembly the Principles Relevant to the Use of Nuclear Power Sources in Outer Space (1992), particularly introducing safety principles for launches and to manage their traffic.[8]
        Benefits
        Both the Viking 1 and Viking 2 landers used RTGs for power on the surface of Mars. (Viking launch vehicle pictured)
        While solar power is much more commonly used, nuclear power can offer advantages in some areas. Solar cells, although efficient, can only supply energy to spacecraft in orbits where the solar flux is sufficiently high, such as low Earth orbit and interplanetary destinations close enough to the Sun. Unlike solar cells, nuclear power systems function independently of sunlight, which is necessary for deep space exploration. Nuclear-based systems can have less mass than solar cells of equivalent power, allowing more compact spacecraft that are easier to orient and direct in space. In the case of crewed spaceflight, nuclear power concepts that can power both life support and propulsion systems may reduce both cost and flight time.[9]
        Selected applications and/or technologies for space include:
        Radioisotope thermoelectric generator
        Radioisotope heater unit
        Radioisotope piezoelectric generator
        Radioisotope rocket
        Nuclear thermal rocket
        Nuclear pulse propulsion
        Nuclear electric rocket
        """,
    )
    print(await default_memory.one_chat_completions("summarize"))


if __name__ == "__main__":
    asyncio.run(main())
```

Full code as follows:

```python
import asyncio
from typing import Any, Dict, Optional, Tuple

from dbgpt.agent import (
    Action,
    ActionOutput,
    AgentContext,
    AgentMessage,
    AgentResource,
    ConversableAgent,
    GptsMemory,
    LLMConfig,
    ResourceType,
    UserProxyAgent,
)
from dbgpt.agent.util import cmp_string_equal
from dbgpt.core import ModelMessageRoleType
from dbgpt.model.proxy import OpenAILLMClient
from dbgpt.vis import Vis
from pydantic import BaseModel, Field

NOT_RELATED_MESSAGE = "Did not find the information you want."

CHECK_RESULT_SYSTEM_MESSAGE = (
    "You are an expert in analyzing the results of a summary task."
    "Your responsibility is to check whether the summary results can summarize the "
    "input provided by the user, and then make a judgment. You need to answer "
    "according to the following rules:\n"
    "    Rule 1: If you think the summary results can summarize the input provided"
    " by the user, only return True.\n"
    "    Rule 2: If you think the summary results can NOT summarize the input "
    "provided by the user, return False and the reason, split by | and ended "
    "by TERMINATE. For instance: False|Some important concepts in the input are "
    "not summarized. TERMINATE"
)


class MySummarizerAgent(ConversableAgent):
    # The name of the agent
    name = "Aristotle"
    # The profile of the agent
    profile: str = "Summarizer"
    # The core functional goals of the agent tell LLM what it can do with it.
    goal: str = (
        "Summarize answer summaries based on user questions from provided "
        "resource information or from historical conversation memories."
    )
    # Introduction and description of the agent, used for task assignment and display.
    # If it is empty, the goal content will be used.
    desc: str = (
        "You can summarize provided text content according to user's questions"
        " and output the summarization."
    )
    # Refer to the following. It can contain multiple constraints and reasoning
    # restriction logic, and supports the use of parameter template {param_name}.
    constraints: list[str] = [
        "Prioritize the summary of answers to user questions from the improved resource"
        " text. If no relevant information is found, summarize it from the historical "
        "dialogue memory given. It is forbidden to make up your own.",
        "You need to first detect user's question that you need to answer with your"
        " summarization.",
        "Extract the provided text content used for summarization.",
        "Then you need to summarize the extracted text content.",
        "Output the content of summarization ONLY related to user's question. The "
        "output language must be the same to user's question language.",
        "If you think the provided text content is not related to user questions at "
        "all, ONLY output '{not_related_message}'!!.",
    ]

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self._init_actions([SummaryAction])

    def _init_reply_message(self, received_message: AgentMessage) -> AgentMessage:
        reply_message = super()._init_reply_message(received_message)
        # Fill in the dynamic parameters in the prompt template
        reply_message.context = {
            "not_related_message": NOT_RELATED_MESSAGE
        }
        return reply_message

    def prepare_act_param(self) -> Dict[str, Any]:
        return {"action_extra_param_key": "this is extra param"}

    async def correctness_check(
        self, message: AgentMessage
    ) -> Tuple[bool, Optional[str]]:
        current_goal = message.current_goal
        action_report = message.action_report
        task_result = ""
        if action_report:
            task_result = action_report.get("content", "")

        check_result, model = await self.thinking(
            messages=[
                AgentMessage(
                    role=ModelMessageRoleType.HUMAN,
                    content=(
                        "Please understand the following user input and summary results"
                        " and give your judgment:\n"
                        f"User Input: {current_goal}\n"
                        f"Summary Results: {task_result}"
                    ),
                )
            ],
            prompt=CHECK_RESULT_SYSTEM_MESSAGE,
        )

        fail_reason = ""
        if check_result and (
            "true" in check_result.lower() or "yes" in check_result.lower()
        ):
            success = True
        else:
            success = False
            try:
                _, fail_reason = check_result.split("|")
                fail_reason = (
                    "The summary results cannot summarize the user input due"
                    f" to: {fail_reason}. Please re-understand and complete the summary"
                    " task."
                )
            except Exception:
                fail_reason = (
                    "The summary results cannot summarize the user input. "
                    "Please re-understand and complete the summary task."
                )
        return success, fail_reason


# The parameter object that the action executed by the current agent needs the LLM to output.
class SummaryActionInput(BaseModel):
    summary: str = Field(
        ...,
        description="The summary content",
    )


class SummaryAction(Action[SummaryActionInput]):
    def __init__(self):
        super().__init__()

    @property
    def resource_need(self) -> Optional[ResourceType]:
        # The resource type that the current Agent needs to use
        # here we do not need to use resources, just return None
        return None

    @property
    def render_protocol(self) -> Optional[Vis]:
        # The visualization rendering protocol that the current Agent needs to use
        # here we do not need to use visualization rendering, just return None
        return None

    @property
    def out_model_type(self):
        return SummaryActionInput

    async def run(
        self,
        ai_message: str,
        resource: Optional[AgentResource] = None,
        rely_action_out: Optional[ActionOutput] = None,
        need_vis_render: bool = True,
        **kwargs,
    ) -> ActionOutput:
        """Perform the action.

        The entry point for actual execution of Action. Action execution will be
        automatically initiated after model inference.
        """
        extra_param = kwargs.get("action_extra_param_key", None)
        try:
            # Parse the input message
            param: SummaryActionInput = self._input_convert(
                ai_message, SummaryActionInput
            )
        except Exception:
            return ActionOutput(
                is_exe_success=False,
                content="The requested correctly structured answer could not be found, "
                f"ai message: {ai_message}",
            )
        # Check if the summary content is not related to user questions
        if param.summary and cmp_string_equal(
            param.summary,
            NOT_RELATED_MESSAGE,
            ignore_case=True,
            ignore_punctuation=True,
            ignore_whitespace=True,
        ):
            return ActionOutput(
                is_exe_success=False,
                content="the provided text content is not related to user questions at all."
                f"ai message: {ai_message}",
            )
        else:
            return ActionOutput(
                is_exe_success=True,
                content=param.summary,
            )


async def main():
    llm_client = OpenAILLMClient(model_alias="gpt-3.5-turbo")
    context: AgentContext = AgentContext(conv_id="summarize")

    default_memory: GptsMemory = GptsMemory()

    summarizer = (
        await MySummarizerAgent()
        .bind(context)
        .bind(LLMConfig(llm_client=llm_client))
        .bind(default_memory)
        .build()
    )

    user_proxy = await UserProxyAgent().bind(default_memory).bind(context).build()

    await user_proxy.initiate_chat(
        recipient=summarizer,
        reviewer=user_proxy,
        message="""I want to summarize advantages of Nuclear Power according to the following content.
        Nuclear power in space is the use of nuclear power in outer space, typically either small fission systems or radioactive decay for electricity or heat. Another use is for scientific observation, as in a Mössbauer spectrometer. The most common type is a radioisotope thermoelectric generator, which has been used on many space probes and on crewed lunar missions. Small fission reactors for Earth observation satellites, such as the TOPAZ nuclear reactor, have also been flown.[1] A radioisotope heater unit is powered by radioactive decay and can keep components from becoming too cold to function, potentially over a span of decades.[2]
        The United States tested the SNAP-10A nuclear reactor in space for 43 days in 1965,[3] with the next test of a nuclear reactor power system intended for space use occurring on 13 September 2012 with the Demonstration Using Flattop Fission (DUFF) test of the Kilopower reactor.[4]
        After a ground-based test of the experimental 1965 Romashka reactor, which used uranium and direct thermoelectric conversion to electricity,[5] the USSR sent about 40 nuclear-electric satellites into space, mostly powered by the BES-5 reactor. The more powerful TOPAZ-II reactor produced 10 kilowatts of electricity.[3]
        Examples of concepts that use nuclear power for space propulsion systems include the nuclear electric rocket (nuclear powered ion thruster(s)), the radioisotope rocket, and radioisotope electric propulsion (REP).[6] One of the more explored concepts is the nuclear thermal rocket, which was ground tested in the NERVA program. Nuclear pulse propulsion was the subject of Project Orion.[7]
        Regulation and hazard prevention[edit]
        After the ban of nuclear weapons in space by the Outer Space Treaty in 1967, nuclear power has been discussed at least since 1972 as a sensitive issue by states.[8] Particularly its potential hazards to Earth's environment and thus also humans has prompted states to adopt in the U.N. General Assembly the Principles Relevant to the Use of Nuclear Power Sources in Outer Space (1992), particularly introducing safety principles for launches and to manage their traffic.[8]
        Benefits
        Both the Viking 1 and Viking 2 landers used RTGs for power on the surface of Mars. (Viking launch vehicle pictured)
        While solar power is much more commonly used, nuclear power can offer advantages in some areas. Solar cells, although efficient, can only supply energy to spacecraft in orbits where the solar flux is sufficiently high, such as low Earth orbit and interplanetary destinations close enough to the Sun. Unlike solar cells, nuclear power systems function independently of sunlight, which is necessary for deep space exploration. Nuclear-based systems can have less mass than solar cells of equivalent power, allowing more compact spacecraft that are easier to orient and direct in space. In the case of crewed spaceflight, nuclear power concepts that can power both life support and propulsion systems may reduce both cost and flight time.[9]
        Selected applications and/or technologies for space include:
        Radioisotope thermoelectric generator
        Radioisotope heater unit
        Radioisotope piezoelectric generator
        Radioisotope rocket
        Nuclear thermal rocket
        Nuclear pulse propulsion
        Nuclear electric rocket
        """,
    )
    print(await default_memory.one_chat_completions("summarize"))


if __name__ == "__main__":
    asyncio.run(main())
```
@@ -450,9 +450,19 @@ const sidebars = {
       type: 'category',
       label: 'Agents',
       items: [
+        {
+          type: "category",
+          label: "Cookbook",
+          items: [
+            {
+              type: "doc",
+              id: "agents/cookbook/calculator_with_agents"
+            },
+          ],
+        },
         {
           type: 'doc',
-          id: 'cookbook/agents/codeagent_develop',
+          id: 'agents/custom_agents',
         }
       ],
     },
@@ -31,9 +31,12 @@ from dbgpt.agent.expand.summary_assistant_agent import SummaryAssistantAgent
 from dbgpt.agent.plan import WrappedAWELLayoutManager
 from dbgpt.agent.resource import PluginFileLoadClient
 from dbgpt.configs.model_config import ROOT_PATH
+from dbgpt.util.tracer import initialize_tracer
 
 test_plugin_dir = os.path.join(ROOT_PATH, "examples/test_files/plugins")
 
+initialize_tracer("/tmp/agent_trace.jsonl", create_system_app=True)
+
 
 async def main():
     from dbgpt.model.proxy import OpenAILLMClient
@@ -40,7 +40,7 @@ async def main():
     await user_proxy.initiate_chat(
         recipient=coder,
         reviewer=user_proxy,
-        message="式计算下321 * 123等于多少",  # 用python代码的方式计算下321 * 123等于多少
+        message="计算下321 * 123等于多少",  # 用python代码的方式计算下321 * 123等于多少
         # message="download data from https://raw.githubusercontent.com/uwdata/draco/master/data/cars.csv and plot a visualization that tells us about the relationship between weight and horsepower. Save the plot to a file. Print the fields in a dataset before visualizing it.",
     )
     ## dbgpt-vis message infos
@@ -28,11 +28,14 @@ from dbgpt.agent import (
 )
 from dbgpt.agent.expand.data_scientist_agent import DataScientistAgent
 from dbgpt.agent.resource import SqliteLoadClient
+from dbgpt.util.tracer import initialize_tracer
 
 current_dir = os.getcwd()
 parent_dir = os.path.dirname(current_dir)
 test_plugin_dir = os.path.join(parent_dir, "test_files")
 
+initialize_tracer("/tmp/agent_trace.jsonl", create_system_app=True)
+
 
 async def main():
     from dbgpt.model.proxy.llms.chatgpt import OpenAILLMClient
setup.py
@@ -423,10 +423,18 @@ def core_requires():
         "tomlkit",
         "rich",
     ]
+    # Agent dependencies
+    setup_spec.extras["agent"] = setup_spec.extras["cli"] + [
+        "termcolor",
+        # https://github.com/eosphoros-ai/DB-GPT/issues/551
+        # TODO: remove pandas dependency
+        "pandas==2.0.3",
+    ]
+
     # Just use by DB-GPT internal, we should find the smallest dependency set for run
     # we core unit test.
     # The dependency "framework" is too large for now.
-    setup_spec.extras["simple_framework"] = setup_spec.extras["cli"] + [
+    setup_spec.extras["simple_framework"] = setup_spec.extras["agent"] + [
         "jinja2",
         "uvicorn",
         "shortuuid",

@@ -456,8 +464,6 @@ def core_requires():
     setup_spec.extras["framework"] = setup_spec.extras["simple_framework"] + [
         "coloredlogs",
         "seaborn",
-        # https://github.com/eosphoros-ai/DB-GPT/issues/551
-        "pandas==2.0.3",
         "auto-gpt-plugin-template",
        "gTTS==2.3.1",
         "pymysql",