diff --git a/dbgpt/agent/core/agent.py b/dbgpt/agent/core/agent.py
index 9285df072..4e13017ee 100644
--- a/dbgpt/agent/core/agent.py
+++ b/dbgpt/agent/core/agent.py
@@ -187,6 +187,7 @@ class AgentContext:
max_new_tokens: int = 1024
temperature: float = 0.5
allow_format_str_template: Optional[bool] = False
+ verbose: bool = False
def to_dict(self) -> Dict[str, Any]:
"""Return a dictionary representation of the AgentContext."""
diff --git a/dbgpt/agent/core/base_agent.py b/dbgpt/agent/core/base_agent.py
index cc064b9ee..d42ff5ec4 100644
--- a/dbgpt/agent/core/base_agent.py
+++ b/dbgpt/agent/core/base_agent.py
@@ -21,6 +21,7 @@ from .agent import Agent, AgentContext, AgentMessage, AgentReviewInfo
from .memory.agent_memory import AgentMemory
from .memory.gpts.base import GptsMessage
from .memory.gpts.gpts_memory import GptsMemory
+from .profile.base import ProfileConfig
from .role import Role
logger = logging.getLogger(__name__)
@@ -170,6 +171,10 @@ class ConversableAgent(Role, Agent):
self.resource = target
elif isinstance(target, AgentMemory):
self.memory = target
+ elif isinstance(target, ProfileConfig):
+ self.profile = target
+ elif isinstance(target, type) and issubclass(target, Action):
+ self.actions.append(target())
return self
async def send(
@@ -228,13 +233,26 @@ class ConversableAgent(Role, Agent):
await self._a_process_received_message(message, sender)
if request_reply is False or request_reply is None:
return
+ if self.is_human:
+ # Human agents do not generate a reply automatically
+ return
- if not self.is_human:
+ if (
+ self.consecutive_auto_reply_counter
+ <= self.not_null_agent_context.max_chat_round
+ ):
+ # If the reply count has not exceeded the maximum chat round, generate a reply
reply = await self.generate_reply(
received_message=message, sender=sender, reviewer=reviewer
)
if reply is not None:
await self.send(reply, sender)
+ else:
+ logger.info(
+ f"Current round {self.consecutive_auto_reply_counter} "
+ f"exceeds the maximum chat round "
+ f"{self.not_null_agent_context.max_chat_round}!"
+ )
def prepare_act_param(self) -> Dict[str, Any]:
"""Prepare the parameters for the act method."""
@@ -381,7 +399,7 @@ class ConversableAgent(Role, Agent):
reply_message, sender, reviewer, request_reply=False
)
fail_reason = reason
- await self.save_to_memory(
+ await self.write_memories(
question=question,
ai_message=ai_message,
action_output=act_out,
@@ -389,7 +407,7 @@ class ConversableAgent(Role, Agent):
check_fail_reason=fail_reason,
)
else:
- await self.save_to_memory(
+ await self.write_memories(
question=question,
ai_message=ai_message,
action_output=act_out,
@@ -437,6 +455,7 @@ class ConversableAgent(Role, Agent):
llm_model=llm_model,
max_new_tokens=self.not_null_agent_context.max_new_tokens,
temperature=self.not_null_agent_context.temperature,
+ verbose=self.not_null_agent_context.verbose,
)
return response, llm_model
except LLMChatError as e:
diff --git a/dbgpt/agent/core/memory/hybrid.py b/dbgpt/agent/core/memory/hybrid.py
index 7316ca39c..536e60519 100644
--- a/dbgpt/agent/core/memory/hybrid.py
+++ b/dbgpt/agent/core/memory/hybrid.py
@@ -60,6 +60,8 @@ class HybridMemory(Memory, Generic[T]):
sensory_memory=self._sensory_memory.structure_clone(now),
short_term_memory=self._short_term_memory.structure_clone(now),
long_term_memory=self._long_term_memory.structure_clone(now),
+ default_insight_extractor=self._default_insight_extractor,
+ default_importance_scorer=self._default_importance_scorer,
)
m._copy_from(self)
return m
diff --git a/dbgpt/agent/core/memory/long_term.py b/dbgpt/agent/core/memory/long_term.py
index 441ce2b2d..80e1c46c2 100644
--- a/dbgpt/agent/core/memory/long_term.py
+++ b/dbgpt/agent/core/memory/long_term.py
@@ -73,6 +73,7 @@ class LongTermMemory(Memory, Generic[T]):
vector_store: VectorStoreBase,
now: Optional[datetime] = None,
reflection_threshold: Optional[float] = None,
+ _default_importance: Optional[float] = None,
):
"""Create a long-term memory."""
self.now = now or datetime.now()
@@ -85,6 +86,7 @@ class LongTermMemory(Memory, Generic[T]):
self.memory_retriever = LongTermRetriever(
now=self.now, index_store=vector_store
)
+ self._default_importance = _default_importance
@immutable
def structure_clone(
@@ -99,6 +101,7 @@ class LongTermMemory(Memory, Generic[T]):
executor=self.executor,
vector_store=self._vector_store,
reflection_threshold=self.reflection_threshold,
+ _default_importance=self._default_importance,
)
m._copy_from(self)
return m
@@ -112,6 +115,8 @@ class LongTermMemory(Memory, Generic[T]):
) -> Optional[DiscardedMemoryFragments[T]]:
"""Write a memory fragment to the memory."""
importance = memory_fragment.importance
+ if importance is None:
+ importance = self._default_importance
last_accessed_time = memory_fragment.last_accessed_time
if importance is None:
raise ValueError("importance is required.")
diff --git a/dbgpt/agent/core/memory/short_term.py b/dbgpt/agent/core/memory/short_term.py
index e5a11a175..a47b5eb94 100644
--- a/dbgpt/agent/core/memory/short_term.py
+++ b/dbgpt/agent/core/memory/short_term.py
@@ -121,10 +121,11 @@ class EnhancedShortTermMemory(ShortTermMemory[T]):
# do not repeatedly add observation memory to summary, so use [:-1].
for enhance_memory in self.enhance_memories[idx][:-1]:
content.append(enhance_memory)
+ # Append the current observation memory
content.append(memory_fragment)
# Merge the enhanced memories to single memory
merged_enhance_memory: T = memory.reduce(
- content, merged_memory=memory.importance
+ content, importance=memory.importance
)
to_get_insight_memories.append(merged_enhance_memory)
enhance_memories.append(merged_enhance_memory)
diff --git a/dbgpt/agent/core/profile/base.py b/dbgpt/agent/core/profile/base.py
index ca7f59986..7b76f615c 100644
--- a/dbgpt/agent/core/profile/base.py
+++ b/dbgpt/agent/core/profile/base.py
@@ -1,9 +1,11 @@
"""Profile module."""
from abc import ABC, abstractmethod
-from typing import List, Optional
+from typing import Any, Dict, List, Optional, Set
import cachetools
+from jinja2.meta import find_undeclared_variables
+from jinja2.sandbox import Environment, SandboxedEnvironment
from dbgpt._private.pydantic import BaseModel, ConfigDict, Field, model_validator
from dbgpt.util.configure import ConfigInfo, DynConfig
@@ -22,50 +24,53 @@ VALID_TEMPLATE_KEYS = {
"question",
}
-_DEFAULT_SYSTEM_TEMPLATE = """
+_DEFAULT_SYSTEM_TEMPLATE = """\
You are a {{ role }}, {% if name %}named {{ name }}, {% endif %}your goal is {{ goal }}.
Please think step by step to achieve the goal. You can use the resources given below.
At the same time, please strictly abide by the constraints and specifications in IMPORTANT REMINDER.
-{% if resource_prompt %} {{ resource_prompt }} {% endif %}
-{% if expand_prompt %} {{ expand_prompt }} {% endif %}
+{% if resource_prompt %}\
+{{ resource_prompt }}
+{% endif %}\
+{% if expand_prompt %}\
+{{ expand_prompt }}
+{% endif %}\
*** IMPORTANT REMINDER ***
-{% if language == 'zh' %}
+{% if language == 'zh' %}\
Please answer in simplified Chinese.
-{% else %}
+{% else %}\
Please answer in English.
-{% endif %}
+{% endif %}\
-{% if constraints %}
-{% for constraint in constraints %}
+{% if constraints %}\
+{% for constraint in constraints %}\
{{ loop.index }}. {{ constraint }}
-{% endfor %}
-{% endif %}
+{% endfor %}\
+{% endif %}\
-{% if examples %}
+{% if examples %}\
You can refer to the following examples:
-{{ examples }}
-{% endif %}
+{{ examples }}\
+{% endif %}\
-{% if out_schema %} {{ out_schema }} {% endif %}
+{% if out_schema %} {{ out_schema }} {% endif %}\
""" # noqa
-_DEFAULT_USER_TEMPLATE = """
-{% if most_recent_memories %}
+_DEFAULT_USER_TEMPLATE = """\
+{% if most_recent_memories %}\
Most recent observations:
{{ most_recent_memories }}
-{% endif %}
+{% endif %}\
-{% if question %}
+{% if question %}\
Question: {{ question }}
{% endif %}
"""
-_DEFAULT_SAVE_MEMORY_TEMPLATE = """
+_DEFAULT_WRITE_MEMORY_TEMPLATE = """\
{% if question %}Question: {{ question }} {% endif %}
{% if thought %}Thought: {{ thought }} {% endif %}
{% if action %}Action: {{ action }} {% endif %}
-{% if observation %}Observation: {{ observation }} {% endif %}
"""
@@ -112,9 +117,141 @@ class Profile(ABC):
"""Return the user prompt template of current agent."""
@abstractmethod
- def get_save_memory_template(self) -> str:
+ def get_write_memory_template(self) -> str:
"""Return the save memory template of current agent."""
+ def format_system_prompt(
+ self,
+ template_env: Optional[Environment] = None,
+ question: Optional[str] = None,
+ language: str = "en",
+ most_recent_memories: Optional[str] = None,
+ resource_vars: Optional[Dict[str, Any]] = None,
+ **kwargs
+ ) -> str:
+ """Format the system prompt.
+
+ Args:
+ template_env(Optional[Environment]): The template environment for jinja2.
+ question(Optional[str]): The question.
+ language(str): The language of current context.
+ most_recent_memories(Optional[str]): The most recent memories, read
+ from memory.
+ resource_vars(Optional[Dict[str, Any]]): The resource variables.
+
+ Returns:
+ str: The formatted system prompt.
+ """
+ return self._format_prompt(
+ self.get_system_prompt_template(),
+ template_env=template_env,
+ question=question,
+ language=language,
+ most_recent_memories=most_recent_memories,
+ resource_vars=resource_vars,
+ **kwargs
+ )
+
+ def format_user_prompt(
+ self,
+ template_env: Optional[Environment] = None,
+ question: Optional[str] = None,
+ language: str = "en",
+ most_recent_memories: Optional[str] = None,
+ resource_vars: Optional[Dict[str, Any]] = None,
+ **kwargs
+ ) -> str:
+ """Format the user prompt.
+
+ Args:
+ template_env(Optional[Environment]): The template environment for jinja2.
+ question(Optional[str]): The question.
+ language(str): The language of current context.
+ most_recent_memories(Optional[str]): The most recent memories, read
+ from memory.
+ resource_vars(Optional[Dict[str, Any]]): The resource variables.
+
+ Returns:
+ str: The formatted user prompt.
+ """
+ return self._format_prompt(
+ self.get_user_prompt_template(),
+ template_env=template_env,
+ question=question,
+ language=language,
+ most_recent_memories=most_recent_memories,
+ resource_vars=resource_vars,
+ **kwargs
+ )
+
+ @property
+ def _sub_render_keys(self) -> Set[str]:
+ """Return the sub render keys.
+
+ If the value is a string and the key is in the sub render keys, it will be
+ rendered.
+
+ Returns:
+ Set[str]: The sub render keys.
+ """
+ return {"role", "name", "goal", "expand_prompt", "constraints"}
+
+ def _format_prompt(
+ self,
+ template: str,
+ template_env: Optional[Environment] = None,
+ question: Optional[str] = None,
+ language: str = "en",
+ most_recent_memories: Optional[str] = None,
+ resource_vars: Optional[Dict[str, Any]] = None,
+ **kwargs
+ ) -> str:
+ """Format the prompt."""
+ if not template_env:
+ template_env = SandboxedEnvironment()
+ pass_vars = {
+ "role": self.get_role(),
+ "name": self.get_name(),
+ "goal": self.get_goal(),
+ "expand_prompt": self.get_expand_prompt(),
+ "language": language,
+ "constraints": self.get_constraints(),
+ "most_recent_memories": (
+ most_recent_memories if most_recent_memories else None
+ ),
+ "examples": self.get_examples(),
+ "question": question,
+ }
+ if resource_vars:
+ # Merge resource variables
+ pass_vars.update(resource_vars)
+ pass_vars.update(kwargs)
+
+ # Parse the template to find all variables in the template
+ template_vars = find_undeclared_variables(template_env.parse(template))
+ # Just keep the valid template key variables
+ filtered_data = {
+ key: pass_vars[key] for key in template_vars if key in pass_vars
+ }
+
+ def _render_template(_template_env, _template: str, **_kwargs):
+ r_template = _template_env.from_string(_template)
+ return r_template.render(**_kwargs)
+
+ for key in filtered_data.keys():
+ value = filtered_data[key]
+ if key in self._sub_render_keys and value:
+ if isinstance(value, str):
+ # Render the sub-template
+ filtered_data[key] = _render_template(
+ template_env, value, **pass_vars
+ )
+ elif isinstance(value, list):
+ for i, item in enumerate(value):
+ if isinstance(item, str):
+ value[i] = _render_template(template_env, item, **pass_vars)
+ return _render_template(template_env, template, **filtered_data)
+
class DefaultProfile(BaseModel, Profile):
"""Default profile."""
@@ -145,8 +282,8 @@ class DefaultProfile(BaseModel, Profile):
_DEFAULT_USER_TEMPLATE, description="The user prompt template of the agent."
)
- save_memory_template: str = Field(
- _DEFAULT_SAVE_MEMORY_TEMPLATE,
+ write_memory_template: str = Field(
+ _DEFAULT_WRITE_MEMORY_TEMPLATE,
description="The save memory template of the agent.",
)
@@ -189,9 +326,9 @@ class DefaultProfile(BaseModel, Profile):
"""Return the user prompt template of current agent."""
return self.user_prompt_template
- def get_save_memory_template(self) -> str:
+ def get_write_memory_template(self) -> str:
"""Return the save memory template of current agent."""
- return self.save_memory_template
+ return self.write_memory_template
class ProfileFactory:
@@ -316,8 +453,8 @@ class ProfileConfig(BaseModel):
user_prompt_template: str | ConfigInfo | None = DynConfig(
_DEFAULT_USER_TEMPLATE, description="The user prompt template."
)
- save_memory_template: str | ConfigInfo | None = DynConfig(
- _DEFAULT_SAVE_MEMORY_TEMPLATE, description="The save memory template."
+ write_memory_template: str | ConfigInfo | None = DynConfig(
+ _DEFAULT_WRITE_MEMORY_TEMPLATE, description="The write memory template."
)
factory: ProfileFactory | None = Field(None, description="The profile factory.")
@@ -356,7 +493,7 @@ class ProfileConfig(BaseModel):
expand_prompt = self.expand_prompt
system_prompt_template = self.system_prompt_template
user_prompt_template = self.user_prompt_template
- save_memory_template = self.save_memory_template
+ write_memory_template = self.write_memory_template
examples = self.examples
call_args = {
"prefer_prompt_language": prefer_prompt_language,
@@ -380,8 +517,8 @@ class ProfileConfig(BaseModel):
system_prompt_template = system_prompt_template.query(**call_args)
if isinstance(user_prompt_template, ConfigInfo):
user_prompt_template = user_prompt_template.query(**call_args)
- if isinstance(save_memory_template, ConfigInfo):
- save_memory_template = save_memory_template.query(**call_args)
+ if isinstance(write_memory_template, ConfigInfo):
+ write_memory_template = write_memory_template.query(**call_args)
if self.factory is not None:
factory_profile = self.factory.create_profile(
@@ -405,7 +542,7 @@ class ProfileConfig(BaseModel):
examples=examples,
system_prompt_template=system_prompt_template,
user_prompt_template=user_prompt_template,
- save_memory_template=save_memory_template,
+ write_memory_template=write_memory_template,
)
def __hash__(self):
diff --git a/dbgpt/agent/core/role.py b/dbgpt/agent/core/role.py
index 99078bb27..d8e16aa71 100644
--- a/dbgpt/agent/core/role.py
+++ b/dbgpt/agent/core/role.py
@@ -1,9 +1,8 @@
"""Role class for role-based conversation."""
from abc import ABC
-from typing import Any, Dict, List, Optional, Set
+from typing import Any, Dict, List, Optional
-from jinja2.meta import find_undeclared_variables
from jinja2.sandbox import SandboxedEnvironment
from dbgpt._private.pydantic import BaseModel, ConfigDict, Field
@@ -45,43 +44,26 @@ class Role(ABC, BaseModel):
Returns:
str: The prompt template.
"""
- prompt_template = (
- self.system_prompt_template if is_system else self.user_prompt_template
- )
- template_vars = self._get_template_variables(prompt_template)
- _sub_render_keys = {"role", "name", "goal", "expand_prompt", "constraints"}
- pass_vars = {
- "role": self.role,
- "name": self.name,
- "goal": self.goal,
- "expand_prompt": self.expand_prompt,
- "language": self.language,
- "constraints": self.constraints,
- "most_recent_memories": (
- most_recent_memories if most_recent_memories else None
- ),
- "examples": self.examples,
- # "out_schema": out_schema if out_schema else None,
- # "resource_prompt": resource_prompt if resource_prompt else None,
- "question": question,
- }
resource_vars = await self.generate_resource_variables(question)
- pass_vars.update(resource_vars)
- pass_vars.update(kwargs)
- filtered_data = {
- key: pass_vars[key] for key in template_vars if key in pass_vars
- }
- for key in filtered_data.keys():
- value = filtered_data[key]
- if key in _sub_render_keys and value:
- if isinstance(value, str):
- # Render the sub-template
- filtered_data[key] = self._render_template(value, **pass_vars)
- elif isinstance(value, list):
- for i, item in enumerate(value):
- if isinstance(item, str):
- value[i] = self._render_template(item, **pass_vars)
- return self._render_template(prompt_template, **filtered_data)
+
+ if is_system:
+ return self.current_profile.format_system_prompt(
+ template_env=self.template_env,
+ question=question,
+ language=self.language,
+ most_recent_memories=most_recent_memories,
+ resource_vars=resource_vars,
+ **kwargs,
+ )
+ else:
+ return self.current_profile.format_user_prompt(
+ template_env=self.template_env,
+ question=question,
+ language=self.language,
+ most_recent_memories=most_recent_memories,
+ resource_vars=resource_vars,
+ **kwargs,
+ )
async def generate_resource_variables(
self, question: Optional[str] = None
@@ -108,11 +90,6 @@ class Role(ABC, BaseModel):
"""Return the name of the role."""
return self.current_profile.get_name()
- @property
- def examples(self) -> Optional[str]:
- """Return the examples of the role."""
- return self.current_profile.get_examples()
-
@property
def role(self) -> str:
"""Return the role of the role."""
@@ -134,28 +111,9 @@ class Role(ABC, BaseModel):
return self.current_profile.get_description()
@property
- def expand_prompt(self) -> Optional[str]:
- """Return the expand prompt of the role."""
- return self.current_profile.get_expand_prompt()
-
- @property
- def system_prompt_template(self) -> str:
- """Return the current system prompt template."""
- return self.current_profile.get_system_prompt_template()
-
- @property
- def user_prompt_template(self) -> str:
- """Return the current user prompt template."""
- return self.current_profile.get_user_prompt_template()
-
- @property
- def save_memory_template(self) -> str:
+ def write_memory_template(self) -> str:
"""Return the current save memory template."""
- return self.current_profile.get_save_memory_template()
-
- def _get_template_variables(self, template: str) -> Set[str]:
- parsed_content = self.template_env.parse(template)
- return find_undeclared_variables(parsed_content)
+ return self.current_profile.get_write_memory_template()
def _render_template(self, template: str, **kwargs):
r_template = self.template_env.from_string(template)
@@ -188,7 +146,7 @@ class Role(ABC, BaseModel):
recent_messages = [m.raw_observation for m in memories]
return "".join(recent_messages)
- async def save_to_memory(
+ async def write_memories(
self,
question: str,
ai_message: str,
@@ -196,13 +154,24 @@ class Role(ABC, BaseModel):
check_pass: bool = True,
check_fail_reason: Optional[str] = None,
) -> None:
- """Save the role to the memory."""
+ """Write the memories to the memory.
+
+ We suggest you override this method to save the conversation to memory
+ according to your needs.
+
+ Args:
+ question(str): The question received.
+ ai_message(str): The AI message, LLM output.
+ action_output(ActionOutput): The action output.
+ check_pass(bool): Whether the check passed.
+ check_fail_reason(str): The reason why the check failed.
+ """
if not action_output:
raise ValueError("Action output is required to save to memory.")
mem_thoughts = action_output.thoughts or ai_message
- observation = action_output.observations or action_output.content
- if not check_pass and check_fail_reason:
+ observation = action_output.observations
+ if not check_pass and observation and check_fail_reason:
observation += "\n" + check_fail_reason
memory_map = {
@@ -211,7 +180,7 @@ class Role(ABC, BaseModel):
"action": action_output.action,
"observation": observation,
}
- save_memory_template = self.save_memory_template
- memory_content = self._render_template(save_memory_template, **memory_map)
+ write_memory_template = self.write_memory_template
+ memory_content = self._render_template(write_memory_template, **memory_map)
fragment = AgentMemoryFragment(memory_content)
await self.memory.write(fragment)
diff --git a/dbgpt/agent/expand/resources/search_tool.py b/dbgpt/agent/expand/resources/search_tool.py
index 63e64ba39..ff5b48be8 100644
--- a/dbgpt/agent/expand/resources/search_tool.py
+++ b/dbgpt/agent/expand/resources/search_tool.py
@@ -19,8 +19,20 @@ def baidu_search(
Please set number of results not less than 8 for rich search results.
"""
- import requests
- from bs4 import BeautifulSoup
+ try:
+ import requests
+ except ImportError:
+ raise ImportError(
+ "`requests` is required for baidu_search tool, please run "
+ "`pip install requests` to install it."
+ )
+ try:
+ from bs4 import BeautifulSoup
+ except ImportError:
+ raise ImportError(
+ "`beautifulsoup4` is required for baidu_search tool, please run "
+ "`pip install beautifulsoup4` to install it."
+ )
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:112.0) "
diff --git a/dbgpt/agent/resource/database.py b/dbgpt/agent/resource/database.py
index 14b27a585..19cfb125f 100644
--- a/dbgpt/agent/resource/database.py
+++ b/dbgpt/agent/resource/database.py
@@ -3,16 +3,18 @@
import dataclasses
import logging
from concurrent.futures import Executor, ThreadPoolExecutor
-from typing import Any, Generic, List, Optional, Tuple, Union
+from typing import TYPE_CHECKING, Any, Generic, List, Optional, Tuple, Union
import cachetools
-from dbgpt.datasource.rdbms.base import RDBMSConnector
from dbgpt.util.cache_utils import cached
from dbgpt.util.executor_utils import blocking_func_to_async
from .base import P, Resource, ResourceParameters, ResourceType
+if TYPE_CHECKING:
+ from dbgpt.datasource.rdbms.base import RDBMSConnector
+
logger = logging.getLogger(__name__)
_DEFAULT_PROMPT_TEMPLATE = (
@@ -143,7 +145,7 @@ class RDBMSConnectorResource(DBResource[DBParameters]):
def __init__(
self,
name: str,
- connector: Optional[RDBMSConnector] = None,
+ connector: Optional["RDBMSConnector"] = None,
db_name: Optional[str] = None,
db_type: Optional[str] = None,
dialect: Optional[str] = None,
@@ -168,7 +170,7 @@ class RDBMSConnectorResource(DBResource[DBParameters]):
)
@property
- def connector(self) -> RDBMSConnector:
+ def connector(self) -> "RDBMSConnector":
"""Return the connector."""
if not self._connector:
raise ValueError("Connector is not set.")
diff --git a/dbgpt/agent/resource/tool/base.py b/dbgpt/agent/resource/tool/base.py
index 1afda4912..7c0e7db31 100644
--- a/dbgpt/agent/resource/tool/base.py
+++ b/dbgpt/agent/resource/tool/base.py
@@ -127,6 +127,10 @@ class BaseTool(Resource[ToolResourceParameters], ABC):
parameters=parameters_string,
)
+ def __str__(self) -> str:
+ """Return the string representation of the tool."""
+ return f"Tool: {self.name} ({self.description})"
+
class FunctionTool(BaseTool):
"""Function tool.
diff --git a/dbgpt/agent/resource/tool/pack.py b/dbgpt/agent/resource/tool/pack.py
index 300c0dee6..9583011f6 100644
--- a/dbgpt/agent/resource/tool/pack.py
+++ b/dbgpt/agent/resource/tool/pack.py
@@ -63,6 +63,31 @@ class ToolPack(ResourcePack):
typed_tools = [cast(BaseTool, t) for t in tools]
return [ToolPack(typed_tools)] # type: ignore
+ async def get_prompt(
+ self,
+ *,
+ lang: str = "en",
+ prompt_type: str = "default",
+ question: Optional[str] = None,
+ resource_name: Optional[str] = None,
+ **kwargs,
+ ) -> str:
+ """Get the prompt for the resources."""
+ prompt_list = []
+ for name, resource in self._resources.items():
+ prompt = await resource.get_prompt(
+ lang=lang,
+ prompt_type=prompt_type,
+ question=question,
+ resource_name=resource_name,
+ **kwargs,
+ )
+ prompt_list.append(prompt)
+ head_prompt = (
+ "You have access to the following APIs:" if lang == "en" else "您可以使用以下API:"
+ )
+ return head_prompt + "\n" + self._prompt_separator.join(prompt_list)
+
def add_command(
self,
command_label: str,
diff --git a/dbgpt/agent/util/llm/llm_client.py b/dbgpt/agent/util/llm/llm_client.py
index a17f15c49..17b7ffb14 100644
--- a/dbgpt/agent/util/llm/llm_client.py
+++ b/dbgpt/agent/util/llm/llm_client.py
@@ -1,4 +1,5 @@
"""AIWrapper for LLM."""
+
import json
import logging
import traceback
@@ -75,14 +76,16 @@ class AIWrapper:
elif context and messages and isinstance(messages, list):
# Instantiate the messages
params["messages"] = [
- {
- **m,
- "content": self.instantiate(
- m["content"], context, allow_format_str_template
- ),
- }
- if m.get("content")
- else m
+ (
+ {
+ **m,
+ "content": self.instantiate(
+ m["content"], context, allow_format_str_template
+ ),
+ }
+ if m.get("content")
+ else m
+ )
for m in messages
]
return params
@@ -110,7 +113,7 @@ class AIWrapper:
config.pop(key)
return json.dumps(config, sort_keys=True, ensure_ascii=False)
- async def create(self, **config) -> Optional[str]:
+ async def create(self, verbose: bool = False, **config) -> Optional[str]:
"""Create a response from the input config."""
# merge the input config with the i-th config in the config list
full_config = {**config}
@@ -123,7 +126,7 @@ class AIWrapper:
context = extra_kwargs.get("context")
llm_model = extra_kwargs.get("llm_model")
try:
- response = await self._completions_create(llm_model, params)
+ response = await self._completions_create(llm_model, params, verbose)
except LLMChatError as e:
logger.debug(f"{llm_model} generate failed!{str(e)}")
raise e
@@ -151,7 +154,9 @@ class AIWrapper:
return gpts_messages
- async def _completions_create(self, llm_model, params) -> str:
+ async def _completions_create(
+ self, llm_model, params, verbose: bool = False
+ ) -> str:
payload = {
"model": llm_model,
"prompt": params.get("prompt"),
@@ -169,10 +174,16 @@ class AIWrapper:
payload["model_cache_enable"] = self.model_cache_enable
try:
model_request = _build_model_request(payload)
+ str_prompt = model_request.messages_to_string()
model_output = await self._llm_client.generate(model_request.copy())
parsed_output = self._output_parser.parse_model_nostream_resp(
model_output, "#########################"
)
+ if verbose:
+ print("\n", "-" * 80, flush=True, sep="")
+ print(f"String Prompt[verbose]: \n{str_prompt}")
+ print(f"LLM Output[verbose]: \n{parsed_output}")
+ print("-" * 80, "\n", flush=True, sep="")
return parsed_output
except Exception as e:
logger.error(
diff --git a/dbgpt/rag/embedding/embeddings.py b/dbgpt/rag/embedding/embeddings.py
index 30e7ba949..851ab1101 100644
--- a/dbgpt/rag/embedding/embeddings.py
+++ b/dbgpt/rag/embedding/embeddings.py
@@ -745,6 +745,8 @@ class OllamaEmbeddings(BaseModel, Embeddings):
The default model name is "llama2".
"""
+ model_config = ConfigDict(arbitrary_types_allowed=True, protected_namespaces=())
+
api_url: str = Field(
default="http://localhost:11434",
description="The URL of the embeddings API.",
diff --git a/dbgpt/rag/retriever/time_weighted.py b/dbgpt/rag/retriever/time_weighted.py
index a544de396..03583908a 100644
--- a/dbgpt/rag/retriever/time_weighted.py
+++ b/dbgpt/rag/retriever/time_weighted.py
@@ -63,7 +63,7 @@ class TimeWeightedEmbeddingRetriever(EmbeddingRetriever):
# Avoid mutating input documents
dup_docs = [deepcopy(d) for d in chunks]
for i, doc in enumerate(dup_docs):
- if "last_accessed_at" not in doc.metadata:
+ if doc.metadata.get("last_accessed_at") is None:
doc.metadata["last_accessed_at"] = current_time
if "created_at" not in doc.metadata:
doc.metadata["created_at"] = current_time
diff --git a/dbgpt/util/configure/base.py b/dbgpt/util/configure/base.py
index 93dc07a4a..1eaed3a82 100644
--- a/dbgpt/util/configure/base.py
+++ b/dbgpt/util/configure/base.py
@@ -68,7 +68,7 @@ class PromptManagerConfigProvider(ConfigProvider):
try:
from dbgpt.serve.prompt.serve import Serve
except ImportError:
- logger.warning("Prompt manager is not available.")
+ logger.debug("Prompt manager is not available.")
return None
cfg = Config()
diff --git a/docs/docs/agents/introduction/conversation.md b/docs/docs/agents/introduction/conversation.md
new file mode 100644
index 000000000..2fe969392
--- /dev/null
+++ b/docs/docs/agents/introduction/conversation.md
@@ -0,0 +1,177 @@
+# Multi-Agent Conversation
+
+Here we will show you how to write a multi-agent conversation program.
+
+## Two-Agent Conversation
+
+First, create a common LLM client, `AgentContext`, and `AgentMemory` shared by both agents.
+
+```python
+import os
+from dbgpt.agent import AgentContext, AgentMemory
+from dbgpt.model.proxy import OpenAILLMClient
+
+llm_client = OpenAILLMClient(
+ model_alias="gpt-4o",
+ api_base=os.getenv("OPENAI_API_BASE"),
+ api_key=os.getenv("OPENAI_API_KEY"),
+)
+
+context: AgentContext = AgentContext(
+ conv_id="test123",
+ language="en",
+ temperature=0.9,
+ max_new_tokens=2048,
+ max_chat_round=4,
+)
+# Create an agent memory, default memory is ShortTermMemory
+agent_memory: AgentMemory = AgentMemory()
+
+
+system_prompt_template = """\
+You are a {{ role }}, {% if name %}named {{ name }}, {% endif %}your goal is {{ goal }}.
+*** IMPORTANT REMINDER ***
+{% if language == 'zh' %}\
+Please answer in simplified Chinese.
+{% else %}\
+Please answer in English.
+{% endif %}\
+""" # noqa
+
+user_prompt_template = """\
+{% if most_recent_memories %}\
+Most recent observations:
+{{ most_recent_memories }}
+{% endif %}\
+{% if question %}\
+user: {{ question }}
+{% endif %}
+"""
+```
+
+In the above code, we set `max_chat_round=4` in `AgentContext`, which means the conversation
+will end after 4 rounds.
+
+Here we also set a `system_prompt_template` and a `user_prompt_template` for both agents to
+keep the conversation simple; we will introduce them in the profile module later.
+
+
+Then, create two agents, `Bob` and `Alice`, and initiate a chat between them.
+
+```python
+
+import asyncio
+from dbgpt.agent import ConversableAgent, ProfileConfig, LLMConfig, BlankAction
+
+
+async def main():
+ bob_profile = ProfileConfig(
+ name="Bob",
+ role="Comedians",
+ system_prompt_template=system_prompt_template,
+ user_prompt_template=user_prompt_template,
+ )
+ bob = (
+ await ConversableAgent(profile=bob_profile)
+ .bind(context)
+ .bind(LLMConfig(llm_client=llm_client))
+ .bind(agent_memory)
+ .bind(BlankAction)
+ .build()
+ )
+ alice_profile = ProfileConfig(
+ name="Alice",
+ role="Comedians",
+ system_prompt_template=system_prompt_template,
+ user_prompt_template=user_prompt_template,
+ )
+ alice = (
+ await ConversableAgent(profile=alice_profile)
+ .bind(context)
+ .bind(LLMConfig(llm_client=llm_client))
+ .bind(agent_memory)
+ .bind(BlankAction)
+ .build()
+ )
+
+ await bob.initiate_chat(alice, message="Tell me a joke.")
+
+
+if __name__ == "__main__":
+ asyncio.run(main())
+```
+
+Run the code, and you will see the conversation between `Bob` and `Alice`:
+
+```bash
+--------------------------------------------------------------------------------
+Bob (to Alice)-[]:
+
+"Tell me a joke."
+
+--------------------------------------------------------------------------------
+un_stream ai response: Why don't scientists trust atoms?
+
+Because they make up everything!
+
+--------------------------------------------------------------------------------
+Alice (to Bob)-[gpt-4o]:
+
+"Why don't scientists trust atoms?\n\nBecause they make up everything!"
+>>>>>>>>Alice Review info:
+Pass(None)
+>>>>>>>>Alice Action report:
+execution succeeded,
+Why don't scientists trust atoms?
+
+Because they make up everything!
+
+--------------------------------------------------------------------------------
+un_stream ai response: That's a classic! You know, it's always good to have a few science jokes in your toolbox—they have the potential energy to make everyone laugh, and they rarely get a negative reaction!
+
+--------------------------------------------------------------------------------
+Bob (to Alice)-[gpt-4o]:
+
+"That's a classic! You know, it's always good to have a few science jokes in your toolbox—they have the potential energy to make everyone laugh, and they rarely get a negative reaction!"
+>>>>>>>>Bob Review info:
+Pass(None)
+>>>>>>>>Bob Action report:
+execution succeeded,
+That's a classic! You know, it's always good to have a few science jokes in your toolbox—they have the potential energy to make everyone laugh, and they rarely get a negative reaction!
+
+--------------------------------------------------------------------------------
+un_stream ai response: Absolutely, science jokes have a universal appeal! Here's another one for your collection:
+
+Why did the biologist go to the beach?
+
+Because they wanted to study the current events!
+
+--------------------------------------------------------------------------------
+Alice (to Bob)-[gpt-4o]:
+
+"Absolutely, science jokes have a universal appeal! Here's another one for your collection:\n\nWhy did the biologist go to the beach?\n\nBecause they wanted to study the current events!"
+>>>>>>>>Alice Review info:
+Pass(None)
+>>>>>>>>Alice Action report:
+execution succeeded,
+Absolutely, science jokes have a universal appeal! Here's another one for your collection:
+
+Why did the biologist go to the beach?
+
+Because they wanted to study the current events!
+
+--------------------------------------------------------------------------------
+un_stream ai response: Haha, that's a good one! You know, biologists at the beach must have some serious kelp issues, too. They just can’t help but dive into their work—whether it's in the lab or lounging in the sand!
+
+--------------------------------------------------------------------------------
+Bob (to Alice)-[gpt-4o]:
+
+"Haha, that's a good one! You know, biologists at the beach must have some serious kelp issues, too. They just can’t help but dive into their work—whether it's in the lab or lounging in the sand!"
+>>>>>>>>Bob Review info:
+Pass(None)
+>>>>>>>>Bob Action report:
+execution succeeded,
+Haha, that's a good one! You know, biologists at the beach must have some serious kelp issues, too. They just can’t help but dive into their work—whether it's in the lab or lounging in the sand!
+
+--------------------------------------------------------------------------------
+```
\ No newline at end of file
diff --git a/docs/docs/agents/custom_agents.md b/docs/docs/agents/introduction/custom_agents.md
similarity index 100%
rename from docs/docs/agents/custom_agents.md
rename to docs/docs/agents/introduction/custom_agents.md
diff --git a/docs/docs/agents/introduction/database.md b/docs/docs/agents/introduction/database.md
new file mode 100644
index 000000000..da4564ed3
--- /dev/null
+++ b/docs/docs/agents/introduction/database.md
@@ -0,0 +1,208 @@
+# Agents With a Database
+
+Most of the time, we want the agent to answer questions or make decisions based on
+the data in a database. In this case, we need to connect the agent to the database.
+
+## Installation
+
+To use the database in the agent, you need to install the dependencies with the following command:
+
+```bash
+pip install "dbgpt[simple_framework]>=0.5.9rc0"
+```
+
+## Create A Database Connector
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+
+
+
+
+:::tip NOTE
+We provide a temporary SQLite database for testing. The temporary database will be
+created in a temporary directory and will be deleted after the program exits.
+:::
+
+```python
+from dbgpt.datasource.rdbms.conn_sqlite import SQLiteTempConnector
+
+connector = SQLiteTempConnector.create_temporary_db()
+connector.create_temp_tables(
+ {
+ "user": {
+ "columns": {
+ "id": "INTEGER PRIMARY KEY",
+ "name": "TEXT",
+ "age": "INTEGER",
+ },
+ "data": [
+ (1, "Tom", 10),
+ (2, "Jerry", 16),
+ (3, "Jack", 18),
+ (4, "Alice", 20),
+ (5, "Bob", 22),
+ ],
+ }
+ }
+)
+
+```
+
+
+
+
+:::tip NOTE
+We connect to the SQLite database by giving the database file path; please make sure the file path is correct.
+:::
+
+```python
+from dbgpt.datasource.rdbms.conn_sqlite import SQLiteConnector
+
+connector = SQLiteConnector.from_file_path("path/to/your/database.db")
+```
+
+
+
+
+
+:::tip NOTE
+
+We connect to the MySQL database by giving the database connection information; please
+make sure the connection information is correct.
+:::
+
+```python
+from dbgpt.datasource.rdbms.conn_mysql import MySQLConnector
+
+connector = MySQLConnector.from_uri_db(
+ host="localhost",
+ port=3307,
+ user="root",
+ pwd="********",
+ db_name="user_manager",
+ engine_args={"connect_args": {"charset": "utf8mb4"}},
+)
+```
+
+
+
+
+
+
+## Create A Database Resource
+
+```python
+from dbgpt.agent.resource import RDBMSConnectorResource
+
+db_resource = RDBMSConnectorResource("user_manager", connector=connector)
+```
+
+As previously mentioned, a **Database** is a kind of resource; we can use most of the
+databases supported by DB-GPT (such as SQLite, MySQL, ClickHouse, Apache Doris, DuckDB,
+Hive, MSSQL, OceanBase, PostgreSQL, StarRocks, Vertica, etc.) as a resource.
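+
+Besides wrapping a live connector, `RDBMSConnectorResource` also accepts descriptive
+parameters such as `db_name`, `db_type`, and `dialect` (see its constructor in
+`dbgpt/agent/resource/database.py` in this change). A minimal sketch, noting that
+accessing the `connector` property before a connector is set raises a `ValueError`:
+
+```python
+from dbgpt.agent.resource import RDBMSConnectorResource
+
+# Describe the database without binding a live connector yet; bind a real
+# connector before the resource is actually queried.
+db_resource = RDBMSConnectorResource(
+    "user_manager",
+    db_name="user_manager",
+    db_type="mysql",
+)
+```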
+
+## Use Database In Your Agent
+
+```python
+import asyncio
+import os
+from dbgpt.agent import AgentContext, AgentMemory, LLMConfig, UserProxyAgent
+from dbgpt.agent.expand.data_scientist_agent import DataScientistAgent
+from dbgpt.model.proxy import OpenAILLMClient
+
+async def main():
+
+ llm_client = OpenAILLMClient(
+ model_alias="gpt-3.5-turbo", # or other models, eg. "gpt-4o"
+ api_base=os.getenv("OPENAI_API_BASE"),
+ api_key=os.getenv("OPENAI_API_KEY"),
+ )
+ context: AgentContext = AgentContext(
+ conv_id="test123", language="en", temperature=0.5, max_new_tokens=2048
+ )
+ agent_memory = AgentMemory()
+
+ user_proxy = await UserProxyAgent().bind(agent_memory).bind(context).build()
+
+ sql_boy = (
+ await DataScientistAgent()
+ .bind(context)
+ .bind(LLMConfig(llm_client=llm_client))
+ .bind(db_resource)
+ .bind(agent_memory)
+ .build()
+ )
+
+ await user_proxy.initiate_chat(
+ recipient=sql_boy,
+ reviewer=user_proxy,
+ message="What is the name and age of the user with age less than 18",
+ )
+
+ ## dbgpt-vis message infos
+ print(await agent_memory.gpts_memory.one_chat_completions("test123"))
+
+
+if __name__ == "__main__":
+ asyncio.run(main())
+
+```
+
+The output will be like this:
+
+``````bash
+--------------------------------------------------------------------------------
+User (to Edgar)-[]:
+
+"What is the name and age of the user with age less than 18"
+
+--------------------------------------------------------------------------------
+un_stream ai response: {
+ "display_type": "response_table",
+ "sql": "SELECT name, age FROM user WHERE age < 18",
+ "thought": "I have selected a response_table to display the names and ages of users with an age less than 18. The SQL query retrieves the name and age columns from the user table where the age is less than 18."
+}
+
+--------------------------------------------------------------------------------
+Edgar (to User)-[gpt-3.5-turbo]:
+
+"{\n \"display_type\": \"response_table\",\n \"sql\": \"SELECT name, age FROM user WHERE age < 18\",\n \"thought\": \"I have selected a response_table to display the names and ages of users with an age less than 18. The SQL query retrieves the name and age columns from the user table where the age is less than 18.\"\n}"
+>>>>>>>>Edgar Review info:
+Pass(None)
+>>>>>>>>Edgar Action report:
+execution succeeded,
+{"display_type":"response_table","sql":"SELECT name, age FROM user WHERE age < 18","thought":"I have selected a response_table to display the names and ages of users with an age less than 18. The SQL query retrieves the name and age columns from the user table where the age is less than 18."}
+
+--------------------------------------------------------------------------------
+```agent-plans
+[{"name": "What is the name and age of the user with age less than 18", "num": 1, "status": "complete", "agent": "Human", "markdown": "```agent-messages\n[{\"sender\": \"DataScientist\", \"receiver\": \"Human\", \"model\": \"gpt-3.5-turbo\", \"markdown\": \"```vis-chart\\n{\\\"sql\\\": \\\"SELECT name, age FROM user WHERE age < 18\\\", \\\"type\\\": \\\"response_table\\\", \\\"title\\\": \\\"\\\", \\\"describe\\\": \\\"I have selected a response_table to display the names and ages of users with an age less than 18. The SQL query retrieves the name and age columns from the user table where the age is less than 18.\\\", \\\"data\\\": [{\\\"name\\\": \\\"Tom\\\", \\\"age\\\": 10}, {\\\"name\\\": \\\"Jerry\\\", \\\"age\\\": 16}]}\\n```\"}]\n```"}]
+```
+``````
+
+Let's parse the result from the above output; we focus only on the last part
+(output with the [GPT-Vis](https://github.com/eosphoros-ai/GPT-Vis) protocol):
+```json
+[
+ {
+ "name": "What is the name and age of the user with age less than 18",
+ "num": 1,
+ "status": "complete",
+ "agent": "Human",
+ "markdown": "```agent-messages\n[{\"sender\": \"DataScientist\", \"receiver\": \"Human\", \"model\": \"gpt-3.5-turbo\", \"markdown\": \"```vis-chart\\n{\\\"sql\\\": \\\"SELECT name, age FROM user WHERE age < 18\\\", \\\"type\\\": \\\"response_table\\\", \\\"title\\\": \\\"\\\", \\\"describe\\\": \\\"I have selected a response_table to display the names and ages of users with an age less than 18. The SQL query retrieves the name and age columns from the user table where the age is less than 18.\\\", \\\"data\\\": [{\\\"name\\\": \\\"Tom\\\", \\\"age\\\": 10}, {\\\"name\\\": \\\"Jerry\\\", \\\"age\\\": 16}]}\\n```\"}]\n```"
+ }
+]
+```
+What is GPT-Vis? GPT-Vis is a collection of components for GPTs, generative AI, and LLM
+projects. It provides a protocol (a custom code syntax in Markdown) to describe the output
+of the AI model, so that the output can be rendered in rich UI components.
+
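+For illustration, here is the `vis-chart` payload embedded (escaped) in the `markdown`
+field above, unescaped into plain Markdown:
+
+````markdown
+```vis-chart
+{"sql": "SELECT name, age FROM user WHERE age < 18", "type": "response_table", "title": "", "describe": "I have selected a response_table to display the names and ages of users with an age less than 18. The SQL query retrieves the name and age columns from the user table where the age is less than 18.", "data": [{"name": "Tom", "age": 10}, {"name": "Jerry", "age": 16}]}
+```
+````
+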
+Here, the output is a table containing the name and age of the users younger than 18.
\ No newline at end of file
diff --git a/docs/docs/agents/introduction/introduction.md b/docs/docs/agents/introduction/introduction.md
new file mode 100644
index 000000000..b582b6126
--- /dev/null
+++ b/docs/docs/agents/introduction/introduction.md
@@ -0,0 +1,171 @@
+# Data-Driven Multi-Agents
+
+## Introduction
+
+The DB-GPT agent is a data-driven multi-agent system that aims to provide a production-level
+agent development framework. We believe that production-level agent applications need
+to be based on data-driven decisions and be orchestrated in a controllable agentic workflow.
+
+### Multi-Level API Design
+
+- Python agent API: Build an agent application with Python code; you just need to install the `dbgpt` package with `pip install "dbgpt[agent]"`.
+- Application API: Build an agent application inside the DB-GPT project, where you can use all the capabilities of the other DB-GPT modules.
+
+Most of the time, you can use the Python agent API to build your agent application in
+a simple way, with only small code changes needed when you deploy your agents to production.
+
+## Quick Start
+
+### Installation
+
+First, you need to install the `dbgpt` package with the following command:
+```bash
+pip install "dbgpt[agent]>=0.5.9rc0"
+```
+
+Then, you can install the `openai` package with the following command:
+```bash
+pip install openai
+```
+
+### Write Your First Calculator With Agent
+
+The LLM is the "brain" of the agent; here we use an OpenAI LLM.
+In DB-GPT agents, you can use any model supported by DB-GPT, whether it is a locally
+deployed LLM or a proxy model, and whether it is deployed on a single machine or in a cluster.
+
+```python
+import os
+from dbgpt.model.proxy import OpenAILLMClient
+
+llm_client = OpenAILLMClient(
+ model_alias="gpt-3.5-turbo", # or other models, eg. "gpt-4o"
+ api_base=os.getenv("OPENAI_API_BASE"),
+ api_key=os.getenv("OPENAI_API_KEY"),
+)
+```
+
+Then, you should create an agent context and agent memory.
+
+```python
+from dbgpt.agent import AgentContext, AgentMemory
+
+# language="zh" for Chinese
+context: AgentContext = AgentContext(
+ conv_id="test123", language="en", temperature=0.5, max_new_tokens=2048
+)
+# Create an agent memory, default memory is ShortTermMemory
+agent_memory: AgentMemory = AgentMemory()
+```
+Memory stores information perceived from the environment and leverages the recorded
+memories to facilitate future actions.
+The default memory is `ShortTermMemory`, which just keeps the latest `k` turns of the conversation.
+You can use other memories, such as `LongTermMemory`, `SensoryMemory`, and `HybridMemory`; we will introduce them later.
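+
+For example, a minimal sketch of configuring the memory explicitly (assuming
+`ShortTermMemory` is exported from `dbgpt.agent` and that `AgentMemory` accepts a
+memory implementation as its first argument, as in recent DB-GPT versions):
+
+```python
+from dbgpt.agent import AgentMemory, ShortTermMemory
+
+# Keep only the latest 5 turns of the conversation in short-term memory.
+agent_memory = AgentMemory(memory=ShortTermMemory(buffer_size=5))
+```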
+
+Then, you can create a code assistant agent and a user proxy agent.
+
+```python
+import asyncio
+
+from dbgpt.agent import LLMConfig, UserProxyAgent
+from dbgpt.agent.expand.code_assistant_agent import CodeAssistantAgent
+
+
+async def main():
+
+ # Create a code assistant agent
+ coder = (
+ await CodeAssistantAgent()
+ .bind(context)
+ .bind(LLMConfig(llm_client=llm_client))
+ .bind(agent_memory)
+ .build()
+ )
+
+ # Create a user proxy agent
+ user_proxy = await UserProxyAgent().bind(context).bind(agent_memory).build()
+
+ # Initiate a chat with the user proxy agent
+ await user_proxy.initiate_chat(
+ recipient=coder,
+ reviewer=user_proxy,
+ message="Calculate the result of 321 * 123",
+ )
+ # Obtain conversation history messages between agents
+ print(await agent_memory.gpts_memory.one_chat_completions("test123"))
+
+
+if __name__ == "__main__":
+ asyncio.run(main())
+
+```
+
+You will see the following output:
+
+``````bash
+--------------------------------------------------------------------------------
+User (to Turing)-[]:
+
+"Calculate the result of 321 * 123"
+
+--------------------------------------------------------------------------------
+un_stream ai response: ```python
+# filename: calculate_multiplication.py
+
+result = 321 * 123
+print(result)
+```
+
+>>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...
+execute_code was called without specifying a value for use_docker. Since the python docker package is not available, code will be run natively. Note: this fallback behavior is subject to change
+un_stream ai response: True
+
+--------------------------------------------------------------------------------
+Turing (to User)-[gpt-3.5-turbo]:
+
+"```python\n# filename: calculate_multiplication.py\n\nresult = 321 * 123\nprint(result)\n```"
+>>>>>>>>Turing Review info:
+Pass(None)
+>>>>>>>>Turing Action report:
+execution succeeded,
+
+39483
+
+
+--------------------------------------------------------------------------------
+```agent-plans
+[{"name": "Calculate the result of 321 * 123", "num": 1, "status": "complete", "agent": "Human", "markdown": "```agent-messages\n[{\"sender\": \"CodeEngineer\", \"receiver\": \"Human\", \"model\": \"gpt-3.5-turbo\", \"markdown\": \"```vis-code\\n{\\\"exit_success\\\": true, \\\"language\\\": \\\"python\\\", \\\"code\\\": [[\\\"python\\\", \\\"# filename: calculate_multiplication.py\\\\n\\\\nresult = 321 * 123\\\\nprint(result)\\\"]], \\\"log\\\": \\\"\\\\n39483\\\\n\\\"}\\n```\"}]\n```"}]
+```
+``````
+
+In DB-GPT agents, most core interfaces are asynchronous for high performance,
+so we write all the agent-building code in an asynchronous style. In development,
+you can use `asyncio.run(main())` to run the agent.
+
+Here is a graph of the above code:
+
+
+
+
+
+
+In the above code, we create a `CodeAssistantAgent` and a `UserProxyAgent`.
+`UserProxyAgent` is a proxy for the user; it is an admin agent that can initiate a chat
+with other agents and review their feedback.
+
+`CodeAssistantAgent` is a code assistant agent that generates code to solve the
+user's question. In this case, it generates Python code to calculate the result of
+`321 * 123`; the code is then executed by its internal `CodeAction`, and the result is
+returned to the user if the review passes.
+
+At the end of the code, we print the conversation history between the agents.
+
+## What's Next
+
+- How to use tools in DB-GPT agents
+- How to connect to the database in DB-GPT agents
+- How to use planning in DB-GPT agents
+- How to use various memories in DB-GPT agents
+- How to write a custom agent in DB-GPT agents
+- How to integrate agents with AWEL(Agentic Workflow Expression Language)
+- How to deploy agents in production
\ No newline at end of file
diff --git a/docs/docs/agents/introduction/planning.md b/docs/docs/agents/introduction/planning.md
new file mode 100644
index 000000000..f147b1ec1
--- /dev/null
+++ b/docs/docs/agents/introduction/planning.md
@@ -0,0 +1,386 @@
+# Agents Planning
+
+When faced with a complex task, humans tend to decompose it into
+simpler subtasks and solve them individually. The planning module aims to empower
+agents with this human capability, which is expected to make an agent behave more
+reasonably, powerfully, and reliably.
+
+## Introduction
+
+Here is a simple architecture diagram of the planning module:
+
+
+
+
+
+In the above diagram, the planning module receives the task from the user, then
+decomposes the task into subtasks and assigns them to the corresponding agents (Agent1, Agent2, ...).
+
+## Analyze Your Database With Agents
+
+In the following example, we will show you how to use the planning module to analyze your database.
+
+To run this example, please install the dependencies according to [previous instructions](./database#installation).
+
+### Prepare The Database
+
+For simplicity, we will use a temporary SQLite database to store the data. The database
+will be created in a temporary directory and will be deleted after the program exits.
+
+First, create a temporary SQLite database, create some tables, and insert some data:
+
+```python
+from dbgpt.datasource.rdbms.conn_sqlite import SQLiteTempConnector
+
+connector = SQLiteTempConnector.create_temporary_db()
+connector.create_temp_tables(
+ {
+ "students": {
+ "columns": {
+ "student_id": "INTEGER PRIMARY KEY",
+ "student_name": "TEXT",
+ "major": "TEXT",
+ "year_of_enrollment": "INTEGER",
+ "student_age": "INTEGER",
+ },
+ "data": [
+ (1, "Zhang San", "Computer Science", 2020, 20),
+ (2, "Li Si", "Computer Science", 2021, 19),
+ (3, "Wang Wu", "Physics", 2020, 21),
+ (4, "Zhao Liu", "Mathematics", 2021, 19),
+ (5, "Zhou Qi", "Computer Science", 2022, 18),
+ (6, "Wu Ba", "Physics", 2020, 21),
+ (7, "Zheng Jiu", "Mathematics", 2021, 19),
+ (8, "Sun Shi", "Computer Science", 2022, 18),
+ (9, "Liu Shiyi", "Physics", 2020, 21),
+ (10, "Chen Shier", "Mathematics", 2021, 19),
+ ],
+ },
+ "courses": {
+ "columns": {
+ "course_id": "INTEGER PRIMARY KEY",
+ "course_name": "TEXT",
+ "credit": "REAL",
+ },
+ "data": [
+ (1, "Introduction to Computer Science", 3),
+ (2, "Data Structures", 4),
+ (3, "Advanced Physics", 3),
+ (4, "Linear Algebra", 4),
+ (5, "Calculus", 5),
+ (6, "Programming Languages", 4),
+ (7, "Quantum Mechanics", 3),
+ (8, "Probability Theory", 4),
+ (9, "Database Systems", 4),
+ (10, "Computer Networks", 4),
+ ],
+ },
+ "scores": {
+ "columns": {
+ "student_id": "INTEGER",
+ "course_id": "INTEGER",
+ "score": "INTEGER",
+ "semester": "TEXT",
+ },
+ "data": [
+ (1, 1, 90, "Fall 2020"),
+ (1, 2, 85, "Spring 2021"),
+ (2, 1, 88, "Fall 2021"),
+ (2, 2, 90, "Spring 2022"),
+ (3, 3, 92, "Fall 2020"),
+ (3, 4, 85, "Spring 2021"),
+ (4, 3, 88, "Fall 2021"),
+ (4, 4, 86, "Spring 2022"),
+ (5, 1, 90, "Fall 2022"),
+ (5, 2, 87, "Spring 2023"),
+ ],
+ },
+ }
+)
+```
+
+### Create A Database Resource
+
+```python
+from dbgpt.agent.resource import RDBMSConnectorResource
+
+db_resource = RDBMSConnectorResource("student_manager", connector=connector)
+```
+
+### Analyze The Database With Automatic Planning
+
+To create a plan to analyze the database, we introduce a new agent, `AutoPlanChatManager`,
+which can automatically plan the task and assign the subtasks to the corresponding agents.
+
+`AutoPlanChatManager` is a special agent; it is created like other agents, but it
+has a special method `hire` to hire other agents.
+
+```python
+
+import asyncio
+import os
+
+from dbgpt.agent import (
+ AgentContext,
+ AgentMemory,
+ AutoPlanChatManager,
+ LLMConfig,
+ UserProxyAgent,
+)
+from dbgpt.agent.expand.data_scientist_agent import DataScientistAgent
+from dbgpt.model.proxy import OpenAILLMClient
+
+async def main():
+ llm_client = OpenAILLMClient(
+ model_alias="gpt-3.5-turbo", # or other models, eg. "gpt-4o"
+ api_base=os.getenv("OPENAI_API_BASE"),
+ api_key=os.getenv("OPENAI_API_KEY"),
+ )
+ context: AgentContext = AgentContext(
+ conv_id="test123", language="en", temperature=0.5, max_new_tokens=2048
+ )
+ agent_memory = AgentMemory()
+
+ user_proxy = await UserProxyAgent().bind(agent_memory).bind(context).build()
+
+ sql_boy = (
+ await DataScientistAgent()
+ .bind(context)
+ .bind(LLMConfig(llm_client=llm_client))
+ .bind(db_resource)
+ .bind(agent_memory)
+ .build()
+ )
+ manager = (
+ await AutoPlanChatManager()
+ .bind(context)
+ .bind(agent_memory)
+ .bind(LLMConfig(llm_client=llm_client))
+ .build()
+ )
+ manager.hire([sql_boy])
+
+ await user_proxy.initiate_chat(
+ recipient=manager,
+ reviewer=user_proxy,
+ message="Analyze student scores from at least three dimensions",
+ )
+
+ # dbgpt-vis message infos
+ print(await agent_memory.gpts_memory.one_chat_completions("test123"))
+
+
+if __name__ == "__main__":
+ asyncio.run(main())
+```
+
+The output will be like this:
+
+``````bash
+--------------------------------------------------------------------------------
+User (to AutoPlanChatManager)-[]:
+
+"Analyze student scores from at least three dimensions"
+
+--------------------------------------------------------------------------------
+un_stream ai response: [
+ {
+ "serial_number": "1",
+ "agent": "DataScientist",
+ "content": "Retrieve student scores data from the database including scores for each subject, overall performance, and attendance records.",
+ "rely": ""
+ },
+ {
+ "serial_number": "2",
+ "agent": "DataScientist",
+ "content": "Analyze student scores data to identify trends and patterns in academic performance, subject-wise scores, and attendance correlation.",
+ "rely": "1"
+ },
+ {
+ "serialjson_number": "3",
+ "agent": "DataScientist",
+ "content": "Visualize the analyzed data using appropriate graphs and charts to represent the student scores from different dimensions effectively.",
+ "rely": "2"
+ }
+]
+
+--------------------------------------------------------------------------------
+Planner (to AutoPlanChatManager)-[gpt-3.5-turbo]:
+
+"[\n {\n \"serial_number\": \"1\",\n \"agent\": \"DataScientist\",\n \"content\": \"Retrieve student scores data from the database including scores for each subject, overall performance, and attendance records.\",\n \"rely\": \"\"\n },\n {\n \"serial_number\": \"2\",\n \"agent\": \"DataScientist\",\n \"content\": \"Analyze student scores data to identify trends and patterns in academic performance, subject-wise scores, and attendance correlation.\",\n \"rely\": \"1\"\n },\n {\n \"serialjson_number\": \"3\",\n \"agent\": \"DataScientist\",\n \"content\": \"Visualize the analyzed data using appropriate graphs and charts to represent the student scores from different dimensions effectively.\",\n \"rely\": \"2\"\n }\n]"
+>>>>>>>>Planner Review info:
+Pass(None)
+>>>>>>>>Planner Action report:
+execution succeeded,
+[
+ {
+ "serial_number": "1",
+ "agent": "DataScientist",
+ "content": "Retrieve student scores data from the database including scores for each subject, overall performance, and attendance records.",
+ "rely": ""
+ },
+ {
+ "serial_number": "2",
+ "agent": "DataScientist",
+ "content": "Analyze student scores data to identify trends and patterns in academic performance, subject-wise scores, and attendance correlation.",
+ "rely": "1"
+ },
+ {
+ "serialjson_number": "3",
+ "agent": "DataScientist",
+ "content": "Visualize the analyzed data using appropriate graphs and charts to represent the student scores from different dimensions effectively.",
+ "rely": "2"
+ }
+]
+
+--------------------------------------------------------------------------------
+GroupChat select_speaker failed to resolve the next speaker's name. This is because the speaker selection OAI call returned:
+DataScientist
+
+--------------------------------------------------------------------------------
+AutoPlanChatManager (to Edgar)-[]:
+
+"Retrieve student scores data from the database including scores for each subject, overall performance, and attendance records."
+
+--------------------------------------------------------------------------------
+un_stream ai response: {
+ "display_type": "response_table",
+ "sql": "SELECT s.student_id, s.student_name, s.major, s.year_of_enrollment, s.student_age, c.course_name, c.credit, sc.score, sc.semester FROM students s JOIN scores sc ON s.student_id = sc.student_id JOIN courses c ON sc.course_id = c.course_id",
+ "thought": "I have crafted a SQL query that retrieves student scores data including scores for each subject, overall performance, and attendance records by joining the 'students', 'scores', and 'courses' tables on their respective keys."
+}
+
+--------------------------------------------------------------------------------
+Edgar (to AutoPlanChatManager)-[gpt-3.5-turbo]:
+
+"{\n \"display_type\": \"response_table\",\n \"sql\": \"SELECT s.student_id, s.student_name, s.major, s.year_of_enrollment, s.student_age, c.course_name, c.credit, sc.score, sc.semester FROM students s JOIN scores sc ON s.student_id = sc.student_id JOIN courses c ON sc.course_id = c.course_id\",\n \"thought\": \"I have crafted a SQL query that retrieves student scores data including scores for each subject, overall performance, and attendance records by joining the 'students', 'scores', and 'courses' tables on their respective keys.\"\n}"
+>>>>>>>>Edgar Review info:
+Pass(None)
+>>>>>>>>Edgar Action report:
+execution succeeded,
+{"display_type":"response_table","sql":"SELECT s.student_id, s.student_name, s.major, s.year_of_enrollment, s.student_age, c.course_name, c.credit, sc.score, sc.semester FROM students s JOIN scores sc ON s.student_id = sc.student_id JOIN courses c ON sc.course_id = c.course_id","thought":"I have crafted a SQL query that retrieves student scores data including scores for each subject, overall performance, and attendance records by joining the 'students', 'scores', and 'courses' tables on their respective keys."}
+
+--------------------------------------------------------------------------------
+GroupChat select_speaker failed to resolve the next speaker's name. This is because the speaker selection OAI call returned:
+DataScientist
+
+--------------------------------------------------------------------------------
+AutoPlanChatManager (to Edgar)-[]:
+
+"Read the result data of the dependent steps in the above historical message to complete the current goal:Analyze student scores data to identify trends and patterns in academic performance, subject-wise scores, and attendance correlation."
+
+--------------------------------------------------------------------------------
+un_stream ai response: {
+ "display_type": "response_table",
+ "sql": "SELECT student_id, student_name, course_name, credit, AVG(score) AS average_score, COUNT(score) AS total_scores, MAX(score) AS highest_score, MIN(score) AS lowest_score, SUM(score) AS total_score, AVG(semester) AS average_semester FROM (SELECT s.student_id, s.student_name, c.course_name, c.credit, sc.score, sc.semester FROM students s JOIN scores sc ON s.student_id = sc.student_id JOIN courses c ON sc.course_id = c.course_id) GROUP BY student_id, course_name",
+ "thought": "I have created a SQL query that analyzes student scores data to identify trends and patterns in academic performance, subject-wise scores, and attendance correlation. The query calculates average score, total scores, highest score, lowest score, total score, and average semester for each student in each course."
+}
+
+--------------------------------------------------------------------------------
+Edgar (to AutoPlanChatManager)-[gpt-3.5-turbo]:
+
+"{\n \"display_type\": \"response_table\",\n \"sql\": \"SELECT student_id, student_name, course_name, credit, AVG(score) AS average_score, COUNT(score) AS total_scores, MAX(score) AS highest_score, MIN(score) AS lowest_score, SUM(score) AS total_score, AVG(semester) AS average_semester FROM (SELECT s.student_id, s.student_name, c.course_name, c.credit, sc.score, sc.semester FROM students s JOIN scores sc ON s.student_id = sc.student_id JOIN courses c ON sc.course_id = c.course_id) GROUP BY student_id, course_name\",\n \"thought\": \"I have created a SQL query that analyzes student scores data to identify trends and patterns in academic performance, subject-wise scores, and attendance correlation. The query calculates average score, total scores, highest score, lowest score, total score, and average semester for each student in each course.\"\n}"
+>>>>>>>>Edgar Review info:
+Pass(None)
+>>>>>>>>Edgar Action report:
+execution succeeded,
+{"display_type":"response_table","sql":"SELECT student_id, student_name, course_name, credit, AVG(score) AS average_score, COUNT(score) AS total_scores, MAX(score) AS highest_score, MIN(score) AS lowest_score, SUM(score) AS total_score, AVG(semester) AS average_semester FROM (SELECT s.student_id, s.student_name, c.course_name, c.credit, sc.score, sc.semester FROM students s JOIN scores sc ON s.student_id = sc.student_id JOIN courses c ON sc.course_id = c.course_id) GROUP BY student_id, course_name","thought":"I have created a SQL query that analyzes student scores data to identify trends and patterns in academic performance, subject-wise scores, and attendance correlation. The query calculates average score, total scores, highest score, lowest score, total score, and average semester for each student in each course."}
+
+--------------------------------------------------------------------------------
+GroupChat select_speaker failed to resolve the next speaker's name. This is because the speaker selection OAI call returned:
+DataScientist
+
+--------------------------------------------------------------------------------
+AutoPlanChatManager (to Edgar)-[]:
+
+"Read the result data of the dependent steps in the above historical message to complete the current goal:Visualize the analyzed data using appropriate graphs and charts to represent the student scores from different dimensions effectively."
+
+--------------------------------------------------------------------------------
+un_stream ai response: {
+ "display_type": "response_table",
+ "sql": "SELECT student_id, student_name, course_name, credit, AVG(score) AS average_score, COUNT(score) AS total_scores, MAX(score) AS highest_score, MIN(score) AS lowest_score, SUM(score) AS total_score, AVG(semester) AS average_semester FROM (SELECT s.student_id, s.student_name, c.course_name, c.credit, sc.score, sc.semester FROM students s JOIN scores sc ON s.student_id = sc.student_id JOIN courses c ON sc.course_id = c.course_id) GROUP BY student_id, course_name",
+ "thought": "The SQL query provided will generate a table with the analyzed data including average score, total scores, highest score, lowest score, total score, and average semester for each student in each course. This table can be further used for visualization purposes to represent student scores from different dimensions effectively."
+}
+
+--------------------------------------------------------------------------------
+Edgar (to AutoPlanChatManager)-[gpt-3.5-turbo]:
+
+"{\n \"display_type\": \"response_table\",\n \"sql\": \"SELECT student_id, student_name, course_name, credit, AVG(score) AS average_score, COUNT(score) AS total_scores, MAX(score) AS highest_score, MIN(score) AS lowest_score, SUM(score) AS total_score, AVG(semester) AS average_semester FROM (SELECT s.student_id, s.student_name, c.course_name, c.credit, sc.score, sc.semester FROM students s JOIN scores sc ON s.student_id = sc.student_id JOIN courses c ON sc.course_id = c.course_id) GROUP BY student_id, course_name\",\n \"thought\": \"The SQL query provided will generate a table with the analyzed data including average score, total scores, highest score, lowest score, total score, and average semester for each student in each course. This table can be further used for visualization purposes to represent student scores from different dimensions effectively.\"\n}"
+>>>>>>>>Edgar Review info:
+Pass(None)
+>>>>>>>>Edgar Action report:
+execution succeeded,
+{"display_type":"response_table","sql":"SELECT student_id, student_name, course_name, credit, AVG(score) AS average_score, COUNT(score) AS total_scores, MAX(score) AS highest_score, MIN(score) AS lowest_score, SUM(score) AS total_score, AVG(semester) AS average_semester FROM (SELECT s.student_id, s.student_name, c.course_name, c.credit, sc.score, sc.semester FROM students s JOIN scores sc ON s.student_id = sc.student_id JOIN courses c ON sc.course_id = c.course_id) GROUP BY student_id, course_name","thought":"The SQL query provided will generate a table with the analyzed data including average score, total scores, highest score, lowest score, total score, and average semester for each student in each course. This table can be further used for visualization purposes to represent student scores from different dimensions effectively."}
+
+--------------------------------------------------------------------------------
+
+--------------------------------------------------------------------------------
+AutoPlanChatManager (to User)-[]:
+
+"Analyze student scores from at least three dimensions"
+>>>>>>>>AutoPlanChatManager Review info:
+Pass(None)
+>>>>>>>>AutoPlanChatManager Action report:
+execution succeeded,
+```vis-chart
+{"sql": "SELECT student_id, student_name, course_name, credit, AVG(score) AS average_score, COUNT(score) AS total_scores, MAX(score) AS highest_score, MIN(score) AS lowest_score, SUM(score) AS total_score, AVG(semester) AS average_semester FROM (SELECT s.student_id, s.student_name, c.course_name, c.credit, sc.score, sc.semester FROM students s JOIN scores sc ON s.student_id = sc.student_id JOIN courses c ON sc.course_id = c.course_id) GROUP BY student_id, course_name", "type": "response_table", "title": "", "describe": "The SQL query provided will generate a table with the analyzed data including average score, total scores, highest score, lowest score, total score, and average semester for each student in each course. This table can be further used for visualization purposes to represent student scores from different dimensions effectively.", "data": [{"student_id": 1, "student_name": "Zhang San", "course_name": "Data Structures", "credit": 4.0, "average_score": 85.0, "total_scores": 1, "highest_score": 85, "lowest_score": 85, "total_score": 85, "average_semester": 0.0}, {"student_id": 1, "student_name": "Zhang San", "course_name": "Introduction to Computer Science", "credit": 3.0, "average_score": 90.0, "total_scores": 1, "highest_score": 90, "lowest_score": 90, "total_score": 90, "average_semester": 0.0}, {"student_id": 2, "student_name": "Li Si", "course_name": "Data Structures", "credit": 4.0, "average_score": 90.0, "total_scores": 1, "highest_score": 90, "lowest_score": 90, "total_score": 90, "average_semester": 0.0}, {"student_id": 2, "student_name": "Li Si", "course_name": "Introduction to Computer Science", "credit": 3.0, "average_score": 88.0, "total_scores": 1, "highest_score": 88, "lowest_score": 88, "total_score": 88, "average_semester": 0.0}, {"student_id": 3, "student_name": "Wang Wu", "course_name": "Advanced Physics", "credit": 3.0, "average_score": 92.0, "total_scores": 1, "highest_score": 92, "lowest_score": 92, "total_score": 92, "average_semester": 0.0}, {"student_id": 3, "student_name": "Wang Wu", "course_name": "Linear Algebra", "credit": 4.0, "average_score": 85.0, "total_scores": 1, "highest_score": 85, "lowest_score": 85, "total_score": 85, "average_semester": 0.0}, {"student_id": 4, "student_name": "Zhao Liu", "course_name": "Advanced Physics", "credit": 3.0, "average_score": 88.0, "total_scores": 1, "highest_score": 88, "lowest_score": 88, "total_score": 88, "average_semester": 0.0}, {"student_id": 4, "student_name": "Zhao Liu", "course_name": "Linear Algebra", "credit": 4.0, "average_score": 86.0, "total_scores": 1, "highest_score": 86, "lowest_score": 86, "total_score": 86, "average_semester": 0.0}, {"student_id": 5, "student_name": "Zhou Qi", "course_name": "Data Structures", "credit": 4.0, "average_score": 87.0, "total_scores": 1, "highest_score": 87, "lowest_score": 87, "total_score": 87, "average_semester": 0.0}, {"student_id": 5, "student_name": "Zhou Qi", "course_name": "Introduction to Computer Science", "credit": 3.0, "average_score": 90.0, "total_scores": 1, "highest_score": 90, "lowest_score": 90, "total_score": 90, "average_semester": 0.0}]}
+```
+
+--------------------------------------------------------------------------------
+``````
+
+The output follows the [GPT-Vis](https://github.com/eosphoros-ai/GPT-Vis) protocol, from which
+we can parse some useful results.
+
+**1. Plans**
+```json
+[
+ {
+ "serial_number": "1",
+ "agent": "DataScientist",
+ "content": "Retrieve student scores data from the database including scores for each subject, overall performance, and attendance records.",
+ "rely": ""
+ },
+ {
+ "serial_number": "2",
+ "agent": "DataScientist",
+ "content": "Analyze student scores data to identify trends and patterns in academic performance, subject-wise scores, and attendance correlation.",
+ "rely": "1"
+ },
+ {
+ "serialjson_number": "3",
+ "agent": "DataScientist",
+ "content": "Visualize the analyzed data using appropriate graphs and charts to represent the student scores from different dimensions effectively.",
+ "rely": "2"
+ }
+]
+```
+
+**2. LLM output of first task**
+```json
+{
+ "display_type": "response_table",
+ "sql": "SELECT s.student_id, s.student_name, s.major, s.year_of_enrollment, s.student_age, c.course_name, c.credit, sc.score, sc.semester FROM students s JOIN scores sc ON s.student_id = sc.student_id JOIN courses c ON sc.course_id = c.course_id",
+ "thought": "I have crafted a SQL query that retrieves student scores data including scores for each subject, overall performance, and attendance records by joining the 'students', 'scores', and 'courses' tables on their respective keys."
+}
+```
+
+**3. LLM output of second task**
+```json
+{
+ "display_type": "response_table",
+ "sql": "SELECT student_id, student_name, course_name, credit, AVG(score) AS average_score, COUNT(score) AS total_scores, MAX(score) AS highest_score, MIN(score) AS lowest_score, SUM(score) AS total_score, AVG(semester) AS average_semester FROM (SELECT s.student_id, s.student_name, c.course_name, c.credit, sc.score, sc.semester FROM students s JOIN scores sc ON s.student_id = sc.student_id JOIN courses c ON sc.course_id = c.course_id) GROUP BY student_id, course_name",
+ "thought": "I have created a SQL query that analyzes student scores data to identify trends and patterns in academic performance, subject-wise scores, and attendance correlation. The query calculates average score, total scores, highest score, lowest score, total score, and average semester for each student in each course."
+}
+```
+
+**4. LLM output of third task**
+```json
+{
+ "display_type": "response_table",
+ "sql": "SELECT student_id, student_name, course_name, credit, AVG(score) AS average_score, COUNT(score) AS total_scores, MAX(score) AS highest_score, MIN(score) AS lowest_score, SUM(score) AS total_score, AVG(semester) AS average_semester FROM (SELECT s.student_id, s.student_name, c.course_name, c.credit, sc.score, sc.semester FROM students s JOIN scores sc ON s.student_id = sc.student_id JOIN courses c ON sc.course_id = c.course_id) GROUP BY student_id, course_name",
+ "thought": "The SQL query provided will generate a table with the analyzed data including average score, total scores, highest score, lowest score, total score, and average semester for each student in each course. This table can be further used for visualization purposes to represent student scores from different dimensions effectively."
+}
+```
+
+Of course, the output will also include the data returned by executing the SQL queries if you
+run the above code; we omit it here because it is too long.
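+
+If you want to work with these results programmatically, here is a rough sketch (not a DB-GPT
+API) that extracts the `vis-chart` payloads from the conversation text (e.g. the string returned
+by `agent_memory.gpts_memory.one_chat_completions(...)`); the `sql` and `type` keys come from
+the output above:
+
+```python
+import json
+import re
+
+
+def parse_vis_charts(text: str) -> list:
+    """Parse all vis-chart fenced blocks in the text into Python dicts."""
+    pattern = re.compile(r"```vis-chart\n(.*?)\n```", re.DOTALL)
+    return [json.loads(block) for block in pattern.findall(text)]
+
+
+# charts = parse_vis_charts(conversation_text)
+# for chart in charts:
+#     print(chart["type"], chart["sql"])
+```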
\ No newline at end of file
diff --git a/docs/docs/agents/introduction/tools.md b/docs/docs/agents/introduction/tools.md
new file mode 100644
index 000000000..daaa12b82
--- /dev/null
+++ b/docs/docs/agents/introduction/tools.md
@@ -0,0 +1,181 @@
+# Tool Use
+
+While LLMs can complete a wide range of tasks, they may not work well in domains
+that require comprehensive expert knowledge. In addition, LLMs may encounter
+hallucination problems, which are hard for them to resolve on their own.
+
+So, we can provide tools to help LLMs complete such tasks.
+
+:::note
+In DB-GPT agents, most LLMs support tool calls as long as their base capabilities are strong enough
+(such as `glm-4-9b-chat`, `Yi-1.5-34B-Chat`, `Qwen2-72B-Instruct`, etc.).
+:::
+
+## Writing Tools
+
+Sometimes, LLMs cannot reliably complete calculation tasks by themselves, so we can
+write a simple calculator tool to help them.
+```python
+from dbgpt.agent.resource import tool
+
+@tool
+def simple_calculator(first_number: int, second_number: int, operator: str) -> float:
+ """Simple calculator tool. Just support +, -, *, /."""
+ if isinstance(first_number, str):
+ first_number = int(first_number)
+ if isinstance(second_number, str):
+ second_number = int(second_number)
+ if operator == "+":
+ return first_number + second_number
+ elif operator == "-":
+ return first_number - second_number
+ elif operator == "*":
+ return first_number * second_number
+ elif operator == "/":
+ return first_number / second_number
+ else:
+ raise ValueError(f"Invalid operator: {operator}")
+```
+
+To test multiple tools, let's write another tool that helps LLMs count the number of files in a directory.
+
+```python
+import os
+from typing_extensions import Annotated, Doc
+
+@tool
+def count_directory_files(path: Annotated[str, Doc("The directory path")]) -> int:
+ """Count the number of files in a directory."""
+ if not os.path.isdir(path):
+ raise ValueError(f"Invalid directory path: {path}")
+ return len(os.listdir(path))
+```
+
+## Wrap Your Tools Into A `ToolPack`
+
+Most of the time you will have multiple tools, so you can wrap them into a `ToolPack`.
+A `ToolPack` is a collection of tools: you can use it to manage your tools, and the agent
+can select the appropriate tool from the `ToolPack` according to the task requirements.
+
+```python
+from dbgpt.agent.resource import ToolPack
+
+tools = ToolPack([simple_calculator, count_directory_files])
+```
+
+## Use Tools In Your Agent
+
+```python
+import asyncio
+import os
+from dbgpt.agent import AgentContext, AgentMemory, LLMConfig, UserProxyAgent
+from dbgpt.agent.expand.tool_assistant_agent import ToolAssistantAgent
+from dbgpt.model.proxy import OpenAILLMClient
+
+async def main():
+
+ llm_client = OpenAILLMClient(
+        model_alias="gpt-3.5-turbo", # or other models, e.g. "gpt-4o"
+ api_base=os.getenv("OPENAI_API_BASE"),
+ api_key=os.getenv("OPENAI_API_KEY"),
+ )
+ context: AgentContext = AgentContext(
+ conv_id="test123", language="en", temperature=0.5, max_new_tokens=2048
+ )
+ agent_memory = AgentMemory()
+
+ user_proxy = await UserProxyAgent().bind(agent_memory).bind(context).build()
+
+ tool_man = (
+ await ToolAssistantAgent()
+ .bind(context)
+ .bind(LLMConfig(llm_client=llm_client))
+ .bind(agent_memory)
+ .bind(tools)
+ .build()
+ )
+
+ await user_proxy.initiate_chat(
+ recipient=tool_man,
+ reviewer=user_proxy,
+ message="Calculate the product of 10 and 99",
+ )
+
+ await user_proxy.initiate_chat(
+ recipient=tool_man,
+ reviewer=user_proxy,
+ message="Count the number of files in /tmp",
+ )
+
+ # dbgpt-vis message infos
+ print(await agent_memory.gpts_memory.one_chat_completions("test123"))
+
+if __name__ == "__main__":
+ asyncio.run(main())
+
+```
+The output will be like this:
+``````bash
+--------------------------------------------------------------------------------
+User (to LuBan)-[]:
+
+"Calculate the product of 10 and 99"
+
+--------------------------------------------------------------------------------
+un_stream ai response: {
+ "thought": "To calculate the product of 10 and 99, we need to use a tool that can perform multiplication operation.",
+ "tool_name": "simple_calculator",
+ "args": {
+ "first_number": 10,
+ "second_number": 99,
+ "operator": "*"
+ }
+}
+
+--------------------------------------------------------------------------------
+LuBan (to User)-[gpt-3.5-turbo]:
+
+"{\n \"thought\": \"To calculate the product of 10 and 99, we need to use a tool that can perform multiplication operation.\",\n \"tool_name\": \"simple_calculator\",\n \"args\": {\n \"first_number\": 10,\n \"second_number\": 99,\n \"operator\": \"*\"\n }\n}"
+>>>>>>>>LuBan Review info:
+Pass(None)
+>>>>>>>>LuBan Action report:
+execution succeeded,
+990
+
+--------------------------------------------------------------------------------
+
+--------------------------------------------------------------------------------
+User (to LuBan)-[]:
+
+"Count the number of files in /tmp"
+
+--------------------------------------------------------------------------------
+un_stream ai response: {
+ "thought": "To count the number of files in /tmp directory, we should use a tool that can perform this operation.",
+ "tool_name": "count_directory_files",
+ "args": {
+ "path": "/tmp"
+ }
+}
+
+--------------------------------------------------------------------------------
+LuBan (to User)-[gpt-3.5-turbo]:
+
+"{\n \"thought\": \"To count the number of files in /tmp directory, we should use a tool that can perform this operation.\",\n \"tool_name\": \"count_directory_files\",\n \"args\": {\n \"path\": \"/tmp\"\n }\n}"
+>>>>>>>>LuBan Review info:
+Pass(None)
+>>>>>>>>LuBan Action report:
+execution succeeded,
+19
+
+--------------------------------------------------------------------------------
+``````
+
+
+In the above code, we use the `ToolAssistantAgent` to select and call the appropriate tool.
+
+## More Details?
+
+In the above code, we use the `tool` decorator to define the tool function. It wraps the function in a
+`FunctionTool` object, and `FunctionTool` is a subclass of `BaseTool`, the base class of all tools.
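+
+For example, you can verify this with a quick check (a sketch; the `name` and `description`
+attributes are assumptions based on common tool abstractions, not confirmed API):
+
+```python
+from dbgpt.agent.resource import FunctionTool
+
+# After decoration, `simple_calculator` is a FunctionTool, not a plain function.
+assert isinstance(simple_calculator, FunctionTool)
+print(simple_calculator.name)         # "simple_calculator" (assumed attribute)
+print(simple_calculator.description)  # taken from the docstring (assumed attribute)
+```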
+
+Actually, a **tool** is a special kind of **resource** in the `DB-GPT` agent. You will see more details in the [Resource](./resource.md) section.
\ No newline at end of file
diff --git a/docs/docs/agents/modules/action/action.md b/docs/docs/agents/modules/action/action.md
new file mode 100644
index 000000000..cc090876e
--- /dev/null
+++ b/docs/docs/agents/modules/action/action.md
@@ -0,0 +1,64 @@
+# Action Introduction
+
+> The action module is responsible for translating the agent’s decisions into specific outcomes.
+> This module is located at the most downstream position and directly interacts with the environment.
+> It is influenced by the profile, memory, and planning modules.
+
+## Actions Overview
+
+In DB-GPT, any agent must have an action.
+
+
+There are four perspectives, according to the paper
+[A survey on large language model based autonomous agents](https://link.springer.com/article/10.1007/s11704-024-40231-1):
+
+### Action Goals
+
+What does the agent want to achieve with the action?
+
+1. Task completion: Complete specific tasks, such as writing a function in software
+development or crafting an iron pickaxe in a game.
+
+2. Communication: Communicate with other agents.
+
+3. Environment exploration: Explore unfamiliar environments to expand its perception
+and strike a balance between exploring and exploiting.
+
+### Action Production
+
+How are the actions generated?
+1. Action via memory recollection. In this strategy, the action is generated by
+extracting information from the agent memory according to the current task. The task
+and the extracted memories are used as prompts to trigger the agent actions.
+
+2. Action via plan following. In this strategy, the agent takes actions following its pre-generated plans.
+
+### Action Space
+
+What are the available actions?
+
+Action space refers to the set of possible actions that can be performed by the agent.
+In general, we can roughly divide these actions into two classes:
+(1) external tools and (2) internal knowledge of the LLMs.
+
+### Action Impact
+
+What are the consequences of the actions?
+
+Action impact refers to the consequences of an action, which can take many forms.
+
+Here are some examples of action impact:
+1. Changing environments: Agents can directly alter environment states by actions, such
+as moving their positions, collecting items, and constructing buildings.
+2. Altering internal states: Actions taken by the agent can also change the agent itself,
+including updating memories, forming new plans, acquiring novel knowledge, and more.
+3. Triggering new actions: In the task completion process, one agent action can be triggered by another one.
+
+## Actions In DB-GPT Agents
+
+In the previous [Write Your Custom Agent](../../introduction/custom_agents#create-a-custom-action),
+you saw a basic example of an action in an agent. It is a simple way to define the agent's action.
+
+In the previous [Tool Use](../../introduction/tools.md), you saw how to write tools
+to help LLMs complete tasks, and we know that a `Tool` is a kind of `Resource`.
+In the following sections, we will show you how to use a `Resource` in the action module.
diff --git a/docs/docs/agents/modules/agents_design.md b/docs/docs/agents/modules/agents_design.md
new file mode 100644
index 000000000..e69de29bb
diff --git a/docs/docs/agents/modules/memory/hybrid_memory.md b/docs/docs/agents/modules/memory/hybrid_memory.md
new file mode 100644
index 000000000..da95e0ae5
--- /dev/null
+++ b/docs/docs/agents/modules/memory/hybrid_memory.md
@@ -0,0 +1,145 @@
+# Hybrid Memory
+
+> This structure explicitly models the human short-term and long-term memories. The
+> short-term memory temporarily buffers recent perceptions, while long-term memory consolidates
+> important information over time.
+
+
+For example, the short-term memory contains the context information about the agent's current
+situation, while the long-term memory stores the agent's past behaviors and thoughts, which can be retrieved according to the current events.
+
+## Creating A Hybrid Memory
+
+### Method 1: Creating A Hybrid Memory with Default Values
+
+It will use the OpenAI Embedding API and `ChromaStore` as the default values.
+
+```python
+import shutil
+from dbgpt.agent import HybridMemory, AgentMemory
+
+# Delete the old vector store directory (/tmp/tmp_ltm_vector_store)
+shutil.rmtree("/tmp/tmp_ltm_vector_store", ignore_errors=True)
+hybrid_memory = HybridMemory.from_chroma(
+ vstore_name="agent_memory", vstore_path="/tmp/tmp_ltm_vector_store"
+)
+
+agent_memory: AgentMemory = AgentMemory(memory=hybrid_memory)
+```
+
+### Method 2: Creating A Hybrid Memory With Custom Values
+
+Creating a hybrid memory requires a sensory memory, a short-term memory, and a long-term memory.
+
+**Prepare Embedding Model**
+
+You can prepare the embedding model according to [Prepare Embedding Model](./short_term_memory#prepare-embedding-model).
+
+Here we use the OpenAI Embedding API as an example:
+
+```python
+import os
+from dbgpt.rag.embedding import DefaultEmbeddingFactory
+
+api_url = os.getenv("OPENAI_API_BASE", "https://api.openai.com/v1") + "/embeddings"
+api_key = os.getenv("OPENAI_API_KEY")
+embeddings = DefaultEmbeddingFactory.openai(api_url=api_url, api_key=api_key)
+```
+**Prepare Vector Store**
+
+You need to prepare a vector store; here we use `ChromaStore` as an example:
+```python
+
+import shutil
+from dbgpt.storage.vector_store.chroma_store import ChromaVectorConfig, ChromaStore
+
+# Delete the old vector store directory (/tmp/tmp_ltm_vector_store)
+shutil.rmtree("/tmp/tmp_ltm_vector_store", ignore_errors=True)
+vector_store = ChromaStore(
+    ChromaVectorConfig(
+        name="ltm_vector_store",
+        persist_path="/tmp/tmp_ltm_vector_store",
+        embedding_fn=embeddings,
+    )
+)
+```
+
+**Create Hybrid Memory**
+
+```python
+from datetime import datetime
+from concurrent.futures import ThreadPoolExecutor
+
+from dbgpt.agent import (
+ SensoryMemory,
+ EnhancedShortTermMemory,
+ LongTermMemory,
+ HybridMemory,
+ AgentMemory,
+)
+
+executor = ThreadPoolExecutor()
+
+sensor_memory = SensoryMemory(buffer_size=2)
+
+short_term_memory = EnhancedShortTermMemory(
+ embeddings=embeddings,
+ buffer_size=2,
+ enhance_similarity_threshold=0.7,
+ enhance_threshold=3,
+ executor=executor,
+)
+
+long_term_memory = LongTermMemory(
+ executor=ThreadPoolExecutor(), vector_store=vector_store, _default_importance=0.5
+)
+
+hybrid_memory = HybridMemory(
+ now=datetime.now(),
+ sensory_memory=sensor_memory,
+ short_term_memory=short_term_memory,
+ long_term_memory=long_term_memory,
+)
+
+agent_memory: AgentMemory = AgentMemory(memory=hybrid_memory)
+```
+
+### Method 3: Creating A Hybrid Memory From Vector Store
+
+You can create a hybrid memory from a vector store; it will use the default values for
+the sensory memory and the short-term memory.
+
+```python
+from dbgpt.agent import HybridMemory, AgentMemory
+
+hybrid_memory = HybridMemory.from_vstore(
+ vector_store=vector_store, embeddings=embeddings
+)
+
+agent_memory: AgentMemory = AgentMemory(memory=hybrid_memory)
+```
+
+## How It Works
+
+When writing a memory fragment:
+1. The hybrid memory stores incoming memory fragments in sensory memory first. When the
+sensory memory is full, it discards all of its fragments, and some of the discarded
+fragments are transferred to short-term memory.
+2. Short-term memory receives some of the sensory memory as outside observations, and
+memory fragments in short-term memory can be enhanced by other observations. Some of the
+enhanced fragments are transferred to long-term memory; at the same time, the enhanced
+memories are reflected into higher-level thoughts and insights that are also written to long-term memory.
+3. Long-term memory stores the agent's experiences and knowledge. When it receives memory
+fragments from short-term memory, it computes the importance of each fragment and then writes
+it to the vector store.
+
+When reading a memory fragment:
+1. First, the hybrid memory reads memory fragments from long-term memory according
+to the observation. The long-term memory uses a `TimeWeightedEmbeddingRetriever` to retrieve
+the memory fragments (the latest memory fragments have higher weights).
+2. The retrieved memory fragments are saved to short-term memory (only to enhance the
+existing fragments, not to append new fragments to short-term memory). The retrieved
+memory fragments and all short-term memory fragments are then merged and passed to the LLM
+as the current memories. After the enhancement process, some new short-term memory fragments are transferred to long-term memory.
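+
+Here is a rough sketch of this flow, assuming the `hybrid_memory` built above and that
+`HybridMemory` exposes the same async `write`/`read` interface used elsewhere in these docs:
+
+```python
+import asyncio
+
+from dbgpt.agent import AgentMemoryFragment
+
+
+async def demo():
+    # Each write enters sensory memory first; overflowing the sensory buffer
+    # pushes some fragments into short-term (and eventually long-term) memory.
+    for text in [
+        "The user likes jazz music.",
+        "The user lives in Hangzhou.",
+        "The user is learning Rust.",
+    ]:
+        await hybrid_memory.write(AgentMemoryFragment(text))
+    # Reading retrieves related long-term memories and merges them with
+    # the current short-term memories.
+    fragments = await hybrid_memory.read(observation="What music does the user like?")
+    print([f.raw_observation for f in fragments])
+
+
+asyncio.run(demo())
+```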
\ No newline at end of file
diff --git a/docs/docs/agents/modules/memory/long_term_memory.md b/docs/docs/agents/modules/memory/long_term_memory.md
new file mode 100644
index 000000000..ac435275d
--- /dev/null
+++ b/docs/docs/agents/modules/memory/long_term_memory.md
@@ -0,0 +1,74 @@
+# Long-term Memory
+
+> The short-term memory contains the context information about the agent's current
+> situation, while the long-term memory stores the agent's past behaviors and thoughts, which can be
+> retrieved according to the current events.
+
+> Long-term memory resembles the external vector storage that agents can rapidly query and retrieve from as needed.
+
+In DB-GPT, the long-term memory is stored in vector storage by default.
+
+
+## Using Long-term Memory
+
+To use long-term memory, you need to provide a vector store.
+
+### Prepare Embedding Model
+
+First, you need to prepare an embedding model, which is used to convert the text into vectors.
+You can prepare the embedding model according to [Prepare Embedding Model](./short_term_memory#prepare-embedding-model).
+
+Here we use the OpenAI Embedding API as an example:
+
+```python
+import os
+from dbgpt.rag.embedding import DefaultEmbeddingFactory
+
+api_url = os.getenv("OPENAI_API_BASE", "https://api.openai.com/v1") + "/embeddings"
+api_key = os.getenv("OPENAI_API_KEY")
+embeddings = DefaultEmbeddingFactory.openai(api_url=api_url, api_key=api_key)
+```
+
+### Prepare Vector Store
+
+Then you need to prepare a vector store; here we use `ChromaStore` as an example.
+
+Install the `chroma` package with the following command:
+
+```bash
+pip install chromadb
+```
+
+```python
+
+import shutil
+from dbgpt.storage.vector_store.chroma_store import ChromaVectorConfig, ChromaStore
+
+# Delete the old vector store directory (/tmp/tmp_ltm_vector_store)
+shutil.rmtree("/tmp/tmp_ltm_vector_store", ignore_errors=True)
+vector_store = ChromaStore(
+    ChromaVectorConfig(
+        name="ltm_vector_store",
+        persist_path="/tmp/tmp_ltm_vector_store",
+        embedding_fn=embeddings,
+    )
+)
+```
+
+### Using Long-term Memory
+
+```python
+from concurrent.futures import ThreadPoolExecutor
+from dbgpt.agent import AgentMemory, LongTermMemory
+
+# Create an agent memory, which contains a long-term memory
+memory = LongTermMemory(
+ executor=ThreadPoolExecutor(), vector_store=vector_store, _default_importance=0.5
+)
+agent_memory: AgentMemory = AgentMemory(memory=memory)
+```
+
+In the above code, `_default_importance` is the default importance assigned to each memory
+fragment; because we use `LongTermMemory` directly, we need to set the default importance ourselves.
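+
+As a rough sketch (assuming the async `write`/`read` interface used elsewhere in these docs
+also applies when using `LongTermMemory` directly), each written fragment is stored in the
+vector store with the default importance and can later be retrieved by similarity:
+
+```python
+import asyncio
+
+from dbgpt.agent import AgentMemoryFragment
+
+
+async def demo():
+    # The fragment is written to the vector store with importance 0.5.
+    await memory.write(AgentMemoryFragment("I passed my driving test today."))
+    # Retrieval is similarity-based (and time-weighted, see Hybrid Memory).
+    fragments = await memory.read(observation="driving test")
+    print([f.raw_observation for f in fragments])
+
+
+asyncio.run(demo())
+```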
\ No newline at end of file
diff --git a/docs/docs/agents/modules/memory/memory.md b/docs/docs/agents/modules/memory/memory.md
new file mode 100644
index 000000000..a8400156d
--- /dev/null
+++ b/docs/docs/agents/modules/memory/memory.md
@@ -0,0 +1,433 @@
+# Memory Introduction
+
+> The memory module plays a very important role in the agent architecture design. It
+> stores information perceived from the environment and leverages the recorded memories
+> to facilitate future actions. The memory module can help the agent to accumulate
+> experiences, self-evolve, and behave in a more consistent, reasonable, and effective manner.
+
+## Memory Module Overview
+
+### Memory Operations
+
+In DB-GPT agents, there are three main memory operations:
+
+1. **Memory reading**: The objective of memory reading is to extract meaningful
+information from memory to enhance the agent’s actions.
+2. **Memory writing**: The purpose of memory writing is to store information about the
+perceived environment in memory. Storing valuable information in memory provides a
+foundation for retrieving informative memories in the future, enabling the agent to act
+more efficiently and rationally.
+3. **Memory reflection**: Memory reflection emulates humans’ ability to witness and
+evaluate their own cognitive, emotional, and behavioral processes. When adapted to agents,
+the objective is to provide agents with the capability to independently summarize and
+infer more abstract, complex and high-level information.
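+
+As a minimal sketch of the first two operations (reflection happens inside the memory
+implementations), using the async `write`/`read` interface that the examples below rely on:
+
+```python
+import asyncio
+
+from dbgpt.agent import AgentMemoryFragment, ShortTermMemory
+
+
+async def demo():
+    memory = ShortTermMemory(buffer_size=5)
+    # Memory writing: store information perceived from the environment.
+    await memory.write(AgentMemoryFragment("The user is named Bob."))
+    # Memory reading: extract meaningful information to enhance future actions.
+    fragments = await memory.read(observation="What is the user's name?")
+    print([f.raw_observation for f in fragments])
+
+
+asyncio.run(demo())
+```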
+
+### Memory Structure
+
+In DB-GPT agents, there are four main memory structures:
+1. **Sensory memory**: Like human sensory memory, sensory memory registers perceptual
+inputs: it receives observations from the environment, and some of the sensory
+memory will be transferred to short-term memory.
+2. **Short-term memory**: Short-term memory temporarily buffers recent perceptions. It receives
+some of the sensory memory, and it can be enhanced by other observations or retrieved memories to enter the long-term memory.
+3. **Long-term memory**: Long-term memory stores the agent's experiences and knowledge. It receives
+information from short-term memory and consolidates important information over time.
+4. **Hybrid memory**: Hybrid memory is a combination of sensory memory, short-term memory, and long-term memory.
+
+## Memory In DB-GPT Agents
+
+### Some Concepts Of Memory
+
+- `Memory`: The class that stores all the memories; the current implementations are `SensoryMemory`,
+`ShortTermMemory`, `EnhancedShortTermMemory`, `LongTermMemory` and `HybridMemory`.
+- `MemoryFragment`: An abstract class that stores a piece of memory information.
+`AgentMemoryFragment` is a class that inherits from `MemoryFragment`; it contains
+the memory content, memory id, memory importance, last access time, etc.
+- `GptsMemory`: Used to store the conversation and plan information; it is not a part of the memory structure.
+- `AgentMemory`: A class that contains both the `Memory` and the `GptsMemory`.
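+
+For example, a memory fragment can be created from plain text (the constructor usage below
+matches the custom-agent example at the end of this document):
+
+```python
+from dbgpt.agent import AgentMemoryFragment
+
+fragment = AgentMemoryFragment("user: My name is bob")
+print(fragment.raw_observation)  # The stored content: "user: My name is bob"
+```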
+
+### Create Memory
+
+As mentioned in the previous sections, the memories are included in the `AgentMemory` class. Here is an example:
+```python
+from dbgpt.agent import AgentMemory, ShortTermMemory
+
+# Create an agent memory, default memory is ShortTermMemory
+memory = ShortTermMemory(buffer_size=5)
+agent_memory = AgentMemory(memory=memory)
+```
+
+By the way, you can pass a `GptsMemory` to the `AgentMemory` class. In the conventional sense,
+`GptsMemory` is not part of the memory structure; it is used to store the conversation and plan information.
+
+An example of `GptsMemory`:
+```python
+from dbgpt.agent import AgentMemory, ShortTermMemory, GptsMemory
+
+# Create an agent memory, default memory is ShortTermMemory
+memory = ShortTermMemory(buffer_size=5)
+# Store the conversation and plan information
+gpts_memory = GptsMemory()
+agent_memory = AgentMemory(memory=memory, gpts_memory=gpts_memory)
+```
+
+### Read And Write Memory In Agent
+
+The agent will call the `read_memories` method to read the memory fragments from the memory,
+and call the `write_memories` method to write the memory fragments to the memory.
+
+When the agent calls the LLM, the memories are written into the LLM prompt; after the LLM returns the response,
+the agent writes the query and the response back to the memory.
+
+As we mentioned in [Profile To Prompt](../profile/profile_to_prompt), there is a
+template variable named `most_recent_memories` in the prompt template; it will be replaced by the
+most recent memories.
+
+#### Read Memories To Build Prompt
+
+Here is an example to read memories from memory and build the prompt:
+```python
+import os
+import asyncio
+from dbgpt.agent import (
+ AgentContext,
+ ShortTermMemory,
+ AgentMemory,
+ ConversableAgent,
+ ProfileConfig,
+ LLMConfig,
+ BlankAction,
+ UserProxyAgent,
+)
+from dbgpt.model.proxy import OpenAILLMClient
+
+llm_client = OpenAILLMClient(
+ model_alias="gpt-4o",
+ api_base=os.getenv("OPENAI_API_BASE"),
+ api_key=os.getenv("OPENAI_API_KEY"),
+)
+
+context: AgentContext = AgentContext(
+ conv_id="test123",
+ language="en",
+ temperature=0.9,
+ max_new_tokens=2048,
+ verbose=True, # Add verbose=True to print out the conversation history
+)
+
+# Create an agent memory, which contains a short-term memory
+memory = ShortTermMemory(buffer_size=2)
+agent_memory: AgentMemory = AgentMemory(memory=memory)
+
+# Custom user prompt template, which includes most recent memories and question
+user_prompt_template = """\
+{% if most_recent_memories %}\
+Most recent observations:
+{{ most_recent_memories }}
+{% endif %}\
+
+{% if question %}\
+Question: {{ question }}
+{% endif %}
+"""
+
+# Custom write memory template, which includes question and thought
+write_memory_template = """\
+{% if question %}user: {{ question }} {% endif %}
+{% if thought %}assistant: {{ thought }} {% endif %}\
+"""
+
+
+async def main():
+ # Create a profile with a custom user prompt template
+ joy_profile = ProfileConfig(
+ name="Joy",
+ role="Comedians",
+ user_prompt_template=user_prompt_template,
+ write_memory_template=write_memory_template,
+ )
+ joy = (
+ await ConversableAgent(profile=joy_profile)
+ .bind(context)
+ .bind(LLMConfig(llm_client=llm_client))
+ .bind(agent_memory)
+ .bind(BlankAction)
+ .build()
+ )
+ user_proxy = await UserProxyAgent().bind(agent_memory).bind(context).build()
+ await user_proxy.initiate_chat(
+ recipient=joy,
+ reviewer=user_proxy,
+ message="My name is bob, please tell me a joke",
+ )
+ await user_proxy.initiate_chat(
+ recipient=joy,
+ reviewer=user_proxy,
+ message="What's my name?",
+ )
+
+
+if __name__ == "__main__":
+ asyncio.run(main())
+```
+In the above example, we set `verbose=True` in `AgentContext` to print out the conversation history.
+
+The output will be like this:
+
+``````shell
+--------------------------------------------------------------------------------
+User (to Joy)-[]:
+
+"My name is bob, please tell me a joke"
+
+--------------------------------------------------------------------------------
+un_stream ai response: Sure thing, Bob! Here's one for you:
+
+Why don’t scientists trust atoms?
+
+Because they make up everything!
+
+--------------------------------------------------------------------------------
+String Prompt[verbose]:
+system: You are a Comedians, named Joy, your goal is None.
+Please think step by step to achieve the goal. You can use the resources given below.
+At the same time, please strictly abide by the constraints and specifications in IMPORTANT REMINDER.
+
+*** IMPORTANT REMINDER ***
+Please answer in English.
+
+
+
+
+human:
+Question: My name is bob, please tell me a joke
+
+LLM Output[verbose]:
+Sure thing, Bob! Here's one for you:
+
+Why don’t scientists trust atoms?
+
+Because they make up everything!
+--------------------------------------------------------------------------------
+
+
+--------------------------------------------------------------------------------
+Joy (to User)-[gpt-4o]:
+
+"Sure thing, Bob! Here's one for you:\n\nWhy don’t scientists trust atoms?\n\nBecause they make up everything!"
+>>>>>>>>Joy Review info:
+Pass(None)
+>>>>>>>>Joy Action report:
+execution succeeded,
+Sure thing, Bob! Here's one for you:
+
+Why don’t scientists trust atoms?
+
+Because they make up everything!
+
+--------------------------------------------------------------------------------
+
+--------------------------------------------------------------------------------
+User (to Joy)-[]:
+
+"What's my name?"
+
+--------------------------------------------------------------------------------
+un_stream ai response: Your name is Bob!
+
+And here's another quick joke for you:
+
+Why don't skeletons fight each other?
+
+They don't have the guts!
+
+--------------------------------------------------------------------------------
+String Prompt[verbose]:
+system: You are a Comedians, named Joy, your goal is None.
+Please think step by step to achieve the goal. You can use the resources given below.
+At the same time, please strictly abide by the constraints and specifications in IMPORTANT REMINDER.
+
+*** IMPORTANT REMINDER ***
+Please answer in English.
+
+
+
+
+human: Most recent observations:
+user: My name is bob, please tell me a joke
+assistant: Sure thing, Bob! Here's one for you:
+
+Why don’t scientists trust atoms?
+
+Because they make up everything!
+
+Question: What's my name?
+
+LLM Output[verbose]:
+Your name is Bob!
+
+And here's another quick joke for you:
+
+Why don't skeletons fight each other?
+
+They don't have the guts!
+--------------------------------------------------------------------------------
+
+
+--------------------------------------------------------------------------------
+Joy (to User)-[gpt-4o]:
+
+"Your name is Bob! \n\nAnd here's another quick joke for you:\n\nWhy don't skeletons fight each other?\n\nThey don't have the guts!"
+>>>>>>>>Joy Review info:
+Pass(None)
+>>>>>>>>Joy Action report:
+execution succeeded,
+Your name is Bob!
+
+And here's another quick joke for you:
+
+Why don't skeletons fight each other?
+
+They don't have the guts!
+
+--------------------------------------------------------------------------------
+``````
+
+In the second conversation, you can see the `Most recent observations` in the user prompt:
+``````
+--------------------------------------------------------------------------------
+String Prompt[verbose]:
+system: You are a Comedians, named Joy, your goal is None.
+Please think step by step to achieve the goal. You can use the resources given below.
+At the same time, please strictly abide by the constraints and specifications in IMPORTANT REMINDER.
+
+*** IMPORTANT REMINDER ***
+Please answer in English.
+
+
+
+
+human: Most recent observations:
+user: My name is bob, please tell me a joke
+assistant: Sure thing, Bob! Here's one for you:
+
+Why don’t scientists trust atoms?
+
+Because they make up everything!
+
+Question: What's my name?
+
+LLM Output[verbose]:
+Your name is Bob!
+
+And here's another quick joke for you:
+
+Why don't skeletons fight each other?
+
+They don't have the guts!
+--------------------------------------------------------------------------------
+``````
+
+#### Write Memories
+
+When the agent receives the response from the LLM, it writes the query and the response to the memory.
+In a memory fragment, the `content` is a string, so you should decide how to encode the information into the content.
+
+In the above example, the `write_memory_template` is:
+```python
+write_memory_template = """\
+{% if question %}user: {{ question }} {% endif %}
+{% if thought %}assistant: {{ thought }} {% endif %}\
+"""
+```
+The `question` is the user query, and the `thought` is the LLM response; we will introduce more in the next section.
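+
+For example, here is a small sketch of how this template turns a question and a thought into
+the memory content (DB-GPT renders it internally; we use plain Jinja2 here just to illustrate):
+
+```python
+from jinja2 import Template
+
+write_memory_template = """\
+{% if question %}user: {{ question }} {% endif %}
+{% if thought %}assistant: {{ thought }} {% endif %}\
+"""
+
+content = Template(write_memory_template).render(
+    question="My name is bob, please tell me a joke",
+    thought="Sure thing, Bob! Here's one for you: ...",
+)
+print(content)
+# user: My name is bob, please tell me a joke
+# assistant: Sure thing, Bob! Here's one for you: ...
+```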
+
+
+## Custom Memory Reading And Writing
+
+We can customize the memory reading and writing by inheriting the `ConversableAgent` class
+and override the `read_memories` and `write_memories` methods.
+
+```python
+from typing import Optional
+from dbgpt.agent import (
+ ConversableAgent,
+ AgentMemoryFragment,
+ ProfileConfig,
+ BlankAction,
+ ActionOutput,
+)
+
+write_memory_template = """\
+{% if question %}user: {{ question }} {% endif %}
+{% if thought %}assistant: {{ thought }} {% endif %}\
+"""
+
+
+class JoyAgent(ConversableAgent):
+ profile: ProfileConfig = ProfileConfig(
+ name="Joy",
+ role="Comedians",
+ write_memory_template=write_memory_template,
+ )
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+ self._init_actions([BlankAction])
+
+ async def read_memories(
+ self,
+ question: str,
+ ) -> str:
+ """Read the memories from the memory."""
+ memories = await self.memory.read(observation=question)
+ recent_messages = [m.raw_observation for m in memories]
+ # Merge the recent messages.
+ return "".join(recent_messages)
+
+ async def write_memories(
+ self,
+ question: str,
+ ai_message: str,
+ action_output: Optional[ActionOutput] = None,
+ check_pass: bool = True,
+ check_fail_reason: Optional[str] = None,
+ ) -> None:
+ """Write the memories to the memory.
+
+        We suggest you override this method to save the conversation to memory
+        according to your needs.
+
+ Args:
+ question(str): The question received.
+ ai_message(str): The AI message, LLM output.
+ action_output(ActionOutput): The action output.
+ check_pass(bool): Whether the check pass.
+ check_fail_reason(str): The check fail reason.
+ """
+ if not action_output:
+ raise ValueError("Action output is required to save to memory.")
+
+ mem_thoughts = action_output.thoughts or ai_message
+ memory_map = {
+ "question": question,
+ "thought": mem_thoughts,
+ }
+ # This is the template to write the memory.
+        # It is configured in the agent's profile.
+ write_memory_template = self.write_memory_template
+ memory_content: str = self._render_template(write_memory_template, **memory_map)
+ fragment = AgentMemoryFragment(memory_content)
+ await self.memory.write(fragment)
+```
+
+In the above example, we override `read_memories` to read the memories from the memory (in DB-GPT,
+the most recent memories form the `most_recent_memories` variable in the prompt template),
+and we override `write_memories` to write the memories to the memory.
+
+**So, you can customize the memory reading and writing according to your needs.**
+
+## Summary
+
+In this document, we introduced the memory module in DB-GPT agents and how to use memory in agents.
+In the following sections, we will introduce how to use each memory structure in DB-GPT agents.
\ No newline at end of file
diff --git a/docs/docs/agents/modules/memory/sensory_memory.md b/docs/docs/agents/modules/memory/sensory_memory.md
new file mode 100644
index 000000000..35d50aad1
--- /dev/null
+++ b/docs/docs/agents/modules/memory/sensory_memory.md
@@ -0,0 +1,351 @@
+# Sensory Memory
+
+Like human sensory memory, the sensory memory registers perceptual inputs: it
+receives the observations from the environment, and some of the sensory memory will be
+transferred to short-term memory.
+
+:::tip NOTE
+You should not use the `SensoryMemory` directly in most cases. It is designed to receive
+the observations from the environment, and only a part of the sensory memory will be transferred to short-term memory.
+:::
+
+## Simple Example of Sensory Memory
+
+First, you need to create an instance of `SensoryMemory` and then you can use it to store the observations.
+
+```python
+from dbgpt.agent import AgentMemory, SensoryMemory
+
+# Create an agent memory, which contains a sensory memory
+memory = SensoryMemory(buffer_size=2)
+agent_memory: AgentMemory = AgentMemory(memory=memory)
+```
+
+Then, let's create some user messages for testing.
+
+```python
+import os
+from dbgpt.agent import AgentContext
+from dbgpt.model.proxy import OpenAILLMClient
+
+llm_client = OpenAILLMClient(
+ model_alias="gpt-4o",
+ api_base=os.getenv("OPENAI_API_BASE"),
+ api_key=os.getenv("OPENAI_API_KEY"),
+)
+
+context: AgentContext = AgentContext(
+ conv_id="test123",
+ language="en",
+ temperature=0.9,
+ max_new_tokens=2048,
+)
+
+messages = [
+ "When I was 4 years old, I went to primary school for the first time, please tell me a joke",
+ "When I was 10 years old, I went to middle school for the first time, please tell me a joke",
+ "When I was 16 years old, I went to high school for the first time, please tell me a joke",
+ "When I was 18 years old, I went to college for the first time, please tell me a joke",
+]
+```
+
+### Verifying Remembering
+
+```python
+import asyncio
+from dbgpt.agent import (
+ ConversableAgent,
+ ProfileConfig,
+ LLMConfig,
+ BlankAction,
+ UserProxyAgent,
+)
+
+async def verify_remember():
+ joy = (
+ await ConversableAgent(profile=ProfileConfig(name="Joy", role="Comedians"))
+ .bind(context)
+ .bind(LLMConfig(llm_client=llm_client))
+ .bind(agent_memory)
+ .bind(BlankAction)
+ .build()
+ )
+ user_proxy = await UserProxyAgent().bind(agent_memory).bind(context).build()
+    # No more than 2 turns, to make sure the agent remembers the previous conversation
+ for message in messages[:2]:
+ await user_proxy.initiate_chat(
+ recipient=joy,
+ reviewer=user_proxy,
+ message=message,
+ )
+ await user_proxy.initiate_chat(
+ recipient=joy,
+ reviewer=user_proxy,
+ message="How old was I when I went to primary school?"
+ )
+
+if __name__ == "__main__":
+ asyncio.run(verify_remember())
+
+```
+
+The output will look like this:
+
+```
+--------------------------------------------------------------------------------
+User (to Joy)-[]:
+
+"When I was 4 years old, I went to primary school for the first time, please tell me a joke"
+
+--------------------------------------------------------------------------------
+un_stream ai response: Sure, here's a fun joke for you:
+
+Why did the kid bring a ladder to school?
+
+Because he wanted to go to high school!
+
+--------------------------------------------------------------------------------
+Joy (to User)-[gpt-4o]:
+
+"Sure, here's a fun joke for you:\n\nWhy did the kid bring a ladder to school?\n\nBecause he wanted to go to high school!"
+>>>>>>>>Joy Review info:
+Pass(None)
+>>>>>>>>Joy Action report:
+execution succeeded,
+Sure, here's a fun joke for you:
+
+Why did the kid bring a ladder to school?
+
+Because he wanted to go to high school!
+
+--------------------------------------------------------------------------------
+
+--------------------------------------------------------------------------------
+User (to Joy)-[]:
+
+"When I was 10 years old, I went to middle school for the first time, please tell me a joke"
+
+--------------------------------------------------------------------------------
+un_stream ai response: Sure, here's a joke for you:
+
+Why did the student eat his homework?
+
+Because the teacher said it was a piece of cake!
+
+--------------------------------------------------------------------------------
+Joy (to User)-[gpt-4o]:
+
+"Sure, here's a joke for you:\n\nWhy did the student eat his homework?\n\nBecause the teacher said it was a piece of cake!"
+>>>>>>>>Joy Review info:
+Pass(None)
+>>>>>>>>Joy Action report:
+execution succeeded,
+Sure, here's a joke for you:
+
+Why did the student eat his homework?
+
+Because the teacher said it was a piece of cake!
+
+--------------------------------------------------------------------------------
+
+--------------------------------------------------------------------------------
+User (to Joy)-[]:
+
+"How old was I when I went to primary school?"
+
+--------------------------------------------------------------------------------
+un_stream ai response: Based on your previous statements, you went to primary school for the first time when you were 4 years old.
+
+--------------------------------------------------------------------------------
+Joy (to User)-[gpt-4o]:
+
+"Based on your previous statements, you went to primary school for the first time when you were 4 years old."
+>>>>>>>>Joy Review info:
+Pass(None)
+>>>>>>>>Joy Action report:
+execution succeeded,
+Based on your previous statements, you went to primary school for the first time when you were 4 years old.
+```
+
+In the above example, the agent remembers the previous conversation and can answer the
+question based on it. This is because `buffer_size=2` in the `SensoryMemory`, so the
+agent can remember the previous two conversation rounds.
+
+### Verifying Forgetting
+
+```python
+async def verify_forget():
+ joy = (
+ await ConversableAgent(profile=ProfileConfig(name="Joy", role="Comedians"))
+ .bind(context)
+ .bind(LLMConfig(llm_client=llm_client))
+ .bind(agent_memory)
+ .bind(BlankAction)
+ .build()
+ )
+ user_proxy = await UserProxyAgent().bind(agent_memory).bind(context).build()
+ for message in messages:
+ await user_proxy.initiate_chat(
+ recipient=joy,
+ reviewer=user_proxy,
+ message=message,
+ )
+ await user_proxy.initiate_chat(
+ recipient=joy,
+ reviewer=user_proxy,
+ message="How old was I when I went to primary school?",
+ )
+
+
+if __name__ == "__main__":
+ asyncio.run(verify_forget())
+```
+
+The output will look like this:
+
+```
+--------------------------------------------------------------------------------
+User (to Joy)-[]:
+
+"When I was 4 years old, I went to primary school for the first time, please tell me a joke"
+
+--------------------------------------------------------------------------------
+un_stream ai response: Sure, here's a joke for you:
+
+Why did the scarecrow become a successful student?
+
+Because he was outstanding in his field!
+
+--------------------------------------------------------------------------------
+Joy (to User)-[gpt-4o]:
+
+"Sure, here's a joke for you:\n\nWhy did the scarecrow become a successful student?\n\nBecause he was outstanding in his field!"
+>>>>>>>>Joy Review info:
+Pass(None)
+>>>>>>>>Joy Action report:
+execution succeeded,
+Sure, here's a joke for you:
+
+Why did the scarecrow become a successful student?
+
+Because he was outstanding in his field!
+
+--------------------------------------------------------------------------------
+
+--------------------------------------------------------------------------------
+User (to Joy)-[]:
+
+"When I was 10 years old, I went to middle school for the first time, please tell me a joke"
+
+--------------------------------------------------------------------------------
+un_stream ai response: Of course! Here's a joke for you:
+
+Why was the math book sad when it started middle school?
+
+Because it had too many problems!
+
+--------------------------------------------------------------------------------
+Joy (to User)-[gpt-4o]:
+
+"Of course! Here's a joke for you:\n\nWhy was the math book sad when it started middle school?\n\nBecause it had too many problems!"
+>>>>>>>>Joy Review info:
+Pass(None)
+>>>>>>>>Joy Action report:
+execution succeeded,
+Of course! Here's a joke for you:
+
+Why was the math book sad when it started middle school?
+
+Because it had too many problems!
+
+--------------------------------------------------------------------------------
+
+--------------------------------------------------------------------------------
+User (to Joy)-[]:
+
+"When I was 16 years old, I went to high school for the first time, please tell me a joke"
+
+--------------------------------------------------------------------------------
+un_stream ai response: Sure, here's a joke for you:
+
+Why did the geometry teacher go to the beach?
+
+Because she needed to find a new angle!
+
+--------------------------------------------------------------------------------
+Joy (to User)-[gpt-4o]:
+
+"Sure, here's a joke for you:\n\nWhy did the geometry teacher go to the beach?\n\nBecause she needed to find a new angle!"
+>>>>>>>>Joy Review info:
+Pass(None)
+>>>>>>>>Joy Action report:
+execution succeeded,
+Sure, here's a joke for you:
+
+Why did the geometry teacher go to the beach?
+
+Because she needed to find a new angle!
+
+--------------------------------------------------------------------------------
+
+--------------------------------------------------------------------------------
+User (to Joy)-[]:
+
+"When I was 18 years old, I went to college for the first time, please tell me a joke"
+
+--------------------------------------------------------------------------------
+un_stream ai response: Sure, here’s a college-themed joke for you:
+
+Why did the scarecrow become a successful college student?
+
+Because he was outstanding in his field! 🌾🎓😄
+
+--------------------------------------------------------------------------------
+Joy (to User)-[gpt-4o]:
+
+"Sure, here’s a college-themed joke for you:\n\nWhy did the scarecrow become a successful college student?\n\nBecause he was outstanding in his field! 🌾🎓😄"
+>>>>>>>>Joy Review info:
+Pass(None)
+>>>>>>>>Joy Action report:
+execution succeeded,
+Sure, here’s a college-themed joke for you:
+
+Why did the scarecrow become a successful college student?
+
+Because he was outstanding in his field! 🌾🎓😄
+
+--------------------------------------------------------------------------------
+
+--------------------------------------------------------------------------------
+User (to Joy)-[]:
+
+"How old was I when I went to primary school?"
+
+--------------------------------------------------------------------------------
+un_stream ai response: Most people typically start primary school around 5 or 6 years old. But if you'd like a joke on that topic, here it goes:
+
+Why did the math book look so sad on its first day of primary school?
+
+Because it had too many problems! 📚😄
+
+--------------------------------------------------------------------------------
+Joy (to User)-[gpt-4o]:
+
+"Most people typically start primary school around 5 or 6 years old. But if you'd like a joke on that topic, here it goes:\n\nWhy did the math book look so sad on its first day of primary school?\n\nBecause it had too many problems! 📚😄"
+>>>>>>>>Joy Review info:
+Pass(None)
+>>>>>>>>Joy Action report:
+execution succeeded,
+Most people typically start primary school around 5 or 6 years old. But if you'd like a joke on that topic, here it goes:
+
+Why did the math book look so sad on its first day of primary school?
+
+Because it had too many problems! 📚😄
+```
+
+In the above example, the agent forgets the earlier conversation and cannot answer the
+question based on it. This is because of `buffer_size=2` in this memory:
+**it discards all existing memories when the buffer is full**. This is a special
+behavior of `SensoryMemory`, unlike a common buffered memory (FIFO, which keeps the
+latest `buffer_size` memories).
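+
+To see this discard-all behavior directly, here is a minimal sketch (assuming
+`SensoryMemory` and `AgentMemoryFragment` are importable from `dbgpt.agent` and the
+async `write` interface of the memory module; treat the exact imports as assumptions):
+
+```python
+import asyncio
+
+from dbgpt.agent import AgentMemoryFragment, SensoryMemory
+
+
+async def show_discard_all():
+    memory = SensoryMemory(buffer_size=2)
+    await memory.write(AgentMemoryFragment("Went to primary school at 4"))
+    await memory.write(AgentMemoryFragment("Went to middle school at 10"))
+    # The buffer is now full: per the behavior described above, the next write
+    # discards ALL existing memories, not just the oldest one as a FIFO buffer would
+    await memory.write(AgentMemoryFragment("Went to high school at 16"))
+
+
+asyncio.run(show_discard_all())
+```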
+
diff --git a/docs/docs/agents/modules/memory/short_term_memory.md b/docs/docs/agents/modules/memory/short_term_memory.md
new file mode 100644
index 000000000..60d96ac67
--- /dev/null
+++ b/docs/docs/agents/modules/memory/short_term_memory.md
@@ -0,0 +1,117 @@
+# Short-term Memory
+
+Short-term memory temporarily buffers recent perceptions. It receives some of the
+sensory memory, and it can be **enhanced** by other observations or retrieved memories
+before entering long-term memory.
+
+In most cases, short-term memory is analogous to the input information within the
+context window constrained by the LLM, so you can think of short-term memory as the
+content that will be written into the LLM's prompt.
+
+## Using Short-term Memory
+
+```python
+from dbgpt.agent import AgentMemory, ShortTermMemory
+
+# Create an agent memory, which contains a short-term memory
+memory = ShortTermMemory(buffer_size=2)
+agent_memory: AgentMemory = AgentMemory(memory=memory)
+```
+
+Like sensory memory, short-term memory also has a buffer size. When the buffer is full,
+it keeps the latest `buffer_size` memories, and some of the discarded memories are
+transferred to long-term memory.
+
+The default short-term memory is a `FIFO` buffered memory, so we won't go into much detail here.
+
+## Enhanced Short-term Memory
+
+Like human short-term memory, the short-term memory in DB-GPT agents can be enhanced by outside observations.
+Here we introduce a kind of enhanced short-term memory, which is called `EnhancedShortTermMemory`,
+it enhances memories by comparing the similarity between the new observation and the existing memories.
+
+To use `EnhancedShortTermMemory`, you need to provide a embeddings model.
+
+### Prepare Embedding Model
+
+DB-GPT supports many embedding models; here are some of them:
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+<Tabs
+  defaultValue="openai"
+  values={[
+    {label: 'OpenAI', value: 'openai'},
+    {label: 'Local Model', value: 'local'},
+    {label: 'Remote API', value: 'remote'},
+  ]}>
+
+<TabItem value="openai">
+
+```python
+import os
+from dbgpt.rag.embedding import DefaultEmbeddingFactory
+
+api_url = os.getenv("OPENAI_API_BASE", "https://api.openai.com/v1") + "/embeddings"
+api_key = os.getenv("OPENAI_API_KEY")
+embeddings = DefaultEmbeddingFactory.openai(api_url=api_url, api_key=api_key)
+```
+
+</TabItem>
+
+<TabItem value="local">
+
+```python
+from dbgpt.rag.embedding import DefaultEmbeddingFactory
+
+embeddings = DefaultEmbeddingFactory.default("/data/models/text2vec-large-chinese")
+```
+
+</TabItem>
+
+<TabItem value="remote">
+
+If you have deployed a [DB-GPT cluster](../../../installation/model_service/cluster) and the
+[API server](../../../installation/advanced_usage/OpenAI_SDK_call), you can connect to the
+API server to get the embeddings.
+
+```python
+from dbgpt.rag.embedding import DefaultEmbeddingFactory
+
+embeddings = DefaultEmbeddingFactory.remote(
+ api_url="http://localhost:8100/api/v1/embeddings",
+ api_key="{your_api_key}",
+ model_name="text2vec"
+)
+```
+
+</TabItem>
+
+</Tabs>
+
+### Using Enhanced Short-term Memory
+
+```python
+from concurrent.futures import ThreadPoolExecutor
+from dbgpt.agent import AgentMemory, EnhancedShortTermMemory
+
+# Create an agent memory, which contains a short-term memory
+memory = EnhancedShortTermMemory(
+ embeddings=embeddings,
+ buffer_size=2,
+ enhance_similarity_threshold=0.5,
+ enhance_threshold=3,
+ executor=ThreadPoolExecutor(),
+)
+agent_memory: AgentMemory = AgentMemory(memory=memory)
+```
+In DB-GPT, the core interface is asynchronous and non-blocking, so we use `ThreadPoolExecutor` to
+run the similarity calculation in a separate thread for better performance.
+
+In the above code, we set `enhance_similarity_threshold` to `0.5`, which means that if the
+similarity is greater than `0.5`, the new observation has a probability of being enhanced into
+short-term memory (there is a random factor in the enhancement process).
+And we set `enhance_threshold` to `3`, which means that if a memory is enhanced `3` or more times,
+it will be transferred to long-term memory.
+
+Then you can use the enhanced short-term memory in your agent.
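+
+For example, here is a minimal sketch that binds such a memory to an agent. It reuses
+the `embeddings` object created above and assumes an OpenAI-compatible model, as in the
+other examples in this guide:
+
+```python
+import asyncio
+import os
+from concurrent.futures import ThreadPoolExecutor
+
+from dbgpt.agent import (
+    AgentContext,
+    AgentMemory,
+    EnhancedShortTermMemory,
+    LLMConfig,
+    UserProxyAgent,
+)
+from dbgpt.agent.expand.summary_assistant_agent import SummaryAssistantAgent
+from dbgpt.model.proxy import OpenAILLMClient
+
+
+async def main():
+    llm_client = OpenAILLMClient(
+        model_alias="gpt-4o",
+        api_base=os.getenv("OPENAI_API_BASE"),
+        api_key=os.getenv("OPENAI_API_KEY"),
+    )
+    context = AgentContext(conv_id="test_short_term_memory", language="en")
+    # Wrap the enhanced short-term memory in the agent memory
+    memory = EnhancedShortTermMemory(
+        embeddings=embeddings,  # The embeddings created above
+        buffer_size=2,
+        enhance_similarity_threshold=0.5,
+        enhance_threshold=3,
+        executor=ThreadPoolExecutor(),
+    )
+    agent_memory = AgentMemory(memory=memory)
+
+    user_proxy = await UserProxyAgent().bind(agent_memory).bind(context).build()
+    summarizer = (
+        await SummaryAssistantAgent()
+        .bind(context)
+        .bind(LLMConfig(llm_client=llm_client))
+        .bind(agent_memory)
+        .build()
+    )
+    await user_proxy.initiate_chat(
+        recipient=summarizer,
+        reviewer=user_proxy,
+        message="Summarize this: DB-GPT agents support hybrid memory.",
+    )
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
+```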
+
diff --git a/docs/docs/agents/modules/plan/plan.md b/docs/docs/agents/modules/plan/plan.md
new file mode 100644
index 000000000..ff12c65d2
--- /dev/null
+++ b/docs/docs/agents/modules/plan/plan.md
@@ -0,0 +1,164 @@
+# Planning Introduction
+
+> When faced with a complex task, humans tend to deconstruct it into simpler subtasks
+> and solve them individually. The planning module aims to empower the agents with such
+> human capability, which is expected to make the agent behave more reasonably, powerfully, and reliably.
+
+In the previous section, [Agents Planning](../../introduction/planning), we saw the
+`AutoPlanChatManager` agent and how it can be used to analyze a database with auto-planning.
+
+## Planning With AWEL
+
+Here we will introduce how to use the planning module in DB-GPT with the `WrappedAWELLayoutManager`.
+`WrappedAWELLayoutManager` runs the agents in sequence, and agents can be added to the manager via the `hire` method.
+
+Here is an example of how to use the `WrappedAWELLayoutManager`:
+
+```python
+import asyncio
+import os
+
+from dbgpt.agent import (
+ AgentContext,
+ AgentMemory,
+ LLMConfig,
+ UserProxyAgent,
+ WrappedAWELLayoutManager,
+)
+from dbgpt.agent.expand.resources.search_tool import baidu_search
+from dbgpt.agent.expand.summary_assistant_agent import SummaryAssistantAgent
+from dbgpt.agent.expand.tool_assistant_agent import ToolAssistantAgent
+from dbgpt.agent.resource import ToolPack
+from dbgpt.model.proxy import OpenAILLMClient
+
+
+async def main():
+ llm_client = OpenAILLMClient(
+ model_alias="gpt-4o",
+ api_base=os.getenv("OPENAI_API_BASE"),
+ api_key=os.getenv("OPENAI_API_KEY"),
+ )
+ context: AgentContext = AgentContext(
+ conv_id="test123", language="en", temperature=0.5, max_new_tokens=2048
+ )
+ agent_memory = AgentMemory()
+
+ user_proxy = await UserProxyAgent().bind(agent_memory).bind(context).build()
+
+ tools = ToolPack([baidu_search])
+ tool_engineer = (
+ await ToolAssistantAgent()
+ .bind(context)
+ .bind(LLMConfig(llm_client=llm_client))
+ .bind(agent_memory)
+ .bind(tools)
+ .build()
+ )
+ summarizer = (
+ await SummaryAssistantAgent()
+ .bind(context)
+ .bind(agent_memory)
+ .bind(LLMConfig(llm_client=llm_client))
+ .build()
+ )
+
+ manager = (
+ await WrappedAWELLayoutManager()
+ .bind(context)
+ .bind(agent_memory)
+ .bind(LLMConfig(llm_client=llm_client))
+ .build()
+ )
+ manager.hire([tool_engineer, summarizer])
+
+ await user_proxy.initiate_chat(
+ recipient=manager,
+ reviewer=user_proxy,
+ message="Query the weather in Beijing",
+ )
+
+
+if __name__ == "__main__":
+ asyncio.run(main())
+
+```
+
+Run the above code and you will see the agents run in sequence. The output will look like this:
+
+```bash
+--------------------------------------------------------------------------------
+AWELBaseManager (to LuBan)-[]:
+
+"Query the weather in Beijing"
+
+--------------------------------------------------------------------------------
+un_stream ai response: {
+ "thought": "To find the current weather in Beijing, I will use the baidu_search API to search for the latest weather information.",
+ "tool_name": "baidu_search",
+ "args": {
+ "query": "current weather in Beijing",
+ "num_results": 8
+ }
+}
+
+--------------------------------------------------------------------------------
+LuBan (to Aristotle)-[gpt-4o]:
+
+"{\n \"thought\": \"To find the current weather in Beijing, I will use the baidu_search API to search for the latest weather information.\",\n \"tool_name\": \"baidu_search\",\n \"args\": {\n \"query\": \"current weather in Beijing\",\n \"num_results\": 8\n }\n}"
+>>>>>>>>LuBan Review info:
+Pass(None)
+>>>>>>>>LuBan Action report:
+execution succeeded,
+### [Weather for Beijing, Beijing Municipality, China](http://www.baidu.com/link?url=wpnRKEh7u3CA7C7n3f3wuit8nrIMJXReMRsPJ4gSiZGg_3sSCOuxi4rUSDGkxgG2CEAITa25NLfKcSZOK34kyq)
+Location:Beijing Airport Current Time:25 Jun 2024, 16:31:38 Latest Report:25 Jun 2024, 08:30 Visibility:N/A Pressure:1009 mbar Humidity:32% Dew Point:10 °C Upcom...
+
+### [Beijing, 11 10 天天气预报 - The Weather Channel | Weat...](http://www.baidu.com/link?url=BZa8Z5Ds-rPMRw0EZ0ly8tPzMbpYQ0CusdaFQ7RHkbHNOZMgf6wdK_j9vBISr2kLYE50_F5oxt9DHygAxaickcXVkgMi7e32C9vCZl_pQE0MpCt4sAc0PCjl8-NbThLAupds0gUYMbv2ejZjZN_ghQ6WW8EsJ_DzLSmD_eNfBdi)
+准备好获悉最精确的Beijing, 11 10 天预报,包括最高温度、最低温度和降水几率 - 尽在 The Weather Channel 和 Weather.com
+
+### [北京天气预报,历史气温,旅游指数,北京一周天气预报【携程...](http://www.baidu.com/link?url=BZa8Z5Ds-rPMRw0EZ0ly8wGkeEiW3HVrw0DkMBPB2xdL8dtIcOu3i6Gou-eLBrwmYuoxCdCOQSYuVdghSmPDKK)
+查看城市旅游详情 22°C21℃/34℃ 晴东北风 微风 风向情况:东北风 风力等级:微风 总降水量:0.0mm 相对湿度:98% 日出时间:4:46 日落时间:19:4736小时天气预报07:03发布 今天夜间21℃多云西...
+
+### [【北京天气预报15天_北京天气预报15天查询】-中国天气网](http://www.baidu.com/link?url=giZXlwF5kTZfCHQNvHwkiVMJvxN6YGgBro45EL8_XgdzrAdxFhhg0zhG_qhYxdRUij1d1tl0NGqqRemZp7iT_a)
+6月底前长江中下游及广西贵州等地将有强降雨天气2024-06-27 16:40 安徽今夜到后天强降雨连连 雨带明日北抬后天再度南落2024-06-27 16:20 风雨将至 北京天空出现大片乳状云 20...
+
+### [【北京天气预报15天_北京天气预报15天查询】-中国天气网](http://www.baidu.com/link?url=SMy_6WtAuEY7oKXSbWW9obyaP_VAqzF7w550INK-8CnyaX4wrr1qlCBvhjCrYbaXeRpbT40-6pGXLuo_cQrTUq)
+北方高温今明天短暂缓和 南方强降雨依然频繁2024-06-14 08:11 地质灾害风险预警:浙江福建广东广西等局地地质灾害气象风险高2024-06-14 17:32 渍涝风险气象预报:浙江福建广东...
+
+### [【北京天气预报15天_北京天气预报15天查询】-中国天气网](http://www.baidu.com/link?url=wpnRKEh7u3CA7C7n3f3wun3KHDiGBTRXtwcgVgSV9e7Y7_Ui5d8PBraMakoZpeunwTcHgkwboa_9YZco1d3t6OpiMsXeEaim0ReUKbIZCP7Qa4c7ZZ1ZO1AtnILEXR1F)
+6月底前长江中下游及广西贵州等地将有强降雨天气2024-06-27 16:40 安徽今夜到后天强降雨连连 雨带明日北抬后天再度南落2024-06-27 16:20 风雨将至 北京天空出现大片乳状云 20...
+
+### [【北京天气预报15天_北京天气预报15天查询】-中国天气网](http://www.baidu.com/link?url=Qo1T3nlH9bZ57GJ7SSOVNwSZBKnCpwkkr95rpZY4XcK-W607jhYj10I6g_DwXWzlzLsK-siZKMZNN736q26OiJVP3u0aWyXLCRbRT4etDtu)
+6月底前长江中下游及广西贵州等地将有强降雨天气2024-06-27 16:40 安徽今夜到后天强降雨连连 雨带明日北抬后天再度南落2024-06-27 16:20 风雨将至 北京天空出现大片乳状云 20...
+
+
+--------------------------------------------------------------------------------
+un_stream ai response: Beijing weather summary:
+- Current temperature: 22°C
+- Temperature range: 21°C to 34°C
+- Conditions: Clear
+- Wind: Northeast wind, light breeze
+- Humidity: 98%
+- No precipitation
+- Sunrise: 4:46 AM
+- Sunset: 7:47 PM
+
+--------------------------------------------------------------------------------
+AWELBaseManager (to User)-[]:
+
+"Query the weather in Beijing"
+>>>>>>>>AWELBaseManager Review info:
+Pass(None)
+>>>>>>>>AWELBaseManager Action report:
+execution succeeded,
+Beijing weather summary:
+- Current temperature: 22°C
+- Temperature range: 21°C to 34°C
+- Conditions: Clear
+- Wind: Northeast wind, light breeze
+- Humidity: 98%
+- No precipitation
+- Sunrise: 4:46 AM
+- Sunset: 7:47 PM
+
+--------------------------------------------------------------------------------
+```
\ No newline at end of file
diff --git a/docs/docs/agents/modules/profile/profile.md b/docs/docs/agents/modules/profile/profile.md
new file mode 100644
index 000000000..3ac50c8e2
--- /dev/null
+++ b/docs/docs/agents/modules/profile/profile.md
@@ -0,0 +1,108 @@
+# Profiling Module
+
+> Agents typically perform tasks by assuming specific roles, such as coders, teachers, and domain experts.
+> The profiling module aims to indicate the profiles of the agent roles, which are usually
+> written into the prompt to influence the LLM's behavior. Agent profiles typically encompass
+> basic information such as age, gender, and career, as well as psychological information
+> reflecting the personality of the agent, and social information detailing the relationships between agents.
+>
+> The choice of information to profile the agent is largely determined by the specific application scenario.
+> For instance, if the application aims to study human cognitive processes, then the psychological information becomes pivotal.
+
+
+## Profiles In DB-GPT Agents
+
+Profiles are essential for agents in DB-GPT, as they are used to influence the agent's behavior.
+
+You have already seen a basic example of a profile in the [Write Your Custom Agent](../../introduction/custom_agents.md) section.
+
+```python
+from dbgpt.agent import ConversableAgent, ProfileConfig
+
+class MySummarizerAgent(ConversableAgent):
+ profile: ProfileConfig = ProfileConfig(
+ # The name of the agent
+ name="Aristotle",
+ # The role of the agent
+ role="Summarizer",
+ # The core functional goals of the agent tell LLM what it can do with it.
+ goal=(
+ "Summarize answer summaries based on user questions from provided "
+ "resource information or from historical conversation memories."
+ ),
+ # Introduction and description of the agent, used for task assignment and display.
+ # If it is empty, the goal content will be used.
+ desc=(
+ "You can summarize provided text content according to user's questions"
+ " and output the summarization."
+ ),
+ )
+
+    def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+```
+
+In the above example, the `ProfileConfig` class is used to define the profile of the agent.
+It is a simple way to define the agent's profile: you just need to provide the name, role, goal, and description of the agent.
+
+Let's see the final prompt generated from a profile.
+First, let's create a profile configuration separately.
+
+```python
+from dbgpt.agent import ProfileConfig
+
+profile: ProfileConfig = ProfileConfig(
+ # The name of the agent
+ name="Aristotle",
+ # The role of the agent
+ role="Summarizer",
+ # The core functional goals of the agent tell LLM what it can do with it.
+ goal=(
+ "Summarize answer summaries based on user questions from provided "
+ "resource information or from historical conversation memories."
+ ),
+ # Introduction and description of the agent, used for task assignment and display.
+ # If it is empty, the goal content will be used.
+ desc=(
+ "You can summarize provided text content according to user's questions"
+ " and output the summarization."
+ ),
+)
+
+# Create a profile from the configuration
+real_profile = profile.create_profile()
+system_prompt = real_profile.format_system_prompt(question="What can you do?")
+user_prompt = real_profile.format_user_prompt(question="What can you do?")
+
+print(f"System Prompt: \n{system_prompt}")
+print("#" * 50)
+print(f"User Prompt: \n{user_prompt}")
+```
+Running the above code will generate the following prompts:
+
+```
+System Prompt:
+You are a Summarizer, named Aristotle, your goal is Summarize answer summaries based on user questions from provided resource information or from historical conversation memories..
+Please think step by step to achieve the goal. You can use the resources given below.
+At the same time, please strictly abide by the constraints and specifications in IMPORTANT REMINDER.
+
+*** IMPORTANT REMINDER ***
+Please answer in English.
+
+
+
+
+##################################################
+User Prompt:
+
+Question: What can you do?
+```
+As you can see, the profile is used to generate the system and user prompts, which will
+be passed to the LLM to generate the response.
+
+So you can easily inspect the real prompt generated from the profile. This is very useful
+for debugging and understanding the agent's behavior; we don't hide the details from you.
+
+
+## What Next?
+- How many ways can you create a profile for an agent? [Learn more](./profile_creation.md)
+- How is profile converted to LLM prompt? [Learn more](./profile_to_prompt.md)
\ No newline at end of file
diff --git a/docs/docs/agents/modules/profile/profile_creation.md b/docs/docs/agents/modules/profile/profile_creation.md
new file mode 100644
index 000000000..c335bf4d9
--- /dev/null
+++ b/docs/docs/agents/modules/profile/profile_creation.md
@@ -0,0 +1,187 @@
+# Profile Creation
+
+In this section, you will learn more about creating a profile for your agent.
+
+## Method 1: Using the ProfileConfig Class
+
+As mentioned in the [Profile](profile.md) section, the `ProfileConfig` class is used to
+define the profile of the agent. It is a simple way to define the agent's profile.
+
+Formally, the `ProfileConfig` class supports the following parameters:
+- `name`: The name of the agent.
+- `role`: What is the role of the agent.
+- `goal`: The core functional goals of the agent; they tell the LLM what it can do.
+- `desc`: Introduction and description of the agent, used for task assignment and display. If it is empty, the goal content will be used.
+- `constraints`: Constraints and reasoning restrictions for the agent; it can contain multiple entries.
+- `expand_prompt`: Extra content to add to the prompt; you can pass custom text to be appended to the prompt.
+- `examples`: Some examples to include in your prompt.
+
+This is a full example of creating a profile using the `ProfileConfig` class:
+
+```python
+from dbgpt.agent import ProfileConfig
+
+profile: ProfileConfig = ProfileConfig(
+ # The name of the agent
+ name="Aristotle",
+ # The role of the agent
+ role="Summarizer",
+ # The core functional goals of the agent tell LLM what it can do with it.
+ goal=(
+ "Summarize answer summaries based on user questions from provided "
+ "resource information or from historical conversation memories."
+ ),
+ # Constraints of the agent
+ constraints=[
+ "Prioritize the summary of answers to user questions from the improved "
+ "resource text. If no relevant information is found, summarize it from "
+ "the historical dialogue memory given. It is forbidden to make up your "
+ "own.",
+ "You need to first detect user's question that you need to answer with "
+ "your summarization.",
+ "Extract the provided text content used for summarization.",
+ "Then you need to summarize the extracted text content.",
+ "Output the content of summarization ONLY related to user's question. "
+ "The output language must be the same to user's question language.",
+ "If you think the provided text content is not related to user "
+ "questions at all, ONLY output 'Did not find the information you "
+ "want.'!!.",
+ ],
+ # Introduction and description of the agent, used for task assignment and display.
+ # If it is empty, the goal content will be used.
+ desc=(
+ "You can summarize provided text content according to user's questions"
+ " and output the summarization."
+ ),
+ expand_prompt="Keep your answer concise",
+ # Some examples in your prompt
+ examples=""
+)
+```
+In the above example, we can see `constraints` and `expand_prompt` added to the profile.
+
+Let's see the final prompt generated from a profile.
+
+```python
+real_profile = profile.create_profile()
+system_prompt = real_profile.format_system_prompt(question="What can you do?")
+user_prompt = real_profile.format_user_prompt(question="What can you do?")
+print(f"System Prompt: \n{system_prompt}")
+print("#" * 50)
+print(f"User Prompt: \n{user_prompt}")
+```
+
+Running the above code will generate the following prompts:
+
+```
+System Prompt:
+You are a Summarizer, named Aristotle, your goal is Summarize answer summaries based on user questions from provided resource information or from historical conversation memories..
+Please think step by step to achieve the goal. You can use the resources given below.
+At the same time, please strictly abide by the constraints and specifications in IMPORTANT REMINDER.
+Keep your answer concise
+
+*** IMPORTANT REMINDER ***
+Please answer in English.
+
+1. Prioritize the summary of answers to user questions from the improved resource text. If no relevant information is found, summarize it from the historical dialogue memory given. It is forbidden to make up your own.
+2. You need to first detect user's question that you need to answer with your summarization.
+3. Extract the provided text content used for summarization.
+4. Then you need to summarize the extracted text content.
+5. Output the content of summarization ONLY related to user's question. The output language must be the same to user's question language.
+6. If you think the provided text content is not related to user questions at all, ONLY output 'Did not find the information you want.'!!.
+
+
+
+##################################################
+User Prompt:
+
+Question: What can you do?
+```
+
+## Method 2: Using `ProfileFactory`
+
+Using a `ProfileFactory` is a more flexible way to create a profile.
+
+
+### Create a Profile Factory
+
+```python
+from typing import Optional
+from dbgpt.agent import ProfileFactory, Profile, DefaultProfile
+
+class MyProfileFactory(ProfileFactory):
+ def create_profile(
+ self,
+ profile_id: int,
+ name: Optional[str] = None,
+ role: Optional[str] = None,
+ goal: Optional[str] = None,
+ prefer_prompt_language: Optional[str] = None,
+ prefer_model: Optional[str] = None,
+ ) -> Optional[Profile]:
+ return DefaultProfile(
+ name="Aristotle",
+ role="Summarizer",
+ goal=(
+ "Summarize answer summaries based on user questions from provided "
+ "resource information or from historical conversation memories."
+ ),
+ desc=(
+ "You can summarize provided text content according to user's questions"
+ " and output the summarization."
+ ),
+ expand_prompt="Keep your answer concise",
+ examples=""
+ )
+```
+
+### Use the Profile Factory
+
+To use the profile factory, pass the factory to the `ProfileConfig` class.
+In this case, you don't need to provide the name, role, goal, and description of the agent.
+
+```python
+from dbgpt.agent import ProfileConfig
+
+profile: ProfileConfig = ProfileConfig(
+ factory=MyProfileFactory(),
+)
+```
+Let's see the final prompt generated from a profile.
+
+```python
+real_profile = profile.create_profile()
+system_prompt = real_profile.format_system_prompt(question="What can you do?")
+user_prompt = real_profile.format_user_prompt(question="What can you do?")
+print(f"System Prompt: \n{system_prompt}")
+print("#" * 50)
+print(f"User Prompt: \n{user_prompt}")
+```
+
+Running the above code will generate the following prompts:
+
+```
+System Prompt:
+You are a Summarizer, named Aristotle, your goal is Summarize answer summaries based on user questions from provided resource information or from historical conversation memories..
+Please think step by step to achieve the goal. You can use the resources given below.
+At the same time, please strictly abide by the constraints and specifications in IMPORTANT REMINDER.
+Keep your answer concise
+
+*** IMPORTANT REMINDER ***
+Please answer in English.
+
+
+
+
+##################################################
+User Prompt:
+
+Question: What can you do?
+```
+
+## Summary
+
+In this section, you learned how to create a profile for your agent using the
+`ProfileConfig` class and a `ProfileFactory`.
+These methods make it flexible and easy to define an agent's profile, especially
+when you need to create thousands of agents for different scenarios.
\ No newline at end of file
diff --git a/docs/docs/agents/modules/profile/profile_dynamic.md b/docs/docs/agents/modules/profile/profile_dynamic.md
new file mode 100644
index 000000000..d462eba5b
--- /dev/null
+++ b/docs/docs/agents/modules/profile/profile_dynamic.md
@@ -0,0 +1,88 @@
+# Dynamic Profile
+
+In the previous sections, we introduced how to generate prompts from the profile.
+Sometimes you just want to modify part of the profile in a simple way; here we
+introduce how to create a dynamic profile.
+
+## Dynamic Fields Of Profile
+
+Here we use `DynConfig` to create a dynamic profile, with which you can override fields of the original profile.
+
+Create a Python file named `profile_dynamic.py` and add the following code:
+
+```python
+from dbgpt.agent import ProfileConfig, DynConfig
+
+profile: ProfileConfig = ProfileConfig(
+ # The name of the agent
+ name=DynConfig(
+ "Aristotle",
+ key="summary_profile_name",
+ provider="env"
+ ),
+ # The role of the agent
+ role="Summarizer",
+)
+```
+In the above example, we use `DynConfig` to create a dynamic profile field `name`: the
+default value is "Aristotle", the key is "summary_profile_name", and the provider is "env".
+`provider="env"` means the value of the field will be read from the environment variable named by the key.
+
+Then, you can create a profile from the configuration and generate the prompt.
+
+```python
+real_profile = profile.create_profile()
+system_prompt = real_profile.format_system_prompt(question="What can you do?")
+user_prompt = real_profile.format_user_prompt(question="What can you do?")
+print(f"System Prompt: \n{system_prompt}")
+print("#" * 50)
+print(f"User Prompt: \n{user_prompt}")
+```
+
+Running the above code without setting the environment variable:
+```bash
+python profile_dynamic.py
+```
+
+The output will be:
+```
+System Prompt:
+You are a Summarizer, named Aristotle, your goal is None.
+Please think step by step to achieve the goal. You can use the resources given below.
+At the same time, please strictly abide by the constraints and specifications in IMPORTANT REMINDER.
+
+*** IMPORTANT REMINDER ***
+Please answer in English.
+
+
+
+
+##################################################
+User Prompt:
+
+Question: What can you do?
+```
+
+Note that the goal renders as `None` because we did not provide a `goal` in this profile. Now run the above code with the environment variable set:
+```bash
+summary_profile_name="Plato" python profile_dynamic.py
+```
+
+The output will be:
+```
+System Prompt:
+You are a Summarizer, named Plato, your goal is None.
+Please think step by step to achieve the goal. You can use the resources given below.
+At the same time, please strictly abide by the constraints and specifications in IMPORTANT REMINDER.
+
+*** IMPORTANT REMINDER ***
+Please answer in English.
+
+
+
+
+##################################################
+User Prompt:
+
+Question: What can you do?
+```
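+
+You can make other fields dynamic in the same way. For example, here is a sketch that also
+reads the `goal` from an environment variable (the key name `summary_profile_goal` is
+hypothetical):
+
+```python
+from dbgpt.agent import DynConfig, ProfileConfig
+
+profile: ProfileConfig = ProfileConfig(
+    name=DynConfig("Aristotle", key="summary_profile_name", provider="env"),
+    role="Summarizer",
+    # The default goal, overridable via the environment variable
+    goal=DynConfig(
+        "Summarize answers based on user questions",
+        key="summary_profile_goal",
+        provider="env",
+    ),
+)
+```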
\ No newline at end of file
diff --git a/docs/docs/agents/modules/profile/profile_to_prompt.md b/docs/docs/agents/modules/profile/profile_to_prompt.md
new file mode 100644
index 000000000..5b02431fe
--- /dev/null
+++ b/docs/docs/agents/modules/profile/profile_to_prompt.md
@@ -0,0 +1,136 @@
+# Profile To Prompt
+
+In previous sections, we introduced how to create a profile for your agent and saw
+how to generate prompts from the profile.
+
+In this section, we will introduce more about how to generate prompts from the profile.
+
+## What's The Prompt Template
+
+In previous sections, we used the internal template to generate the prompt; let's look at the template:
+
+```python
+from dbgpt.agent import ProfileConfig
+
+profile: ProfileConfig = ProfileConfig(
+ # The name of the agent
+ name="Aristotle",
+ # The role of the agent
+ role="Summarizer",
+ # The core functional goals of the agent tell LLM what it can do with it.
+ goal=(
+ "Summarize answer summaries based on user questions from provided "
+ "resource information or from historical conversation memories."
+ ),
+ # Introduction and description of the agent, used for task assignment and display.
+ # If it is empty, the goal content will be used.
+ desc=(
+ "You can summarize provided text content according to user's questions"
+ " and output the summarization."
+ ),
+)
+
+real_profile = profile.create_profile()
+
+print(f"System Prompt Template: \n{real_profile.get_system_prompt_template()}")
+print("#" * 50)
+print(f"User Prompt Template: \n{real_profile.get_user_prompt_template()}")
+```
+
+Running the above code will generate the following output:
+```
+System Prompt Template:
+You are a {{ role }}, {% if name %}named {{ name }}, {% endif %}your goal is {{ goal }}.
+Please think step by step to achieve the goal. You can use the resources given below.
+At the same time, please strictly abide by the constraints and specifications in IMPORTANT REMINDER.
+{% if resource_prompt %}{{ resource_prompt }}
+{% endif %}{% if expand_prompt %}{{ expand_prompt }}
+{% endif %}
+*** IMPORTANT REMINDER ***
+{% if language == 'zh' %}Please answer in simplified Chinese.
+{% else %}Please answer in English.
+{% endif %}
+{% if constraints %}{% for constraint in constraints %}{{ loop.index }}. {{ constraint }}
+{% endfor %}{% endif %}
+{% if examples %}You can refer to the following examples:
+{{ examples }}{% endif %}
+{% if out_schema %} {{ out_schema }} {% endif %}
+##################################################
+User Prompt Template:
+{% if most_recent_memories %}Most recent observations:
+{{ most_recent_memories }}
+{% endif %}
+{% if question %}Question: {{ question }}
+{% endif %}
+```
+
+The template is a Jinja2 template; for now, we use only Jinja2 in agents for its simplicity and flexibility.
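+
+If you are not familiar with Jinja2, here is a tiny standalone sketch of how such a
+template renders (using the `jinja2` package directly; this is illustration only, not
+DB-GPT's internal code):
+
+```python
+from jinja2 import Template
+
+template = Template(
+    "You are a {{ role }}, {% if name %}named {{ name }}, {% endif %}"
+    "your goal is {{ goal }}."
+)
+# Variables not passed to render() are treated as undefined (empty) by default
+print(template.render(role="Summarizer", name="Aristotle", goal="Summarize answers"))
+# Output: You are a Summarizer, named Aristotle, your goal is Summarize answers.
+```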
+
+## Use Your Custom Prompt Template
+
+Firstly, create a simple system prompt template and user prompt template:
+
+```python
+my_system_prompt_template = """\
+You are a {{ role }}, {% if name %}named {{ name }}, {% endif %}your goal is {{ goal }}.
+Please think step by step to achieve the goal. You can use the resources given below.
+At the same time, please strictly abide by the constraints and specifications in IMPORTANT REMINDER.
+
+*** IMPORTANT REMINDER ***
+{% if language == 'zh' %}\
+Please answer in simplified Chinese.
+{% else %}\
+Please answer in English.
+{% endif %}\
+""" # noqa
+
+my_user_prompt_template = "User question: {{ question }}"
+```
+
+Then, create a profile with the custom prompt template:
+
+```python
+from dbgpt.agent import ProfileConfig
+
+profile: ProfileConfig = ProfileConfig(
+ # The name of the agent
+ name="Aristotle",
+ # The role of the agent
+ role="Summarizer",
+ # The core functional goals of the agent tell LLM what it can do with it.
+ goal=(
+ "Summarize answer summaries based on user questions from provided "
+ "resource information or from historical conversation memories."
+ ),
+ # Introduction and description of the agent, used for task assignment and display.
+ # If it is empty, the goal content will be used.
+ desc=(
+ "You can summarize provided text content according to user's questions"
+ " and output the summarization."
+ ),
+ system_prompt_template=my_system_prompt_template,
+ user_prompt_template=my_user_prompt_template,
+)
+
+real_profile = profile.create_profile()
+
+system_prompt = real_profile.format_system_prompt(question="What can you do?")
+user_prompt = real_profile.format_user_prompt(question="What can you do?")
+print(f"System Prompt: \n{system_prompt}")
+print("#" * 50)
+print(f"User Prompt: \n{user_prompt}")
+```
+
+Running the above code will generate the following prompts:
+```
+System Prompt:
+You are a Summarizer, named Aristotle, your goal is Summarize answer summaries based on user questions from provided resource information or from historical conversation memories..
+Please think step by step to achieve the goal. You can use the resources given below.
+At the same time, please strictly abide by the constraints and specifications in IMPORTANT REMINDER.
+
+*** IMPORTANT REMINDER ***
+Please answer in English.
+
+##################################################
+User Prompt:
+User question: What can you do?
+```
\ No newline at end of file
diff --git a/docs/docs/agents/modules/resource/database.md b/docs/docs/agents/modules/resource/database.md
new file mode 100644
index 000000000..4e4df6f1c
--- /dev/null
+++ b/docs/docs/agents/modules/resource/database.md
@@ -0,0 +1,17 @@
+# Database Resource
+
+The database resource `DBResource` is a resource for interacting with databases.
+It is a subclass of the `Resource` class.
+
+Here are some implementations of the `DBResource` class:
+- `RDBMSConnectorResource`: A resource that can be used to connect to a relational database management system (RDBMS) like MySQL, PostgreSQL, etc.
+- `SQLiteDBResource`: A specific implementation of the `RDBMSConnectorResource` class that can be used to connect to a SQLite database.
+- `DatasourceResource`: A resource that can be used to connect to various data sources in DB-GPT.
+It only works when you run your agent in the DB-GPT environment (running in the DB-GPT webserver).
+
+In the previous section, [Agents With Database](../../introduction/database), we introduced
+how to use the database resource in a DB-GPT agent; refer to it for more details.
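+
+For a quick taste before the section below is filled in, here is a minimal sketch of
+creating a SQLite database resource (the constructor arguments follow DB-GPT's example
+code; treat them as assumptions if your version differs):
+
+```python
+from dbgpt.agent.resource import SQLiteDBResource
+
+# A display name for the resource and the path of a local SQLite database file
+db_resource = SQLiteDBResource("SQLite Database", "/tmp/test.db")
+```
+
+You can then bind `db_resource` to an agent with `.bind(db_resource)` in the build chain,
+as with other resources.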
+
+## How It Works
+
+(Coming soon...)
\ No newline at end of file
diff --git a/docs/docs/agents/modules/resource/knowledge.md b/docs/docs/agents/modules/resource/knowledge.md
new file mode 100644
index 000000000..108695c22
--- /dev/null
+++ b/docs/docs/agents/modules/resource/knowledge.md
@@ -0,0 +1,16 @@
+# Knowledge Resource
+
+Sometimes, an agent needs to access external knowledge bases to enrich its knowledge.
+Here we introduce the `RetrieverResource`, a resource that can be used to retrieve
+knowledge from external knowledge bases.
+
+Currently, there are two types of `RetrieverResource`:
+- `RetrieverResource`: A resource that can be used to retrieve knowledge from external knowledge bases.
+- `KnowledgeSpaceRetrieverResource`: A specific implementation of the `RetrieverResource`
+class that can be used to retrieve knowledge from DB-GPT's knowledge space.
+It only works when you run your agent in the DB-GPT environment (running in the DB-GPT webserver).
+
+
+## Using RetrieverResource
+
+(Coming soon...)
\ No newline at end of file
diff --git a/docs/docs/agents/modules/resource/pack.md b/docs/docs/agents/modules/resource/pack.md
new file mode 100644
index 000000000..32a2a0991
--- /dev/null
+++ b/docs/docs/agents/modules/resource/pack.md
@@ -0,0 +1,7 @@
+# Resource Pack
+
+The resource pack is a collection of resources that can be used by agents. It usually
+contains some tools, databases, knowledge bases, etc.
+
+In the previous section, [tools#wrap-your-tools-to-toolpack](../../introduction/tools#wrap-your-tools-to-toolpack),
+we introduced how to wrap tools into a `ToolPack`; refer to it for more details.
\ No newline at end of file
diff --git a/docs/docs/agents/modules/resource/resource.md b/docs/docs/agents/modules/resource/resource.md
new file mode 100644
index 000000000..e9628438d
--- /dev/null
+++ b/docs/docs/agents/modules/resource/resource.md
@@ -0,0 +1,25 @@
+# Resource Introduction
+
+Resources are a bridge for DB-GPT agents to interact with the outside world. They include
+tools, databases, knowledge bases, etc.
+
+## What Is Included In Resources?
+
+- **Tools**: Tools similar to those used in LLM function calling.
+- **Databases**: You can query and analyze data from databases.
+- **Knowledge Bases**: External knowledge bases can be used to enrich the knowledge of the agent.
+- **APIs**: You can call third-party APIs to get data or perform operations.
+- **Files**: You can read and write some files.
+- **Third-party Plugins**: You can use third-party plugins to enrich the functionality of the agent.
+- ...
+
+## Resource Pack
+
+The resource pack is a collection of resources that can be used by agents. It usually contains some
+tools, databases, knowledge bases, etc.
+
+You can wrap several tools into a `ToolPack`, or wrap all kinds of resources into a `ResourcePack`.
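+
+For example, here is a minimal sketch (`simple_calculator` stands for any function tool
+created with the `@tool` decorator, as shown in the Tool Resource section; the
+`ResourcePack` constructor argument is an assumption):
+
+```python
+from dbgpt.agent.resource import ResourcePack, ToolPack, tool
+
+
+@tool
+def simple_calculator(first_number: int, second_number: int) -> int:
+    """Add two numbers."""
+    return first_number + second_number
+
+
+# Wrap tools into a tool pack
+tools = ToolPack([simple_calculator])
+# Wrap one or more resources (tool packs, databases, ...) into a resource pack
+resources = ResourcePack(resources=[tools])
+```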
+
+## What's Next?
+
+In the following sections, we will introduce most of the resources that can be used in DB-GPT.
\ No newline at end of file
diff --git a/docs/docs/agents/modules/resource/tools.md b/docs/docs/agents/modules/resource/tools.md
new file mode 100644
index 000000000..b47d6e0c6
--- /dev/null
+++ b/docs/docs/agents/modules/resource/tools.md
@@ -0,0 +1,503 @@
+# Tool Resource
+
+In the previous [Tool Use](../../introduction/tools) section, we introduced how to use tools in an agent.
+In this section, we will introduce tools in detail.
+
+## Function Tools
+
+### Create Tool With `@tool` Decorator
+
+You can create a tool by simply adding the `@tool` decorator to a function.
+
+```python
+from dbgpt.agent.resource import tool
+
+@tool
+def simple_calculator(first_number: int, second_number: int, operator: str) -> float:
+ """Simple calculator tool. Just support +, -, *, /."""
+ if isinstance(first_number, str):
+ first_number = int(first_number)
+ if isinstance(second_number, str):
+ second_number = int(second_number)
+ if operator == "+":
+ return first_number + second_number
+ elif operator == "-":
+ return first_number - second_number
+ elif operator == "*":
+ return first_number * second_number
+ elif operator == "/":
+ return first_number / second_number
+ else:
+ raise ValueError(f"Invalid operator: {operator}")
+```
+
+Let's look at `simple_calculator` in more detail:
+
+```python
+print("Type: ", type(simple_calculator._tool))
+print("")
+print(simple_calculator._tool)
+```
+Run the code above, and you will see output like this:
+
+```bash
+Type:  <class 'dbgpt.agent.resource.tool.base.FunctionTool'>
+
+Tool: simple_calculator (Simple calculator tool. Just support +, -, *, /.)
+```
+
+As you can see, the `_tool` of `simple_calculator` is a `FunctionTool` object, and the description of the tool is `Simple calculator tool. Just support +, -, *, /.`.
+
+### How Does Agent Use Tools?
+
+#### Generate Tool Prompts
+
+You can call the `get_prompt` method of the tool to generate the prompt for the tool.
+
+```python
+async def show_prompts():
+ from dbgpt.agent.resource import BaseTool
+    tool: BaseTool = simple_calculator._tool
+
+ tool_prompt = await tool.get_prompt()
+ openai_tool_prompt = await tool.get_prompt(prompt_type="openai")
+ print(f"Tool Prompt: \n{tool_prompt}\n")
+ print(f"OpenAI Tool Prompt: \n{openai_tool_prompt}\n")
+
+
+if __name__ == "__main__":
+ import asyncio
+ asyncio.run(show_prompts())
+```
+
+Run the code above, and you will see output like this:
+
+```bash
+Tool Prompt:
+simple_calculator: Call this tool to interact with the simple_calculator API. What is the simple_calculator API useful for? Simple calculator tool. Just support +, -, *, /. Parameters: [{"name": "first_number", "type": "integer", "description": "First Number", "required": true}, {"name": "second_number", "type": "integer", "description": "Second Number", "required": true}, {"name": "operator", "type": "string", "description": "Operator", "required": true}]
+
+OpenAI Tool Prompt:
+simple_calculator: Call this tool to interact with the simple_calculator API. What is the simple_calculator API useful for? Simple calculator tool. Just support +, -, *, /. Parameters: {"type": "object", "properties": {"first_number": {"type": "integer", "description": "First Number"}, "second_number": {"type": "integer", "description": "Second Number"}, "operator": {"type": "string", "description": "Operator"}}, "required": ["first_number", "second_number", "operator"]}
+
+```
+
+In the above code, you can see two types of prompts generated by the tool: one is the
+default prompt, and the other is the OpenAI prompt (`prompt_type="openai"`; the format is similar to the OpenAI function calling API).
+
+So, you may have already guessed it: the LLM only needs to read the tool descriptions in
+the prompt, and then state, in its response, which tool to use and what parameters to pass
+to it (you can see an example of such a response in the output later in this section).
+
+Why is `get_prompt` an async function? Because it is an abstract method of the `Resource` class,
+implemented in the `BaseTool` class. If the resource is a knowledge base, building the prompt
+may require retrieving from the knowledge base, which can take some time, so it is async for better performance.
+
+#### Wrap Tools To `ToolPack`
+
+To test multiple tools, let's write another tool that helps LLMs count the number of files
+in a directory, and then wrap the two tools in a `ToolPack`.
+
+```python
+import os
+from typing_extensions import Annotated, Doc
+
+from dbgpt.agent.resource import ToolPack, tool
+
+@tool
+def count_directory_files(path: Annotated[str, Doc("The directory path")]) -> int:
+ """Count the number of files in a directory."""
+ if not os.path.isdir(path):
+ raise ValueError(f"Invalid directory path: {path}")
+ return len(os.listdir(path))
+
+tools = ToolPack([simple_calculator, count_directory_files])
+```
+
+Let's see the prompt of the `ToolPack`,
+
+```python
+async def show_tool_pack_prompts():
+    # Just show the default prompt type
+    tool_pack_prompt = await tools.get_prompt()
+ print(tool_pack_prompt)
+
+if __name__ == "__main__":
+ import asyncio
+ asyncio.run(show_tool_pack_prompts())
+```
+
+Run the code above, and you will see output like this:
+
+```bash
+You have access to the following APIs:
+simple_calculator: Call this tool to interact with the simple_calculator API. What is the simple_calculator API useful for? Simple calculator tool. Just support +, -, *, /. Parameters: [{"name": "first_number", "type": "integer", "description": "First Number", "required": true}, {"name": "second_number", "type": "integer", "description": "Second Number", "required": true}, {"name": "operator", "type": "string", "description": "Operator", "required": true}]
+count_directory_files: Call this tool to interact with the count_directory_files API. What is the count_directory_files API useful for? Count the number of files in a directory. Parameters: [{"name": "path", "type": "string", "description": "The directory path", "required": true}]
+```
+
+#### Use Tools In Your Agent
+
+In the previous [Tool Use](../../introduction/tools#use-tools-in-your-agent) section, we introduced how to use tools in an agent.
+
+
+Just like the example below:
+```python
+import asyncio
+import os
+from dbgpt.agent import AgentContext, AgentMemory, LLMConfig, UserProxyAgent
+from dbgpt.agent.expand.tool_assistant_agent import ToolAssistantAgent
+from dbgpt.model.proxy import OpenAILLMClient
+
+async def main():
+ llm_client = OpenAILLMClient(
+ model_alias="gpt-3.5-turbo", # or other models, eg. "gpt-4o"
+ api_base=os.getenv("OPENAI_API_BASE"),
+ api_key=os.getenv("OPENAI_API_KEY"),
+ )
+ context: AgentContext = AgentContext(
+ conv_id="test123", language="en", temperature=0.5, max_new_tokens=2048
+ )
+ agent_memory = AgentMemory()
+
+ user_proxy = await UserProxyAgent().bind(agent_memory).bind(context).build()
+
+ tool_man = (
+ await ToolAssistantAgent()
+ .bind(context)
+ .bind(LLMConfig(llm_client=llm_client))
+ .bind(agent_memory)
+ .bind(tools)
+ .build()
+ )
+
+ await user_proxy.initiate_chat(
+ recipient=tool_man,
+ reviewer=user_proxy,
+ message="Calculate the product of 10 and 99",
+ )
+
+ await user_proxy.initiate_chat(
+ recipient=tool_man,
+ reviewer=user_proxy,
+ message="Count the number of files in /tmp",
+ )
+
+if __name__ == "__main__":
+ asyncio.run(main())
+```
+
+Run the code above, and you will see output like this:
+
+```bash
+--------------------------------------------------------------------------------
+User (to LuBan)-[]:
+
+"Calculate the product of 10 and 99"
+
+--------------------------------------------------------------------------------
+un_stream ai response: {
+ "thought": "To calculate the product of 10 and 99, we need to use a tool that can perform multiplication operation.",
+ "tool_name": "simple_calculator",
+ "args": {
+ "first_number": 10,
+ "second_number": 99,
+ "operator": "*"
+ }
+}
+
+--------------------------------------------------------------------------------
+LuBan (to User)-[gpt-3.5-turbo]:
+
+"{\n \"thought\": \"To calculate the product of 10 and 99, we need to use a tool that can perform multiplication operation.\",\n \"tool_name\": \"simple_calculator\",\n \"args\": {\n \"first_number\": 10,\n \"second_number\": 99,\n \"operator\": \"*\"\n }\n}"
+>>>>>>>>LuBan Review info:
+Pass(None)
+>>>>>>>>LuBan Action report:
+execution succeeded,
+990
+
+--------------------------------------------------------------------------------
+
+--------------------------------------------------------------------------------
+User (to LuBan)-[]:
+
+"Count the number of files in /tmp"
+
+--------------------------------------------------------------------------------
+un_stream ai response: {
+ "thought": "To count the number of files in the directory /tmp, we can use the count_directory_files tool.",
+ "tool_name": "count_directory_files",
+ "args": {
+ "path": "/tmp"
+ }
+}
+
+--------------------------------------------------------------------------------
+LuBan (to User)-[gpt-3.5-turbo]:
+
+"{\n \"thought\": \"To count the number of files in the directory /tmp, we can use the count_directory_files tool.\",\n \"tool_name\": \"count_directory_files\",\n \"args\": {\n \"path\": \"/tmp\"\n }\n}"
+>>>>>>>>LuBan Review info:
+Pass(None)
+>>>>>>>>LuBan Action report:
+execution succeeded,
+16
+
+```
+
+### More Details About `@tool` Decorator
+
+`@tool` is a decorator for creating tools. By default, it parses the function signature and
+docstring to generate the tool description, but you can also customize these according to your needs.
+
+### Customize Tool Name And Description
+
+You can customize the tool name and description via the `name` and `description` parameters of the `@tool` decorator.
+```python
+from dbgpt.agent.resource import tool
+
+@tool("my_two_sum", description="Add two numbers and return the sum.")
+def two_sum(a: int, b: int) -> int:
+ return a + b
+```
+
+The prompt of the `two_sum` tool will be like this:
+```bash
+my_two_sum: Call this tool to interact with the my_two_sum API. What is the my_two_sum API useful for? Add two numbers and return the sum. Parameters: [{"name": "a", "type": "integer", "description": "A", "required": true}, {"name": "b", "type": "integer", "description": "B", "required": true}]
+```
+
+### Customize Tool Parameters
+
+You can customize the tool parameters via the `args` parameter of the `@tool` decorator.
+
+```python
+from dbgpt.agent.resource import tool
+
+@tool(
+ args={
+ "a": {
+ "type": "integer",
+ "description": "First number to add",
+ "required": True,
+ },
+ "b": {
+ "type": "integer",
+ "description": "Second number to add",
+ "required": True,
+ },
+ }
+)
+def two_sum(a: int, b: int) -> int:
+ """Add two numbers and return the sum."""
+ return a + b
+```
+
+The prompt of the `two_sum` tool will be like this:
+```bash
+two_sum: Call this tool to interact with the two_sum API. What is the two_sum API useful for? Add two numbers and return the sum. Parameters: [{"name": "a", "type": "integer", "description": "First number to add", "required": true}, {"name": "b", "type": "integer", "description": "Second number to add", "required": true}]
+```
+
+You can also use a `pydantic` model to define the tool parameters via the `args_schema` parameter of the `@tool` decorator.
+
+```python
+from pydantic import BaseModel, Field
+from dbgpt.agent.resource import tool
+
+class ArgsSchema(BaseModel):
+ a: int = Field(description="The first number.")
+ b: int = Field(description="The second number.")
+
+
+@tool(args_schema=ArgsSchema)
+def two_sum(a: int, b: int) -> int:
+ """Add two numbers and return the sum."""
+ return a + b
+```
+
+The prompt of the `two_sum` tool will be like this:
+```bash
+two_sum: Call this tool to interact with the two_sum API. What is the two_sum API useful for? Add two numbers and return the sum. Parameters: [{"name": "a", "type": "integer", "description": "The first number.", "required": true}, {"name": "b", "type": "integer", "description": "The second number.", "required": true}]
+```
+
+You can also use `Annotated` and `Doc` to define the tool parameters.
+
+```python
+from typing_extensions import Annotated, Doc
+
+from dbgpt.agent.resource import tool
+
+@tool
+def two_sum(
+ a: Annotated[int, Doc("The first number.")],
+ b: Annotated[int, Doc("The second number.")],
+) -> int:
+ """Add two numbers and return the sum."""
+ return a + b
+```
+
+### Run Function Tool Directly
+
+Can I run the function tool directly? Yes, you can. Just call the function directly.
+```python
+from typing_extensions import Annotated, Doc
+
+from dbgpt.agent.resource import tool
+
+@tool
+def two_sum(
+ a: Annotated[int, Doc("The first number.")],
+ b: Annotated[int, Doc("The second number.")],
+) -> int:
+ """Add two numbers and return the sum."""
+ return a + b
+
+print("Result: ", two_sum(1, 2))
+```
+
+Run the code above, and you will see output like this:
+
+```bash
+Result: 3
+```
+
+### Create Tool With `FunctionTool` Class
+
+You can also create a tool with the `FunctionTool` class by passing a tool name and a function.
+
+```python
+from dbgpt.agent.resource import FunctionTool
+
+
+def two_sum(a: int, b: int) -> int:
+ """Add two numbers and return the sum."""
+ return a + b
+
+
+tool = FunctionTool("two_sum", two_sum)
+
+
+async def show_prompts():
+ print(await tool.get_prompt())
+
+
+if __name__ == "__main__":
+ import asyncio
+
+ asyncio.run(show_prompts())
+```
+
+Run the code above, and you will see output like this:
+
+```bash
+two_sum: Call this tool to interact with the two_sum API. What is the two_sum API useful for? Add two numbers and return the sum. Parameters: [{"name": "a", "type": "integer", "description": "A", "required": true}, {"name": "b", "type": "integer", "description": "B", "required": true}]
+```
+
+You can also pass the `args`, `args_schema`, `name`, and `description` parameters to `FunctionTool`
+to customize the tool, just like with the `@tool` decorator.
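+
+For example, a minimal sketch based on the parameters listed above (the keyword names
+mirror the `@tool` examples earlier; treat them as assumptions if your version differs):
+
+```python
+from dbgpt.agent.resource import FunctionTool
+
+
+def two_sum(a: int, b: int) -> int:
+    """Add two numbers and return the sum."""
+    return a + b
+
+
+# Customize the exposed name and description, as with the @tool decorator
+tool = FunctionTool(
+    "my_two_sum",
+    two_sum,
+    description="Add two numbers and return the sum.",
+)
+```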
+
+## Writing Your Own Tools
+
+`FunctionTool` inherits from `BaseTool`, and you can also create your own tool class by inheriting from `BaseTool`.
+
+In the following example, we create a `MyTwoSumTool` class to add two numbers and return the sum.
+
+
+First, prepare an internal function `_two_sum` to add two numbers and return the sum,
+and define the tool parameters `_TWO_SUM_ARGS` with the `ToolParameter` class.
+
+```python
+from dbgpt.agent.resource import ToolParameter
+
+def _two_sum(a: int, b: int) -> int:
+ return a + b
+
+_TWO_SUM_ARGS = {
+ "a": ToolParameter(
+ name="a",
+ type="integer",
+ required=True,
+ description="First number to add",
+ ),
+ "b": ToolParameter(
+ name="b",
+ type="integer",
+ required=True,
+ description="Second number to add",
+ ),
+}
+```
+
+Then create the `MyTwoSumTool` class, inheriting from `BaseTool`, and implement the `execute` method.
+
+```python
+from typing import Any, Dict, Optional
+from dbgpt.agent.resource import BaseTool, ToolParameter
+
+class MyTwoSumTool(BaseTool):
+
+ def __init__(self, name: Optional[str] = None) -> None:
+ self._name = name or "two_sum"
+ self._args = _TWO_SUM_ARGS
+
+ @property
+ def name(self) -> str:
+ """Return the name of the tool."""
+ return self._name
+
+ @property
+ def description(self) -> str:
+ return "Add two numbers and return the sum."
+
+ @property
+ def args(self) -> Dict[str, ToolParameter]:
+ return self._args
+
+ @property
+ def is_async(self) -> bool:
+ """Return whether the tool is asynchronous."""
+ return False
+
+ def execute(
+ self,
+ *args,
+ resource_name: Optional[str] = None,
+ **kwargs,
+ ) -> Any:
+ return _two_sum(*args, **kwargs)
+
+ async def async_execute(
+ self,
+ *args,
+ resource_name: Optional[str] = None,
+ **kwargs,
+ ) -> Any:
+ raise ValueError("The function is not asynchronous")
+```
+
+Now, you can show the prompt of the `MyTwoSumTool` like this:
+
+```python
+# Just create an instance of MyTwoSumTool
+tool = MyTwoSumTool()
+
+async def show_prompts():
+ print(await tool.get_prompt())
+
+
+if __name__ == "__main__":
+ import asyncio
+
+ asyncio.run(show_prompts())
+```
+
+Run the code above, and you will see output like this:
+
+```bash
+two_sum: Call this tool to interact with the two_sum API. What is the two_sum API useful for? Add two numbers and return the sum. Parameters: [{"name": "a", "type": "integer", "description": "First number to add", "required": true}, {"name": "b", "type": "integer", "description": "Second number to add", "required": true}]
+```
+
+## Summary
+
+In this section, we have introduced how to create tools with the `@tool` decorator and `FunctionTool` class.
+And we have also introduced how to customize the tool name, description, and parameters.
+Finally, we have shown how to create a tool class inherited from `BaseTool` to create a tool.
\ No newline at end of file
diff --git a/docs/docs/awel/cookbook/write_your_chat_database.md b/docs/docs/awel/cookbook/write_your_chat_database.md
index 1e9e9d26d..9e2b57422 100644
--- a/docs/docs/awel/cookbook/write_your_chat_database.md
+++ b/docs/docs/awel/cookbook/write_your_chat_database.md
@@ -91,11 +91,9 @@ shutil.rmtree("/tmp/awel_with_data_vector_store", ignore_errors=True)
vector_store = ChromaStore(
ChromaVectorConfig(
+ persist_path="/tmp/tmp_ltm_vector_store",
+ name="ltm_vector_store",
embedding_fn=embeddings,
- vector_store_config=ChromaVectorConfig(
- name="db_schema_vector_store",
- persist_path="/tmp/awel_with_data_vector_store",
- ),
)
)
@@ -490,10 +488,8 @@ db_conn.create_temp_tables(
vector_store = ChromaStore(
ChromaVectorConfig(
embedding_fn=embeddings,
- vector_store_config=ChromaVectorConfig(
- name="db_schema_vector_store",
- persist_path="/tmp/awel_with_data_vector_store",
- ),
+ name="db_schema_vector_store",
+ persist_path="/tmp/awel_with_data_vector_store",
)
)
diff --git a/docs/sidebars.js b/docs/sidebars.js
index e4fe838bf..b2057c5e4 100755
--- a/docs/sidebars.js
+++ b/docs/sidebars.js
@@ -459,19 +459,139 @@ const sidebars = {
label: 'Agents',
items: [
{
- type: "category",
- label: "Cookbook",
- items: [
- {
- type: "doc",
- id: "agents/cookbook/calculator_with_agents"
- },
- ],
+ type: 'doc',
+ id: "agents/introduction/introduction"
},
{
type: 'doc',
- id: 'agents/custom_agents',
- }
+ id: "agents/introduction/tools"
+ },
+ {
+ type: 'doc',
+ id: "agents/introduction/database"
+ },
+ {
+ type: 'doc',
+ id: "agents/introduction/planning"
+ },
+ {
+ type: 'doc',
+ id: "agents/introduction/conversation"
+ },
+ {
+ type: 'doc',
+ id: "agents/introduction/custom_agents"
+ },
+ // {
+ // type: "category",
+ // label: "Cookbook",
+ // items: [
+ // {
+ // type: "doc",
+ // id: "agents/cookbook/calculator_with_agents"
+ // },
+ // ],
+ // },
+ {
+ type: "category",
+ label: "Modules",
+ items: [
+ {
+ type: "category",
+ label: "Profile",
+ items: [
+ {
+ type: "doc",
+ id: "agents/modules/profile/profile"
+ },
+ {
+ type: "doc",
+ id: "agents/modules/profile/profile_creation"
+ },
+ {
+ type: "doc",
+ id: "agents/modules/profile/profile_to_prompt"
+ },
+ {
+ type: "doc",
+ id: "agents/modules/profile/profile_dynamic"
+ },
+ ]
+ },
+ {
+ type: "category",
+ label: "Memory",
+ items: [
+ {
+ type: "doc",
+ id: "agents/modules/memory/memory"
+ },
+ {
+ type: "doc",
+ id: "agents/modules/memory/sensory_memory"
+ },
+ {
+ type: "doc",
+ id: "agents/modules/memory/short_term_memory"
+ },
+ {
+ type: "doc",
+ id: "agents/modules/memory/long_term_memory"
+ },
+ {
+ type: "doc",
+ id: "agents/modules/memory/hybrid_memory"
+ },
+ ]
+ },
+ {
+ type: "category",
+ label: "Plan",
+ items: [
+ {
+ type: "doc",
+ id: "agents/modules/plan/plan"
+ },
+ ]
+ },
+ {
+ type: "category",
+ label: "Action",
+ items: [
+ {
+ type: "doc",
+ id: "agents/modules/action/action"
+ },
+ ]
+ },
+ {
+ type: "category",
+ label: "Resource",
+ items: [
+ {
+ type: "doc",
+ id: "agents/modules/resource/resource"
+ },
+ {
+ type: "doc",
+ id: "agents/modules/resource/tools"
+ },
+ {
+ type: "doc",
+ id: "agents/modules/resource/database"
+ },
+ {
+ type: "doc",
+ id: "agents/modules/resource/knowledge"
+ },
+ {
+ type: "doc",
+ id: "agents/modules/resource/pack"
+ },
+ ]
+ },
+ ],
+ },
],
},
{
diff --git a/docs/static/img/agents/introduction/agents_introduction.png b/docs/static/img/agents/introduction/agents_introduction.png
new file mode 100644
index 000000000..192017b19
Binary files /dev/null and b/docs/static/img/agents/introduction/agents_introduction.png differ
diff --git a/docs/static/img/agents/introduction/agents_planning.png b/docs/static/img/agents/introduction/agents_planning.png
new file mode 100644
index 000000000..48644ec71
Binary files /dev/null and b/docs/static/img/agents/introduction/agents_planning.png differ
diff --git a/examples/sdk/simple_sdk_llm_sql_example.py b/examples/sdk/simple_sdk_llm_sql_example.py
index 9bde1e9d2..473be161f 100644
--- a/examples/sdk/simple_sdk_llm_sql_example.py
+++ b/examples/sdk/simple_sdk_llm_sql_example.py
@@ -23,8 +23,8 @@ from dbgpt.rag.operators.datasource import DatasourceRetrieverOperator
def _create_temporary_connection():
"""Create a temporary database connection for testing."""
- connect = SQLiteTempConnector.create_temporary_db()
- connect.create_temp_tables(
+ conn = SQLiteTempConnector.create_temporary_db()
+ conn.create_temp_tables(
{
"user": {
"columns": {
@@ -42,7 +42,7 @@ def _create_temporary_connection():
}
}
)
- return connect
+ return conn
def _sql_prompt() -> str: