fix(core): Fix chat tracer no spans bug

parent 48cd2d6a4a
commit e5e4f54734
@@ -1,7 +1,6 @@
 from pandas import DataFrame
 from pilot.base_modules.agent.commands.command_mange import command
-from pilot.configs.config import Config
 import pandas as pd
 import uuid
 import os
 
@@ -12,7 +12,7 @@ from ..db.my_plugin_db import MyPluginDao, MyPluginEntity
 from ..common.schema import PluginStorageType
 from ..plugins_util import scan_plugins, update_from_git
 
-logger = logging.getLogger("agent_hub")
+logger = logging.getLogger(__name__)
 Default_User = "default"
 DEFAULT_PLUGIN_REPO = "https://github.com/eosphoros-ai/DB-GPT-Plugins.git"
 TEMP_PLUGIN_PATH = ""
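Note on the recurring logger change in this commit: hard-coded logger names such as "agent_hub" are replaced with the stdlib idiom logging.getLogger(__name__), so each logger is named after its module's dotted import path and follows per-package level configuration. A minimal sketch of the difference (the module path in the comment is illustrative):

import logging

logging.basicConfig(level=logging.INFO)

# Before: each module invents its own label, which can drift from the code
# layout and collide with labels chosen elsewhere.
legacy_logger = logging.getLogger("agent_hub")

# After: the name tracks the import path automatically, e.g. something like
# "pilot.base_modules.agent.hub.agent_hub" when imported as part of the package.
logger = logging.getLogger(__name__)

logger.info("plugin scan started")  # logged under "__main__" if run as a script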
@@ -9,6 +9,7 @@ import requests
 import git
 import threading
 import datetime
+import logging
 from pathlib import Path
 from typing import List
 from urllib.parse import urlparse
@@ -19,7 +20,8 @@ from auto_gpt_plugin_template import AutoGPTPluginTemplate
 
 from pilot.configs.config import Config
 from pilot.configs.model_config import PLUGINS_DIR
-from pilot.logs import logger
+
+logger = logging.getLogger(__name__)
 
 
 def inspect_zip_for_modules(zip_path: str, debug: bool = False) -> list[str]:
@@ -20,7 +20,7 @@ from urllib.parse import quote
 from pilot.configs.config import Config
 
 
-logger = logging.getLogger("meta_data")
+logger = logging.getLogger(__name__)
 
 CFG = Config()
 default_db_path = os.path.join(os.getcwd(), "meta_data")
@@ -249,7 +249,8 @@ def remove_color_codes(s: str) -> str:
     return ansi_escape.sub("", s)
 
 
-logger: Logger = Logger()
+# Remove current logger
+# logger: Logger = Logger()
 
 
 def print_assistant_thoughts(
@@ -14,7 +14,7 @@ from ..chat_history_db import ChatHistoryEntity, ChatHistoryDao
 from pilot.memory.chat_history.base import MemoryStoreType
 
 CFG = Config()
-logger = logging.getLogger("db_chat_history")
+logger = logging.getLogger(__name__)
 
 
 class DbHistoryMemory(BaseChatHistoryMemory):
@@ -47,6 +47,7 @@ from pilot.summary.db_summary_client import DBSummaryClient
 from pilot.memory.chat_history.chat_hisotry_factory import ChatHistory
 from pilot.model.cluster import BaseModelController, WorkerManager, WorkerManagerFactory
 from pilot.model.base import FlatSupportedModel
+from pilot.utils.tracer import root_tracer, SpanType
 from pilot.utils.executor_utils import ExecutorFactory, blocking_func_to_async
 
 router = APIRouter()
@@ -389,7 +390,10 @@ async def chat_completions(dialogue: ConversationVo = Body()):
     print(
         f"chat_completions:{dialogue.chat_mode},{dialogue.select_param},{dialogue.model_name}"
     )
-    chat: BaseChat = await get_chat_instance(dialogue)
+    with root_tracer.start_span(
+        "get_chat_instance", span_type=SpanType.CHAT, metadata=dialogue.dict()
+    ):
+        chat: BaseChat = await get_chat_instance(dialogue)
     # background_tasks = BackgroundTasks()
     # background_tasks.add_task(release_model_semaphore)
     headers = {
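For readers unfamiliar with the tracer: start_span is used above as a context manager, so the span is closed even if get_chat_instance raises. The real API lives in pilot.utils.tracer; the ToySpan/ToyTracer below are an illustrative stand-in that mimics the same contract, not the actual implementation:

import time
import uuid

class ToySpan:
    """Illustrative stand-in for a tracer span (not the real pilot.utils.tracer API)."""

    def __init__(self, name, span_type=None, metadata=None):
        self.name = name
        self.span_id = uuid.uuid4().hex[:8]
        self.start = time.time()
        print(f"[{self.span_id}] begin {name} type={span_type} metadata={metadata}")

    def end(self):
        print(f"[{self.span_id}] end {self.name} ({time.time() - self.start:.3f}s)")

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc, tb):
        self.end()  # runs on normal exit and when the block raises

class ToyTracer:
    def start_span(self, name, span_type=None, metadata=None):
        return ToySpan(name, span_type=span_type, metadata=metadata)

root_tracer = ToyTracer()

# Same shape as the chat_completions change above:
with root_tracer.start_span(
    "get_chat_instance", span_type="chat", metadata={"chat_mode": "chat_normal"}
):
    chat = "stand-in for `await get_chat_instance(dialogue)`"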
@@ -440,8 +444,9 @@ async def model_supports(worker_manager: WorkerManager = Depends(get_worker_mana
 
 
 async def no_stream_generator(chat):
-    msg = await chat.nostream_call()
-    yield f"data: {msg}\n\n"
+    with root_tracer.start_span("no_stream_generator"):
+        msg = await chat.nostream_call()
+        yield f"data: {msg}\n\n"
 
 
 async def stream_generator(chat, incremental: bool, model_name: str):
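The streaming endpoints are async generators, which is plausibly where the "no spans" bug lived: nothing inside them ever opened a span, so traced chat requests came back empty. Wrapping the generator body in a with block keeps one span open for the generator's lifetime. A self-contained sketch (FakeChat and _span are illustrative; nostream_call is the method name the diff uses):

import asyncio
from contextlib import contextmanager

@contextmanager
def _span(name):  # illustrative stand-in for root_tracer.start_span(name)
    print(f"begin {name}")
    try:
        yield
    finally:
        print(f"end {name}")  # also fires if the generator is closed early

class FakeChat:  # illustrative stand-in for BaseChat
    async def nostream_call(self):
        return "hello"

async def no_stream_generator(chat):
    with _span("no_stream_generator"):
        msg = await chat.nostream_call()
        yield f"data: {msg}\n\n"

async def main():
    async for data in no_stream_generator(FakeChat()):
        print(data, end="")

asyncio.run(main())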
@@ -458,6 +463,7 @@ async def stream_generator(chat, incremental: bool, model_name: str):
     Yields:
         _type_: streaming responses
     """
+    span = root_tracer.start_span("stream_generator")
    msg = "[LLM_ERROR]: llm server has no output, maybe your prompt template is wrong."
 
     stream_id = f"chatcmpl-{str(uuid.uuid1())}"
@@ -483,6 +489,7 @@ async def stream_generator(chat, incremental: bool, model_name: str):
             await asyncio.sleep(0.02)
     if incremental:
         yield "data: [DONE]\n\n"
+    span.end()
 
 
 def message2Vo(message: dict, order, model_name) -> MessageVo:
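stream_generator takes the manual form instead: the span is started before the first chunk and ended explicitly after the last one, so it covers the whole stream rather than a single step. A runnable sketch of that pattern (_Span and stream_chunks are illustrative); a try/finally around the loop would additionally close the span if the client disconnects mid-stream, while the commit as shown ends it unconditionally:

import time

class _Span:
    """Illustrative stand-in; assumes the real span object exposes .end()."""

    def __init__(self, name):
        self.name, self.start = name, time.time()
        print(f"begin {self.name}")

    def end(self):
        print(f"end {self.name} after {time.time() - self.start:.3f}s")

def stream_chunks():
    span = _Span("stream_generator")   # opened before the first yield
    for chunk in ["hello", " world"]:  # stand-in for the model's streamed output
        yield f"data:{chunk}\n\n"
    yield "data: [DONE]\n\n"
    span.end()                         # closed only after the final chunk

for data in stream_chunks():
    print(data, end="")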
@@ -1,11 +1,6 @@
 import json
 from typing import Dict, NamedTuple
-from pilot.utils import build_logger
 from pilot.out_parser.base import BaseOutputParser, T
-from pilot.configs.model_config import LOGDIR
-
-
-logger = build_logger("webserver", LOGDIR + "DbChatOutputParser.log")
 
 
 class PluginAction(NamedTuple):
@@ -1,5 +1,6 @@
 from pilot.scene.base_chat import BaseChat
 from pilot.singleton import Singleton
+from pilot.utils.tracer import root_tracer
 
 
 class ChatFactory(metaclass=Singleton):
@@ -20,7 +21,11 @@ class ChatFactory(metaclass=Singleton):
         implementation = None
         for cls in chat_classes:
             if cls.chat_scene == chat_mode:
-                implementation = cls(**kwargs)
+                metadata = {"cls": str(cls), "params": kwargs}
+                with root_tracer.start_span(
+                    "get_implementation_of_chat", metadata=metadata
+                ):
+                    implementation = cls(**kwargs)
         if implementation == None:
             raise Exception(f"Invalid implementation name:{chat_mode}")
         return implementation
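Finally, the ChatFactory change records which chat implementation gets constructed by attaching the class and its constructor kwargs as span metadata. A minimal sketch of that idea (_Span, ChatNormal, and get_implementation below are illustrative, not DB-GPT's real classes); storing the class as str(cls) keeps the metadata serializable even though kwargs may hold richer objects:

import json

class _Span:
    """Illustrative stand-in for a tracer span with metadata."""

    def __init__(self, name, metadata=None):
        print(f"begin {name} metadata={json.dumps(metadata, default=str)}")

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc, tb):
        print("end")

class ChatNormal:  # hypothetical chat scene implementation
    chat_scene = "chat_normal"

    def __init__(self, **kwargs):
        self.kwargs = kwargs

def get_implementation(chat_mode, **kwargs):
    implementation = None
    for cls in [ChatNormal]:
        if cls.chat_scene == chat_mode:
            # str(cls) and the raw kwargs become searchable span metadata
            metadata = {"cls": str(cls), "params": kwargs}
            with _Span("get_implementation_of_chat", metadata=metadata):
                implementation = cls(**kwargs)
    if implementation is None:
        raise Exception(f"Invalid implementation name:{chat_mode}")
    return implementation

chat = get_implementation("chat_normal", user_input="hi")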