chore: Add pylint for DB-GPT core lib (#1076)

Fangyin Cheng
2024-01-16 17:36:26 +08:00
committed by GitHub
parent 3a54d1ef9a
commit 40c853575a
79 changed files with 2213 additions and 839 deletions

View File

@@ -192,7 +192,8 @@ def _create_mysql_database(db_name: str, db_url: str, try_to_create_db: bool = F
     with engine_no_db.connect() as conn:
         conn.execute(
             DDL(
-                f"CREATE DATABASE {db_name} CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci"
+                f"CREATE DATABASE {db_name} CHARACTER SET utf8mb4 COLLATE "
+                f"utf8mb4_unicode_ci"
             )
         )
     logger.info(f"Database {db_name} successfully created")
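
Note: the split relies on Python's implicit concatenation of adjacent string literals, so the DDL text sent to MySQL is unchanged; only the source line length shrinks for pylint. A minimal sketch (the db_name value is hypothetical):

    db_name = "dbgpt"  # hypothetical value for illustration
    split = (
        f"CREATE DATABASE {db_name} CHARACTER SET utf8mb4 COLLATE "
        f"utf8mb4_unicode_ci"
    )
    single = f"CREATE DATABASE {db_name} CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci"
    # Adjacent literals are joined at compile time, so both forms
    # produce the identical SQL statement.
    assert split == single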
@@ -218,26 +219,31 @@ class WebServerParameters(BaseParameters):
     controller_addr: Optional[str] = field(
         default=None,
         metadata={
-            "help": "The Model controller address to connect. If None, read model controller address from environment key `MODEL_SERVER`."
+            "help": "The Model controller address to connect. If None, read model "
+            "controller address from environment key `MODEL_SERVER`."
         },
     )
     model_name: str = field(
         default=None,
         metadata={
-            "help": "The default model name to use. If None, read model name from environment key `LLM_MODEL`.",
+            "help": "The default model name to use. If None, read model name from "
+            "environment key `LLM_MODEL`.",
             "tags": "fixed",
         },
     )
     share: Optional[bool] = field(
         default=False,
         metadata={
-            "help": "Whether to create a publicly shareable link for the interface. Creates an SSH tunnel to make your UI accessible from anywhere. "
+            "help": "Whether to create a publicly shareable link for the interface. "
+            "Creates an SSH tunnel to make your UI accessible from anywhere. "
         },
     )
     remote_embedding: Optional[bool] = field(
         default=False,
         metadata={
-            "help": "Whether to enable remote embedding models. If it is True, you need to start a embedding model through `dbgpt start worker --worker_type text2vec --model_name xxx --model_path xxx`"
+            "help": "Whether to enable remote embedding models. If it is True, you need"
+            " to start a embedding model through `dbgpt start worker --worker_type "
+            "text2vec --model_name xxx --model_path xxx`"
         },
     )
     log_level: Optional[str] = field(
@@ -286,3 +292,10 @@ class WebServerParameters(BaseParameters):
             "help": "The directories to search awel files, split by `,`",
         },
     )
+    default_thread_pool_size: Optional[int] = field(
+        default=None,
+        metadata={
+            "help": "The default thread pool size, If None, "
+            "use default config of python thread pool",
+        },
+    )
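
For context on the new default_thread_pool_size option: passing max_workers=None to Python's ThreadPoolExecutor falls back to the stdlib default worker count (min(32, os.cpu_count() + 4) on Python 3.8+), which matches the "use default config of python thread pool" wording in the help text. A minimal sketch (make_executor is a hypothetical name):

    from concurrent.futures import ThreadPoolExecutor

    def make_executor(default_thread_pool_size=None):
        # None lets the stdlib pick the worker count:
        # min(32, os.cpu_count() + 4) on Python 3.8+.
        return ThreadPoolExecutor(max_workers=default_thread_pool_size)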

View File

@@ -25,7 +25,9 @@ def initialize_components(
     from dbgpt.model.cluster.controller.controller import controller
 
     # Register global default executor factory first
-    system_app.register(DefaultExecutorFactory)
+    system_app.register(
+        DefaultExecutorFactory, max_workers=param.default_thread_pool_size
+    )
     system_app.register_instance(controller)
 
     from dbgpt.serve.agent.hub.controller import module_agent
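
The hunk above threads the new CLI option into the executor component. Presumably system_app.register forwards extra keyword arguments to the component's constructor; a hedged sketch of that registration pattern (all names except DefaultExecutorFactory and max_workers are hypothetical):

    class SystemAppSketch:
        """Hypothetical stand-in for the DB-GPT SystemApp registry."""

        def __init__(self):
            self._components = {}

        def register(self, component_cls, **kwargs):
            # Extra kwargs (e.g. max_workers) reach the constructor,
            # which is how default_thread_pool_size would take effect.
            instance = component_cls(**kwargs)
            self._components[component_cls.__name__] = instance
            return instance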

View File

@@ -3,8 +3,6 @@ import os
 import sys
 from typing import List
 
-ROOT_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-sys.path.append(ROOT_PATH)
 from fastapi import FastAPI
 from fastapi.exceptions import RequestValidationError
 from fastapi.middleware.cors import CORSMiddleware
@@ -41,6 +39,10 @@ from dbgpt.util.utils import (
     setup_logging,
 )
 
+ROOT_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+sys.path.append(ROOT_PATH)
+
 static_file_path = os.path.join(ROOT_PATH, "dbgpt", "app/static")
 
 CFG = Config()
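
Both hunks above move the ROOT_PATH/sys.path mutation below the import block, which clears pylint's wrong-import-position warning (C0413) about code running before module-level imports. Where path setup genuinely must precede an import, the usual alternative is an inline pragma; a small sketch:

    import os
    import sys

    ROOT_PATH = os.path.dirname(os.path.abspath(__file__))
    sys.path.append(ROOT_PATH)

    # Without the pragma, pylint reports C0413 here because code ran
    # before this module-level import.
    from fastapi import FastAPI  # pylint: disable=wrong-import-position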

View File

@@ -5,6 +5,7 @@ from concurrent.futures import ThreadPoolExecutor, as_completed
 from urllib.parse import urljoin
 
 import requests
+from prettytable import PrettyTable
 
 from dbgpt.app.knowledge.request.request import (
     ChunkQueryRequest,
@@ -193,9 +194,6 @@ def knowledge_init(
         return
 
-    from prettytable import PrettyTable
-
 
 class _KnowledgeVisualizer:
     def __init__(self, api_address: str, out_format: str):
         self.client = KnowledgeApiClient(api_address)
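
Hoisting from prettytable import PrettyTable out of knowledge_init addresses pylint's import-outside-toplevel check (C0415): imports deferred into function bodies are flagged unless suppressed. For illustration (render_table is a hypothetical function):

    def render_table():
        # pylint reports C0415 (import-outside-toplevel) for this
        # deferred import; moving it to module scope, as the diff
        # does, silences the warning.
        from prettytable import PrettyTable

        return PrettyTable(["name", "value"])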

View File

@@ -4,13 +4,14 @@
 import os
 import sys
 
-ROOT_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-sys.path.append(ROOT_PATH)
-
 from dbgpt._private.config import Config
 from dbgpt.configs.model_config import EMBEDDING_MODEL_CONFIG, LLM_MODEL_CONFIG
 from dbgpt.model.cluster import run_worker_manager
 
+ROOT_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+sys.path.append(ROOT_PATH)
+
 CFG = Config()
 
 model_path = LLM_MODEL_CONFIG.get(CFG.LLM_MODEL)

View File

@@ -313,8 +313,9 @@ class BaseChat(ABC):
             )
             ### store current conversation
             span.end(metadata={"error": str(e)})
-        # self.memory.append(self.current_message)
-        self.current_message.end_current_round()
+        await blocking_func_to_async(
+            self._executor, self.current_message.end_current_round
+        )
 
     async def nostream_call(self):
         payload = await self._build_model_request()
@@ -381,8 +382,9 @@ class BaseChat(ABC):
             )
             span.end(metadata={"error": str(e)})
         ### store dialogue
-        # self.memory.append(self.current_message)
-        self.current_message.end_current_round()
+        await blocking_func_to_async(
+            self._executor, self.current_message.end_current_round
+        )
         return self.current_ai_response()
 
     async def get_llm_response(self):
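
Both hunks wrap the synchronous current_message.end_current_round() in blocking_func_to_async, so persisting the conversation round no longer blocks the asyncio event loop. A minimal sketch of what such a helper presumably does, built on loop.run_in_executor (not DB-GPT's actual implementation):

    import asyncio
    from concurrent.futures import Executor
    from functools import partial

    async def blocking_func_to_async_sketch(executor: Executor, func, *args, **kwargs):
        # Hand the blocking callable to the executor's thread pool and
        # await the result, keeping the event loop responsive.
        loop = asyncio.get_running_loop()
        return await loop.run_in_executor(executor, partial(func, *args, **kwargs))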