diff --git a/dbgpt/app/openapi/api_v1/api_v1.py b/dbgpt/app/openapi/api_v1/api_v1.py index a121bdc3b..f782e5655 100644 --- a/dbgpt/app/openapi/api_v1/api_v1.py +++ b/dbgpt/app/openapi/api_v1/api_v1.py @@ -372,7 +372,7 @@ async def chat_completions( incremental=dialogue.incremental, ) return StreamingResponse( - flow_service.chat_flow(dialogue.select_param, flow_req), + flow_service.chat_stream_flow_str(dialogue.select_param, flow_req), headers=headers, media_type="text/event-stream", ) diff --git a/dbgpt/app/openapi/api_v2.py b/dbgpt/app/openapi/api_v2.py index bd283bc17..14cf2f554 100644 --- a/dbgpt/app/openapi/api_v2.py +++ b/dbgpt/app/openapi/api_v2.py @@ -2,11 +2,11 @@ import json import re import time import uuid -from typing import Optional +from typing import AsyncIterator, Optional from fastapi import APIRouter, Body, Depends, HTTPException from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer -from starlette.responses import StreamingResponse +from starlette.responses import JSONResponse, StreamingResponse from dbgpt.app.openapi.api_v1.api_v1 import ( CHAT_FACTORY, @@ -27,6 +27,7 @@ from dbgpt.core.schema.api import ( ChatCompletionStreamResponse, ChatMessage, DeltaMessage, + ErrorResponse, UsageInfo, ) from dbgpt.model.cluster.apiserver.api import APISettings @@ -114,11 +115,14 @@ async def chat_completions( media_type="text/event-stream", ) elif request.chat_mode == ChatMode.CHAT_AWEL_FLOW.value: - return StreamingResponse( - chat_flow_stream_wrapper(request), - headers=headers, - media_type="text/event-stream", - ) + if not request.stream: + return await chat_flow_wrapper(request) + else: + return StreamingResponse( + chat_flow_stream_wrapper(request), + headers=headers, + media_type="text/event-stream", + ) elif ( request.chat_mode is None or request.chat_mode == ChatMode.CHAT_NORMAL.value @@ -244,35 +248,43 @@ async def chat_app_stream_wrapper(request: ChatCompletionRequestBody = None): yield "data: [DONE]\n\n" +async def chat_flow_wrapper(request: ChatCompletionRequestBody): + flow_service = get_chat_flow() + flow_req = CommonLLMHttpRequestBody(**request.dict()) + flow_uid = request.chat_param + output = await flow_service.safe_chat_flow(flow_uid, flow_req) + if not output.success: + return JSONResponse( + ErrorResponse(message=output.text, code=output.error_code).dict(), + status_code=400, + ) + else: + choice_data = ChatCompletionResponseChoice( + index=0, + message=ChatMessage(role="assistant", content=output.text), + ) + if output.usage: + usage = UsageInfo(**output.usage) + else: + usage = UsageInfo() + return ChatCompletionResponse( + id=request.conv_uid, choices=[choice_data], model=request.model, usage=usage + ) + + async def chat_flow_stream_wrapper( - request: ChatCompletionRequestBody = None, -): + request: ChatCompletionRequestBody, +) -> AsyncIterator[str]: """chat app stream Args: request (OpenAPIChatCompletionRequest): request - token (APIToken): token """ flow_service = get_chat_flow() flow_req = CommonLLMHttpRequestBody(**request.dict()) - async for output in flow_service.chat_flow(request.chat_param, flow_req): - if output.startswith("data: [DONE]"): - yield output - if output.startswith("data:"): - output = output[len("data: ") :] - choice_data = ChatCompletionResponseStreamChoice( - index=0, - delta=DeltaMessage(role="assistant", content=output), - ) - chunk = ChatCompletionStreamResponse( - id=request.conv_uid, - choices=[choice_data], - model=request.model, - created=int(time.time()), - ) - chat_completion_response = ( - f"data: 
{chunk.json(exclude_unset=True, ensure_ascii=False)}\n\n" - ) - yield chat_completion_response + flow_uid = request.chat_param + + async for output in flow_service.chat_stream_openai(flow_uid, flow_req): + yield output def check_chat_request(request: ChatCompletionRequestBody = Body()): diff --git a/dbgpt/cli/cli_scripts.py b/dbgpt/cli/cli_scripts.py index da3225083..bcf065639 100644 --- a/dbgpt/cli/cli_scripts.py +++ b/dbgpt/cli/cli_scripts.py @@ -60,19 +60,26 @@ def db(): @click.group() def new(): - """New a template""" + """New a template.""" pass @click.group() def app(): - """Manage your apps(dbgpts)""" + """Manage your apps(dbgpts).""" pass @click.group() def repo(): - """The repository to install the dbgpts from""" + """The repository to install the dbgpts from.""" + pass + + +@click.group() +def run(): + """Run your dbgpts.""" + pass stop_all_func_list = [] @@ -92,6 +99,7 @@ cli.add_command(db) cli.add_command(new) cli.add_command(app) cli.add_command(repo) +cli.add_command(run) add_command_alias(stop_all, name="all", parent_group=stop) try: @@ -162,8 +170,13 @@ except ImportError as e: try: from dbgpt.util.dbgpts.cli import add_repo from dbgpt.util.dbgpts.cli import install as app_install - from dbgpt.util.dbgpts.cli import list_all_apps as app_list - from dbgpt.util.dbgpts.cli import list_repos, new_dbgpts, remove_repo + from dbgpt.util.dbgpts.cli import list_all_apps as app_list_remote + from dbgpt.util.dbgpts.cli import ( + list_installed_apps, + list_repos, + new_dbgpts, + remove_repo, + ) from dbgpt.util.dbgpts.cli import uninstall as app_uninstall from dbgpt.util.dbgpts.cli import update_repo @@ -173,12 +186,20 @@ try: add_command_alias(update_repo, name="update", parent_group=repo) add_command_alias(app_install, name="install", parent_group=app) add_command_alias(app_uninstall, name="uninstall", parent_group=app) - add_command_alias(app_list, name="list-remote", parent_group=app) + add_command_alias(app_list_remote, name="list-remote", parent_group=app) + add_command_alias(list_installed_apps, name="list", parent_group=app) add_command_alias(new_dbgpts, name="app", parent_group=new) except ImportError as e: logging.warning(f"Integrating dbgpt dbgpts command line tool failed: {e}") +try: + from dbgpt.client._cli import run_flow + + add_command_alias(run_flow, name="flow", parent_group=run) +except ImportError as e: + logging.warning(f"Integrating dbgpt client command line tool failed: {e}") + def main(): return cli() diff --git a/dbgpt/client/_cli.py b/dbgpt/client/_cli.py new file mode 100644 index 000000000..fa73aa69e --- /dev/null +++ b/dbgpt/client/_cli.py @@ -0,0 +1,247 @@ +"""CLI for DB-GPT client.""" + +import functools +import json +import time +import uuid +from typing import Any, Dict + +import click + +from dbgpt.util import get_or_create_event_loop +from dbgpt.util.console import CliLogger +from dbgpt.util.i18n_utils import _ + +from .client import Client +from .flow import list_flow + +cl = CliLogger() + + +def add_base_flow_options(func): + """Add base flow options to the command.""" + + @click.option( + "-n", + "--name", + type=str, + default=None, + required=False, + help=_("The name of the AWEL flow"), + ) + @click.option( + "--uid", + type=str, + default=None, + required=False, + help=_("The uid of the AWEL flow"), + ) + @functools.wraps(func) + def _wrapper(*args, **kwargs): + return func(*args, **kwargs) + + return _wrapper + + +def add_chat_options(func): + """Add chat options to the command.""" + + @click.option( + "-m", + "--messages", + type=str, + 
default=None,
+        required=False,
+        help=_("The messages to run AWEL flow"),
+    )
+    @click.option(
+        "--model",
+        type=str,
+        default=None,
+        required=False,
+        help=_("The model name of AWEL flow"),
+    )
+    @click.option(
+        "-s",
+        "--stream",
+        type=bool,
+        default=False,
+        required=False,
+        is_flag=True,
+        help=_("Whether use stream mode to run AWEL flow"),
+    )
+    @click.option(
+        "-t",
+        "--temperature",
+        type=float,
+        default=None,
+        required=False,
+        help=_("The temperature to run AWEL flow"),
+    )
+    @click.option(
+        "--max_new_tokens",
+        type=int,
+        default=None,
+        required=False,
+        help=_("The max new tokens to run AWEL flow"),
+    )
+    @click.option(
+        "--conv_uid",
+        type=str,
+        default=None,
+        required=False,
+        help=_("The conversation id of the AWEL flow"),
+    )
+    @click.option(
+        "-d",
+        "--data",
+        type=str,
+        default=None,
+        required=False,
+        help=_("The json data to run AWEL flow, if set, will overwrite other options"),
+    )
+    @click.option(
+        "-e",
+        "--extra",
+        type=str,
+        default=None,
+        required=False,
+        help=_("The extra json data to run AWEL flow."),
+    )
+    @click.option(
+        "-i",
+        "--interactive",
+        type=bool,
+        default=False,
+        required=False,
+        is_flag=True,
+        help=_("Whether use interactive mode to run AWEL flow"),
+    )
+    @functools.wraps(func)
+    def _wrapper(*args, **kwargs):
+        return func(*args, **kwargs)
+
+    return _wrapper
+
+
+@click.command(name="flow")
+@add_base_flow_options
+@add_chat_options
+def run_flow(name: str, uid: str, data: str, interactive: bool, **kwargs):
+    """Run an AWEL flow."""
+    client = Client()
+
+    loop = get_or_create_event_loop()
+    res = loop.run_until_complete(list_flow(client, name, uid))
+
+    if not res:
+        cl.error("Flow not found with the given name or uid", exit_code=1)
+    if len(res) > 1:
+        cl.error("More than one flow found", exit_code=1)
+    flow = res[0]
+    json_data = _parse_json_data(data, **kwargs)
+    json_data["chat_param"] = flow.uid
+    json_data["chat_mode"] = "chat_flow"
+    stream = "stream" in json_data and str(json_data["stream"]).lower() in ["true", "1"]
+    if stream:
+        loop.run_until_complete(_chat_stream(client, interactive, json_data))
+    else:
+        loop.run_until_complete(_chat(client, interactive, json_data))
+
+
+def _parse_json_data(data: str, **kwargs):
+    json_data = {}
+    if data:
+        try:
+            json_data = json.loads(data)
+        except Exception as e:
+            cl.error(f"Invalid JSON data: {data}, {e}", exit_code=1)
+    if "extra" in kwargs and kwargs["extra"]:
+        try:
+            extra = json.loads(kwargs["extra"])
+            kwargs["extra"] = extra
+        except Exception as e:
+            cl.error(f"Invalid extra JSON data: {kwargs['extra']}, {e}", exit_code=1)
+    for k, v in kwargs.items():
+        if v is not None and k not in json_data:
+            json_data[k] = v
+    if "model" not in json_data:
+        json_data["model"] = "__empty__model__"
+    return json_data
+
+
+async def _chat_stream(client: Client, interactive: bool, json_data: Dict[str, Any]):
+    user_input = json_data.get("messages", "")
+    if "conv_uid" not in json_data and interactive:
+        json_data["conv_uid"] = str(uuid.uuid4())
+    first_message = True
+    while True:
+        try:
+            if interactive and not user_input:
+                cl.print("Type 'exit' or 'quit' to exit.")
+                while not user_input:
+                    user_input = cl.ask("You")
+            if user_input.lower() in ["exit", "quit", "q"]:
+                break
+            start_time = time.time()
+            json_data["messages"] = user_input
+            if first_message:
+                cl.info("You: " + user_input)
+            cl.info("Chat stream started")
+            cl.debug(f"JSON data: {json.dumps(json_data, ensure_ascii=False)}")
+            full_text = ""
+            cl.print("Bot: ")
+            async for out in client.chat_stream(**json_data):
+                if out.choices:
+                    text = out.choices[0].delta.content
+                    if text:
+                        full_text += text
+                        cl.print(text, end="")
+            end_time = time.time()
+            time_cost = round(end_time - start_time, 2)
+            cl.success(f"\n:tada: Chat stream finished, time cost: {time_cost} s")
+        except Exception as e:
+            cl.error(f"Chat stream failed: {e}", exit_code=1)
+        finally:
+            first_message = False
+            if interactive:
+                user_input = ""
+            else:
+                break
+
+
+async def _chat(client: Client, interactive: bool, json_data: Dict[str, Any]):
+    user_input = json_data.get("messages", "")
+    if "conv_uid" not in json_data and interactive:
+        json_data["conv_uid"] = str(uuid.uuid4())
+    first_message = True
+    while True:
+        try:
+            if interactive and not user_input:
+                cl.print("Type 'exit' or 'quit' to exit.")
+                while not user_input:
+                    user_input = cl.ask("You")
+            if user_input.lower() in ["exit", "quit", "q"]:
+                break
+            start_time = time.time()
+            json_data["messages"] = user_input
+            if first_message:
+                cl.info("You: " + user_input)
+
+            cl.info("Chat started")
+            cl.debug(f"JSON data: {json.dumps(json_data, ensure_ascii=False)}")
+            res = await client.chat(**json_data)
+            cl.print("Bot: ")
+            if res.choices:
+                text = res.choices[0].message.content
+                cl.markdown(text)
+            time_cost = round(time.time() - start_time, 2)
+            cl.success(f"\n:tada: Chat finished, time cost: {time_cost} s")
+        except Exception as e:
+            cl.error(f"Chat failed: {e}", exit_code=1)
+        finally:
+            first_message = False
+            if interactive:
+                user_input = ""
+            else:
+                break
diff --git a/dbgpt/client/client.py b/dbgpt/client/client.py
index eccbd26ae..aaec050c8 100644
--- a/dbgpt/client/client.py
+++ b/dbgpt/client/client.py
@@ -1,7 +1,8 @@
 """This module contains the client for the DB-GPT API."""
+import atexit
 import json
 import os
-from typing import Any, AsyncGenerator, List, Optional, Union
+from typing import Any, AsyncGenerator, Dict, List, Optional, Union
 from urllib.parse import urlparse

 import httpx
@@ -98,6 +99,7 @@ class Client:
         self._http_client = httpx.AsyncClient(
             headers=headers, timeout=timeout if timeout else httpx.Timeout(None)
         )
+        atexit.register(self.close)

     async def chat(
         self,
@@ -113,6 +115,7 @@
         span_id: Optional[str] = None,
         incremental: bool = True,
         enable_vis: bool = True,
+        **kwargs,
     ) -> ChatCompletionResponse:
         """
         Chat Completion.
@@ -187,6 +190,7 @@
         span_id: Optional[str] = None,
         incremental: bool = True,
         enable_vis: bool = True,
+        **kwargs,
     ) -> AsyncGenerator[ChatCompletionStreamResponse, None]:
         """
         Chat Stream Completion.
@@ -238,10 +242,23 @@
             incremental=incremental,
             enable_vis=enable_vis,
         )
+        async for chat_completion_response in self._chat_stream(request.dict()):
+            yield chat_completion_response
+
+    async def _chat_stream(
+        self, data: Dict[str, Any]
+    ) -> AsyncGenerator[ChatCompletionStreamResponse, None]:
+        """Chat Stream Completion.
+
+        Args:
+            data: dict, The data to send to the API.
+        Returns:
+            AsyncGenerator[dict, None]: The chat completion response.
+ """ async with self._http_client.stream( method="POST", url=self._api_url + "/chat/completions", - json=request.dict(), + json=data, headers={}, ) as response: if response.status_code == 200: @@ -250,7 +267,11 @@ class Client: if line.strip() == "data: [DONE]": break if line.startswith("data:"): - json_data = json.loads(line[len("data: ") :]) + if line.startswith("data: "): + sse_data = line[len("data: ") :] + else: + sse_data = line[len("data:") :] + json_data = json.loads(sse_data) chat_completion_response = ChatCompletionStreamResponse( **json_data ) @@ -265,21 +286,20 @@ class Client: except Exception as e: raise e - async def get(self, path: str, *args): + async def get(self, path: str, *args, **kwargs): """Get method. Args: path: str, The path to get. args: Any, The arguments to pass to the get method. """ - try: - response = await self._http_client.get( - f"{self._api_url}/{CLIENT_SERVE_PATH}{path}", - *args, - ) - return response - finally: - await self._http_client.aclose() + kwargs = {k: v for k, v in kwargs.items() if v is not None} + response = await self._http_client.get( + f"{self._api_url}/{CLIENT_SERVE_PATH}{path}", + *args, + params=kwargs, + ) + return response async def post(self, path: str, args): """Post method. @@ -288,13 +308,10 @@ class Client: path: str, The path to post. args: Any, The arguments to pass to the post """ - try: - return await self._http_client.post( - f"{self._api_url}/{CLIENT_SERVE_PATH}{path}", - json=args, - ) - finally: - await self._http_client.aclose() + return await self._http_client.post( + f"{self._api_url}/{CLIENT_SERVE_PATH}{path}", + json=args, + ) async def post_param(self, path: str, args): """Post method. @@ -303,13 +320,10 @@ class Client: path: str, The path to post. args: Any, The arguments to pass to the post """ - try: - return await self._http_client.post( - f"{self._api_url}/{CLIENT_SERVE_PATH}{path}", - params=args, - ) - finally: - await self._http_client.aclose() + return await self._http_client.post( + f"{self._api_url}/{CLIENT_SERVE_PATH}{path}", + params=args, + ) async def patch(self, path: str, *args): """Patch method. @@ -329,26 +343,20 @@ class Client: path: str, The path to put. args: Any, The arguments to pass to the put. """ - try: - return await self._http_client.put( - f"{self._api_url}/{CLIENT_SERVE_PATH}{path}", json=args - ) - finally: - await self._http_client.aclose() + return await self._http_client.put( + f"{self._api_url}/{CLIENT_SERVE_PATH}{path}", json=args + ) async def delete(self, path: str, *args): """Delete method. Args: path: str, The path to delete. - args: Any, The arguments to pass to the delete. + args: Any, The arguments to pass to delete. """ - try: - return await self._http_client.delete( - f"{self._api_url}/{CLIENT_SERVE_PATH}{path}", *args - ) - finally: - await self._http_client.aclose() + return await self._http_client.delete( + f"{self._api_url}/{CLIENT_SERVE_PATH}{path}", *args + ) async def head(self, path: str, *args): """Head method. @@ -359,6 +367,18 @@ class Client: """ return self._http_client.head(self._api_url + path, *args) + def close(self): + """Close the client.""" + from dbgpt.util import get_or_create_event_loop + + if not self._http_client.is_closed: + loop = get_or_create_event_loop() + loop.run_until_complete(self._http_client.aclose()) + + async def aclose(self): + """Close the client.""" + await self._http_client.aclose() + def is_valid_url(api_url: Any) -> bool: """Check if the given URL is valid. 
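Note on the `client.py` changes above: the per-request `aclose()` calls in `get`/`post`/`post_param`/`put`/`delete` are gone, so a single `Client` can now issue several requests over one `httpx.AsyncClient`, with the `atexit`-registered `close()` acting only as a fallback for clients that are never closed explicitly. A minimal usage sketch of the new lifecycle (assumes a running DB-GPT server reachable with default `Client()` settings; the model name is hypothetical):

```python
import asyncio

from dbgpt.client.client import Client


async def main():
    # One Client can now serve several sequential requests, since the
    # helper methods no longer close the underlying httpx.AsyncClient.
    client = Client()  # default api_base/api_key; adjust for your deployment
    try:
        async for chunk in client.chat_stream(
            model="chatgpt_proxyllm",  # hypothetical model name
            messages="Say hello in one sentence.",
        ):
            if chunk.choices and chunk.choices[0].delta.content:
                print(chunk.choices[0].delta.content, end="", flush=True)
    finally:
        # Prefer the explicit async close; the atexit-registered sync
        # close() is only a last-resort cleanup.
        await client.aclose()


asyncio.run(main())
```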
diff --git a/dbgpt/client/flow.py b/dbgpt/client/flow.py index f5206b2d1..15a453593 100644 --- a/dbgpt/client/flow.py +++ b/dbgpt/client/flow.py @@ -93,19 +93,23 @@ async def get_flow(client: Client, flow_id: str) -> FlowPanel: raise ClientException(f"Failed to get flow: {e}") -async def list_flow(client: Client) -> List[FlowPanel]: +async def list_flow( + client: Client, name: str | None = None, uid: str | None = None +) -> List[FlowPanel]: """ List flows. Args: client (Client): The dbgpt client. + name (str): The name of the flow. + uid (str): The uid of the flow. Returns: List[FlowPanel]: The list of flow panels. Raises: ClientException: If the request failed. """ try: - res = await client.get("/awel/flows") + res = await client.get("/awel/flows", **{"name": name, "uid": uid}) result: Result = res.json() if result["success"]: return [FlowPanel(**flow) for flow in result["data"]["items"]] diff --git a/dbgpt/core/awel/operators/base.py b/dbgpt/core/awel/operators/base.py index 76f712b9e..f9ba12169 100644 --- a/dbgpt/core/awel/operators/base.py +++ b/dbgpt/core/awel/operators/base.py @@ -127,6 +127,8 @@ class BaseOperator(DAGNode, ABC, Generic[OUT], metaclass=BaseOperatorMeta): """ streaming_operator: bool = False + incremental_output: bool = False + output_format: Optional[str] = None def __init__( self, @@ -147,6 +149,10 @@ class BaseOperator(DAGNode, ABC, Generic[OUT], metaclass=BaseOperatorMeta): from dbgpt.core.awel import DefaultWorkflowRunner runner = DefaultWorkflowRunner() + if "incremental_output" in kwargs: + self.incremental_output = bool(kwargs["incremental_output"]) + if "output_format" in kwargs: + self.output_format = kwargs["output_format"] self._runner: WorkflowRunner = runner self._dag_ctx: Optional[DAGContext] = None diff --git a/dbgpt/core/awel/trigger/iterator_trigger.py b/dbgpt/core/awel/trigger/iterator_trigger.py index 617216408..abcafc204 100644 --- a/dbgpt/core/awel/trigger/iterator_trigger.py +++ b/dbgpt/core/awel/trigger/iterator_trigger.py @@ -132,9 +132,11 @@ class IteratorTrigger(Trigger[List[Tuple[Any, Any]]]): task_id = self.node_id async def call_stream(call_data: Any): - async for out in await end_node.call_stream(call_data): - yield out - await dag._after_dag_end(end_node.current_event_loop_task_id) + try: + async for out in await end_node.call_stream(call_data): + yield out + finally: + await dag._after_dag_end(end_node.current_event_loop_task_id) async def run_node(call_data: Any) -> Tuple[Any, Any]: async with semaphore: diff --git a/dbgpt/core/interface/llm.py b/dbgpt/core/interface/llm.py index 98b549ba3..d87482af2 100644 --- a/dbgpt/core/interface/llm.py +++ b/dbgpt/core/interface/llm.py @@ -161,6 +161,7 @@ class ModelOutput: error_code: int """The error code of the model inference. 
If the model inference is successful, the error code is 0.""" + incremental: bool = False model_context: Optional[Dict] = None finish_reason: Optional[str] = None usage: Optional[Dict[str, Any]] = None @@ -171,6 +172,11 @@ class ModelOutput: """Convert the model output to dict.""" return asdict(self) + @property + def success(self) -> bool: + """Check if the model inference is successful.""" + return self.error_code == 0 + _ModelMessageType = Union[List[ModelMessage], List[Dict[str, Any]]] diff --git a/dbgpt/core/interface/operators/llm_operator.py b/dbgpt/core/interface/operators/llm_operator.py index 73b857e50..c716bad66 100644 --- a/dbgpt/core/interface/operators/llm_operator.py +++ b/dbgpt/core/interface/operators/llm_operator.py @@ -470,6 +470,8 @@ class CommonStreamingOutputOperator(TransformStreamAbsOperator[ModelOutput, str] Transform model output to the string output to show in DB-GPT chat flow page. """ + output_format = "SSE" + metadata = ViewMetadata( label=_("Common Streaming Output Operator"), name="common_streaming_output_operator", @@ -510,8 +512,8 @@ class CommonStreamingOutputOperator(TransformStreamAbsOperator[ModelOutput, str] yield f"data:{error_msg}" return decoded_unicode = model_output.text.replace("\ufffd", "") - msg = decoded_unicode.replace("\n", "\\n") - yield f"data:{msg}\n\n" + # msg = decoded_unicode.replace("\n", "\\n") + yield f"data:{decoded_unicode}\n\n" class StringOutput2ModelOutputOperator(MapOperator[str, ModelOutput]): diff --git a/dbgpt/core/schema/api.py b/dbgpt/core/schema/api.py index 647987b0f..1904d841b 100644 --- a/dbgpt/core/schema/api.py +++ b/dbgpt/core/schema/api.py @@ -114,3 +114,11 @@ class ChatCompletionResponse(BaseModel): ..., description="Chat completion response choices" ) usage: UsageInfo = Field(..., description="Usage info") + + +class ErrorResponse(BaseModel): + """Error response entity.""" + + object: str = Field("error", description="Object type") + message: str = Field(..., description="Error message") + code: int = Field(..., description="Error code") diff --git a/dbgpt/model/utils/chatgpt_utils.py b/dbgpt/model/utils/chatgpt_utils.py index 683cbbfd8..057a04bf5 100644 --- a/dbgpt/model/utils/chatgpt_utils.py +++ b/dbgpt/model/utils/chatgpt_utils.py @@ -154,6 +154,9 @@ def _build_openai_client(init_params: OpenAIParameters) -> Tuple[str, ClientType class OpenAIStreamingOutputOperator(TransformStreamAbsOperator[ModelOutput, str]): """Transform ModelOutput to openai stream format.""" + incremental_output = True + output_format = "SSE" + metadata = ViewMetadata( label=_("OpenAI Streaming Output Operator"), name="openai_streaming_output_operator", @@ -177,7 +180,7 @@ class OpenAIStreamingOutputOperator(TransformStreamAbsOperator[ModelOutput, str] str, is_list=True, description=_( - "The model output after transform to openai stream format" + "The model output after transformed to openai stream format." 
), ) ], diff --git a/dbgpt/serve/flow/api/endpoints.py b/dbgpt/serve/flow/api/endpoints.py index c3939ceb8..746254f59 100644 --- a/dbgpt/serve/flow/api/endpoints.py +++ b/dbgpt/serve/flow/api/endpoints.py @@ -180,6 +180,8 @@ async def query_page( sys_code: Optional[str] = Query(default=None, description="system code"), page: int = Query(default=1, description="current page"), page_size: int = Query(default=20, description="page size"), + name: Optional[str] = Query(default=None, description="flow name"), + uid: Optional[str] = Query(default=None, description="flow uid"), service: Service = Depends(get_service), ) -> Result[PaginationResult[ServerResponse]]: """Query Flow entities @@ -189,13 +191,17 @@ async def query_page( sys_code (Optional[str]): The system code page (int): The page number page_size (int): The page size + name (Optional[str]): The flow name + uid (Optional[str]): The flow uid service (Service): The service Returns: ServerResponse: The response """ return Result.succ( service.get_list_by_page( - {"user_name": user_name, "sys_code": sys_code}, page, page_size + {"user_name": user_name, "sys_code": sys_code, "name": name, "uid": uid}, + page, + page_size, ) ) diff --git a/dbgpt/serve/flow/service/service.py b/dbgpt/serve/flow/service/service.py index 372525f4e..07d7f5191 100644 --- a/dbgpt/serve/flow/service/service.py +++ b/dbgpt/serve/flow/service/service.py @@ -1,11 +1,13 @@ import json import logging +import time import traceback -from typing import Any, List, Optional, cast +from typing import Any, AsyncIterator, List, Optional, cast import schedule from fastapi import HTTPException +from dbgpt._private.pydantic import model_to_json from dbgpt.component import SystemApp from dbgpt.core.awel import ( DAG, @@ -22,6 +24,13 @@ from dbgpt.core.awel.flow.flow_factory import ( ) from dbgpt.core.awel.trigger.http_trigger import CommonLLMHttpTrigger from dbgpt.core.interface.llm import ModelOutput +from dbgpt.core.schema.api import ( + ChatCompletionResponse, + ChatCompletionResponseChoice, + ChatCompletionResponseStreamChoice, + ChatCompletionStreamResponse, + DeltaMessage, +) from dbgpt.serve.core import BaseService from dbgpt.storage.metadata import BaseDao from dbgpt.storage.metadata._base_dao import QUERY_SPEC @@ -365,39 +374,117 @@ class Service(BaseService[ServeEntity, ServeRequest, ServerResponse]): """ return self.dao.get_list_page(request, page, page_size) - async def chat_flow( - self, - flow_uid: str, - request: CommonLLMHttpRequestBody, - incremental: bool = False, - ): + async def chat_stream_flow_str( + self, flow_uid: str, request: CommonLLMHttpRequestBody + ) -> AsyncIterator[str]: + """Stream chat with the AWEL flow. 
+ + Args: + flow_uid (str): The flow uid + request (CommonLLMHttpRequestBody): The request + """ + # Must be non-incremental + request.incremental = False + async for output in self.safe_chat_stream_flow(flow_uid, request): + text = output.text + # if text: + # text = text.replace("\n", "\\n") + if output.error_code != 0: + yield f"data:[SERVER_ERROR]{text}\n\n" + break + else: + yield f"data:{text}\n\n" + + async def chat_stream_openai( + self, flow_uid: str, request: CommonLLMHttpRequestBody + ) -> AsyncIterator[str]: + conv_uid = request.conv_uid + choice_data = ChatCompletionResponseStreamChoice( + index=0, + delta=DeltaMessage(role="assistant"), + finish_reason=None, + ) + chunk = ChatCompletionStreamResponse( + id=conv_uid, choices=[choice_data], model=request.model + ) + yield f"data: {chunk.json(exclude_unset=True, ensure_ascii=False)}\n\n" + + request.incremental = True + async for output in self.safe_chat_stream_flow(flow_uid, request): + if not output.success: + yield f"data: {json.dumps(output.to_dict(), ensure_ascii=False)}\n\n" + yield "data: [DONE]\n\n" + return + choice_data = ChatCompletionResponseStreamChoice( + index=0, + delta=DeltaMessage(role="assistant", content=output.text), + ) + chunk = ChatCompletionStreamResponse( + id=conv_uid, + choices=[choice_data], + model=request.model, + ) + json_data = model_to_json(chunk, exclude_unset=True, ensure_ascii=False) + yield f"data: {json_data}\n\n" + yield "data: [DONE]\n\n" + + async def safe_chat_flow( + self, flow_uid: str, request: CommonLLMHttpRequestBody + ) -> ModelOutput: """Chat with the AWEL flow. Args: flow_uid (str): The flow uid request (CommonLLMHttpRequestBody): The request - incremental (bool): Whether to return the result incrementally + + Returns: + ModelOutput: The output """ + incremental = request.incremental try: - async for output in self._call_chat_flow(flow_uid, request, incremental): + task = await self._get_callable_task(flow_uid) + return await _safe_chat_with_dag_task(task, request) + except HTTPException as e: + return ModelOutput(error_code=1, text=e.detail, incremental=incremental) + except Exception as e: + return ModelOutput(error_code=1, text=str(e), incremental=incremental) + + async def safe_chat_stream_flow( + self, flow_uid: str, request: CommonLLMHttpRequestBody + ) -> AsyncIterator[ModelOutput]: + """Stream chat with the AWEL flow. + + Args: + flow_uid (str): The flow uid + request (CommonLLMHttpRequestBody): The request + + Returns: + AsyncIterator[ModelOutput]: The output + """ + incremental = request.incremental + try: + task = await self._get_callable_task(flow_uid) + async for output in _safe_chat_stream_with_dag_task( + task, request, incremental + ): yield output except HTTPException as e: - yield f"data:[SERVER_ERROR]{e.detail}\n\n" + yield ModelOutput(error_code=1, text=e.detail, incremental=incremental) except Exception as e: - yield f"data:[SERVER_ERROR]{str(e)}\n\n" + yield ModelOutput(error_code=1, text=str(e), incremental=incremental) - async def _call_chat_flow( + async def _get_callable_task( self, flow_uid: str, - request: CommonLLMHttpRequestBody, - incremental: bool = False, - ): - """Chat with the AWEL flow. + ) -> BaseOperator: + """Return the callable task. 
- Args: - flow_uid (str): The flow uid - request (CommonLLMHttpRequestBody): The request - incremental (bool): Whether to return the result incrementally + Returns: + BaseOperator: The callable task + + Raises: + HTTPException: If the flow is not found + ValueError: If the flow is not a chat flow or the leaf node is not found. """ flow = self.get({"uid": flow_uid}) if not flow: @@ -416,10 +503,7 @@ class Service(BaseService[ServeEntity, ServeRequest, ServerResponse]): leaf_nodes = dag.leaf_nodes if len(leaf_nodes) != 1: raise ValueError("Chat Flow just support one leaf node in dag") - end_node = cast(BaseOperator, leaf_nodes[0]) - async for output in _chat_with_dag_task(end_node, request, incremental): - yield output - await dag._after_dag_end(end_node.current_event_loop_task_id) + return cast(BaseOperator, leaf_nodes[0]) def _parse_flow_category(self, dag: DAG) -> FlowCategory: """Parse the flow category @@ -470,99 +554,202 @@ def _is_chat_flow_type(output_obj: Any, is_class: bool = False) -> bool: return isinstance(output_obj, chat_types) -async def _chat_with_dag_task( +async def _safe_chat_with_dag_task(task: BaseOperator, request: Any) -> ModelOutput: + """Chat with the DAG task.""" + try: + finish_reason = None + usage = None + metrics = None + error_code = 0 + text = "" + async for output in _safe_chat_stream_with_dag_task(task, request, False): + finish_reason = output.finish_reason + usage = output.usage + metrics = output.metrics + error_code = output.error_code + text = output.text + return ModelOutput( + error_code=error_code, + text=text, + metrics=metrics, + usage=usage, + finish_reason=finish_reason, + ) + except Exception as e: + return ModelOutput(error_code=1, text=str(e), incremental=False) + + +async def _safe_chat_stream_with_dag_task( task: BaseOperator, - request: CommonLLMHttpRequestBody, - incremental: bool = False, -): - """Chat with the DAG task. 
+ request: Any, + incremental: bool, +) -> AsyncIterator[ModelOutput]: + """Chat with the DAG task.""" + try: + async for output in _chat_stream_with_dag_task(task, request, incremental): + yield output + except Exception as e: + yield ModelOutput(error_code=1, text=str(e), incremental=incremental) + finally: + if task.streaming_operator: + if task.dag: + await task.dag._after_dag_end(task.current_event_loop_task_id) - Args: - task (BaseOperator): The task - request (CommonLLMHttpRequestBody): The request - """ - if request.stream and task.streaming_operator: + +async def _chat_stream_with_dag_task( + task: BaseOperator, + request: Any, + incremental: bool, +) -> AsyncIterator[ModelOutput]: + """Chat with the DAG task.""" + is_sse = task.output_format and task.output_format.upper() == "SSE" + if not task.streaming_operator: try: - from dbgpt.model.utils.chatgpt_utils import OpenAIStreamingOutputOperator - except ImportError: - OpenAIStreamingOutputOperator = None - if incremental: - async for output in await task.call_stream(request): - yield output - else: - if OpenAIStreamingOutputOperator and isinstance( - task, OpenAIStreamingOutputOperator - ): - from dbgpt.core.schema.api import ChatCompletionResponseStreamChoice - - previous_text = "" - async for output in await task.call_stream(request): - if not isinstance(output, str): - yield "data:[SERVER_ERROR]The output is not a stream format\n\n" - return - if output == "data: [DONE]\n\n": - return - json_data = "".join(output.split("data: ")[1:]) - dict_data = json.loads(json_data) - if "choices" not in dict_data: - error_msg = dict_data.get("text", "Unknown error") - yield f"data:[SERVER_ERROR]{error_msg}\n\n" - return - choices = dict_data["choices"] - if choices: - choice = choices[0] - delta_data = ChatCompletionResponseStreamChoice(**choice) - if delta_data.delta.content: - previous_text += delta_data.delta.content - if previous_text: - full_text = previous_text.replace("\n", "\\n") - yield f"data:{full_text}\n\n" - else: - async for output in await task.call_stream(request): - str_msg = "" - should_return = False - if isinstance(output, str): - if output.strip(): - str_msg = output - elif isinstance(output, ModelOutput): - if output.error_code != 0: - str_msg = f"[SERVER_ERROR]{output.text}" - should_return = True - else: - str_msg = output.text - else: - str_msg = ( - f"[SERVER_ERROR]The output is not a valid format" - f"({type(output)})" - ) - should_return = True - if str_msg: - str_msg = str_msg.replace("\n", "\\n") - yield f"data:{str_msg}\n\n" - if should_return: - return + result = await task.call(request) + model_output = _parse_single_output(result, is_sse) + model_output.incremental = incremental + yield model_output + except Exception as e: + yield ModelOutput(error_code=1, text=str(e), incremental=incremental) else: - result = await task.call(request) - str_msg = "" - if result is None: - str_msg = "[SERVER_ERROR]The result is None!" 
- elif isinstance(result, str): - str_msg = result - elif isinstance(result, ModelOutput): - if result.error_code != 0: - str_msg = f"[SERVER_ERROR]{result.text}" - else: - str_msg = result.text - elif isinstance(result, CommonLLMHttpResponseBody): - if result.error_code != 0: - str_msg = f"[SERVER_ERROR]{result.text}" - else: - str_msg = result.text - elif isinstance(result, dict): - str_msg = json.dumps(result, ensure_ascii=False) - else: - str_msg = f"[SERVER_ERROR]The result is not a valid format({type(result)})" + from dbgpt.model.utils.chatgpt_utils import OpenAIStreamingOutputOperator - if str_msg: - str_msg = str_msg.replace("\n", "\\n") - yield f"data:{str_msg}\n\n" + if OpenAIStreamingOutputOperator and isinstance( + task, OpenAIStreamingOutputOperator + ): + full_text = "" + async for output in await task.call_stream(request): + model_output = _parse_openai_output(output) + # The output of the OpenAI streaming API is incremental + full_text += model_output.text + model_output.incremental = incremental + model_output.text = model_output.text if incremental else full_text + yield model_output + if not model_output.success: + break + else: + full_text = "" + previous_text = "" + async for output in await task.call_stream(request): + model_output = _parse_single_output(output, is_sse) + model_output.incremental = incremental + if task.incremental_output: + # Output is incremental, append the text + full_text += model_output.text + else: + # Output is not incremental, last output is the full text + full_text = model_output.text + if not incremental: + # Return the full text + model_output.text = full_text + else: + # Return the incremental text + delta_text = full_text[len(previous_text) :] + previous_text = ( + full_text + if len(full_text) > len(previous_text) + else previous_text + ) + model_output.text = delta_text + yield model_output + if not model_output.success: + break + + +def _parse_single_output(output: Any, is_sse: bool) -> ModelOutput: + """Parse the single output.""" + finish_reason = None + usage = None + metrics = None + if output is None: + error_code = 1 + text = "The output is None!" 
+ elif isinstance(output, str): + if is_sse: + sse_output = _parse_sse_data(output) + if sse_output is None: + error_code = 1 + text = "The output is not a SSE format" + else: + error_code = 0 + text = sse_output + else: + error_code = 0 + text = output + elif isinstance(output, ModelOutput): + error_code = output.error_code + text = output.text + finish_reason = output.finish_reason + usage = output.usage + metrics = output.metrics + elif isinstance(output, CommonLLMHttpResponseBody): + error_code = output.error_code + text = output.text + elif isinstance(output, dict): + error_code = 0 + text = json.dumps(output, ensure_ascii=False) + else: + error_code = 1 + text = f"The output is not a valid format({type(output)})" + return ModelOutput( + error_code=error_code, + text=text, + finish_reason=finish_reason, + usage=usage, + metrics=metrics, + ) + + +def _parse_openai_output(output: Any) -> ModelOutput: + """Parse the OpenAI output.""" + text = "" + if not isinstance(output, str): + return ModelOutput( + error_code=1, + text="The output is not a stream format", + ) + if output.strip() == "data: [DONE]" or output.strip() == "data:[DONE]": + return ModelOutput(error_code=0, text="") + if not output.startswith("data:"): + return ModelOutput( + error_code=1, + text="The output is not a stream format", + ) + + sse_output = _parse_sse_data(output) + if sse_output is None: + return ModelOutput(error_code=1, text="The output is not a SSE format") + json_data = sse_output.strip() + try: + dict_data = json.loads(json_data) + except Exception as e: + return ModelOutput( + error_code=1, + text=f"Invalid JSON data: {json_data}, {e}", + ) + if "choices" not in dict_data: + return ModelOutput( + error_code=1, + text=dict_data.get("text", "Unknown error"), + ) + choices = dict_data["choices"] + finish_reason: Optional[str] = None + if choices: + choice = choices[0] + delta_data = ChatCompletionResponseStreamChoice(**choice) + if delta_data.delta.content: + text = delta_data.delta.content + finish_reason = delta_data.finish_reason + return ModelOutput(error_code=0, text=text, finish_reason=finish_reason) + + +def _parse_sse_data(output: str) -> Optional[str]: + if output.startswith("data:"): + if output.startswith("data: "): + output = output[6:] + else: + output = output[5:] + + return output + else: + return None diff --git a/dbgpt/util/console/__init__.py b/dbgpt/util/console/__init__.py new file mode 100644 index 000000000..10ea12650 --- /dev/null +++ b/dbgpt/util/console/__init__.py @@ -0,0 +1,3 @@ +from .console import CliLogger # noqa: F401 + +__ALL__ = ["CliLogger"] diff --git a/dbgpt/util/console/console.py b/dbgpt/util/console/console.py new file mode 100644 index 000000000..709a2e91a --- /dev/null +++ b/dbgpt/util/console/console.py @@ -0,0 +1,71 @@ +"""Console utility functions for CLI.""" +import dataclasses +import sys +from functools import lru_cache +from typing import Any + +from rich.console import Console +from rich.markdown import Markdown +from rich.prompt import Prompt +from rich.theme import Theme + + +@dataclasses.dataclass +class Output: + """Output file.""" + + title: str + file: str + + +def _get_theme(): + return Theme( + { + "success": "green", + "info": "bright_blue", + "warning": "bright_yellow", + "error": "red", + } + ) + + +@lru_cache(maxsize=None) +def get_console(output: Output | None = None) -> Console: + return Console( + force_terminal=True, + color_system="standard", + theme=_get_theme(), + file=output.file if output else None, + ) + + +class CliLogger: + def 
__init__(self, output: Output | None = None): + self.console = get_console(output) + + def success(self, msg: str, **kwargs): + self.console.print(f"[success]{msg}[/]", **kwargs) + + def info(self, msg: str, **kwargs): + self.console.print(f"[info]{msg}[/]", **kwargs) + + def warning(self, msg: str, **kwargs): + self.console.print(f"[warning]{msg}[/]", **kwargs) + + def error(self, msg: str, exit_code: int = 0, **kwargs): + self.console.print(f"[error]{msg}[/]", **kwargs) + if exit_code != 0: + sys.exit(exit_code) + + def debug(self, msg: str, **kwargs): + self.console.print(f"[cyan]{msg}[/]", **kwargs) + + def print(self, *objects: Any, sep: str = " ", end: str = "\n", **kwargs): + self.console.print(*objects, sep=sep, end=end, **kwargs) + + def markdown(self, msg: str, **kwargs): + md = Markdown(msg) + self.console.print(md, **kwargs) + + def ask(self, msg: str, **kwargs): + return Prompt.ask(msg, **kwargs) diff --git a/dbgpt/util/dbgpts/cli.py b/dbgpt/util/dbgpts/cli.py index 9192689e8..dd7b35748 100644 --- a/dbgpt/util/dbgpts/cli.py +++ b/dbgpt/util/dbgpts/cli.py @@ -1,12 +1,14 @@ import functools import subprocess -import sys from pathlib import Path import click +from ..console import CliLogger from .base import DEFAULT_PACKAGE_TYPES +cl = CliLogger() + def check_poetry_installed(): try: @@ -18,13 +20,13 @@ def check_poetry_installed(): stderr=subprocess.DEVNULL, ) except (subprocess.CalledProcessError, FileNotFoundError): - print("Poetry is not installed. Please install Poetry to proceed.") - print( - "Visit https://python-poetry.org/docs/#installation for installation " - "instructions." - ) + cl.error("Poetry is not installed. Please install Poetry to proceed.") # Exit with error - sys.exit(1) + cl.error( + "Visit https://python-poetry.org/docs/#installation for installation " + "instructions.", + exit_code=1, + ) def add_tap_options(func): @@ -43,15 +45,41 @@ def add_tap_options(func): return wrapper +def add_add_common_options(func): + @click.option( + "-r", + "--repo", + type=str, + default=None, + required=False, + help="The repository to install the dbgpts from", + ) + @click.option( + "-U", + "--update", + type=bool, + required=False, + default=False, + is_flag=True, + help="Whether to update the repo", + ) + @functools.wraps(func) + def wrapper(*args, **kwargs): + return func(*args, **kwargs) + + return wrapper + + @click.command(name="install") -@add_tap_options +@add_add_common_options @click.argument("name", type=str) -def install(repo: str | None, name: str): +def install(repo: str | None, update: bool, name: str): """Install your dbgpts(operators,agents,workflows or apps)""" - from .repo import install + from .repo import _install_default_repos_if_no_repos, install check_poetry_installed() - install(name, repo) + _install_default_repos_if_no_repos() + install(name, repo, with_update=update) @click.command(name="uninstall") @@ -63,20 +91,33 @@ def uninstall(name: str): uninstall(name) -@click.command(name="list") -def list_all_apps(): - """List all installed dbgpts""" - from .repo import list_repo_apps +@click.command(name="list-remote") +@add_add_common_options +def list_all_apps( + repo: str | None, + update: bool, +): + """List all available dbgpts""" + from .repo import _install_default_repos_if_no_repos, list_repo_apps - list_repo_apps() + _install_default_repos_if_no_repos() + list_repo_apps(repo, with_update=update) + + +@click.command(name="list") +def list_installed_apps(): + """List all installed dbgpts""" + from .repo import list_installed_apps + + 
list_installed_apps() @click.command(name="list") def list_repos(): """List all repos""" - from .repo import list_repos + from .repo import _print_repos - print("\n".join(list_repos())) + _print_repos() @click.command(name="add") diff --git a/dbgpt/util/dbgpts/loader.py b/dbgpt/util/dbgpts/loader.py index 5628c95ab..b6d53fa6f 100644 --- a/dbgpt/util/dbgpts/loader.py +++ b/dbgpt/util/dbgpts/loader.py @@ -41,6 +41,7 @@ class BasePackage(BaseModel): ) root: str = Field(..., description="The root of the package") repo: str = Field(..., description="The repository of the package") + package: str = Field(..., description="The package name(like name in pypi)") @classmethod def build_from(cls, values: Dict[str, Any], ext_dict: Dict[str, Any]): @@ -131,6 +132,7 @@ class InstalledPackage(BaseModel): name: str = Field(..., description="The name of the package") repo: str = Field(..., description="The repository of the package") root: str = Field(..., description="The root of the package") + package: str = Field(..., description="The package name(like name in pypi)") def _get_classes_from_module(module): @@ -160,6 +162,7 @@ def _parse_package_metadata(package: InstalledPackage) -> BasePackage: ext_metadata[key] = value pkg_dict["root"] = package.root pkg_dict["repo"] = package.repo + pkg_dict["package"] = package.package if pkg_dict["package_type"] == "flow": return FlowPackage.build_from(pkg_dict, ext_metadata) elif pkg_dict["package_type"] == "operator": @@ -186,7 +189,9 @@ def _load_installed_package(path: str) -> List[InstalledPackage]: name = metadata["name"] repo = metadata["repo"] packages.append( - InstalledPackage(name=name, repo=repo, root=str(full_path)) + InstalledPackage( + name=name, repo=repo, root=str(full_path), package=package + ) ) return packages diff --git a/dbgpt/util/dbgpts/repo.py b/dbgpt/util/dbgpts/repo.py index c01a198ab..13b63570b 100644 --- a/dbgpt/util/dbgpts/repo.py +++ b/dbgpt/util/dbgpts/repo.py @@ -1,13 +1,14 @@ import functools -import logging import os import shutil import subprocess from pathlib import Path from typing import List, Tuple -import click +from rich.table import Table +from ..console import CliLogger +from ..i18n_utils import _ from .base import ( DBGPTS_METADATA_FILE, DBGPTS_REPO_HOME, @@ -17,9 +18,9 @@ from .base import ( INSTALL_METADATA_FILE, _print_path, ) +from .loader import _load_package_from_path -logger = logging.getLogger("dbgpt_cli") - +cl = CliLogger() _DEFAULT_REPO = "eosphoros/dbgpts" @@ -45,9 +46,10 @@ def list_repos() -> List[str]: def _get_repo_path(repo: str) -> Path: repo_arr = repo.split("/") if len(repo_arr) != 2: - raise ValueError( + cl.error( f"Invalid repo name '{repo}', repo name must split by '/', " - f"eg.(eosphoros/dbgpts)." 
+ f"eg.(eosphoros/dbgpts).", + exit_code=1, ) return Path(DBGPTS_REPO_HOME) / repo_arr[0] / repo_arr[1] @@ -63,6 +65,35 @@ def _list_repos_details() -> List[Tuple[str, str]]: return results +def _print_repos(): + """Print all repos""" + repos = _list_repos_details() + repos.sort(key=lambda x: (x[0], x[1])) + table = Table(title=_("Repos")) + table.add_column(_("Repository"), justify="right", style="cyan", no_wrap=True) + table.add_column(_("Path"), justify="right", style="green") + for repo, full_path in repos: + if full_path.startswith(str(Path.home())): + full_path = full_path.replace(str(Path.home()), "~") + table.add_row(repo, full_path) + cl.print(table) + + +def _install_default_repos_if_no_repos(): + """Install the default repos if no repos exist.""" + has_repos = False + for repo, full_path in _list_repos_details(): + if os.path.exists(full_path): + has_repos = True + break + if not has_repos: + repo_url = DEFAULT_REPO_MAP[_DEFAULT_REPO] + cl.info( + f"No repos found, installing default repos {_DEFAULT_REPO} from {repo_url}" + ) + add_repo(_DEFAULT_REPO, repo_url) + + def add_repo(repo: str, repo_url: str, branch: str | None = None): """Add a new repo @@ -73,13 +104,14 @@ def add_repo(repo: str, repo_url: str, branch: str | None = None): """ exist_repos = list_repos() if repo in exist_repos and repo_url not in DEFAULT_REPO_MAP.values(): - raise ValueError(f"The repo '{repo}' already exists.") + cl.error(f"The repo '{repo}' already exists.", exit_code=1) repo_arr = repo.split("/") if len(repo_arr) != 2: - raise ValueError( + cl.error( f"Invalid repo name '{repo}', repo name must split by '/', " - f"eg.(eosphoros/dbgpts)." + "eg.(eosphoros/dbgpts).", + exit_code=1, ) repo_name = repo_arr[1] repo_group_dir = os.path.join(DBGPTS_REPO_HOME, repo_arr[0]) @@ -99,12 +131,12 @@ def remove_repo(repo: str): """ repo_path = _get_repo_path(repo) if not os.path.exists(repo_path): - raise ValueError(f"The repo '{repo}' does not exist.") + cl.error(f"The repo '{repo}' does not exist.", exit_code=1) if os.path.islink(repo_path): os.unlink(repo_path) else: shutil.rmtree(repo_path) - logger.info(f"Repo '{repo}' removed successfully.") + cl.info(f"Repo '{repo}' removed successfully.") def clone_repo( @@ -132,11 +164,11 @@ def clone_repo( subprocess.run(clone_command, check=True) if branch: - click.echo( + cl.info( f"Repo '{repo}' cloned from {repo_url} with branch '{branch}' successfully." 
        )
     else:
-        click.echo(f"Repo '{repo}' cloned from {repo_url} successfully.")
+        cl.info(f"Repo '{repo}' cloned from {repo_url} successfully.")


 def update_repo(repo: str):
@@ -145,20 +177,20 @@ def update_repo(repo: str):
     Args:
         repo (str): The name of the repo
     """
-    print(f"Updating repo '{repo}'...")
+    cl.info(f"Updating repo '{repo}'...")
     repo_path = os.path.join(DBGPTS_REPO_HOME, repo)
     if not os.path.exists(repo_path):
         if repo in DEFAULT_REPO_MAP:
             add_repo(repo, DEFAULT_REPO_MAP[repo])
             if not os.path.exists(repo_path):
-                raise ValueError(f"The repo '{repo}' does not exist.")
+                cl.error(f"The repo '{repo}' does not exist.", exit_code=1)
         else:
-            raise ValueError(f"The repo '{repo}' does not exist.")
+            cl.error(f"The repo '{repo}' does not exist.", exit_code=1)
     os.chdir(repo_path)
     if not os.path.exists(".git"):
-        logger.info(f"Repo '{repo}' is not a git repository.")
+        cl.info(f"Repo '{repo}' is not a git repository.")
         return
-    logger.info(f"Updating repo '{repo}'...")
+    cl.info(f"Updating repo '{repo}'...")
     subprocess.run(["git", "pull"], check=False)


@@ -176,8 +208,7 @@ def install(
     """
     repo_info = check_with_retry(name, repo, with_update=with_update, is_first=True)
     if not repo_info:
-        click.echo(f"The specified dbgpt '{name}' does not exist.", err=True)
-        return
+        cl.error(f"The specified dbgpt '{name}' does not exist.", exit_code=1)
     repo, dbgpt_path = repo_info
     _copy_and_install(repo, name, dbgpt_path)


@@ -190,38 +221,34 @@ def uninstall(name: str):
     """
     install_path = INSTALL_DIR / name
     if not install_path.exists():
-        click.echo(
-            f"The dbgpt '{name}' has not been installed yet.",
-            err=True,
-        )
-        return
+        cl.error(f"The dbgpt '{name}' has not been installed yet.", exit_code=1)
     os.chdir(install_path)
     subprocess.run(["pip", "uninstall", name, "-y"], check=True)
     shutil.rmtree(install_path)
-    logger.info(f"dbgpt '{name}' uninstalled successfully.")
+    cl.info(f"dbgpt '{name}' uninstalled successfully.")


 def _copy_and_install(repo: str, name: str, package_path: Path):
     if not package_path.exists():
-        raise ValueError(
-            f"The specified dbgpt '{name}' does not exist in the {repo} tap."
+ cl.error( + f"The specified dbgpt '{name}' does not exist in the {repo} tap.", + exit_code=1, ) install_path = INSTALL_DIR / name if install_path.exists(): - click.echo( + cl.error( f"The dbgpt '{name}' has already been installed" f"({_print_path(install_path)}).", - err=True, + exit_code=1, ) - return try: shutil.copytree(package_path, install_path) - logger.info(f"Installing dbgpts '{name}' from {repo}...") + cl.info(f"Installing dbgpts '{name}' from {repo}...") os.chdir(install_path) subprocess.run(["poetry", "install"], check=True) _write_install_metadata(name, repo, install_path) - click.echo(f"Installed dbgpts at {_print_path(install_path)}.") - click.echo(f"dbgpts '{name}' installed successfully.") + cl.success(f"Installed dbgpts at {_print_path(install_path)}.") + cl.success(f"dbgpts '{name}' installed successfully.") except Exception as e: if install_path.exists(): shutil.rmtree(install_path) @@ -257,10 +284,9 @@ def check_with_retry( """ repos = _list_repos_details() if spec_repo: - repos = list(filter(lambda x: x[0] == repo, repos)) + repos = list(filter(lambda x: x[0] == spec_repo, repos)) if not repos: - logger.error(f"The specified repo '{spec_repo}' does not exist.") - return + cl.error(f"The specified repo '{spec_repo}' does not exist.", exit_code=1) if is_first and with_update: for repo in repos: update_repo(repo[0]) @@ -288,11 +314,17 @@ def list_repo_apps(repo: str | None = None, with_update: bool = True): if repo: repos = list(filter(lambda x: x[0] == repo, repos)) if not repos: - logger.error(f"The specified repo '{repo}' does not exist.") - return + cl.error(f"The specified repo '{repo}' does not exist.", exit_code=1) if with_update: for repo in repos: update_repo(repo[0]) + table = Table(title=_("dbgpts In All Repos")) + + table.add_column(_("Repository"), justify="right", style="cyan", no_wrap=True) + table.add_column(_("Type"), style="magenta") + table.add_column(_("Name"), justify="right", style="green") + + data = [] for repo in repos: repo_path = Path(repo[1]) for package in DEFAULT_PACKAGES: @@ -304,4 +336,28 @@ def list_repo_apps(repo: str | None = None, with_update: bool = True): and dbgpt_path.is_dir() and dbgpt_metadata_path.exists() ): - click.echo(f"{app}({repo[0]}/{package}/{app})") + data.append((repo[0], package, app)) + # Sort by repo name, package name, and app name + data.sort(key=lambda x: (x[0], x[1], x[2])) + for repo, package, app in data: + table.add_row(repo, package, app) + cl.print(table) + + +def list_installed_apps(): + """List all installed dbgpts""" + packages = _load_package_from_path(INSTALL_DIR) + table = Table(title=_("Installed dbgpts")) + + table.add_column(_("Name"), justify="right", style="cyan", no_wrap=True) + table.add_column(_("Type"), style="blue") + table.add_column(_("Repository"), style="magenta") + table.add_column(_("Path"), justify="right", style="green") + + packages.sort(key=lambda x: (x.package, x.package_type, x.repo)) + for package in packages: + str_path = package.root + if str_path.startswith(str(Path.home())): + str_path = str_path.replace(str(Path.home()), "~") + table.add_row(package.package, package.package_type, package.repo, str_path) + cl.print(table) diff --git a/i18n/Makefile b/i18n/Makefile index 7c4fd835a..38c75406f 100644 --- a/i18n/Makefile +++ b/i18n/Makefile @@ -10,14 +10,18 @@ SUBPACKAGES = agent app cli client configs core datasource model rag serve stora # Default: English (en) LANGUAGES = zh_CN ja ko fr ru +# Command line arguments for language and module, use LANGUAGES and SUBPACKAGES as 
default if not specified +LANGUAGE ?= $(LANGUAGES) +MODULE ?= $(SUBPACKAGES) + .PHONY: help pot po mo clean all: pot po mo -# Generate POT files for each existing subpackage +# Generate POT files for the specified or existing subpackage pot: @echo "Generating POT files for subpackages..." - @$(foreach pkg,$(SUBPACKAGES),\ + @$(foreach pkg,$(MODULE),\ if [ -d $(SRC_DIR)/$(pkg) ]; then \ echo "Processing $(pkg) package..."; \ mkdir -p $(LOCALEDIR)/pot; \ @@ -27,10 +31,10 @@ pot: fi; \ ) -# Update or create new .po files for each language +# Update or create new .po files for the specified language and module po: pot - @for lang in $(LANGUAGES); do \ - for pkg in $(SUBPACKAGES); do \ + @for lang in $(LANGUAGE); do \ + for pkg in $(MODULE); do \ if [ -f $(LOCALEDIR)/pot/$(DOMAIN)_$$pkg.pot ]; then \ mkdir -p $(LOCALEDIR)/$$lang/LC_MESSAGES; \ if [ -f $(LOCALEDIR)/$$lang/LC_MESSAGES/$(DOMAIN)_$$pkg.po ]; then \ @@ -46,11 +50,10 @@ po: pot done \ done - -# Compile .po files to .mo files +# Compile .po files to .mo files for the specified language and module mo: - @for lang in $(LANGUAGES); do \ - for pkg in $(SUBPACKAGES); do \ + @for lang in $(LANGUAGE); do \ + for pkg in $(MODULE); do \ if [ -f $(LOCALEDIR)/$$lang/LC_MESSAGES/$(DOMAIN)_$$pkg.po ]; then \ echo "Compiling $$lang MO file for $$pkg..."; \ msgfmt -o $(LOCALEDIR)/$$lang/LC_MESSAGES/$(DOMAIN)_$$pkg.mo $(LOCALEDIR)/$$lang/LC_MESSAGES/$(DOMAIN)_$$pkg.po; \ @@ -80,3 +83,4 @@ help: @echo "3. Translate the strings in .po files using a PO file editor." @echo "4. Run 'make mo' to compile the translated .po files into .mo files." @echo "5. Run 'make clean' to clean up if necessary." + @echo "Use 'LANGUAGE= MODULE=' to specify a language and a module." diff --git a/i18n/locales/fr/LC_MESSAGES/dbgpt_client.mo b/i18n/locales/fr/LC_MESSAGES/dbgpt_client.mo new file mode 100644 index 000000000..491103e71 Binary files /dev/null and b/i18n/locales/fr/LC_MESSAGES/dbgpt_client.mo differ diff --git a/i18n/locales/fr/LC_MESSAGES/dbgpt_client.po b/i18n/locales/fr/LC_MESSAGES/dbgpt_client.po new file mode 100644 index 000000000..0515cdab3 --- /dev/null +++ b/i18n/locales/fr/LC_MESSAGES/dbgpt_client.po @@ -0,0 +1,78 @@ +# French translations for PACKAGE package +# Traductions françaises du paquet PACKAGE. +# Copyright (C) 2024 THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the PACKAGE package. +# Automatically generated, 2024. 
+# +msgid "" +msgstr "" +"Project-Id-Version: PACKAGE VERSION\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2024-03-27 09:43+0800\n" +"PO-Revision-Date: 2024-03-27 03:21+0800\n" +"Last-Translator: Automatically generated\n" +"Language-Team: none\n" +"Language: fr\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Plural-Forms: nplurals=2; plural=(n > 1);\n" + +#: ../dbgpt/client/_cli.py:30 +#, fuzzy +msgid "The name of the AWEL flow" +msgstr "Le nom du flux" + +#: ../dbgpt/client/_cli.py:37 +#, fuzzy +msgid "The uid of the AWEL flow" +msgstr "L'UID du flux" + +#: ../dbgpt/client/_cli.py:55 +#, fuzzy +msgid "The messages to run AWEL flow" +msgstr "Les messages du flux" + +#: ../dbgpt/client/_cli.py:62 +#, fuzzy +msgid "The model name of AWEL flow" +msgstr "Le modèle du flux" + +#: ../dbgpt/client/_cli.py:71 +msgid "Whether use stream mode to run AWEL flow" +msgstr "" + +#: ../dbgpt/client/_cli.py:79 +#, fuzzy +msgid "The temperature to run AWEL flow" +msgstr "La température du flux" + +#: ../dbgpt/client/_cli.py:86 +#, fuzzy +msgid "The max new tokens to run AWEL flow" +msgstr "Le nombre maximal de nouveaux tokens du flux" + +#: ../dbgpt/client/_cli.py:93 +#, fuzzy +msgid "The conversation id of the AWEL flow" +msgstr "L'identifiant de conversation du flux" + +#: ../dbgpt/client/_cli.py:101 +msgid "The json data to run AWEL flow, if set, will overwrite other options" +msgstr "" + +#: ../dbgpt/client/_cli.py:109 +#, fuzzy +msgid "The extra json data to run AWEL flow." +msgstr "Les données JSON supplémentaires du flux" + +#: ../dbgpt/client/_cli.py:118 +#, fuzzy +msgid "Whether use interactive mode to run AWEL flow" +msgstr "La température du flux" + +#~ msgid "Whether to stream the flow, default is False" +#~ msgstr "Indique si le flux doit être diffusé en continu, par défaut False" + +#~ msgid "The json data of the flow" +#~ msgstr "Les données JSON du flux" diff --git a/i18n/locales/fr/LC_MESSAGES/dbgpt_model.mo b/i18n/locales/fr/LC_MESSAGES/dbgpt_model.mo index 7822cce82..4d8d19101 100644 Binary files a/i18n/locales/fr/LC_MESSAGES/dbgpt_model.mo and b/i18n/locales/fr/LC_MESSAGES/dbgpt_model.mo differ diff --git a/i18n/locales/fr/LC_MESSAGES/dbgpt_model.po b/i18n/locales/fr/LC_MESSAGES/dbgpt_model.po index 6f667f8b5..a6592cbee 100644 --- a/i18n/locales/fr/LC_MESSAGES/dbgpt_model.po +++ b/i18n/locales/fr/LC_MESSAGES/dbgpt_model.po @@ -8,7 +8,7 @@ msgid "" msgstr "" "Project-Id-Version: PACKAGE VERSION\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2024-03-24 22:53+0800\n" +"POT-Creation-Date: 2024-03-26 05:55+0800\n" "PO-Revision-Date: 2024-03-23 16:45+0800\n" "Last-Translator: Automatically generated\n" "Language-Team: none\n" @@ -35,7 +35,8 @@ msgid "" "Whether to auto convert the messages that are not supported by the LLM to a " "compatible format" msgstr "" -"Indique si les messages non pris en charge par le LLM doivent être convertis automatiquement dans un format compatible" +"Indique si les messages non pris en charge par le LLM doivent être convertis " +"automatiquement dans un format compatible" #: ../dbgpt/model/cluster/client.py:113 msgid "Remote LLM Client" @@ -70,7 +71,8 @@ msgid "" "OpenAI API Key, not required if you have set OPENAI_API_KEY environment " "variable." msgstr "" -"Clé API OpenAI, non requise si vous avez défini la variable d'environnement OPENAI_API_KEY." +"Clé API OpenAI, non requise si vous avez défini la variable d'environnement " +"OPENAI_API_KEY." 
diff --git a/i18n/locales/ja/LC_MESSAGES/dbgpt_client.mo b/i18n/locales/ja/LC_MESSAGES/dbgpt_client.mo
new file mode 100644
index 000000000..5ae7afffe
Binary files /dev/null and b/i18n/locales/ja/LC_MESSAGES/dbgpt_client.mo differ
diff --git a/i18n/locales/ja/LC_MESSAGES/dbgpt_client.po b/i18n/locales/ja/LC_MESSAGES/dbgpt_client.po
new file mode 100644
index 000000000..a959b0415
--- /dev/null
+++ b/i18n/locales/ja/LC_MESSAGES/dbgpt_client.po
@@ -0,0 +1,78 @@
+# Japanese translations for PACKAGE package
+# PACKAGE パッケージに対する日本語訳.
+# Copyright (C) 2024 THE PACKAGE'S COPYRIGHT HOLDER
+# This file is distributed under the same license as the PACKAGE package.
+# Automatically generated, 2024.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: PACKAGE VERSION\n"
+"Report-Msgid-Bugs-To: \n"
+"POT-Creation-Date: 2024-03-27 09:43+0800\n"
+"PO-Revision-Date: 2024-03-27 03:21+0800\n"
+"Last-Translator: Automatically generated\n"
+"Language-Team: none\n"
+"Language: ja\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+
+#: ../dbgpt/client/_cli.py:30
+#, fuzzy
+msgid "The name of the AWEL flow"
+msgstr "フローの名前"
+
+#: ../dbgpt/client/_cli.py:37
+#, fuzzy
+msgid "The uid of the AWEL flow"
+msgstr "フローのUID"
+
+#: ../dbgpt/client/_cli.py:55
+#, fuzzy
+msgid "The messages to run AWEL flow"
+msgstr "フローのメッセージ"
+
+#: ../dbgpt/client/_cli.py:62
+#, fuzzy
+msgid "The model name of AWEL flow"
+msgstr "フローのモデル"
+
+#: ../dbgpt/client/_cli.py:71
+msgid "Whether use stream mode to run AWEL flow"
+msgstr ""
+
+#: ../dbgpt/client/_cli.py:79
+#, fuzzy
+msgid "The temperature to run AWEL flow"
+msgstr "フローの温度"
+
+#: ../dbgpt/client/_cli.py:86
+#, fuzzy
+msgid "The max new tokens to run AWEL flow"
+msgstr "フローの最大新トークン数"
+
+#: ../dbgpt/client/_cli.py:93
+#, fuzzy
+msgid "The conversation id of the AWEL flow"
+msgstr "フローの会話ID"
+
+#: ../dbgpt/client/_cli.py:101
+msgid "The json data to run AWEL flow, if set, will overwrite other options"
+msgstr ""
+
+#: ../dbgpt/client/_cli.py:109
+#, fuzzy
+msgid "The extra json data to run AWEL flow."
+msgstr "フローの追加のJSONデータ"
+
+#: ../dbgpt/client/_cli.py:118
+#, fuzzy
+msgid "Whether use interactive mode to run AWEL flow"
+msgstr "フローの温度"
+
+#~ msgid "Whether to stream the flow, default is False"
+#~ msgstr "フローをストリーミングするかどうか、デフォルトはFalse"
+
+#~ msgid "The json data of the flow"
+#~ msgstr "フローのJSONデータ"
diff --git a/i18n/locales/ja/LC_MESSAGES/dbgpt_model.mo b/i18n/locales/ja/LC_MESSAGES/dbgpt_model.mo
index d5e7f4848..691effa6f 100644
Binary files a/i18n/locales/ja/LC_MESSAGES/dbgpt_model.mo and b/i18n/locales/ja/LC_MESSAGES/dbgpt_model.mo differ
diff --git a/i18n/locales/ja/LC_MESSAGES/dbgpt_model.po b/i18n/locales/ja/LC_MESSAGES/dbgpt_model.po
index 7d9f0b8b4..5a7f0078e 100644
--- a/i18n/locales/ja/LC_MESSAGES/dbgpt_model.po
+++ b/i18n/locales/ja/LC_MESSAGES/dbgpt_model.po
@@ -8,7 +8,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: PACKAGE VERSION\n"
 "Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2024-03-24 22:53+0800\n"
+"POT-Creation-Date: 2024-03-26 05:55+0800\n"
 "PO-Revision-Date: 2024-03-23 16:45+0800\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -18,138 +18,82 @@ msgstr ""
 "Content-Transfer-Encoding: 8bit\n"
 "Plural-Forms: nplurals=1; plural=0;\n"
 
-#: ../dbgpt/model/cluster/client.py:20
-#, fuzzy
-msgid "Default LLM Client"
-msgstr "LLM クライアント。"
-
 #: ../dbgpt/model/cluster/client.py:23
 msgid "Default LLM client(Connect to your DB-GPT model serving)"
-msgstr ""
+msgstr "デフォルトのLLMクライアント(DB-GPTモデルサービングに接続)"
 
 #: ../dbgpt/model/cluster/client.py:26 ../dbgpt/model/cluster/client.py:127
 msgid "Auto Convert Message"
-msgstr ""
+msgstr "メッセージの自動変換"
 
 #: ../dbgpt/model/cluster/client.py:32 ../dbgpt/model/cluster/client.py:133
 msgid ""
 "Whether to auto convert the messages that are not supported by the LLM to a "
 "compatible format"
-msgstr ""
-
-#: ../dbgpt/model/cluster/client.py:113
-#, fuzzy
-msgid "Remote LLM Client"
-msgstr "LLM クライアント。"
+msgstr "LLMでサポートされていないメッセージを互換性のある形式に自動変換するかどうか"
 
 #: ../dbgpt/model/cluster/client.py:116
 msgid "Remote LLM client(Connect to the remote DB-GPT model serving)"
-msgstr ""
+msgstr "リモートLLMクライアント(リモートDB-GPTモデルサービングに接続)"
 
 #: ../dbgpt/model/cluster/client.py:119
 msgid "Controller Address"
-msgstr ""
+msgstr "コントローラーアドレス"
 
 #: ../dbgpt/model/cluster/client.py:123
 msgid "http://127.0.0.1:8000"
-msgstr ""
+msgstr "http://127.0.0.1:8000"
 
 #: ../dbgpt/model/cluster/client.py:124
 msgid "Model controller address"
-msgstr ""
+msgstr "モデルコントローラーアドレス"
 
-#: ../dbgpt/model/proxy/llms/chatgpt.py:48
-#, fuzzy
-msgid "OpenAI LLM Client"
-msgstr "LLM クライアント。"
-
 #: ../dbgpt/model/proxy/llms/chatgpt.py:53
 msgid "OpenAI API Key"
-msgstr ""
+msgstr "OpenAI APIキー"
 
 #: ../dbgpt/model/proxy/llms/chatgpt.py:59
 msgid ""
 "OpenAI API Key, not required if you have set OPENAI_API_KEY environment "
 "variable."
 msgstr ""
+"OpenAI APIキー、OPENAI_API_KEY環境変数を設定している場合は必要ありません。"
 
 #: ../dbgpt/model/proxy/llms/chatgpt.py:64
 msgid "OpenAI API Base"
-msgstr ""
+msgstr "OpenAI APIベース"
 
 #: ../dbgpt/model/proxy/llms/chatgpt.py:70
 msgid ""
 "OpenAI API Base, not required if you have set OPENAI_API_BASE environment "
 "variable."
 msgstr ""
-
-#: ../dbgpt/model/operators/llm_operator.py:66
-msgid "LLM Operator"
-msgstr "LLM 演算子"
-
-#: ../dbgpt/model/operators/llm_operator.py:69
-msgid "The LLM operator."
-msgstr "LLM 演算子。"
-
-#: ../dbgpt/model/operators/llm_operator.py:72
-#: ../dbgpt/model/operators/llm_operator.py:120
-msgid "LLM Client"
-msgstr "LLM クライアント"
-
-#: ../dbgpt/model/operators/llm_operator.py:77
-#: ../dbgpt/model/operators/llm_operator.py:125
-msgid "The LLM Client."
-msgstr "LLM クライアント。"
-
-#: ../dbgpt/model/operators/llm_operator.py:82
-#: ../dbgpt/model/operators/llm_operator.py:130
-msgid "Model Request"
-msgstr "モデルリクエスト"
-
-#: ../dbgpt/model/operators/llm_operator.py:85
-#: ../dbgpt/model/operators/llm_operator.py:133
-msgid "The model request."
-msgstr "モデルリクエスト。"
-
-#: ../dbgpt/model/operators/llm_operator.py:90
-#: ../dbgpt/model/operators/llm_operator.py:138
-#: ../dbgpt/model/utils/chatgpt_utils.py:175
-msgid "Model Output"
-msgstr "モデル出力"
-
-#: ../dbgpt/model/operators/llm_operator.py:93
-#: ../dbgpt/model/operators/llm_operator.py:141
-msgid "The model output."
-msgstr "モデル出力。"
+"OpenAI APIベース、OPENAI_API_BASE環境変数を設定している場合は必要ありません。"
 
 #: ../dbgpt/model/operators/llm_operator.py:113
 msgid "Streaming LLM Operator"
-msgstr "ストリーミング LLM 演算子"
+msgstr "ストリーミングLLM演算子"
 
 #: ../dbgpt/model/operators/llm_operator.py:117
 msgid "The streaming LLM operator."
-msgstr "ストリーミング LLM 演算子。"
+msgstr "ストリーミングLLM演算子。"
 
 #: ../dbgpt/model/utils/chatgpt_utils.py:158
-#, fuzzy
 msgid "OpenAI Streaming Output Operator"
-msgstr "ストリーミング LLM 演算子"
+msgstr "OpenAIストリーミング出力演算子"
 
 #: ../dbgpt/model/utils/chatgpt_utils.py:162
-#, fuzzy
 msgid "The OpenAI streaming LLM operator."
-msgstr "ストリーミング LLM 演算子。"
+msgstr "OpenAIストリーミングLLM演算子。"
 
 #: ../dbgpt/model/utils/chatgpt_utils.py:166
-#, fuzzy
 msgid "Upstream Model Output"
-msgstr "モデル出力"
+msgstr "上流モデル出力"
 
 #: ../dbgpt/model/utils/chatgpt_utils.py:170
-#, fuzzy
 msgid "The model output of upstream."
-msgstr "モデル出力。"
+msgstr "上流のモデル出力。"
 
 #: ../dbgpt/model/utils/chatgpt_utils.py:180
-msgid "The model output after transform to openai stream format"
-msgstr ""
+msgid "The model output after transformed to openai stream format."
+msgstr "OpenAI ストリーム形式に変換されたモデル出力。"
\ No newline at end of file
diff --git a/i18n/locales/ko/LC_MESSAGES/dbgpt_client.mo b/i18n/locales/ko/LC_MESSAGES/dbgpt_client.mo
new file mode 100644
index 000000000..49af1843d
Binary files /dev/null and b/i18n/locales/ko/LC_MESSAGES/dbgpt_client.mo differ
diff --git a/i18n/locales/ko/LC_MESSAGES/dbgpt_client.po b/i18n/locales/ko/LC_MESSAGES/dbgpt_client.po
new file mode 100644
index 000000000..46df88396
--- /dev/null
+++ b/i18n/locales/ko/LC_MESSAGES/dbgpt_client.po
@@ -0,0 +1,78 @@
+# Korean translations for PACKAGE package
+# PACKAGE 패키지에 대한 한국어 번역문.
+# Copyright (C) 2024 THE PACKAGE'S COPYRIGHT HOLDER
+# This file is distributed under the same license as the PACKAGE package.
+# Automatically generated, 2024.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: PACKAGE VERSION\n"
+"Report-Msgid-Bugs-To: \n"
+"POT-Creation-Date: 2024-03-27 09:43+0800\n"
+"PO-Revision-Date: 2024-03-27 03:21+0800\n"
+"Last-Translator: Automatically generated\n"
+"Language-Team: none\n"
+"Language: ko\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+
+#: ../dbgpt/client/_cli.py:30
+#, fuzzy
+msgid "The name of the AWEL flow"
+msgstr "플로우의 이름"
+
+#: ../dbgpt/client/_cli.py:37
+#, fuzzy
+msgid "The uid of the AWEL flow"
+msgstr "플로우의 UID"
+
+#: ../dbgpt/client/_cli.py:55
+#, fuzzy
+msgid "The messages to run AWEL flow"
+msgstr "플로우의 메시지"
+
+#: ../dbgpt/client/_cli.py:62
+#, fuzzy
+msgid "The model name of AWEL flow"
+msgstr "플로우의 모델"
+
+#: ../dbgpt/client/_cli.py:71
+msgid "Whether use stream mode to run AWEL flow"
+msgstr ""
+
+#: ../dbgpt/client/_cli.py:79
+#, fuzzy
+msgid "The temperature to run AWEL flow"
+msgstr "플로우의 온도"
+
+#: ../dbgpt/client/_cli.py:86
+#, fuzzy
+msgid "The max new tokens to run AWEL flow"
+msgstr "플로우의 최대 새 토큰"
+
+#: ../dbgpt/client/_cli.py:93
+#, fuzzy
+msgid "The conversation id of the AWEL flow"
+msgstr "플로우의 대화 ID"
+
+#: ../dbgpt/client/_cli.py:101
+msgid "The json data to run AWEL flow, if set, will overwrite other options"
+msgstr ""
+
+#: ../dbgpt/client/_cli.py:109
+#, fuzzy
+msgid "The extra json data to run AWEL flow."
+msgstr "플로우의 추가 JSON 데이터"
+
+#: ../dbgpt/client/_cli.py:118
+#, fuzzy
+msgid "Whether use interactive mode to run AWEL flow"
+msgstr "플로우의 온도"
+
+#~ msgid "Whether to stream the flow, default is False"
+#~ msgstr "플로우를 스트리밍할지 여부, 기본값은 False"
+
+#~ msgid "The json data of the flow"
+#~ msgstr "플로우의 JSON 데이터"
diff --git a/i18n/locales/ko/LC_MESSAGES/dbgpt_model.mo b/i18n/locales/ko/LC_MESSAGES/dbgpt_model.mo
index d57bd3bba..f3eac6e9a 100644
Binary files a/i18n/locales/ko/LC_MESSAGES/dbgpt_model.mo and b/i18n/locales/ko/LC_MESSAGES/dbgpt_model.mo differ
diff --git a/i18n/locales/ko/LC_MESSAGES/dbgpt_model.po b/i18n/locales/ko/LC_MESSAGES/dbgpt_model.po
index 14c3d330e..ad0f61596 100644
--- a/i18n/locales/ko/LC_MESSAGES/dbgpt_model.po
+++ b/i18n/locales/ko/LC_MESSAGES/dbgpt_model.po
@@ -8,7 +8,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: PACKAGE VERSION\n"
 "Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2024-03-24 22:53+0800\n"
+"POT-Creation-Date: 2024-03-26 05:55+0800\n"
 "PO-Revision-Date: 2024-03-23 16:45+0800\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -34,8 +34,7 @@ msgstr "메시지 자동 변환"
 msgid ""
 "Whether to auto convert the messages that are not supported by the LLM to a "
 "compatible format"
-msgstr ""
-"LLM에서 지원하지 않는 메시지를 호환되는 형식으로 자동 변환할지 여부"
+msgstr "LLM에서 지원하지 않는 메시지를 호환되는 형식으로 자동 변환할지 여부"
 
 #: ../dbgpt/model/cluster/client.py:113
 msgid "Remote LLM Client"
@@ -117,11 +116,6 @@ msgstr "모델 요청."
 msgid "Model Output"
 msgstr "모델 출력"
 
-#: ../dbgpt/model/operators/llm_operator.py:93
-#: ../dbgpt/model/operators/llm_operator.py:141
-msgid "The model output."
-msgstr "모델 출력."
-
 #: ../dbgpt/model/operators/llm_operator.py:113
 msgid "Streaming LLM Operator"
 msgstr "스트리밍 LLM 연산자"
@@ -147,5 +141,5 @@ msgid "The model output of upstream."
 msgstr "상류의 모델 출력."
 
 #: ../dbgpt/model/utils/chatgpt_utils.py:180
-msgid "The model output after transform to openai stream format"
-msgstr "OpenAI 스트림 형식으로 변환된 모델 출력"
+msgid "The model output after transformed to openai stream format."
+msgstr "OpenAI 스트림 형식으로 변환된 모델 출력"
\ No newline at end of file
diff --git a/i18n/locales/ru/LC_MESSAGES/dbgpt_client.mo b/i18n/locales/ru/LC_MESSAGES/dbgpt_client.mo
new file mode 100644
index 000000000..f389d47bd
Binary files /dev/null and b/i18n/locales/ru/LC_MESSAGES/dbgpt_client.mo differ
diff --git a/i18n/locales/ru/LC_MESSAGES/dbgpt_client.po b/i18n/locales/ru/LC_MESSAGES/dbgpt_client.po
new file mode 100644
index 000000000..8ccaa75d2
--- /dev/null
+++ b/i18n/locales/ru/LC_MESSAGES/dbgpt_client.po
@@ -0,0 +1,79 @@
+# Russian translations for PACKAGE package
+# Русские переводы для пакета PACKAGE.
+# Copyright (C) 2024 THE PACKAGE'S COPYRIGHT HOLDER
+# This file is distributed under the same license as the PACKAGE package.
+# Automatically generated, 2024.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: PACKAGE VERSION\n"
+"Report-Msgid-Bugs-To: \n"
+"POT-Creation-Date: 2024-03-27 09:43+0800\n"
+"PO-Revision-Date: 2024-03-27 03:21+0800\n"
+"Last-Translator: Automatically generated\n"
+"Language-Team: none\n"
+"Language: ru\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && "
+"n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);\n"
+
+#: ../dbgpt/client/_cli.py:30
+#, fuzzy
+msgid "The name of the AWEL flow"
+msgstr "Имя потока"
+
+#: ../dbgpt/client/_cli.py:37
+#, fuzzy
+msgid "The uid of the AWEL flow"
+msgstr "UID потока"
+
+#: ../dbgpt/client/_cli.py:55
+#, fuzzy
+msgid "The messages to run AWEL flow"
+msgstr "Сообщения потока"
+
+#: ../dbgpt/client/_cli.py:62
+#, fuzzy
+msgid "The model name of AWEL flow"
+msgstr "Модель потока"
+
+#: ../dbgpt/client/_cli.py:71
+msgid "Whether use stream mode to run AWEL flow"
+msgstr ""
+
+#: ../dbgpt/client/_cli.py:79
+#, fuzzy
+msgid "The temperature to run AWEL flow"
+msgstr "Температура потока"
+
+#: ../dbgpt/client/_cli.py:86
+#, fuzzy
+msgid "The max new tokens to run AWEL flow"
+msgstr "Максимальное количество новых токенов потока"
+
+#: ../dbgpt/client/_cli.py:93
+#, fuzzy
+msgid "The conversation id of the AWEL flow"
+msgstr "Идентификатор беседы потока"
+
+#: ../dbgpt/client/_cli.py:101
+msgid "The json data to run AWEL flow, if set, will overwrite other options"
+msgstr ""
+
+#: ../dbgpt/client/_cli.py:109
+#, fuzzy
+msgid "The extra json data to run AWEL flow."
+msgstr "Дополнительные JSON-данные потока"
+
+#: ../dbgpt/client/_cli.py:118
+#, fuzzy
+msgid "Whether use interactive mode to run AWEL flow"
+msgstr "Температура потока"
+
+#~ msgid "Whether to stream the flow, default is False"
+#~ msgstr "Потоковая передача, по умолчанию False"
+
+#~ msgid "The json data of the flow"
+#~ msgstr "JSON-данные потока"
diff --git a/i18n/locales/ru/LC_MESSAGES/dbgpt_model.mo b/i18n/locales/ru/LC_MESSAGES/dbgpt_model.mo
index 6d1b8e047..057005a50 100644
Binary files a/i18n/locales/ru/LC_MESSAGES/dbgpt_model.mo and b/i18n/locales/ru/LC_MESSAGES/dbgpt_model.mo differ
diff --git a/i18n/locales/ru/LC_MESSAGES/dbgpt_model.po b/i18n/locales/ru/LC_MESSAGES/dbgpt_model.po
index 4bb5d7bc9..6bfdfa7da 100644
--- a/i18n/locales/ru/LC_MESSAGES/dbgpt_model.po
+++ b/i18n/locales/ru/LC_MESSAGES/dbgpt_model.po
@@ -8,7 +8,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: PACKAGE VERSION\n"
 "Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2024-03-24 22:53+0800\n"
+"POT-Creation-Date: 2024-03-26 05:55+0800\n"
 "PO-Revision-Date: 2024-03-23 16:45+0800\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -45,7 +45,8 @@ msgstr "Удаленный клиент LLM"
 
 #: ../dbgpt/model/cluster/client.py:116
 msgid "Remote LLM client(Connect to the remote DB-GPT model serving)"
-msgstr "Удаленный клиент LLM (Подключение к удаленной модели обслуживания DB-GPT)"
+msgstr ""
+"Удаленный клиент LLM (Подключение к удаленной модели обслуживания DB-GPT)"
 
 #: ../dbgpt/model/cluster/client.py:119
 msgid "Controller Address"
@@ -149,5 +150,5 @@ msgid "The model output of upstream."
 msgstr "Выход модели выше."
 
 #: ../dbgpt/model/utils/chatgpt_utils.py:180
-msgid "The model output after transform to openai stream format"
-msgstr "Выход модели после преобразования в формат потока openai"
+msgid "The model output after transformed to openai stream format."
+msgstr "Выход модели после преобразования в формат потока OpenAI"
\ No newline at end of file
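Catalogs like the ones above can also be validated outside the Makefile before committing. A minimal sketch using the standard gettext tools (the paths follow the i18n/locales layout in this diff):

    # Check a hand-edited catalog for syntax errors, report untranslated or
    # fuzzy entries, and compile it to the .mo file the application loads
    msgfmt --check --statistics \
        -o i18n/locales/fr/LC_MESSAGES/dbgpt_client.mo \
        i18n/locales/fr/LC_MESSAGES/dbgpt_client.po

msgfmt exits non-zero on malformed entries, which makes it easy to wire into CI. Note that entries still flagged fuzzy, like several of the auto-generated ones above, are skipped at compile time unless translated and un-fuzzied.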
diff --git a/i18n/locales/zh_CN/LC_MESSAGES/dbgpt_client.mo b/i18n/locales/zh_CN/LC_MESSAGES/dbgpt_client.mo
new file mode 100644
index 000000000..d581af0f5
Binary files /dev/null and b/i18n/locales/zh_CN/LC_MESSAGES/dbgpt_client.mo differ
diff --git a/i18n/locales/zh_CN/LC_MESSAGES/dbgpt_client.po b/i18n/locales/zh_CN/LC_MESSAGES/dbgpt_client.po
new file mode 100644
index 000000000..d0ed3f10a
--- /dev/null
+++ b/i18n/locales/zh_CN/LC_MESSAGES/dbgpt_client.po
@@ -0,0 +1,62 @@
+# Chinese translations for PACKAGE package
+# PACKAGE 软件包的简体中文翻译.
+# Copyright (C) 2024 THE PACKAGE'S COPYRIGHT HOLDER
+# This file is distributed under the same license as the PACKAGE package.
+# Automatically generated, 2024.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: PACKAGE VERSION\n"
+"Report-Msgid-Bugs-To: \n"
+"POT-Creation-Date: 2024-03-27 09:43+0800\n"
+"PO-Revision-Date: 2024-03-27 03:21+0800\n"
+"Last-Translator: Automatically generated\n"
+"Language-Team: none\n"
+"Language: zh_CN\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+
+#: ../dbgpt/client/_cli.py:30
+msgid "The name of the AWEL flow"
+msgstr "AWEL 工作流的名称"
+
+#: ../dbgpt/client/_cli.py:37
+msgid "The uid of the AWEL flow"
+msgstr "AWEL 工作流的 UID"
+
+#: ../dbgpt/client/_cli.py:55
+msgid "The messages to run AWEL flow"
+msgstr "运行 AWEL 工作流的消息"
+
+#: ../dbgpt/client/_cli.py:62
+msgid "The model name of AWEL flow"
+msgstr "AWEL 工作流的模型名称"
+
+#: ../dbgpt/client/_cli.py:71
+msgid "Whether use stream mode to run AWEL flow"
+msgstr "是否使用流模式运行 AWEL 工作流"
+
+#: ../dbgpt/client/_cli.py:79
+msgid "The temperature to run AWEL flow"
+msgstr "运行 AWEL 工作流的温度"
+
+#: ../dbgpt/client/_cli.py:86
+msgid "The max new tokens to run AWEL flow"
+msgstr "运行 AWEL 工作流的最大新标记数"
+
+#: ../dbgpt/client/_cli.py:93
+msgid "The conversation id of the AWEL flow"
+msgstr "AWEL 工作流的对话 ID"
+
+#: ../dbgpt/client/_cli.py:101
+msgid "The json data to run AWEL flow, if set, will overwrite other options"
+msgstr "用于运行 AWEL 工作流的 JSON 数据,如果设置,将覆盖其他选项"
+
+#: ../dbgpt/client/_cli.py:109
+msgid "The extra json data to run AWEL flow."
+msgstr "用于运行 AWEL 工作流的额外 JSON 数据"
+
+#: ../dbgpt/client/_cli.py:118
+msgid "Whether use interactive mode to run AWEL flow"
+msgstr "是否使用交互模式运行 AWEL 工作流"
\ No newline at end of file
diff --git a/i18n/locales/zh_CN/LC_MESSAGES/dbgpt_model.mo b/i18n/locales/zh_CN/LC_MESSAGES/dbgpt_model.mo
index 6f08c5202..f1d59fcff 100644
Binary files a/i18n/locales/zh_CN/LC_MESSAGES/dbgpt_model.mo and b/i18n/locales/zh_CN/LC_MESSAGES/dbgpt_model.mo differ
diff --git a/i18n/locales/zh_CN/LC_MESSAGES/dbgpt_model.po b/i18n/locales/zh_CN/LC_MESSAGES/dbgpt_model.po
index 7395d669b..4ef30c0fd 100644
--- a/i18n/locales/zh_CN/LC_MESSAGES/dbgpt_model.po
+++ b/i18n/locales/zh_CN/LC_MESSAGES/dbgpt_model.po
@@ -8,7 +8,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: PACKAGE VERSION\n"
 "Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2024-03-24 22:53+0800\n"
+"POT-Creation-Date: 2024-03-26 05:29+0800\n"
 "PO-Revision-Date: 2024-03-23 16:45+0800\n"
 "Last-Translator: Automatically generated\n"
 "Language-Team: none\n"
@@ -18,69 +18,66 @@ msgstr ""
 "Content-Transfer-Encoding: 8bit\n"
 
 #: ../dbgpt/model/cluster/client.py:20
-#, fuzzy
 msgid "Default LLM Client"
-msgstr "这是 LLM 客户端。"
+msgstr "默认 LLM 客户端"
 
 #: ../dbgpt/model/cluster/client.py:23
 msgid "Default LLM client(Connect to your DB-GPT model serving)"
-msgstr ""
+msgstr "默认 LLM 客户端(连接到您的 DB-GPT 模型服务)"
 
 #: ../dbgpt/model/cluster/client.py:26 ../dbgpt/model/cluster/client.py:127
 msgid "Auto Convert Message"
-msgstr ""
+msgstr "自动转换消息"
 
 #: ../dbgpt/model/cluster/client.py:32 ../dbgpt/model/cluster/client.py:133
 msgid ""
 "Whether to auto convert the messages that are not supported by the LLM to a "
 "compatible format"
-msgstr ""
+msgstr "是否将 LLM 不支持的消息自动转换为兼容格式"
 
 #: ../dbgpt/model/cluster/client.py:113
-#, fuzzy
 msgid "Remote LLM Client"
-msgstr "这是 LLM 客户端。"
+msgstr "远程 LLM 客户端"
 
 #: ../dbgpt/model/cluster/client.py:116
 msgid "Remote LLM client(Connect to the remote DB-GPT model serving)"
-msgstr ""
+msgstr "远程 LLM 客户端(连接到远程 DB-GPT 模型服务)"
 
 #: ../dbgpt/model/cluster/client.py:119
 msgid "Controller Address"
-msgstr ""
+msgstr "控制器地址"
 
 #: ../dbgpt/model/cluster/client.py:123
 msgid "http://127.0.0.1:8000"
-msgstr ""
+msgstr "http://127.0.0.1:8000"
 
 #: ../dbgpt/model/cluster/client.py:124
 msgid "Model controller address"
-msgstr ""
+msgstr "模型控制器地址"
 
 #: ../dbgpt/model/proxy/llms/chatgpt.py:48
-#, fuzzy
 msgid "OpenAI LLM Client"
-msgstr "这是 LLM 客户端。"
+msgstr "OpenAI LLM 客户端"
 
 #: ../dbgpt/model/proxy/llms/chatgpt.py:53
 msgid "OpenAI API Key"
-msgstr ""
+msgstr "OpenAI API 密钥"
 
 #: ../dbgpt/model/proxy/llms/chatgpt.py:59
 msgid ""
 "OpenAI API Key, not required if you have set OPENAI_API_KEY environment "
 "variable."
-msgstr ""
+msgstr "OpenAI API 密钥,如果您已设置 OPENAI_API_KEY 环境变量,则不需要。"
 
 #: ../dbgpt/model/proxy/llms/chatgpt.py:64
 msgid "OpenAI API Base"
-msgstr ""
+msgstr "OpenAI API 基础"
 
 #: ../dbgpt/model/proxy/llms/chatgpt.py:70
 msgid ""
 "OpenAI API Base, not required if you have set OPENAI_API_BASE environment "
 "variable."
-msgstr ""
+msgstr "OpenAI API 基础,如果您已设置 OPENAI_API_BASE 环境变量,则不需要。"
 
 #: ../dbgpt/model/operators/llm_operator.py:66
 msgid "LLM Operator"
@@ -88,7 +85,7 @@ msgstr "LLM 算子"
 
 #: ../dbgpt/model/operators/llm_operator.py:69
 msgid "The LLM operator."
-msgstr "这个是 LLM 算子。"
+msgstr "这是 LLM 算子。"
 
 #: ../dbgpt/model/operators/llm_operator.py:72
 #: ../dbgpt/model/operators/llm_operator.py:120
@@ -127,28 +124,24 @@ msgstr "流式 LLM 算子"
 
 #: ../dbgpt/model/operators/llm_operator.py:117
 msgid "The streaming LLM operator."
-msgstr "这是流式 LLM 算子。"
+msgstr "流式 LLM 算子。"
 
 #: ../dbgpt/model/utils/chatgpt_utils.py:158
-#, fuzzy
 msgid "OpenAI Streaming Output Operator"
-msgstr "流式 LLM 算子"
+msgstr "OpenAI 流式输出算子"
 
 #: ../dbgpt/model/utils/chatgpt_utils.py:162
-#, fuzzy
 msgid "The OpenAI streaming LLM operator."
-msgstr "这是流式 LLM 算子。"
+msgstr "OpenAI 流式 LLM 算子。"
 
 #: ../dbgpt/model/utils/chatgpt_utils.py:166
-#, fuzzy
 msgid "Upstream Model Output"
-msgstr "模型输出"
+msgstr "上游模型输出"
 
 #: ../dbgpt/model/utils/chatgpt_utils.py:170
-#, fuzzy
 msgid "The model output of upstream."
-msgstr "这是模型的输出。"
+msgstr "上游模型的输出。"
 
 #: ../dbgpt/model/utils/chatgpt_utils.py:180
-msgid "The model output after transform to openai stream format"
-msgstr ""
+msgid "The model output after transformed to openai stream format."
+msgstr "转换为 OpenAI 流格式后的模型输出。"
\ No newline at end of file
diff --git a/i18n/translate_util.py b/i18n/translate_util.py
index 37fcd5e2e..f02e543b8 100644
--- a/i18n/translate_util.py
+++ b/i18n/translate_util.py
@@ -1,6 +1,4 @@
-"""Translate the po file content to Chinese using GPT-4.
-
-"""
+"""Translate the po file content to the target languages using an LLM."""
 from typing import List, Dict, Any
 import asyncio
 import os
@@ -148,6 +146,7 @@ vocabulary_map = {
        "AWEL": "AWEL",
        "RAG": "RAG",
        "DB-GPT": "DB-GPT",
+       "AWEL flow": "AWEL 工作流",
    },
    "default": {
        "Transformer": "Transformer",
@@ -159,6 +158,7 @@ vocabulary_map = {
        "AWEL": "AWEL",
        "RAG": "RAG",
        "DB-GPT": "DB-GPT",
+       "AWEL flow": "AWEL flow",
    },
 }
@@ -383,8 +383,11 @@ async def run_translate_po_dag(
        "parallel_num": parallel_num,
        "model_name": model_name,
    }
-    result = await task.call({"file_path": full_path, "ext_dict": ext_dict})
-    return result
+    try:
+        result = await task.call({"file_path": full_path, "ext_dict": ext_dict})
+        return result
+    except Exception as e:
+        print(f"Error in {module_name}: {e}")
 
 
 if __name__ == "__main__":
@@ -413,34 +416,51 @@ if __name__ == "__main__":
    }
 
    parser = argparse.ArgumentParser()
-    parser.add_argument("--modules", type=str, default=",".join(all_modules))
-    parser.add_argument("--lang", type=str, default="zh_CN")
+    parser.add_argument(
+        "--modules",
+        type=str,
+        default=",".join(all_modules),
+        help="Modules to translate, 'all' for all modules, split by ','.",
+    )
+    parser.add_argument(
+        "--lang",
+        type=str,
+        default="zh_CN",
+        help="Language to translate, 'all' for all languages, split by ','.",
+    )
    parser.add_argument("--max_new_token", type=int, default=1024)
    parser.add_argument("--parallel_num", type=int, default=10)
    parser.add_argument("--model_name", type=str, default="gpt-3.5-turbo")
 
    args = parser.parse_args()
    print(f"args: {args}")
+
    # model_name = "gpt-3.5-turbo"
    # model_name = "gpt-4"
    model_name = args.model_name
    # modules = ["app", "core", "model", "rag", "serve", "storage", "util"]
-    modules = args.modules.strip().split(",")
+    modules = all_modules if args.modules == "all" else args.modules.strip().split(",")
    max_new_token = args.max_new_token
    parallel_num = args.parallel_num
-    lang = args.lang
-    if lang not in lang_map:
-        raise ValueError(f"Language {lang} not supported.")
-    lang_desc = lang_map[lang]
-    for module in modules:
-        asyncio.run(
-            run_translate_po_dag(
-                save_translated_po_file_task,
-                lang,
-                lang_desc,
-                module,
-                max_new_token,
-                parallel_num,
-                model_name,
+    langs = lang_map.keys() if args.lang == "all" else args.lang.strip().split(",")
+
+    for lang in langs:
+        if lang not in lang_map:
+            raise ValueError(
+                f"Language {lang} is not supported; supported languages: "
+                f"{','.join(lang_map.keys())}."
+            )
+
+    for lang in langs:
+        lang_desc = lang_map[lang]
+        for module in modules:
+            asyncio.run(
+                run_translate_po_dag(
+                    save_translated_po_file_task,
+                    lang,
+                    lang_desc,
+                    module,
+                    max_new_token,
+                    parallel_num,
+                    model_name,
+                )
            )
-        )
diff --git a/setup.py b/setup.py
index a0efbd927..23990a107 100644
--- a/setup.py
+++ b/setup.py
@@ -382,6 +382,7 @@ def core_requires():
        "psutil==5.9.4",
        "colorama==0.4.6",
        "tomlkit",
+        "rich",
    ]
    # Just use by DB-GPT internal, we should find the smallest dependency set for run
    # we core unit test.
@@ -561,16 +562,17 @@ def all_datasource_requires():
    setup_spec.extras["datasource"] = [
        # "sqlparse==0.4.4",
        "pymysql",
-        # for doris
-        # mysqlclient 2.2.x have pkg-config issue on 3.10+
-        "mysqlclient==2.1.0",
    ]
+    # To install psycopg2 and mysqlclient on Ubuntu, install libpq-dev and
+    # libmysqlclient-dev first.
    setup_spec.extras["datasource_all"] = setup_spec.extras["datasource"] + [
        "pyspark",
        "pymssql",
        # install psycopg2-binary when you are in a virtual environment
        # pip install psycopg2-binary
        "psycopg2",
+        # mysqlclient 2.2.x has a pkg-config issue on Python 3.10+
+        "mysqlclient==2.1.0",
        # pydoris is too old, we should find a new package to replace it.
        "pydoris>=1.0.2,<2.0.0",
        "clickhouse-connect",
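Following the comment above about Ubuntu prerequisites, a hedged sketch of the install sequence (the extras name datasource_all comes from this setup.py; exact system package names may vary by distribution release):

    # Native build dependencies for psycopg2 and mysqlclient
    sudo apt-get install -y libpq-dev libmysqlclient-dev
    # Then install DB-GPT with the full set of datasource drivers
    pip install "dbgpt[datasource_all]"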