Mirror of https://github.com/csunny/DB-GPT.git (synced 2025-09-10 05:19:44 +00:00)
docs: Config documents & i18n supports (#2365)
Makefile (2 changes)
@@ -35,9 +35,11 @@ fmt: setup ## Format Python code
	# Format code
	$(VENV_BIN)/ruff format packages
	$(VENV_BIN)/ruff format --exclude="examples/notebook" examples
	$(VENV_BIN)/ruff format i18n
	# Sort imports
	$(VENV_BIN)/ruff check --select I --fix packages
	$(VENV_BIN)/ruff check --select I --fix --exclude="examples/notebook" examples
	$(VENV_BIN)/ruff check --select I --fix i18n

	$(VENV_BIN)/ruff check --fix packages \
		--exclude="packages/dbgpt-serve/src/**"
@@ -0,0 +1,91 @@

---
title: "Baichuan Proxy LLM Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
  "name": "BaichuanDeployModelParameters",
  "description": "Baichuan Proxy LLM",
  "documentationUrl": "https://platform.baichuan-ai.com/docs/api",
  "parameters": [
    { "name": "name", "type": "string", "required": true, "description": "The name of the model." },
    { "name": "backend", "type": "string", "required": false, "description": "The real model name to pass to the provider, default is None. If backend is None, use name as the real model name." },
    { "name": "provider", "type": "string", "required": false, "description": "The provider of the model. If the model is deployed locally, this is the inference type; if it is deployed on a third-party service, this is the platform name ('proxy/<platform>').", "defaultValue": "proxy/baichuan" },
    { "name": "verbose", "type": "boolean", "required": false, "description": "Show verbose output.", "defaultValue": "False" },
    { "name": "concurrency", "type": "integer", "required": false, "description": "Model concurrency limit", "defaultValue": "100" },
    { "name": "prompt_template", "type": "string", "required": false, "description": "Prompt template. If None, the prompt template is automatically determined from the model. Only used for local deployment." },
    { "name": "context_length", "type": "integer", "required": false, "description": "The context length of the API. If None, it is determined by the model." },
    { "name": "api_base", "type": "string", "required": false, "description": "The base URL of the Baichuan API.", "defaultValue": "${env:BAICHUAN_API_BASE:-https://api.baichuan-ai.com/v1}" },
    { "name": "api_key", "type": "string", "required": false, "description": "The API key of the Baichuan API.", "defaultValue": "${env:BAICHUAN_API_KEY}" },
    { "name": "api_type", "type": "string", "required": false, "description": "The type of the OpenAI-compatible API; if you use Azure, it can be: azure" },
    { "name": "api_version", "type": "string", "required": false, "description": "The version of the OpenAI-compatible API." },
    { "name": "http_proxy", "type": "string", "required": false, "description": "The HTTP or HTTPS proxy to use for the API." }
  ]
}} />
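The environment-variable interpolation in the defaults above (`${env:VAR}` and `${env:VAR:-fallback}`) means most deployments only need to export a key. A minimal sketch of a matching TOML model entry — the `[[models.llms]]` table name is an assumption based on DB-GPT's configuration layout, not something this diff confirms:

```toml
# Hypothetical example: register a Baichuan proxy LLM.
# Only `name` is required; `provider` defaults to "proxy/baichuan".
[[models.llms]]
name = "Baichuan4"
provider = "proxy/baichuan"
api_key = "${env:BAICHUAN_API_KEY}"  # resolved from the environment at load time
concurrency = 100                    # per-model concurrency limit
```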
@@ -0,0 +1,86 @@

---
title: "RDBMSDatasourceParameters Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
  "name": "RDBMSDatasourceParameters",
  "description": "RDBMS datasource parameters.",
  "documentationUrl": "",
  "parameters": [
    { "name": "port", "type": "integer", "required": true, "description": "Database port, e.g., 3306" },
    { "name": "user", "type": "string", "required": true, "description": "Database user to connect" },
    { "name": "database", "type": "string", "required": true, "description": "Database name" },
    { "name": "driver", "type": "string", "required": true, "description": "Database driver, e.g., mysql+pymysql" },
    { "name": "password", "type": "string", "required": false, "description": "Database password. You can write the password directly, or use an environment variable such as ${env:DBGPT_DB_PASSWORD}.", "defaultValue": "${env:DBGPT_DB_PASSWORD}" },
    { "name": "pool_size", "type": "integer", "required": false, "description": "Connection pool size, default 5", "defaultValue": "5" },
    { "name": "max_overflow", "type": "integer", "required": false, "description": "Max overflow connections, default 10", "defaultValue": "10" },
    { "name": "pool_timeout", "type": "integer", "required": false, "description": "Connection pool timeout, default 30", "defaultValue": "30" },
    { "name": "pool_recycle", "type": "integer", "required": false, "description": "Connection pool recycle, default 3600", "defaultValue": "3600" },
    { "name": "pool_pre_ping", "type": "boolean", "required": false, "description": "Connection pool pre ping, default True", "defaultValue": "True" },
    { "name": "host", "type": "string", "required": true, "description": "Database host, e.g., localhost" }
  ]
}} />
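Taken together, these fields describe a SQLAlchemy-style connection (the `driver` value, e.g. `mysql+pymysql`, selects the dialect). A sketch of how such a datasource could look in TOML — the `[service.web.database]` placement and the `type` key are illustrative assumptions:

```toml
# Hypothetical example: a MySQL-backed metadata store for the web service.
[service.web.database]
type = "mysql"
host = "localhost"
port = 3306
user = "dbgpt"
database = "dbgpt"
password = "${env:DBGPT_DB_PASSWORD}"  # keep secrets out of the file
pool_size = 5                          # SQLAlchemy connection pool size
```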
@@ -0,0 +1,91 @@

---
title: "OpenAI Compatible Proxy LLM Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
  "name": "OpenAICompatibleDeployModelParameters",
  "description": "OpenAI Compatible Proxy LLM",
  "documentationUrl": "https://platform.openai.com/docs/api-reference/chat",
  "parameters": [
    { "name": "name", "type": "string", "required": true, "description": "The name of the model." },
    { "name": "backend", "type": "string", "required": false, "description": "The real model name to pass to the provider, default is None. If backend is None, use name as the real model name." },
    { "name": "provider", "type": "string", "required": false, "description": "The provider of the model. If the model is deployed locally, this is the inference type; if it is deployed on a third-party service, this is the platform name ('proxy/<platform>').", "defaultValue": "proxy/openai" },
    { "name": "verbose", "type": "boolean", "required": false, "description": "Show verbose output.", "defaultValue": "False" },
    { "name": "concurrency", "type": "integer", "required": false, "description": "Model concurrency limit", "defaultValue": "100" },
    { "name": "prompt_template", "type": "string", "required": false, "description": "Prompt template. If None, the prompt template is automatically determined from the model. Only used for local deployment." },
    { "name": "context_length", "type": "integer", "required": false, "description": "The context length of the OpenAI API. If None, it is determined by the model." },
    { "name": "api_base", "type": "string", "required": false, "description": "The base URL of the OpenAI API.", "defaultValue": "${env:OPENAI_API_BASE:-https://api.openai.com/v1}" },
    { "name": "api_key", "type": "string", "required": false, "description": "The API key of the OpenAI API.", "defaultValue": "${env:OPENAI_API_KEY}" },
    { "name": "api_type", "type": "string", "required": false, "description": "The type of the OpenAI API; if you use Azure, it can be: azure" },
    { "name": "api_version", "type": "string", "required": false, "description": "The version of the OpenAI API." },
    { "name": "http_proxy", "type": "string", "required": false, "description": "The HTTP or HTTPS proxy to use for the OpenAI API." }
  ]
}} />
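Because this provider only assumes an OpenAI-compatible `/v1` endpoint, it also covers self-hosted servers such as vLLM. A sketch under that assumption (same hypothetical `[[models.llms]]` layout as above); note how `name` (the display name) and `backend` (the model name sent to the endpoint) can differ:

```toml
# Hypothetical example: an OpenAI-compatible endpoint served locally.
[[models.llms]]
name = "my-qwen"                       # name shown in DB-GPT
provider = "proxy/openai"
backend = "Qwen2.5-7B-Instruct"        # model name passed to the provider
api_base = "http://127.0.0.1:8000/v1"  # overrides ${env:OPENAI_API_BASE}
api_key = "${env:OPENAI_API_KEY}"
```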
@@ -0,0 +1,91 @@

---
title: "Claude Proxy LLM Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
  "name": "ClaudeDeployModelParameters",
  "description": "Claude Proxy LLM",
  "documentationUrl": "https://docs.anthropic.com/en/api/getting-started",
  "parameters": [
    { "name": "name", "type": "string", "required": true, "description": "The name of the model." },
    { "name": "backend", "type": "string", "required": false, "description": "The real model name to pass to the provider, default is None. If backend is None, use name as the real model name." },
    { "name": "provider", "type": "string", "required": false, "description": "The provider of the model. If the model is deployed locally, this is the inference type; if it is deployed on a third-party service, this is the platform name ('proxy/<platform>').", "defaultValue": "proxy/claude" },
    { "name": "verbose", "type": "boolean", "required": false, "description": "Show verbose output.", "defaultValue": "False" },
    { "name": "concurrency", "type": "integer", "required": false, "description": "Model concurrency limit", "defaultValue": "100" },
    { "name": "prompt_template", "type": "string", "required": false, "description": "Prompt template. If None, the prompt template is automatically determined from the model. Only used for local deployment." },
    { "name": "context_length", "type": "integer", "required": false, "description": "The context length of the API. If None, it is determined by the model." },
    { "name": "api_base", "type": "string", "required": false, "description": "The base URL of the Claude API.", "defaultValue": "${env:ANTHROPIC_BASE_URL:-https://api.anthropic.com}" },
    { "name": "api_key", "type": "string", "required": false, "description": "The API key of the Claude API.", "defaultValue": "${env:ANTHROPIC_API_KEY}" },
    { "name": "api_type", "type": "string", "required": false, "description": "The type of the OpenAI-compatible API; if you use Azure, it can be: azure" },
    { "name": "api_version", "type": "string", "required": false, "description": "The version of the OpenAI-compatible API." },
    { "name": "http_proxy", "type": "string", "required": false, "description": "The HTTP or HTTPS proxy to use for the API." }
  ]
}} />
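A matching sketch for Claude, again assuming the hypothetical `[[models.llms]]` layout; the explicit limits show where the shared tuning knobs apply:

```toml
# Hypothetical example: Claude with explicit tuning.
[[models.llms]]
name = "claude-3-5-sonnet-20241022"
provider = "proxy/claude"
api_key = "${env:ANTHROPIC_API_KEY}"
concurrency = 10         # lower than the default of 100
context_length = 200000  # skip model-based auto-detection
```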
docs/docs/config-reference/config_applicationconfig_72fd1c.mdx (new file, 186 lines)
@@ -0,0 +1,186 @@

---
title: "ApplicationConfig Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
  "name": "ApplicationConfig",
  "description": "Application configuration.",
  "documentationUrl": "",
  "parameters": [
    { "name": "system", "type": "SystemParameters", "required": false, "description": "System configuration", "nestedTypes": [{ "type": "link", "text": "systemparameters configuration", "url": "././config_systemparameters_fd92b9" }], "defaultValue": "SystemParameters" },
    { "name": "service", "type": "ServiceConfig", "required": false, "description": "", "nestedTypes": [{ "type": "link", "text": "serviceconfig configuration", "url": "././config_serviceconfig_81a10f" }], "defaultValue": "ServiceConfig" },
    { "name": "models", "type": "ModelsDeployParameters", "required": false, "description": "Model deployment configuration", "nestedTypes": [{ "type": "link", "text": "modelsdeployparameters configuration", "url": "././parameter_modelsdeployparameters_5c7bc5" }], "defaultValue": "ModelsDeployParameters" },
    { "name": "serves", "type": "BaseServeConfig", "required": false, "description": "Serve configuration", "defaultValue": "[]",
      "nestedTypes": [
        { "type": "link", "text": "datasource configuration", "url": "././config_serveconfig_63f1e9" },
        { "type": "link", "text": "agent/chat configuration", "url": "././config_serveconfig_adbd6f" },
        { "type": "link", "text": "conversation configuration", "url": "././config_serveconfig_313252" },
        { "type": "link", "text": "dbgpts_hub configuration", "url": "././config_serveconfig_ec2d70" },
        { "type": "link", "text": "dbgpts_my configuration", "url": "././config_serveconfig_1a9284" },
        { "type": "link", "text": "evaluate configuration", "url": "././config_serveconfig_8839e0" },
        { "type": "link", "text": "feedback configuration", "url": "././config_serveconfig_fa1f35" },
        { "type": "link", "text": "file configuration", "url": "././config_serveconfig_cb64c6" },
        { "type": "link", "text": "flow configuration", "url": "././config_serveconfig_c0b589" },
        { "type": "link", "text": "libro configuration", "url": "././config_serveconfig_b1c2b9" },
        { "type": "link", "text": "model configuration", "url": "././config_serveconfig_7a0577" },
        { "type": "link", "text": "prompt configuration", "url": "././config_serveconfig_854dad" },
        { "type": "link", "text": "rag configuration", "url": "././config_serveconfig_7889f9" }
      ] },
    { "name": "rag", "type": "RagParameters", "required": false, "description": "Rag Knowledge Parameters", "nestedTypes": [{ "type": "link", "text": "ragparameters configuration", "url": "././config_ragparameters_7483b2" }], "defaultValue": "RagParameters" },
    { "name": "trace", "type": "TracerParameters", "required": false, "description": "Global tracer configuration", "nestedTypes": [{ "type": "link", "text": "tracerparameters configuration", "url": "././tracer_impl_tracerparameters_f8f272" }], "defaultValue": "TracerParameters" },
    { "name": "log", "type": "LoggingParameters", "required": false, "description": "Logging configuration", "nestedTypes": [{ "type": "link", "text": "loggingparameters configuration", "url": "././utils_loggingparameters_4ba5c6" }], "defaultValue": "LoggingParameters" },
    { "name": "hooks", "type": "HookConfig", "required": false, "description": "Configuration hooks, which will be executed before the configuration loading", "nestedTypes": [{ "type": "link", "text": "hookconfig configuration", "url": "././manager_hookconfig_d9a481" }], "defaultValue": "[]" }
  ]
}} />
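ApplicationConfig is the root of the configuration file: each field above becomes one top-level TOML table. A hypothetical skeleton (table names follow the field names; the individual keys inside each table are illustrative assumptions):

```toml
# Hypothetical skeleton: one top-level table per ApplicationConfig field.
[system]
language = "en"

[service.web]
port = 5670

# [[models.llms]] / [[models.embeddings]] entries describe deployed models.
[models]

[rag]
similarity_top_k = 10

[log]
level = "INFO"  # key name assumed from LoggingParameters
```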
docs/docs/config-reference/config_ragparameters_7483b2.mdx (new file, 98 lines)
@@ -0,0 +1,98 @@

---
title: "RagParameters Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
  "name": "RagParameters",
  "description": "Rag configuration.",
  "documentationUrl": "",
  "parameters": [
    { "name": "chunk_overlap", "type": "integer", "required": false, "description": "The overlap between adjacent chunks when splitting documents, default 50", "defaultValue": "50" },
    { "name": "similarity_top_k", "type": "integer", "required": false, "description": "knowledge search top k", "defaultValue": "10" },
    { "name": "similarity_score_threshold", "type": "integer", "required": false, "description": "knowledge search top similarity score", "defaultValue": "0.0" },
    { "name": "query_rewrite", "type": "boolean", "required": false, "description": "knowledge search rewrite", "defaultValue": "False" },
    { "name": "max_chunks_once_load", "type": "integer", "required": false, "description": "knowledge max chunks once load", "defaultValue": "10" },
    { "name": "max_threads", "type": "integer", "required": false, "description": "knowledge max load thread", "defaultValue": "1" },
    { "name": "rerank_top_k", "type": "integer", "required": false, "description": "knowledge rerank top k", "defaultValue": "3" },
    { "name": "storage", "type": "StorageConfig", "required": false, "description": "Storage configuration", "nestedTypes": [{ "type": "link", "text": "storageconfig configuration", "url": "././config_storageconfig_028579" }], "defaultValue": "StorageConfig" },
    { "name": "graph_search_top_k", "type": "integer", "required": false, "description": "knowledge graph search top k", "defaultValue": "3" },
    { "name": "graph_community_summary_enabled", "type": "boolean", "required": false, "description": "Whether graph community summary is enabled", "defaultValue": "False" },
    { "name": "chunk_size", "type": "integer", "required": false, "description": "The size of each chunk when splitting documents, default 500", "defaultValue": "500" }
  ]
}} />
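A sketch of a `[rag]` table exercising these knobs (key names come straight from the parameters above; the table name follows the ApplicationConfig field):

```toml
# Hypothetical example: RAG retrieval and chunking settings.
[rag]
chunk_size = 500
chunk_overlap = 50
similarity_top_k = 10
query_rewrite = false
max_threads = 4   # parallelize document loading
rerank_top_k = 3
```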
docs/docs/config-reference/config_serveconfig_1a9284.mdx (new file, 20 lines)
@@ -0,0 +1,20 @@

---
title: "My dbgpts Serve Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
  "name": "ServeConfig",
  "description": "This configuration is for the my dbgpts serve module.",
  "documentationUrl": null,
  "parameters": [
    { "name": "api_keys", "type": "string", "required": false, "description": "API keys for the endpoint, if None, allow all" }
  ]
}} />
docs/docs/config-reference/config_serveconfig_313252.mdx (new file, 26 lines)
@@ -0,0 +1,26 @@

---
title: "Conversation Serve Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
  "name": "ServeConfig",
  "description": "This configuration is for the conversation serve module.",
  "documentationUrl": null,
  "parameters": [
    { "name": "default_model", "type": "string", "required": false, "description": "Default model for the conversation" },
    { "name": "api_keys", "type": "string", "required": false, "description": "API keys for the endpoint, if None, allow all" }
  ]
}} />
docs/docs/config-reference/config_serveconfig_63f1e9.mdx (new file, 20 lines)
@@ -0,0 +1,20 @@

---
title: "Datasource Serve Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
  "name": "ServeConfig",
  "description": "This configuration is for the datasource serve module.",
  "documentationUrl": null,
  "parameters": [
    { "name": "api_keys", "type": "string", "required": false, "description": "API keys for the endpoint, if None, allow all" }
  ]
}} />
docs/docs/config-reference/config_serveconfig_7889f9.mdx (new file, 90 lines)
@@ -0,0 +1,90 @@

---
title: "RAG Serve Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
  "name": "ServeConfig",
  "description": "This configuration is for the RAG serve module.",
  "documentationUrl": null,
  "parameters": [
    { "name": "embedding_model", "type": "string", "required": false, "description": "Embedding Model", "defaultValue": "None" },
    { "name": "rerank_model", "type": "string", "required": false, "description": "Rerank Model", "defaultValue": "None" },
    { "name": "chunk_size", "type": "integer", "required": false, "description": "The size of each chunk when splitting documents, default 500", "defaultValue": "500" },
    { "name": "chunk_overlap", "type": "integer", "required": false, "description": "The overlap between adjacent chunks when splitting documents, default 50", "defaultValue": "50" },
    { "name": "similarity_top_k", "type": "integer", "required": false, "description": "knowledge search top k", "defaultValue": "10" },
    { "name": "similarity_score_threshold", "type": "integer", "required": false, "description": "knowledge search top similarity score", "defaultValue": "0.0" },
    { "name": "query_rewrite", "type": "boolean", "required": false, "description": "knowledge search rewrite", "defaultValue": "False" },
    { "name": "max_chunks_once_load", "type": "integer", "required": false, "description": "knowledge max chunks once load", "defaultValue": "10" },
    { "name": "max_threads", "type": "integer", "required": false, "description": "knowledge max load thread", "defaultValue": "1" },
    { "name": "rerank_top_k", "type": "integer", "required": false, "description": "knowledge rerank top k", "defaultValue": "3" },
    { "name": "api_keys", "type": "string", "required": false, "description": "API keys for the endpoint, if None, allow all" }
  ]
}} />
docs/docs/config-reference/config_serveconfig_7a0577.mdx (new file, 30 lines)
@@ -0,0 +1,30 @@

---
title: "Model Serve Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
  "name": "ServeConfig",
  "description": "This configuration is for the model serve module.",
  "documentationUrl": null,
  "parameters": [
    { "name": "model_storage", "type": "string", "required": false, "description": "The storage type of model configurations; if None, use the default storage (the current database). When you run in light mode, it will not use any storage.", "validValues": ["database", "memory"] },
    { "name": "api_keys", "type": "string", "required": false, "description": "API keys for the endpoint, if None, allow all" }
  ]
}} />
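Serve modules are configured as entries in the ApplicationConfig `serves` list. A sketch, assuming a `[[serves]]` TOML list with a `type` discriminator (an assumption for illustration, not confirmed by this diff):

```toml
# Hypothetical example: model serve with restricted access.
[[serves]]
type = "model"
model_storage = "database"  # valid values: "database" or "memory"
api_keys = "my-secret-key"  # if unset, all callers are allowed
```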
docs/docs/config-reference/config_serveconfig_854dad.mdx (new file, 32 lines)
@@ -0,0 +1,32 @@

---
title: "Prompt Serve Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
  "name": "ServeConfig",
  "description": "This configuration is for the prompt serve module.",
  "documentationUrl": null,
  "parameters": [
    { "name": "default_user", "type": "string", "required": false, "description": "Default user name for prompt" },
    { "name": "default_sys_code", "type": "string", "required": false, "description": "Default system code for prompt" },
    { "name": "api_keys", "type": "string", "required": false, "description": "API keys for the endpoint, if None, allow all" }
  ]
}} />
docs/docs/config-reference/config_serveconfig_8839e0.mdx (new file, 34 lines)
@@ -0,0 +1,34 @@

---
title: "Evaluate Serve Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
  "name": "ServeConfig",
  "description": "This configuration is for the evaluate serve module.",
  "documentationUrl": null,
  "parameters": [
    { "name": "embedding_model", "type": "string", "required": false, "description": "Embedding Model", "defaultValue": "None" },
    { "name": "similarity_top_k", "type": "integer", "required": false, "description": "knowledge search top k", "defaultValue": "10" },
    { "name": "api_keys", "type": "string", "required": false, "description": "API keys for the endpoint, if None, allow all" }
  ]
}} />
docs/docs/config-reference/config_serveconfig_adbd6f.mdx (new file, 20 lines)
@@ -0,0 +1,20 @@

---
title: "ServeConfig Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
  "name": "ServeConfig",
  "description": "Parameters for the serve command",
  "documentationUrl": "",
  "parameters": [
    { "name": "api_keys", "type": "string", "required": false, "description": "API keys for the endpoint, if None, allow all" }
  ]
}} />
docs/docs/config-reference/config_serveconfig_b1c2b9.mdx (new file, 20 lines)
@@ -0,0 +1,20 @@

---
title: "Libro Serve Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
  "name": "ServeConfig",
  "description": "This configuration is for the libro serve module.",
  "documentationUrl": null,
  "parameters": [
    { "name": "api_keys", "type": "string", "required": false, "description": "API keys for the endpoint, if None, allow all" }
  ]
}} />
docs/docs/config-reference/config_serveconfig_c0b589.mdx (new file, 33 lines)
@@ -0,0 +1,33 @@

---
title: "AWEL Flow Serve Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
  "name": "ServeConfig",
  "description": "This configuration is for the flow serve module.",
  "documentationUrl": null,
  "parameters": [
    { "name": "load_dbgpts_interval", "type": "integer", "required": false, "description": "Interval to load dbgpts from installed packages", "defaultValue": "5" },
    { "name": "encrypt_key", "type": "string", "required": false, "description": "The key to encrypt the data" },
    { "name": "api_keys", "type": "string", "required": false, "description": "API keys for the endpoint, if None, allow all" }
  ]
}} />
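A similar sketch for the flow serve module, under the same assumed `[[serves]]` shape:

```toml
# Hypothetical example: AWEL flow serve with data encryption enabled.
[[serves]]
type = "flow"
load_dbgpts_interval = 5                  # rescan interval for installed dbgpts packages
encrypt_key = "${env:DBGPT_ENCRYPT_KEY}"  # key used to encrypt stored data
```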
docs/docs/config-reference/config_serveconfig_cb64c6.mdx (new file, 74 lines)
@@ -0,0 +1,74 @@

---
title: "File Serve Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
  "name": "ServeConfig",
  "description": "This configuration is for the file serve module. In DB-GPT, you can store your files in the file server.",
  "documentationUrl": null,
  "parameters": [
    { "name": "check_hash", "type": "boolean", "required": false, "description": "Check the hash of the file when downloading", "defaultValue": "True" },
    { "name": "host", "type": "string", "required": false, "description": "The host of the file server" },
    { "name": "port", "type": "integer", "required": false, "description": "The port of the file server, default is 5670", "defaultValue": "5670" },
    { "name": "download_chunk_size", "type": "integer", "required": false, "description": "The chunk size when downloading the file", "defaultValue": "1048576" },
    { "name": "save_chunk_size", "type": "integer", "required": false, "description": "The chunk size when saving the file", "defaultValue": "1048576" },
    { "name": "transfer_chunk_size", "type": "integer", "required": false, "description": "The chunk size when transferring the file", "defaultValue": "1048576" },
    { "name": "transfer_timeout", "type": "integer", "required": false, "description": "The timeout when transferring the file", "defaultValue": "360" },
    { "name": "local_storage_path", "type": "string", "required": false, "description": "The local storage path" },
    { "name": "api_keys", "type": "string", "required": false, "description": "API keys for the endpoint, if None, allow all" }
  ]
}} />
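The file serve parameters tune transfer behavior; a sketch under the same assumed `[[serves]]` shape:

```toml
# Hypothetical example: file serve tuned for large uploads.
[[serves]]
type = "file"
check_hash = true                         # verify file hash on download
download_chunk_size = 1048576             # 1 MiB chunks
transfer_timeout = 360
local_storage_path = "/data/dbgpt-files"  # illustrative path
```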
docs/docs/config-reference/config_serveconfig_ec2d70.mdx (new file, 20 lines)
@@ -0,0 +1,20 @@

---
title: "Hub dbgpts Serve Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
  "name": "ServeConfig",
  "description": "This configuration is for the hub dbgpts serve module.",
  "documentationUrl": null,
  "parameters": [
    { "name": "api_keys", "type": "string", "required": false, "description": "API keys for the endpoint, if None, allow all" }
  ]
}} />
docs/docs/config-reference/config_serveconfig_fa1f35.mdx (new file, 20 lines)
@@ -0,0 +1,20 @@

---
title: "Feedback Serve Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
  "name": "ServeConfig",
  "description": "This configuration is for the feedback serve module.",
  "documentationUrl": null,
  "parameters": [
    { "name": "api_keys", "type": "string", "required": false, "description": "API keys for the endpoint, if None, allow all" }
  ]
}} />
docs/docs/config-reference/config_serviceconfig_81a10f.mdx (new file, 42 lines)
@@ -0,0 +1,42 @@

---
title: "ServiceConfig Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
  "name": "ServiceConfig",
  "description": "ServiceConfig(web: dbgpt_app.config.ServiceWebParameters = <factory>, model: dbgpt.model.parameter.ModelServiceConfig = <factory>)",
  "documentationUrl": "",
  "parameters": [
    { "name": "model", "type": "ModelServiceConfig", "required": false, "description": "Model service configuration", "nestedTypes": [{ "type": "link", "text": "modelserviceconfig configuration", "url": "././parameter_modelserviceconfig_20d67d" }], "defaultValue": "ModelServiceConfig" },
    { "name": "web", "type": "ServiceWebParameters", "required": false, "description": "Web service configuration", "nestedTypes": [{ "type": "link", "text": "servicewebparameters configuration", "url": "././config_servicewebparameters_3ab7fd" }], "defaultValue": "ServiceWebParameters" }
  ]
}} />
@@ -0,0 +1,224 @@

---
title: "ServiceWebParameters Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
  "name": "ServiceWebParameters",
  "description": "ServiceWebParameters(host: str = '0.0.0.0', port: int = 5670, light: Optional[bool] = False, controller_addr: Optional[str] = None, database: dbgpt.datasource.parameter.BaseDatasourceParameters = <factory>, model_storage: Optional[str] = None, trace: Optional[dbgpt.util.tracer.tracer_impl.TracerParameters] = None, log: Optional[dbgpt.util.utils.LoggingParameters] = None, disable_alembic_upgrade: Optional[bool] = False, db_ssl_verify: Optional[bool] = False, default_thread_pool_size: Optional[int] = None, remote_embedding: Optional[bool] = False, remote_rerank: Optional[bool] = False, awel_dirs: Optional[str] = None, new_web_ui: bool = True, model_cache: dbgpt.storage.cache.manager.ModelCacheParameters = <factory>, embedding_model_max_seq_len: Optional[int] = 512)",
  "documentationUrl": "",
  "parameters": [
    { "name": "port", "type": "integer", "required": false, "description": "Webserver deploy port, default is 5670", "defaultValue": "5670" },
    { "name": "light", "type": "boolean", "required": false, "description": "Run Webserver in light mode", "defaultValue": "False" },
    { "name": "controller_addr", "type": "string", "required": false, "description": "The Model controller address to connect. If None, read the model controller address from the environment key `MODEL_SERVER`." },
    { "name": "database", "type": "BaseDatasourceParameters", "required": false, "description": "Database connection config; currently supports SQLite, OceanBase and MySQL", "defaultValue": "SQLiteConnectorParameters",
      "nestedTypes": [
        { "type": "link", "text": "rdbmsdatasourceparameters configuration", "url": "././base_rdbmsdatasourceparameters_4f774f" },
        { "type": "link", "text": "sqlite configuration", "url": "././conn_sqlite_sqliteconnectorparameters_82c8b5" },
        { "type": "link", "text": "tugraph configuration", "url": "././conn_tugraph_tugraphparameters_0c844e" },
        { "type": "link", "text": "spark configuration", "url": "././conn_spark_sparkparameters_174bbc" },
        { "type": "link", "text": "clickhouse configuration", "url": "././conn_clickhouse_clickhouseparameters_4a1237" },
        { "type": "link", "text": "doris configuration", "url": "././conn_doris_dorisparameters_e33c53" },
        { "type": "link", "text": "duckdb configuration", "url": "././conn_duckdb_duckdbconnectorparameters_c672c7" },
        { "type": "link", "text": "hive configuration", "url": "././conn_hive_hiveparameters_ec3601" },
        { "type": "link", "text": "mssql configuration", "url": "././conn_mssql_mssqlparameters_d79d1c" },
        { "type": "link", "text": "mysql configuration", "url": "././conn_mysql_mysqlparameters_4393c4" },
        { "type": "link", "text": "oceanbase configuration", "url": "././conn_oceanbase_oceanbaseparameters_260d2d" },
        { "type": "link", "text": "postgresql configuration", "url": "././conn_postgresql_postgresqlparameters_22efa5" },
        { "type": "link", "text": "starrocks configuration", "url": "././conn_starrocks_starrocksparameters_e511f7" },
        { "type": "link", "text": "vertica configuration", "url": "././conn_vertica_verticaparameters_c712b8" }
      ] },
    { "name": "model_storage", "type": "string", "required": false, "description": "The storage type of model configurations; if None, use the default storage (the current database). When you run in light mode, it will not use any storage.", "validValues": ["database", "memory"] },
    { "name": "trace", "type": "TracerParameters", "required": false, "description": "Tracer config for the web server; if None, use the global tracer config", "nestedTypes": [{ "type": "link", "text": "tracerparameters configuration", "url": "././tracer_impl_tracerparameters_f8f272" }] },
    { "name": "log", "type": "LoggingParameters", "required": false, "description": "Logging configuration for the web server; if None, use the global config", "nestedTypes": [{ "type": "link", "text": "loggingparameters configuration", "url": "././utils_loggingparameters_4ba5c6" }] },
    { "name": "disable_alembic_upgrade", "type": "boolean", "required": false, "description": "Whether to disable alembic to initialize and upgrade database metadata", "defaultValue": "False" },
    { "name": "db_ssl_verify", "type": "boolean", "required": false, "description": "Whether to verify the SSL certificate of the database", "defaultValue": "False" },
    { "name": "default_thread_pool_size", "type": "integer", "required": false, "description": "The default thread pool size; if None, use the default config of the Python thread pool" },
    { "name": "remote_embedding", "type": "boolean", "required": false, "description": "Whether to enable remote embedding models. If True, you need to start an embedding model through `dbgpt start worker --worker_type text2vec --model_name xxx --model_path xxx`", "defaultValue": "False" },
    { "name": "remote_rerank", "type": "boolean", "required": false, "description": "Whether to enable remote rerank models. If True, you need to start a rerank model through `dbgpt start worker --worker_type text2vec --rerank --model_name xxx --model_path xxx`", "defaultValue": "False" },
    { "name": "awel_dirs", "type": "string", "required": false, "description": "The directories to search for AWEL files, split by `,`" },
    { "name": "new_web_ui", "type": "boolean", "required": false, "description": "Whether to use the new web UI, default is True", "defaultValue": "True" },
    { "name": "model_cache", "type": "ModelCacheParameters", "required": false, "description": "Model cache configuration", "nestedTypes": [{ "type": "link", "text": "modelcacheparameters configuration", "url": "././manager_modelcacheparameters_152367" }], "defaultValue": "ModelCacheParameters" },
    { "name": "embedding_model_max_seq_len", "type": "integer", "required": false, "description": "The max sequence length of the embedding model, default is 512", "defaultValue": "512" },
    { "name": "host", "type": "string", "required": false, "description": "Webserver deploy host", "defaultValue": "0.0.0.0" }
  ]
}} />
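A sketch of `[service.web]` for a light-mode deployment that delegates models to a remote cluster (the nested `[service.web.database]` keys are assumptions based on the SQLite connector parameters):

```toml
# Hypothetical example: light-mode web service with remote model workers.
[service.web]
host = "0.0.0.0"
port = 5670
light = true                               # no local model workers
controller_addr = "http://127.0.0.1:8000"  # else read from MODEL_SERVER
remote_embedding = true                    # embeddings via a text2vec worker

[service.web.database]
type = "sqlite"
path = "pilot/meta_data/dbgpt.db"  # `path` key assumed from the SQLite connector
```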
docs/docs/config-reference/config_storageconfig_028579.mdx (new file, 35 lines)
@@ -0,0 +1,35 @@

---
title: "StorageConfig Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
  "name": "StorageConfig",
  "description": "StorageConfig(vector: dbgpt.storage.vector_store.base.VectorStoreConfig = <factory>, graph: dbgpt_ext.storage.knowledge_graph.knowledge_graph.BuiltinKnowledgeGraphConfig = <factory>, full_text: dbgpt_ext.storage.knowledge_graph.knowledge_graph.BuiltinKnowledgeGraphConfig = <factory>)",
  "documentationUrl": "",
  "parameters": [
    { "name": "graph", "type": "BuiltinKnowledgeGraphConfig", "required": false, "description": "default graph type", "defaultValue": "type='TuGraph' name='dbgpt_collection' embedding_fn=None max_chunks_once_load=10 max_threads=1 llm_client=None model_name=None" },
    { "name": "full_text", "type": "BuiltinKnowledgeGraphConfig", "required": false, "description": "default full-text type", "defaultValue": "type='TuGraph' name='dbgpt_collection' embedding_fn=None max_chunks_once_load=10 max_threads=1 llm_client=None model_name=None" },
    { "name": "vector", "type": "VectorStoreConfig", "required": false, "description": "default vector type", "defaultValue": "type=None name='dbgpt_collection' embedding_fn=None max_chunks_once_load=10 max_threads=1 user=None password=None topk=5 score_threshold=0.3" }
  ]
}} />
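A sketch of selecting storage backends (the nested table and key names are assumptions inferred from the vector/graph config types above):

```toml
# Hypothetical example: Chroma for vectors, TuGraph for the knowledge graph.
[rag.storage.vector]
type = "chroma"

[rag.storage.graph]
type = "TuGraph"
```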
@@ -0,0 +1,21 @@

---
title: "StorageGraphConfig Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
  "name": "StorageGraphConfig",
  "description": "StorageGraphConfig(type: str = 'TuGraph')",
  "documentationUrl": "",
  "parameters": [
    { "name": "type", "type": "string", "required": false, "description": "default graph type", "defaultValue": "TuGraph" }
  ]
}} />
@@ -0,0 +1,21 @@

---
title: "StorageVectorConfig Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
  "name": "StorageVectorConfig",
  "description": "StorageVectorConfig(type: str = 'Chroma')",
  "documentationUrl": "",
  "parameters": [
    { "name": "type", "type": "string", "required": false, "description": "default vector type", "defaultValue": "Chroma" }
  ]
}} />
@@ -0,0 +1,57 @@

---
title: "SystemParameters Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
  "name": "SystemParameters",
  "description": "System parameters.",
  "documentationUrl": "",
  "parameters": [
    { "name": "log_level", "type": "string", "required": false, "description": "Logging level", "defaultValue": "INFO", "validValues": ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"] },
    { "name": "api_keys", "type": "string", "required": false, "description": "API keys", "defaultValue": "[]" },
    { "name": "encrypt_key", "type": "string", "required": false, "description": "The key to encrypt the data", "defaultValue": "your_secret_key" },
    { "name": "language", "type": "string", "required": false, "description": "Language setting", "defaultValue": "en", "validValues": ["en", "zh", "fr", "ja", "ko", "ru"] }
  ]
}} />
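These map directly onto a `[system]` table:

```toml
# Hypothetical example: system-wide settings.
[system]
language = "zh"   # one of: en, zh, fr, ja, ko, ru
log_level = "DEBUG"
encrypt_key = "${env:DBGPT_ENCRYPT_KEY}"  # replace the insecure default
```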
@@ -0,0 +1,73 @@

---
title: "ClickHouse datasource Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
  "name": "ClickhouseParameters",
  "description": "Columnar database for high-performance analytics and real-time queries.",
  "documentationUrl": null,
  "parameters": [
    { "name": "port", "type": "integer", "required": true, "description": "Database port, e.g., 3306" },
    { "name": "user", "type": "string", "required": true, "description": "Database user to connect" },
    { "name": "database", "type": "string", "required": true, "description": "Database name" },
    { "name": "password", "type": "string", "required": false, "description": "Database password. You can write the password directly, or use an environment variable such as ${env:DBGPT_DB_PASSWORD}.", "defaultValue": "${env:DBGPT_DB_PASSWORD}" },
    { "name": "http_pool_maxsize", "type": "integer", "required": false, "description": "HTTP pool max size", "defaultValue": "16" },
    { "name": "http_pool_num_pools", "type": "integer", "required": false, "description": "HTTP pool number of pools", "defaultValue": "12" },
    { "name": "connect_timeout", "type": "integer", "required": false, "description": "Database connect timeout, default 15s", "defaultValue": "15" },
    { "name": "distributed_ddl_task_timeout", "type": "integer", "required": false, "description": "Distributed DDL task timeout, default 300s", "defaultValue": "300" },
    { "name": "host", "type": "string", "required": true, "description": "Database host, e.g., localhost" }
  ]
}} />
@@ -0,0 +1,87 @@

---
title: "Apache Doris datasource Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
  "name": "DorisParameters",
  "description": "A new-generation open-source real-time data warehouse.",
  "documentationUrl": null,
  "parameters": [
    { "name": "port", "type": "integer", "required": true, "description": "Database port, e.g., 3306" },
    { "name": "user", "type": "string", "required": true, "description": "Database user to connect" },
    { "name": "database", "type": "string", "required": true, "description": "Database name" },
    { "name": "driver", "type": "string", "required": false, "description": "Driver name for Doris, default is doris.", "defaultValue": "doris" },
    { "name": "password", "type": "string", "required": false, "description": "Database password. You can write the password directly, or use an environment variable such as ${env:DBGPT_DB_PASSWORD}.", "defaultValue": "${env:DBGPT_DB_PASSWORD}" },
    { "name": "pool_size", "type": "integer", "required": false, "description": "Connection pool size, default 5", "defaultValue": "5" },
    { "name": "max_overflow", "type": "integer", "required": false, "description": "Max overflow connections, default 10", "defaultValue": "10" },
    { "name": "pool_timeout", "type": "integer", "required": false, "description": "Connection pool timeout, default 30", "defaultValue": "30" },
    { "name": "pool_recycle", "type": "integer", "required": false, "description": "Connection pool recycle, default 3600", "defaultValue": "3600" },
    { "name": "pool_pre_ping", "type": "boolean", "required": false, "description": "Connection pool pre ping, default True", "defaultValue": "True" },
    { "name": "host", "type": "string", "required": true, "description": "Database host, e.g., localhost" }
  ]
}} />
@@ -0,0 +1,27 @@

---
title: "DuckDB datasource Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
  "name": "DuckDbConnectorParameters",
  "description": "In-memory analytical database with efficient query processing.",
  "documentationUrl": null,
  "parameters": [
    { "name": "driver", "type": "string", "required": false, "description": "Driver name for DuckDB, default is duckdb.", "defaultValue": "duckdb" },
    { "name": "path", "type": "string", "required": true, "description": "Path to the DuckDB file." }
  ]
}} />
@@ -0,0 +1,83 @@

---
title: "Apache Hive datasource Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
  "name": "HiveParameters",
  "description": "A distributed fault-tolerant data warehouse system.",
  "documentationUrl": null,
  "parameters": [
    { "name": "port", "type": "integer", "required": false, "description": "Hive server port, default 10000", "defaultValue": "10000" },
    { "name": "database", "type": "string", "required": false, "description": "Database name, default 'default'", "defaultValue": "default" },
    { "name": "auth", "type": "string", "required": false, "description": "Authentication mode: NONE, NOSASL, LDAP, KERBEROS, CUSTOM", "defaultValue": "NONE", "validValues": ["NONE", "NOSASL", "LDAP", "KERBEROS", "CUSTOM"] },
    { "name": "username", "type": "string", "required": false, "description": "Username for authentication", "defaultValue": "" },
    { "name": "password", "type": "string", "required": false, "description": "Password for LDAP or CUSTOM auth", "defaultValue": "" },
    { "name": "kerberos_service_name", "type": "string", "required": false, "description": "Kerberos service name", "defaultValue": "hive" },
    { "name": "transport_mode", "type": "string", "required": false, "description": "Transport mode: binary or http", "defaultValue": "binary" },
    { "name": "driver", "type": "string", "required": false, "description": "Driver name for Hive, default is hive.", "defaultValue": "hive" },
    { "name": "host", "type": "string", "required": true, "description": "Hive server host" }
  ]
}} />
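Hive is the one datasource here with pluggable authentication; a sketch of an LDAP-backed connection (the `[[datasources]]` table name is purely illustrative, since datasources are typically registered through the DB-GPT web UI or API):

```toml
# Hypothetical example: Hive datasource over LDAP authentication.
[[datasources]]            # illustrative table name
type = "hive"
host = "hive.internal"
port = 10000
database = "default"
auth = "LDAP"              # NONE, NOSASL, LDAP, KERBEROS, or CUSTOM
username = "analyst"
password = "${env:HIVE_PASSWORD}"
transport_mode = "binary"
```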
@@ -0,0 +1,87 @@
|
||||
---
|
||||
title: "MSSQL datasource Configuration"
|
||||
---
|
||||
|
||||
import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";
|
||||
|
||||
<ConfigDetail config={{
|
||||
"name": "MSSQLParameters",
|
||||
"description": "Powerful, scalable, secure relational database system by Microsoft.",
|
||||
"documentationUrl": null,
|
||||
"parameters": [
|
||||
{
|
||||
"name": "port",
|
||||
"type": "integer",
|
||||
"required": true,
|
||||
"description": "Database port, e.g., 3306"
|
||||
},
|
||||
{
|
||||
"name": "user",
|
||||
"type": "string",
|
||||
"required": true,
|
||||
"description": "Database user to connect"
|
||||
},
|
||||
{
|
||||
"name": "database",
|
||||
"type": "string",
|
||||
"required": true,
|
||||
"description": "Database name"
|
||||
},
|
||||
{
|
||||
"name": "driver",
|
||||
"type": "string",
|
||||
"required": false,
|
||||
"description": "Driver name for MSSQL, default is mssql+pymssql.",
|
||||
"defaultValue": "mssql+pymssql"
|
||||
},
|
||||
{
|
||||
"name": "password",
|
||||
"type": "string",
|
||||
"required": false,
|
||||
"description": "Database password, you can write your password directly, of course, you can also use environment variables, such as ${env:DBGPT_DB_PASSWORD}",
|
||||
"defaultValue": "${env:DBGPT_DB_PASSWORD}"
|
||||
},
|
||||
{
|
||||
"name": "pool_size",
|
||||
"type": "integer",
|
||||
"required": false,
|
||||
"description": "Connection pool size, default 5",
|
||||
"defaultValue": "5"
|
||||
},
|
||||
{
|
||||
"name": "max_overflow",
|
||||
"type": "integer",
|
||||
"required": false,
|
||||
"description": "Max overflow connections, default 10",
|
||||
"defaultValue": "10"
|
||||
},
|
||||
{
|
||||
"name": "pool_timeout",
|
||||
"type": "integer",
|
||||
"required": false,
|
||||
"description": "Connection pool timeout, default 30",
|
||||
"defaultValue": "30"
|
||||
},
|
||||
{
|
||||
"name": "pool_recycle",
|
||||
"type": "integer",
|
||||
"required": false,
|
||||
"description": "Connection pool recycle, default 3600",
|
||||
"defaultValue": "3600"
|
||||
},
|
||||
{
|
||||
"name": "pool_pre_ping",
|
||||
"type": "boolean",
|
||||
"required": false,
|
||||
"description": "Connection pool pre ping, default True",
|
||||
"defaultValue": "True"
|
||||
},
|
||||
{
|
||||
"name": "host",
|
||||
"type": "string",
|
||||
"required": true,
|
||||
"description": "Database host, e.g., localhost"
|
||||
}
|
||||
]
|
||||
}} />
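A minimal sketch of an MSSQL entry, assuming a TOML datasource section; the `[[datasources]]` section name and `type` key are illustrative, while the other keys mirror the table above.

```toml
[[datasources]]                        # illustrative section name
type = "mssql"                         # hypothetical discriminator
host = "localhost"                     # required
port = 1433                            # required
user = "sa"                            # required
database = "dbgpt"                     # required
password = "${env:DBGPT_DB_PASSWORD}"  # or write the password directly
driver = "mssql+pymssql"               # optional, default shown
```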
@@ -0,0 +1,87 @@
---
title: "MySQL datasource Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
"name": "MySQLParameters",
"description": "Fast, reliable, scalable open-source relational database management system.",
"documentationUrl": null,
"parameters": [
{
"name": "port",
"type": "integer",
"required": true,
"description": "Database port, e.g., 3306"
},
{
"name": "user",
"type": "string",
"required": true,
"description": "Database user to connect"
},
{
"name": "database",
"type": "string",
"required": true,
"description": "Database name"
},
{
"name": "driver",
"type": "string",
"required": false,
"description": "Driver name for MySQL, default is mysql+pymysql.",
"defaultValue": "mysql+pymysql"
},
{
"name": "password",
"type": "string",
"required": false,
"description": "Database password. You can write the password directly, or reference an environment variable such as ${env:DBGPT_DB_PASSWORD}",
"defaultValue": "${env:DBGPT_DB_PASSWORD}"
},
{
"name": "pool_size",
"type": "integer",
"required": false,
"description": "Connection pool size, default 5",
"defaultValue": "5"
},
{
"name": "max_overflow",
"type": "integer",
"required": false,
"description": "Max overflow connections, default 10",
"defaultValue": "10"
},
{
"name": "pool_timeout",
"type": "integer",
"required": false,
"description": "Connection pool timeout in seconds, default 30",
"defaultValue": "30"
},
{
"name": "pool_recycle",
"type": "integer",
"required": false,
"description": "Connection pool recycle time in seconds, default 3600",
"defaultValue": "3600"
},
{
"name": "pool_pre_ping",
"type": "boolean",
"required": false,
"description": "Enable connection pool pre-ping, default True",
"defaultValue": "True"
},
{
"name": "host",
"type": "string",
"required": true,
"description": "Database host, e.g., localhost"
}
]
}} />
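A hypothetical TOML fragment for a MySQL datasource, including optional SQLAlchemy-style pool tuning; section and `type` names are assumptions, the keys are from the table above.

```toml
[[datasources]]                        # illustrative section name
type = "mysql"                         # hypothetical discriminator
host = "localhost"
port = 3306
user = "root"
database = "dbgpt"
password = "${env:DBGPT_DB_PASSWORD}"
driver = "mysql+pymysql"               # optional, default shown
pool_size = 5                          # optional pool tuning
max_overflow = 10
```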
@@ -0,0 +1,87 @@
---
title: "OceanBase datasource Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
"name": "OceanBaseParameters",
"description": "An Ultra-Fast & Cost-Effective Distributed SQL Database.",
"documentationUrl": null,
"parameters": [
{
"name": "port",
"type": "integer",
"required": true,
"description": "Database port, e.g., 2881"
},
{
"name": "user",
"type": "string",
"required": true,
"description": "Database user to connect"
},
{
"name": "database",
"type": "string",
"required": true,
"description": "Database name"
},
{
"name": "driver",
"type": "string",
"required": false,
"description": "Driver name for OceanBase, default is mysql+ob.",
"defaultValue": "mysql+ob"
},
{
"name": "password",
"type": "string",
"required": false,
"description": "Database password. You can write the password directly, or reference an environment variable such as ${env:DBGPT_DB_PASSWORD}",
"defaultValue": "${env:DBGPT_DB_PASSWORD}"
},
{
"name": "pool_size",
"type": "integer",
"required": false,
"description": "Connection pool size, default 5",
"defaultValue": "5"
},
{
"name": "max_overflow",
"type": "integer",
"required": false,
"description": "Max overflow connections, default 10",
"defaultValue": "10"
},
{
"name": "pool_timeout",
"type": "integer",
"required": false,
"description": "Connection pool timeout in seconds, default 30",
"defaultValue": "30"
},
{
"name": "pool_recycle",
"type": "integer",
"required": false,
"description": "Connection pool recycle time in seconds, default 3600",
"defaultValue": "3600"
},
{
"name": "pool_pre_ping",
"type": "boolean",
"required": false,
"description": "Enable connection pool pre-ping, default True",
"defaultValue": "True"
},
{
"name": "host",
"type": "string",
"required": true,
"description": "Database host, e.g., localhost"
}
]
}} />
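Since OceanBase speaks the MySQL protocol here, a connection sketch looks much like the MySQL one; section and `type` names remain assumptions.

```toml
[[datasources]]                        # illustrative section name
type = "oceanbase"                     # hypothetical discriminator
host = "localhost"
port = 2881                            # OceanBase MySQL-mode port
user = "root@sys"
database = "dbgpt"
password = "${env:DBGPT_DB_PASSWORD}"
driver = "mysql+ob"                    # optional, default shown
```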
@@ -0,0 +1,94 @@
---
title: "PostgreSQL datasource Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
"name": "PostgreSQLParameters",
"description": "Powerful open-source relational database with extensibility and SQL standards.",
"documentationUrl": null,
"parameters": [
{
"name": "port",
"type": "integer",
"required": true,
"description": "Database port, e.g., 5432"
},
{
"name": "user",
"type": "string",
"required": true,
"description": "Database user to connect"
},
{
"name": "database",
"type": "string",
"required": true,
"description": "Database name"
},
{
"name": "driver",
"type": "string",
"required": false,
"description": "Driver name for PostgreSQL, default is postgresql+psycopg2.",
"defaultValue": "postgresql+psycopg2"
},
{
"name": "password",
"type": "string",
"required": false,
"description": "Database password. You can write the password directly, or reference an environment variable such as ${env:DBGPT_DB_PASSWORD}",
"defaultValue": "${env:DBGPT_DB_PASSWORD}"
},
{
"name": "pool_size",
"type": "integer",
"required": false,
"description": "Connection pool size, default 5",
"defaultValue": "5"
},
{
"name": "max_overflow",
"type": "integer",
"required": false,
"description": "Max overflow connections, default 10",
"defaultValue": "10"
},
{
"name": "pool_timeout",
"type": "integer",
"required": false,
"description": "Connection pool timeout in seconds, default 30",
"defaultValue": "30"
},
{
"name": "pool_recycle",
"type": "integer",
"required": false,
"description": "Connection pool recycle time in seconds, default 3600",
"defaultValue": "3600"
},
{
"name": "pool_pre_ping",
"type": "boolean",
"required": false,
"description": "Enable connection pool pre-ping, default True",
"defaultValue": "True"
},
{
"name": "schema",
"type": "string",
"required": false,
"description": "Database schema, defaults to 'public'",
"defaultValue": "public"
},
{
"name": "host",
"type": "string",
"required": true,
"description": "Database host, e.g., localhost"
}
]
}} />
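PostgreSQL adds a `schema` parameter on top of the common pool settings; a hedged TOML sketch (section and `type` names assumed):

```toml
[[datasources]]                        # illustrative section name
type = "postgresql"                    # hypothetical discriminator
host = "localhost"
port = 5432
user = "postgres"
database = "dbgpt"
schema = "public"                      # optional, default
password = "${env:DBGPT_DB_PASSWORD}"
driver = "postgresql+psycopg2"         # optional, default shown
```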
@@ -0,0 +1,20 @@
---
title: "Apache Spark datasource Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
"name": "SparkParameters",
"description": "Unified engine for large-scale data analytics.",
"documentationUrl": null,
"parameters": [
{
"name": "path",
"type": "string",
"required": true,
"description": "The file path of the data source."
}
]
}} />
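Spark takes a single parameter, so a sketch is short; everything but `path` is an assumption.

```toml
[[datasources]]                  # illustrative section name
type = "spark"                   # hypothetical discriminator
path = "/data/warehouse/events"  # required file path of the data source
```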
@@ -0,0 +1,34 @@
---
title: "SQLite datasource Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
"name": "SQLiteConnectorParameters",
"description": "Lightweight embedded relational database with simplicity and portability.",
"documentationUrl": null,
"parameters": [
{
"name": "check_same_thread",
"type": "boolean",
"required": false,
"description": "Whether to check the same thread, default is False. Set to False to allow sharing a connection across threads",
"defaultValue": "False"
},
{
"name": "driver",
"type": "string",
"required": false,
"description": "Driver name, default is sqlite",
"defaultValue": "sqlite"
},
{
"name": "path",
"type": "string",
"required": true,
"description": "SQLite database file path. Use ':memory:' for in-memory database"
}
]
}} />
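For SQLite only `path` is required; a hedged sketch (section and `type` names assumed):

```toml
[[datasources]]              # illustrative section name
type = "sqlite"              # hypothetical discriminator
path = "/data/dbgpt.db"      # or ":memory:" for an in-memory database
check_same_thread = false    # default; allows cross-thread connection sharing
```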
@@ -0,0 +1,87 @@
---
title: "StarRocks datasource Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
"name": "StarRocksParameters",
"description": "An Open-Source, High-Performance Analytical Database.",
"documentationUrl": null,
"parameters": [
{
"name": "port",
"type": "integer",
"required": true,
"description": "Database port, e.g., 9030"
},
{
"name": "user",
"type": "string",
"required": true,
"description": "Database user to connect"
},
{
"name": "database",
"type": "string",
"required": true,
"description": "Database name"
},
{
"name": "driver",
"type": "string",
"required": false,
"description": "Driver name for StarRocks, default is starrocks.",
"defaultValue": "starrocks"
},
{
"name": "password",
"type": "string",
"required": false,
"description": "Database password. You can write the password directly, or reference an environment variable such as ${env:DBGPT_DB_PASSWORD}",
"defaultValue": "${env:DBGPT_DB_PASSWORD}"
},
{
"name": "pool_size",
"type": "integer",
"required": false,
"description": "Connection pool size, default 5",
"defaultValue": "5"
},
{
"name": "max_overflow",
"type": "integer",
"required": false,
"description": "Max overflow connections, default 10",
"defaultValue": "10"
},
{
"name": "pool_timeout",
"type": "integer",
"required": false,
"description": "Connection pool timeout in seconds, default 30",
"defaultValue": "30"
},
{
"name": "pool_recycle",
"type": "integer",
"required": false,
"description": "Connection pool recycle time in seconds, default 3600",
"defaultValue": "3600"
},
{
"name": "pool_pre_ping",
"type": "boolean",
"required": false,
"description": "Enable connection pool pre-ping, default True",
"defaultValue": "True"
},
{
"name": "host",
"type": "string",
"required": true,
"description": "Database host, e.g., localhost"
}
]
}} />
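A StarRocks sketch follows the same pattern; section and `type` names are assumptions.

```toml
[[datasources]]                        # illustrative section name
type = "starrocks"                     # hypothetical discriminator
host = "localhost"
port = 9030                            # StarRocks FE query port
user = "root"
database = "dbgpt"
password = "${env:DBGPT_DB_PASSWORD}"
driver = "starrocks"                   # optional, default shown
```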
@@ -0,0 +1,47 @@
---
title: "TuGraph datasource Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
"name": "TuGraphParameters",
"description": "TuGraph is a high-performance graph database jointly developed by Ant Group and Tsinghua University.",
"documentationUrl": null,
"parameters": [
{
"name": "user",
"type": "string",
"required": true,
"description": "TuGraph server user"
},
{
"name": "password",
"type": "string",
"required": false,
"description": "Database password. You can write the password directly, or reference an environment variable such as ${env:DBGPT_DB_PASSWORD}",
"defaultValue": "${env:DBGPT_DB_PASSWORD}"
},
{
"name": "port",
"type": "integer",
"required": false,
"description": "TuGraph server port, default 7687",
"defaultValue": "7687"
},
{
"name": "database",
"type": "string",
"required": false,
"description": "Database name, default 'default'",
"defaultValue": "default"
},
{
"name": "host",
"type": "string",
"required": true,
"description": "TuGraph server host"
}
]
}} />
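A TuGraph sketch, again with section and `type` names assumed:

```toml
[[datasources]]                        # illustrative section name
type = "tugraph"                       # hypothetical discriminator
host = "localhost"                     # required
port = 7687                            # default
user = "admin"                         # required
password = "${env:DBGPT_DB_PASSWORD}"
database = "default"                   # default
```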
@@ -0,0 +1,87 @@
---
title: "Vertica datasource Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
"name": "VerticaParameters",
"description": "Vertica is a strongly consistent, ACID-compliant, SQL data warehouse, built for the scale and complexity of today's data-driven world.",
"documentationUrl": null,
"parameters": [
{
"name": "port",
"type": "integer",
"required": true,
"description": "Database port, e.g., 5433"
},
{
"name": "user",
"type": "string",
"required": true,
"description": "Database user to connect"
},
{
"name": "database",
"type": "string",
"required": true,
"description": "Database name"
},
{
"name": "driver",
"type": "string",
"required": false,
"description": "Driver name for Vertica, default is vertica+vertica_python",
"defaultValue": "vertica+vertica_python"
},
{
"name": "password",
"type": "string",
"required": false,
"description": "Database password. You can write the password directly, or reference an environment variable such as ${env:DBGPT_DB_PASSWORD}",
"defaultValue": "${env:DBGPT_DB_PASSWORD}"
},
{
"name": "pool_size",
"type": "integer",
"required": false,
"description": "Connection pool size, default 5",
"defaultValue": "5"
},
{
"name": "max_overflow",
"type": "integer",
"required": false,
"description": "Max overflow connections, default 10",
"defaultValue": "10"
},
{
"name": "pool_timeout",
"type": "integer",
"required": false,
"description": "Connection pool timeout in seconds, default 30",
"defaultValue": "30"
},
{
"name": "pool_recycle",
"type": "integer",
"required": false,
"description": "Connection pool recycle time in seconds, default 3600",
"defaultValue": "3600"
},
{
"name": "pool_pre_ping",
"type": "boolean",
"required": false,
"description": "Enable connection pool pre-ping, default True",
"defaultValue": "True"
},
{
"name": "host",
"type": "string",
"required": true,
"description": "Database host, e.g., localhost"
}
]
}} />
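And a Vertica sketch to close out the SQL datasources; section and `type` names are assumptions.

```toml
[[datasources]]                        # illustrative section name
type = "vertica"                       # hypothetical discriminator
host = "localhost"
port = 5433                            # Vertica default client port
user = "dbadmin"
database = "dbgpt"
password = "${env:DBGPT_DB_PASSWORD}"
driver = "vertica+vertica_python"      # optional, default shown
```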
@@ -0,0 +1,91 @@
---
title: "Deepseek Proxy LLM Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
"name": "DeepSeekDeployModelParameters",
"description": "Deepseek proxy LLM configuration.",
"documentationUrl": "https://api-docs.deepseek.com/",
"parameters": [
{
"name": "name",
"type": "string",
"required": true,
"description": "The name of the model."
},
{
"name": "backend",
"type": "string",
"required": false,
"description": "The real model name to pass to the provider; default is None. If backend is None, name is used as the real model name."
},
{
"name": "provider",
"type": "string",
"required": false,
"description": "The provider of the model. If the model is deployed locally, this is the inference type; if it is deployed on a third-party service, this is the platform name ('proxy/<platform>').",
"defaultValue": "proxy/deepseek"
},
{
"name": "verbose",
"type": "boolean",
"required": false,
"description": "Show verbose output.",
"defaultValue": "False"
},
{
"name": "concurrency",
"type": "integer",
"required": false,
"description": "Model concurrency limit",
"defaultValue": "100"
},
{
"name": "prompt_template",
"type": "string",
"required": false,
"description": "Prompt template. If None, the prompt template is automatically determined from the model. Only used for local deployment."
},
{
"name": "context_length",
"type": "integer",
"required": false,
"description": "The context length of the OpenAI API. If None, it is determined by the model."
},
{
"name": "api_base",
"type": "string",
"required": false,
"description": "The base url of the DeepSeek API.",
"defaultValue": "${env:DEEPSEEK_API_BASE:-https://api.deepseek.com/v1}"
},
{
"name": "api_key",
"type": "string",
"required": false,
"description": "The API key of the DeepSeek API.",
"defaultValue": "${env:DEEPSEEK_API_KEY}"
},
{
"name": "api_type",
"type": "string",
"required": false,
"description": "The type of the OpenAI API. If you use Azure, it can be: azure."
},
{
"name": "api_version",
"type": "string",
"required": false,
"description": "The version of the OpenAI API."
},
{
"name": "http_proxy",
"type": "string",
"required": false,
"description": "The HTTP or HTTPS proxy to use for the API."
}
]
}} />
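A minimal model entry, assuming the `[[models.llms]]` TOML section used by recent DB-GPT releases (the section name itself is not stated on this page):

```toml
[[models.llms]]
name = "deepseek-chat"                  # example DeepSeek model name
provider = "proxy/deepseek"
api_key = "${env:DEEPSEEK_API_KEY}"
# api_base falls back to https://api.deepseek.com/v1 when omitted
```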
@@ -0,0 +1,99 @@
---
title: "HFEmbeddingDeployModelParameters Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
"name": "HFEmbeddingDeployModelParameters",
"description": "HFEmbeddingDeployModelParameters(name: str, provider: str = 'hf', verbose: Optional[bool] = False, concurrency: Optional[int] = 100, path: Optional[str] = None, device: Optional[str] = None, cache_folder: Optional[str] = None, normalize_embeddings: bool = False, multi_process: bool = False, model_kwargs: Dict[str, Any] = <factory>, encode_kwargs: Dict[str, Any] = <factory>, embed_instruction: Optional[str] = None, query_instruction: Optional[str] = None)",
"documentationUrl": "",
"parameters": [
{
"name": "name",
"type": "string",
"required": true,
"description": "The name of the model."
},
{
"name": "path",
"type": "string",
"required": false,
"description": "The path of the model, if you want to deploy a local model."
},
{
"name": "device",
"type": "string",
"required": false,
"description": "Device to run model. If None, the device is automatically determined"
},
{
"name": "provider",
"type": "string",
"required": false,
"description": "The provider of the model. If the model is deployed locally, this is the inference type; if it is deployed on a third-party service, this is the platform name ('proxy/<platform>').",
"defaultValue": "hf"
},
{
"name": "verbose",
"type": "boolean",
"required": false,
"description": "Show verbose output.",
"defaultValue": "False"
},
{
"name": "concurrency",
"type": "integer",
"required": false,
"description": "Model concurrency limit",
"defaultValue": "100"
},
{
"name": "cache_folder",
"type": "string",
"required": false,
"description": "Path of the cache folder."
},
{
"name": "normalize_embeddings",
"type": "boolean",
"required": false,
"description": "Normalize embeddings.",
"defaultValue": "False"
},
{
"name": "multi_process",
"type": "boolean",
"required": false,
"description": "Run encode() on multiple GPUs.",
"defaultValue": "False"
},
{
"name": "model_kwargs",
"type": "object",
"required": false,
"description": "Keyword arguments to pass to the model.",
"defaultValue": "{}"
},
{
"name": "encode_kwargs",
"type": "object",
"required": false,
"description": "Keyword arguments to pass when calling the `encode` method.",
"defaultValue": "{}"
},
{
"name": "embed_instruction",
"type": "string",
"required": false,
"description": "Instruction to use for embedding documents. Only used for Instructor models."
},
{
"name": "query_instruction",
"type": "string",
"required": false,
"description": "Instruction to use for embedding queries. Only used for Instructor models."
}
]
}} />
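A sketch of a local Hugging Face embedding model entry, assuming a `[[models.embeddings]]` TOML section (an assumption, as is the model id):

```toml
[[models.embeddings]]
name = "BAAI/bge-large-zh-v1.5"   # hypothetical HF model id
provider = "hf"
normalize_embeddings = true
# device is auto-detected when omitted; set path = "/models/bge" for a local copy
```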
@@ -0,0 +1,67 @@
---
title: "OpenAPIEmbeddingDeployModelParameters Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
"name": "OpenAPIEmbeddingDeployModelParameters",
"description": "OpenAPI embedding deploy model parameters.",
"documentationUrl": "",
"parameters": [
{
"name": "name",
"type": "string",
"required": true,
"description": "The name of the model."
},
{
"name": "provider",
"type": "string",
"required": false,
"description": "The provider of the model. If the model is deployed locally, this is the inference type; if it is deployed on a third-party service, this is the platform name ('proxy/<platform>').",
"defaultValue": "proxy/openai"
},
{
"name": "verbose",
"type": "boolean",
"required": false,
"description": "Show verbose output.",
"defaultValue": "False"
},
{
"name": "concurrency",
"type": "integer",
"required": false,
"description": "Model concurrency limit",
"defaultValue": "100"
},
{
"name": "api_url",
"type": "string",
"required": false,
"description": "The URL of the embeddings API.",
"defaultValue": "http://localhost:8100/api/v1/embeddings"
},
{
"name": "api_key",
"type": "string",
"required": false,
"description": "The API key for the embeddings API."
},
{
"name": "backend",
"type": "string",
"required": false,
"description": "The real model name to pass to the provider; default is None. If backend is None, name is used as the real model name."
},
{
"name": "timeout",
"type": "integer",
"required": false,
"description": "The timeout for the request in seconds.",
"defaultValue": "60"
}
]
}} />
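A hedged sketch for an OpenAPI-compatible embedding endpoint, assuming a `[[models.embeddings]]` section; the env variable name is hypothetical.

```toml
[[models.embeddings]]
name = "text2vec"                                   # any label you choose
provider = "proxy/openai"
api_url = "http://localhost:8100/api/v1/embeddings" # default shown
api_key = "${env:EMBEDDING_API_KEY}"                # hypothetical variable
timeout = 60                                        # default
```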
@@ -0,0 +1,91 @@
---
title: "Gemini Proxy LLM Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
"name": "GeminiDeployModelParameters",
"description": "Google Gemini proxy LLM configuration.",
"documentationUrl": "https://ai.google.dev/gemini-api/docs",
"parameters": [
{
"name": "name",
"type": "string",
"required": true,
"description": "The name of the model."
},
{
"name": "backend",
"type": "string",
"required": false,
"description": "The real model name to pass to the provider; default is None. If backend is None, name is used as the real model name."
},
{
"name": "provider",
"type": "string",
"required": false,
"description": "The provider of the model. If the model is deployed locally, this is the inference type; if it is deployed on a third-party service, this is the platform name ('proxy/<platform>').",
"defaultValue": "proxy/gemini"
},
{
"name": "verbose",
"type": "boolean",
"required": false,
"description": "Show verbose output.",
"defaultValue": "False"
},
{
"name": "concurrency",
"type": "integer",
"required": false,
"description": "Model concurrency limit",
"defaultValue": "100"
},
{
"name": "prompt_template",
"type": "string",
"required": false,
"description": "Prompt template. If None, the prompt template is automatically determined from the model. Only used for local deployment."
},
{
"name": "context_length",
"type": "integer",
"required": false,
"description": "The context length of the OpenAI API. If None, it is determined by the model."
},
{
"name": "api_base",
"type": "string",
"required": false,
"description": "The base url of the Gemini API.",
"defaultValue": "${env:GEMINI_PROXY_API_BASE}"
},
{
"name": "api_key",
"type": "string",
"required": false,
"description": "The API key of the Gemini API.",
"defaultValue": "${env:GEMINI_PROXY_API_KEY}"
},
{
"name": "api_type",
"type": "string",
"required": false,
"description": "The type of the OpenAI API. If you use Azure, it can be: azure."
},
{
"name": "api_version",
"type": "string",
"required": false,
"description": "The version of the OpenAI API."
},
{
"name": "http_proxy",
"type": "string",
"required": false,
"description": "The HTTP or HTTPS proxy to use for the API."
}
]
}} />
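A Gemini entry follows the same pattern; the `[[models.llms]]` section name and the model name are assumptions.

```toml
[[models.llms]]
name = "gemini-pro"                     # example Gemini model name
provider = "proxy/gemini"
api_key = "${env:GEMINI_PROXY_API_KEY}"
```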
@@ -0,0 +1,91 @@
---
title: "Gitee Proxy LLM Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
"name": "GiteeDeployModelParameters",
"description": "Gitee proxy LLM configuration.",
"documentationUrl": "https://ai.gitee.com/docs/getting-started/intro",
"parameters": [
{
"name": "name",
"type": "string",
"required": true,
"description": "The name of the model."
},
{
"name": "backend",
"type": "string",
"required": false,
"description": "The real model name to pass to the provider; default is None. If backend is None, name is used as the real model name."
},
{
"name": "provider",
"type": "string",
"required": false,
"description": "The provider of the model. If the model is deployed locally, this is the inference type; if it is deployed on a third-party service, this is the platform name ('proxy/<platform>').",
"defaultValue": "proxy/gitee"
},
{
"name": "verbose",
"type": "boolean",
"required": false,
"description": "Show verbose output.",
"defaultValue": "False"
},
{
"name": "concurrency",
"type": "integer",
"required": false,
"description": "Model concurrency limit",
"defaultValue": "100"
},
{
"name": "prompt_template",
"type": "string",
"required": false,
"description": "Prompt template. If None, the prompt template is automatically determined from the model. Only used for local deployment."
},
{
"name": "context_length",
"type": "integer",
"required": false,
"description": "The context length of the OpenAI API. If None, it is determined by the model."
},
{
"name": "api_base",
"type": "string",
"required": false,
"description": "The base url of the Gitee API.",
"defaultValue": "${env:GITEE_API_BASE:-https://ai.gitee.com/v1}"
},
{
"name": "api_key",
"type": "string",
"required": false,
"description": "The API key of the Gitee API.",
"defaultValue": "${env:GITEE_API_KEY}"
},
{
"name": "api_type",
"type": "string",
"required": false,
"description": "The type of the OpenAI API. If you use Azure, it can be: azure."
},
{
"name": "api_version",
"type": "string",
"required": false,
"description": "The version of the OpenAI API."
},
{
"name": "http_proxy",
"type": "string",
"required": false,
"description": "The HTTP or HTTPS proxy to use for the API."
}
]
}} />
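Likewise for Gitee AI, assuming the `[[models.llms]]` section; the model name is a hypothetical example.

```toml
[[models.llms]]
name = "Qwen2.5-72B-Instruct"           # hypothetical model on Gitee AI
provider = "proxy/gitee"
api_key = "${env:GITEE_API_KEY}"
# api_base falls back to https://ai.gitee.com/v1 when omitted
```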
@@ -0,0 +1,132 @@
---
title: "HFLLMDeployModelParameters Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
"name": "HFLLMDeployModelParameters",
"description": "Local deploy model parameters.",
"documentationUrl": "",
"parameters": [
{
"name": "name",
"type": "string",
"required": true,
"description": "The name of the model."
},
{
"name": "path",
"type": "string",
"required": false,
"description": "The path of the model, if you want to deploy a local model."
},
{
"name": "backend",
"type": "string",
"required": false,
"description": "The real model name to pass to the provider; default is None. If backend is None, name is used as the real model name."
},
{
"name": "device",
"type": "string",
"required": false,
"description": "Device to run model. If None, the device is automatically determined"
},
{
"name": "provider",
"type": "string",
"required": false,
"description": "The provider of the model. If the model is deployed locally, this is the inference type; if it is deployed on a third-party service, this is the platform name ('proxy/<platform>').",
"defaultValue": "hf"
},
{
"name": "verbose",
"type": "boolean",
"required": false,
"description": "Show verbose output.",
"defaultValue": "False"
},
{
"name": "concurrency",
"type": "integer",
"required": false,
"description": "Model concurrency limit",
"defaultValue": "5"
},
{
"name": "prompt_template",
"type": "string",
"required": false,
"description": "Prompt template. If None, the prompt template is automatically determined from the model. Only used for local deployment."
},
{
"name": "context_length",
"type": "integer",
"required": false,
"description": "The context length of the model. If None, it is automatically determined from the model."
},
{
"name": "trust_remote_code",
"type": "boolean",
"required": false,
"description": "Whether to trust remote code.",
"defaultValue": "True"
},
{
"name": "quantization",
"type": "BaseHFQuantization",
"required": false,
"description": "The quantization parameters.",
"nestedTypes": [
{
"type": "link",
"text": "bitsandbytes configuration",
"url": "././parameter_bitsandbytesquantization_d40e3b"
},
{
"type": "link",
"text": "bitsandbytes_8bits configuration",
"url": "././parameter_bitsandbytesquantization8bits_909aed"
},
{
"type": "link",
"text": "bitsandbytes_4bits configuration",
"url": "././parameter_bitsandbytesquantization4bits_52b778"
}
]
},
{
"name": "low_cpu_mem_usage",
"type": "boolean",
"required": false,
"description": "Whether to use low CPU memory usage mode. It can reduce memory use when loading the model; if you load the model with quantization, it is True by default. You must install `accelerate` to make it work."
},
{
"name": "num_gpus",
"type": "integer",
"required": false,
"description": "The number of GPUs you expect to use; if empty, use as many as possible."
},
{
"name": "max_gpu_memory",
"type": "string",
"required": false,
"description": "The maximum memory limit of each GPU, only valid in multi-GPU configuration, e.g., 10GiB, 24GiB"
},
{
"name": "torch_dtype",
"type": "string",
"required": false,
"description": "The dtype of the model, default is None.",
"validValues": [
"auto",
"float16",
"bfloat16",
"float",
"float32"
]
}
]
}} />
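A sketch of a locally deployed Hugging Face LLM, assuming the `[[models.llms]]` section; the repo id and local path are hypothetical.

```toml
[[models.llms]]
name = "Qwen/Qwen2.5-7B-Instruct"   # hypothetical HF repo id
provider = "hf"
# path = "/models/qwen2.5-7b"       # optional: use a local copy instead
torch_dtype = "bfloat16"
num_gpus = 1
trust_remote_code = true            # default
```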
@@ -0,0 +1,68 @@
---
title: "JinaEmbeddingsDeployModelParameters Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
"name": "JinaEmbeddingsDeployModelParameters",
"description": "Jina AI Embeddings deploy model parameters.",
"documentationUrl": "",
"parameters": [
{
"name": "name",
"type": "string",
"required": true,
"description": "The name of the model."
},
{
"name": "provider",
"type": "string",
"required": false,
"description": "The provider of the model. If the model is deployed locally, this is the inference type; if it is deployed on a third-party service, this is the platform name ('proxy/<platform>').",
"defaultValue": "proxy/jina"
},
{
"name": "verbose",
"type": "boolean",
"required": false,
"description": "Show verbose output.",
"defaultValue": "False"
},
{
"name": "concurrency",
"type": "integer",
"required": false,
"description": "Model concurrency limit",
"defaultValue": "100"
},
{
"name": "api_url",
"type": "string",
"required": false,
"description": "The URL of the embeddings API.",
"defaultValue": "https://api.jina.ai/v1/embeddings"
},
{
"name": "api_key",
"type": "string",
"required": false,
"description": "The API key for the embeddings API."
},
{
"name": "backend",
"type": "string",
"required": false,
"description": "The real model name to pass to the provider; default is None. If backend is None, name is used as the real model name.",
"defaultValue": "jina-embeddings-v2-base-en"
},
{
"name": "timeout",
"type": "integer",
"required": false,
"description": "The timeout for the request in seconds.",
"defaultValue": "60"
}
]
}} />
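A Jina embeddings sketch, assuming a `[[models.embeddings]]` section; the env variable name is hypothetical.

```toml
[[models.embeddings]]
name = "jina-embeddings-v2-base-en"   # matches the default backend
provider = "proxy/jina"
api_key = "${env:JINA_API_KEY}"       # hypothetical variable
# api_url falls back to https://api.jina.ai/v1/embeddings when omitted
```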
@@ -0,0 +1,281 @@
---
title: "LlamaServerParameters Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
"name": "LlamaServerParameters",
"description": "LlamaServerParameters(name: str, provider: str = 'llama.cpp.server', verbose: Optional[bool] = False, concurrency: Optional[int] = 20, backend: Optional[str] = None, prompt_template: Optional[str] = None, context_length: Optional[int] = None, path: Optional[str] = None, model_hf_repo: Optional[str] = None, model_hf_file: Optional[str] = None, device: Optional[str] = None, server_bin_path: Optional[str] = None, server_host: str = '127.0.0.1', server_port: int = 0, temperature: float = 0.8, seed: int = 42, debug: bool = False, model_url: Optional[str] = None, model_draft: Optional[str] = None, threads: Optional[int] = None, n_gpu_layers: Optional[int] = None, batch_size: Optional[int] = None, ubatch_size: Optional[int] = None, ctx_size: Optional[int] = None, grp_attn_n: Optional[int] = None, grp_attn_w: Optional[int] = None, n_predict: Optional[int] = None, slot_save_path: Optional[str] = None, n_slots: Optional[int] = None, cont_batching: bool = False, embedding: bool = False, reranking: bool = False, metrics: bool = False, slots: bool = False, draft: Optional[int] = None, draft_max: Optional[int] = None, draft_min: Optional[int] = None, api_key: Optional[str] = None, lora_files: List[str] = <factory>, no_context_shift: bool = False, no_webui: Optional[bool] = None, startup_timeout: Optional[int] = None)",
"documentationUrl": "",
"parameters": [
{
"name": "name",
"type": "string",
"required": true,
"description": "The name of the model."
},
{
"name": "path",
"type": "string",
"required": false,
"description": "Local model file path"
},
{
"name": "backend",
"type": "string",
"required": false,
"description": "The real model name to pass to the provider; default is None. If backend is None, name is used as the real model name."
},
{
"name": "device",
"type": "string",
"required": false,
"description": "Device to run model. If None, the device is automatically determined"
},
{
"name": "provider",
"type": "string",
"required": false,
"description": "The provider of the model. If the model is deployed locally, this is the inference type; if it is deployed on a third-party service, this is the platform name ('proxy/<platform>').",
"defaultValue": "llama.cpp.server"
},
{
"name": "verbose",
"type": "boolean",
"required": false,
"description": "Show verbose output.",
"defaultValue": "False"
},
{
"name": "concurrency",
"type": "integer",
"required": false,
"description": "Model concurrency limit",
"defaultValue": "20"
},
{
"name": "prompt_template",
"type": "string",
"required": false,
"description": "Prompt template. If None, the prompt template is automatically determined from the model. Only used for local deployment."
},
{
"name": "context_length",
"type": "integer",
"required": false,
"description": "The context length of the model. If None, it is automatically determined from the model."
},
{
"name": "model_hf_repo",
"type": "string",
"required": false,
"description": "Hugging Face repository for model download"
},
{
"name": "model_hf_file",
"type": "string",
"required": false,
"description": "Model file name in the Hugging Face repository"
},
{
"name": "server_bin_path",
"type": "string",
"required": false,
"description": "Path to the server binary executable"
},
{
"name": "server_host",
"type": "string",
"required": false,
"description": "Host address to bind the server",
"defaultValue": "127.0.0.1"
},
{
"name": "server_port",
"type": "integer",
"required": false,
"description": "Port to bind the server. 0 for random available port",
"defaultValue": "0"
},
{
"name": "temperature",
"type": "number",
"required": false,
"description": "Sampling temperature for text generation",
"defaultValue": "0.8"
},
{
"name": "seed",
"type": "integer",
"required": false,
"description": "Random seed for reproducibility",
"defaultValue": "42"
},
{
"name": "debug",
"type": "boolean",
"required": false,
"description": "Enable debug mode",
"defaultValue": "False"
},
{
"name": "model_url",
"type": "string",
"required": false,
"description": "Model download URL (env: LLAMA_ARG_MODEL_URL)"
},
{
"name": "model_draft",
"type": "string",
"required": false,
"description": "Draft model file path"
},
{
"name": "threads",
"type": "integer",
"required": false,
"description": "Number of threads to use during generation (default: -1) (env: LLAMA_ARG_THREADS)"
},
{
"name": "n_gpu_layers",
"type": "integer",
"required": false,
"description": "Number of layers to store in VRAM (env: LLAMA_ARG_N_GPU_LAYERS), set 1000000000 to use all layers"
},
{
"name": "batch_size",
"type": "integer",
"required": false,
"description": "Logical maximum batch size (default: 2048) (env: LLAMA_ARG_BATCH)"
},
{
"name": "ubatch_size",
"type": "integer",
"required": false,
"description": "Physical maximum batch size (default: 512) (env: LLAMA_ARG_UBATCH)"
},
{
"name": "ctx_size",
"type": "integer",
"required": false,
"description": "Size of the prompt context (default: 4096, 0 = loaded from model) (env: LLAMA_ARG_CTX_SIZE)"
},
{
"name": "grp_attn_n",
"type": "integer",
"required": false,
"description": "Group-attention factor (default: 1)"
},
{
"name": "grp_attn_w",
"type": "integer",
"required": false,
"description": "Group-attention width (default: 512)"
},
{
"name": "n_predict",
"type": "integer",
"required": false,
"description": "Number of tokens to predict (default: -1, -1 = infinity, -2 = until context filled) (env: LLAMA_ARG_N_PREDICT)"
},
{
"name": "slot_save_path",
"type": "string",
"required": false,
"description": "Path to save slot kv cache (default: disabled)"
},
{
"name": "n_slots",
"type": "integer",
"required": false,
"description": "Number of slots for KV cache"
},
{
"name": "cont_batching",
"type": "boolean",
"required": false,
"description": "Enable continuous batching (a.k.a. dynamic batching)",
"defaultValue": "False"
},
{
"name": "embedding",
"type": "boolean",
"required": false,
"description": "Restrict to only support embedding use case; use only with dedicated embedding models (env: LLAMA_ARG_EMBEDDINGS)",
"defaultValue": "False"
},
{
"name": "reranking",
"type": "boolean",
"required": false,
"description": "Enable reranking endpoint on server (env: LLAMA_ARG_RERANKING)",
"defaultValue": "False"
},
{
"name": "metrics",
"type": "boolean",
"required": false,
"description": "Enable prometheus compatible metrics endpoint (env: LLAMA_ARG_ENDPOINT_METRICS)",
"defaultValue": "False"
},
{
"name": "slots",
"type": "boolean",
"required": false,
"description": "Enable slots monitoring endpoint (env: LLAMA_ARG_ENDPOINT_SLOTS)",
"defaultValue": "False"
},
{
"name": "draft",
"type": "integer",
"required": false,
"description": "Number of tokens to draft for speculative decoding (default: 16) (env: LLAMA_ARG_DRAFT_MAX)"
},
{
"name": "draft_max",
"type": "integer",
"required": false,
"description": "Same as draft"
},
{
"name": "draft_min",
"type": "integer",
"required": false,
"description": "Minimum number of draft tokens to use for speculative decoding (default: 5)"
},
{
"name": "api_key",
"type": "string",
"required": false,
"description": "API key to use for authentication (env: LLAMA_API_KEY)"
},
{
"name": "lora_files",
"type": "string",
"required": false,
"description": "Path to LoRA adapter (can be repeated to use multiple adapters)",
"defaultValue": "[]"
},
{
"name": "no_context_shift",
"type": "boolean",
"required": false,
"description": "Disables context shift on infinite text generation",
"defaultValue": "False"
},
{
"name": "no_webui",
"type": "boolean",
"required": false,
"description": "Disable web UI"
},
{
"name": "startup_timeout",
"type": "integer",
"required": false,
"description": "Server startup timeout in seconds"
}
]
}} />
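A sketch for running a GGUF model through the managed llama.cpp server, assuming the `[[models.llms]]` section; the repo and file names are hypothetical.

```toml
[[models.llms]]
name = "qwen2.5-7b-instruct-q4"                   # any label you choose
provider = "llama.cpp.server"
model_hf_repo = "Qwen/Qwen2.5-7B-Instruct-GGUF"   # hypothetical repo
model_hf_file = "qwen2.5-7b-instruct-q4_k_m.gguf" # hypothetical file
ctx_size = 8192
n_gpu_layers = 1000000000                         # offload all layers to GPU
```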
@@ -0,0 +1,124 @@
---
title: "LlamaCppModelParameters Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
"name": "LlamaCppModelParameters",
"description": "LlamaCppModelParameters(name: str, provider: str = 'llama.cpp', verbose: Optional[bool] = False, concurrency: Optional[int] = 5, backend: Optional[str] = None, prompt_template: Optional[str] = None, context_length: Optional[int] = None, path: Optional[str] = None, device: Optional[str] = None, seed: Optional[int] = -1, n_threads: Optional[int] = None, n_batch: Optional[int] = 512, n_gpu_layers: Optional[int] = 1000000000, n_gqa: Optional[int] = None, rms_norm_eps: Optional[float] = 5e-06, cache_capacity: Optional[str] = None, prefer_cpu: Optional[bool] = False)",
"documentationUrl": "",
"parameters": [
{
"name": "name",
"type": "string",
"required": true,
"description": "The name of the model."
},
{
"name": "path",
"type": "string",
"required": false,
"description": "The path of the model, if you want to deploy a local model."
},
{
"name": "backend",
"type": "string",
"required": false,
"description": "The real model name to pass to the provider; default is None. If backend is None, name is used as the real model name."
},
{
"name": "device",
"type": "string",
"required": false,
"description": "Device to run model. If None, the device is automatically determined"
},
{
"name": "provider",
"type": "string",
"required": false,
"description": "The provider of the model. If the model is deployed locally, this is the inference type; if it is deployed on a third-party service, this is the platform name ('proxy/<platform>').",
"defaultValue": "llama.cpp"
},
{
"name": "verbose",
"type": "boolean",
"required": false,
"description": "Show verbose output.",
"defaultValue": "False"
},
{
"name": "concurrency",
"type": "integer",
"required": false,
"description": "Model concurrency limit",
"defaultValue": "5"
},
{
"name": "prompt_template",
"type": "string",
"required": false,
"description": "Prompt template. If None, the prompt template is automatically determined from the model. Only used for local deployment."
},
{
"name": "context_length",
"type": "integer",
"required": false,
"description": "The context length of the model. If None, it is automatically determined from the model."
},
{
"name": "seed",
"type": "integer",
"required": false,
"description": "Random seed for llama-cpp models. -1 for random",
"defaultValue": "-1"
},
{
"name": "n_threads",
"type": "integer",
"required": false,
"description": "Number of threads to use. If None, the number of threads is automatically determined"
},
{
"name": "n_batch",
"type": "integer",
"required": false,
"description": "Maximum number of prompt tokens to batch together when calling llama_eval",
"defaultValue": "512"
},
{
"name": "n_gpu_layers",
"type": "integer",
"required": false,
"description": "Number of layers to offload to the GPU. Set this to 1000000000 to offload all layers to the GPU.",
"defaultValue": "1000000000"
},
{
"name": "n_gqa",
"type": "integer",
"required": false,
"description": "Grouped-query attention. Must be 8 for llama-2 70b."
},
{
"name": "rms_norm_eps",
"type": "number",
"required": false,
"description": "5e-6 is a good value for llama-2 models.",
"defaultValue": "5e-06"
},
{
"name": "cache_capacity",
"type": "string",
"required": false,
"description": "Maximum cache capacity. Examples: 2000MiB, 2GiB. When provided without units, bytes will be assumed."
},
{
"name": "prefer_cpu",
"type": "boolean",
"required": false,
"description": "If a GPU is available, it will be preferred by default, unless prefer_cpu=True is configured.",
"defaultValue": "False"
}
]
}} />
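A sketch for the in-process llama.cpp backend, assuming the `[[models.llms]]` section; the model file path is hypothetical.

```toml
[[models.llms]]
name = "llama-2-7b-chat"
provider = "llama.cpp"
path = "/models/llama-2-7b-chat.Q4_K_M.gguf"  # hypothetical local GGUF file
n_gpu_layers = 1000000000                     # default: offload all layers
```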
41 docs/docs/config-reference/manager_hookconfig_d9a481.mdx Normal file
@@ -0,0 +1,41 @@
---
title: "HookConfig Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
"name": "HookConfig",
"description": "Hook configuration.\n\n You can define a hook configuration with a path and optional parameters.\n It will be used to dynamically load and execute a hook function or a callable\n object.",
"documentationUrl": "",
"parameters": [
{
"name": "init_params",
"type": "object",
"required": false,
"description": "Hook init params to pass to the hook constructor (just for class hooks); must be key-value pairs",
"defaultValue": "{}"
},
{
"name": "params",
"type": "object",
"required": false,
"description": "Hook params to pass to the hook; must be key-value pairs",
"defaultValue": "{}"
},
{
"name": "enabled",
"type": "boolean",
"required": false,
"description": "Whether the hook is enabled, default is True",
"defaultValue": "True"
},
{
"name": "path",
"type": "string",
"required": true,
"description": "Hook path, it can be a class path or a function path. eg: 'dbgpt.config.hooks.env_var_hook'"
}
]
}} />
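An illustrative placement of a hook entry in TOML; the `[[hooks]]` section name and the `strict` key are hypothetical, while the field names come from the table above.

```toml
[[hooks]]                                    # illustrative section name
path = "dbgpt.config.hooks.env_var_hook"     # class or function path
enabled = true                               # default

[hooks.init_params]                          # only for class hooks
strict = false                               # hypothetical key-value pair
```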
@@ -0,0 +1,42 @@
---
title: "ModelCacheParameters Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
"name": "ModelCacheParameters",
"description": "Model cache configuration.",
"documentationUrl": "",
"parameters": [
{
"name": "storage_type",
"type": "string",
"required": false,
"description": "The storage type, default is memory",
"defaultValue": "memory"
},
{
"name": "max_memory_mb",
"type": "integer",
"required": false,
"description": "The max memory in MB, default is 256",
"defaultValue": "256"
},
{
"name": "persist_dir",
"type": "string",
"required": false,
"description": "The persist directory, default is model_cache",
"defaultValue": "model_cache"
},
{
"name": "enable_model_cache",
"type": "boolean",
"required": false,
"description": "Whether to enable model cache, default is True",
"defaultValue": "True"
}
]
}} />
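The cache settings map directly onto a small table; the `[model_cache]` section name below is illustrative.

```toml
[model_cache]               # illustrative section name
enable_model_cache = true   # default
storage_type = "memory"     # default
max_memory_mb = 256         # default
persist_dir = "model_cache" # default
```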
@@ -0,0 +1,91 @@
---
title: "Moonshot Proxy LLM Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
"name": "MoonshotDeployModelParameters",
"description": "Moonshot proxy LLM configuration.",
"documentationUrl": "https://platform.moonshot.cn/docs/api/chat#%E5%85%AC%E5%BC%80%E7%9A%84%E6%9C%8D%E5%8A%A1%E5%9C%B0%E5%9D%80",
"parameters": [
{
"name": "name",
"type": "string",
"required": true,
"description": "The name of the model."
},
{
"name": "backend",
"type": "string",
"required": false,
"description": "The real model name to pass to the provider; default is None. If backend is None, name is used as the real model name."
},
{
"name": "provider",
"type": "string",
"required": false,
"description": "The provider of the model. If the model is deployed locally, this is the inference type; if it is deployed on a third-party service, this is the platform name ('proxy/<platform>').",
"defaultValue": "proxy/moonshot"
},
{
"name": "verbose",
"type": "boolean",
"required": false,
"description": "Show verbose output.",
"defaultValue": "False"
},
{
"name": "concurrency",
"type": "integer",
"required": false,
"description": "Model concurrency limit",
"defaultValue": "100"
},
{
"name": "prompt_template",
"type": "string",
"required": false,
"description": "Prompt template. If None, the prompt template is automatically determined from the model. Only used for local deployment."
},
{
"name": "context_length",
"type": "integer",
"required": false,
"description": "The context length of the OpenAI API. If None, it is determined by the model."
},
{
"name": "api_base",
"type": "string",
"required": false,
"description": "The base url of the Moonshot API.",
"defaultValue": "${env:MOONSHOT_API_BASE:-https://api.moonshot.cn/v1}"
},
{
"name": "api_key",
"type": "string",
"required": false,
"description": "The API key of the Moonshot API.",
"defaultValue": "${env:MOONSHOT_API_KEY}"
},
{
"name": "api_type",
"type": "string",
"required": false,
"description": "The type of the OpenAI API. If you use Azure, it can be: azure."
},
{
"name": "api_version",
"type": "string",
"required": false,
"description": "The version of the OpenAI API."
},
{
"name": "http_proxy",
"type": "string",
"required": false,
"description": "The HTTP or HTTPS proxy to use for the API."
}
]
}} />
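A Moonshot entry under the assumed `[[models.llms]]` section; the model name is an example.

```toml
[[models.llms]]
name = "moonshot-v1-8k"                 # example Moonshot model name
provider = "proxy/moonshot"
api_key = "${env:MOONSHOT_API_KEY}"
# api_base falls back to https://api.moonshot.cn/v1 when omitted
```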
@@ -0,0 +1,91 @@
---
title: "Ollama Proxy LLM Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
"name": "OllamaDeployModelParameters",
"description": "Ollama proxy LLM configuration.",
"documentationUrl": "https://ollama.com/library",
"parameters": [
{
"name": "name",
"type": "string",
"required": true,
"description": "The name of the model."
},
{
"name": "backend",
"type": "string",
"required": false,
"description": "The real model name to pass to the provider; default is None. If backend is None, name is used as the real model name."
},
{
"name": "provider",
"type": "string",
"required": false,
"description": "The provider of the model. If the model is deployed locally, this is the inference type; if it is deployed on a third-party service, this is the platform name ('proxy/<platform>').",
"defaultValue": "proxy/ollama"
},
{
"name": "verbose",
"type": "boolean",
"required": false,
"description": "Show verbose output.",
"defaultValue": "False"
},
{
"name": "concurrency",
"type": "integer",
"required": false,
"description": "Model concurrency limit",
"defaultValue": "100"
},
{
"name": "prompt_template",
"type": "string",
"required": false,
"description": "Prompt template. If None, the prompt template is automatically determined from the model. Only used for local deployment."
},
{
"name": "context_length",
"type": "integer",
"required": false,
"description": "The context length of the OpenAI API. If None, it is determined by the model."
},
{
"name": "api_base",
"type": "string",
"required": false,
"description": "The base url of the Ollama API.",
"defaultValue": "${env:OLLAMA_API_BASE:-http://localhost:11434}"
},
{
"name": "api_key",
"type": "string",
"required": false,
"description": "The API key of the OpenAI API.",
"defaultValue": "${env:OPENAI_API_KEY}"
},
{
"name": "api_type",
"type": "string",
"required": false,
"description": "The type of the OpenAI API. If you use Azure, it can be: azure."
},
{
"name": "api_version",
"type": "string",
"required": false,
"description": "The version of the OpenAI API."
},
{
"name": "http_proxy",
"type": "string",
"required": false,
"description": "The HTTP or HTTPS proxy to use for the API."
}
]
}} />
|
||||
|
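A minimal sketch under the same assumed `[[models.llms]]` TOML layout; the model name is a placeholder for any model your local Ollama server provides:

```toml
# Hypothetical DB-GPT config entry for an Ollama-served LLM
[[models.llms]]
name = "qwen2.5"                     # placeholder; use a model pulled via `ollama pull`
provider = "proxy/ollama"
api_base = "http://localhost:11434"  # default Ollama server address
```
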
@@ -0,0 +1,54 @@

---
title: "OllamaEmbeddingDeployModelParameters Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
"name": "OllamaEmbeddingDeployModelParameters",
"description": "Ollama Embeddings deploy model parameters.",
"documentationUrl": "",
"parameters": [
{
"name": "name",
"type": "string",
"required": true,
"description": "The name of the model."
},
{
"name": "provider",
"type": "string",
"required": false,
"description": "The provider of the model. If the model is deployed locally, this is the inference type. If the model is deployed on a third-party service, this is the platform name ('proxy/<platform>')",
"defaultValue": "proxy/ollama"
},
{
"name": "verbose",
"type": "boolean",
"required": false,
"description": "Show verbose output.",
"defaultValue": "False"
},
{
"name": "concurrency",
"type": "integer",
"required": false,
"description": "Model concurrency limit",
"defaultValue": "100"
},
{
"name": "api_url",
"type": "string",
"required": false,
"description": "The URL of the embeddings API.",
"defaultValue": "http://localhost:11434"
},
{
"name": "backend",
"type": "string",
"required": false,
"description": "The real model name to pass to the provider, default is None. If backend is None, use name as the real model name."
}
]
}} />

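A minimal sketch of a matching `[[models.embeddings]]` TOML entry (assumed layout; the model name is a placeholder):

```toml
# Hypothetical DB-GPT config entry for Ollama embeddings
[[models.embeddings]]
name = "bge-m3"                    # placeholder embedding model served by Ollama
provider = "proxy/ollama"
api_url = "http://localhost:11434"
```
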
483
docs/docs/config-reference/overview.mdx
Normal file
@@ -0,0 +1,483 @@

import { ConfigDiagram } from "@site/src/components/mdx/ConfigDiagram";

# Configuration Overview

## The layout of configuration classes

<ConfigDiagram relationships={[
{ "from": "ApplicationConfig", "to": "HookConfig", "label": "hooks" },
{ "from": "ApplicationConfig", "to": "SystemParameters", "label": "system" },
{ "from": "ApplicationConfig", "to": "ServiceConfig", "label": "service" },
{ "from": "ServiceConfig", "to": "ServiceWebParameters", "label": "web" },
{ "from": "ServiceWebParameters", "to": "RDBMSDatasourceParameters", "label": "database" },
{ "from": "ServiceWebParameters", "to": "SQLiteConnectorParameters", "label": "database" },
{ "from": "ServiceWebParameters", "to": "TuGraphParameters", "label": "database" },
{ "from": "ServiceWebParameters", "to": "SparkParameters", "label": "database" },
{ "from": "ServiceWebParameters", "to": "ClickhouseParameters", "label": "database" },
{ "from": "ServiceWebParameters", "to": "DorisParameters", "label": "database" },
{ "from": "ServiceWebParameters", "to": "DuckDbConnectorParameters", "label": "database" },
{ "from": "ServiceWebParameters", "to": "HiveParameters", "label": "database" },
{ "from": "ServiceWebParameters", "to": "MSSQLParameters", "label": "database" },
{ "from": "ServiceWebParameters", "to": "MySQLParameters", "label": "database" },
{ "from": "ServiceWebParameters", "to": "OceanBaseParameters", "label": "database" },
{ "from": "ServiceWebParameters", "to": "PostgreSQLParameters", "label": "database" },
{ "from": "ServiceWebParameters", "to": "StarRocksParameters", "label": "database" },
{ "from": "ServiceWebParameters", "to": "VerticaParameters", "label": "database" },
{ "from": "ServiceWebParameters", "to": "TracerParameters", "label": "trace" },
{ "from": "ServiceWebParameters", "to": "LoggingParameters", "label": "log" },
{ "from": "ServiceWebParameters", "to": "ModelCacheParameters", "label": "model_cache" },
{ "from": "ServiceConfig", "to": "ModelServiceConfig", "label": "model" },
{ "from": "ModelServiceConfig", "to": "ModelWorkerParameters", "label": "worker" },
{ "from": "ModelWorkerParameters", "to": "LoggingParameters", "label": "log" },
{ "from": "ModelWorkerParameters", "to": "TracerParameters", "label": "trace" },
{ "from": "ModelServiceConfig", "to": "ModelAPIServerParameters", "label": "api" },
{ "from": "ModelAPIServerParameters", "to": "LoggingParameters", "label": "log" },
{ "from": "ModelAPIServerParameters", "to": "TracerParameters", "label": "trace" },
{ "from": "ModelServiceConfig", "to": "ModelControllerParameters", "label": "controller" },
{ "from": "ModelControllerParameters", "to": "LoggingParameters", "label": "log" },
{ "from": "ModelControllerParameters", "to": "TracerParameters", "label": "trace" },
{ "from": "ModelControllerParameters", "to": "DBModelRegistryParameters", "label": "registry" },
{ "from": "DBModelRegistryParameters", "to": "RDBMSDatasourceParameters", "label": "database" },
{ "from": "DBModelRegistryParameters", "to": "SQLiteConnectorParameters", "label": "database" },
{ "from": "DBModelRegistryParameters", "to": "TuGraphParameters", "label": "database" },
{ "from": "DBModelRegistryParameters", "to": "SparkParameters", "label": "database" },
{ "from": "DBModelRegistryParameters", "to": "ClickhouseParameters", "label": "database" },
{ "from": "DBModelRegistryParameters", "to": "DorisParameters", "label": "database" },
{ "from": "DBModelRegistryParameters", "to": "DuckDbConnectorParameters", "label": "database" },
{ "from": "DBModelRegistryParameters", "to": "HiveParameters", "label": "database" },
{ "from": "DBModelRegistryParameters", "to": "MSSQLParameters", "label": "database" },
{ "from": "DBModelRegistryParameters", "to": "MySQLParameters", "label": "database" },
{ "from": "DBModelRegistryParameters", "to": "OceanBaseParameters", "label": "database" },
{ "from": "DBModelRegistryParameters", "to": "PostgreSQLParameters", "label": "database" },
{ "from": "DBModelRegistryParameters", "to": "StarRocksParameters", "label": "database" },
{ "from": "DBModelRegistryParameters", "to": "VerticaParameters", "label": "database" },
{ "from": "ApplicationConfig", "to": "ModelsDeployParameters", "label": "models" },
{ "from": "ModelsDeployParameters", "to": "HFLLMDeployModelParameters", "label": "llms" },
{ "from": "HFLLMDeployModelParameters", "to": "BitsandbytesQuantization", "label": "quantization" },
{ "from": "HFLLMDeployModelParameters", "to": "BitsandbytesQuantization8bits", "label": "quantization" },
{ "from": "HFLLMDeployModelParameters", "to": "BitsandbytesQuantization4bits", "label": "quantization" },
{ "from": "ModelsDeployParameters", "to": "VLLMDeployModelParameters", "label": "llms" },
{ "from": "ModelsDeployParameters", "to": "LlamaServerParameters", "label": "llms" },
{ "from": "ModelsDeployParameters", "to": "LlamaCppModelParameters", "label": "llms" },
{ "from": "ModelsDeployParameters", "to": "OpenAICompatibleDeployModelParameters", "label": "llms" },
{ "from": "ModelsDeployParameters", "to": "SiliconFlowDeployModelParameters", "label": "llms" },
{ "from": "ModelsDeployParameters", "to": "ZhipuDeployModelParameters", "label": "llms" },
{ "from": "ModelsDeployParameters", "to": "MoonshotDeployModelParameters", "label": "llms" },
{ "from": "ModelsDeployParameters", "to": "GiteeDeployModelParameters", "label": "llms" },
{ "from": "ModelsDeployParameters", "to": "DeepSeekDeployModelParameters", "label": "llms" },
{ "from": "ModelsDeployParameters", "to": "OllamaDeployModelParameters", "label": "llms" },
{ "from": "ModelsDeployParameters", "to": "YiDeployModelParameters", "label": "llms" },
{ "from": "ModelsDeployParameters", "to": "SparkDeployModelParameters", "label": "llms" },
{ "from": "ModelsDeployParameters", "to": "BaichuanDeployModelParameters", "label": "llms" },
{ "from": "ModelsDeployParameters", "to": "GeminiDeployModelParameters", "label": "llms" },
{ "from": "ModelsDeployParameters", "to": "TongyiDeployModelParameters", "label": "llms" },
{ "from": "ModelsDeployParameters", "to": "VolcengineDeployModelParameters", "label": "llms" },
{ "from": "ModelsDeployParameters", "to": "WenxinDeployModelParameters", "label": "llms" },
{ "from": "ModelsDeployParameters", "to": "ClaudeDeployModelParameters", "label": "llms" },
{ "from": "ModelsDeployParameters", "to": "HFEmbeddingDeployModelParameters", "label": "embeddings" },
{ "from": "ModelsDeployParameters", "to": "OpenAPIEmbeddingDeployModelParameters", "label": "embeddings" },
{ "from": "ModelsDeployParameters", "to": "JinaEmbeddingsDeployModelParameters", "label": "embeddings" },
{ "from": "ModelsDeployParameters", "to": "OllamaEmbeddingDeployModelParameters", "label": "embeddings" },
{ "from": "ModelsDeployParameters", "to": "QianfanEmbeddingDeployModelParameters", "label": "embeddings" },
{ "from": "ModelsDeployParameters", "to": "TongyiEmbeddingDeployModelParameters", "label": "embeddings" },
{ "from": "ModelsDeployParameters", "to": "CrossEncoderRerankEmbeddingsParameters", "label": "rerankers" },
{ "from": "ModelsDeployParameters", "to": "OpenAPIRerankerDeployModelParameters", "label": "rerankers" },
{ "from": "ModelsDeployParameters", "to": "SiliconFlowRerankEmbeddingsParameters", "label": "rerankers" },
{ "from": "ApplicationConfig", "to": "ServeConfig", "label": "serves" },
{ "from": "ApplicationConfig", "to": "RagParameters", "label": "rag" },
{ "from": "RagParameters", "to": "StorageConfig", "label": "storage" },
{ "from": "ApplicationConfig", "to": "TracerParameters", "label": "trace" },
{ "from": "ApplicationConfig", "to": "LoggingParameters", "label": "log" }
]} />

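To make the diagram concrete, here is a minimal sketch of how these classes could map onto a TOML config file; the section names follow the relationship labels above and every value is a placeholder:

```toml
# A sketch of the overall layout, assuming the TOML format used by DB-GPT configs
[system]                    # SystemParameters
language = "en"

[service.web]               # ServiceConfig -> ServiceWebParameters
host = "0.0.0.0"
port = 5670

[service.web.database]      # one of the datasource parameter classes, e.g. SQLite
type = "sqlite"
path = "pilot/meta_data/dbgpt.db"

[models]                    # ModelsDeployParameters
[[models.llms]]             # an LLMDeployModelParameters entry
name = "gpt-4o"
provider = "proxy/openai"

[[models.embeddings]]       # an EmbeddingDeployModelParameters entry
name = "text-embedding-3-small"
provider = "proxy/openai"
```
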
@@ -0,0 +1,57 @@

---
title: "BitsandbytesQuantization4bits Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
"name": "BitsandbytesQuantization4bits",
"description": "Bits and bytes quantization 4 bits parameters.",
"documentationUrl": "",
"parameters": [
{
"name": "load_in_4bits",
"type": "boolean",
"required": false,
"description": "Whether to load the model in 4 bits.",
"defaultValue": "True"
},
{
"name": "bnb_4bit_compute_dtype",
"type": "string",
"required": false,
"description": "To speed up computation, you can change the data type from float32 (the default value) to bfloat16",
"validValues": [
"bfloat16",
"float16",
"float32"
]
},
{
"name": "bnb_4bit_quant_type",
"type": "string",
"required": false,
"description": "Quantization datatypes, `fp4` (four-bit float) and `nf4` (normal four-bit float), only valid when load_in_4bits=True",
"defaultValue": "nf4",
"validValues": [
"nf4",
"fp4"
]
},
{
"name": "bnb_4bit_use_double_quant",
"type": "boolean",
"required": false,
"description": "Nested quantization is a technique that can save additional memory at no additional performance cost. This feature performs a second quantization of the already quantized weights to save an additional 0.4 bits/parameter.",
"defaultValue": "True"
},
{
"name": "load_in_8bits",
"type": "boolean",
"required": false,
"description": "Whether to load the model in 8 bits (LLM.int8() algorithm), default is False.",
"defaultValue": "False"
}
]
}} />

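A minimal sketch of enabling 4-bit quantization for a locally deployed HF model, assuming the quantization table nests under an llms entry as the overview diagram suggests; the model name is a placeholder:

```toml
# Hypothetical 4-bit quantization for a local HF deployment
[[models.llms]]
name = "Qwen/Qwen2.5-7B-Instruct"   # placeholder HF model
provider = "hf"

[models.llms.quantization]          # BitsandbytesQuantization4bits fields
load_in_4bits = true
bnb_4bit_compute_dtype = "bfloat16"
bnb_4bit_quant_type = "nf4"
bnb_4bit_use_double_quant = true
```
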
@@ -0,0 +1,49 @@

---
title: "BitsandbytesQuantization8bits Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
"name": "BitsandbytesQuantization8bits",
"description": "Bits and bytes quantization 8 bits parameters.",
"documentationUrl": "",
"parameters": [
{
"name": "load_in_4bits",
"type": "boolean",
"required": false,
"description": "Whether to load the model in 4 bits, default is False.",
"defaultValue": "False"
},
{
"name": "llm_int8_enable_fp32_cpu_offload",
"type": "boolean",
"required": false,
"description": "8-bit models can offload weights between the CPU and GPU to support fitting very large models into memory. The weights dispatched to the CPU are actually stored in float32, and aren’t converted to 8-bit.",
"defaultValue": "False"
},
{
"name": "llm_int8_threshold",
"type": "number",
"required": false,
"description": "An “outlier” is a hidden state value greater than a certain threshold, and these values are computed in fp16. While the values are usually normally distributed ([-3.5, 3.5]), this distribution can be very different for large models ([-60, 6] or [6, 60]). 8-bit quantization works well for values ~5, but beyond that, there is a significant performance penalty. A good default threshold value is 6, but a lower threshold may be needed for more unstable models (small models or finetuning).",
"defaultValue": "6.0"
},
{
"name": "llm_int8_skip_modules",
"type": "string",
"required": false,
"description": "An explicit list of the modules that we do not want to convert in 8-bit. This is useful for models such as Jukebox that have several heads in different places and not necessarily at the last position. For example, for `CausalLM` models, the last `lm_head` is kept in its original `dtype`",
"defaultValue": "[]"
},
{
"name": "load_in_8bits",
"type": "boolean",
"required": false,
"description": "Whether to load the model in 8 bits (LLM.int8() algorithm).",
"defaultValue": "True"
}
]
}} />

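A minimal sketch of the 8-bit variant under the same assumptions:

```toml
# Hypothetical 8-bit (LLM.int8()) quantization for a local HF deployment
[[models.llms]]
name = "Qwen/Qwen2.5-7B-Instruct"   # placeholder HF model
provider = "hf"

[models.llms.quantization]          # BitsandbytesQuantization8bits fields
load_in_8bits = true
llm_int8_threshold = 6.0
llm_int8_enable_fp32_cpu_offload = false
```
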
@@ -0,0 +1,28 @@

---
title: "BitsandbytesQuantization Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
"name": "BitsandbytesQuantization",
"description": "Bits and bytes quantization parameters.",
"documentationUrl": "",
"parameters": [
{
"name": "load_in_4bits",
"type": "boolean",
"required": false,
"description": "Whether to load the model in 4 bits, default is False.",
"defaultValue": "False"
},
{
"name": "load_in_8bits",
"type": "boolean",
"required": false,
"description": "Whether to load the model in 8 bits (LLM.int8() algorithm), default is False.",
"defaultValue": "False"
}
]
}} />

@@ -0,0 +1,92 @@

---
title: "DBModelRegistryParameters Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
"name": "DBModelRegistryParameters",
"description": "Database model registry parameters.",
"documentationUrl": "",
"parameters": [
{
"name": "database",
"type": "BaseDatasourceParameters",
"required": false,
"description": "Database configuration for model registry",
"nestedTypes": [
{ "type": "link", "text": "rdbmsdatasourceparameters configuration", "url": "././base_rdbmsdatasourceparameters_4f774f" },
{ "type": "link", "text": "sqlite configuration", "url": "././conn_sqlite_sqliteconnectorparameters_82c8b5" },
{ "type": "link", "text": "tugraph configuration", "url": "././conn_tugraph_tugraphparameters_0c844e" },
{ "type": "link", "text": "spark configuration", "url": "././conn_spark_sparkparameters_174bbc" },
{ "type": "link", "text": "clickhouse configuration", "url": "././conn_clickhouse_clickhouseparameters_4a1237" },
{ "type": "link", "text": "doris configuration", "url": "././conn_doris_dorisparameters_e33c53" },
{ "type": "link", "text": "duckdb configuration", "url": "././conn_duckdb_duckdbconnectorparameters_c672c7" },
{ "type": "link", "text": "hive configuration", "url": "././conn_hive_hiveparameters_ec3601" },
{ "type": "link", "text": "mssql configuration", "url": "././conn_mssql_mssqlparameters_d79d1c" },
{ "type": "link", "text": "mysql configuration", "url": "././conn_mysql_mysqlparameters_4393c4" },
{ "type": "link", "text": "oceanbase configuration", "url": "././conn_oceanbase_oceanbaseparameters_260d2d" },
{ "type": "link", "text": "postgresql configuration", "url": "././conn_postgresql_postgresqlparameters_22efa5" },
{ "type": "link", "text": "starrocks configuration", "url": "././conn_starrocks_starrocksparameters_e511f7" },
{ "type": "link", "text": "vertica configuration", "url": "././conn_vertica_verticaparameters_c712b8" }
]
}
]
}} />

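A hypothetical sketch of a database-backed registry; the nesting follows the controller -> registry -> database labels in the configuration overview, and all connection fields are placeholders:

```toml
# Hypothetical registry database for the model controller
[service.model.controller.registry.database]
type = "mysql"          # pick one of the supported datasource types
host = "127.0.0.1"
port = 3306
user = "root"
database = "dbgpt"
password = "${env:MYSQL_PASSWORD}"   # hypothetical environment variable
```
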
@@ -0,0 +1,88 @@

---
title: "ModelAPIServerParameters Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
"name": "ModelAPIServerParameters",
"description": "Model API server parameters.",
"documentationUrl": "",
"parameters": [
{
"name": "port",
"type": "integer",
"required": false,
"description": "Model API server deploy port",
"defaultValue": "8100"
},
{
"name": "daemon",
"type": "boolean",
"required": false,
"description": "Run the server as a daemon.",
"defaultValue": "False"
},
{
"name": "log",
"type": "LoggingParameters",
"required": false,
"description": "Logging configuration",
"nestedTypes": [
{ "type": "link", "text": "loggingparameters configuration", "url": "././utils_loggingparameters_4ba5c6" }
],
"defaultValue": "LoggingParameters"
},
{
"name": "trace",
"type": "TracerParameters",
"required": false,
"description": "Tracer configuration",
"nestedTypes": [
{ "type": "link", "text": "tracerparameters configuration", "url": "././tracer_impl_tracerparameters_f8f272" }
]
},
{
"name": "controller_addr",
"type": "string",
"required": false,
"description": "The model controller address to connect to",
"defaultValue": "http://127.0.0.1:8000"
},
{
"name": "api_keys",
"type": "string",
"required": false,
"description": "Optional comma-separated list of API keys"
},
{
"name": "embedding_batch_size",
"type": "integer",
"required": false,
"description": "Embedding batch size"
},
{
"name": "ignore_stop_exceeds_error",
"type": "boolean",
"required": false,
"description": "Ignore the error raised when the stop words exceed the limit",
"defaultValue": "False"
},
{
"name": "host",
"type": "string",
"required": false,
"description": "The host IP address to bind to.",
"defaultValue": "0.0.0.0"
}
]
}} />

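A minimal sketch, assuming the `[service.model.api]` section implied by the configuration overview; the API keys are placeholders:

```toml
# Hypothetical API server section
[service.model.api]
host = "0.0.0.0"
port = 8100
controller_addr = "http://127.0.0.1:8000"
api_keys = "sk-key-1,sk-key-2"   # placeholder comma-separated keys
```
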
@@ -0,0 +1,89 @@

---
title: "ModelControllerParameters Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
"name": "ModelControllerParameters",
"description": "Model controller parameters.",
"documentationUrl": "",
"parameters": [
{
"name": "port",
"type": "integer",
"required": false,
"description": "Model Controller deploy port",
"defaultValue": "8000"
},
{
"name": "daemon",
"type": "boolean",
"required": false,
"description": "Run the server as a daemon.",
"defaultValue": "False"
},
{
"name": "log",
"type": "LoggingParameters",
"required": false,
"description": "Logging configuration",
"nestedTypes": [
{ "type": "link", "text": "loggingparameters configuration", "url": "././utils_loggingparameters_4ba5c6" }
],
"defaultValue": "LoggingParameters"
},
{
"name": "trace",
"type": "TracerParameters",
"required": false,
"description": "Tracer configuration",
"nestedTypes": [
{ "type": "link", "text": "tracerparameters configuration", "url": "././tracer_impl_tracerparameters_f8f272" }
]
},
{
"name": "registry",
"type": "BaseModelRegistryParameters",
"required": false,
"description": "Model registry configuration. If None, use the embedded registry",
"nestedTypes": [
{ "type": "link", "text": "dbmodelregistryparameters configuration", "url": "././parameter_dbmodelregistryparameters_87d036" }
]
},
{
"name": "heartbeat_interval_secs",
"type": "integer",
"required": false,
"description": "The interval for checking heartbeats (seconds)",
"defaultValue": "20"
},
{
"name": "heartbeat_timeout_secs",
"type": "integer",
"required": false,
"description": "The timeout for checking heartbeats (seconds). A worker will be marked unhealthy if it does not respond within this time",
"defaultValue": "60"
},
{
"name": "host",
"type": "string",
"required": false,
"description": "The host IP address to bind to.",
"defaultValue": "0.0.0.0"
}
]
}} />

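A minimal sketch under the assumed `[service.model.controller]` section:

```toml
# Hypothetical controller section; values mirror the defaults above
[service.model.controller]
host = "0.0.0.0"
port = 8000
heartbeat_interval_secs = 20
heartbeat_timeout_secs = 60
```
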
@@ -0,0 +1,199 @@

---
title: "ModelsDeployParameters Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
"name": "ModelsDeployParameters",
"description": "Models deployment parameters.",
"documentationUrl": "",
"parameters": [
{
"name": "default_embedding",
"type": "string",
"required": false,
"description": "Default embedding model name, used to specify which model to use when you have multiple embedding models"
},
{
"name": "default_reranker",
"type": "string",
"required": false,
"description": "Default reranker model name, used to specify which model to use when you have multiple reranker models"
},
{
"name": "llms",
"type": "LLMDeployModelParameters",
"required": false,
"description": "LLM model deploy configuration. If you deploy in cluster mode, you only need to deploy one model.",
"nestedTypes": [
{ "type": "link", "text": "hf configuration", "url": "././hf_adapter_hfllmdeploymodelparameters_103e81" },
{ "type": "link", "text": "vllm configuration", "url": "././vllm_adapter_vllmdeploymodelparameters_1d4a24" },
{ "type": "link", "text": "llama.cpp.server configuration", "url": "././llama_cpp_adapter_llamaserverparameters_421f40" },
{ "type": "link", "text": "llama.cpp configuration", "url": "././llama_cpp_py_adapter_llamacppmodelparameters_e88874" },
{ "type": "link", "text": "proxy/openai configuration", "url": "././chatgpt_openaicompatibledeploymodelparameters_c3d426" },
{ "type": "link", "text": "proxy/siliconflow configuration", "url": "././siliconflow_siliconflowdeploymodelparameters_abe22f" },
{ "type": "link", "text": "proxy/zhipu configuration", "url": "././zhipu_zhipudeploymodelparameters_c51e31" },
{ "type": "link", "text": "proxy/moonshot configuration", "url": "././moonshot_moonshotdeploymodelparameters_aa2f6b" },
{ "type": "link", "text": "proxy/gitee configuration", "url": "././gitee_giteedeploymodelparameters_d1bdb3" },
{ "type": "link", "text": "proxy/deepseek configuration", "url": "././deepseek_deepseekdeploymodelparameters_194cbd" },
{ "type": "link", "text": "proxy/ollama configuration", "url": "././ollama_ollamadeploymodelparameters_d55be6" },
{ "type": "link", "text": "proxy/yi configuration", "url": "././yi_yideploymodelparameters_92dbaa" },
{ "type": "link", "text": "proxy/spark configuration", "url": "././spark_sparkdeploymodelparameters_afba3c" },
{ "type": "link", "text": "proxy/baichuan configuration", "url": "././baichuan_baichuandeploymodelparameters_0bf9cc" },
{ "type": "link", "text": "proxy/gemini configuration", "url": "././gemini_geminideploymodelparameters_5113b9" },
{ "type": "link", "text": "proxy/tongyi configuration", "url": "././tongyi_tongyideploymodelparameters_02a91b" },
{ "type": "link", "text": "proxy/volcengine configuration", "url": "././volcengine_volcenginedeploymodelparameters_938015" },
{ "type": "link", "text": "proxy/wenxin configuration", "url": "././wenxin_wenxindeploymodelparameters_63c66b" },
{ "type": "link", "text": "proxy/claude configuration", "url": "././claude_claudedeploymodelparameters_1f0c45" }
],
"defaultValue": "[]"
},
{
"name": "embeddings",
"type": "EmbeddingDeployModelParameters",
"required": false,
"description": "Embedding model deploy configuration. If you deploy in cluster mode, you only need to deploy one model.",
"nestedTypes": [
{ "type": "link", "text": "hf configuration", "url": "././embeddings_hfembeddingdeploymodelparameters_f588e1" },
{ "type": "link", "text": "proxy/openai configuration", "url": "././embeddings_openapiembeddingdeploymodelparameters_f9ba47" },
{ "type": "link", "text": "proxy/jina configuration", "url": "././jina_jinaembeddingsdeploymodelparameters_40b0f2" },
{ "type": "link", "text": "proxy/ollama configuration", "url": "././ollama_ollamaembeddingdeploymodelparameters_b511e0" },
{ "type": "link", "text": "proxy/qianfan configuration", "url": "././qianfan_qianfanembeddingdeploymodelparameters_257d2a" },
{ "type": "link", "text": "proxy/tongyi configuration", "url": "././tongyi_tongyiembeddingdeploymodelparameters_a7cbb4" }
],
"defaultValue": "[]"
},
{
"name": "rerankers",
"type": "RerankerDeployModelParameters",
"required": false,
"description": "Reranker model deploy configuration. If you deploy in cluster mode, you only need to deploy one model.",
"nestedTypes": [
{ "type": "link", "text": "hf configuration", "url": "././rerank_crossencoderrerankembeddingsparameters_63ec13" },
{ "type": "link", "text": "proxy/openapi configuration", "url": "././rerank_openapirerankerdeploymodelparameters_778108" },
{ "type": "link", "text": "proxy/siliconflow configuration", "url": "././rerank_siliconflowrerankembeddingsparameters_af0257" }
],
"defaultValue": "[]"
},
{
"name": "default_llm",
"type": "string",
"required": false,
"description": "Default LLM model name, used to specify which model to use when you have multiple LLMs"
}
]
}} />

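A minimal sketch of a multi-model deployment; model names are placeholders and `default_llm` selects among the entries:

```toml
# Hypothetical multi-model deployment
[models]
default_llm = "gpt-4o"

[[models.llms]]
name = "gpt-4o"
provider = "proxy/openai"

[[models.llms]]
name = "deepseek-chat"
provider = "proxy/deepseek"

[[models.embeddings]]
name = "text-embedding-3-small"
provider = "proxy/openai"
```
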
@@ -0,0 +1,56 @@

---
title: "ModelServiceConfig Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
"name": "ModelServiceConfig",
"description": "Model service configuration.",
"documentationUrl": "",
"parameters": [
{
"name": "api",
"type": "ModelAPIServerParameters",
"required": false,
"description": "Model API server configuration",
"nestedTypes": [
{ "type": "link", "text": "modelapiserverparameters configuration", "url": "././parameter_modelapiserverparameters_763bec" }
],
"defaultValue": "ModelAPIServerParameters"
},
{
"name": "controller",
"type": "ModelControllerParameters",
"required": false,
"description": "Model controller configuration",
"nestedTypes": [
{ "type": "link", "text": "modelcontrollerparameters configuration", "url": "././parameter_modelcontrollerparameters_689309" }
],
"defaultValue": "ModelControllerParameters"
},
{
"name": "worker",
"type": "ModelWorkerParameters",
"required": false,
"description": "Model worker configuration",
"nestedTypes": [
{ "type": "link", "text": "modelworkerparameters configuration", "url": "././parameter_modelworkerparameters_3fd00b" }
],
"defaultValue": "ModelWorkerParameters"
}
]
}} />

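A minimal sketch of the three sub-sections, assuming the `[service.model.*]` layout from the configuration overview:

```toml
[service.model.worker]       # ModelWorkerParameters
port = 8001

[service.model.controller]   # ModelControllerParameters
port = 8000

[service.model.api]          # ModelAPIServerParameters
port = 8100
```
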
@@ -0,0 +1,119 @@

---
title: "ModelWorkerParameters Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
"name": "ModelWorkerParameters",
"description": "Model worker parameters.",
"documentationUrl": "",
"parameters": [
{
"name": "port",
"type": "integer",
"required": false,
"description": "Model worker deploy port",
"defaultValue": "8001"
},
{
"name": "daemon",
"type": "boolean",
"required": false,
"description": "Run the server as a daemon.",
"defaultValue": "False"
},
{
"name": "log",
"type": "LoggingParameters",
"required": false,
"description": "Logging configuration",
"nestedTypes": [
{ "type": "link", "text": "loggingparameters configuration", "url": "././utils_loggingparameters_4ba5c6" }
],
"defaultValue": "LoggingParameters"
},
{
"name": "trace",
"type": "TracerParameters",
"required": false,
"description": "Tracer configuration",
"nestedTypes": [
{ "type": "link", "text": "tracerparameters configuration", "url": "././tracer_impl_tracerparameters_f8f272" }
]
},
{
"name": "worker_type",
"type": "string",
"required": false,
"description": "Worker type",
"validValues": [
"llm",
"text2vec",
"reranker"
]
},
{
"name": "worker_class",
"type": "string",
"required": false,
"description": "Model worker class, e.g. dbgpt.model.cluster.DefaultModelWorker"
},
{
"name": "standalone",
"type": "boolean",
"required": false,
"description": "Standalone mode. If True, run an embedded ModelController",
"defaultValue": "False"
},
{
"name": "register",
"type": "boolean",
"required": false,
"description": "Register the current worker to the model controller",
"defaultValue": "True"
},
{
"name": "worker_register_host",
"type": "string",
"required": false,
"description": "The IP address of the current worker to register with the ModelController. If None, the address is automatically determined"
},
{
"name": "controller_addr",
"type": "string",
"required": false,
"description": "The model controller address to register with"
},
{
"name": "send_heartbeat",
"type": "boolean",
"required": false,
"description": "Send heartbeat to the model controller",
"defaultValue": "True"
},
{
"name": "heartbeat_interval",
"type": "integer",
"required": false,
"description": "The interval for sending heartbeats (seconds)",
"defaultValue": "20"
},
{
"name": "host",
"type": "string",
"required": false,
"description": "The host IP address to bind to.",
"defaultValue": "0.0.0.0"
}
]
}} />

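A minimal sketch under the assumed `[service.model.worker]` section:

```toml
# Hypothetical worker section; values mirror the defaults above
[service.model.worker]
host = "0.0.0.0"
port = 8001
worker_type = "llm"
standalone = false
register = true
controller_addr = "http://127.0.0.1:8000"
heartbeat_interval = 20
```
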
@@ -0,0 +1,59 @@

---
title: "QianfanEmbeddingDeployModelParameters Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
"name": "QianfanEmbeddingDeployModelParameters",
"description": "Qianfan Embeddings deploy model parameters.",
"documentationUrl": "",
"parameters": [
{
"name": "name",
"type": "string",
"required": true,
"description": "The name of the model."
},
{
"name": "provider",
"type": "string",
"required": false,
"description": "The provider of the model. If the model is deployed locally, this is the inference type. If the model is deployed on a third-party service, this is the platform name ('proxy/<platform>')",
"defaultValue": "proxy/qianfan"
},
{
"name": "verbose",
"type": "boolean",
"required": false,
"description": "Show verbose output.",
"defaultValue": "False"
},
{
"name": "concurrency",
"type": "integer",
"required": false,
"description": "Model concurrency limit",
"defaultValue": "100"
},
{
"name": "api_key",
"type": "string",
"required": false,
"description": "The API key for the embeddings API."
},
{
"name": "api_secret",
"type": "string",
"required": false,
"description": "The secret key for the embeddings API. It is the SK for Qianfan."
},
{
"name": "backend",
"type": "string",
"required": false,
"description": "The real model name to pass to the provider, default is None. If backend is None, use name as the real model name."
}
]
}} />

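A minimal sketch of a matching `[[models.embeddings]]` entry; the model name and environment variable names are assumptions:

```toml
# Hypothetical DB-GPT config entry for Qianfan embeddings
[[models.embeddings]]
name = "embedding-v1"                     # placeholder Qianfan embedding model
provider = "proxy/qianfan"
api_key = "${env:QIANFAN_API_KEY}"        # hypothetical variable names
api_secret = "${env:QIANFAN_API_SECRET}"
```
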
@@ -0,0 +1,66 @@

---
title: "CrossEncoderRerankEmbeddingsParameters Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
"name": "CrossEncoderRerankEmbeddingsParameters",
"description": "CrossEncoder Rerank Embeddings Parameters.",
"documentationUrl": "",
"parameters": [
{
"name": "name",
"type": "string",
"required": true,
"description": "The name of the model."
},
{
"name": "path",
"type": "string",
"required": false,
"description": "The path of the model, if you want to deploy a local model."
},
{
"name": "device",
"type": "string",
"required": false,
"description": "Device to run the model on. If None, the device is automatically determined"
},
{
"name": "provider",
"type": "string",
"required": false,
"description": "The provider of the model. If the model is deployed locally, this is the inference type. If the model is deployed on a third-party service, this is the platform name ('proxy/<platform>')",
"defaultValue": "hf"
},
{
"name": "verbose",
"type": "boolean",
"required": false,
"description": "Show verbose output.",
"defaultValue": "False"
},
{
"name": "concurrency",
"type": "integer",
"required": false,
"description": "Model concurrency limit",
"defaultValue": "50"
},
{
"name": "max_length",
"type": "integer",
"required": false,
"description": "Max length for input sequences. Longer sequences will be truncated."
},
{
"name": "model_kwargs",
"type": "object",
"required": false,
"description": "Keyword arguments to pass to the model.",
"defaultValue": "{}"
}
]
}} />

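A minimal sketch of a local cross-encoder reranker, assuming the `[[models.rerankers]]` layout; the model name is a placeholder:

```toml
# Hypothetical local cross-encoder reranker
[[models.rerankers]]
name = "BAAI/bge-reranker-v2-m3"   # placeholder cross-encoder model
provider = "hf"
max_length = 512
```
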
@@ -0,0 +1,67 @@

---
title: "OpenAPIRerankerDeployModelParameters Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
"name": "OpenAPIRerankerDeployModelParameters",
"description": "OpenAPI Reranker Deploy Model Parameters.",
"documentationUrl": "",
"parameters": [
{
"name": "name",
"type": "string",
"required": true,
"description": "The name of the model."
},
{
"name": "provider",
"type": "string",
"required": false,
"description": "The provider of the model. If the model is deployed locally, this is the inference type. If the model is deployed on a third-party service, this is the platform name ('proxy/<platform>')",
"defaultValue": "proxy/openapi"
},
{
"name": "verbose",
"type": "boolean",
"required": false,
"description": "Show verbose output.",
"defaultValue": "False"
},
{
"name": "concurrency",
"type": "integer",
"required": false,
"description": "Model concurrency limit",
"defaultValue": "50"
},
{
"name": "api_url",
"type": "string",
"required": false,
"description": "The URL of the rerank API.",
"defaultValue": "http://localhost:8100/v1/beta/relevance"
},
{
"name": "api_key",
"type": "string",
"required": false,
"description": "The API key for the rerank API."
},
{
"name": "backend",
"type": "string",
"required": false,
"description": "The real model name to pass to the provider, default is None. If backend is None, use name as the real model name."
},
{
"name": "timeout",
"type": "integer",
"required": false,
"description": "The timeout for the request in seconds.",
"defaultValue": "60"
}
]
}} />

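A minimal sketch under the same assumed layout; the model name and the environment variable are hypothetical:

```toml
# Hypothetical OpenAPI-compatible reranker entry
[[models.rerankers]]
name = "bge-reranker-v2-m3"         # placeholder model name
provider = "proxy/openapi"
api_url = "http://localhost:8100/v1/beta/relevance"
api_key = "${env:RERANK_API_KEY}"   # hypothetical variable name
timeout = 60
```
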
@@ -0,0 +1,68 @@

---
title: "SiliconFlowRerankEmbeddingsParameters Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
"name": "SiliconFlowRerankEmbeddingsParameters",
"description": "SiliconFlow Rerank Embeddings Parameters.",
"documentationUrl": "",
"parameters": [
{
"name": "name",
"type": "string",
"required": true,
"description": "The name of the model."
},
{
"name": "provider",
"type": "string",
"required": false,
"description": "The provider of the model. If the model is deployed locally, this is the inference type. If the model is deployed on a third-party service, this is the platform name ('proxy/<platform>')",
"defaultValue": "proxy/siliconflow"
},
{
"name": "verbose",
"type": "boolean",
"required": false,
"description": "Show verbose output.",
"defaultValue": "False"
},
{
"name": "concurrency",
"type": "integer",
"required": false,
"description": "Model concurrency limit",
"defaultValue": "50"
},
{
"name": "api_url",
"type": "string",
"required": false,
"description": "The URL of the rerank API.",
"defaultValue": "https://api.siliconflow.cn/v1/rerank"
},
{
"name": "api_key",
"type": "string",
"required": false,
"description": "The API key for the rerank API.",
"defaultValue": "${env:SILICONFLOW_API_KEY}"
},
{
"name": "backend",
"type": "string",
"required": false,
"description": "The real model name to pass to the provider, default is None. If backend is None, use name as the real model name."
},
{
"name": "timeout",
"type": "integer",
"required": false,
"description": "The timeout for the request in seconds.",
"defaultValue": "60"
}
]
}} />

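A minimal sketch for the SiliconFlow reranker under the same assumptions; the model name is a placeholder:

```toml
# Hypothetical SiliconFlow reranker entry
[[models.rerankers]]
name = "BAAI/bge-reranker-v2-m3"   # placeholder model name
provider = "proxy/siliconflow"
api_key = "${env:SILICONFLOW_API_KEY}"
```
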
@@ -0,0 +1,91 @@

---
title: "SiliconFlow Proxy LLM Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
"name": "SiliconFlowDeployModelParameters",
"description": "SiliconFlow proxy LLM configuration.",
"documentationUrl": "https://docs.siliconflow.cn/en/api-reference/chat-completions/chat-completions",
"parameters": [
{
"name": "name",
"type": "string",
"required": true,
"description": "The name of the model."
},
{
"name": "backend",
"type": "string",
"required": false,
"description": "The real model name to pass to the provider, default is None. If backend is None, use name as the real model name."
},
{
"name": "provider",
"type": "string",
"required": false,
"description": "The provider of the model. If the model is deployed locally, this is the inference type. If the model is deployed on a third-party service, this is the platform name ('proxy/<platform>')",
"defaultValue": "proxy/siliconflow"
},
{
"name": "verbose",
"type": "boolean",
"required": false,
"description": "Show verbose output.",
"defaultValue": "False"
},
{
"name": "concurrency",
"type": "integer",
"required": false,
"description": "Model concurrency limit",
"defaultValue": "100"
},
{
"name": "prompt_template",
"type": "string",
"required": false,
"description": "Prompt template. If None, the prompt template is automatically determined from the model. Only used for local deployment."
},
{
"name": "context_length",
"type": "integer",
"required": false,
"description": "The context length of the OpenAI-compatible API. If None, it is determined by the model."
},
{
"name": "api_base",
"type": "string",
"required": false,
"description": "The base URL of the SiliconFlow API.",
"defaultValue": "${env:SILICONFLOW_API_BASE:-https://api.siliconflow.cn/v1}"
},
{
"name": "api_key",
"type": "string",
"required": false,
"description": "The API key of the SiliconFlow API.",
"defaultValue": "${env:SILICONFLOW_API_KEY}"
},
{
"name": "api_type",
"type": "string",
"required": false,
"description": "The type of the OpenAI-compatible API. If you use Azure, it can be: azure"
},
{
"name": "api_version",
"type": "string",
"required": false,
"description": "The version of the OpenAI-compatible API."
},
{
"name": "http_proxy",
"type": "string",
"required": false,
"description": "The HTTP or HTTPS proxy to use for the API."
}
]
}} />

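A minimal sketch of a matching `[[models.llms]]` entry; the model name is a placeholder:

```toml
# Hypothetical DB-GPT config entry for the SiliconFlow proxy LLM
[[models.llms]]
name = "Qwen/Qwen2.5-72B-Instruct"   # placeholder model name
provider = "proxy/siliconflow"
api_base = "https://api.siliconflow.cn/v1"
api_key = "${env:SILICONFLOW_API_KEY}"
```
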
@@ -0,0 +1,91 @@

---
title: "Xunfei Spark Proxy LLM Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
"name": "SparkDeployModelParameters",
"description": "Xunfei Spark proxy LLM configuration.",
"documentationUrl": "https://www.xfyun.cn/doc/spark/HTTP%E8%B0%83%E7%94%A8%E6%96%87%E6%A1%A3.html#_1-%E6%8E%A5%E5%8F%A3%E8%AF%B4%E6%98%8E",
"parameters": [
{
"name": "name",
"type": "string",
"required": true,
"description": "The name of the model."
},
{
"name": "backend",
"type": "string",
"required": false,
"description": "The real model name to pass to the provider, default is None. If backend is None, use name as the real model name."
},
{
"name": "provider",
"type": "string",
"required": false,
"description": "The provider of the model. If the model is deployed locally, this is the inference type. If the model is deployed on a third-party service, this is the platform name ('proxy/<platform>')",
"defaultValue": "proxy/spark"
},
{
"name": "verbose",
"type": "boolean",
"required": false,
"description": "Show verbose output.",
"defaultValue": "False"
},
{
"name": "concurrency",
"type": "integer",
"required": false,
"description": "Model concurrency limit",
"defaultValue": "100"
},
{
"name": "prompt_template",
"type": "string",
"required": false,
"description": "Prompt template. If None, the prompt template is automatically determined from the model. Only used for local deployment."
},
{
"name": "context_length",
"type": "integer",
"required": false,
"description": "The context length of the OpenAI-compatible API. If None, it is determined by the model."
},
{
"name": "api_base",
"type": "string",
"required": false,
"description": "The base URL of the Spark API.",
"defaultValue": "${env:XUNFEI_SPARK_API_BASE:-https://spark-api-open.xf-yun.com/v1}"
},
{
"name": "api_key",
"type": "string",
"required": false,
"description": "The API key of the Spark API.",
"defaultValue": "${env:XUNFEI_SPARK_API_KEY}"
},
{
"name": "api_type",
"type": "string",
"required": false,
"description": "The type of the OpenAI-compatible API. If you use Azure, it can be: azure"
},
{
"name": "api_version",
"type": "string",
"required": false,
"description": "The version of the OpenAI-compatible API."
},
{
"name": "http_proxy",
"type": "string",
"required": false,
"description": "The HTTP or HTTPS proxy to use for the API."
}
]
}} />

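A minimal sketch for Xunfei Spark under the same assumed layout; the model name is a placeholder:

```toml
# Hypothetical DB-GPT config entry for the Xunfei Spark proxy LLM
[[models.llms]]
name = "generalv3.5"     # placeholder Spark model version
provider = "proxy/spark"
api_base = "https://spark-api-open.xf-yun.com/v1"
api_key = "${env:XUNFEI_SPARK_API_KEY}"
```
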
@@ -0,0 +1,91 @@
|
||||
---
|
||||
title: "Tongyi Proxy LLM Configuration"
|
||||
---
|
||||
|
||||
import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";
|
||||
|
||||
<ConfigDetail config={{
|
||||
"name": "TongyiDeployModelParameters",
|
||||
"description": "Tongyi proxy LLM configuration.",
|
||||
"documentationUrl": "https://help.aliyun.com/zh/model-studio/getting-started/first-api-call-to-qwen",
|
||||
"parameters": [
|
||||
{
|
||||
"name": "name",
|
||||
"type": "string",
|
||||
"required": true,
|
||||
"description": "The name of the model."
|
||||
},
|
||||
{
|
||||
"name": "backend",
|
||||
"type": "string",
|
||||
"required": false,
|
||||
"description": "The real model name to pass to the provider, default is None. If backend is None, use name as the real model name."
|
||||
},
|
||||
{
|
||||
"name": "provider",
|
||||
"type": "string",
|
||||
"required": false,
|
||||
"description": "The provider of the model. If model is deployed in local, this is the inference type. If model is deployed in third-party service, this is platform name('proxy/<platform>')",
|
||||
"defaultValue": "proxy/tongyi"
|
||||
},
|
||||
{
|
||||
"name": "verbose",
|
||||
"type": "boolean",
|
||||
"required": false,
|
||||
"description": "Show verbose output.",
|
||||
"defaultValue": "False"
|
||||
},
|
||||
{
|
||||
"name": "concurrency",
|
||||
"type": "integer",
|
||||
"required": false,
|
||||
"description": "Model concurrency limit",
|
||||
"defaultValue": "100"
|
||||
},
|
||||
{
|
||||
"name": "prompt_template",
|
||||
"type": "string",
|
||||
"required": false,
|
||||
"description": "Prompt template. If None, the prompt template is automatically determined from model. Just for local deployment."
|
||||
},
|
||||
{
|
||||
"name": "context_length",
|
||||
"type": "integer",
|
||||
"required": false,
|
||||
"description": "The context length of the OpenAI API. If None, it is determined by the model."
|
||||
},
|
||||
{
|
||||
"name": "api_base",
|
||||
"type": "string",
|
||||
"required": false,
|
||||
"description": "The base url of the tongyi API.",
|
||||
"defaultValue": "https://dashscope.aliyuncs.com/compatible-mode/v1"
|
||||
},
|
||||
{
|
||||
"name": "api_key",
|
||||
"type": "string",
|
||||
"required": false,
|
||||
"description": "The API key of the tongyi API.",
|
||||
"defaultValue": "${env:DASHSCOPE_API_KEY}"
|
||||
},
|
||||
{
|
||||
"name": "api_type",
|
||||
"type": "string",
|
||||
"required": false,
|
||||
"description": "The type of the OpenAI API, if you use Azure, it can be: azure"
|
||||
},
|
||||
{
|
||||
"name": "api_version",
|
||||
"type": "string",
|
||||
"required": false,
|
||||
"description": "The version of the OpenAI API."
|
||||
},
|
||||
{
|
||||
"name": "http_proxy",
|
||||
"type": "string",
|
||||
"required": false,
|
||||
"description": "The http or https proxy to use openai"
|
||||
}
|
||||
]
|
||||
}} />

@@ -0,0 +1,54 @@
---
title: "TongyiEmbeddingDeployModelParameters Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
"name": "TongyiEmbeddingDeployModelParameters",
"description": "Tongyi Embeddings deploy model parameters.",
"documentationUrl": "",
"parameters": [
{
"name": "name",
"type": "string",
"required": true,
"description": "The name of the model."
},
{
"name": "provider",
"type": "string",
"required": false,
"description": "The provider of the model. If model is deployed in local, this is the inference type. If model is deployed in third-party service, this is platform name('proxy/<platform>')",
"defaultValue": "proxy/tongyi"
},
{
"name": "verbose",
"type": "boolean",
"required": false,
"description": "Show verbose output.",
"defaultValue": "False"
},
{
"name": "concurrency",
"type": "integer",
"required": false,
"description": "Model concurrency limit",
"defaultValue": "100"
},
{
"name": "api_key",
"type": "string",
"required": false,
"description": "The API key for the embeddings API."
},
{
"name": "backend",
"type": "string",
"required": false,
"description": "The real model name to pass to the provider, default is None. If backend is None, use name as the real model name.",
"defaultValue": "text-embedding-v1"
}
]
}} />

@@ -0,0 +1,56 @@
---
title: "TracerParameters Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
"name": "TracerParameters",
"description": "Tracer parameters.",
"documentationUrl": "",
"parameters": [
{
"name": "root_operation_name",
"type": "string",
"required": false,
"description": "The root operation name of the tracer"
},
{
"name": "exporter",
"type": "string",
"required": false,
"description": "The exporter of the tracer, e.g. telemetry"
},
{
"name": "otlp_endpoint",
"type": "string",
"required": false,
"description": "The endpoint of the OpenTelemetry Protocol, you can set '${env:OTEL_EXPORTER_OTLP_TRACES_ENDPOINT}' to use the environment variable"
},
{
"name": "otlp_insecure",
"type": "boolean",
"required": false,
"description": "Whether to use insecure connection, you can set '${env:OTEL_EXPORTER_OTLP_TRACES_INSECURE}' to use the environment variable"
},
{
"name": "otlp_timeout",
"type": "integer",
"required": false,
"description": "The timeout of the connection, in seconds, you can set '${env:OTEL_EXPORTER_OTLP_TRACES_TIMEOUT}' to use the environment variable"
},
{
"name": "tracer_storage_cls",
"type": "string",
"required": false,
"description": "The class of the tracer storage"
},
{
"name": "file",
"type": "string",
"required": false,
"description": "The file to store the tracer, e.g. dbgpt_webserver_tracer.jsonl"
}
]
}} />

@@ -0,0 +1,36 @@
---
title: "LoggingParameters Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
"name": "LoggingParameters",
"description": "Logging parameters.",
"documentationUrl": "",
"parameters": [
{
"name": "file",
"type": "string",
"required": false,
"description": "The filename to store logs"
},
{
"name": "level",
"type": "string",
"required": false,
"description": "Logging level; supported values are FATAL, ERROR, WARNING, INFO, DEBUG, NOTSET",
"defaultValue": "${env:DBGPT_LOG_LEVEL:-INFO}",
"validValues": [
"FATAL",
"ERROR",
"WARNING",
"INFO",
"DEBUG",
"NOTSET"
]
}
]
}} />

@@ -0,0 +1,330 @@
---
title: "VLLMDeployModelParameters Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
"name": "VLLMDeployModelParameters",
"description": "Local deploy model parameters.",
"documentationUrl": "",
"parameters": [
{
"name": "name",
"type": "string",
"required": true,
"description": "The name of the model."
},
{
"name": "path",
"type": "string",
"required": false,
"description": "The path of the model, if you want to deploy a local model."
},
{
"name": "backend",
"type": "string",
"required": false,
"description": "The real model name to pass to the provider, default is None. If backend is None, use name as the real model name."
},
{
"name": "device",
"type": "string",
"required": false,
"description": "Device to run model. If None, the device is automatically determined",
"defaultValue": "auto"
},
{
"name": "provider",
"type": "string",
"required": false,
"description": "The provider of the model. If model is deployed in local, this is the inference type. If model is deployed in third-party service, this is platform name('proxy/<platform>')",
"defaultValue": "vllm"
},
{
"name": "verbose",
"type": "boolean",
"required": false,
"description": "Show verbose output.",
"defaultValue": "False"
},
{
"name": "concurrency",
"type": "integer",
"required": false,
"description": "Model concurrency limit",
"defaultValue": "100"
},
{
"name": "prompt_template",
"type": "string",
"required": false,
"description": "Prompt template. If None, the prompt template is automatically determined from model. Just for local deployment."
},
{
"name": "context_length",
"type": "integer",
"required": false,
"description": "The context length of the model. If None, it is automatically determined from model."
},
{
"name": "trust_remote_code",
"type": "boolean",
"required": false,
"description": "Trust remote code or not.",
"defaultValue": "True"
},
{
"name": "download_dir",
"type": "string",
"required": false,
"description": "Directory to download and load the weights, default to the default cache dir of huggingface."
},
{
"name": "load_format",
"type": "string",
"required": false,
"description": "The format of the model weights to load.\n\n* \"auto\" will try to load the weights in the safetensors format and fall back to the pytorch bin format if safetensors format is not available.\n* \"pt\" will load the weights in the pytorch bin format.\n* \"safetensors\" will load the weights in the safetensors format.\n* \"npcache\" will load the weights in pytorch format and store a numpy cache to speed up the loading.\n* \"dummy\" will initialize the weights with random values, which is mainly for profiling.\n* \"tensorizer\" will load the weights using tensorizer from CoreWeave. See the Tensorize vLLM Model script in the Examples section for more information.\n* \"runai_streamer\" will load the Safetensors weights using Run:ai Model Streamer.\n* \"bitsandbytes\" will load the weights using bitsandbytes quantization.\n",
"defaultValue": "auto",
"validValues": [
"auto",
"pt",
"safetensors",
"npcache",
"dummy",
"tensorizer",
"runai_streamer",
"bitsandbytes",
"sharded_state",
"gguf",
"mistral"
]
},
{
"name": "config_format",
"type": "string",
"required": false,
"description": "The format of the model config to load.\n\n* \"auto\" will try to load the config in hf format if available, else it will try to load in mistral format.",
"defaultValue": "auto",
"validValues": [
"auto",
"hf",
"mistral"
]
},
{
"name": "dtype",
"type": "string",
"required": false,
"description": "Data type for model weights and activations.\n\n* \"auto\" will use FP16 precision for FP32 and FP16 models, and BF16 precision for BF16 models.\n* \"half\" for FP16. Recommended for AWQ quantization.\n* \"float16\" is the same as \"half\".\n* \"bfloat16\" for a balance between precision and range.\n* \"float\" is shorthand for FP32 precision.\n* \"float32\" for FP32 precision.",
"defaultValue": "auto",
"validValues": [
"auto",
"half",
"float16",
"bfloat16",
"float",
"float32"
]
},
{
"name": "kv_cache_dtype",
"type": "string",
"required": false,
"description": "Data type for kv cache storage. If \"auto\", will use model data type. CUDA 11.8+ supports fp8 (=fp8_e4m3) and fp8_e5m2. ROCm (AMD GPU) supports fp8 (=fp8_e4m3)",
"defaultValue": "auto",
"validValues": [
"auto",
"fp8",
"fp8_e5m2",
"fp8_e4m3"
]
},
{
"name": "seed",
"type": "integer",
"required": false,
"description": "Random seed for operations.",
"defaultValue": "0"
},
{
"name": "max_model_len",
"type": "integer",
"required": false,
"description": "Model context length. If unspecified, will be automatically derived from the model config."
},
{
"name": "distributed_executor_backend",
"type": "string",
"required": false,
"description": "Backend to use for distributed model workers, either \"ray\" or \"mp\" (multiprocessing). If the product of pipeline_parallel_size and tensor_parallel_size is less than or equal to the number of GPUs available, \"mp\" will be used to keep processing on a single host. Otherwise, this will default to \"ray\" if Ray is installed and fail otherwise. Note that tpu only supports Ray for distributed inference.",
"validValues": [
"ray",
"mp",
"uni",
"external_launcher"
]
},
{
"name": "pipeline_parallel_size",
"type": "integer",
"required": false,
"description": "Number of pipeline stages.",
"defaultValue": "1"
},
{
"name": "tensor_parallel_size",
"type": "integer",
"required": false,
"description": "Number of tensor parallel replicas.",
"defaultValue": "1"
},
{
"name": "max_parallel_loading_workers",
"type": "integer",
"required": false,
"description": "Load model sequentially in multiple batches, to avoid RAM OOM when using tensor parallel and large models."
},
{
"name": "block_size",
"type": "integer",
"required": false,
"description": "Token block size for contiguous chunks of tokens. This is ignored on neuron devices and set to ``--max-model-len``. On CUDA devices, only block sizes up to 32 are supported. On HPU devices, block size defaults to 128.",
"validValues": [
"8",
"16",
"32",
"64",
"128"
]
},
{
"name": "enable_prefix_caching",
"type": "boolean",
"required": false,
"description": "Enables automatic prefix caching."
},
{
"name": "swap_space",
"type": "number",
"required": false,
"description": "CPU swap space size (GiB) per GPU.",
"defaultValue": "4"
},
{
"name": "cpu_offload_gb",
"type": "number",
"required": false,
"description": "The space in GiB to offload to CPU, per GPU. Default is 0, which means no offloading. Intuitively, this argument can be seen as a virtual way to increase the GPU memory size. For example, if you have one 24 GB GPU and set this to 10, virtually you can think of it as a 34 GB GPU. Then you can load a 13B model with BF16 weight, which requires at least 26GB GPU memory. Note that this requires fast CPU-GPU interconnect, as part of the model is loaded from CPU memory to GPU memory on the fly in each model forward pass.",
"defaultValue": "0"
},
{
"name": "gpu_memory_utilization",
"type": "number",
"required": false,
"description": "The fraction of GPU memory to be used for the model executor, which can range from 0 to 1. For example, a value of 0.5 would imply 50% GPU memory utilization. If unspecified, will use the default value of 0.9. This is a per-instance limit, and only applies to the current vLLM instance. It does not matter if you have another vLLM instance running on the same GPU. For example, if you have two vLLM instances running on the same GPU, you can set the GPU memory utilization to 0.5 for each instance.",
"defaultValue": "0.9"
},
{
"name": "max_num_batched_tokens",
"type": "integer",
"required": false,
"description": "Maximum number of batched tokens per iteration."
},
{
"name": "max_num_seqs",
"type": "integer",
"required": false,
"description": "Maximum number of sequences per iteration."
},
{
"name": "max_logprobs",
"type": "integer",
"required": false,
"description": "Max number of log probs to return when logprobs is specified in SamplingParams.",
"defaultValue": "20"
},
{
"name": "revision",
"type": "string",
"required": false,
"description": "The specific model version to use. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version."
},
{
"name": "code_revision",
"type": "string",
"required": false,
"description": "The specific revision to use for the model code on Hugging Face Hub. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version."
},
{
"name": "tokenizer_revision",
"type": "string",
"required": false,
"description": "Revision of the huggingface tokenizer to use. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version."
},
{
"name": "tokenizer_mode",
"type": "string",
"required": false,
"description": "The tokenizer mode.\n\n* \"auto\" will use the fast tokenizer if available.\n* \"slow\" will always use the slow tokenizer.\n* \"mistral\" will always use the `mistral_common` tokenizer.",
"defaultValue": "auto",
"validValues": [
"auto",
"slow",
"mistral"
]
},
{
"name": "quantization",
"type": "string",
"required": false,
"description": "Method used to quantize the weights. If None, we first check the `quantization_config` attribute in the model config file. If that is None, we assume the model weights are not quantized and use `dtype` to determine the data type of the weights.",
"validValues": [
"aqlm",
"awq",
"deepspeedfp",
"tpu_int8",
"fp8",
"ptpc_fp8",
"fbgemm_fp8",
"modelopt",
"marlin",
"gguf",
"gptq_marlin_24",
"gptq_marlin",
"awq_marlin",
"gptq",
"compressed-tensors",
"bitsandbytes",
"qqq",
"hqq",
"experts_int8",
"neuron_quant",
"ipex",
"quark",
"moe_wna16"
]
},
{
"name": "max_seq_len_to_capture",
"type": "integer",
"required": false,
"description": "Maximum sequence length covered by CUDA graphs. When a sequence has context length larger than this, we fall back to eager mode. Additionally for encoder-decoder models, if the sequence length of the encoder input is larger than this, we fall back to the eager mode.",
"defaultValue": "8192"
},
{
"name": "worker_cls",
"type": "string",
"required": false,
"description": "The worker class to use for distributed execution.",
"defaultValue": "auto"
},
{
"name": "extras",
"type": "object",
"required": false,
"description": "Extra parameters, it will be passed to the vllm engine."
}
]
}} />
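
The documented default for `distributed_executor_backend` encodes a simple decision rule. A sketch of that rule in JavaScript (illustrative only; vLLM's real selection logic lives in its Python engine):

```js
// "mp" when the requested world size fits on one host's GPUs; otherwise Ray.
function defaultExecutorBackend(pipelineParallelSize, tensorParallelSize, gpuCount, rayInstalled) {
  const worldSize = pipelineParallelSize * tensorParallelSize;
  if (worldSize <= gpuCount) return "mp";
  if (rayInstalled) return "ray";
  throw new Error("World size exceeds local GPUs and Ray is not installed");
}

defaultExecutorBackend(1, 4, 8, false); // -> "mp"
```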

@@ -0,0 +1,91 @@
---
title: "Volcengine Proxy LLM Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
"name": "VolcengineDeployModelParameters",
"description": "Volcengine proxy LLM configuration.",
"documentationUrl": "https://www.volcengine.com/docs/82379/1298454",
"parameters": [
{
"name": "name",
"type": "string",
"required": true,
"description": "The name of the model."
},
{
"name": "backend",
"type": "string",
"required": false,
"description": "The real model name to pass to the provider, default is None. If backend is None, use name as the real model name."
},
{
"name": "provider",
"type": "string",
"required": false,
"description": "The provider of the model. If model is deployed in local, this is the inference type. If model is deployed in third-party service, this is platform name('proxy/<platform>')",
"defaultValue": "proxy/volcengine"
},
{
"name": "verbose",
"type": "boolean",
"required": false,
"description": "Show verbose output.",
"defaultValue": "False"
},
{
"name": "concurrency",
"type": "integer",
"required": false,
"description": "Model concurrency limit",
"defaultValue": "100"
},
{
"name": "prompt_template",
"type": "string",
"required": false,
"description": "Prompt template. If None, the prompt template is automatically determined from model. Just for local deployment."
},
{
"name": "context_length",
"type": "integer",
"required": false,
"description": "The context length of the OpenAI API. If None, it is determined by the model."
},
{
"name": "api_base",
"type": "string",
"required": false,
"description": "The base url of the Volcengine API.",
"defaultValue": "${env:ARK_API_BASE:-https://ark.cn-beijing.volces.com/api/v3}"
},
{
"name": "api_key",
"type": "string",
"required": false,
"description": "The API key of the Volcengine API.",
"defaultValue": "${env:ARK_API_KEY}"
},
{
"name": "api_type",
"type": "string",
"required": false,
"description": "The type of the OpenAI API, if you use Azure, it can be: azure"
},
{
"name": "api_version",
"type": "string",
"required": false,
"description": "The version of the OpenAI API."
},
{
"name": "http_proxy",
"type": "string",
"required": false,
"description": "The HTTP or HTTPS proxy to use for the OpenAI-compatible API"
}
]
}} />

@@ -0,0 +1,73 @@
---
title: "Baidu Wenxin Proxy LLM Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
"name": "WenxinDeployModelParameters",
"description": "Baidu Wenxin proxy LLM configuration.",
"documentationUrl": "https://cloud.baidu.com/doc/WENXINWORKSHOP/s/clntwmv7t",
"parameters": [
{
"name": "name",
"type": "string",
"required": true,
"description": "The name of the model."
},
{
"name": "backend",
"type": "string",
"required": false,
"description": "The real model name to pass to the provider, default is None. If backend is None, use name as the real model name."
},
{
"name": "provider",
"type": "string",
"required": false,
"description": "The provider of the model. If model is deployed in local, this is the inference type. If model is deployed in third-party service, this is platform name('proxy/<platform>')",
"defaultValue": "proxy/wenxin"
},
{
"name": "verbose",
"type": "boolean",
"required": false,
"description": "Show verbose output.",
"defaultValue": "False"
},
{
"name": "concurrency",
"type": "integer",
"required": false,
"description": "Model concurrency limit",
"defaultValue": "100"
},
{
"name": "prompt_template",
"type": "string",
"required": false,
"description": "Prompt template. If None, the prompt template is automatically determined from model. Just for local deployment."
},
{
"name": "context_length",
"type": "integer",
"required": false,
"description": "The context length of the OpenAI API. If None, it is determined by the model."
},
{
"name": "api_key",
"type": "string",
"required": false,
"description": "The API key of the Wenxin API.",
"defaultValue": "${env:WEN_XIN_API_KEY}"
},
{
"name": "api_secret",
"type": "string",
"required": false,
"description": "The API secret key of the Wenxin API.",
"defaultValue": "${env:WEN_XIN_API_SECRET}"
}
]
}} />

@@ -0,0 +1,91 @@
---
title: "Yi Proxy LLM Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
"name": "YiDeployModelParameters",
"description": "Yi proxy LLM configuration.",
"documentationUrl": "https://platform.lingyiwanwu.com/docs",
"parameters": [
{
"name": "name",
"type": "string",
"required": true,
"description": "The name of the model."
},
{
"name": "backend",
"type": "string",
"required": false,
"description": "The real model name to pass to the provider, default is None. If backend is None, use name as the real model name."
},
{
"name": "provider",
"type": "string",
"required": false,
"description": "The provider of the model. If model is deployed in local, this is the inference type. If model is deployed in third-party service, this is platform name('proxy/<platform>')",
"defaultValue": "proxy/yi"
},
{
"name": "verbose",
"type": "boolean",
"required": false,
"description": "Show verbose output.",
"defaultValue": "False"
},
{
"name": "concurrency",
"type": "integer",
"required": false,
"description": "Model concurrency limit",
"defaultValue": "100"
},
{
"name": "prompt_template",
"type": "string",
"required": false,
"description": "Prompt template. If None, the prompt template is automatically determined from model. Just for local deployment."
},
{
"name": "context_length",
"type": "integer",
"required": false,
"description": "The context length of the OpenAI API. If None, it is determined by the model."
},
{
"name": "api_base",
"type": "string",
"required": false,
"description": "The base url of the Yi API.",
"defaultValue": "${env:YI_API_BASE:-https://api.lingyiwanwu.com/v1}"
},
{
"name": "api_key",
"type": "string",
"required": false,
"description": "The API key of the Yi API.",
"defaultValue": "${env:YI_API_KEY}"
},
{
"name": "api_type",
"type": "string",
"required": false,
"description": "The type of the OpenAI API, if you use Azure, it can be: azure"
},
{
"name": "api_version",
"type": "string",
"required": false,
"description": "The version of the OpenAI API."
},
{
"name": "http_proxy",
"type": "string",
"required": false,
"description": "The HTTP or HTTPS proxy to use for the OpenAI-compatible API"
}
]
}} />

@@ -0,0 +1,91 @@
---
title: "Zhipu Proxy LLM Configuration"
---

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
"name": "ZhipuDeployModelParameters",
"description": "Zhipu proxy LLM configuration.",
"documentationUrl": "https://open.bigmodel.cn/dev/api/normal-model/glm-4#overview",
"parameters": [
{
"name": "name",
"type": "string",
"required": true,
"description": "The name of the model."
},
{
"name": "backend",
"type": "string",
"required": false,
"description": "The real model name to pass to the provider, default is None. If backend is None, use name as the real model name."
},
{
"name": "provider",
"type": "string",
"required": false,
"description": "The provider of the model. If model is deployed in local, this is the inference type. If model is deployed in third-party service, this is platform name('proxy/<platform>')",
"defaultValue": "proxy/zhipu"
},
{
"name": "verbose",
"type": "boolean",
"required": false,
"description": "Show verbose output.",
"defaultValue": "False"
},
{
"name": "concurrency",
"type": "integer",
"required": false,
"description": "Model concurrency limit",
"defaultValue": "100"
},
{
"name": "prompt_template",
"type": "string",
"required": false,
"description": "Prompt template. If None, the prompt template is automatically determined from model. Just for local deployment."
},
{
"name": "context_length",
"type": "integer",
"required": false,
"description": "The context length of the OpenAI API. If None, it is determined by the model."
},
{
"name": "api_base",
"type": "string",
"required": false,
"description": "The base url of the Zhipu API.",
"defaultValue": "${env:ZHIPUAI_BASE_URL:-https://open.bigmodel.cn/api/paas/v4}"
},
{
"name": "api_key",
"type": "string",
"required": false,
"description": "The API key of the Zhipu API.",
"defaultValue": "${env:ZHIPUAI_API_KEY}"
},
{
"name": "api_type",
"type": "string",
"required": false,
"description": "The type of the OpenAI API, if you use Azure, it can be: azure"
},
{
"name": "api_version",
"type": "string",
"required": false,
"description": "The version of the OpenAI API."
},
{
"name": "http_proxy",
"type": "string",
"required": false,
"description": "The HTTP or HTTPS proxy to use for the OpenAI-compatible API"
}
]
}} />

@@ -19,7 +19,6 @@ const sidebars = {
  tutorials: [
    "tutorials/index",
  ],

  docs: [
    {
      type: "doc",
@@ -758,6 +757,23 @@ const sidebars = {
      id: "use_cases",
      label: "Use Cases"
    },

    {
      type: 'category',
      label: 'Config Reference',
      link: {
        type: 'generated-index',
        title: 'Config Reference',
        description: 'All system configurable parameters and their detailed descriptions',
        // slug: '/config-reference'
      },
      items: [
        {
          type: 'autogenerated',
          dirName: 'config-reference' // make sure it matches the generated docs directory
        }
      ]
    }
  ]
};

111 docs/src/components/mdx/ConfigDetail.js Normal file
@@ -0,0 +1,111 @@
import React from 'react';
import Link from '@docusaurus/Link';

function CodeValue({ value }) {
  if (!value) return null;

  if (value.includes('\n') || value.length > 50) {
    return (
      <pre>
        <code>{value}</code>
      </pre>
    );
  }

  return <code>{value}</code>;
}

export function ConfigDetail({ config }) {
  const styles = {
    descriptionSection: {
      marginBottom: '2rem',
    },
    documentationLink: {
      color: 'var(--ifm-color-primary)',
      textDecoration: 'none',
    }
  };

  const { name, description, documentationUrl, parameters } = config;

  return (
    <div>
      {description && (
        <div style={styles.descriptionSection}>
          <p>{description}</p>
          {documentationUrl && (
            <p>
              Details can be found in: <br />
              <Link
                to={documentationUrl}
                style={styles.documentationLink}
                target="_blank"
                rel="noopener noreferrer"
              >
                {documentationUrl}
              </Link>
            </p>
          )}
        </div>
      )}

      <h2>Parameters</h2>
      <table>
        <thead>
          <tr>
            <th>Name</th>
            <th>Type</th>
            <th>Required</th>
            <th>Description</th>
          </tr>
        </thead>
        <tbody>
          {parameters.map((param) => (
            <tr key={param.name}>
              <td><code>{param.name}</code></td>
              <td>
                {param.type}
                {param.nestedTypes && param.nestedTypes.length > 0 && (
                  <span>
                    {' ('}
                    {param.nestedTypes.map((type, idx) => (
                      <React.Fragment key={idx}>
                        {idx > 0 && ', '}
                        {type.type === 'link' ? (
                          <Link to={type.url}>{type.text}</Link>
                        ) : (
                          type.content
                        )}
                      </React.Fragment>
                    ))}
                    {')'}
                  </span>
                )}
              </td>
              <td>{param.required ? '✅' : '❌'}</td>
              <td>
                <div>{param.description}</div>
                {param.validValues && (
                  <div>
                    Valid values:
                    {param.validValues.map((value, idx) => (
                      <React.Fragment key={idx}>
                        {idx > 0 && ', '}
                        <CodeValue value={value} />
                      </React.Fragment>
                    ))}
                  </div>
                )}
                {param.defaultValue && (
                  <div>
                    Defaults: <CodeValue value={param.defaultValue} />
                  </div>
                )}
              </td>
            </tr>
          ))}
        </tbody>
      </table>
    </div>
  );
}
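
All of the generated pages earlier in this commit render through this component. For reference, a minimal hypothetical usage from an MDX page (the config object below is illustrative; real pages pass the generated parameter lists):

```jsx
import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
  "name": "ExampleDeployModelParameters", // illustrative, not a real class
  "description": "Example proxy LLM configuration.",
  "documentationUrl": "https://example.com/docs",
  "parameters": [
    {
      "name": "api_key",
      "type": "string",
      "required": false,
      "description": "The API key.",
      // values longer than 50 chars (or multiline) render in a <pre> block via CodeValue
      "defaultValue": "${env:EXAMPLE_API_KEY}"
    }
  ]
}} />
```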

11 docs/src/components/mdx/ConfigDiagram.js Normal file
@@ -0,0 +1,11 @@
import React from 'react';
import Mermaid from '@theme/Mermaid';

export function ConfigDiagram({ relationships }) {
  const diagram = `
graph TD
${relationships.map(r => `    ${r.from} -->|${r.label}| ${r.to}`).join('\n')}
`.trim();

  return <Mermaid value={diagram} />;
}
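
As with ConfigDetail, a minimal hypothetical usage (the node names below are illustrative, not taken from the generated docs):

```jsx
import { ConfigDiagram } from "@site/src/components/mdx/ConfigDiagram";

// Renders a top-down Mermaid graph with one edge per relationship, e.g.
//   ApplicationConfig -->|contains| TracerParameters
<ConfigDiagram relationships={[
  { from: "ApplicationConfig", label: "contains", to: "TracerParameters" },
  { from: "ApplicationConfig", label: "contains", to: "LoggingParameters" },
]} />
```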

@@ -65,7 +65,7 @@ po: pot
msgmerge --update $(LOCALEDIR)/$$lang/LC_MESSAGES/$$domain.po $$pot; \
else \
echo "Creating $$lang translation for $$domain"; \
msginit --no-translator -l $$lang -o $(LOCALEDIR)/$$lang/LC_MESSAGES/$$domain.po -i $$pot; \
msginit --no-translator -l $$lang -o $(LOCALEDIR)/$$lang/LC_MESSAGES/$$domain.po -i $$pot --locale=$$lang.UTF-8; \
fi; \
fi; \
done; \
@@ -83,10 +83,26 @@ mo:
fi; \
done; \
done

# Fix encoding issues in .po files
fix-encoding:
@echo "Fixing encoding issues in .po files..."
@for lang in $(LANGUAGES); do \
echo "Processing language: $$lang"; \
for po in $(LOCALEDIR)/$$lang/LC_MESSAGES/*.po; do \
if [ -f "$$po" ]; then \
echo "  Fixing $$po..."; \
# Ensure charset is UTF-8 \
sed -i 's/charset=.*/charset=UTF-8\\n"/' "$$po"; \
# Ensure Content-Transfer-Encoding is 8bit \
sed -i 's/Content-Transfer-Encoding: .*/Content-Transfer-Encoding: 8bit\\n"/' "$$po"; \
fi; \
done; \
done
@echo "Encoding fixes complete."

# Clean up generated files
clean:
@find $(LOCALEDIR) -type f -name "*.mo" -delete
@find $(LOCALEDIR) -type f -name "*.po" -delete
@rm -rf $(LOCALEDIR)/pot
@echo "Cleanup complete."

@@ -95,6 +111,7 @@ help:
@echo " pot - Generate POT files for each package module."
@echo " po - Update existing .po files or create them if they don't exist."
@echo " mo - Compile .po files into .mo files for use in the application."
@echo " fix-encoding - Fix charset and encoding issues in .po files."
@echo " clean - Clean up generated files."
@echo " help - Show this help message."
@echo ""
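
The two `sed` expressions in the `fix-encoding` target rewrite the .po header declarations in place. The same transformation expressed in JavaScript, for readability (illustrative only, not part of the Makefile):

```js
// Rewrites a header line such as:
//   "Content-Type: text/plain; charset=ISO-8859-1\n"
// so that it declares UTF-8, mirroring the first sed expression above.
const fixCharset = (line) => line.replace(/charset=.*/, 'charset=UTF-8\\n"');

fixCharset('"Content-Type: text/plain; charset=ISO-8859-1\\n"');
// => '"Content-Type: text/plain; charset=UTF-8\n"'
```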

BIN i18n/locales/fr/LC_MESSAGES/dbgpt_agent.mo Normal file
Binary file not shown.

67 i18n/locales/fr/LC_MESSAGES/dbgpt_agent.po Normal file
@@ -0,0 +1,67 @@
# French translations for PACKAGE package.
# Copyright (C) 2025 THE PACKAGE'S COPYRIGHT HOLDER
# This file is distributed under the same license as the PACKAGE package.
# Automatically generated, 2025.
#
msgid ""
msgstr ""
"Project-Id-Version: PACKAGE VERSION\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2025-02-23 13:40+0800\n"
"PO-Revision-Date: 2025-02-23 13:40+0800\n"
"Last-Translator: Automatically generated\n"
"Language-Team: none\n"
"Language: fr\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Plural-Forms: nplurals=2; plural=(n > 1);\n"

#: ../packages/dbgpt-core/src/dbgpt/agent/core/plan/awel/agent_operator.py:362
msgid "Agent Branch Operator"
msgstr "Opérateur de branchement d'Agent"

#: ../packages/dbgpt-core/src/dbgpt/agent/core/plan/awel/agent_operator.py:367
msgid ""
"Branch the workflow based on the agent actionreport nexspeakers of the "
"request."
msgstr ""
"Brancher le flux de travail en fonction du rapport d'action de l'Agent et des "
"prochains intervenants de la requête."

#: ../packages/dbgpt-core/src/dbgpt/agent/core/plan/awel/agent_operator.py:372
#: ../packages/dbgpt-core/src/dbgpt/agent/core/plan/awel/agent_operator.py:380
msgid "Agent Request"
msgstr "Requête d'Agent"

#: ../packages/dbgpt-core/src/dbgpt/agent/core/plan/awel/agent_operator.py:375
msgid "The input value of the operator."
msgstr "La valeur d'entrée de l'opérateur."

#: ../packages/dbgpt-core/src/dbgpt/agent/core/plan/awel/agent_operator.py:383
msgid "The agent request to agent Operator."
msgstr "La requête d'Agent à l'opérateur d'Agent."

#: ../packages/dbgpt-core/src/dbgpt/agent/core/plan/awel/agent_operator.py:427
msgid "Agent Branch Join Operator"
msgstr "Opérateur de jonction de branchement d'Agent"

#: ../packages/dbgpt-core/src/dbgpt/agent/core/plan/awel/agent_operator.py:431
msgid "Just keep the first non-empty output."
msgstr "Conserver uniquement la première sortie non vide."

#: ../packages/dbgpt-core/src/dbgpt/agent/core/plan/awel/agent_operator.py:435
msgid "Agent Output"
msgstr "Sortie d'Agent"

#: ../packages/dbgpt-core/src/dbgpt/agent/core/plan/awel/agent_operator.py:438
msgid "The Agent output."
msgstr "La sortie de l'Agent."

#: ../packages/dbgpt-core/src/dbgpt/agent/core/plan/awel/agent_operator.py:443
msgid "Branch Output"
msgstr "Sortie de branche"

#: ../packages/dbgpt-core/src/dbgpt/agent/core/plan/awel/agent_operator.py:446
msgid "The output value of the operator."
msgstr "La valeur de sortie de l'opérateur."

BIN i18n/locales/fr/LC_MESSAGES/dbgpt_app__cli.mo Normal file
Binary file not shown.

22 i18n/locales/fr/LC_MESSAGES/dbgpt_app__cli.po Normal file
@@ -0,0 +1,22 @@
# French translations for PACKAGE package.
# Copyright (C) 2025 THE PACKAGE'S COPYRIGHT HOLDER
# This file is distributed under the same license as the PACKAGE package.
# Automatically generated, 2025.
#
msgid ""
msgstr ""
"Project-Id-Version: PACKAGE VERSION\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2025-02-23 13:40+0800\n"
"PO-Revision-Date: 2025-02-23 13:40+0800\n"
"Last-Translator: Automatically generated\n"
"Language-Team: none\n"
"Language: fr\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Plural-Forms: nplurals=2; plural=(n > 1);\n"

#: ../packages/dbgpt-app/src/dbgpt_app/_cli.py:51
msgid "The database configuration file"
msgstr "Le fichier de configuration de la base de données"

BIN i18n/locales/fr/LC_MESSAGES/dbgpt_app_config.mo Normal file
Binary file not shown.

210 i18n/locales/fr/LC_MESSAGES/dbgpt_app_config.po Normal file
@@ -0,0 +1,210 @@
# French translations for PACKAGE package.
# Copyright (C) 2025 THE PACKAGE'S COPYRIGHT HOLDER
# This file is distributed under the same license as the PACKAGE package.
# Automatically generated, 2025.
#
msgid ""
msgstr ""
"Project-Id-Version: PACKAGE VERSION\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2025-02-23 13:40+0800\n"
"PO-Revision-Date: 2025-02-23 13:40+0800\n"
"Last-Translator: Automatically generated\n"
"Language-Team: none\n"
"Language: fr\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Plural-Forms: nplurals=2; plural=(n > 1);\n"

#: ../packages/dbgpt-app/src/dbgpt_app/config.py:26
msgid "Language setting"
msgstr "Paramètre de langue"

#: ../packages/dbgpt-app/src/dbgpt_app/config.py:33
msgid "Logging level"
msgstr "Niveau de journalisation"

#: ../packages/dbgpt-app/src/dbgpt_app/config.py:40
msgid "API keys"
msgstr "Clés API"

#: ../packages/dbgpt-app/src/dbgpt_app/config.py:45
msgid "The key to encrypt the data"
msgstr "La clé pour chiffrer les données"

#: ../packages/dbgpt-app/src/dbgpt_app/config.py:54
#: ../packages/dbgpt-app/src/dbgpt_app/config.py:74
msgid "default vector type"
msgstr "type de vecteur par défaut"

#: ../packages/dbgpt-app/src/dbgpt_app/config.py:64
#: ../packages/dbgpt-app/src/dbgpt_app/config.py:80
msgid "default graph type"
msgstr "type de graphe par défaut"

#: ../packages/dbgpt-app/src/dbgpt_app/config.py:91
#: ../packages/dbgpt-app/src/dbgpt_app/config.py:203
msgid "Whether to verify the SSL certificate of the database"
msgstr "S'il faut vérifier le certificat SSL de la base de données"

#: ../packages/dbgpt-app/src/dbgpt_app/config.py:97
#: ../packages/dbgpt-app/src/dbgpt_app/config.py:209
msgid ""
"The default thread pool size, If None, use default config of python thread "
"pool"
msgstr ""
"La taille par défaut du pool de threads. Si None, utilise la configuration par "
"défaut du pool de threads Python"

#: ../packages/dbgpt-app/src/dbgpt_app/config.py:104
msgid "knowledge search top k"
msgstr "top k de recherche de connaissances"

#: ../packages/dbgpt-app/src/dbgpt_app/config.py:108
msgid "knowledge search top similarity score"
msgstr "score de similarité top de recherche de connaissances"

#: ../packages/dbgpt-app/src/dbgpt_app/config.py:112
msgid "knowledge search rewrite"
msgstr "réécriture de recherche de connaissances"

#: ../packages/dbgpt-app/src/dbgpt_app/config.py:116
msgid "knowledge max chunks once load"
msgstr "nombre maximal de segments de connaissances chargés en une fois"

#: ../packages/dbgpt-app/src/dbgpt_app/config.py:120
msgid "knowledge max load thread"
msgstr "nombre maximal de threads de chargement de connaissances"

#: ../packages/dbgpt-app/src/dbgpt_app/config.py:124
msgid "knowledge rerank top k"
msgstr "top k de reclassement de connaissances"

#: ../packages/dbgpt-app/src/dbgpt_app/config.py:128
msgid "Storage configuration"
msgstr "Configuration de stockage"

#: ../packages/dbgpt-app/src/dbgpt_app/config.py:132
msgid "knowledge graph search top k"
msgstr "recherche top k du graphe de connaissances"

#: ../packages/dbgpt-app/src/dbgpt_app/config.py:136
msgid "graph community summary enabled"
msgstr "résumé de la communauté du graphe activé"

#: ../packages/dbgpt-app/src/dbgpt_app/config.py:142
msgid "Webserver deploy host"
msgstr "Hôte de déploiement du serveur web"

#: ../packages/dbgpt-app/src/dbgpt_app/config.py:144
msgid "Webserver deploy port, default is 5670"
msgstr "Port de déploiement du serveur web, par défaut 5670"

#: ../packages/dbgpt-app/src/dbgpt_app/config.py:147
msgid "Run Webserver in light mode"
msgstr "Exécuter le serveur web en mode léger"

#: ../packages/dbgpt-app/src/dbgpt_app/config.py:153
msgid ""
"The Model controller address to connect. If None, read model controller "
"address from environment key `MODEL_SERVER`."
msgstr ""
"Adresse du contrôleur de modèle à connecter. Si None, lire l'adresse du "
"contrôleur de modèle à partir de la clé d'environnement `MODEL_SERVER`."

#: ../packages/dbgpt-app/src/dbgpt_app/config.py:164
msgid "Database connection config, now support SQLite, OceanBase and MySQL"
msgstr "Configuration de connexion à la base de données, supporte actuellement SQLite, OceanBase et MySQL"

#: ../packages/dbgpt-app/src/dbgpt_app/config.py:172
msgid ""
"The storage type of model configures, if None, use the default "
"storage(current database). When you run in light mode, it will not use any "
"storage."
msgstr ""
"Le type de stockage des configurations de modèle, si None, utilise le stockage "
"par défaut (base de données actuelle). Lorsque vous exécutez en mode léger, il "
"n'utilisera aucun stockage."

#: ../packages/dbgpt-app/src/dbgpt_app/config.py:182
msgid "Tracer config for web server, if None, use global tracer config"
msgstr "Configuration du traceur pour le serveur web, si None, utiliser la configuration globale du traceur"

#: ../packages/dbgpt-app/src/dbgpt_app/config.py:189
msgid "Logging configuration for web server, if None, use global config"
msgstr "Configuration de journalisation pour le serveur web, si None, utiliser la configuration globale"

#: ../packages/dbgpt-app/src/dbgpt_app/config.py:197
msgid "Whether to disable alembic to initialize and upgrade database metadata"
msgstr "Si oui ou non désactiver alembic pour initialiser et mettre à jour les métadonnées de la base de données"

#: ../packages/dbgpt-app/src/dbgpt_app/config.py:218
msgid ""
"Whether to enable remote embedding models. If it is True, you need to start "
"a embedding model through `dbgpt start worker --worker_type text2vec --"
"model_name xxx --model_path xxx`"
msgstr ""
"Si oui ou non activer les modèles d'embedding distants. Si c'est True, vous devez démarrer un modèle "
"d'embedding via `dbgpt start worker --worker_type text2vec --model_name xxx --"
"model_path xxx`"

#: ../packages/dbgpt-app/src/dbgpt_app/config.py:228
msgid ""
"Whether to enable remote rerank models. If it is True, you need to start a "
"rerank model through `dbgpt start worker --worker_type text2vec --rerank --"
"model_name xxx --model_path xxx`"
msgstr "Indique si les modèles de rerank à distance doivent être activés. Si c'est True, vous devez démarrer un modèle de rerank via `dbgpt start worker --worker_type text2vec --rerank --model_name xxx --model_path xxx`"

#: ../packages/dbgpt-app/src/dbgpt_app/config.py:236
msgid "The directories to search awel files, split by `,`"
msgstr "Les répertoires pour rechercher les fichiers awel, séparés par `,`"

#: ../packages/dbgpt-app/src/dbgpt_app/config.py:240
msgid "Whether to use the new web UI, default is True"
msgstr "Indique si la nouvelle interface web doit être utilisée, la valeur par défaut est True"

#: ../packages/dbgpt-app/src/dbgpt_app/config.py:244
msgid "Model cache configuration"
msgstr "Configuration du cache des modèles"

#: ../packages/dbgpt-app/src/dbgpt_app/config.py:249
msgid "The max sequence length of the embedding model, default is 512"
msgstr "La longueur maximale de séquence du modèle d'embedding, la valeur par défaut est 512"

#: ../packages/dbgpt-app/src/dbgpt_app/config.py:258
msgid "Web service configuration"
msgstr "Configuration du service web"

#: ../packages/dbgpt-app/src/dbgpt_app/config.py:262
msgid "Model service configuration"
msgstr "Configuration du service de modèles"

#: ../packages/dbgpt-app/src/dbgpt_app/config.py:274
msgid ""
"Configuration hooks, which will be executed before the configuration loading"
msgstr "Hooks de configuration, qui seront exécutés avant le chargement de la configuration"

#: ../packages/dbgpt-app/src/dbgpt_app/config.py:283
msgid "System configuration"
msgstr "Configuration du système"

#: ../packages/dbgpt-app/src/dbgpt_app/config.py:290
msgid "Model deployment configuration"
msgstr "Configuration du déploiement des modèles"

#: ../packages/dbgpt-app/src/dbgpt_app/config.py:296
msgid "Serve configuration"
msgstr "Configuration du service"

#: ../packages/dbgpt-app/src/dbgpt_app/config.py:301
msgid "Rag Knowledge Parameters"
msgstr "Paramètres de connaissances RAG"

#: ../packages/dbgpt-app/src/dbgpt_app/config.py:306
msgid "Global tracer configuration"
msgstr "Configuration globale du traceur"

#: ../packages/dbgpt-app/src/dbgpt_app/config.py:312
msgid "Logging configuration"
msgstr "Configuration de la journalisation"
Binary file not shown.
@@ -1,15 +1,14 @@
# French translations for PACKAGE package
# Traductions françaises du paquet PACKAGE.
# Copyright (C) 2024 THE PACKAGE'S COPYRIGHT HOLDER
# French translations for PACKAGE package.
# Copyright (C) 2025 THE PACKAGE'S COPYRIGHT HOLDER
# This file is distributed under the same license as the PACKAGE package.
# Automatically generated, 2024.
# Automatically generated, 2025.
#
msgid ""
msgstr ""
"Project-Id-Version: PACKAGE VERSION\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2024-03-24 11:28+0800\n"
"PO-Revision-Date: 2024-03-23 11:22+0800\n"
"POT-Creation-Date: 2025-02-23 13:40+0800\n"
"PO-Revision-Date: 2025-02-23 13:40+0800\n"
"Last-Translator: Automatically generated\n"
"Language-Team: none\n"
"Language: fr\n"
@@ -18,6 +17,7 @@ msgstr ""
"Content-Transfer-Encoding: 8bit\n"
"Plural-Forms: nplurals=2; plural=(n > 1);\n"

#: ../dbgpt/app/dbgpt_server.py:54 ../dbgpt/app/dbgpt_server.py:55
#: ../packages/dbgpt-app/src/dbgpt_app/dbgpt_server.py:44
#: ../packages/dbgpt-app/src/dbgpt_app/dbgpt_server.py:45
msgid "DB-GPT Open API"
msgstr "API ouverte DB-GPT"
msgstr "DB-GPT API Ouverte"

BIN i18n/locales/fr/LC_MESSAGES/dbgpt_app_knowledge.mo Normal file
Binary file not shown.

30 i18n/locales/fr/LC_MESSAGES/dbgpt_app_knowledge.po Normal file
@@ -0,0 +1,30 @@
# French translations for PACKAGE package.
# Copyright (C) 2025 THE PACKAGE'S COPYRIGHT HOLDER
# This file is distributed under the same license as the PACKAGE package.
# Automatically generated, 2025.
#
msgid ""
msgstr ""
"Project-Id-Version: PACKAGE VERSION\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2025-02-23 13:40+0800\n"
"PO-Revision-Date: 2025-02-23 13:40+0800\n"
"Last-Translator: Automatically generated\n"
"Language-Team: none\n"
"Language: fr\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Plural-Forms: nplurals=2; plural=(n > 1);\n"

#: ../packages/dbgpt-app/src/dbgpt_app/knowledge/api.py:277
msgid "Vector Store"
msgstr "Stockage de Vecteurs"

#: ../packages/dbgpt-app/src/dbgpt_app/knowledge/api.py:285
msgid "Knowledge Graph"
msgstr "Graphe de Connaissance"

#: ../packages/dbgpt-app/src/dbgpt_app/knowledge/api.py:293
msgid "Full Text"
msgstr "Texte Intégral"
BIN
i18n/locales/fr/LC_MESSAGES/dbgpt_app_operators.mo
Normal file
BIN
i18n/locales/fr/LC_MESSAGES/dbgpt_app_operators.mo
Normal file
Binary file not shown.
557
i18n/locales/fr/LC_MESSAGES/dbgpt_app_operators.po
Normal file
557
i18n/locales/fr/LC_MESSAGES/dbgpt_app_operators.po
Normal file
@@ -0,0 +1,557 @@
|
||||
# French translations for PACKAGE package.
|
||||
# Copyright (C) 2025 THE PACKAGE'S COPYRIGHT HOLDER
|
||||
# This file is distributed under the same license as the PACKAGE package.
|
||||
# Automatically generated, 2025.
|
||||
#
|
||||
msgid ""
|
||||
msgstr ""
|
||||
"Project-Id-Version: PACKAGE VERSION\n"
|
||||
"Report-Msgid-Bugs-To: \n"
|
||||
"POT-Creation-Date: 2025-02-23 13:40+0800\n"
|
||||
"PO-Revision-Date: 2025-02-23 13:40+0800\n"
|
||||
"Last-Translator: Automatically generated\n"
|
||||
"Language-Team: none\n"
|
||||
"Language: fr\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Plural-Forms: nplurals=2; plural=(n > 1);\n"

#:../packages/dbgpt-app/src/dbgpt_app/operators/rag.py:35
#:../packages/dbgpt-app/src/dbgpt_app/operators/datasource.py:145
msgid "Context Key"
msgstr "Clé de contexte"

#:../packages/dbgpt-app/src/dbgpt_app/operators/rag.py:40
#:../packages/dbgpt-app/src/dbgpt_app/operators/datasource.py:150
msgid "The key of the context, it will be used in building the prompt"
msgstr "La clé du contexte, elle sera utilisée pour construire l'invite"

#:../packages/dbgpt-app/src/dbgpt_app/operators/rag.py:43
msgid "Top K"
msgstr "Top K"

#:../packages/dbgpt-app/src/dbgpt_app/operators/rag.py:48
msgid "The number of chunks to retrieve"
msgstr "Le nombre de segments à récupérer"

#:../packages/dbgpt-app/src/dbgpt_app/operators/rag.py:51
msgid "Minimum Match Score"
msgstr "Score de correspondance minimum"

#:../packages/dbgpt-app/src/dbgpt_app/operators/rag.py:58
msgid ""
"The minimum match score for the retrieved chunks, it will be dropped if the "
"match score is less than the threshold"
msgstr ""
"Le score de correspondance minimum pour les segments récupérés, il sera "
"abandonné si le score de correspondance est inférieur au seuil"

#:../packages/dbgpt-app/src/dbgpt_app/operators/rag.py:66
msgid "Reranker Enabled"
msgstr "Reranker activé"

#:../packages/dbgpt-app/src/dbgpt_app/operators/rag.py:71
msgid "Whether to enable the reranker"
msgstr "Indique si le reranker doit être activé"

#:../packages/dbgpt-app/src/dbgpt_app/operators/rag.py:74
msgid "Reranker Top K"
msgstr "Top K du reranker"

#:../packages/dbgpt-app/src/dbgpt_app/operators/rag.py:79
msgid "The top k for the reranker"
msgstr "Le top k pour le reranker"

#:../packages/dbgpt-app/src/dbgpt_app/operators/rag.py:83
#:../packages/dbgpt-app/src/dbgpt_app/operators/datasource.py:153
msgid "User question"
msgstr "Question de l'utilisateur"

#:../packages/dbgpt-app/src/dbgpt_app/operators/rag.py:86
msgid "The user question to retrieve the knowledge"
msgstr "La question de l'utilisateur pour récupérer les connaissances"

#:../packages/dbgpt-app/src/dbgpt_app/operators/rag.py:89
#:../packages/dbgpt-app/src/dbgpt_app/operators/datasource.py:159
msgid "Retrieved context"
msgstr "Contexte récupéré"

#:../packages/dbgpt-app/src/dbgpt_app/operators/rag.py:92
msgid "The retrieved context from the knowledge space"
msgstr "Le contexte récupéré de l'espace de connaissances"

#:../packages/dbgpt-app/src/dbgpt_app/operators/rag.py:107
msgid "Knowledge Space Operator"
msgstr "Opérateur d'espace de connaissances"

#:../packages/dbgpt-app/src/dbgpt_app/operators/rag.py:111
msgid "Knowledge Space Operator, retrieve your knowledge from knowledge space"
msgstr "Opérateur d'espace de connaissances, récupérez vos connaissances de l'espace de connaissances"

#:../packages/dbgpt-app/src/dbgpt_app/operators/rag.py:115
msgid "Knowledge Space Name"
msgstr "Nom de l'espace de connaissances"

#:../packages/dbgpt-app/src/dbgpt_app/operators/rag.py:119
msgid "The name of the knowledge space"
msgstr "Le nom de l'espace de connaissances"

#:../packages/dbgpt-app/src/dbgpt_app/operators/rag.py:133
msgid "Chunks"
msgstr "Morceaux"

#:../packages/dbgpt-app/src/dbgpt_app/operators/rag.py:137
msgid "The retrieved chunks from the knowledge space"
msgstr "Les morceaux récupérés de l'espace de connaissances"

#:../packages/dbgpt-app/src/dbgpt_app/operators/converter.py:15
#:../packages/dbgpt-app/src/dbgpt_app/operators/converter.py:40
msgid "String"
msgstr "Chaîne"

#:../packages/dbgpt-app/src/dbgpt_app/operators/converter.py:18
msgid "The string to be converted to other types."
msgstr "La chaîne à convertir en d'autres types."

#:../packages/dbgpt-app/src/dbgpt_app/operators/converter.py:21
#:../packages/dbgpt-app/src/dbgpt_app/operators/converter.py:46
msgid "Integer"
msgstr "Entier"

#:../packages/dbgpt-app/src/dbgpt_app/operators/converter.py:24
msgid "The integer to be converted to other types."
msgstr "L'entier à convertir en d'autres types."

#:../packages/dbgpt-app/src/dbgpt_app/operators/converter.py:27
#:../packages/dbgpt-app/src/dbgpt_app/operators/converter.py:52
msgid "Float"
msgstr "Flottant"

#:../packages/dbgpt-app/src/dbgpt_app/operators/converter.py:30
msgid "The float to be converted to other types."
msgstr "Le flottant à convertir en d'autres types."

#:../packages/dbgpt-app/src/dbgpt_app/operators/converter.py:33
#:../packages/dbgpt-app/src/dbgpt_app/operators/converter.py:58
msgid "Boolean"
msgstr "Booléen"

#:../packages/dbgpt-app/src/dbgpt_app/operators/converter.py:36
msgid "The boolean to be converted to other types."
msgstr "Le booléen à convertir en d'autres types."

#:../packages/dbgpt-app/src/dbgpt_app/operators/converter.py:43
msgid "The string converted from other types."
msgstr "La chaîne convertie à partir d'autres types."

#:../packages/dbgpt-app/src/dbgpt_app/operators/converter.py:49
msgid "The integer converted from other types."
msgstr "L'entier converti à partir d'autres types."

#:../packages/dbgpt-app/src/dbgpt_app/operators/converter.py:55
msgid "The float converted from other types."
msgstr "Le flottant converti à partir d'autres types."

#:../packages/dbgpt-app/src/dbgpt_app/operators/converter.py:61
msgid "The boolean converted from other types."
msgstr "Le booléen converti à partir d'autres types."

#:../packages/dbgpt-app/src/dbgpt_app/operators/converter.py:69
msgid "String to Integer"
msgstr "Chaîne vers Entier"

#:../packages/dbgpt-app/src/dbgpt_app/operators/converter.py:71
msgid "Converts a string to an integer."
msgstr "Convertit une chaîne en un entier."

#:../packages/dbgpt-app/src/dbgpt_app/operators/converter.py:88
msgid "String to Float"
msgstr "Chaîne vers Flottant"

#:../packages/dbgpt-app/src/dbgpt_app/operators/converter.py:90
msgid "Converts a string to a float."
msgstr "Convertit une chaîne en un nombre flottant."

#:../packages/dbgpt-app/src/dbgpt_app/operators/converter.py:107
msgid "String to Boolean"
msgstr "Chaîne vers Booléen"

#:../packages/dbgpt-app/src/dbgpt_app/operators/converter.py:109
msgid "Converts a string to a boolean, true: 'true', '1', 'y'"
msgstr "Convertit une chaîne en un booléen, vrai : 'true', '1', 'y'"

#:../packages/dbgpt-app/src/dbgpt_app/operators/converter.py:113
msgid "True Values"
msgstr "Valeurs Vraies"

#:../packages/dbgpt-app/src/dbgpt_app/operators/converter.py:118
msgid "Comma-separated values that should be treated as True."
msgstr "Valeurs séparées par des virgules qui doivent être considérées comme Vraies."

#:../packages/dbgpt-app/src/dbgpt_app/operators/converter.py:137
msgid "Integer to String"
msgstr "Entier vers Chaîne"

#:../packages/dbgpt-app/src/dbgpt_app/operators/converter.py:139
msgid "Converts an integer to a string."
msgstr "Convertit un entier en une chaîne."

#:../packages/dbgpt-app/src/dbgpt_app/operators/converter.py:156
msgid "Float to String"
msgstr "Flottant vers Chaîne"

#:../packages/dbgpt-app/src/dbgpt_app/operators/converter.py:158
msgid "Converts a float to a string."
msgstr "Convertit un nombre flottant en une chaîne de caractères."

#:../packages/dbgpt-app/src/dbgpt_app/operators/converter.py:175
msgid "Boolean to String"
msgstr "Booléen en Chaîne de Caractères"

#:../packages/dbgpt-app/src/dbgpt_app/operators/converter.py:177
msgid "Converts a boolean to a string."
msgstr "Convertit un booléen en chaîne de caractères."

#:../packages/dbgpt-app/src/dbgpt_app/operators/converter.py:194
msgid "Model Output to Dict"
msgstr "Sortie de Modèle en Dictionnaire"

#:../packages/dbgpt-app/src/dbgpt_app/operators/converter.py:196
msgid "Converts a model output to a dictionary."
msgstr "Convertit une sortie de modèle en dictionnaire."

#:../packages/dbgpt-app/src/dbgpt_app/operators/converter.py:199
#:../packages/dbgpt-app/src/dbgpt_app/operators/llm.py:374
msgid "Model Output"
msgstr "Sortie de Modèle"

#:../packages/dbgpt-app/src/dbgpt_app/operators/converter.py:200
msgid "Dictionary"
msgstr "Dictionnaire"

#:../packages/dbgpt-app/src/dbgpt_app/operators/datasource.py:103
msgid "Datasource"
msgstr "Source de Données"

#:../packages/dbgpt-app/src/dbgpt_app/operators/datasource.py:106
msgid "The datasource to retrieve the context"
msgstr "La source de données pour récupérer le contexte"

#:../packages/dbgpt-app/src/dbgpt_app/operators/datasource.py:109
#:../packages/dbgpt-app/src/dbgpt_app/operators/llm.py:268
msgid "Prompt Template"
msgstr "Modèle de Prompt"

#:../packages/dbgpt-app/src/dbgpt_app/operators/datasource.py:114
msgid "The prompt template to build a database prompt"
msgstr "Le modèle de prompt pour construire un prompt de base de données"

#:../packages/dbgpt-app/src/dbgpt_app/operators/datasource.py:118
msgid "Display Type"
msgstr "Type d'Affichage"

#:../packages/dbgpt-app/src/dbgpt_app/operators/datasource.py:123
msgid "The display type for the data"
msgstr "Le type d'affichage pour les données"

#:../packages/dbgpt-app/src/dbgpt_app/operators/datasource.py:127
msgid "Max Number of Results"
msgstr "Nombre Maximum de Résultats"

#:../packages/dbgpt-app/src/dbgpt_app/operators/datasource.py:132
msgid "The maximum number of results to return"
msgstr "Le nombre maximum de résultats à retourner"

#:../packages/dbgpt-app/src/dbgpt_app/operators/datasource.py:135
msgid "Response Format"
msgstr "Format de réponse"

#:../packages/dbgpt-app/src/dbgpt_app/operators/datasource.py:140
msgid "The response format, default is a JSON format"
msgstr "Le format de réponse, par défaut est un format JSON"

#:../packages/dbgpt-app/src/dbgpt_app/operators/datasource.py:156
msgid "The user question to retrieve table schemas from the datasource"
msgstr "La question de l'utilisateur pour récupérer les schémas de table à partir de la source de données"

#:../packages/dbgpt-app/src/dbgpt_app/operators/datasource.py:162
msgid "The retrieved context from the datasource"
msgstr "Le contexte récupéré à partir de la source de données"

#:../packages/dbgpt-app/src/dbgpt_app/operators/datasource.py:166
msgid "SQL dict"
msgstr "Dictionnaire SQL"

#:../packages/dbgpt-app/src/dbgpt_app/operators/datasource.py:169
msgid "The SQL to be executed wrapped in a dictionary, generated by LLM"
msgstr "Le SQL à exécuter enveloppé dans un dictionnaire, généré par LLM"

#:../packages/dbgpt-app/src/dbgpt_app/operators/datasource.py:172
msgid "SQL result"
msgstr "Résultat SQL"

#:../packages/dbgpt-app/src/dbgpt_app/operators/datasource.py:175
msgid "The result of the SQL execution"
msgstr "Le résultat de l'exécution SQL"

#:../packages/dbgpt-app/src/dbgpt_app/operators/datasource.py:179
msgid "SQL dict list"
msgstr "Liste de dictionnaires SQL"

#:../packages/dbgpt-app/src/dbgpt_app/operators/datasource.py:183
msgid "The SQL list to be executed wrapped in a dictionary, generated by LLM"
msgstr "La liste SQL à exécuter enveloppée dans un dictionnaire, générée par le Grand Modèle de Langage"

#:../packages/dbgpt-app/src/dbgpt_app/operators/datasource.py:212
msgid "Datasource Retriever Operator"
msgstr "Opérateur de récupération des données sources"

#:../packages/dbgpt-app/src/dbgpt_app/operators/datasource.py:214
msgid "Retrieve the table schemas from the datasource."
msgstr "Récupérer les schémas de table à partir de la source de données."

#:../packages/dbgpt-app/src/dbgpt_app/operators/datasource.py:228
msgid "Retrieved schema chunks"
msgstr "Morceaux de schéma récupérés"

#:../packages/dbgpt-app/src/dbgpt_app/operators/datasource.py:232
msgid "The retrieved schema chunks from the datasource"
msgstr "Les morceaux de schéma récupérés à partir de la source de données"

#:../packages/dbgpt-app/src/dbgpt_app/operators/datasource.py:290
msgid "Datasource Executor Operator"
msgstr "Opérateur d'exécution des données sources"

#:../packages/dbgpt-app/src/dbgpt_app/operators/datasource.py:292
#:../packages/dbgpt-app/src/dbgpt_app/operators/datasource.py:329
msgid "Execute the context from the datasource."
msgstr "Exécuter le contexte provenant de la source de données."

#:../packages/dbgpt-app/src/dbgpt_app/operators/datasource.py:327
msgid "Datasource Dashboard Operator"
msgstr "Opérateur du tableau de bord des données sources"

#:../packages/dbgpt-app/src/dbgpt_app/operators/code.py:60
msgid "Code Map Operator"
msgstr "Opérateur de carte de code"

#:../packages/dbgpt-app/src/dbgpt_app/operators/code.py:63
msgid ""
"Handle input dictionary with code and return output dictionary after "
"execution."
msgstr "Traiter le dictionnaire d'entrée contenant le code et retourner le dictionnaire de sortie après exécution."

#:../packages/dbgpt-app/src/dbgpt_app/operators/code.py:69
#:../packages/dbgpt-app/src/dbgpt_app/operators/code.py:211
msgid "Code Editor"
msgstr "Éditeur de code"

#:../packages/dbgpt-app/src/dbgpt_app/operators/code.py:74
#:../packages/dbgpt-app/src/dbgpt_app/operators/code.py:216
msgid "Please input your code"
msgstr "Veuillez saisir votre code"

#:../packages/dbgpt-app/src/dbgpt_app/operators/code.py:75
#:../packages/dbgpt-app/src/dbgpt_app/operators/code.py:217
msgid "The code to be executed."
msgstr "Le code à exécuter."

#:../packages/dbgpt-app/src/dbgpt_app/operators/code.py:81
#:../packages/dbgpt-app/src/dbgpt_app/operators/code.py:223
msgid "Language"
msgstr "Langage"

#:../packages/dbgpt-app/src/dbgpt_app/operators/code.py:86
#:../packages/dbgpt-app/src/dbgpt_app/operators/code.py:228
msgid "Please select the language"
msgstr "Veuillez sélectionner le langage"

#:../packages/dbgpt-app/src/dbgpt_app/operators/code.py:87
#:../packages/dbgpt-app/src/dbgpt_app/operators/code.py:229
msgid "The language of the code."
msgstr "Le langage du code."

#:../packages/dbgpt-app/src/dbgpt_app/operators/code.py:97
#:../packages/dbgpt-app/src/dbgpt_app/operators/code.py:239
msgid "Call Name"
msgstr "Nom d'appel"

#:../packages/dbgpt-app/src/dbgpt_app/operators/code.py:102
#:../packages/dbgpt-app/src/dbgpt_app/operators/code.py:244
msgid "Please input the call name"
msgstr "Veuillez saisir le nom d'appel"

#:../packages/dbgpt-app/src/dbgpt_app/operators/code.py:103
#:../packages/dbgpt-app/src/dbgpt_app/operators/code.py:245
msgid "The call name of the function."
msgstr "Le nom d'appel de la fonction."

#:../packages/dbgpt-app/src/dbgpt_app/operators/code.py:108
#:../packages/dbgpt-app/src/dbgpt_app/operators/code.py:250
msgid "Input Data"
msgstr "Données d'entrée"

#:../packages/dbgpt-app/src/dbgpt_app/operators/code.py:111
#:../packages/dbgpt-app/src/dbgpt_app/operators/code.py:253
msgid "The input dictionary."
msgstr "Le dictionnaire d'entrée."

#:../packages/dbgpt-app/src/dbgpt_app/operators/code.py:116
#:../packages/dbgpt-app/src/dbgpt_app/operators/code.py:258
msgid "Output Data"
msgstr "Données de sortie"

#:../packages/dbgpt-app/src/dbgpt_app/operators/code.py:119
msgid "The output dictionary."
msgstr "Le dictionnaire de sortie."

#:../packages/dbgpt-app/src/dbgpt_app/operators/code.py:202
msgid "Code Dict to Model Request Operator"
msgstr "Opérateur de conversion de dictionnaire de code en requête de modèle"

#:../packages/dbgpt-app/src/dbgpt_app/operators/code.py:205
msgid ""
"Handle input dictionary with code and return output ModelRequest after "
"execution."
msgstr "Traite le dictionnaire d'entrée avec du code et retourne une ModelRequest de sortie après exécution."

#:../packages/dbgpt-app/src/dbgpt_app/operators/code.py:261
msgid "The output ModelRequest."
msgstr "La ModelRequest de sortie."

#:../packages/dbgpt-app/src/dbgpt_app/operators/llm.py:50
msgid "The context key can be used as the key for formatting prompt."
msgstr "La clé de contexte peut être utilisée comme clé pour formater l'invite."

#:../packages/dbgpt-app/src/dbgpt_app/operators/llm.py:54
msgid "The context."
msgstr "Le contexte."

#:../packages/dbgpt-app/src/dbgpt_app/operators/llm.py:271
msgid "The prompt template for the conversation."
msgstr "Le modèle d'invite pour la conversation."

#:../packages/dbgpt-app/src/dbgpt_app/operators/llm.py:274
msgid "Model Name"
msgstr "Nom du modèle"

#:../packages/dbgpt-app/src/dbgpt_app/operators/llm.py:279
msgid "The model name."
msgstr "Le nom du modèle."

#:../packages/dbgpt-app/src/dbgpt_app/operators/llm.py:283
msgid "LLM Client"
msgstr "Client LLM"

#:../packages/dbgpt-app/src/dbgpt_app/operators/llm.py:289
msgid ""
"The LLM Client, how to connect to the LLM model, if not provided, it will "
"use the default client deployed by DB-GPT."
msgstr "Le client LLM, comment se connecter au modèle LLM. Si non fourni, il utilisera le client par défaut déployé par DB-GPT."

#:../packages/dbgpt-app/src/dbgpt_app/operators/llm.py:294
msgid "History Message Merge Mode"
msgstr "Mode de fusion des messages historiques"

#:../packages/dbgpt-app/src/dbgpt_app/operators/llm.py:305
msgid ""
"The history merge mode, supports 'none', 'window' and 'token'. 'none': no "
"history merge, 'window': merge by conversation window, 'token': merge by "
"token length."
msgstr "Le mode de fusion de l'historique prend en charge 'none', 'window' et 'token'. 'none' : pas de fusion de l'historique, 'window' : fusion par fenêtre de conversation, 'token' : fusion par longueur de token."

#:../packages/dbgpt-app/src/dbgpt_app/operators/llm.py:312
msgid "User Message Key"
msgstr "Clé du message utilisateur"

#:../packages/dbgpt-app/src/dbgpt_app/operators/llm.py:318
msgid "The key of the user message in your prompt, default is 'user_input'."
msgstr "La clé du message utilisateur dans votre invite, la valeur par défaut est 'user_input'."

#:../packages/dbgpt-app/src/dbgpt_app/operators/llm.py:322
msgid "History Key"
msgstr "Clé de l'historique"

#:../packages/dbgpt-app/src/dbgpt_app/operators/llm.py:328
msgid ""
"The chat history key, with chat history message pass to prompt template, if "
"not provided, it will parse the prompt template to get the key."
msgstr "La clé de l'historique de chat, avec le message de l'historique de chat passé au modèle de prompt, si non fourni, il analysera le modèle de prompt pour obtenir la clé."

#:../packages/dbgpt-app/src/dbgpt_app/operators/llm.py:333
msgid "Keep Start Rounds"
msgstr "Conserver les tours de début"

#:../packages/dbgpt-app/src/dbgpt_app/operators/llm.py:338
msgid "The start rounds to keep in the chat history."
msgstr "Les tours de début à conserver dans l'historique de chat."

#:../packages/dbgpt-app/src/dbgpt_app/operators/llm.py:341
msgid "Keep End Rounds"
msgstr "Conserver les tours de fin"

#:../packages/dbgpt-app/src/dbgpt_app/operators/llm.py:346
msgid "The end rounds to keep in the chat history."
msgstr "Les tours de fin à conserver dans l'historique de chat."

#:../packages/dbgpt-app/src/dbgpt_app/operators/llm.py:349
msgid "Max Token Limit"
msgstr "Limite maximale de tokens"

#:../packages/dbgpt-app/src/dbgpt_app/operators/llm.py:354
msgid "The max token limit to keep in the chat history."
msgstr "La limite maximale de tokens à conserver dans l'historique de chat."

#:../packages/dbgpt-app/src/dbgpt_app/operators/llm.py:358
msgid "Common LLM Request Body"
msgstr "Corps de requête LLM commun"

#:../packages/dbgpt-app/src/dbgpt_app/operators/llm.py:361
msgid "The common LLM request body."
msgstr "Le corps de requête LLM commun."

#:../packages/dbgpt-app/src/dbgpt_app/operators/llm.py:364
msgid "Extra Context"
msgstr "Contexte supplémentaire"

#:../packages/dbgpt-app/src/dbgpt_app/operators/llm.py:368
msgid ""
"Extra context for building prompt(Knowledge context, database schema, etc), "
"you can add multiple context."
msgstr ""
"Contexte supplémentaire pour la construction de l'invite (contexte de connaissances, schéma de base de données, etc.), vous pouvez ajouter plusieurs contextes."

#:../packages/dbgpt-app/src/dbgpt_app/operators/llm.py:377
msgid "The model output."
msgstr "La sortie du modèle."

#:../packages/dbgpt-app/src/dbgpt_app/operators/llm.py:380
msgid "Streaming Model Output"
msgstr "Sortie de modèle en streaming"

#:../packages/dbgpt-app/src/dbgpt_app/operators/llm.py:384
msgid "The streaming model output."
msgstr "La sortie du modèle en streaming."

#:../packages/dbgpt-app/src/dbgpt_app/operators/llm.py:390
msgid "LLM Operator"
msgstr "Opérateur LLM"

#:../packages/dbgpt-app/src/dbgpt_app/operators/llm.py:394
msgid ""
"High-level LLM operator, supports multi-round conversation (conversation "
"window, token length and no multi-round)."
msgstr ""
"Opérateur LLM de haut niveau, prend en charge les conversations multi-tours (fenêtre de conversation, longueur des tokens et sans multi-tours)."

#:../packages/dbgpt-app/src/dbgpt_app/operators/llm.py:424
msgid "Streaming LLM Operator"
msgstr "Opérateur LLM en streaming"

#:../packages/dbgpt-app/src/dbgpt_app/operators/llm.py:428
msgid ""
"High-level streaming LLM operator, supports multi-round conversation "
"(conversation window, token length and no multi-round)."
msgstr ""
"Opérateur LLM en streaming de haut niveau, prend en charge les conversations multi-tours (fenêtre de conversation, longueur des tokens et sans multi-tours)."
Binary file not shown.
@@ -1,78 +0,0 @@
# French translations for PACKAGE package
# Traductions françaises du paquet PACKAGE.
# Copyright (C) 2024 THE PACKAGE'S COPYRIGHT HOLDER
# This file is distributed under the same license as the PACKAGE package.
# Automatically generated, 2024.
#
msgid ""
msgstr ""
"Project-Id-Version: PACKAGE VERSION\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2024-03-27 09:43+0800\n"
"PO-Revision-Date: 2024-03-27 03:21+0800\n"
"Last-Translator: Automatically generated\n"
"Language-Team: none\n"
"Language: fr\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Plural-Forms: nplurals=2; plural=(n > 1);\n"

#: ../dbgpt/client/_cli.py:30
#, fuzzy
msgid "The name of the AWEL flow"
msgstr "Le nom du flux"

#: ../dbgpt/client/_cli.py:37
#, fuzzy
msgid "The uid of the AWEL flow"
msgstr "L'UID du flux"

#: ../dbgpt/client/_cli.py:55
#, fuzzy
msgid "The messages to run AWEL flow"
msgstr "Les messages du flux"

#: ../dbgpt/client/_cli.py:62
#, fuzzy
msgid "The model name of AWEL flow"
msgstr "Le modèle du flux"

#: ../dbgpt/client/_cli.py:71
msgid "Whether use stream mode to run AWEL flow"
msgstr ""

#: ../dbgpt/client/_cli.py:79
#, fuzzy
msgid "The temperature to run AWEL flow"
msgstr "La température du flux"

#: ../dbgpt/client/_cli.py:86
#, fuzzy
msgid "The max new tokens to run AWEL flow"
msgstr "Le nombre maximal de nouveaux tokens du flux"

#: ../dbgpt/client/_cli.py:93
#, fuzzy
msgid "The conversation id of the AWEL flow"
msgstr "L'identifiant de conversation du flux"

#: ../dbgpt/client/_cli.py:101
msgid "The json data to run AWEL flow, if set, will overwrite other options"
msgstr ""

#: ../dbgpt/client/_cli.py:109
#, fuzzy
msgid "The extra json data to run AWEL flow."
msgstr "Les données JSON supplémentaires du flux"

#: ../dbgpt/client/_cli.py:118
#, fuzzy
msgid "Whether use interactive mode to run AWEL flow"
msgstr "La température du flux"

#~ msgid "Whether to stream the flow, default is False"
#~ msgstr "Indique si le flux doit être diffusé en continu, par défaut False"

#~ msgid "The json data of the flow"
#~ msgstr "Les données JSON du flux"
BIN
i18n/locales/fr/LC_MESSAGES/dbgpt_client__cli.mo
Normal file
Binary file not shown.
73
i18n/locales/fr/LC_MESSAGES/dbgpt_client__cli.po
Normal file
@@ -0,0 +1,73 @@
# French translations for PACKAGE package.
# Copyright (C) 2025 THE PACKAGE'S COPYRIGHT HOLDER
# This file is distributed under the same license as the PACKAGE package.
# Automatically generated, 2025.
#
msgid ""
msgstr ""
"Project-Id-Version: PACKAGE VERSION\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2025-02-23 13:40+0800\n"
"PO-Revision-Date: 2025-02-23 13:40+0800\n"
"Last-Translator: Automatically generated\n"
"Language-Team: none\n"
"Language: fr\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Plural-Forms: nplurals=2; plural=(n > 1);\n"

#:../packages/dbgpt-client/src/dbgpt_client/_cli.py:45
msgid "The path of the AWEL flow"
msgstr "Le chemin du flux AWEL"

#:../packages/dbgpt-client/src/dbgpt_client/_cli.py:63
msgid "The name of the AWEL flow"
msgstr "Le nom du flux AWEL"

#:../packages/dbgpt-client/src/dbgpt_client/_cli.py:70
msgid "The uid of the AWEL flow"
msgstr "L'UID du flux AWEL"

#:../packages/dbgpt-client/src/dbgpt_client/_cli.py:88
msgid "The messages to run AWEL flow"
msgstr "Les messages pour exécuter le flux AWEL"

#:../packages/dbgpt-client/src/dbgpt_client/_cli.py:95
msgid "The model name of AWEL flow"
msgstr "Le nom du modèle du flux AWEL"

#:../packages/dbgpt-client/src/dbgpt_client/_cli.py:104
msgid "Whether use stream mode to run AWEL flow"
msgstr "Si l'on doit utiliser le mode stream pour exécuter le flux AWEL"

#:../packages/dbgpt-client/src/dbgpt_client/_cli.py:112
msgid "The temperature to run AWEL flow"
msgstr "La température pour exécuter le flux AWEL"

#:../packages/dbgpt-client/src/dbgpt_client/_cli.py:119
msgid "The max new tokens to run AWEL flow"
msgstr "Le nombre maximum de nouveaux tokens pour exécuter le flux AWEL"

#:../packages/dbgpt-client/src/dbgpt_client/_cli.py:126
msgid "The conversation id of the AWEL flow"
msgstr "L'ID de conversation du flux AWEL"

#:../packages/dbgpt-client/src/dbgpt_client/_cli.py:134
#:../packages/dbgpt-client/src/dbgpt_client/_cli.py:199
msgid "The json data to run AWEL flow, if set, will overwrite other options"
msgstr "Les données JSON pour exécuter le flux AWEL, si elles sont définies, écraseront les autres options"

#:../packages/dbgpt-client/src/dbgpt_client/_cli.py:142
msgid "The extra json data to run AWEL flow."
msgstr "Les données JSON supplémentaires pour exécuter le flux AWEL."

#:../packages/dbgpt-client/src/dbgpt_client/_cli.py:151
msgid "Whether use interactive mode to run AWEL flow"
msgstr "Si l'on doit utiliser le mode interactif pour exécuter le flux AWEL"

#:../packages/dbgpt-client/src/dbgpt_client/_cli.py:207
msgid ""
"The output key of the AWEL flow, if set, it will try to get the output by "
"the key"
msgstr "La clé de sortie du flux AWEL, si elle est définie, le système tentera d'obtenir la sortie à l'aide de cette clé"
Binary file not shown.
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff