Mirror of https://github.com/csunny/DB-GPT.git (synced 2025-08-09 12:18:12 +00:00)

Commit b3dde34ec4 (parent 42d8b2b74b): WEB API independent
@@ -13,13 +13,16 @@ PROMPT_SCENE_DEFINE = """You are a {dialect} data analysis expert, please provid
 _DEFAULT_TEMPLATE = """
 According to the structure definition in the following tables:
 {table_info}
-Provide a professional data analysis with as few dimensions as possible, and the upper limit does not exceed 5 dimensions.
+Provide professional data analysis, use as few dimensions as possible, but no less than three, and no more than eight dimensions.
 Used to support goal: {input}
 
-Use the chart display method in the following range:
+Pay attention to the length of the output content of the analysis result, do not exceed 4000tokens
+According to the characteristics of the analyzed data, choose the best one from the charts provided below to display,chart types:
 {supported_chat_type}
-give {dialect} data analysis SQL, analysis title, display method and analytical thinking,respond in the following json format:
+
+Give {dialect} data analysis SQL, analysis title, display method and analytical thinking,respond in the following json format:
 {response}
+Do not use unprovided fields and do not use unprovided data in the where condition of sql.
 Ensure the response is correct json and can be parsed by Python json.loads
 """
 
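For context, a minimal sketch of how the revised template might be filled at request time. The placeholder names come from the diff above; the abridged template copy, the sample values, and the RESPONSE_FORMAT schema are illustrative assumptions, not taken from the repository.

import json

# Abridged copy of the template above; only the placeholder lines are kept here.
_DEFAULT_TEMPLATE = """
According to the structure definition in the following tables:
{table_info}
Used to support goal: {input}
{supported_chat_type}
Give {dialect} data analysis SQL, analysis title, display method and analytical thinking,respond in the following json format:
{response}
"""

# Hypothetical response schema, for illustration only; the project defines its own.
RESPONSE_FORMAT = [
    {
        "sql": "data analysis SQL",
        "title": "analysis title",
        "showcase": "display method",
        "thoughts": "analytical thinking",
    }
]

prompt = _DEFAULT_TEMPLATE.format(
    table_info="CREATE TABLE orders (id INT, amount DECIMAL(10, 2), created_at DATE)",
    input="Analyze monthly sales trends",
    supported_chat_type="response_line_chart, response_bar_chart, response_table",
    dialect="mysql",
    response=json.dumps(RESPONSE_FORMAT, indent=4),
)
print(prompt)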
@@ -168,7 +168,7 @@ async def api_generate_stream(request: Request):
 
 
 @app.post("/generate")
-def generate(prompt_request: PromptRequest):
+def generate(prompt_request: PromptRequest)->str:
     params = {
         "prompt": prompt_request.prompt,
         "temperature": prompt_request.temperature,
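The hunk only shows the route, the new `->str` annotation, and the first two entries of `params`, so the following is a hedged, self-contained sketch of a minimal non-streaming endpoint in that shape; the PromptRequest fields beyond prompt/temperature and the handler body are assumptions.

from fastapi import FastAPI
from pydantic import BaseModel

app = FastAPI()


class PromptRequest(BaseModel):
    prompt: str
    temperature: float = 0.7  # assumed default, not shown in the diff


@app.post("/generate")
def generate(prompt_request: PromptRequest) -> str:
    params = {
        "prompt": prompt_request.prompt,
        "temperature": prompt_request.temperature,
    }
    # The real handler forwards `params` to the model worker; here we just
    # echo them back so the sketch stays self-contained.
    return f"received: {params}"

On recent FastAPI versions the return annotation also acts as the response model, so the endpoint is documented and validated as returning a plain JSON string.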
@@ -690,9 +690,6 @@ if __name__ == "__main__":
     parser.add_argument(
         "--model_list_mode", type=str, default="once", choices=["once", "reload"]
     )
-    parser.add_argument(
-        "-new", "--new", action="store_true", help="enable new http mode"
-    )
 
     # old version server config
     parser.add_argument("--host", type=str, default="0.0.0.0")
@@ -704,12 +701,6 @@ if __name__ == "__main__":
     args = parser.parse_args()
     server_init(args)
 
-    if args.new:
-        import uvicorn
-
-        uvicorn.run(app, host="0.0.0.0", port=5000)
-    else:
-        ### Compatibility mode starts the old version server by default
     demo = build_webdemo()
     demo.queue(
         concurrency_count=args.concurrency_count,