diff --git a/.env.template b/.env.template index 3ff2d9077..7212e7c6d 100644 --- a/.env.template +++ b/.env.template @@ -31,6 +31,9 @@ QUANTIZE_QLORA=True ## FAST_LLM_MODEL - Fast language model (Default: chatglm-6b) # SMART_LLM_MODEL=vicuna-13b # FAST_LLM_MODEL=chatglm-6b +## Proxy LLM backend; this configuration is only valid when "LLM_MODEL=proxyllm". When we use the REST API provided by a deployment framework such as FastChat as a proxy LLM, +## "PROXYLLM_BACKEND" is the model it actually deploys. We can use "PROXYLLM_BACKEND" to load the prompt of the corresponding scene. +# PROXYLLM_BACKEND= #*******************************************************************# diff --git a/README.md b/README.md index 782ad2cc1..cec1ae36e 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,10 @@ # DB-GPT: Revolutionizing Database Interactions with Private LLM Technology +
+
+
+
+