Support multi-process launching of llmserver; add OpenAI proxy API.

This commit is contained in:
xuyuan23
2023-06-12 21:29:16 +08:00
parent 62eb6c383e
commit 6e3b48c7c4
4 changed files with 21 additions and 12 deletions

View File

@@ -7,6 +7,10 @@
## For example, to disable coding related features, uncomment the next line
# DISABLED_COMMAND_CATEGORIES=
#*******************************************************************#
#** Webserver Port **#
#*******************************************************************#
WEB_SERVER_PORT=7860
#*******************************************************************#
#*** LLM PROVIDER ***#
@@ -17,6 +21,7 @@
#*******************************************************************#
#** LLM MODELS **#
#*******************************************************************#
# LLM_MODEL, see /pilot/configs/model_config.LLM_MODEL_CONFIG
LLM_MODEL=vicuna-13b
MODEL_SERVER=http://127.0.0.1:8000
LIMIT_MODEL_CONCURRENCY=5
@@ -98,15 +103,20 @@ VECTOR_STORE_TYPE=Chroma
#MILVUS_SECURE=
#*******************************************************************#
#** WebServer Language Support **#
#*******************************************************************#
LANGUAGE=en
#LANGUAGE=zh
#*******************************************************************#
# ** PROXY_SERVER
# ** PROXY_SERVER (OpenAI interface | ChatGPT proxy service): use ChatGPT as your LLM.
# ** If your server can reach the OpenAI API, set PROXY_SERVER_URL=https://api.openai.com/v1/chat/completions
# ** Otherwise, if you have a ChatGPT proxy server, set PROXY_SERVER_URL={your-proxy-server-ip:port/path}
#*******************************************************************#
PROXY_API_KEY=
PROXY_SERVER_URL=http://127.0.0.1:3000/proxy_address
PROXY_API_KEY={your-openai-sk}
PROXY_SERVER_URL=https://api.openai.com/v1/chat/completions
#*******************************************************************#