#*******************************************************************#
#**                 DB-GPT - GENERAL SETTINGS                     **#
#*******************************************************************#
## DISABLED_COMMAND_CATEGORIES - The list of command categories that are disabled. Each of the below is an option:
## pilot.commands.query_execute
## For example, to disable coding related features, uncomment the next line
# DISABLED_COMMAND_CATEGORIES=

#*******************************************************************#
#**                       Webserver Port                          **#
#*******************************************************************#
WEB_SERVER_PORT=7860

#*******************************************************************#
#***                       LLM PROVIDER                          ***#
#*******************************************************************#
# TEMPERATURE=0

#*******************************************************************#
#**                         LLM MODELS                            **#
#*******************************************************************#
## LLM_MODEL, see /pilot/configs/model_config.LLM_MODEL_CONFIG
LLM_MODEL=vicuna-13b
MODEL_SERVER=http://127.0.0.1:8000
LIMIT_MODEL_CONCURRENCY=5
MAX_POSITION_EMBEDDINGS=4096
QUANTIZE_QLORA=True

## SMART_LLM_MODEL - Smart language model (Default: vicuna-13b)
## FAST_LLM_MODEL - Fast language model (Default: chatglm-6b)
# SMART_LLM_MODEL=vicuna-13b
# FAST_LLM_MODEL=chatglm-6b

#*******************************************************************#
#**                     EMBEDDING SETTINGS                        **#
#*******************************************************************#
EMBEDDING_MODEL=text2vec
KNOWLEDGE_CHUNK_SIZE=500
KNOWLEDGE_SEARCH_TOP_SIZE=5
## EMBEDDING_TOKENIZER - Tokenizer to use for chunking large inputs
## EMBEDDING_TOKEN_LIMIT - Chunk size limit for large inputs
# EMBEDDING_MODEL=all-MiniLM-L6-v2
# EMBEDDING_TOKENIZER=all-MiniLM-L6-v2
# EMBEDDING_TOKEN_LIMIT=8191

#*******************************************************************#
#**                      DATABASE SETTINGS                        **#
#*******************************************************************#
LOCAL_DB_USER=root
LOCAL_DB_PASSWORD=aa123456
LOCAL_DB_HOST=127.0.0.1
LOCAL_DB_PORT=3306

### MILVUS
## MILVUS_ADDR - Milvus remote address (e.g. localhost:19530)
## MILVUS_USERNAME - username for your Milvus database
## MILVUS_PASSWORD - password for your Milvus database
## MILVUS_SECURE - True to enable TLS (Default: False).
##                 Setting MILVUS_ADDR to a `https://` URL will override this setting.
## MILVUS_COLLECTION - Milvus collection; change it if you want to start a new memory while retaining the old one.
# MILVUS_ADDR=localhost:19530
# MILVUS_USERNAME=
# MILVUS_PASSWORD=
# MILVUS_SECURE=
# MILVUS_COLLECTION=dbgpt

#*******************************************************************#
#**                          COMMANDS                             **#
#*******************************************************************#
EXECUTE_LOCAL_COMMANDS=False

#*******************************************************************#
#**                     ALLOWLISTED PLUGINS                       **#
#*******************************************************************#
## ALLOWLISTED_PLUGINS - Sets the listed plugins that are allowed (Example: plugin1,plugin2,plugin3)
## DENYLISTED_PLUGINS - Sets the listed plugins that are not allowed (Example: plugin1,plugin2,plugin3)
ALLOWLISTED_PLUGINS=
DENYLISTED_PLUGINS=

#*******************************************************************#
#**                    CHAT PLUGIN SETTINGS                       **#
#*******************************************************************#
## CHAT_MESSAGES_ENABLED - Enable chat messages (Default: False)
# CHAT_MESSAGES_ENABLED=False

#*******************************************************************#
#**                   VECTOR STORE SETTINGS                       **#
#*******************************************************************#
VECTOR_STORE_TYPE=Chroma
# MILVUS_URL=127.0.0.1
# MILVUS_PORT=19530
# MILVUS_USERNAME=
# MILVUS_PASSWORD=
# MILVUS_SECURE=

#*******************************************************************#
#**                WebServer Language Support                     **#
#*******************************************************************#
LANGUAGE=en
# LANGUAGE=zh

#*******************************************************************#
# ** PROXY_SERVER (openai interface | chatGPT proxy service): use chatGPT as your LLM.
# ** If your server can reach openai, set PROXY_SERVER_URL=https://api.openai.com/v1/chat/completions
# ** Otherwise, if you have a chatGPT proxy server, set PROXY_SERVER_URL={your-proxy-serverip:port/xxx}
#*******************************************************************#
PROXY_API_KEY={your-openai-sk}
PROXY_SERVER_URL=https://api.openai.com/v1/chat/completions

#*******************************************************************#
# ** SUMMARY_CONFIG
#*******************************************************************#
SUMMARY_CONFIG=FAST
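
#*******************************************************************#
# ** EXAMPLE: Milvus as the vector store
#*******************************************************************#
## A minimal sketch of how the settings above could be combined to swap
## Chroma for Milvus, assuming a Milvus instance listening locally on the
## default port. The values below are illustrative placeholders rather
## than shipped defaults; adjust them to your environment before
## uncommenting, and keep VECTOR_STORE_TYPE=Chroma if you do not run Milvus.
# VECTOR_STORE_TYPE=Milvus
# MILVUS_URL=127.0.0.1
# MILVUS_PORT=19530
# MILVUS_USERNAME=
# MILVUS_PASSWORD=
# MILVUS_SECURE=False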