Merge branch 'main' into llm_fxp

This commit is contained in:
csunny 2023-05-20 14:23:20 +08:00
commit b466f4afdb
2 changed files with 3 additions and 2 deletions

View File

@@ -105,7 +105,8 @@ class Config(metaclass=Singleton):
self.LLM_MODEL = os.getenv("LLM_MODEL", "vicuna-13b")
self.LIMIT_MODEL_CONCURRENCY = int(os.getenv("LIMIT_MODEL_CONCURRENCY", 5))
self.MAX_POSITION_EMBEDDINGS = int(os.getenv("MAX_POSITION_EMBEDDINGS", 4096))
self.MODEL_SERVER = os.getenv("MODEL_SERVER", "http://121.41.167.183:8000")
self.MODEL_PORT = os.getenv("MODEL_PORT", 8000)
self.MODEL_SERVER = os.getenv("MODEL_SERVER", "http://127.0.0.1" + ":" + str(self.MODEL_PORT))
self.ISLOAD_8BIT = os.getenv("ISLOAD_8BIT", "True") == "True"
def set_debug_mode(self, value: bool) -> None:

View File

@@ -130,4 +130,4 @@ def embeddings(prompt_request: EmbeddingRequest):
if __name__ == "__main__":
uvicorn.run(app, host="0.0.0.0", log_level="info")
uvicorn.run(app, host="0.0.0.0", port=CFG.MODEL_PORT, log_level="info")