From 1f78c21c088e43fc81ba4f81245436e42267fdb0 Mon Sep 17 00:00:00 2001
From: csunny
Date: Tue, 13 Jun 2023 19:46:03 +0800
Subject: [PATCH] debug: more detailed error info

---
 pilot/server/llmserver.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/pilot/server/llmserver.py b/pilot/server/llmserver.py
index 30653a16e..003641807 100644
--- a/pilot/server/llmserver.py
+++ b/pilot/server/llmserver.py
@@ -86,6 +86,9 @@ class ModelWorker:
         except torch.cuda.CudaError:
             ret = {"text": "**GPU OutOfMemory, Please Refresh.**", "error_code": 0}
             yield json.dumps(ret).encode() + b"\0"
+        except Exception as e:
+            ret = {"text": f"**LLMServer Generate Error, Please Check Error Info.**: {e}", "error_code": 0}
+            yield json.dumps(ret).encode() + b"\0"
 
     def get_embeddings(self, prompt):
         return get_embeddings(self.model, self.tokenizer, prompt)
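
Below is a minimal client-side sketch (not part of the patch) of how the NUL-delimited JSON chunks yielded by ModelWorker, including the error payloads added above, might be consumed. The endpoint URL and request shape are assumptions for illustration; only the "text" and "error_code" fields come from the patch.

    import json

    import requests  # any HTTP client with streaming support would do


    def stream_generate(url, params):
        """Yield dicts parsed from a NUL-delimited JSON byte stream."""
        resp = requests.post(url, json=params, stream=True)
        resp.raise_for_status()
        # Each chunk the worker yields is terminated with b"\0", so the
        # byte stream can be split on that delimiter and decoded piecewise.
        for raw in resp.iter_lines(delimiter=b"\0"):
            if not raw:
                continue  # skip empty fragments between delimiters
            yield json.loads(raw.decode("utf-8"))


    # Hypothetical usage; this URL and payload are not from the patch.
    for chunk in stream_generate(
        "http://localhost:8000/generate_stream", {"prompt": "hello"}
    ):
        # Error payloads (GPU OOM or the new generic handler) arrive as
        # ordinary chunks; inspect "text" / "error_code" to detect them.
        print(chunk["text"])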