debug: more detailed error info

csunny 2023-06-13 19:46:03 +08:00
parent 77bc959819
commit 1f78c21c08

@@ -86,6 +86,9 @@ class ModelWorker:
         except torch.cuda.CudaError:
             ret = {"text": "**GPU OutOfMemory, Please Refresh.**", "error_code": 0}
             yield json.dumps(ret).encode() + b"\0"
+        except Exception as e:
+            ret = {"text": f"**LLMServer Generate Error, Please CheckErrorInfo.**: {e}", "error_code": 0}
+            yield json.dumps(ret).encode() + b"\0"
 
     def get_embeddings(self, prompt):
         return get_embeddings(self.model, self.tokenizer, prompt)
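
The new except Exception branch reports generation failures in-band: the error text is serialized as a JSON object and terminated with a null byte, the same framing the normal streaming output uses. Below is a minimal client-side sketch of how such a stream could be consumed; the endpoint URL, the {"prompt": ...} request body, and the use of the requests library are illustrative assumptions, not part of this commit.

# Sketch of a consumer for the worker's b"\0"-delimited JSON stream.
# Assumed (not from this commit): the endpoint URL and the request body shape.
import json

import requests


def stream_generate(prompt, url="http://localhost:8000/generate_stream"):
    # Yield decoded chunks; error chunks carry their message in the "text"
    # field, exactly like the OOM and generic-error branches in ModelWorker.
    with requests.post(url, json={"prompt": prompt}, stream=True) as resp:
        resp.raise_for_status()
        for raw in resp.iter_lines(delimiter=b"\0"):
            if not raw:
                continue
            yield json.loads(raw.decode("utf-8"))


if __name__ == "__main__":
    for chunk in stream_generate("Hello"):
        print(chunk.get("text", ""))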