From 30c533515a19f9c9be20ee026a0df1f66922d33c Mon Sep 17 00:00:00 2001
From: csunny
Date: Wed, 14 Jun 2023 22:02:23 +0800
Subject: [PATCH] fix: async output

---
 pilot/server/llmserver.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/pilot/server/llmserver.py b/pilot/server/llmserver.py
index 1e3a4dcb3..5633a2adc 100644
--- a/pilot/server/llmserver.py
+++ b/pilot/server/llmserver.py
@@ -76,7 +76,10 @@ class ModelWorker:
                 # Please do not open the output in production!
                 # The gpt4all thread shares stdout with the parent process,
                 # and opening it may affect the frontend output.
-                # print("output: ", output)
+
+                if not ("gptj" in CFG.LLM_MODEL or "guanaco" in CFG.LLM_MODEL):
+                    print("output: ", output)
+
                 ret = {
                     "text": output,
                     "error_code": 0,
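
A minimal sketch of what the guard does, for context. It assumes `CFG.LLM_MODEL` is a string naming the loaded model (e.g. "vicuna-13b"); the `_Config` stub and `maybe_print_output` helper below are illustrative only and not part of the patched module.

```python
class _Config:
    # Stand-in for the project config; LLM_MODEL holds the model name.
    LLM_MODEL = "vicuna-13b"


CFG = _Config()


def maybe_print_output(output: str) -> None:
    # The gptj (gpt4all) and guanaco backends stream on a thread that
    # shares stdout with the parent process, so printing there can
    # interfere with the frontend output. Only print for other models.
    if not ("gptj" in CFG.LLM_MODEL or "guanaco" in CFG.LLM_MODEL):
        print("output: ", output)


maybe_print_output("hello")  # prints, since "vicuna-13b" matches neither name
```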