From a16df5d261c595660ffd4109c0e7e216aa333b51 Mon Sep 17 00:00:00 2001
From: Jared Van Bortel
Date: Thu, 30 May 2024 16:33:45 -0400
Subject: [PATCH] chatllm: do not report 100% progress until actually complete

Signed-off-by: Jared Van Bortel
---
 gpt4all-chat/chatllm.cpp | 1 +
 1 file changed, 1 insertion(+)

diff --git a/gpt4all-chat/chatllm.cpp b/gpt4all-chat/chatllm.cpp
index 999c233d..972aaa75 100644
--- a/gpt4all-chat/chatllm.cpp
+++ b/gpt4all-chat/chatllm.cpp
@@ -374,6 +374,7 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo)
         m_llModelInfo.model->setProgressCallback([this](float progress) -> bool {
             progress = std::max(progress, std::numeric_limits<float>::min()); // keep progress above zero
+            progress = std::min(progress, std::nextafter(1.0f, 0.0f)); // keep progress below 100% until we are actually done
             emit modelLoadingPercentageChanged(progress);
             return m_shouldBeLoaded;
         });
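
For illustration only (not part of the patch), below is a minimal standalone sketch of how the two clamps behave. `std::nextafter(1.0f, 0.0f)` yields the largest float strictly less than 1.0f, so the clamped progress can approach but never equal 100%; per the subject line, the intent is that the UI only reports full completion when loading is actually done. The `clampProgress` helper and the `main` driver are hypothetical names introduced here for the example.

```cpp
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <limits>

// Hypothetical helper mirroring the clamping logic from the patch, outside of ChatLLM.
static float clampProgress(float progress)
{
    progress = std::max(progress, std::numeric_limits<float>::min()); // keep progress above zero
    progress = std::min(progress, std::nextafter(1.0f, 0.0f));        // keep progress below 100%
    return progress;
}

int main()
{
    std::printf("%.9g\n", clampProgress(0.0f)); // smallest positive normal float, not 0
    std::printf("%.9g\n", clampProgress(0.5f)); // unchanged: 0.5
    std::printf("%.9g\n", clampProgress(1.0f)); // just below 1.0 (0.99999994)
}
```

The upper clamp complements the existing lower clamp: values that are exactly 0.0 or 1.0 presumably carry special meaning to the progress consumers, so intermediate callback values are kept strictly inside (0, 1).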