diff --git a/gpt4all-chat/chatllm.cpp b/gpt4all-chat/chatllm.cpp
index 999c233d..972aaa75 100644
--- a/gpt4all-chat/chatllm.cpp
+++ b/gpt4all-chat/chatllm.cpp
@@ -374,6 +374,7 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo)
         m_llModelInfo.model->setProgressCallback([this](float progress) -> bool {
             progress = std::max(progress, std::numeric_limits<float>::min()); // keep progress above zero
+            progress = std::min(progress, std::nextafter(1.0f, 0.0f)); // keep progress below 100% until we are actually done
             emit modelLoadingPercentageChanged(progress);
             return m_shouldBeLoaded;
         });