Mirror of https://github.com/nomic-ai/gpt4all.git, synced 2025-04-29 12:14:35 +00:00
chatllm: do not report 100% progress until actually complete
Signed-off-by: Jared Van Bortel <jared@nomic.ai>
commit a16df5d261
parent cff5a53718
@@ -374,6 +374,7 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo)
         m_llModelInfo.model->setProgressCallback([this](float progress) -> bool {
             progress = std::max(progress, std::numeric_limits<float>::min()); // keep progress above zero
+            progress = std::min(progress, std::nextafter(1.0f, 0.0f)); // keep progress below 100% until we are actually done
             emit modelLoadingPercentageChanged(progress);
             return m_shouldBeLoaded;
         });
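For context, a minimal standalone sketch of the same clamping idea (an illustration only, not part of the commit or gpt4all's API): std::nextafter(1.0f, 0.0f) is the largest float strictly below 1.0f, so a clamped progress value can never read as exactly 100%; completion has to be reported separately once loading actually finishes. The helper name below is hypothetical.

    // Sketch: clamp a raw progress value into (0, 1) so it is never exactly 0 or exactly 1.
    // Illustrative only; clampProgress is a hypothetical helper, not a gpt4all function.
    #include <algorithm>
    #include <cmath>
    #include <cstdio>
    #include <limits>

    static float clampProgress(float progress) {
        progress = std::max(progress, std::numeric_limits<float>::min()); // keep progress above zero
        progress = std::min(progress, std::nextafter(1.0f, 0.0f));        // keep progress below 100%
        return progress;
    }

    int main() {
        std::printf("%.9g\n", clampProgress(0.0f)); // smallest positive normal float, not 0
        std::printf("%.9g\n", clampProgress(0.5f)); // unchanged: 0.5
        std::printf("%.9g\n", clampProgress(1.0f)); // just below 1, e.g. 0.99999994
        return 0;
    }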