Mirror of https://github.com/nomic-ai/gpt4all.git (synced 2025-09-08 20:09:12 +00:00)
Fix VRAM leak when model loading fails (#1901)
Signed-off-by: Jared Van Bortel <jared@nomic.ai>
@@ -228,6 +228,7 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo)
             LLModelStore::globalInstance()->releaseModel(m_llModelInfo); // release back into the store
+            m_llModelInfo = LLModelInfo();
             emit modelLoadingError(QString("Previous attempt to load model resulted in crash for `%1` most likely due to insufficient memory. You should either remove this model or decrease your system RAM usage by closing other applications.").arg(modelInfo.filename()));
             return false;
         }
 
         if (fileInfo.exists()) {
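The hunk adds a single line: after the failed model is released back into the store, m_llModelInfo is overwritten with a default-constructed value so that ChatLLM no longer holds a stale handle to it. Below is a minimal, self-contained C++ sketch of the general idiom behind that one-liner. The types here are illustrative, not the project's actual LLModelInfo (which may manage ownership differently, e.g. through the LLModelStore): it assumes the info struct owns its model via std::unique_ptr, so assigning a fresh value destroys the old model and returns its memory.

// Illustrative sketch only: gpt4all's real LLModelInfo may manage
// ownership differently. This shows the general reset-to-release idiom.
#include <iostream>
#include <memory>

struct LLModel {
    LLModel()  { std::cout << "model loaded (VRAM allocated)\n"; }
    ~LLModel() { std::cout << "model destroyed (VRAM freed)\n"; }
};

struct LLModelInfo {
    std::unique_ptr<LLModel> model;  // empty by default
};

int main() {
    LLModelInfo m_llModelInfo;
    m_llModelInfo.model = std::make_unique<LLModel>();

    // ... suppose model loading fails here ...

    // Without this reset, m_llModelInfo would keep the dead model (and
    // whatever memory it holds) alive for as long as its owner exists.
    m_llModelInfo = LLModelInfo();  // prints "model destroyed (VRAM freed)"

    return 0;
}

Whatever the exact ownership scheme, defaulting the member leaves the failure path in the same state as "no model loaded", which is what the rest of loadModel seems to assume.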