Fix VRAM leak when model loading fails (#1901)

Signed-off-by: Jared Van Bortel <jared@nomic.ai>
Jared Van Bortel authored 2024-02-01 15:45:45 -05:00, committed by GitHub
parent e1eac00ee0
commit 10e3f7bbf5
3 changed files with 16 additions and 3 deletions

@@ -228,6 +228,7 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo)
+        LLModelStore::globalInstance()->releaseModel(m_llModelInfo); // release back into the store
         m_llModelInfo = LLModelInfo();
         emit modelLoadingError(QString("Previous attempt to load model resulted in crash for `%1` most likely due to insufficient memory. You should either remove this model or decrease your system RAM usage by closing other applications.").arg(modelInfo.filename()));
         return false;
     }
     if (fileInfo.exists()) {
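
The fix follows a simple pattern: every early-return error path must hand the checked-out model slot back to the store before clearing the local handle, otherwise the memory reserved for that slot stays leaked. Below is a minimal, self-contained C++ sketch of that pattern; the acquire/release naming mirrors the diff, but the ModelStore type and loadModel function here are illustrative stand-ins, not the actual gpt4all code.

// Illustrative sketch only -- not the gpt4all implementation.
#include <iostream>
#include <string>

// Stand-in for a store that owns a single model slot (and its memory).
struct ModelStore {
    bool slotFree = true;

    bool acquireModel() {
        if (!slotFree)
            return false;
        slotFree = false;          // slot (and its VRAM) is now checked out
        return true;
    }
    void releaseModel() {
        slotFree = true;           // hand the slot back so it can be reused
    }
};

// Returns true on success. On the failure path the slot is released back
// into the store before returning, mirroring the line the commit adds.
bool loadModel(ModelStore &store, const std::string &filename, bool previousAttemptCrashed) {
    if (!store.acquireModel())
        return false;              // nothing acquired, nothing to release

    if (previousAttemptCrashed) {
        store.releaseModel();      // release back into the store (the fix)
        std::cerr << "Previous attempt to load `" << filename << "` crashed\n";
        return false;
    }

    // ... actual model loading would happen here ...
    return true;
}

int main() {
    ModelStore store;
    loadModel(store, "model.gguf", /*previousAttemptCrashed=*/true);
    // Because the failure path released the slot, a retry can acquire it again.
    std::cout << std::boolalpha
              << "slot free after failed load: " << store.slotFree << '\n';
    return 0;
}

Without the release call on the error path, the slot would remain checked out after a failed load, and subsequent load attempts would find it permanently occupied, which is the leak the commit title describes.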