From 75deee9adb440b1cbe783784cc4d73c4b4b1aff2 Mon Sep 17 00:00:00 2001
From: Cebtenzzre <cebtenzzre@gmail.com>
Date: Mon, 2 Oct 2023 10:23:11 -0400
Subject: [PATCH] chat: make sure to clear fallback reason on success

---
 gpt4all-chat/chatllm.cpp | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/gpt4all-chat/chatllm.cpp b/gpt4all-chat/chatllm.cpp
index a1bbb604..60dce440 100644
--- a/gpt4all-chat/chatllm.cpp
+++ b/gpt4all-chat/chatllm.cpp
@@ -264,7 +264,9 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo)
         // Pick the best match for the device
         QString actualDevice = m_llModelInfo.model->implementation().buildVariant() == "metal" ? "Metal" : "CPU";
         const QString requestedDevice = MySettings::globalInstance()->device();
-        if (requestedDevice != "CPU") {
+        if (requestedDevice == "CPU") {
+            emit reportFallbackReason(""); // fallback not applicable
+        } else {
             const size_t requiredMemory = m_llModelInfo.model->requiredMem(filePath.toStdString());
             std::vector<LLModel::GPUDevice> availableDevices = m_llModelInfo.model->availableGPUDevices(requiredMemory);
             LLModel::GPUDevice *device = nullptr;
@@ -286,6 +288,7 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo)
                 emit reportFallbackReason("<br>Using CPU: failed to init device");
             } else {
                 actualDevice = QString::fromStdString(device->name);
+                emit reportFallbackReason(""); // no fallback yet
             }
         }