Mirror of https://github.com/nomic-ai/gpt4all.git (synced 2025-08-13 05:35:45 +00:00)
chat: make sure to clear fallback reason on success
commit 75deee9adb
parent 2eb83b9f2a
@@ -264,7 +264,9 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo)
         // Pick the best match for the device
         QString actualDevice = m_llModelInfo.model->implementation().buildVariant() == "metal" ? "Metal" : "CPU";
         const QString requestedDevice = MySettings::globalInstance()->device();
-        if (requestedDevice != "CPU") {
+        if (requestedDevice == "CPU") {
+            emit reportFallbackReason(""); // fallback not applicable
+        } else {
             const size_t requiredMemory = m_llModelInfo.model->requiredMem(filePath.toStdString());
             std::vector<LLModel::GPUDevice> availableDevices = m_llModelInfo.model->availableGPUDevices(requiredMemory);
             LLModel::GPUDevice *device = nullptr;
@@ -286,6 +288,7 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo)
                 emit reportFallbackReason("<br>Using CPU: failed to init device");
             } else {
                 actualDevice = QString::fromStdString(device->name);
+                emit reportFallbackReason(""); // no fallback yet
             }
         }
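For context, a minimal sketch of what a UI-side consumer of this signal might look like; the helper name, connection site, and slot body are assumptions for illustration, not part of this commit. The signal signature is inferred from the emit calls in the diff: after this change an empty string reliably means "no fallback in effect", so a listener can clear any previously shown notice.

    // Hypothetical consumer sketch (assumed code, not from this commit).
    #include <QDebug>
    #include <QObject>
    #include "chatllm.h"

    void wireFallbackNotice(ChatLLM *llm, QObject *uiContext)
    {
        QObject::connect(llm, &ChatLLM::reportFallbackReason, uiContext,
            [](const QString &reason) {
                if (reason.isEmpty())
                    qDebug() << "fallback notice cleared"; // success or CPU-requested paths
                else
                    qDebug() << "fallback:" << reason;     // e.g. "<br>Using CPU: failed to init device"
            });
    }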