diff --git a/gpt4all-backend/llamamodel.cpp b/gpt4all-backend/llamamodel.cpp
index 4a61f782..7fd9fce1 100644
--- a/gpt4all-backend/llamamodel.cpp
+++ b/gpt4all-backend/llamamodel.cpp
@@ -303,11 +303,11 @@ bool LLamaModel::initializeGPUDevice(const LLModel::GPUDevice &device, std::stri
vkDevice.vendor = device.vendor;
result = ggml_vk_init_device(vkDevice);
if (!result && unavail_reason) {
- *unavail_reason = "failed to init device";
+ *unavail_reason = "failed to init GPU";
}
#else
if (unavail_reason) {
- *unavail_reason = "built without kompute";
+ *unavail_reason = "built without Kompute";
}
#endif
return result;
diff --git a/gpt4all-backend/llmodel.h b/gpt4all-backend/llmodel.h
index ad33c82d..3001281b 100644
--- a/gpt4all-backend/llmodel.h
+++ b/gpt4all-backend/llmodel.h
@@ -99,7 +99,7 @@ public:
virtual bool initializeGPUDevice(size_t /*memoryRequired*/, const std::string& /*device*/) { return false; }
virtual bool initializeGPUDevice(const GPUDevice &/*device*/, std::string *unavail_reason = nullptr) {
if (unavail_reason) {
- *unavail_reason = "unsupported model type";
+ *unavail_reason = "model has no GPU support";
}
return false;
}
diff --git a/gpt4all-chat/chatllm.cpp b/gpt4all-chat/chatllm.cpp
index 1b950829..9325b763 100644
--- a/gpt4all-chat/chatllm.cpp
+++ b/gpt4all-chat/chatllm.cpp
@@ -287,7 +287,7 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo)
if (!device) {
// GPU not available
} else if (!m_llModelInfo.model->initializeGPUDevice(*device, &unavail_reason)) {
- emit reportFallbackReason(QString::fromStdString("
Using CPU: " + unavail_reason));
+ emit reportFallbackReason(QString::fromStdString("
" + unavail_reason));
} else {
actualDevice = QString::fromStdString(device->name);
}
@@ -302,14 +302,14 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo)
} else if (!success) {
// llama_init_from_file returned nullptr
emit reportDevice("CPU");
- emit reportFallbackReason("
Using CPU: loading failed (out of VRAM?)");
+ emit reportFallbackReason("
GPU loading failed (out of VRAM?)");
success = m_llModelInfo.model->loadModel(filePath.toStdString());
} else if (!m_llModelInfo.model->usingGPUDevice()) {
// ggml_vk_init was not called in llama.cpp
// We might have had to fallback to CPU after load if the model is not possible to accelerate
// for instance if the quantization method is not supported on Vulkan yet
emit reportDevice("CPU");
- emit reportFallbackReason("
Using CPU: unsupported model or quant");
+ emit reportFallbackReason("
model or quant has no GPU support");
}
MySettings::globalInstance()->setAttemptModelLoad(QString());