Mirror of https://github.com/nomic-ai/gpt4all.git
backend: do not use Vulkan with non-LLaMA models
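For context on the title: the backend change amounts to refusing Vulkan acceleration when the model being loaded is not a LLaMA-architecture model, so those models load on CPU instead. Below is a minimal sketch of that gating idea only, not the actual gpt4all backend code; the helper names are hypothetical.

// Sketch only: illustrates gating Vulkan on the model architecture.
// modelArchitecture() is a hypothetical stand-in for reading the
// architecture field out of the model file's metadata; it is not a
// gpt4all API.
#include <iostream>
#include <string>

static std::string modelArchitecture(const std::string &modelPath) {
    (void)modelPath;
    return "llama"; // placeholder so the sketch runs
}

// Only LLaMA-architecture models are considered for Vulkan; other
// architectures the chat client supports (e.g. GPT-J, MPT) stay on CPU.
static bool eligibleForVulkan(const std::string &modelPath) {
    return modelArchitecture(modelPath) == "llama";
}

int main() {
    const std::string path = "model.gguf"; // hypothetical path
    std::cout << (eligibleForVulkan(path) ? "try Vulkan" : "use CPU") << '\n';
}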
@@ -309,8 +309,7 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo)
         // We might have had to fallback to CPU after load if the model is not possible to accelerate
         // for instance if the quantization method is not supported on Vulkan yet
         emit reportDevice("CPU");
-        // TODO(cebtenzzre): report somewhere if llamamodel decided the model was not supported
-        emit reportFallbackReason("<br>Using CPU: unsupported quantization type");
+        emit reportFallbackReason("<br>Using CPU: unsupported model or quant");
     }
 
     MySettings::globalInstance()->setAttemptModelLoad(QString());
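The hunk above only touches what the chat client reports when it notices, after loading, that the backend ended up on the CPU even though a Vulkan device was requested; with the backend change, that message now covers the unsupported-model case as well as unsupported quantization. A rough sketch of that post-load check follows, with the Qt signal plumbing replaced by a plain return value and every name except the reported strings invented for illustration.

// Sketch only: models the post-load fallback report from the hunk above
// without Qt. The struct and function names are illustrative, not gpt4all's.
#include <iostream>
#include <string>

struct DeviceReport {
    std::string device; // what the UI should display as the active device
    std::string reason; // extra fallback explanation, empty if none
};

// requestedGpu: the user asked for a Vulkan device before loading.
// modelOnGpu:   the backend actually kept the model on the GPU after load.
static DeviceReport postLoadDeviceReport(bool requestedGpu, bool modelOnGpu) {
    if (requestedGpu && !modelOnGpu) {
        // We might have had to fall back to CPU after load, e.g. because the
        // model architecture or its quantization is not supported on Vulkan.
        return { "CPU", "<br>Using CPU: unsupported model or quant" };
    }
    return { requestedGpu ? "GPU" : "CPU", "" };
}

int main() {
    const DeviceReport r = postLoadDeviceReport(true, false);
    std::cout << r.device << r.reason << '\n';
}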
|