mirror of
https://github.com/nomic-ai/gpt4all.git
synced 2025-08-31 16:16:45 +00:00
backend: do not use Vulkan with non-LLaMA models
This commit is contained in:
@@ -309,8 +309,7 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo)
         // We might have had to fallback to CPU after load if the model is not possible to accelerate
         // for instance if the quantization method is not supported on Vulkan yet
         emit reportDevice("CPU");
-        // TODO(cebtenzzre): report somewhere if llamamodel decided the model was not supported
-        emit reportFallbackReason("<br>Using CPU: unsupported quantization type");
+        emit reportFallbackReason("<br>Using CPU: unsupported model or quant");
     }

     MySettings::globalInstance()->setAttemptModelLoad(QString());
Reference in New Issue
Block a user