From 6f038c136bad26931609d5521c9d136f12eb3b75 Mon Sep 17 00:00:00 2001
From: Aaron Miller
Date: Wed, 13 Sep 2023 12:32:42 -0700
Subject: [PATCH] init at most one vulkan device, submodule update

fixes issues w/ multiple of the same gpu
---
 gpt4all-backend/llama.cpp-mainline | 2 +-
 gpt4all-chat/chatllm.cpp           | 4 +++-
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/gpt4all-backend/llama.cpp-mainline b/gpt4all-backend/llama.cpp-mainline
index 2f7732b6..e5ab32aa 160000
--- a/gpt4all-backend/llama.cpp-mainline
+++ b/gpt4all-backend/llama.cpp-mainline
@@ -1 +1 @@
-Subproject commit 2f7732b667b5c7786da0fa59fd612cc87b04b325
+Subproject commit e5ab32aab84c9252e865114483dbd7505e5caabb
diff --git a/gpt4all-chat/chatllm.cpp b/gpt4all-chat/chatllm.cpp
index 4ae8c843..afdf6bdc 100644
--- a/gpt4all-chat/chatllm.cpp
+++ b/gpt4all-chat/chatllm.cpp
@@ -279,8 +279,10 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo)
             m_llModelInfo.model->initializeGPUDevice(devices.front());
         } else {
             for (LLModel::GPUDevice &d : availableDevices) {
-                if (QString::fromStdString(d.name) == requestedDevice)
+                if (QString::fromStdString(d.name) == requestedDevice) {
                     m_llModelInfo.model->initializeGPUDevice(d);
+                    break;
+                }
             }
         }
     }