mirror of
https://github.com/nomic-ai/gpt4all.git
synced 2025-09-06 19:10:15 +00:00
Link against ggml in bin so we can get the available devices without loading a model.
This commit is contained in:
@@ -4,6 +4,10 @@
|
||||
#include <iostream>
|
||||
#include <unordered_set>
|
||||
|
||||
#ifdef GGML_USE_KOMPUTE
|
||||
#include "ggml-vulkan.h"
|
||||
#endif
|
||||
|
||||
void LLModel::recalculateContext(PromptContext &promptCtx, std::function<bool(bool)> recalculate) {
|
||||
size_t i = 0;
|
||||
promptCtx.n_past = 0;
|
||||
@@ -174,3 +178,26 @@ std::vector<float> LLModel::embedding(const std::string &/*text*/)
|
||||
}
|
||||
return std::vector<float>();
|
||||
}
|
||||
|
||||
/// Enumerate the GPU devices usable by this backend without loading a model.
///
/// When built with Kompute (GGML_USE_KOMPUTE) this queries the Vulkan layer
/// via ggml_vk_available_devices() and converts each ggml_vk_device into an
/// LLModel::GPUDevice (index, type, heap size, name, vendor). On builds
/// without Kompute support it returns an empty vector.
///
/// @return one GPUDevice entry per available Vulkan device; empty if none
///         are available or GPU support is compiled out.
std::vector<LLModel::GPUDevice> LLModel::availableGPUDevices()
{
#if defined(GGML_USE_KOMPUTE)
    // NOTE(review): the 0 argument presumably means "no minimum memory
    // requirement" — confirm against the ggml-vulkan API.
    std::vector<ggml_vk_device> vkDevices = ggml_vk_available_devices(0);

    std::vector<LLModel::GPUDevice> devices;
    devices.reserve(vkDevices.size()); // one allocation up front, no regrowth

    for (const auto &vkDevice : vkDevices) {
        LLModel::GPUDevice device;
        device.index    = vkDevice.index;
        device.type     = vkDevice.type;
        device.heapSize = vkDevice.heapSize;
        device.name     = vkDevice.name;
        device.vendor   = vkDevice.vendor;

        // Move instead of copy: avoids duplicating the name/vendor strings.
        devices.push_back(std::move(device));
    }

    return devices;
#else
    return std::vector<LLModel::GPUDevice>();
#endif
}
|
||||
|
Reference in New Issue
Block a user