backend: update to latest commit of llama.cpp Vulkan PR

Signed-off-by: Jared Van Bortel <jared@nomic.ai>
Authored by Jared Van Bortel on 2024-01-25 16:58:46 -05:00; committed by AT
parent 29d2c936d1
commit 38c61493d2
9 changed files with 85 additions and 125 deletions


@@ -26,7 +26,7 @@ public:
     void setThreadCount(int32_t n_threads) override;
     int32_t threadCount() const override;
     std::vector<GPUDevice> availableGPUDevices(size_t memoryRequired) override;
-    bool initializeGPUDevice(size_t memoryRequired, const std::string& device) override;
+    bool initializeGPUDevice(size_t memoryRequired, const std::string& name) override;
     bool initializeGPUDevice(const GPUDevice &device, std::string *unavail_reason) override;
     bool initializeGPUDevice(int device) override;
     bool hasGPUDevice() override;
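
For readers skimming the hunk, here is a minimal caller-side sketch of how these overloads fit together. The enclosing implementation class and the layout of GPUDevice are not shown in this hunk, so the Backend and GPUDevice stand-ins below are hypothetical and only mirror the signatures visible in the diff; the value passed to the renamed name overload is likewise an assumption.

// Hypothetical stand-ins that mirror only the signatures visible in this hunk.
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

struct GPUDevice { std::string name; };   // assumed field; the real layout is not shown here

struct Backend {
    std::vector<GPUDevice> availableGPUDevices(size_t memoryRequired);
    bool initializeGPUDevice(size_t memoryRequired, const std::string &name);
    bool initializeGPUDevice(const GPUDevice &device, std::string *unavail_reason);
    bool hasGPUDevice();
};

// Try each device that reports enough free memory; otherwise fall back to
// selecting a device by name with the renamed overload.
void selectGpu(Backend &model, size_t memoryRequired) {
    for (const GPUDevice &dev : model.availableGPUDevices(memoryRequired)) {
        std::string why;
        if (model.initializeGPUDevice(dev, &why))
            return;                        // hasGPUDevice() should now report true
        std::cerr << dev.name << " unavailable: " << why << '\n';
    }
    model.initializeGPUDevice(memoryRequired, "gpu");   // "gpu" is an assumed example value
}

The parameter rename from device to name presumably just clarifies that the string identifies a device by name, as distinct from the overload that takes a GPUDevice object directly and reports, via unavail_reason, why that device cannot be used.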