Mirror of https://github.com/nomic-ai/gpt4all.git
fix AVX support by removing direct linking to AVX2 libs (#1750)
@@ -82,7 +82,7 @@ const std::vector<LLModel::Implementation> &LLModel::Implementation::implementat
     static auto* libs = new std::vector<Implementation>([] () {
         std::vector<Implementation> fres;
 
-        std::string impl_name_re = "(bert|llama|gptj|llamamodel-mainline)";
+        std::string impl_name_re = "(bert|gptj|llamamodel-mainline)";
         if (requires_avxonly()) {
            impl_name_re += "-avxonly";
         } else {
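For illustration, the pattern assembled above is presumably compiled into a std::regex and matched against the shared-library names found on the implementations search path; this hunk simply drops the standalone "llama" implementation from that pattern. A minimal matching sketch, in which the "-default" fallback suffix and the candidate file name are assumptions rather than content of the diff (the second hunk of the commit follows below):

// Hedged sketch of how the implementation-name pattern is presumably used.
// Only the pattern string and the "-avxonly" suffix appear in this hunk;
// the "-default" branch and the candidate name are assumptions.
#include <iostream>
#include <regex>
#include <string>

int main() {
    std::string impl_name_re = "(bert|gptj|llamamodel-mainline)";
    bool avx_only = true;                       // stand-in for requires_avxonly()
    impl_name_re += avx_only ? "-avxonly" : "-default";

    std::regex re(impl_name_re);
    // A library discovered on the implementations search path:
    std::string candidate = "libllamamodel-mainline-avxonly.so";
    std::cout << candidate
              << (std::regex_search(candidate, re) ? " matches\n" : " does not match\n");
    return 0;
}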
@@ -186,6 +186,27 @@ LLModel *LLModel::Implementation::construct(const std::string &modelPath, std::s
     return fres;
 }
 
+LLModel *LLModel::Implementation::constructCpuLlama() {
+    const LLModel::Implementation *impl = nullptr;
+    for (const auto &i : implementationList()) {
+        if (i.m_buildVariant == "metal" || i.m_modelType != "LLaMA") continue;
+        impl = &i;
+    }
+    if (!impl) {
+        std::cerr << "LLModel ERROR: Could not find CPU LLaMA implementation\n";
+        return nullptr;
+    }
+    auto fres = impl->m_construct();
+    fres->m_implementation = impl;
+    return fres;
+}
+
+std::vector<LLModel::GPUDevice> LLModel::Implementation::availableGPUDevices() {
+    static LLModel *cpuLlama = LLModel::Implementation::constructCpuLlama(); // (memory leak)
+    if (cpuLlama) { return cpuLlama->availableGPUDevices(0); }
+    return {};
+}
+
 void LLModel::Implementation::setImplementationsSearchPath(const std::string& path) {
     s_implementations_search_path = path;
 }
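For context, the added availableGPUDevices() is a static entry point: a caller can enumerate GPU devices without loading a model file, because it lazily constructs a CPU LLaMA implementation (cached and intentionally leaked, per the inline comment) and delegates to that instance's availableGPUDevices(0). A minimal caller sketch, assuming the public header is llmodel.h and that GPUDevice exposes a name member (neither is shown in this diff):

// Hypothetical caller of the static entry point added by this commit.
// The "llmodel.h" include path and GPUDevice::name are assumptions.
#include <iostream>
#include <vector>
#include "llmodel.h"

int main() {
    // Internally constructs a CPU LLaMA implementation once and asks it
    // which GPU devices are available.
    std::vector<LLModel::GPUDevice> devices =
        LLModel::Implementation::availableGPUDevices();

    std::cout << "found " << devices.size() << " GPU device(s)\n";
    for (const auto &d : devices)
        std::cout << "  " << d.name << "\n";    // member name assumed
    return 0;
}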