From 92407438c8fb80f76c563acccb779aee08cbe50e Mon Sep 17 00:00:00 2001
From: niansa/tuxifan
Date: Thu, 1 Jun 2023 03:26:18 +0200
Subject: [PATCH] Advanced avxonly autodetection (#744)

* Advanced avxonly requirement detection

---
 gpt4all-backend/llmodel.cpp | 26 ++++++++++++++++++--------
 1 file changed, 18 insertions(+), 8 deletions(-)

diff --git a/gpt4all-backend/llmodel.cpp b/gpt4all-backend/llmodel.cpp
index bd466921..07c56a34 100644
--- a/gpt4all-backend/llmodel.cpp
+++ b/gpt4all-backend/llmodel.cpp
@@ -6,6 +6,24 @@
 #include
 #include
 
+
+
+static
+bool requires_avxonly() {
+#ifdef __x86_64__
+    #ifndef _MSC_VER
+    return !__builtin_cpu_supports("avx2");
+    #else
+    int cpuInfo[4];
+    __cpuidex(cpuInfo, 7, 0);
+    return !(cpuInfo[1] & (1 << 5));
+    #endif
+#else
+    return false; // Don't know how to handle non-x86_64
+#endif
+}
+
+
 static Dlhandle *get_implementation(std::ifstream& f, const std::string& buildVariant) {
     // Collect all model implementation libraries
     // NOTE: allocated on heap so we leak intentionally on exit so we have a chance to clean up the
@@ -56,14 +74,6 @@ static Dlhandle *get_implementation(std::ifstream& f, const std::string& buildVa
     return nullptr;
 }
 
-static bool requires_avxonly() {
-#ifdef __x86_64__
-    return !__builtin_cpu_supports("avx2") && !__builtin_cpu_supports("fma");
-#else
-    return false; // Don't know how to handle ARM
-#endif
-}
-
 LLModel *LLModel::construct(const std::string &modelPath, std::string buildVariant) {
     //TODO: Auto-detect
     if (buildVariant == "auto") {
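
For reference, below is a minimal standalone sketch of the detection approach this patch adds. It is not part of the patch itself: the main() driver, the <intrin.h> include, and the extra _M_X64 check are additions for illustration (MSVC defines _M_X64 rather than __x86_64__, so without that check the __cpuidex branch would only be reached under clang-cl). The patch's logic is unchanged: on GCC/Clang it asks __builtin_cpu_supports("avx2"); under MSVC it queries CPUID leaf 7, subleaf 0, where EBX bit 5 reports AVX2.

// Standalone sketch, assuming GCC/Clang (__builtin_cpu_supports) or
// MSVC (__cpuidex from <intrin.h>). Not part of the patch above.
#include <iostream>

#ifdef _MSC_VER
#include <intrin.h>
#endif

static bool requires_avxonly() {
// _M_X64 added here because MSVC does not define __x86_64__.
#if defined(__x86_64__) || defined(_M_X64)
    #ifndef _MSC_VER
    // GCC/Clang: the compiler runtime performs the CPUID query internally.
    return !__builtin_cpu_supports("avx2");
    #else
    // MSVC: CPUID leaf 7, subleaf 0; EBX (cpuInfo[1]) bit 5 is the AVX2 flag.
    int cpuInfo[4];
    __cpuidex(cpuInfo, 7, 0);
    return !(cpuInfo[1] & (1 << 5));
    #endif
#else
    return false; // Non-x86_64 targets keep the default build variant.
#endif
}

int main() {
    // Prints which build variant the host CPU would select.
    std::cout << (requires_avxonly() ? "avxonly" : "default") << '\n';
    return 0;
}

The design choice here is to prefer the CPU's own feature report over a compile-time guess: the same binary distribution can then pick the AVX-only implementation library at runtime on older CPUs while using the AVX2 build elsewhere, which is what LLModel::construct's "auto" build variant relies on.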