From 577ebd482697d15087a209471041de4f4348170d Mon Sep 17 00:00:00 2001
From: Jared Van Bortel
Date: Thu, 2 May 2024 16:09:41 -0400
Subject: [PATCH] mixpanel: report cpu_supports_avx2 on startup (#2299)

Signed-off-by: Jared Van Bortel
---
 gpt4all-backend/llmodel.cpp | 12 ++++++++----
 gpt4all-backend/llmodel.h   |  2 ++
 gpt4all-chat/network.cpp    |  1 +
 3 files changed, 11 insertions(+), 4 deletions(-)

diff --git a/gpt4all-backend/llmodel.cpp b/gpt4all-backend/llmodel.cpp
index 78ba1106..e54effc3 100644
--- a/gpt4all-backend/llmodel.cpp
+++ b/gpt4all-backend/llmodel.cpp
@@ -33,13 +33,13 @@ std::string s_implementations_search_path = ".";
     }
 
     // AVX via EAX=1: Processor Info and Feature Bits, bit 28 of ECX
-    #define cpu_supports_avx()  (get_cpu_info(1, 2) & (1 << 28))
+    #define cpu_supports_avx()  !!(get_cpu_info(1, 2) & (1 << 28))
     // AVX2 via EAX=7, ECX=0: Extended Features, bit 5 of EBX
-    #define cpu_supports_avx2() (get_cpu_info(7, 1) & (1 << 5))
+    #define cpu_supports_avx2() !!(get_cpu_info(7, 1) & (1 << 5))
 #else // gcc/clang
-    #define cpu_supports_avx()  __builtin_cpu_supports("avx")
-    #define cpu_supports_avx2() __builtin_cpu_supports("avx2")
+    #define cpu_supports_avx()  !!__builtin_cpu_supports("avx")
+    #define cpu_supports_avx2() !!__builtin_cpu_supports("avx2")
 #endif
 
 LLModel::Implementation::Implementation(Dlhandle &&dlhandle_)
@@ -260,3 +260,7 @@ const std::string& LLModel::Implementation::implementationsSearchPath() {
 bool LLModel::Implementation::hasSupportedCPU() {
     return cpu_supports_avx() != 0;
 }
+
+int LLModel::Implementation::cpuSupportsAVX2() {
+    return cpu_supports_avx2();
+}
diff --git a/gpt4all-backend/llmodel.h b/gpt4all-backend/llmodel.h
index 28f587cd..1aca1e44 100644
--- a/gpt4all-backend/llmodel.h
+++ b/gpt4all-backend/llmodel.h
@@ -68,6 +68,8 @@ public:
         static void setImplementationsSearchPath(const std::string &path);
         static const std::string &implementationsSearchPath();
         static bool hasSupportedCPU();
+        // 0 for no, 1 for yes, -1 for non-x86_64
+        static int cpuSupportsAVX2();
 
     private:
         Implementation(Dlhandle &&);
diff --git a/gpt4all-chat/network.cpp b/gpt4all-chat/network.cpp
index 45a9a4f7..ec85d8f2 100644
--- a/gpt4all-chat/network.cpp
+++ b/gpt4all-chat/network.cpp
@@ -259,6 +259,7 @@ void Network::sendStartup()
         {"display", QString("%1x%2").arg(display->size().width()).arg(display->size().height())},
         {"ram", LLM::globalInstance()->systemTotalRAMInGB()},
         {"cpu", getCPUModel()},
+        {"cpu_supports_avx2", LLModel::Implementation::cpuSupportsAVX2()},
         {"datalake_active", mySettings->networkIsActive()},
     });
     sendIpify();
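
Note (illustration, not part of the patch): the `!!` added above collapses the raw CPUID bit mask (e.g. `1 << 5`) to a strict 0 or 1, matching what `__builtin_cpu_supports` effectively reports, so the 0/1/-1 contract documented on `cpuSupportsAVX2()` stays unambiguous when the value is sent to mixpanel. The standalone sketch below shows how that contract can be probed end to end; the `detect_avx2` helper name and the exact shape of the non-x86_64 `-1` branch are assumptions for illustration, not code from this patch.

    #include <cstdio>

    #if !(defined(__x86_64__) || defined(_M_X64))
        // Non-x86_64 build: AVX2 is not applicable, so report -1 (assumed branch).
        static int detect_avx2() { return -1; }
    #elif defined(_MSC_VER)
        #include <intrin.h>
        // MSVC: CPUID leaf 7, sub-leaf 0; AVX2 is bit 5 of EBX (info[1]).
        static int detect_avx2() {
            int info[4];
            __cpuidex(info, 7, 0);
            return !!(info[1] & (1 << 5)); // !! collapses the bit mask to 0 or 1
        }
    #else // gcc/clang
        static int detect_avx2() { return !!__builtin_cpu_supports("avx2"); }
    #endif

    int main() {
        // Prints 1 on an AVX2-capable x86_64 CPU, 0 otherwise, -1 on other architectures.
        std::printf("cpu_supports_avx2 = %d\n", detect_avx2());
        return 0;
    }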