ci: fix missing Kompute support in python bindings (#1953)

Signed-off-by: Jared Van Bortel <jared@nomic.ai>
This commit is contained in:
Jared Van Bortel 2024-02-09 21:40:32 -05:00 committed by GitHub
parent 79b0866c62
commit fc7e5f4a09
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
5 changed files with 108 additions and 118 deletions

View File

@@ -235,10 +235,8 @@ jobs:
name: Build name: Build
command: | command: |
export CMAKE_PREFIX_PATH=~/Qt/6.5.1/gcc_64/lib/cmake export CMAKE_PREFIX_PATH=~/Qt/6.5.1/gcc_64/lib/cmake
mkdir build ~/Qt/Tools/CMake/bin/cmake -DCMAKE_BUILD_TYPE=Release -S gpt4all-chat -B build
cd build ~/Qt/Tools/CMake/bin/cmake --build build --target all
~/Qt/Tools/CMake/bin/cmake -DCMAKE_BUILD_TYPE=Release -S ../gpt4all-chat -B .
~/Qt/Tools/CMake/bin/cmake --build . --target all
build-gpt4all-chat-windows: build-gpt4all-chat-windows:
machine: machine:
@@ -291,17 +289,15 @@ jobs:
$Env:INCLUDE = "${Env:INCLUDE};C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.29.30133\include" $Env:INCLUDE = "${Env:INCLUDE};C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.29.30133\include"
$Env:INCLUDE = "${Env:INCLUDE};C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.29.30133\ATLMFC\include" $Env:INCLUDE = "${Env:INCLUDE};C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.29.30133\ATLMFC\include"
$Env:VULKAN_SDK = "C:\VulkanSDK\1.3.261.1" $Env:VULKAN_SDK = "C:\VulkanSDK\1.3.261.1"
mkdir build
cd build
& "C:\Qt\Tools\CMake_64\bin\cmake.exe" ` & "C:\Qt\Tools\CMake_64\bin\cmake.exe" `
"-DCMAKE_GENERATOR:STRING=Ninja" ` "-DCMAKE_GENERATOR:STRING=Ninja" `
"-DCMAKE_BUILD_TYPE=Release" ` "-DCMAKE_BUILD_TYPE=Release" `
"-DCMAKE_PREFIX_PATH:PATH=C:\Qt\6.5.1\msvc2019_64" ` "-DCMAKE_PREFIX_PATH:PATH=C:\Qt\6.5.1\msvc2019_64" `
"-DCMAKE_MAKE_PROGRAM:FILEPATH=C:\Qt\Tools\Ninja\ninja.exe" ` "-DCMAKE_MAKE_PROGRAM:FILEPATH=C:\Qt\Tools\Ninja\ninja.exe" `
"-DKOMPUTE_OPT_DISABLE_VULKAN_VERSION_CHECK=ON" ` "-DKOMPUTE_OPT_DISABLE_VULKAN_VERSION_CHECK=ON" `
"-S ..\gpt4all-chat" ` "-S gpt4all-chat" `
"-B ." "-B build"
& "C:\Qt\Tools\Ninja\ninja.exe" & "C:\Qt\Tools\Ninja\ninja.exe" -C build
build-gpt4all-chat-macos: build-gpt4all-chat-macos:
macos: macos:
@@ -332,17 +328,15 @@ jobs:
- run: - run:
name: Build name: Build
command: | command: |
mkdir build
cd build
~/Qt/Tools/CMake/CMake.app/Contents/bin/cmake \ ~/Qt/Tools/CMake/CMake.app/Contents/bin/cmake \
-DCMAKE_GENERATOR:STRING=Ninja \ -DCMAKE_GENERATOR:STRING=Ninja \
-DBUILD_UNIVERSAL=ON \ -DBUILD_UNIVERSAL=ON \
-DCMAKE_BUILD_TYPE=Release \ -DCMAKE_BUILD_TYPE=Release \
-DCMAKE_PREFIX_PATH:PATH=~/Qt/6.5.1/macos/lib/cmake/Qt6 \ -DCMAKE_PREFIX_PATH:PATH=~/Qt/6.5.1/macos/lib/cmake/Qt6 \
-DCMAKE_MAKE_PROGRAM:FILEPATH=~/Qt/Tools/Ninja/ninja \ -DCMAKE_MAKE_PROGRAM:FILEPATH=~/Qt/Tools/Ninja/ninja \
-S ../gpt4all-chat \ -S gpt4all-chat \
-B . -B build
~/Qt/Tools/CMake/CMake.app/Contents/bin/cmake --build . --target all ~/Qt/Tools/CMake/CMake.app/Contents/bin/cmake --build build --target all
build-ts-docs: build-ts-docs:
docker: docker:
- image: cimg/base:stable - image: cimg/base:stable
@@ -407,13 +401,10 @@ jobs:
- run: - run:
name: Build C library name: Build C library
command: | command: |
git submodule init git submodule update --init --recursive
git submodule update
cd gpt4all-backend cd gpt4all-backend
mkdir build cmake -B build
cd build cmake --build build --parallel
cmake ..
cmake --build . --parallel
- run: - run:
name: Build wheel name: Build wheel
command: | command: |
@@ -440,13 +431,10 @@ jobs:
- run: - run:
name: Build C library name: Build C library
command: | command: |
git submodule init git submodule update --init # don't use --recursive because macOS doesn't use Kompute
git submodule update
cd gpt4all-backend cd gpt4all-backend
mkdir build cmake -B build -DCMAKE_OSX_ARCHITECTURES="x86_64;arm64"
cd build cmake --build build --parallel
cmake .. -DCMAKE_OSX_ARCHITECTURES="x86_64;arm64"
cmake --build . --parallel
- run: - run:
name: Build wheel name: Build wheel
command: | command: |
@@ -482,16 +470,13 @@ jobs:
- run: - run:
name: Build C library name: Build C library
command: | command: |
git submodule init git submodule update --init --recursive
git submodule update
cd gpt4all-backend cd gpt4all-backend
mkdir build
cd build
$Env:Path += ";C:\ProgramData\mingw64\mingw64\bin" $Env:Path += ";C:\ProgramData\mingw64\mingw64\bin"
$Env:Path += ";C:\VulkanSDK\1.3.261.1\bin" $Env:Path += ";C:\VulkanSDK\1.3.261.1\bin"
$Env:VULKAN_SDK = "C:\VulkanSDK\1.3.261.1" $Env:VULKAN_SDK = "C:\VulkanSDK\1.3.261.1"
cmake -G "MinGW Makefiles" .. -DKOMPUTE_OPT_DISABLE_VULKAN_VERSION_CHECK=ON -DKOMPUTE_OPT_USE_BUILT_IN_VULKAN_HEADER=OFF cmake -G "MinGW Makefiles" -B build -DKOMPUTE_OPT_DISABLE_VULKAN_VERSION_CHECK=ON -DKOMPUTE_OPT_USE_BUILT_IN_VULKAN_HEADER=OFF
cmake --build . --parallel cmake --build build --parallel
- run: - run:
name: Build wheel name: Build wheel
# TODO: As part of this task, we need to move mingw64 binaries into package. # TODO: As part of this task, we need to move mingw64 binaries into package.

View File

@@ -39,10 +39,6 @@ else()
message(STATUS "Interprocedural optimization support detected") message(STATUS "Interprocedural optimization support detected")
endif() endif()
if(NOT APPLE)
set(LLAMA_KOMPUTE YES)
endif()
include(llama.cpp.cmake) include(llama.cpp.cmake)
set(BUILD_VARIANTS default avxonly) set(BUILD_VARIANTS default avxonly)

View File

@@ -71,12 +71,19 @@ option(LLAMA_SANITIZE_UNDEFINED "llama: enable undefined sanitizer"
# option(LLAMA_F16C "llama: enable F16C" ON) # option(LLAMA_F16C "llama: enable F16C" ON)
#endif() #endif()
if (APPLE)
set(LLAMA_KOMPUTE_DEFAULT OFF)
else()
set(LLAMA_KOMPUTE_DEFAULT ON)
endif()
# 3rd party libs # 3rd party libs
option(LLAMA_ACCELERATE "llama: enable Accelerate framework" ON) option(LLAMA_ACCELERATE "llama: enable Accelerate framework" ON)
option(LLAMA_OPENBLAS "llama: use OpenBLAS" OFF) option(LLAMA_OPENBLAS "llama: use OpenBLAS" OFF)
#option(LLAMA_CUBLAS "llama: use cuBLAS" OFF) #option(LLAMA_CUBLAS "llama: use cuBLAS" OFF)
#option(LLAMA_CLBLAST "llama: use CLBlast" OFF) #option(LLAMA_CLBLAST "llama: use CLBlast" OFF)
#option(LLAMA_METAL "llama: use Metal" OFF) #option(LLAMA_METAL "llama: use Metal" OFF)
option(LLAMA_KOMPUTE "llama: use Kompute" ${LLAMA_KOMPUTE_DEFAULT})
set(LLAMA_BLAS_VENDOR "Generic" CACHE STRING "llama: BLAS library vendor") set(LLAMA_BLAS_VENDOR "Generic" CACHE STRING "llama: BLAS library vendor")
set(LLAMA_CUDA_DMMV_X "32" CACHE STRING "llama: x stride for dmmv CUDA kernels") set(LLAMA_CUDA_DMMV_X "32" CACHE STRING "llama: x stride for dmmv CUDA kernels")
set(LLAMA_CUDA_DMMV_Y "1" CACHE STRING "llama: y block size for dmmv CUDA kernels") set(LLAMA_CUDA_DMMV_Y "1" CACHE STRING "llama: y block size for dmmv CUDA kernels")
@@ -153,6 +160,11 @@ if (LLAMA_OPENBLAS)
endif() endif()
if (LLAMA_KOMPUTE) if (LLAMA_KOMPUTE)
if (NOT EXISTS "${LLAMA_DIR}/kompute/CMakeLists.txt")
message(FATAL_ERROR "Kompute not found")
endif()
message(STATUS "Kompute found")
add_compile_definitions(VULKAN_HPP_DISPATCH_LOADER_DYNAMIC=1) add_compile_definitions(VULKAN_HPP_DISPATCH_LOADER_DYNAMIC=1)
find_package(Vulkan COMPONENTS glslc REQUIRED) find_package(Vulkan COMPONENTS glslc REQUIRED)
find_program(glslc_executable NAMES glslc HINTS Vulkan::glslc) find_program(glslc_executable NAMES glslc HINTS Vulkan::glslc)
@@ -220,8 +232,6 @@ if (LLAMA_KOMPUTE)
endforeach() endforeach()
endfunction() endfunction()
if (EXISTS "${LLAMA_DIR}/kompute/CMakeLists.txt")
message(STATUS "Kompute found")
set(KOMPUTE_OPT_LOG_LEVEL Critical CACHE STRING "Kompute log level") set(KOMPUTE_OPT_LOG_LEVEL Critical CACHE STRING "Kompute log level")
add_subdirectory(${LLAMA_DIR}/kompute) add_subdirectory(${LLAMA_DIR}/kompute)
@@ -302,9 +312,6 @@ if (LLAMA_KOMPUTE)
add_compile_definitions(GGML_USE_KOMPUTE) add_compile_definitions(GGML_USE_KOMPUTE)
set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} kompute) set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} kompute)
set(LLAMA_EXTRA_INCLUDES ${LLAMA_EXTRA_INCLUDES} ${CMAKE_BINARY_DIR}) set(LLAMA_EXTRA_INCLUDES ${LLAMA_EXTRA_INCLUDES} ${CMAKE_BINARY_DIR})
else()
message(WARNING "Kompute not found")
endif()
endif() endif()
if (LLAMA_ALL_WARNINGS) if (LLAMA_ALL_WARNINGS)

View File

@@ -432,6 +432,8 @@ std::vector<LLModel::GPUDevice> LLamaModel::availableGPUDevices(size_t memoryReq
free(vkDevices); free(vkDevices);
return devices; return devices;
} }
#else
std::cerr << __func__ << ": built without Kompute\n";
#endif #endif
return {}; return {};

View File

@@ -61,7 +61,7 @@ copy_prebuilt_C_lib(SRC_CLIB_DIRECTORY,
setup( setup(
name=package_name, name=package_name,
version="2.2.0", version="2.2.1",
description="Python bindings for GPT4All", description="Python bindings for GPT4All",
author="Nomic and the Open Source Community", author="Nomic and the Open Source Community",
author_email="support@nomic.ai", author_email="support@nomic.ai",