Mirror of https://github.com/nomic-ai/gpt4all.git
fix llama.cpp k-quants (#988)
* enable k-quants on *all* mainline builds
parent b004c53a7b
commit abc081e48d
@@ -60,16 +60,16 @@ foreach(BUILD_VARIANT IN LISTS BUILD_VARIANTS)
     set(LLAMA_FMA ${GPT4ALL_ALLOW_NON_AVX})
 
     if (BUILD_VARIANT STREQUAL metal)
-        set(LLAMA_K_QUANTS YES)
         set(LLAMA_METAL YES)
     else()
-        set(LLAMA_K_QUANTS NO)
         set(LLAMA_METAL NO)
     endif()
 
     # Include GGML
+    set(LLAMA_K_QUANTS YES)
     include_ggml(llama.cpp-mainline -mainline-${BUILD_VARIANT} ON)
     if (NOT LLAMA_METAL)
+        set(LLAMA_K_QUANTS NO)
         include_ggml(llama.cpp-230511 -230511-${BUILD_VARIANT} ON)
         include_ggml(llama.cpp-230519 -230519-${BUILD_VARIANT} ON)
     endif()
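For reference, a minimal sketch of what this part of the backend CMake logic looks like with the patch applied. The enclosing foreach()/endforeach() is assumed from the hunk header; the variable names and include_ggml() calls come straight from the diff above, and the comments are interpretive.

foreach(BUILD_VARIANT IN LISTS BUILD_VARIANTS)
    set(LLAMA_FMA ${GPT4ALL_ALLOW_NON_AVX})

    # Metal is only enabled for the dedicated metal build variant.
    if (BUILD_VARIANT STREQUAL metal)
        set(LLAMA_METAL YES)
    else()
        set(LLAMA_METAL NO)
    endif()

    # Include GGML: k-quants are now enabled for every mainline llama.cpp
    # build, not just the metal variant.
    set(LLAMA_K_QUANTS YES)
    include_ggml(llama.cpp-mainline -mainline-${BUILD_VARIANT} ON)
    if (NOT LLAMA_METAL)
        # The pinned legacy snapshots are still built without k-quants.
        set(LLAMA_K_QUANTS NO)
        include_ggml(llama.cpp-230511 -230511-${BUILD_VARIANT} ON)
        include_ggml(llama.cpp-230519 -230519-${BUILD_VARIANT} ON)
    endif()
endforeach()

In effect, LLAMA_K_QUANTS is set immediately before each include_ggml() call, so the flag follows the specific llama.cpp tree being configured rather than the build variant.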