Metal+LLama take two (#929)

Support latest llama with Metal
---------

Co-authored-by: Adam Treat <adam@nomic.ai>
Co-authored-by: niansa/tuxifan <tuxifan@posteo.de>
This commit is contained in:
Aaron Miller
2023-06-09 13:48:46 -07:00
committed by GitHub
parent b162b5c64e
commit d3ba1295a7
8 changed files with 141 additions and 66 deletions

View File

@@ -58,6 +58,11 @@ set (CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
# Pull in the gpt4all backend as a subproject; "llmodel" is the binary-dir name.
add_subdirectory(../gpt4all-backend llmodel)
# Path to the Metal shader source, set only on macOS; stays empty elsewhere so
# later if(METAL_SHADER_FILE) guards become no-ops on non-Apple platforms.
set(METAL_SHADER_FILE)
if(${CMAKE_SYSTEM_NAME} MATCHES Darwin)
set(METAL_SHADER_FILE ../gpt4all-backend/llama.cpp-mainline/ggml-metal.metal)
endif()
qt_add_executable(chat
main.cpp
chat.h chat.cpp
@@ -72,6 +77,7 @@ qt_add_executable(chat
server.h server.cpp
logger.h logger.cpp
sysinfo.h
${METAL_SHADER_FILE}
)
qt_add_qml_module(chat
@@ -132,6 +138,13 @@ if(${CMAKE_SYSTEM_NAME} MATCHES Darwin)
)
endif()
# Ship the Metal shader with the app (macOS only — METAL_SHADER_FILE is empty
# on other platforms, so this whole block is skipped there).
if(METAL_SHADER_FILE)
# Mark the shader as a RESOURCE so CMake places it in the app bundle's
# Resources directory when building the macOS bundle.
set_target_properties(chat PROPERTIES
RESOURCE ${METAL_SHADER_FILE}
)
# Also copy the shader verbatim into the build tree's bin/ directory —
# presumably so it sits next to the executable for non-bundled/dev runs;
# NOTE(review): source path is relative, resolved against the current
# source dir per configure_file semantics — confirm that is intended.
configure_file(${METAL_SHADER_FILE} bin/ggml-metal.metal COPYONLY)
endif()
# Define QT_QML_DEBUG only in Debug and RelWithDebInfo configurations
# (via generator expression, so it works with multi-config generators too),
# enabling the QML debugging infrastructure in those builds.
target_compile_definitions(chat
PRIVATE $<$<OR:$<CONFIG:Debug>,$<CONFIG:RelWithDebInfo>>:QT_QML_DEBUG>)
target_link_libraries(chat