From f4f7de51e74af66f154fbb3cd560087dfeb91bfd Mon Sep 17 00:00:00 2001
From: Jared Van Bortel
Date: Fri, 24 Jan 2025 13:21:34 -0500
Subject: [PATCH] Revert "cmake: do not modify gpt4all.app after signing it (#3413)"

This reverts commit c01ac7fa933ae135dc8d9eed9dcbc2890dff38e3.

Signed-off-by: Jared Van Bortel
---
 gpt4all-chat/CHANGELOG.md                 |  2 --
 gpt4all-chat/CMakeLists.txt               | 26 ++++++++++----------------
 gpt4all-chat/cmake/deploy-qt-mac.cmake.in |  6 ++++++
 gpt4all-chat/src/main.cpp                 | 23 ++++++++++++-----------
 4 files changed, 28 insertions(+), 29 deletions(-)

diff --git a/gpt4all-chat/CHANGELOG.md b/gpt4all-chat/CHANGELOG.md
index e12a4f6a..676485fe 100644
--- a/gpt4all-chat/CHANGELOG.md
+++ b/gpt4all-chat/CHANGELOG.md
@@ -9,7 +9,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/).
 ### Fixed
 - Fix regression while using localdocs with server API ([#3410](https://github.com/nomic-ai/gpt4all/pull/3410))
 - Don't show system messages in server chat view ([#3411](https://github.com/nomic-ai/gpt4all/pull/3411))
-- Fix `codesign --verify` failure on macOS ([#3413](https://github.com/nomic-ai/gpt4all/pull/3413))
 
 ## [3.7.0] - 2025-01-21
 
@@ -271,7 +270,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/).
 - Fix several Vulkan resource management issues ([#2694](https://github.com/nomic-ai/gpt4all/pull/2694))
 - Fix crash/hang when some models stop generating, by showing special tokens ([#2701](https://github.com/nomic-ai/gpt4all/pull/2701))
 
-[Unreleased]: https://github.com/nomic-ai/gpt4all/compare/v3.7.0...HEAD
 [3.7.0]: https://github.com/nomic-ai/gpt4all/compare/v3.6.1...v3.7.0
 [3.6.1]: https://github.com/nomic-ai/gpt4all/compare/v3.6.0...v3.6.1
 [3.6.0]: https://github.com/nomic-ai/gpt4all/compare/v3.5.3...v3.6.0
diff --git a/gpt4all-chat/CMakeLists.txt b/gpt4all-chat/CMakeLists.txt
index 19de3a5e..53ef127f 100644
--- a/gpt4all-chat/CMakeLists.txt
+++ b/gpt4all-chat/CMakeLists.txt
@@ -445,18 +445,12 @@ endif()
 
 # -- install --
 
-if (APPLE)
-    set(GPT4ALL_LIB_DEST bin/gpt4all.app/Contents/Frameworks)
-else()
-    set(GPT4ALL_LIB_DEST lib)
-endif()
-
 install(TARGETS chat DESTINATION bin COMPONENT ${COMPONENT_NAME_MAIN})
 
 install(
     TARGETS llmodel
-    LIBRARY DESTINATION ${GPT4ALL_LIB_DEST} COMPONENT ${COMPONENT_NAME_MAIN} # .so/.dylib
-    RUNTIME DESTINATION bin COMPONENT ${COMPONENT_NAME_MAIN} # .dll
+    LIBRARY DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN} # .so/.dylib
+    RUNTIME DESTINATION bin COMPONENT ${COMPONENT_NAME_MAIN} # .dll
 )
 
 # We should probably iterate through the list of the cmake for backend, but these need to be installed
@@ -479,8 +473,8 @@ endif()
 
 install(
     TARGETS ${MODEL_IMPL_TARGETS}
-    LIBRARY DESTINATION ${GPT4ALL_LIB_DEST} COMPONENT ${COMPONENT_NAME_MAIN} # .so/.dylib
-    RUNTIME DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN} # .dll
+    LIBRARY DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN} # .so/.dylib
+    RUNTIME DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN} # .dll
 )
 
 if(APPLE AND GPT4ALL_SIGN_INSTALL)
@@ -509,7 +503,7 @@ if (LLMODEL_CUDA)
         TARGETS llamamodel-mainline-cuda
                 llamamodel-mainline-cuda-avxonly
         RUNTIME_DEPENDENCY_SET llama-cuda-deps
-        LIBRARY DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN} # .so
+        LIBRARY DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN} # .so/.dylib
         RUNTIME DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN} # .dll
     )
     if (WIN32)
@@ -525,11 +519,11 @@ endif()
 
 if (NOT GPT4ALL_USING_QTPDF)
     # Install PDFium
-    install(
-        FILES ${PDFium_LIBRARY}
-        LIBRARY DESTINATION ${GPT4ALL_LIB_DEST} COMPONENT ${COMPONENT_NAME_MAIN} # .so/.dylib
-        RUNTIME DESTINATION bin COMPONENT ${COMPONENT_NAME_MAIN} # .dll
-    )
+    if (WIN32)
+        install(FILES "${PDFium_LIBRARY}" DESTINATION bin COMPONENT ${COMPONENT_NAME_MAIN})
+    else()
+        install(FILES "${PDFium_LIBRARY}" DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN})
+    endif()
 endif()
 
 if (NOT APPLE)
diff --git a/gpt4all-chat/cmake/deploy-qt-mac.cmake.in b/gpt4all-chat/cmake/deploy-qt-mac.cmake.in
index 8798e21e..2e1ac38b 100644
--- a/gpt4all-chat/cmake/deploy-qt-mac.cmake.in
+++ b/gpt4all-chat/cmake/deploy-qt-mac.cmake.in
@@ -8,6 +8,12 @@ if (GPT4ALL_SIGN_INSTALL)
     set(MAC_NOTARIZE -sign-for-notarization=${GPT4ALL_SIGNING_ID})
 endif()
 execute_process(COMMAND ${MACDEPLOYQT} ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/bin/gpt4all.app -qmldir=${CMAKE_CURRENT_SOURCE_DIR} -verbose=2 ${MAC_NOTARIZE})
+file(GLOB MYLLAMALIBS ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/lib/libllama*)
+file(GLOB MYLLMODELLIBS ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/lib/libllmodel.*)
+file(COPY ${MYLLAMALIBS}
+     DESTINATION ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/bin/gpt4all.app/Contents/Frameworks)
+file(COPY ${MYLLMODELLIBS}
+     DESTINATION ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/bin/gpt4all.app/Contents/Frameworks)
 file(COPY "${CMAKE_CURRENT_SOURCE_DIR}/icons/gpt4all-32.png"
      DESTINATION ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data)
 file(COPY "${CMAKE_CURRENT_SOURCE_DIR}/icons/gpt4all-48.png"
diff --git a/gpt4all-chat/src/main.cpp b/gpt4all-chat/src/main.cpp
index 5edf863d..22b44169 100644
--- a/gpt4all-chat/src/main.cpp
+++ b/gpt4all-chat/src/main.cpp
@@ -88,18 +88,19 @@ int main(int argc, char *argv[])
 #endif
 
     // set search path before constructing the MySettings instance, which relies on this
-    {
-        auto appDirPath = QCoreApplication::applicationDirPath();
-        QStringList searchPaths {
-#ifdef Q_OS_DARWIN
-            u"%1/../Frameworks"_s.arg(appDirPath),
-#else
-            appDirPath,
-            u"%1/../lib"_s.arg(appDirPath),
+    QString llmodelSearchPaths = QCoreApplication::applicationDirPath();
+    const QString libDir = QCoreApplication::applicationDirPath() + "/../lib/";
+    if (LLM::directoryExists(libDir))
+        llmodelSearchPaths += ";" + libDir;
+#if defined(Q_OS_MAC)
+    const QString binDir = QCoreApplication::applicationDirPath() + "/../../../";
+    if (LLM::directoryExists(binDir))
+        llmodelSearchPaths += ";" + binDir;
+    const QString frameworksDir = QCoreApplication::applicationDirPath() + "/../Frameworks/";
+    if (LLM::directoryExists(frameworksDir))
+        llmodelSearchPaths += ";" + frameworksDir;
 #endif
-        };
-        LLModel::Implementation::setImplementationsSearchPath(searchPaths.join(u';').toStdString());
-    }
+    LLModel::Implementation::setImplementationsSearchPath(llmodelSearchPaths.toStdString());
 
     // Set the local and language translation before the qml engine has even been started. This will
     // use the default system locale unless the user has explicitly set it to use a different one.
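
Note (not part of the patch): the search-path construction restored in main.cpp above can be exercised on its own. The sketch below is a minimal standalone approximation, assuming a Qt build; it substitutes QDir::exists() for the project's LLM::directoryExists() helper and prints the semicolon-separated list instead of passing it to LLModel::Implementation::setImplementationsSearchPath().

#include <QCoreApplication>
#include <QDir>
#include <QString>
#include <QtDebug>

int main(int argc, char *argv[])
{
    QCoreApplication app(argc, argv);

    // Start with the directory that contains the executable.
    QString searchPaths = QCoreApplication::applicationDirPath();

    // Linux/Windows install layout: backend libraries live in ../lib next to bin/.
    const QString libDir = QCoreApplication::applicationDirPath() + "/../lib/";
    if (QDir(libDir).exists())
        searchPaths += ";" + libDir;

#if defined(Q_OS_MAC)
    // macOS bundle layout: the executable sits in gpt4all.app/Contents/MacOS, and
    // deploy-qt-mac.cmake.in copies libllama*/libllmodel.* into Contents/Frameworks.
    const QString binDir = QCoreApplication::applicationDirPath() + "/../../../";
    if (QDir(binDir).exists())
        searchPaths += ";" + binDir;
    const QString frameworksDir = QCoreApplication::applicationDirPath() + "/../Frameworks/";
    if (QDir(frameworksDir).exists())
        searchPaths += ";" + frameworksDir;
#endif

    // The real application hands this list to
    // LLModel::Implementation::setImplementationsSearchPath(...).
    qDebug().noquote() << searchPaths;
    return 0;
}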