diff --git a/gpt4all-chat/CHANGELOG.md b/gpt4all-chat/CHANGELOG.md index 676485fe..e12a4f6a 100644 --- a/gpt4all-chat/CHANGELOG.md +++ b/gpt4all-chat/CHANGELOG.md @@ -9,6 +9,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/). ### Fixed - Fix regression while using localdocs with server API ([#3410](https://github.com/nomic-ai/gpt4all/pull/3410)) - Don't show system messages in server chat view ([#3411](https://github.com/nomic-ai/gpt4all/pull/3411)) +- Fix `codesign --verify` failure on macOS ([#3413](https://github.com/nomic-ai/gpt4all/pull/3413)) ## [3.7.0] - 2025-01-21 @@ -270,6 +271,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/). - Fix several Vulkan resource management issues ([#2694](https://github.com/nomic-ai/gpt4all/pull/2694)) - Fix crash/hang when some models stop generating, by showing special tokens ([#2701](https://github.com/nomic-ai/gpt4all/pull/2701)) +[Unreleased]: https://github.com/nomic-ai/gpt4all/compare/v3.7.0...HEAD [3.7.0]: https://github.com/nomic-ai/gpt4all/compare/v3.6.1...v3.7.0 [3.6.1]: https://github.com/nomic-ai/gpt4all/compare/v3.6.0...v3.6.1 [3.6.0]: https://github.com/nomic-ai/gpt4all/compare/v3.5.3...v3.6.0 diff --git a/gpt4all-chat/CMakeLists.txt b/gpt4all-chat/CMakeLists.txt index 53ef127f..19de3a5e 100644 --- a/gpt4all-chat/CMakeLists.txt +++ b/gpt4all-chat/CMakeLists.txt @@ -445,12 +445,18 @@ endif() # -- install -- +if (APPLE) + set(GPT4ALL_LIB_DEST bin/gpt4all.app/Contents/Frameworks) +else() + set(GPT4ALL_LIB_DEST lib) +endif() + install(TARGETS chat DESTINATION bin COMPONENT ${COMPONENT_NAME_MAIN}) install( TARGETS llmodel - LIBRARY DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN} # .so/.dylib - RUNTIME DESTINATION bin COMPONENT ${COMPONENT_NAME_MAIN} # .dll + LIBRARY DESTINATION ${GPT4ALL_LIB_DEST} COMPONENT ${COMPONENT_NAME_MAIN} # .so/.dylib + RUNTIME DESTINATION bin COMPONENT ${COMPONENT_NAME_MAIN} # .dll ) # We should probably iterate through the 
list of the cmake for backend, but these need to be installed @@ -473,8 +479,8 @@ endif() install( TARGETS ${MODEL_IMPL_TARGETS} - LIBRARY DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN} # .so/.dylib - RUNTIME DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN} # .dll + LIBRARY DESTINATION ${GPT4ALL_LIB_DEST} COMPONENT ${COMPONENT_NAME_MAIN} # .so/.dylib + RUNTIME DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN} # .dll ) if(APPLE AND GPT4ALL_SIGN_INSTALL) @@ -503,7 +509,7 @@ if (LLMODEL_CUDA) TARGETS llamamodel-mainline-cuda llamamodel-mainline-cuda-avxonly RUNTIME_DEPENDENCY_SET llama-cuda-deps - LIBRARY DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN} # .so/.dylib + LIBRARY DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN} # .so RUNTIME DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN} # .dll ) if (WIN32) @@ -519,11 +525,11 @@ endif() if (NOT GPT4ALL_USING_QTPDF) # Install PDFium - if (WIN32) - install(FILES "${PDFium_LIBRARY}" DESTINATION bin COMPONENT ${COMPONENT_NAME_MAIN}) - else() - install(FILES "${PDFium_LIBRARY}" DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN}) - endif() + if (WIN32) + install(FILES "${PDFium_LIBRARY}" DESTINATION bin COMPONENT ${COMPONENT_NAME_MAIN}) # .dll + else() + install(FILES "${PDFium_LIBRARY}" DESTINATION ${GPT4ALL_LIB_DEST} COMPONENT ${COMPONENT_NAME_MAIN}) # .so/.dylib + endif() endif() if (NOT APPLE) diff --git a/gpt4all-chat/cmake/deploy-qt-mac.cmake.in b/gpt4all-chat/cmake/deploy-qt-mac.cmake.in index 2e1ac38b..8798e21e 100644 --- a/gpt4all-chat/cmake/deploy-qt-mac.cmake.in +++ b/gpt4all-chat/cmake/deploy-qt-mac.cmake.in @@ -8,12 +8,6 @@ if (GPT4ALL_SIGN_INSTALL) set(MAC_NOTARIZE -sign-for-notarization=${GPT4ALL_SIGNING_ID}) endif() execute_process(COMMAND ${MACDEPLOYQT} ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/bin/gpt4all.app -qmldir=${CMAKE_CURRENT_SOURCE_DIR} -verbose=2 ${MAC_NOTARIZE}) -file(GLOB MYLLAMALIBS ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/lib/libllama*) -file(GLOB MYLLMODELLIBS 
${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/lib/libllmodel.*) -file(COPY ${MYLLAMALIBS} - DESTINATION ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/bin/gpt4all.app/Contents/Frameworks) -file(COPY ${MYLLMODELLIBS} - DESTINATION ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/bin/gpt4all.app/Contents/Frameworks) file(COPY "${CMAKE_CURRENT_SOURCE_DIR}/icons/gpt4all-32.png" DESTINATION ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data) file(COPY "${CMAKE_CURRENT_SOURCE_DIR}/icons/gpt4all-48.png" diff --git a/gpt4all-chat/src/main.cpp b/gpt4all-chat/src/main.cpp index 22b44169..5edf863d 100644 --- a/gpt4all-chat/src/main.cpp +++ b/gpt4all-chat/src/main.cpp @@ -88,19 +88,18 @@ int main(int argc, char *argv[]) #endif // set search path before constructing the MySettings instance, which relies on this - QString llmodelSearchPaths = QCoreApplication::applicationDirPath(); - const QString libDir = QCoreApplication::applicationDirPath() + "/../lib/"; - if (LLM::directoryExists(libDir)) - llmodelSearchPaths += ";" + libDir; -#if defined(Q_OS_MAC) - const QString binDir = QCoreApplication::applicationDirPath() + "/../../../"; - if (LLM::directoryExists(binDir)) - llmodelSearchPaths += ";" + binDir; - const QString frameworksDir = QCoreApplication::applicationDirPath() + "/../Frameworks/"; - if (LLM::directoryExists(frameworksDir)) - llmodelSearchPaths += ";" + frameworksDir; + { + auto appDirPath = QCoreApplication::applicationDirPath(); + QStringList searchPaths { +#ifdef Q_OS_DARWIN + u"%1/../Frameworks"_s.arg(appDirPath), +#else + appDirPath, + u"%1/../lib"_s.arg(appDirPath), #endif - LLModel::Implementation::setImplementationsSearchPath(llmodelSearchPaths.toStdString()); + }; + LLModel::Implementation::setImplementationsSearchPath(searchPaths.join(u';').toStdString()); + } // Set the local and language translation before the qml engine has even been started. 
This will // use the default system locale unless the user has explicitly set it to use a different one.