From 7745f208bc8724351896aed6bc27b8bf26a300a7 Mon Sep 17 00:00:00 2001
From: Jared Van Bortel
Date: Tue, 11 Mar 2025 13:33:06 -0400
Subject: [PATCH] WIP (clang is crashing)

---
 .codespellrc                                |   2 +-
 .gitmodules                                 |   6 +
 docs/gpt4all_desktop/settings.md            |   1 -
 gpt4all-backend/deps/CMakeLists.txt         |   2 +
 gpt4all-backend/deps/date                   |   1 +
 .../include/gpt4all-backend/formatters.h    |   9 +-
 .../include/gpt4all-backend/ollama-client.h |  20 +-
 gpt4all-backend/src/CMakeLists.txt          |   1 +
 gpt4all-backend/src/ollama-client.cpp       |  14 +-
 gpt4all-backend/src/ollama-types.cpp        |   3 +-
 gpt4all-chat/CMakeLists.txt                 |  18 +-
 gpt4all-chat/deps/CMakeLists.txt            |   4 +
 gpt4all-chat/deps/generator                 |   1 +
 gpt4all-chat/qml/ApplicationSettings.qml    |  32 --
 gpt4all-chat/qml/ModelSettings.qml          |  47 --
 gpt4all-chat/src/chatlistmodel.cpp          |   2 +-
 gpt4all-chat/src/chatllm.cpp                | 431 ++++++------------
 gpt4all-chat/src/chatllm.h                  |  23 +-
 gpt4all-chat/src/embllm.cpp                 |   4 -
 gpt4all-chat/src/json-helpers.cpp           |  46 ++
 gpt4all-chat/src/json-helpers.h             |  15 +
 gpt4all-chat/src/llmodel/chat.h             |  32 --
 gpt4all-chat/src/llmodel/openai.h           |  75 ---
 gpt4all-chat/src/llmodel/provider.cpp       |  26 --
 gpt4all-chat/src/llmodel/provider.h         |  47 --
 gpt4all-chat/src/llmodel_chat.h             |  34 ++
 gpt4all-chat/src/llmodel_description.cpp    |  22 +
 gpt4all-chat/src/llmodel_description.h      |  40 ++
 gpt4all-chat/src/llmodel_ollama.cpp         |  82 ++++
 gpt4all-chat/src/llmodel_ollama.h           | 122 +++++
 .../openai.cpp => llmodel_openai.cpp}       | 113 +++--
 gpt4all-chat/src/llmodel_openai.h           | 139 ++++++
 gpt4all-chat/src/llmodel_provider.cpp       | 139 ++++++
 gpt4all-chat/src/llmodel_provider.h         | 199 ++++++++
 gpt4all-chat/src/llmodel_provider.inl       |  30 ++
 gpt4all-chat/src/modellist.h                |  11 +
 gpt4all-chat/src/mysettings.cpp             |  25 -
 gpt4all-chat/src/mysettings.h               |  12 +-
 gpt4all-chat/src/server.cpp                 |  75 +--
 gpt4all-chat/src/store_base.cpp             | 164 +++++++
 gpt4all-chat/src/store_base.h               | 119 +++++
 gpt4all-chat/src/store_base.inl             | 128 ++++++
 gpt4all-chat/src/store_provider.cpp         |  25 +
 gpt4all-chat/src/store_provider.h           |  49 ++
 gpt4all-chat/src/utils.h                    |  40 ++
 gpt4all-chat/src/utils.inl                  |  34 ++
 46 files changed, 1760 insertions(+), 704 deletions(-)
 create mode 160000 gpt4all-backend/deps/date
 create mode 160000 gpt4all-chat/deps/generator
 create mode 100644 gpt4all-chat/src/json-helpers.cpp
 create mode 100644 gpt4all-chat/src/json-helpers.h
 delete mode 100644 gpt4all-chat/src/llmodel/chat.h
 delete mode 100644 gpt4all-chat/src/llmodel/openai.h
 delete mode 100644 gpt4all-chat/src/llmodel/provider.cpp
 delete mode 100644 gpt4all-chat/src/llmodel/provider.h
 create mode 100644 gpt4all-chat/src/llmodel_chat.h
 create mode 100644 gpt4all-chat/src/llmodel_description.cpp
 create mode 100644 gpt4all-chat/src/llmodel_description.h
 create mode 100644 gpt4all-chat/src/llmodel_ollama.cpp
 create mode 100644 gpt4all-chat/src/llmodel_ollama.h
 rename gpt4all-chat/src/{llmodel/openai.cpp => llmodel_openai.cpp} (59%)
 create mode 100644 gpt4all-chat/src/llmodel_openai.h
 create mode 100644 gpt4all-chat/src/llmodel_provider.cpp
 create mode 100644 gpt4all-chat/src/llmodel_provider.h
 create mode 100644 gpt4all-chat/src/llmodel_provider.inl
 create mode 100644 gpt4all-chat/src/store_base.cpp
 create mode 100644 gpt4all-chat/src/store_base.h
 create mode 100644 gpt4all-chat/src/store_base.inl
 create mode 100644 gpt4all-chat/src/store_provider.cpp
 create mode 100644 gpt4all-chat/src/store_provider.h

diff --git a/.codespellrc b/.codespellrc
index 0f401f6b..9a625e32 100644
--- a/.codespellrc
+++ b/.codespellrc
@@ -1,3 +1,3 @@
 [codespell]
-ignore-words-list = blong, afterall, assistent, crasher, requestor
+ignore-words-list = blong, afterall, assistent, crasher, requestor, nam
 skip = ./.git,./gpt4all-chat/translations,*.pdf,*.svg,*.lock
diff --git a/.gitmodules b/.gitmodules
index 9549e256..6afea7f0 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -26,3 +26,9 @@
 [submodule "gpt4all-backend/deps/qcoro"]
 	path = deps/qcoro
 	url = https://github.com/nomic-ai/qcoro.git
+[submodule "gpt4all-backend/deps/date"]
+	path = gpt4all-backend/deps/date
+	url = https://github.com/HowardHinnant/date.git
+[submodule "gpt4all-chat/deps/generator"]
+	path = gpt4all-chat/deps/generator
+	url = https://github.com/TartanLlama/generator.git
diff --git a/docs/gpt4all_desktop/settings.md b/docs/gpt4all_desktop/settings.md
index e9d5eb85..d48fe069 100644
--- a/docs/gpt4all_desktop/settings.md
+++ b/docs/gpt4all_desktop/settings.md
@@ -49,7 +49,6 @@ You can **clone** an existing model, which allows you to save a configuration of
 |----------------------------|------------------------------------------|-----------|
 | **Context Length**         | Maximum length of input sequence in tokens | 2048 |
 | **Max Length**             | Maximum length of response in tokens | 4096 |
-| **Prompt Batch Size**      | Token batch size for parallel processing | 128 |
 | **Temperature**            | Lower temperature gives more likely generations | 0.7 |
 | **Top P**                  | Prevents choosing highly unlikely tokens | 0.4 |
 | **Top K**                  | Size of selection pool for tokens | 40 |
diff --git a/gpt4all-backend/deps/CMakeLists.txt b/gpt4all-backend/deps/CMakeLists.txt
index 382e83e3..e74dcbe7 100644
--- a/gpt4all-backend/deps/CMakeLists.txt
+++ b/gpt4all-backend/deps/CMakeLists.txt
@@ -12,3 +12,5 @@ FetchContent_Declare(
     URL_HASH "SHA256=7da75f171837577a52bbf217e17f8ea576c7c246e4594d617bfde7fafd408be5"
 )
 FetchContent_MakeAvailable(boost)
+
+add_subdirectory(date)
diff --git a/gpt4all-backend/deps/date b/gpt4all-backend/deps/date
new file mode 160000
index 00000000..5bdb7e6f
--- /dev/null
+++ b/gpt4all-backend/deps/date
@@ -0,0 +1 @@
+Subproject commit 5bdb7e6f31fac909c090a46dbd9fea27b6e609a4
diff --git a/gpt4all-backend/include/gpt4all-backend/formatters.h b/gpt4all-backend/include/gpt4all-backend/formatters.h
index e64f55b4..84b8c5ba 100644
--- a/gpt4all-backend/include/gpt4all-backend/formatters.h
+++ b/gpt4all-backend/include/gpt4all-backend/formatters.h
@@ -26,7 +26,8 @@
     } \
 }
 
-MAKE_FORMATTER(QUtf8StringView, value                     );
-MAKE_FORMATTER(QStringView,     value.toUtf8()            );
-MAKE_FORMATTER(QString,         value.toUtf8()            );
-MAKE_FORMATTER(QVariant,        value.toString().toUtf8());
+MAKE_FORMATTER(QLatin1StringView, value                     );
+MAKE_FORMATTER(QString,           value.toUtf8()            );
+MAKE_FORMATTER(QStringView,       value.toUtf8()            );
+MAKE_FORMATTER(QUtf8StringView,   value                     );
+MAKE_FORMATTER(QVariant,          value.toString().toUtf8());
diff --git a/gpt4all-backend/include/gpt4all-backend/ollama-client.h b/gpt4all-backend/include/gpt4all-backend/ollama-client.h
index 4c251024..c891b64f 100644
--- a/gpt4all-backend/include/gpt4all-backend/ollama-client.h
+++ b/gpt4all-backend/include/gpt4all-backend/ollama-client.h
@@ -11,7 +11,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
 
@@ -26,26 +25,21 @@ namespace gpt4all::backend {
 struct ResponseError {
 public:
     struct BadStatus { int code; };
-
-private:
     using ErrorCode = std::variant<
         QNetworkReply::NetworkError,
         boost::system::error_code,
         BadStatus
     >;
 
-public:
-    ErrorCode error;
-    QString errorString;
-
     ResponseError(const QRestReply *reply);
+    ResponseError(const boost::system::system_error &e);
 
-    ResponseError(const boost::system::system_error &e)
-        : error(e.code())
-        , errorString(QString::fromUtf8(e.what()))
-    {
-        assert(e.code());
-    }
+    const ErrorCode &error      () { return m_error;       }
+    const QString   &errorString() { return m_errorString; }
+
+private:
+    ErrorCode m_error;
+    QString   m_errorString;
 };
 
 template
diff --git a/gpt4all-backend/src/CMakeLists.txt b/gpt4all-backend/src/CMakeLists.txt
index 212264c3..5fc030ad 100644
--- a/gpt4all-backend/src/CMakeLists.txt
+++ b/gpt4all-backend/src/CMakeLists.txt
@@ -21,6 +21,7 @@ target_link_libraries(${TARGET} PUBLIC
 )
 target_link_libraries(${TARGET} PRIVATE
     QCoro6::Network
+    date::date
     fmt::fmt
 )
diff --git a/gpt4all-backend/src/ollama-client.cpp b/gpt4all-backend/src/ollama-client.cpp
index eda623d8..d32d2ae6 100644
--- a/gpt4all-backend/src/ollama-client.cpp
+++ b/gpt4all-backend/src/ollama-client.cpp
@@ -13,7 +13,6 @@
 #include
 #include
-#include
 
 #include
 #include
 
@@ -28,13 +27,20 @@ namespace gpt4all::backend {
 
 ResponseError::ResponseError(const QRestReply *reply)
 {
     if (reply->hasError()) {
-        error = reply->networkReply()->error();
+        m_error = reply->networkReply()->error();
     } else if (!reply->isHttpStatusSuccess()) {
-        error = BadStatus(reply->httpStatus());
+        m_error = BadStatus(reply->httpStatus());
     } else
         Q_UNREACHABLE();
-    errorString = restErrorString(*reply);
+    m_errorString = restErrorString(*reply);
+}
+
+ResponseError::ResponseError(const boost::system::system_error &e)
+    : m_error(e.code())
+    , m_errorString(QString::fromUtf8(e.what()))
+{
+    Q_ASSERT(e.code());
 }
 
 QNetworkRequest OllamaClient::makeRequest(const QString &path) const
diff --git a/gpt4all-backend/src/ollama-types.cpp b/gpt4all-backend/src/ollama-types.cpp
index d4f9addb..c3a0eaf5 100644
--- a/gpt4all-backend/src/ollama-types.cpp
+++ b/gpt4all-backend/src/ollama-types.cpp
@@ -4,6 +4,7 @@
 #include // IWYU pragma: keep
 
 #include
+#include
 #include
 #include
 
@@ -40,7 +41,7 @@ Time tag_invoke(const json::value_to_tag