diff --git a/gpt4all-backend/llamacpp_backend_impl.cpp b/gpt4all-backend/llamacpp_backend_impl.cpp
index cd92b15e..d666f3e9 100644
--- a/gpt4all-backend/llamacpp_backend_impl.cpp
+++ b/gpt4all-backend/llamacpp_backend_impl.cpp
@@ -884,7 +884,8 @@ static const EmbModelGroup EMBEDDING_MODEL_SPECS[] {
          "multilingual-e5-large-instruct"}},
 };
 
-static const EmbModelSpec *getEmbedSpec(const std::string &modelName) {
+static const EmbModelSpec *getEmbedSpec(const std::string &modelName)
+{
     static const auto &specs = EMBEDDING_MODEL_SPECS;
 
     auto it = std::find_if(specs, std::end(specs), [&modelName](auto &spec) {
diff --git a/gpt4all-chat/llamacpp_model.cpp b/gpt4all-chat/llamacpp_model.cpp
index 8691717b..d65ad445 100644
--- a/gpt4all-chat/llamacpp_model.cpp
+++ b/gpt4all-chat/llamacpp_model.cpp
@@ -94,7 +94,8 @@ void LLModelStore::destroy()
     m_availableModel.reset();
 }
 
-void LLModelInfo::resetModel(LlamaCppModel *cllm, ModelBackend *model) {
+void LLModelInfo::resetModel(LlamaCppModel *cllm, ModelBackend *model)
+{
     this->model.reset(model);
     fallbackReason.reset();
     emit cllm->loadedModelInfoChanged();
@@ -582,7 +583,7 @@ bool LlamaCppModel::loadNewModel(const ModelInfo &modelInfo, QVariantMap &modelL
     modelLoadProps.insert("$duration", modelLoadTimer.elapsed() / 1000.);
 
     return true;
-};
+}
 
 bool LlamaCppModel::isModelLoaded() const
 {
diff --git a/gpt4all-chat/modellist.cpp b/gpt4all-chat/modellist.cpp
index c80c1955..93ccbe95 100644
--- a/gpt4all-chat/modellist.cpp
+++ b/gpt4all-chat/modellist.cpp
@@ -514,16 +514,18 @@ ModelList::ModelList()
     QCoreApplication::instance()->installEventFilter(this);
 }
 
-QString ModelList::compatibleModelNameHash(QUrl baseUrl, QString modelName) {
+QString ModelList::compatibleModelNameHash(QUrl baseUrl, QString modelName)
+{
     QCryptographicHash sha256(QCryptographicHash::Sha256);
     sha256.addData((baseUrl.toString() + "_" + modelName).toUtf8());
     return sha256.result().toHex();
-};
+}
 
-QString ModelList::compatibleModelFilename(QUrl baseUrl, QString modelName) {
+QString ModelList::compatibleModelFilename(QUrl baseUrl, QString modelName)
+{
     QString hash(compatibleModelNameHash(baseUrl, modelName));
     return QString(u"gpt4all-%1-capi.rmodel"_s).arg(hash);
-};
+}
 
 bool ModelList::eventFilter(QObject *obj, QEvent *ev)
 {
diff --git a/gpt4all-chat/network.cpp b/gpt4all-chat/network.cpp
index a9b1ea6b..2b5f332c 100644
--- a/gpt4all-chat/network.cpp
+++ b/gpt4all-chat/network.cpp
@@ -99,7 +99,8 @@ Network *Network::globalInstance()
     return networkInstance();
 }
 
-bool Network::isHttpUrlValid(QUrl url) {
+bool Network::isHttpUrlValid(QUrl url)
+{
     if (!url.isValid())
         return false;
     QString scheme(url.scheme());