diff --git a/gpt4all-chat/download.cpp b/gpt4all-chat/download.cpp
index 6eafc512..d3c309d9 100644
--- a/gpt4all-chat/download.cpp
+++ b/gpt4all-chat/download.cpp
@@ -109,7 +109,9 @@ void Download::downloadModel(const QString &modelFile)
             = QString("ERROR: Could not open temp file: %1 %2").arg(tempFile->fileName()).arg(modelFile);
         qWarning() << error;
         clearRetry(modelFile);
-        ModelList::globalInstance()->updateDataByFilename(modelFile, ModelList::DownloadErrorRole, error);
+        QVector<QPair<int, QVariant>> data;
+        data.append(qMakePair(ModelList::DownloadErrorRole, error));
+        ModelList::globalInstance()->updateDataByFilename(modelFile, data);
         return;
     }
     tempFile->flush();
@@ -128,7 +130,9 @@ void Download::downloadModel(const QString &modelFile)
         return;
     }
 
-    ModelList::globalInstance()->updateDataByFilename(modelFile, ModelList::DownloadingRole, true);
+    QVector<QPair<int, QVariant>> data;
+    data.append(qMakePair(ModelList::DownloadingRole, true));
+    ModelList::globalInstance()->updateDataByFilename(modelFile, data);
     ModelInfo info = ModelList::globalInstance()->modelInfoByFilename(modelFile);
     QString url = !info.url().isEmpty() ? info.url() : "http://gpt4all.io/models/gguf/" + modelFile;
     Network::globalInstance()->sendDownloadStarted(modelFile);
@@ -166,7 +170,9 @@ void Download::cancelDownload(const QString &modelFile)
             tempFile->deleteLater();
             m_activeDownloads.remove(modelReply);
 
-            ModelList::globalInstance()->updateDataByFilename(modelFile, ModelList::DownloadingRole, false);
+            QVector<QPair<int, QVariant>> data;
+            data.append(qMakePair(ModelList::DownloadingRole, false));
+            ModelList::globalInstance()->updateDataByFilename(modelFile, data);
             break;
         }
     }
@@ -188,7 +194,9 @@ void Download::installModel(const QString &modelFile, const QString &apiKey)
         ModelList::globalInstance()->updateModelsFromDirectory();
     }
 
-    ModelList::globalInstance()->updateDataByFilename(modelFile, ModelList::InstalledRole, true);
+    QVector<QPair<int, QVariant>> data;
+    data.append(qMakePair(ModelList::InstalledRole, true));
+    ModelList::globalInstance()->updateDataByFilename(modelFile, data);
 }
 
 void Download::removeModel(const QString &modelFile)
@@ -199,20 +207,27 @@ void Download::removeModel(const QString &modelFile)
         incompleteFile.remove();
     }
 
+    bool shouldRemoveInstalled = false;
     QFile file(filePath);
     if (file.exists()) {
         const ModelInfo info = ModelList::globalInstance()->modelInfoByFilename(modelFile);
-        ModelList::globalInstance()->removeInstalled(info);
+        shouldRemoveInstalled = info.installed && !info.isClone() && (info.isDiscovered() || info.description() == "");
+        if (shouldRemoveInstalled)
+            ModelList::globalInstance()->removeInstalled(info);
         Network::globalInstance()->sendRemoveModel(modelFile);
         file.remove();
     }
 
-    ModelList::globalInstance()->updateDataByFilename(modelFile, ModelList::InstalledRole, false);
-    ModelList::globalInstance()->updateDataByFilename(modelFile, ModelList::BytesReceivedRole, 0);
-    ModelList::globalInstance()->updateDataByFilename(modelFile, ModelList::BytesTotalRole, 0);
-    ModelList::globalInstance()->updateDataByFilename(modelFile, ModelList::TimestampRole, 0);
-    ModelList::globalInstance()->updateDataByFilename(modelFile, ModelList::SpeedRole, QString());
-    ModelList::globalInstance()->updateDataByFilename(modelFile, ModelList::DownloadErrorRole, QString());
+    if (!shouldRemoveInstalled) {
+        QVector<QPair<int, QVariant>> data;
+        data.append(qMakePair(ModelList::InstalledRole, false));
+        data.append(qMakePair(ModelList::BytesReceivedRole, 0));
+        data.append(qMakePair(ModelList::BytesTotalRole, 0));
+        data.append(qMakePair(ModelList::TimestampRole, 0));
+        data.append(qMakePair(ModelList::SpeedRole, QString()));
+        data.append(qMakePair(ModelList::DownloadErrorRole, QString()));
+        ModelList::globalInstance()->updateDataByFilename(modelFile, data);
+    }
 }
 
 void Download::handleSslErrors(QNetworkReply *reply, const QList<QSslError> &errors)
@@ -313,7 +328,9 @@ void Download::handleErrorOccurred(QNetworkReply::NetworkError code)
         .arg(code)
         .arg(modelReply->errorString());
     qWarning() << error;
-    ModelList::globalInstance()->updateDataByFilename(modelFilename, ModelList::DownloadErrorRole, error);
+    QVector<QPair<int, QVariant>> data;
+    data.append(qMakePair(ModelList::DownloadErrorRole, error));
+    ModelList::globalInstance()->updateDataByFilename(modelFilename, data);
     Network::globalInstance()->sendDownloadError(modelFilename, (int)code, modelReply->errorString());
     cancelDownload(modelFilename);
 }
@@ -352,10 +369,12 @@ void Download::handleDownloadProgress(qint64 bytesReceived, qint64 bytesTotal)
     else
         speedText = QString::number(static_cast<double>(speed / (1024.0 * 1024.0)), 'f', 2) + " MB/s";
 
-    ModelList::globalInstance()->updateDataByFilename(modelFilename, ModelList::BytesReceivedRole, currentBytesReceived);
-    ModelList::globalInstance()->updateDataByFilename(modelFilename, ModelList::BytesTotalRole, bytesTotal);
-    ModelList::globalInstance()->updateDataByFilename(modelFilename, ModelList::SpeedRole, speedText);
-    ModelList::globalInstance()->updateDataByFilename(modelFilename, ModelList::TimestampRole, currentUpdate);
+    QVector<QPair<int, QVariant>> data;
+    data.append(qMakePair(ModelList::BytesReceivedRole, currentBytesReceived));
+    data.append(qMakePair(ModelList::BytesTotalRole, bytesTotal));
+    data.append(qMakePair(ModelList::SpeedRole, speedText));
+    data.append(qMakePair(ModelList::TimestampRole, currentUpdate));
+    ModelList::globalInstance()->updateDataByFilename(modelFilename, data);
 }
 
 HashAndSaveFile::HashAndSaveFile()
@@ -457,8 +476,10 @@ void Download::handleModelDownloadFinished()
         modelReply->deleteLater();
         tempFile->deleteLater();
         if (!hasRetry(modelFilename)) {
-            ModelList::globalInstance()->updateDataByFilename(modelFilename, ModelList::DownloadingRole, false);
-            ModelList::globalInstance()->updateDataByFilename(modelFilename, ModelList::DownloadErrorRole, errorString);
+            QVector<QPair<int, QVariant>> data;
+            data.append(qMakePair(ModelList::DownloadingRole, false));
+            data.append(qMakePair(ModelList::DownloadErrorRole, errorString));
+            ModelList::globalInstance()->updateDataByFilename(modelFilename, data);
         }
         return;
     }
@@ -476,7 +497,9 @@ void Download::handleModelDownloadFinished()
     }
 
     // Notify that we are calculating hash
-    ModelList::globalInstance()->updateDataByFilename(modelFilename, ModelList::CalcHashRole, true);
+    QVector<QPair<int, QVariant>> data;
+    data.append(qMakePair(ModelList::CalcHashRole, true));
+    ModelList::globalInstance()->updateDataByFilename(modelFilename, data);
     QByteArray hash = ModelList::globalInstance()->modelInfoByFilename(modelFilename).hash;
     ModelInfo::HashAlgorithm hashAlgorithm = ModelList::globalInstance()->modelInfoByFilename(modelFilename).hashAlgorithm;
     const QString saveFilePath = MySettings::globalInstance()->modelPath() + modelFilename;
@@ -492,19 +515,24 @@ void Download::handleHashAndSaveFinished(bool success, const QString &error,
     Q_ASSERT(!tempFile->isOpen());
     QString modelFilename = modelReply->request().attribute(QNetworkRequest::User).toString();
     Network::globalInstance()->sendDownloadFinished(modelFilename, success);
-    ModelList::globalInstance()->updateDataByFilename(modelFilename, ModelList::CalcHashRole, false);
+
+    QVector<QPair<int, QVariant>> data;
+    data.append(qMakePair(ModelList::CalcHashRole, false));
+    data.append(qMakePair(ModelList::DownloadingRole, false));
+
     modelReply->deleteLater();
     tempFile->deleteLater();
-    ModelList::globalInstance()->updateDataByFilename(modelFilename, ModelList::DownloadingRole, false);
     if (!success) {
-        ModelList::globalInstance()->updateDataByFilename(modelFilename, ModelList::DownloadErrorRole, error);
+        data.append(qMakePair(ModelList::DownloadErrorRole, error));
     } else {
+        data.append(qMakePair(ModelList::DownloadErrorRole, QString()));
         ModelInfo info = ModelList::globalInstance()->modelInfoByFilename(modelFilename);
         if (info.isDiscovered())
             ModelList::globalInstance()->updateDiscoveredInstalled(info);
-        ModelList::globalInstance()->updateDataByFilename(modelFilename, ModelList::DownloadErrorRole, QString());
     }
+
+    ModelList::globalInstance()->updateDataByFilename(modelFilename, data);
 }
 
 void Download::handleReadyRead()
diff --git a/gpt4all-chat/modellist.cpp b/gpt4all-chat/modellist.cpp
index 2df9cf58..16fa18f9 100644
--- a/gpt4all-chat/modellist.cpp
+++ b/gpt4all-chat/modellist.cpp
@@ -785,13 +785,6 @@ QVariant ModelList::data(const QModelIndex &index, int role) const
     return dataInternal(info, role);
 }
 
-void ModelList::updateData(const QString &id, int role, const QVariant &value)
-{
-    QVector<QPair<int, QVariant>> data;
-    data.append(qMakePair(role, value));
-    updateData(id, data);
-}
-
 void ModelList::updateData(const QString &id, const QVector<QPair<int, QVariant>> &data)
 {
     int index;
@@ -936,7 +929,7 @@ void ModelList::resortModel()
     emit layoutChanged();
 }
 
-void ModelList::updateDataByFilename(const QString &filename, int role, const QVariant &value)
+void ModelList::updateDataByFilename(const QString &filename, const QVector<QPair<int, QVariant>> &data)
 {
     QVector<QString> modelsById;
     {
@@ -952,7 +945,7 @@
     }
 
     for (const QString &id : modelsById)
-        updateData(id, role, value);;
+        updateData(id, data);
 }
 
 ModelInfo ModelList::modelInfo(const QString &id) const
@@ -986,24 +979,27 @@ QString ModelList::clone(const ModelInfo &model)
 {
     const QString id = Network::globalInstance()->generateUniqueId();
     addModel(id);
-    updateData(id, ModelList::IsCloneRole, true);
-    updateData(id, ModelList::NameRole, uniqueModelName(model));
-    updateData(id, ModelList::FilenameRole, model.filename());
-    updateData(id, ModelList::DirpathRole, model.dirpath);
-    updateData(id, ModelList::InstalledRole, model.installed);
-    updateData(id, ModelList::OnlineRole, model.isOnline);
-    updateData(id, ModelList::TemperatureRole, model.temperature());
-    updateData(id, ModelList::TopPRole, model.topP());
-    updateData(id, ModelList::MinPRole, model.minP());
-    updateData(id, ModelList::TopKRole, model.topK());
-    updateData(id, ModelList::MaxLengthRole, model.maxLength());
-    updateData(id, ModelList::PromptBatchSizeRole, model.promptBatchSize());
-    updateData(id, ModelList::ContextLengthRole, model.contextLength());
-    updateData(id, ModelList::GpuLayersRole, model.gpuLayers());
-    updateData(id, ModelList::RepeatPenaltyRole, model.repeatPenalty());
-    updateData(id, ModelList::RepeatPenaltyTokensRole, model.repeatPenaltyTokens());
-    updateData(id, ModelList::PromptTemplateRole, model.promptTemplate());
-    updateData(id, ModelList::SystemPromptRole, model.systemPrompt());
+
+    QVector<QPair<int, QVariant>> data;
+    data.append(qMakePair(ModelList::InstalledRole, model.installed));
+    data.append(qMakePair(ModelList::IsCloneRole, true));
+    data.append(qMakePair(ModelList::NameRole, uniqueModelName(model)));
+    data.append(qMakePair(ModelList::FilenameRole, model.filename()));
+    data.append(qMakePair(ModelList::DirpathRole, model.dirpath));
+    data.append(qMakePair(ModelList::OnlineRole, model.isOnline));
+    data.append(qMakePair(ModelList::TemperatureRole, model.temperature()));
+    data.append(qMakePair(ModelList::TopPRole, model.topP()));
+    data.append(qMakePair(ModelList::MinPRole, model.minP()));
+    data.append(qMakePair(ModelList::TopKRole, model.topK()));
+    data.append(qMakePair(ModelList::MaxLengthRole, model.maxLength()));
+    data.append(qMakePair(ModelList::PromptBatchSizeRole, model.promptBatchSize()));
+    data.append(qMakePair(ModelList::ContextLengthRole, model.contextLength()));
+    data.append(qMakePair(ModelList::GpuLayersRole, model.gpuLayers()));
+    data.append(qMakePair(ModelList::RepeatPenaltyRole, model.repeatPenalty()));
+    data.append(qMakePair(ModelList::RepeatPenaltyTokensRole, model.repeatPenaltyTokens()));
+    data.append(qMakePair(ModelList::PromptTemplateRole, model.promptTemplate()));
+    data.append(qMakePair(ModelList::SystemPromptRole, model.systemPrompt()));
+    updateData(id, data);
 
     return id;
 }
@@ -1019,18 +1015,9 @@ void ModelList::removeClone(const ModelInfo &model)
 
 void ModelList::removeInstalled(const ModelInfo &model)
 {
-    // We only remove a model if it is installed and is discovered or sideloaded
-    if (!model.installed || !(model.isDiscovered() || model.description() == "" /*indicates sideloaded*/))
-        return;
-
-    // We shouldn't remove it if a discovered search is in progress or completed because the
-    // clearing of that search will erase it...
-
-    // FIXME: This won't be true in a bit when save the installed portion to settings, then we'll need to
-    // use this... or at least unflag the installed bit...
-    if (m_discoverNumberOfResults)
-        return;
-
+    Q_ASSERT(model.installed);
+    Q_ASSERT(!model.isClone());
+    Q_ASSERT(model.isDiscovered() || model.description() == "" /*indicates sideloaded*/);
     removeInternal(model);
     emit layoutChanged();
 }
@@ -1156,12 +1143,14 @@ void ModelList::updateModelsFromDirectory()
             }
 
             for (const QString &id : modelsById) {
-                updateData(id, InstalledRole, true);
-                updateData(id, FilenameRole, filename);
+                QVector<QPair<int, QVariant>> data;
+                data.append(qMakePair(InstalledRole, true));
+                data.append(qMakePair(FilenameRole, filename));
                 // FIXME: WE should change this to use a consistent filename for online models
-                updateData(id, OnlineRole, filename.startsWith("chatgpt-") || filename.startsWith("nomic-"));
-                updateData(id, DirpathRole, info.dir().absolutePath() + "/");
-                updateData(id, FilesizeRole, toFileSize(info.size()));
+                data.append(qMakePair(OnlineRole, filename.startsWith("chatgpt-") || filename.startsWith("nomic-")));
+                data.append(qMakePair(DirpathRole, info.dir().absolutePath() + "/"));
+                data.append(qMakePair(FilesizeRole, toFileSize(info.size())));
+                updateData(id, data);
             }
         }
     }
@@ -1359,46 +1348,48 @@ void ModelList::parseModelsJsonFile(const QByteArray &jsonData, bool save)
 
         if (!contains(id))
            addModel(id);
-        updateData(id, ModelList::NameRole, modelName);
-        updateData(id, ModelList::FilenameRole, modelFilename);
-        updateData(id, ModelList::FilesizeRole, modelFilesize);
-        updateData(id, ModelList::HashRole, modelHash);
-        updateData(id, ModelList::HashAlgorithmRole, ModelInfo::Md5);
-        updateData(id, ModelList::DefaultRole, isDefault);
-        updateData(id, ModelList::DescriptionRole, description);
-        updateData(id, ModelList::RequiresVersionRole, requiresVersion);
-        updateData(id, ModelList::VersionRemovedRole, versionRemoved);
-        updateData(id, ModelList::UrlRole, url);
-        updateData(id, ModelList::DisableGUIRole, disableGUI);
-        updateData(id, ModelList::OrderRole, order);
-        updateData(id, ModelList::RamrequiredRole, ramrequired);
-        updateData(id, ModelList::ParametersRole, parameters);
-        updateData(id, ModelList::QuantRole, quant);
-        updateData(id, ModelList::TypeRole, type);
+        QVector<QPair<int, QVariant>> data;
+        data.append(qMakePair(ModelList::NameRole, modelName));
+        data.append(qMakePair(ModelList::FilenameRole, modelFilename));
+        data.append(qMakePair(ModelList::FilesizeRole, modelFilesize));
+        data.append(qMakePair(ModelList::HashRole, modelHash));
+        data.append(qMakePair(ModelList::HashAlgorithmRole, ModelInfo::Md5));
+        data.append(qMakePair(ModelList::DefaultRole, isDefault));
+        data.append(qMakePair(ModelList::DescriptionRole, description));
+        data.append(qMakePair(ModelList::RequiresVersionRole, requiresVersion));
+        data.append(qMakePair(ModelList::VersionRemovedRole, versionRemoved));
+        data.append(qMakePair(ModelList::UrlRole, url));
+        data.append(qMakePair(ModelList::DisableGUIRole, disableGUI));
+        data.append(qMakePair(ModelList::OrderRole, order));
+        data.append(qMakePair(ModelList::RamrequiredRole, ramrequired));
+        data.append(qMakePair(ModelList::ParametersRole, parameters));
+        data.append(qMakePair(ModelList::QuantRole, quant));
+        data.append(qMakePair(ModelList::TypeRole, type));
         if (obj.contains("temperature"))
-            updateData(id, ModelList::TemperatureRole, obj["temperature"].toDouble());
+            data.append(qMakePair(ModelList::TemperatureRole, obj["temperature"].toDouble()));
         if (obj.contains("topP"))
-            updateData(id, ModelList::TopPRole, obj["topP"].toDouble());
+            data.append(qMakePair(ModelList::TopPRole, obj["topP"].toDouble()));
         if (obj.contains("minP"))
-            updateData(id, ModelList::MinPRole, obj["minP"].toDouble());
+            data.append(qMakePair(ModelList::MinPRole, obj["minP"].toDouble()));
         if (obj.contains("topK"))
-            updateData(id, ModelList::TopKRole, obj["topK"].toInt());
+            data.append(qMakePair(ModelList::TopKRole, obj["topK"].toInt()));
         if (obj.contains("maxLength"))
-            updateData(id, ModelList::MaxLengthRole, obj["maxLength"].toInt());
+            data.append(qMakePair(ModelList::MaxLengthRole, obj["maxLength"].toInt()));
         if (obj.contains("promptBatchSize"))
-            updateData(id, ModelList::PromptBatchSizeRole, obj["promptBatchSize"].toInt());
+            data.append(qMakePair(ModelList::PromptBatchSizeRole, obj["promptBatchSize"].toInt()));
         if (obj.contains("contextLength"))
-            updateData(id, ModelList::ContextLengthRole, obj["contextLength"].toInt());
+            data.append(qMakePair(ModelList::ContextLengthRole, obj["contextLength"].toInt()));
        if (obj.contains("gpuLayers"))
-            updateData(id, ModelList::GpuLayersRole, obj["gpuLayers"].toInt());
+            data.append(qMakePair(ModelList::GpuLayersRole, obj["gpuLayers"].toInt()));
         if (obj.contains("repeatPenalty"))
-            updateData(id, ModelList::RepeatPenaltyRole, obj["repeatPenalty"].toDouble());
+            data.append(qMakePair(ModelList::RepeatPenaltyRole, obj["repeatPenalty"].toDouble()));
         if (obj.contains("repeatPenaltyTokens"))
-            updateData(id, ModelList::RepeatPenaltyTokensRole, obj["repeatPenaltyTokens"].toInt());
+            data.append(qMakePair(ModelList::RepeatPenaltyTokensRole, obj["repeatPenaltyTokens"].toInt()));
         if (obj.contains("promptTemplate"))
-            updateData(id, ModelList::PromptTemplateRole, obj["promptTemplate"].toString());
+            data.append(qMakePair(ModelList::PromptTemplateRole, obj["promptTemplate"].toString()));
         if (obj.contains("systemPrompt"))
-            updateData(id, ModelList::SystemPromptRole, obj["systemPrompt"].toString());
+            data.append(qMakePair(ModelList::SystemPromptRole, obj["systemPrompt"].toString()));
obj["systemPrompt"].toString())); + updateData(id, data); } const QString chatGPTDesc = tr("
  • Requires personal OpenAI API key.
  • WARNING: Will send" @@ -1414,18 +1405,20 @@ void ModelList::parseModelsJsonFile(const QByteArray &jsonData, bool save) changeId(modelFilename, id); if (!contains(id)) addModel(id); - updateData(id, ModelList::NameRole, modelName); - updateData(id, ModelList::FilenameRole, modelFilename); - updateData(id, ModelList::FilesizeRole, "minimal"); - updateData(id, ModelList::OnlineRole, true); - updateData(id, ModelList::DescriptionRole, - tr("OpenAI's ChatGPT model GPT-3.5 Turbo
    ") + chatGPTDesc); - updateData(id, ModelList::RequiresVersionRole, "2.4.2"); - updateData(id, ModelList::OrderRole, "ca"); - updateData(id, ModelList::RamrequiredRole, 0); - updateData(id, ModelList::ParametersRole, "?"); - updateData(id, ModelList::QuantRole, "NA"); - updateData(id, ModelList::TypeRole, "GPT"); + QVector> data; + data.append(qMakePair(ModelList::NameRole, modelName)); + data.append(qMakePair(ModelList::FilenameRole, modelFilename)); + data.append(qMakePair(ModelList::FilesizeRole, "minimal")); + data.append(qMakePair(ModelList::OnlineRole, true)); + data.append(qMakePair(ModelList::DescriptionRole, + tr("OpenAI's ChatGPT model GPT-3.5 Turbo
    ") + chatGPTDesc)); + data.append(qMakePair(ModelList::RequiresVersionRole, "2.4.2")); + data.append(qMakePair(ModelList::OrderRole, "ca")); + data.append(qMakePair(ModelList::RamrequiredRole, 0)); + data.append(qMakePair(ModelList::ParametersRole, "?")); + data.append(qMakePair(ModelList::QuantRole, "NA")); + data.append(qMakePair(ModelList::TypeRole, "GPT")); + updateData(id, data); } { @@ -1438,18 +1431,20 @@ void ModelList::parseModelsJsonFile(const QByteArray &jsonData, bool save) changeId(modelFilename, id); if (!contains(id)) addModel(id); - updateData(id, ModelList::NameRole, modelName); - updateData(id, ModelList::FilenameRole, modelFilename); - updateData(id, ModelList::FilesizeRole, "minimal"); - updateData(id, ModelList::OnlineRole, true); - updateData(id, ModelList::DescriptionRole, - tr("OpenAI's ChatGPT model GPT-4
    ") + chatGPTDesc + chatGPT4Warn); - updateData(id, ModelList::RequiresVersionRole, "2.4.2"); - updateData(id, ModelList::OrderRole, "cb"); - updateData(id, ModelList::RamrequiredRole, 0); - updateData(id, ModelList::ParametersRole, "?"); - updateData(id, ModelList::QuantRole, "NA"); - updateData(id, ModelList::TypeRole, "GPT"); + QVector> data; + data.append(qMakePair(ModelList::NameRole, modelName)); + data.append(qMakePair(ModelList::FilenameRole, modelFilename)); + data.append(qMakePair(ModelList::FilesizeRole, "minimal")); + data.append(qMakePair(ModelList::OnlineRole, true)); + data.append(qMakePair(ModelList::DescriptionRole, + tr("OpenAI's ChatGPT model GPT-4
    ") + chatGPTDesc + chatGPT4Warn)); + data.append(qMakePair(ModelList::RequiresVersionRole, "2.4.2")); + data.append(qMakePair(ModelList::OrderRole, "cb")); + data.append(qMakePair(ModelList::RamrequiredRole, 0)); + data.append(qMakePair(ModelList::ParametersRole, "?")); + data.append(qMakePair(ModelList::QuantRole, "NA")); + data.append(qMakePair(ModelList::TypeRole, "GPT")); + updateData(id, data); } { @@ -1465,35 +1460,39 @@ void ModelList::parseModelsJsonFile(const QByteArray &jsonData, bool save) changeId(modelFilename, id); if (!contains(id)) addModel(id); - updateData(id, ModelList::NameRole, modelName); - updateData(id, ModelList::FilenameRole, modelFilename); - updateData(id, ModelList::FilesizeRole, "minimal"); - updateData(id, ModelList::OnlineRole, true); - updateData(id, ModelList::DisableGUIRole, true); - updateData(id, ModelList::DescriptionRole, - tr("LocalDocs Nomic Atlas Embed
    ") + nomicEmbedDesc); - updateData(id, ModelList::RequiresVersionRole, "2.6.3"); - updateData(id, ModelList::OrderRole, "na"); - updateData(id, ModelList::RamrequiredRole, 0); - updateData(id, ModelList::ParametersRole, "?"); - updateData(id, ModelList::QuantRole, "NA"); - updateData(id, ModelList::TypeRole, "Bert"); + QVector> data; + data.append(qMakePair(ModelList::NameRole, modelName)); + data.append(qMakePair(ModelList::FilenameRole, modelFilename)); + data.append(qMakePair(ModelList::FilesizeRole, "minimal")); + data.append(qMakePair(ModelList::OnlineRole, true)); + data.append(qMakePair(ModelList::DisableGUIRole, true)); + data.append(qMakePair(ModelList::DescriptionRole, + tr("LocalDocs Nomic Atlas Embed
    ") + nomicEmbedDesc)); + data.append(qMakePair(ModelList::RequiresVersionRole, "2.6.3")); + data.append(qMakePair(ModelList::OrderRole, "na")); + data.append(qMakePair(ModelList::RamrequiredRole, 0)); + data.append(qMakePair(ModelList::ParametersRole, "?")); + data.append(qMakePair(ModelList::QuantRole, "NA")); + data.append(qMakePair(ModelList::TypeRole, "Bert")); + updateData(id, data); } } void ModelList::updateDiscoveredInstalled(const ModelInfo &info) { - updateData(info.id(), ModelList::InstalledRole, true); - updateData(info.id(), ModelList::IsDiscoveredRole, true); - updateData(info.id(), ModelList::NameRole, info.name()); - updateData(info.id(), ModelList::FilenameRole, info.filename()); - updateData(info.id(), ModelList::DescriptionRole, info.description()); - updateData(info.id(), ModelList::UrlRole, info.url()); - updateData(info.id(), ModelList::LikesRole, info.likes()); - updateData(info.id(), ModelList::DownloadsRole, info.downloads()); - updateData(info.id(), ModelList::RecencyRole, info.recency()); - updateData(info.id(), ModelList::QuantRole, info.quant()); - updateData(info.id(), ModelList::TypeRole, info.type()); + QVector> data; + data.append(qMakePair(ModelList::InstalledRole, true)); + data.append(qMakePair(ModelList::IsDiscoveredRole, true)); + data.append(qMakePair(ModelList::NameRole, info.name())); + data.append(qMakePair(ModelList::FilenameRole, info.filename())); + data.append(qMakePair(ModelList::DescriptionRole, info.description())); + data.append(qMakePair(ModelList::UrlRole, info.url())); + data.append(qMakePair(ModelList::LikesRole, info.likes())); + data.append(qMakePair(ModelList::DownloadsRole, info.downloads())); + data.append(qMakePair(ModelList::RecencyRole, info.recency())); + data.append(qMakePair(ModelList::QuantRole, info.quant())); + data.append(qMakePair(ModelList::TypeRole, info.type())); + updateData(info.id(), data); } void ModelList::updateModelsFromSettings() @@ -1511,98 +1510,100 @@ void ModelList::updateModelsFromSettings() addModel(id); + QVector> data; if (settings.contains(g + "/name")) { const QString name = settings.value(g + "/name").toString(); - updateData(id, ModelList::NameRole, name); + data.append(qMakePair(ModelList::NameRole, name)); } if (settings.contains(g + "/filename")) { const QString filename = settings.value(g + "/filename").toString(); - updateData(id, ModelList::FilenameRole, filename); + data.append(qMakePair(ModelList::FilenameRole, filename)); } if (settings.contains(g + "/description")) { const QString d = settings.value(g + "/description").toString(); - updateData(id, ModelList::DescriptionRole, d); + data.append(qMakePair(ModelList::DescriptionRole, d)); } if (settings.contains(g + "/url")) { const QString u = settings.value(g + "/url").toString(); - updateData(id, ModelList::UrlRole, u); + data.append(qMakePair(ModelList::UrlRole, u)); } if (settings.contains(g + "/quant")) { const QString q = settings.value(g + "/quant").toString(); - updateData(id, ModelList::QuantRole, q); + data.append(qMakePair(ModelList::QuantRole, q)); } if (settings.contains(g + "/type")) { const QString t = settings.value(g + "/type").toString(); - updateData(id, ModelList::TypeRole, t); + data.append(qMakePair(ModelList::TypeRole, t)); } if (settings.contains(g + "/isClone")) { const bool b = settings.value(g + "/isClone").toBool(); - updateData(id, ModelList::IsCloneRole, b); + data.append(qMakePair(ModelList::IsCloneRole, b)); } if (settings.contains(g + "/isDiscovered")) { const bool b = settings.value(g + 
"/isDiscovered").toBool(); - updateData(id, ModelList::IsDiscoveredRole, b); + data.append(qMakePair(ModelList::IsDiscoveredRole, b)); } if (settings.contains(g + "/likes")) { const int l = settings.value(g + "/likes").toInt(); - updateData(id, ModelList::LikesRole, l); + data.append(qMakePair(ModelList::LikesRole, l)); } if (settings.contains(g + "/downloads")) { const int d = settings.value(g + "/downloads").toInt(); - updateData(id, ModelList::DownloadsRole, d); + data.append(qMakePair(ModelList::DownloadsRole, d)); } if (settings.contains(g + "/recency")) { const QDateTime r = settings.value(g + "/recency").toDateTime(); - updateData(id, ModelList::RecencyRole, r); + data.append(qMakePair(ModelList::RecencyRole, r)); } if (settings.contains(g + "/temperature")) { const double temperature = settings.value(g + "/temperature").toDouble(); - updateData(id, ModelList::TemperatureRole, temperature); + data.append(qMakePair(ModelList::TemperatureRole, temperature)); } if (settings.contains(g + "/topP")) { const double topP = settings.value(g + "/topP").toDouble(); - updateData(id, ModelList::TopPRole, topP); + data.append(qMakePair(ModelList::TopPRole, topP)); } if (settings.contains(g + "/minP")) { const double minP = settings.value(g + "/minP").toDouble(); - updateData(id, ModelList::MinPRole, minP); + data.append(qMakePair(ModelList::MinPRole, minP)); } if (settings.contains(g + "/topK")) { const int topK = settings.value(g + "/topK").toInt(); - updateData(id, ModelList::TopKRole, topK); + data.append(qMakePair(ModelList::TopKRole, topK)); } if (settings.contains(g + "/maxLength")) { const int maxLength = settings.value(g + "/maxLength").toInt(); - updateData(id, ModelList::MaxLengthRole, maxLength); + data.append(qMakePair(ModelList::MaxLengthRole, maxLength)); } if (settings.contains(g + "/promptBatchSize")) { const int promptBatchSize = settings.value(g + "/promptBatchSize").toInt(); - updateData(id, ModelList::PromptBatchSizeRole, promptBatchSize); + data.append(qMakePair(ModelList::PromptBatchSizeRole, promptBatchSize)); } if (settings.contains(g + "/contextLength")) { const int contextLength = settings.value(g + "/contextLength").toInt(); - updateData(id, ModelList::ContextLengthRole, contextLength); + data.append(qMakePair(ModelList::ContextLengthRole, contextLength)); } if (settings.contains(g + "/gpuLayers")) { const int gpuLayers = settings.value(g + "/gpuLayers").toInt(); - updateData(id, ModelList::GpuLayersRole, gpuLayers); + data.append(qMakePair(ModelList::GpuLayersRole, gpuLayers)); } if (settings.contains(g + "/repeatPenalty")) { const double repeatPenalty = settings.value(g + "/repeatPenalty").toDouble(); - updateData(id, ModelList::RepeatPenaltyRole, repeatPenalty); + data.append(qMakePair(ModelList::RepeatPenaltyRole, repeatPenalty)); } if (settings.contains(g + "/repeatPenaltyTokens")) { const int repeatPenaltyTokens = settings.value(g + "/repeatPenaltyTokens").toInt(); - updateData(id, ModelList::RepeatPenaltyTokensRole, repeatPenaltyTokens); + data.append(qMakePair(ModelList::RepeatPenaltyTokensRole, repeatPenaltyTokens)); } if (settings.contains(g + "/promptTemplate")) { const QString promptTemplate = settings.value(g + "/promptTemplate").toString(); - updateData(id, ModelList::PromptTemplateRole, promptTemplate); + data.append(qMakePair(ModelList::PromptTemplateRole, promptTemplate)); } if (settings.contains(g + "/systemPrompt")) { const QString systemPrompt = settings.value(g + "/systemPrompt").toString(); - updateData(id, ModelList::SystemPromptRole, 
+            data.append(qMakePair(ModelList::SystemPromptRole, systemPrompt));
         }
+        updateData(id, data);
     }
 }
diff --git a/gpt4all-chat/modellist.h b/gpt4all-chat/modellist.h
index f2f24d9e..2268179d 100644
--- a/gpt4all-chat/modellist.h
+++ b/gpt4all-chat/modellist.h
@@ -373,8 +373,7 @@ public:
     QVariant data(const QModelIndex &index, int role = Qt::DisplayRole) const override;
     QVariant data(const QString &id, int role) const;
     QVariant dataByFilename(const QString &filename, int role) const;
-    void updateData(const QString &id, int role, const QVariant &value);
-    void updateDataByFilename(const QString &filename, int role, const QVariant &value);
+    void updateDataByFilename(const QString &filename, const QVector<QPair<int, QVariant>> &data);
    void updateData(const QString &id, const QVector<QPair<int, QVariant>> &data);
     int count() const { return m_models.size(); }
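
Reviewer note: the sketch below is illustrative only and is not part of the patch. It mimics the batched-update convention this diff converges on — callers collect (role, value) pairs into a QVector<QPair<int, QVariant>> and hand them to a single updateData()/updateDataByFilename() call, so the model can apply one row change for all roles instead of one change per role. "DemoModelList", its role values, and the id string are hypothetical stand-ins, not names from the gpt4all codebase.

// batched_update_sketch.cpp -- minimal, self-contained illustration (Qt Core only)
#include <QDebug>
#include <QPair>
#include <QString>
#include <QVariant>
#include <QVector>

class DemoModelList // hypothetical stand-in for ModelList
{
public:
    enum Roles { DownloadingRole, BytesReceivedRole, SpeedRole };

    // Same shape as the new ModelList::updateData(id, data): one call, many roles.
    void updateData(const QString &id, const QVector<QPair<int, QVariant>> &data)
    {
        for (const auto &change : data)
            qDebug() << id << "role" << change.first << "->" << change.second;
        // A real model would update the stored row here and emit a single
        // dataChanged()-style notification covering all affected roles.
    }
};

int main()
{
    DemoModelList list;

    // Caller-side pattern used throughout the patch: accumulate, then update once.
    QVector<QPair<int, QVariant>> data;
    data.append(qMakePair(int(DemoModelList::DownloadingRole), QVariant(true)));
    data.append(qMakePair(int(DemoModelList::BytesReceivedRole), QVariant(1024)));
    data.append(qMakePair(int(DemoModelList::SpeedRole), QVariant(QStringLiteral("1.00 MB/s"))));
    list.updateData(QStringLiteral("example-model-id"), data);
    return 0;
}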