diff --git a/gpt4all-chat/modellist.cpp b/gpt4all-chat/modellist.cpp
index c0cba369..1b624c70 100644
--- a/gpt4all-chat/modellist.cpp
+++ b/gpt4all-chat/modellist.cpp
@@ -1210,132 +1210,133 @@ bool ModelList::modelExists(const QString &modelFilename) const
     return false;
 }
 
+static void updateOldRemoteModels(const QString &path)
+{
+    QDirIterator it(path, QDirIterator::Subdirectories);
+    while (it.hasNext()) {
+        it.next();
+        if (!it.fileInfo().isDir()) {
+            QString filename = it.fileName();
+            if (filename.startsWith("chatgpt-") && filename.endsWith(".txt")) {
+                QString apikey;
+                QString modelname(filename);
+                modelname.chop(4); // strip ".txt" extension
+                modelname.remove(0, 8); // strip "chatgpt-" prefix
+                QFile file(path + filename);
+                if (file.open(QIODevice::ReadWrite)) {
+                    QTextStream in(&file);
+                    apikey = in.readAll();
+                    file.close();
+                }
+
+                QJsonObject obj;
+                obj.insert("apiKey", apikey);
+                obj.insert("modelName", modelname);
+                QJsonDocument doc(obj);
+
+                auto newfilename = u"gpt4all-%1.rmodel"_s.arg(modelname);
+                QFile newfile(path + newfilename);
+                if (newfile.open(QIODevice::ReadWrite)) {
+                    QTextStream out(&newfile);
+                    out << doc.toJson();
+                    newfile.close();
+                }
+                file.remove();
+            }
+        }
+    }
+}
+
+void ModelList::processModelDirectory(const QString &path)
+{
+    QDirIterator it(path, QDir::Files, QDirIterator::Subdirectories);
+    while (it.hasNext()) {
+        it.next();
+
+        QString filename = it.fileName();
+        if (filename.startsWith("incomplete") || FILENAME_BLACKLIST.contains(filename))
+            continue;
+        if (!filename.endsWith(".gguf") && !filename.endsWith(".rmodel"))
+            continue;
+
+        QVector<QString> modelsById;
+        {
+            QMutexLocker locker(&m_mutex);
+            for (ModelInfo *info : m_models)
+                if (info->filename() == filename)
+                    modelsById.append(info->id());
+        }
+
+        if (modelsById.isEmpty()) {
+            if (!contains(filename))
+                addModel(filename);
+            modelsById.append(filename);
+        }
+
+        QFileInfo info = it.fileInfo();
+
+        bool isOnline(filename.endsWith(".rmodel"));
+        bool isCompatibleApi(filename.endsWith("-capi.rmodel"));
+
+        QString name;
+        QString description;
+        if (isCompatibleApi) {
+            QJsonObject obj;
+            {
+                QFile file(path + filename);
+                bool success = file.open(QIODeviceBase::ReadOnly);
+                (void)success;
+                Q_ASSERT(success);
+                QJsonDocument doc = QJsonDocument::fromJson(file.readAll());
+                obj = doc.object();
+            }
+            {
+                QString apiKey(obj["apiKey"].toString());
+                QString baseUrl(obj["baseUrl"].toString());
+                QString modelName(obj["modelName"].toString());
+                apiKey = apiKey.length() < 10 ? "*****" : apiKey.left(5) + "*****";
+                name = tr("%1 (%2)").arg(modelName, baseUrl);
+                description = tr("<strong>OpenAI-Compatible API Model</strong><br>"
" + "") + .arg(apiKey, baseUrl, modelName); + } + } + + for (const QString &id : modelsById) { + QVector> data { + { InstalledRole, true }, + { FilenameRole, filename }, + { OnlineRole, isOnline }, + { CompatibleApiRole, isCompatibleApi }, + { DirpathRole, info.dir().absolutePath() + "/" }, + { FilesizeRole, info.size() }, + }; + if (isCompatibleApi) { + // The data will be saved to "GPT4All.ini". + data.append({ NameRole, name }); + // The description is hard-coded into "GPT4All.ini" due to performance issue. + // If the description goes to be dynamic from its .rmodel file, it will get high I/O usage while using the ModelList. + data.append({ DescriptionRole, description }); + // Prompt template should be clear while using ChatML format which is using in most of OpenAI-Compatible API server. + data.append({ PromptTemplateRole, "%1" }); + } + updateData(id, data); + } + } +} + void ModelList::updateModelsFromDirectory() { const QString exePath = QCoreApplication::applicationDirPath() + QDir::separator(); const QString localPath = MySettings::globalInstance()->modelPath(); - auto updateOldRemoteModels = [&](const QString& path) { - QDirIterator it(path, QDirIterator::Subdirectories); - while (it.hasNext()) { - it.next(); - if (!it.fileInfo().isDir()) { - QString filename = it.fileName(); - if (filename.startsWith("chatgpt-") && filename.endsWith(".txt")) { - QString apikey; - QString modelname(filename); - modelname.chop(4); // strip ".txt" extension - modelname.remove(0, 8); // strip "chatgpt-" prefix - QFile file(path + filename); - if (file.open(QIODevice::ReadWrite)) { - QTextStream in(&file); - apikey = in.readAll(); - file.close(); - } - - QJsonObject obj; - obj.insert("apiKey", apikey); - obj.insert("modelName", modelname); - QJsonDocument doc(obj); - - auto newfilename = u"gpt4all-%1.rmodel"_s.arg(modelname); - QFile newfile(path + newfilename); - if (newfile.open(QIODevice::ReadWrite)) { - QTextStream out(&newfile); - out << doc.toJson(); - newfile.close(); - } - file.remove(); - } - } - } - }; - - auto processDirectory = [&](const QString& path) { - QDirIterator it(path, QDir::Files, QDirIterator::Subdirectories); - while (it.hasNext()) { - it.next(); - - QString filename = it.fileName(); - if (filename.startsWith("incomplete") || FILENAME_BLACKLIST.contains(filename)) - continue; - if (!filename.endsWith(".gguf") && !filename.endsWith(".rmodel")) - continue; - - QVector modelsById; - { - QMutexLocker locker(&m_mutex); - for (ModelInfo *info : m_models) - if (info->filename() == filename) - modelsById.append(info->id()); - } - - if (modelsById.isEmpty()) { - if (!contains(filename)) - addModel(filename); - modelsById.append(filename); - } - - QFileInfo info = it.fileInfo(); - - bool isOnline(filename.endsWith(".rmodel")); - bool isCompatibleApi(filename.endsWith("-capi.rmodel")); - - QString name; - QString description; - if (isCompatibleApi) { - QJsonObject obj; - { - QFile file(path + filename); - bool success = file.open(QIODeviceBase::ReadOnly); - (void)success; - Q_ASSERT(success); - QJsonDocument doc = QJsonDocument::fromJson(file.readAll()); - obj = doc.object(); - } - { - QString apiKey(obj["apiKey"].toString()); - QString baseUrl(obj["baseUrl"].toString()); - QString modelName(obj["modelName"].toString()); - apiKey = apiKey.length() < 10 ? "*****" : apiKey.left(5) + "*****"; - name = tr("%1 (%2)").arg(modelName, baseUrl); - description = tr("OpenAI-Compatible API Model
" - "
  • API Key: %1
  • " - "
  • Base URL: %2
  • " - "
  • Model Name: %3
") - .arg(apiKey, baseUrl, modelName); - } - } - - for (const QString &id : modelsById) { - QVector> data { - { InstalledRole, true }, - { FilenameRole, filename }, - { OnlineRole, isOnline }, - { CompatibleApiRole, isCompatibleApi }, - { DirpathRole, info.dir().absolutePath() + "/" }, - { FilesizeRole, info.size() }, - }; - if (isCompatibleApi) { - // The data will be saved to "GPT4All.ini". - data.append({ NameRole, name }); - // The description is hard-coded into "GPT4All.ini" due to performance issue. - // If the description goes to be dynamic from its .rmodel file, it will get high I/O usage while using the ModelList. - data.append({ DescriptionRole, description }); - // Prompt template should be clear while using ChatML format which is using in most of OpenAI-Compatible API server. - data.append({ PromptTemplateRole, "%1" }); - } - updateData(id, data); - } - } - }; - - updateOldRemoteModels(exePath); - processDirectory(exePath); + processModelDirectory(exePath); if (localPath != exePath) { updateOldRemoteModels(localPath); - processDirectory(localPath); + processModelDirectory(localPath); } } diff --git a/gpt4all-chat/modellist.h b/gpt4all-chat/modellist.h index d08e37be..fb1b7ed2 100644 --- a/gpt4all-chat/modellist.h +++ b/gpt4all-chat/modellist.h @@ -509,6 +509,7 @@ private: void parseModelsJsonFile(const QByteArray &jsonData, bool save); void parseDiscoveryJsonFile(const QByteArray &jsonData); QString uniqueModelName(const ModelInfo &model) const; + void processModelDirectory(const QString &path); private: mutable QMutex m_mutex;