modellist: fix models.json cache location

- The filename must include a version number, or we may load a cached
  models.json from the wrong version of GPT4All.
- The file should be stored in the platform's cache location, not in the
  settings location (see the sketch below).

Signed-off-by: Jared Van Bortel <jared@nomic.ai>
Jared Van Bortel, 2024-10-07 14:33:05 -04:00
parent 8a64b039dc
commit 1cbea1027f
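
For reference, a minimal standalone sketch of the two points above (not part
of this commit; organization/application names and example paths are
illustrative): the version number is baked into the cache filename at compile
time, and QStandardPaths::CacheLocation names a per-platform cache directory
distinct from the directory QSettings writes to.

    // sketch.cpp -- illustrative only, not from the commit
    #include <QCoreApplication>
    #include <QDebug>
    #include <QFileInfo>
    #include <QSettings>
    #include <QStandardPaths>

    #define MODELS_JSON_VERSION "3"

    int main(int argc, char *argv[])
    {
        QCoreApplication app(argc, argv);
        QCoreApplication::setOrganizationName("nomic.ai"); // assumed values
        QCoreApplication::setApplicationName("GPT4All");

        // Adjacent string literals concatenate at compile time -> "models3.json",
        // so each release of the app reads and writes its own cache entry.
        qDebug() << QStringLiteral("models" MODELS_JSON_VERSION ".json");

        // Per-platform cache directory, e.g. ~/.cache/nomic.ai/GPT4All on Linux.
        qDebug() << QStandardPaths::writableLocation(QStandardPaths::CacheLocation);

        // The settings directory the old code wrote models.json into,
        // e.g. ~/.config/nomic.ai on Linux (where the .ini file lives).
        qDebug() << QFileInfo(QSettings().fileName()).absolutePath();
        return 0;
    }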

@@ -28,6 +28,7 @@
 #include <QSettings>
 #include <QSslConfiguration>
 #include <QSslSocket>
+#include <QStandardPaths>
 #include <QStringList>
 #include <QTextStream>
 #include <QTimer>
@@ -44,6 +45,8 @@ using namespace Qt::Literals::StringLiterals;
 //#define USE_LOCAL_MODELSJSON
 
+#define MODELS_JSON_VERSION "3"
+
 static const QStringList FILENAME_BLACKLIST { u"gpt4all-nomic-embed-text-v1.rmodel"_s };
 
 QString ModelInfo::id() const
 {
@@ -1382,15 +1385,32 @@ void ModelList::updateModelsFromDirectory()
     }
 }
 
-#define MODELS_VERSION 3
+static QString modelsJsonFilename()
+{
+    return QStringLiteral("models" MODELS_JSON_VERSION ".json");
+}
+
+static std::optional<QFile> modelsJsonCacheFile()
+{
+    constexpr auto loc = QStandardPaths::CacheLocation;
+    QString modelsJsonFname = modelsJsonFilename();
+    if (auto path = QStandardPaths::locate(loc, modelsJsonFname); !path.isEmpty())
+        return std::make_optional<QFile>(path);
+    if (auto path = QStandardPaths::writableLocation(loc); !path.isEmpty())
+        return std::make_optional<QFile>(path + u'/' + modelsJsonFname);
+
+    return std::nullopt;
+}
 
 void ModelList::updateModelsFromJson()
 {
+    QString modelsJsonFname = modelsJsonFilename();
+
 #if defined(USE_LOCAL_MODELSJSON)
-    QUrl jsonUrl("file://" + QDir::homePath() + u"/dev/large_language_models/gpt4all/gpt4all-chat/metadata/models%1.json"_s.arg(MODELS_VERSION));
+    QUrl jsonUrl(u"file://%1/dev/large_language_models/gpt4all/gpt4all-chat/metadata/%2"_s.arg(QDir::homePath(), modelsJsonFname));
 #else
-    QUrl jsonUrl(u"http://gpt4all.io/models/models%1.json"_s.arg(MODELS_VERSION));
+    QUrl jsonUrl(u"http://gpt4all.io/models/%1"_s.arg(modelsJsonFname));
 #endif
     QNetworkRequest request(jsonUrl);
     QSslConfiguration conf = request.sslConfiguration();
     conf.setPeerVerifyMode(QSslSocket::VerifyNone);
@@ -1409,18 +1429,15 @@ void ModelList::updateModelsFromJson()
         qWarning() << "WARNING: Could not download models.json synchronously";
         updateModelsFromJsonAsync();
 
-        QSettings settings;
-        QFileInfo info(settings.fileName());
-        QString dirPath = info.canonicalPath();
-        const QString modelsConfig = dirPath + "/models.json";
-        QFile file(modelsConfig);
-        if (!file.open(QIODeviceBase::ReadOnly)) {
-            qWarning() << "ERROR: Couldn't read models config file: " << modelsConfig;
-        } else {
-            QByteArray jsonData = file.readAll();
-            file.close();
+        auto cacheFile = modelsJsonCacheFile();
+        if (!cacheFile) {
+            // no known location
+        } else if (cacheFile->open(QIODeviceBase::ReadOnly)) {
+            QByteArray jsonData = cacheFile->readAll();
+            cacheFile->close();
             parseModelsJsonFile(jsonData, false);
-        }
+        } else if (cacheFile->exists())
+            qWarning() << "ERROR: Couldn't read models.json cache file: " << cacheFile->fileName();
     }
     delete jsonReply;
 }
@@ -1429,12 +1446,14 @@ void ModelList::updateModelsFromJsonAsync()
 {
     m_asyncModelRequestOngoing = true;
     emit asyncModelRequestOngoingChanged();
 
+    QString modelsJsonFname = modelsJsonFilename();
+
 #if defined(USE_LOCAL_MODELSJSON)
-    QUrl jsonUrl("file://" + QDir::homePath() + u"/dev/large_language_models/gpt4all/gpt4all-chat/metadata/models%1.json"_s.arg(MODELS_VERSION));
+    QUrl jsonUrl(u"file://%1/dev/large_language_models/gpt4all/gpt4all-chat/metadata/%2"_s.arg(QDir::homePath(), modelsJsonFname));
 #else
-    QUrl jsonUrl(u"http://gpt4all.io/models/models%1.json"_s.arg(MODELS_VERSION));
+    QUrl jsonUrl(u"http://gpt4all.io/models/%1"_s.arg(modelsJsonFname));
 #endif
     QNetworkRequest request(jsonUrl);
     QSslConfiguration conf = request.sslConfiguration();
     conf.setPeerVerifyMode(QSslSocket::VerifyNone);
@@ -1497,17 +1516,14 @@ void ModelList::parseModelsJsonFile(const QByteArray &jsonData, bool save)
     }
 
     if (save) {
-        QSettings settings;
-        QFileInfo info(settings.fileName());
-        QString dirPath = info.canonicalPath();
-        const QString modelsConfig = dirPath + "/models.json";
-        QFile file(modelsConfig);
-        if (!file.open(QIODeviceBase::WriteOnly)) {
-            qWarning() << "ERROR: Couldn't write models config file: " << modelsConfig;
-        } else {
-            file.write(jsonData);
-            file.close();
-        }
+        auto cacheFile = modelsJsonCacheFile();
+        if (!cacheFile) {
+            // no known location
+        } else if (QFileInfo(*cacheFile).dir().mkpath(u"."_s) && cacheFile->open(QIODeviceBase::WriteOnly)) {
+            cacheFile->write(jsonData);
+            cacheFile->close();
+        } else
+            qWarning() << "ERROR: Couldn't write models config file: " << cacheFile->fileName();
     }
 
     QJsonArray jsonArray = document.array();
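
One note on the write path: QStandardPaths::writableLocation only returns the
name of the cache directory; it does not create it. That is why
parseModelsJsonFile calls QFileInfo(*cacheFile).dir().mkpath(u"."_s) before
opening the file, so the first save on a fresh install does not fail. A
minimal sketch of the same idiom (writeCacheFile is a hypothetical helper,
not part of the commit):

    // Hypothetical helper illustrating the create-then-write idiom used above.
    #include <QByteArray>
    #include <QDir>
    #include <QFile>
    #include <QFileInfo>
    #include <QStandardPaths>
    #include <QString>

    using namespace Qt::Literals::StringLiterals;

    static bool writeCacheFile(const QString &fileName, const QByteArray &data)
    {
        QString dir = QStandardPaths::writableLocation(QStandardPaths::CacheLocation);
        if (dir.isEmpty())
            return false; // no known cache location on this platform

        QFile file(dir + u'/' + fileName);
        // mkpath(".") creates the file's directory and any missing parents;
        // it returns true if the directory already exists.
        if (!QFileInfo(file).dir().mkpath(u"."_s))
            return false;

        if (!file.open(QIODeviceBase::WriteOnly))
            return false;
        qint64 written = file.write(data);
        file.close();
        return written == data.size();
    }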