mirror of https://github.com/nomic-ai/gpt4all.git (synced 2025-08-06 10:33:38 +00:00)
Prefer 7b models in order of default model load.

parent 5df4f1bf8c
commit 60627bd41f
@@ -161,16 +161,6 @@ int InstalledModels::count() const
     return rowCount();
 }
 
-QString InstalledModels::firstId() const
-{
-    if (rowCount() > 0) {
-        QModelIndex firstIndex = index(0, 0);
-        return sourceModel()->data(firstIndex, ModelList::IdRole).toString();
-    } else {
-        return QString();
-    }
-}
-
 DownloadableModels::DownloadableModels(QObject *parent)
     : QSortFilterProxyModel(parent)
     , m_expanded(false)
@@ -298,12 +288,9 @@ ModelInfo ModelList::defaultModelInfo() const
     settings.sync();
 
     // The user default model can be set by the user in the settings dialog. The "default" user
-    // default model is "Application default" which signals we should use the default model that was
-    // specified by the models.json file.
+    // default model is "Application default" which signals we should use the logic here.
     const QString userDefaultModelName = MySettings::globalInstance()->userDefaultModel();
     const bool hasUserDefaultName = !userDefaultModelName.isEmpty() && userDefaultModelName != "Application default";
-    const QString defaultModelName = settings.value("defaultModel").toString();
-    const bool hasDefaultName = hasUserDefaultName ? false : !defaultModelName.isEmpty();
 
     ModelInfo *defaultModel = nullptr;
     for (ModelInfo *info : m_models) {
@@ -311,12 +298,10 @@ ModelInfo ModelList::defaultModelInfo() const
             continue;
         defaultModel = info;
 
-        // If we don't have either setting, then just use the first model that is installed
-        if (!hasUserDefaultName && !hasDefaultName)
-            break;
+        const size_t ramrequired = defaultModel->ramrequired;
 
-        // If we don't have a user specified default, but *do* have a default setting and match, then use it
-        if (!hasUserDefaultName && hasDefaultName && (defaultModel->id() == defaultModelName))
+        // If we don't have either setting, then just use the first model that requires less than 16GB that is installed
+        if (!hasUserDefaultName && !info->isChatGPT && ramrequired > 0 && ramrequired < 16)
             break;
 
         // If we have a user specified default and match, then use it
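Taken together, the two hunks above change ModelList::defaultModelInfo() so that, when the user has not pinned a default in settings, the first installed model that is not ChatGPT-backed and reports a RAM requirement under 16 GB (in practice the 7B quantizations) wins, instead of whatever "defaultModel" value happened to be persisted in QSettings. Below is a minimal standalone sketch of that selection rule; the Model struct, pickDefault() helper, and the example ids are hypothetical stand-ins for ModelInfo and the real member function, and only the ordering and the comparison mirror the patch.

    #include <cstddef>
    #include <iostream>
    #include <string>
    #include <vector>

    // Hypothetical stand-in for ModelInfo; field names follow the patch.
    struct Model {
        std::string id;
        bool installed = false;
        bool isChatGPT = false;
        std::size_t ramrequired = 0;   // rough requirement in GB, 0 = unknown
    };

    // Sketch of the post-patch preference: walk the models in order, remember the
    // last installed one as a fallback, and stop early at the first non-ChatGPT
    // model needing less than 16 GB, unless the user pinned a default by id.
    std::string pickDefault(const std::vector<Model> &models, const std::string &userDefault)
    {
        const Model *defaultModel = nullptr;
        const bool hasUserDefault = !userDefault.empty();
        for (const Model &m : models) {
            if (!m.installed)
                continue;
            defaultModel = &m;
            if (!hasUserDefault && !m.isChatGPT && m.ramrequired > 0 && m.ramrequired < 16)
                break;                      // first 7B-class model wins
            if (hasUserDefault && m.id == userDefault)
                break;                      // user-specified default wins
        }
        return defaultModel ? defaultModel->id : std::string();
    }

    int main()
    {
        // Hypothetical ids, purely for illustration.
        const std::vector<Model> models = {
            {"example-13b", true, false, 16},
            {"example-7b",  true, false,  8},
        };
        std::cout << pickDefault(models, "") << '\n';   // prints "example-7b"
    }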
@@ -847,14 +832,6 @@ void ModelList::updateModelsFromDirectory()
     processDirectory(exePath);
     if (localPath != exePath)
         processDirectory(localPath);
-
-    if (installedModels()->count()) {
-        const QString firstModel =
-            installedModels()->firstId();
-        QSettings settings;
-        settings.setValue("defaultModel", firstModel);
-        settings.sync();
-    }
 }
 
 void ModelList::updateModelsFromJson()
@@ -1132,14 +1109,6 @@ void ModelList::parseModelsJsonFile(const QByteArray &jsonData, bool save)
         updateData(id, ModelList::QuantRole, "NA");
         updateData(id, ModelList::TypeRole, "GPT");
     }
-
-    if (installedModels()->count()) {
-        const QString firstModel =
-            installedModels()->firstId();
-        QSettings settings;
-        settings.setValue("defaultModel", firstModel);
-        settings.sync();
-    }
 }
 
 void ModelList::updateModelsFromSettings()

@@ -127,7 +127,6 @@ class InstalledModels : public QSortFilterProxyModel
 public:
     explicit InstalledModels(QObject *parent);
     int count() const;
-    QString firstId() const;
 
 Q_SIGNALS:
     void countChanged();
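With firstId() gone from both the implementation and this header hunk, the proxy's public surface, as far as the hunk shows, reduces to the constructor, count(), and the countChanged() signal. A sketch of the trimmed declaration, assuming the usual Q_OBJECT boilerplate and omitting any members the hunk does not show:

    #include <QSortFilterProxyModel>

    // Reconstructed from the hunk above; Q_OBJECT is assumed and any other
    // members of the real class are omitted.
    class InstalledModels : public QSortFilterProxyModel
    {
        Q_OBJECT
    public:
        explicit InstalledModels(QObject *parent);
        int count() const;   // firstId() was removed in this commit

    Q_SIGNALS:
        void countChanged();
    };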