diff --git a/deps/CMakeLists.txt b/deps/CMakeLists.txt
index 21ece673..6888ac28 100644
--- a/deps/CMakeLists.txt
+++ b/deps/CMakeLists.txt
@@ -8,7 +8,6 @@ set(QCORO_BUILD_EXAMPLES OFF)
set(QCORO_WITH_QTDBUS OFF)
set(QCORO_WITH_QTWEBSOCKETS OFF)
set(QCORO_WITH_QTQUICK OFF)
-set(QCORO_WITH_QML OFF)
set(QCORO_WITH_QTTEST OFF)
add_subdirectory(qcoro)
diff --git a/gpt4all-chat/CMakeLists.txt b/gpt4all-chat/CMakeLists.txt
index d29af3ef..36c0a033 100644
--- a/gpt4all-chat/CMakeLists.txt
+++ b/gpt4all-chat/CMakeLists.txt
@@ -249,7 +249,7 @@ qt_add_executable(chat
src/localdocs.cpp src/localdocs.h
src/localdocsmodel.cpp src/localdocsmodel.h
src/logger.cpp src/logger.h
- src/main.cpp
+ src/main.cpp src/main.h
src/modellist.cpp src/modellist.h
src/mysettings.cpp src/mysettings.h
src/network.cpp src/network.h
@@ -467,7 +467,7 @@ else()
endif()
target_link_libraries(chat PRIVATE
Boost::describe Boost::json Boost::system
- QCoro6::Core QCoro6::Network
+ QCoro6::Core QCoro6::Network QCoro6::Qml
QXlsx
SingleApplication
duckx::duckx
diff --git a/gpt4all-chat/qml/AddRemoteModelView.qml b/gpt4all-chat/qml/AddRemoteModelView.qml
index bca28199..924ddc6e 100644
--- a/gpt4all-chat/qml/AddRemoteModelView.qml
+++ b/gpt4all-chat/qml/AddRemoteModelView.qml
@@ -48,92 +48,41 @@ ColumnLayout {
bottomPadding: 20
property int childWidth: 330 * theme.fontScale
property int childHeight: 400 + 166 * theme.fontScale
- RemoteModelCard {
- width: parent.childWidth
- height: parent.childHeight
- providerBaseUrl: "https://api.groq.com/openai/v1/"
- providerName: qsTr("Groq")
- providerImage: "qrc:/gpt4all/icons/groq.svg"
- providerDesc: qsTr('Groq offers a high-performance AI inference engine designed for low-latency and efficient processing. Optimized for real-time applications, Groq’s technology is ideal for users who need fast responses from open large language models and other AI workloads.
Get your API key: https://groq.com/')
- modelWhitelist: [
- // last updated 2025-02-24
- "deepseek-r1-distill-llama-70b",
- "deepseek-r1-distill-qwen-32b",
- "gemma2-9b-it",
- "llama-3.1-8b-instant",
- "llama-3.2-1b-preview",
- "llama-3.2-3b-preview",
- "llama-3.3-70b-specdec",
- "llama-3.3-70b-versatile",
- "llama3-70b-8192",
- "llama3-8b-8192",
- "mixtral-8x7b-32768",
- "qwen-2.5-32b",
- "qwen-2.5-coder-32b",
- ]
- }
- RemoteModelCard {
- width: parent.childWidth
- height: parent.childHeight
- providerBaseUrl: "https://api.openai.com/v1/"
- providerName: qsTr("OpenAI")
- providerImage: "qrc:/gpt4all/icons/openai.svg"
- providerDesc: qsTr('OpenAI provides access to advanced AI models, including GPT-4 supporting a wide range of applications, from conversational AI to content generation and code completion.
Get your API key: https://openai.com/')
- modelWhitelist: [
- // last updated 2025-02-24
- "gpt-3.5-turbo",
- "gpt-3.5-turbo-16k",
- "gpt-4",
- "gpt-4-32k",
- "gpt-4-turbo",
- "gpt-4o",
- ]
- }
- RemoteModelCard {
- width: parent.childWidth
- height: parent.childHeight
- providerBaseUrl: "https://api.mistral.ai/v1/"
- providerName: qsTr("Mistral")
- providerImage: "qrc:/gpt4all/icons/mistral.svg"
- providerDesc: qsTr('Mistral AI specializes in efficient, open-weight language models optimized for various natural language processing tasks. Their models are designed for flexibility and performance, making them a solid option for applications requiring scalable AI solutions.
Get your API key: https://mistral.ai/')
- modelWhitelist: [
- // last updated 2025-02-24
- "codestral-2405",
- "codestral-2411-rc5",
- "codestral-2412",
- "codestral-2501",
- "codestral-latest",
- "codestral-mamba-2407",
- "codestral-mamba-latest",
- "ministral-3b-2410",
- "ministral-3b-latest",
- "ministral-8b-2410",
- "ministral-8b-latest",
- "mistral-large-2402",
- "mistral-large-2407",
- "mistral-large-2411",
- "mistral-large-latest",
- "mistral-medium-2312",
- "mistral-medium-latest",
- "mistral-saba-2502",
- "mistral-saba-latest",
- "mistral-small-2312",
- "mistral-small-2402",
- "mistral-small-2409",
- "mistral-small-2501",
- "mistral-small-latest",
- "mistral-tiny-2312",
- "mistral-tiny-2407",
- "mistral-tiny-latest",
- "open-codestral-mamba",
- "open-mistral-7b",
- "open-mistral-nemo",
- "open-mistral-nemo-2407",
- "open-mixtral-8x22b",
- "open-mixtral-8x22b-2404",
- "open-mixtral-8x7b",
- ]
+ Repeater {
+ model: BuiltinProviderList
+ delegate: RemoteModelCard {
+ required property var data
+ width: parent.childWidth
+ height: parent.childHeight
+ provider: data
+ providerBaseUrl: data.baseUrl
+ providerName: data.name
+ providerImage: data.icon
+ providerDesc: ({
+ '{20f963dc-1f99-441e-ad80-f30a0a06bcac}': qsTr(
+ 'Groq offers a high-performance AI inference engine designed for low-latency and ' +
+ 'efficient processing. Optimized for real-time applications, Groq’s technology is ideal ' +
+ 'for users who need fast responses from open large language models and other AI ' +
+ 'workloads.' +
+ '\nGet your API key: ' +
+ 'https://groq.com/'
+ ),
+ '{6f874c3a-f1ad-47f7-9129-755c5477146c}': qsTr(
+ 'OpenAI provides access to advanced AI models, including GPT-4 supporting a wide range ' +
+ 'of applications, from conversational AI to content generation and code completion.' +
+ '\n' +
+ 'Get your API key: ' +
+ 'https://openai.com/'
+ ),
+ '{7ae617b3-c0b2-4d2c-9ff2-bc3f049494cc}': qsTr(
+ 'Mistral AI specializes in efficient, open-weight language models optimized for various ' +
+ 'natural language processing tasks. Their models are designed for flexibility and ' +
+ 'performance, making them a solid option for applications requiring scalable AI ' +
+ 'solutions.' +
+ '\nGet your API key: https://mistral.ai/'
+ ),
+ })[data.id.toString()]
+ modelWhitelist: data.modelWhitelist
+ }
}
+ /*
RemoteModelCard {
width: parent.childWidth
height: parent.childHeight
@@ -142,6 +91,7 @@ ColumnLayout {
providerImage: "qrc:/gpt4all/icons/antenna_3.svg"
providerDesc: qsTr("The custom provider option allows users to connect their own OpenAI-compatible AI models or third-party inference services. This is useful for organizations with proprietary models or those leveraging niche AI providers not listed here.")
}
+ */
}
}
}
diff --git a/gpt4all-chat/qml/RemoteModelCard.qml b/gpt4all-chat/qml/RemoteModelCard.qml
index e8c63765..f8585264 100644
--- a/gpt4all-chat/qml/RemoteModelCard.qml
+++ b/gpt4all-chat/qml/RemoteModelCard.qml
@@ -18,6 +18,7 @@ import localdocs
Rectangle {
+ required property var provider
property alias providerName: providerNameLabel.text
property alias providerImage: myimage.source
property alias providerDesc: providerDescLabel.text
@@ -100,18 +101,23 @@ Rectangle {
Layout.fillWidth: true
font.pixelSize: theme.fontSizeLarge
wrapMode: Text.WrapAnywhere
+ echoMode: TextField.Password
function showError() {
messageToast.show(qsTr("ERROR: $API_KEY is empty."));
apiKeyField.placeholderTextColor = theme.textErrorColor;
}
+ Component.onCompleted: { text = provider.apiKey; }
onTextChanged: {
apiKeyField.placeholderTextColor = theme.mutedTextColor;
- if (!providerIsCustom) {
- let models = ModelList.remoteModelList(apiKeyField.text, providerBaseUrl);
- if (modelWhitelist !== null)
- models = models.filter(m => modelWhitelist.includes(m));
- myModelList.model = models;
- myModelList.currentIndex = -1;
+ if (!providerIsCustom && provider.setApiKeyQml(text)) {
+ provider.listModelsQml().then(models => {
+ if (models !== null) {
+ if (modelWhitelist !== null)
+ models = models.filter(m => modelWhitelist.includes(m));
+ myModelList.model = models;
+ myModelList.currentIndex = -1;
+ }
+ });
}
}
placeholderText: qsTr("enter $API_KEY")
diff --git a/gpt4all-chat/src/llmodel_ollama.h b/gpt4all-chat/src/llmodel_ollama.h
index bfd94334..6768badb 100644
--- a/gpt4all-chat/src/llmodel_ollama.h
+++ b/gpt4all-chat/src/llmodel_ollama.h
@@ -38,7 +38,8 @@ protected:
};
class OllamaProvider : public QObject, public virtual ModelProvider {
- Q_OBJECT
+ Q_GADGET
+ Q_PROPERTY(QUuid id READ id CONSTANT)
public:
~OllamaProvider() noexcept override = 0;
@@ -63,6 +64,8 @@ public:
class OllamaProviderCustom final : public OllamaProvider, public ModelProviderCustom {
Q_OBJECT
+ Q_PROPERTY(QString name READ name NOTIFY nameChanged )
+ Q_PROPERTY(QUrl baseUrl READ baseUrl NOTIFY baseUrlChanged)
public:
/// Load an existing OllamaProvider from disk.
diff --git a/gpt4all-chat/src/llmodel_openai.cpp b/gpt4all-chat/src/llmodel_openai.cpp
index b4f9f6c9..7252654f 100644
--- a/gpt4all-chat/src/llmodel_openai.cpp
+++ b/gpt4all-chat/src/llmodel_openai.cpp
@@ -1,10 +1,13 @@
#include "llmodel_openai.h"
+#include "main.h"
#include "mysettings.h"
#include "utils.h"
#include // IWYU pragma: keep
#include // IWYU pragma: keep
+#include // IWYU pragma: keep
+#include // IWYU pragma: keep
#include
#include // IWYU pragma: keep
#include
@@ -36,6 +39,7 @@
#include
#include
+namespace json = boost::json;
using namespace Qt::Literals::StringLiterals;
//#define DEBUG
@@ -86,6 +90,14 @@ auto OpenaiGenerationParams::toMap() const -> QMap
OpenaiProvider::~OpenaiProvider() noexcept = default;
+bool OpenaiProvider::setApiKeyQml(QString value)
+{
+ auto res = setApiKey(std::move(value));
+ if (!res)
+ qWarning().noquote() << "setApiKey failed:" << res.error().errorString();
+ return bool(res);
+}
+
auto OpenaiProvider::supportedGenerationParams() const -> QSet
{
using enum GenerationParam;
@@ -96,9 +108,61 @@ auto OpenaiProvider::makeGenerationParams(const QMap
-> OpenaiGenerationParams *
{ return new OpenaiGenerationParams(values); }
-OpenaiProviderBuiltin::OpenaiProviderBuiltin(ProviderStore *store, QUuid id, QString name, QUrl baseUrl)
+auto OpenaiProvider::listModels() -> QCoro::Task>
+{
+ auto *nam = networkAccessManager();
+
+ QNetworkRequest request(m_baseUrl.resolved(u"models"_s));
+ request.setHeader (QNetworkRequest::ContentTypeHeader, "application/json"_ba);
+ request.setRawHeader("Authorization"_ba, fmt::format("Bearer {}", m_apiKey).c_str());
+
+ std::unique_ptr reply(nam->get(request));
+ QRestReply restReply(reply.get());
+
+ if (reply->error())
+ co_return std::unexpected(&restReply);
+
+ QStringList models;
+ try {
+ json::stream_parser parser;
+ auto coroReply = qCoro(*reply);
+ for (;;) {
+ auto chunk = co_await coroReply.readAll();
+ if (!restReply.isSuccess())
+ co_return std::unexpected(&restReply);
+ if (chunk.isEmpty()) {
+ Q_ASSERT(reply->atEnd());
+ break;
+ }
+ parser.write(chunk.data(), chunk.size());
+ }
+ parser.finish();
+ auto resp = parser.release().as_object();
+ for (auto &entry : resp.at("data").as_array())
+ models << json::value_to(entry.at("id"));
+ } catch (const boost::system::system_error &e) {
+ co_return std::unexpected(e);
+ }
+ co_return models;
+}
+
+QCoro::QmlTask OpenaiProvider::listModelsQml()
+{
+ return [this]() -> QCoro::Task {
+ auto result = co_await listModels();
+ if (result)
+ co_return *result;
+ qWarning().noquote() << "OpenaiProvider::listModels failed:" << result.error().errorString();
+ co_return QVariant::fromValue(nullptr);
+ }();
+}
+
+OpenaiProviderBuiltin::OpenaiProviderBuiltin(ProviderStore *store, QUuid id, QString name, QUrl icon, QUrl baseUrl,
+ QStringList modelWhitelist)
: ModelProvider(std::move(id), std::move(name), std::move(baseUrl))
+ , ModelProviderBuiltin(std::move(icon))
, ModelProviderMutable(store)
+ , m_modelWhitelist(std::move(modelWhitelist))
{
auto res = m_store->acquire(m_id);
if (!res)
diff --git a/gpt4all-chat/src/llmodel_openai.h b/gpt4all-chat/src/llmodel_openai.h
index 79746853..5d5e790e 100644
--- a/gpt4all-chat/src/llmodel_openai.h
+++ b/gpt4all-chat/src/llmodel_openai.h
@@ -4,9 +4,13 @@
#include "llmodel_description.h"
#include "llmodel_provider.h"
+#include // IWYU pragma: keep
+#include
+
#include // IWYU pragma: keep
#include // IWYU pragma: keep
#include
+#include // IWYU pragma: keep
#include
#include
#include // IWYU pragma: keep
@@ -17,6 +21,7 @@
class QNetworkAccessManager;
template class QMap;
template class QSet;
+namespace QCoro { template class Task; }
namespace gpt4all::ui {
@@ -42,7 +47,8 @@ protected:
class OpenaiProvider : public QObject, public virtual ModelProvider {
Q_OBJECT
- Q_PROPERTY(QString apiKey READ apiKey WRITE setApiKey NOTIFY apiKeyChanged)
+ Q_PROPERTY(QUuid id READ id CONSTANT )
+ Q_PROPERTY(QString apiKey READ apiKey NOTIFY apiKeyChanged)
protected:
explicit OpenaiProvider() = default;
@@ -57,11 +63,15 @@ public:
[[nodiscard]] const QString &apiKey() const { return m_apiKey; }
- virtual void setApiKey(QString value) = 0;
+ [[nodiscard]] virtual DataStoreResult<> setApiKey(QString value) = 0;
+ Q_INVOKABLE bool setApiKeyQml(QString value);
auto supportedGenerationParams() const -> QSet override;
auto makeGenerationParams(const QMap &values) const -> OpenaiGenerationParams * override;
+ auto listModels() -> QCoro::Task>;
+ Q_INVOKABLE QCoro::QmlTask listModelsQml();
+
Q_SIGNALS:
void apiKeyChanged(const QString &value);
@@ -69,23 +79,33 @@ protected:
QString m_apiKey;
};
-class OpenaiProviderBuiltin : public OpenaiProvider, public ModelProviderMutable {
+class OpenaiProviderBuiltin : public OpenaiProvider, public ModelProviderBuiltin, public ModelProviderMutable {
Q_OBJECT
- Q_PROPERTY(QString name READ name CONSTANT)
- Q_PROPERTY(QUrl baseUrl READ baseUrl CONSTANT)
+ Q_PROPERTY(QString name READ name CONSTANT)
+ Q_PROPERTY(QUrl icon READ icon CONSTANT)
+ Q_PROPERTY(QUrl baseUrl READ baseUrl CONSTANT)
+ Q_PROPERTY(QStringList modelWhitelist READ modelWhitelist CONSTANT)
public:
/// Create a new built-in OpenAI provider, loading its API key from disk if known.
- explicit OpenaiProviderBuiltin(ProviderStore *store, QUuid id, QString name, QUrl baseUrl);
+ explicit OpenaiProviderBuiltin(ProviderStore *store, QUuid id, QString name, QUrl icon, QUrl baseUrl,
+ QStringList modelWhitelist);
- void setApiKey(QString value) override { setMemberProp(&OpenaiProviderBuiltin::m_apiKey, "apiKey", std::move(value)); }
+ [[nodiscard]] const QStringList &modelWhitelist() const { return m_modelWhitelist; }
+
+ [[nodiscard]] DataStoreResult<> setApiKey(QString value) override
+ { return setMemberProp(&OpenaiProviderBuiltin::m_apiKey, "apiKey", std::move(value), /*createName*/ m_name); }
protected:
auto asData() -> ModelProviderData override;
+
+ QStringList m_modelWhitelist;
};
class OpenaiProviderCustom final : public OpenaiProvider, public ModelProviderCustom {
Q_OBJECT
+ Q_PROPERTY(QString name READ name NOTIFY nameChanged )
+ Q_PROPERTY(QUrl baseUrl READ baseUrl NOTIFY baseUrlChanged)
public:
/// Load an existing OpenaiProvider from disk.
@@ -94,7 +114,8 @@ public:
/// Create a new OpenaiProvider on disk.
explicit OpenaiProviderCustom(ProviderStore *store, QString name, QUrl baseUrl, QString apiKey);
- void setApiKey(QString value) override { setMemberProp(&OpenaiProviderCustom::m_apiKey, "apiKey", std::move(value)); }
+ [[nodiscard]] DataStoreResult<> setApiKey(QString value) override
+ { return setMemberProp(&OpenaiProviderCustom::m_apiKey, "apiKey", std::move(value)); }
Q_SIGNALS:
void nameChanged (const QString &value);
diff --git a/gpt4all-chat/src/llmodel_provider.cpp b/gpt4all-chat/src/llmodel_provider.cpp
index d1d816cd..95ab16e2 100644
--- a/gpt4all-chat/src/llmodel_provider.cpp
+++ b/gpt4all-chat/src/llmodel_provider.cpp
@@ -60,13 +60,26 @@ ProviderRegistry::ProviderRegistry(PathSet paths)
load();
}
+namespace {
+ class ProviderRegistryInternal : public ProviderRegistry {};
+ Q_GLOBAL_STATIC(ProviderRegistryInternal, providerRegistry)
+}
+
+ProviderRegistry *ProviderRegistry::globalInstance()
+{ return providerRegistry(); }
+
void ProviderRegistry::load()
{
+ size_t i = 0;
for (auto &p : s_builtinProviders) { // (not all builtin providers are stored)
- auto provider = std::make_shared(&m_builtinStore, p.id, p.name, p.base_url);
+ auto provider = std::make_shared(
+ &m_builtinStore, p.id, p.name, p.icon, p.base_url,
+ QStringList(p.model_whitelist.begin(), p.model_whitelist.end())
+ );
auto [_, unique] = m_providers.emplace(p.id, std::move(provider));
if (!unique)
throw std::logic_error(fmt::format("duplicate builtin provider id: {}", p.id.toString()));
+ m_builtinProviders[i++] = p.id;
}
for (auto &p : m_customStore.list()) { // disk is source of truth for custom providers
if (!p.custom_details) {
@@ -91,11 +104,12 @@ void ProviderRegistry::load()
auto [_, unique] = m_providers.emplace(p.id, std::move(provider));
if (!unique)
qWarning() << "ignoring duplicate custom provider with id:" << p.id;
+ m_customProviders.push_back(std::make_unique(p.id));
}
}
[[nodiscard]]
-bool ProviderRegistry::add(std::unique_ptr provider)
+bool ProviderRegistry::add(std::shared_ptr provider)
{
auto [it, unique] = m_providers.emplace(provider->id(), std::move(provider));
if (unique) {
@@ -105,13 +119,21 @@ bool ProviderRegistry::add(std::unique_ptr provider)
return unique;
}
-auto ProviderRegistry::customProviderAt(size_t i) const -> const ModelProviderCustom *
+auto ProviderRegistry::customProviderAt(size_t i) const -> ModelProviderCustom *
{
auto it = m_providers.find(*m_customProviders.at(i));
Q_ASSERT(it != m_providers.end());
return &dynamic_cast(*it->second);
}
+auto ProviderRegistry::builtinProviderAt(size_t i) const -> ModelProviderBuiltin *
+{
+ auto it = m_providers.find(m_builtinProviders.at(i));
+ Q_ASSERT(it != m_providers.end());
+ return &dynamic_cast(*it->second);
+
+}
+
auto ProviderRegistry::getSubdirs() -> PathSet
{
auto *mysettings = MySettings::globalInstance();
@@ -135,19 +157,31 @@ void ProviderRegistry::onModelPathChanged()
}
}
-CustomProviderList::CustomProviderList(QPointer registry)
- : m_registry(std::move(registry) )
- , m_size (m_registry->customProviderCount())
+auto BuiltinProviderList::roleNames() const -> QHash
+{ return { { Qt::DisplayRole, "data"_ba } }; }
+
+QVariant BuiltinProviderList::data(const QModelIndex &index, int role) const
{
- connect(m_registry, &ProviderRegistry::customProviderAdded, this, &CustomProviderList::onCustomProviderAdded);
- connect(m_registry, &ProviderRegistry::aboutToBeCleared, this, &CustomProviderList::onAboutToBeCleared,
+ auto *registry = ProviderRegistry::globalInstance();
+ if (index.isValid() && index.row() < rowCount() && role == Qt::DisplayRole)
+ return QVariant::fromValue(registry->builtinProviderAt(index.row())->asQObject());
+ return {};
+}
+
+CustomProviderList::CustomProviderList()
+ : m_size(ProviderRegistry::globalInstance()->customProviderCount())
+{
+ auto *registry = ProviderRegistry::globalInstance();
+ connect(registry, &ProviderRegistry::customProviderAdded, this, &CustomProviderList::onCustomProviderAdded);
+ connect(registry, &ProviderRegistry::aboutToBeCleared, this, &CustomProviderList::onAboutToBeCleared,
Qt::DirectConnection);
}
QVariant CustomProviderList::data(const QModelIndex &index, int role) const
{
+ auto *registry = ProviderRegistry::globalInstance();
if (index.isValid() && index.row() < rowCount() && role == Qt::DisplayRole)
- return QVariant::fromValue(m_registry->customProviderAt(index.row()));
+ return QVariant::fromValue(registry->customProviderAt(index.row())->asQObject());
return {};
}
@@ -165,10 +199,10 @@ void CustomProviderList::onAboutToBeCleared()
endResetModel();
}
-bool CustomProviderListSort::lessThan(const QModelIndex &left, const QModelIndex &right) const
+bool ProviderListSort::lessThan(const QModelIndex &left, const QModelIndex &right) const
{
- auto *leftData = sourceModel()->data(left ).value();
- auto *rightData = sourceModel()->data(right).value();
+ auto *leftData = sourceModel()->data(left ).value();
+ auto *rightData = sourceModel()->data(right).value();
if (leftData && rightData)
return QString::localeAwareCompare(leftData->name(), rightData->name()) < 0;
return true;
diff --git a/gpt4all-chat/src/llmodel_provider.h b/gpt4all-chat/src/llmodel_provider.h
index 6ff6b9ef..1242baf8 100644
--- a/gpt4all-chat/src/llmodel_provider.h
+++ b/gpt4all-chat/src/llmodel_provider.h
@@ -6,7 +6,6 @@
#include
#include
-#include
#include // IWYU pragma: keep
#include
#include
@@ -18,12 +17,15 @@
#include
#include
#include
+#include
#include
#include
#include
#include
+class QByteArray;
class QJSEngine;
+template class QHash;
namespace gpt4all::ui {
@@ -60,9 +62,6 @@ protected:
};
class ModelProvider {
- Q_GADGET
- Q_PROPERTY(QUuid id READ id CONSTANT)
-
protected:
explicit ModelProvider(QUuid id, QString name, QUrl baseUrl) // create built-in or load
: m_id(std::move(id)), m_name(std::move(name)), m_baseUrl(std::move(baseUrl)) {}
@@ -92,10 +91,20 @@ protected:
QUrl m_baseUrl;
};
+class ModelProviderBuiltin : public virtual ModelProvider {
+protected:
+ explicit ModelProviderBuiltin(QUrl icon)
+ : m_icon(std::move(icon)) {}
+
+public:
+ [[nodiscard]] const QUrl &icon() const { return m_icon; }
+
+protected:
+ QUrl m_icon;
+};
+
// Mixin with no public interface providing basic load/save
class ModelProviderMutable : public virtual ModelProvider {
- Q_GADGET
-
protected:
explicit ModelProviderMutable(ProviderStore *store)
: m_store(store) {}
@@ -107,53 +116,54 @@ protected:
virtual auto asData() -> ModelProviderData = 0;
template
- void setMemberProp(this S &self, T C::* member, std::string_view name, T value);
+ [[nodiscard]] DataStoreResult<> setMemberProp(this S &self, T C::* member, std::string_view name, T value,
+ std::optional createName = {});
ProviderStore *m_store;
};
class ModelProviderCustom : public ModelProviderMutable {
- Q_GADGET
- Q_PROPERTY(QString name READ name WRITE setName NOTIFY nameChanged )
- Q_PROPERTY(QUrl baseUrl READ baseUrl WRITE setBaseUrl NOTIFY baseUrlChanged)
-
protected:
explicit ModelProviderCustom(ProviderStore *store)
: ModelProviderMutable(store) {}
public:
// setters
- void setName (QString value) { setMemberProp(&ModelProviderCustom::m_name, "name", std::move(value)); }
- void setBaseUrl(QUrl value) { setMemberProp(&ModelProviderCustom::m_baseUrl, "baseUrl", std::move(value)); }
+ [[nodiscard]] DataStoreResult<> setName (QString value)
+ { return setMemberProp(&ModelProviderCustom::m_name, "name", std::move(value)); }
+ [[nodiscard]] DataStoreResult<> setBaseUrl(QUrl value)
+ { return setMemberProp(&ModelProviderCustom::m_baseUrl, "baseUrl", std::move(value)); }
};
class ProviderRegistry : public QObject {
Q_OBJECT
- QML_ELEMENT
- QML_SINGLETON
private:
struct PathSet { std::filesystem::path builtin, custom; };
struct BuiltinProviderData {
- QUuid id;
- QString name;
- QUrl base_url;
+ QUuid id;
+ QString name;
+ QUrl icon;
+ QUrl base_url;
+ std::span model_whitelist;
};
protected:
explicit ProviderRegistry(PathSet paths);
+ explicit ProviderRegistry(): ProviderRegistry(getSubdirs()) {}
public:
- static ProviderRegistry *create(QQmlEngine *, QJSEngine *) { return new ProviderRegistry(getSubdirs()); }
- [[nodiscard]] bool add(std::unique_ptr provider);
+ static ProviderRegistry *globalInstance();
+ [[nodiscard]] bool add(std::shared_ptr provider);
+
+ auto operator[](const QUuid &id) -> const ModelProvider * { return m_providers.at(id).get(); }
// TODO(jared): implement a way to remove custom providers via the model
- [[nodiscard]] size_t customProviderCount() const
- { return m_customProviders.size(); }
- [[nodiscard]] auto customProviderAt(size_t i) const -> const ModelProviderCustom *;
- auto operator[](const QUuid &id) -> ModelProviderCustom *
- { return &dynamic_cast(*m_providers.at(id)); }
+ [[nodiscard]] size_t customProviderCount () const { return m_customProviders.size(); }
+ [[nodiscard]] auto customProviderAt (size_t i) const -> ModelProviderCustom *;
+ [[nodiscard]] size_t builtinProviderCount() const { return m_builtinProviders.size(); }
+ [[nodiscard]] auto builtinProviderAt (size_t i) const -> ModelProviderBuiltin *;
Q_SIGNALS:
void customProviderAdded(size_t index);
@@ -174,15 +184,36 @@ private:
ProviderStore m_builtinStore;
std::unordered_map> m_providers;
std::vector> m_customProviders;
+ std::array m_builtinProviders;
+};
+
+// TODO: api keys are allowed to change for here and also below. That should emit dataChanged.
+class BuiltinProviderList : public QAbstractListModel {
+ Q_OBJECT
+ QML_SINGLETON
+ QML_ELEMENT
+
+public:
+ explicit BuiltinProviderList()
+ : m_size(ProviderRegistry::globalInstance()->builtinProviderCount()) {}
+
+ static BuiltinProviderList *create(QQmlEngine *, QJSEngine *) { return new BuiltinProviderList(); }
+
+ auto roleNames() const -> QHash override;
+ int rowCount(const QModelIndex &parent = {}) const override
+ { Q_UNUSED(parent) return int(m_size); }
+ QVariant data(const QModelIndex &index, int role) const override;
+
+private:
+ size_t m_size;
};
class CustomProviderList : public QAbstractListModel {
Q_OBJECT
-protected:
- explicit CustomProviderList(QPointer registry);
-
public:
+ explicit CustomProviderList();
+
int rowCount(const QModelIndex &parent = {}) const override
{ Q_UNUSED(parent) return int(m_size); }
QVariant data(const QModelIndex &index, int role) const override;
@@ -192,15 +223,28 @@ private Q_SLOTS:
void onAboutToBeCleared();
private:
- QPointer m_registry;
- size_t m_size;
+ size_t m_size;
};
-class CustomProviderListSort : public QSortFilterProxyModel {
+// todo: don't have singletons use singletons directly
+// TODO: actually use the provider sort, here, rather than unsorted, for builtins
+class ProviderListSort : public QSortFilterProxyModel {
Q_OBJECT
+ QML_SINGLETON
+ QML_ELEMENT
+
+private:
+ explicit ProviderListSort() { setSourceModel(&m_model); }
+
+public:
+ static ProviderListSort *create(QQmlEngine *, QJSEngine *) { return new ProviderListSort(); }
protected:
bool lessThan(const QModelIndex &left, const QModelIndex &right) const override;
+
+private:
+ // TODO: support custom providers as well
+ BuiltinProviderList m_model;
};
diff --git a/gpt4all-chat/src/llmodel_provider.inl b/gpt4all-chat/src/llmodel_provider.inl
index 94a078c3..fda1c02e 100644
--- a/gpt4all-chat/src/llmodel_provider.inl
+++ b/gpt4all-chat/src/llmodel_provider.inl
@@ -13,17 +13,19 @@ void GenerationParams::tryParseValue(this S &self, QMap
-void ModelProviderMutable::setMemberProp(this S &self, T C::* member, std::string_view name, T value)
+auto ModelProviderMutable::setMemberProp(this S &self, T C::* member, std::string_view name, T value,
+ std::optional createName) -> DataStoreResult<>
{
auto &mpc = static_cast(self);
auto &cur = self.*member;
if (cur != value) {
cur = std::move(value);
auto data = mpc.asData();
- if (auto res = mpc.m_store->setData(std::move(data)); !res)
- res.error().raise();
+ if (auto res = mpc.m_store->setData(std::move(data), createName); !res)
+ return res;
QMetaObject::invokeMethod(self.asQObject(), fmt::format("{}Changed", name).c_str(), cur);
}
+ return {};
}
diff --git a/gpt4all-chat/src/llmodel_provider_builtins.cpp b/gpt4all-chat/src/llmodel_provider_builtins.cpp
index 77367ee1..ee5e2842 100644
--- a/gpt4all-chat/src/llmodel_provider_builtins.cpp
+++ b/gpt4all-chat/src/llmodel_provider_builtins.cpp
@@ -6,27 +6,94 @@ using namespace Qt::StringLiterals;
namespace gpt4all::ui {
-// TODO: use these in the constructor of ProviderRegistry
-// TODO: we have to be careful to reserve these names for ProviderStore purposes, so the user can't write JSON files that alias them.
-// this *is a problem*, because we want to be able to safely introduce these.
-// so we need a different namespace, i.e. a *different directory*.
+static const QString MODEL_WHITELIST_GROQ[] {
+ // last updated 2025-02-24
+ u"deepseek-r1-distill-llama-70b"_s,
+ u"deepseek-r1-distill-qwen-32b"_s,
+ u"gemma2-9b-it"_s,
+ u"llama-3.1-8b-instant"_s,
+ u"llama-3.2-1b-preview"_s,
+ u"llama-3.2-3b-preview"_s,
+ u"llama-3.3-70b-specdec"_s,
+ u"llama-3.3-70b-versatile"_s,
+ u"llama3-70b-8192"_s,
+ u"llama3-8b-8192"_s,
+ u"mixtral-8x7b-32768"_s,
+ u"qwen-2.5-32b"_s,
+ u"qwen-2.5-coder-32b"_s,
+};
+
+static const QString MODEL_WHITELIST_OPENAI[] {
+ // last updated 2025-02-24
+ "gpt-3.5-turbo",
+ "gpt-3.5-turbo-16k",
+ "gpt-4",
+ "gpt-4-32k",
+ "gpt-4-turbo",
+ "gpt-4o",
+};
+
+static const QString MODEL_WHITELIST_MISTRAL[] {
+ // last updated 2025-02-24
+ "codestral-2405",
+ "codestral-2411-rc5",
+ "codestral-2412",
+ "codestral-2501",
+ "codestral-latest",
+ "codestral-mamba-2407",
+ "codestral-mamba-latest",
+ "ministral-3b-2410",
+ "ministral-3b-latest",
+ "ministral-8b-2410",
+ "ministral-8b-latest",
+ "mistral-large-2402",
+ "mistral-large-2407",
+ "mistral-large-2411",
+ "mistral-large-latest",
+ "mistral-medium-2312",
+ "mistral-medium-latest",
+ "mistral-saba-2502",
+ "mistral-saba-latest",
+ "mistral-small-2312",
+ "mistral-small-2402",
+ "mistral-small-2409",
+ "mistral-small-2501",
+ "mistral-small-latest",
+ "mistral-tiny-2312",
+ "mistral-tiny-2407",
+ "mistral-tiny-latest",
+ "open-codestral-mamba",
+ "open-mistral-7b",
+ "open-mistral-nemo",
+ "open-mistral-nemo-2407",
+ "open-mixtral-8x22b",
+ "open-mixtral-8x22b-2404",
+ "open-mixtral-8x7b",
+};
+
const std::array<
ProviderRegistry::BuiltinProviderData, ProviderRegistry::N_BUILTIN
> ProviderRegistry::s_builtinProviders {
BuiltinProviderData {
- .id = QUuid("20f963dc-1f99-441e-ad80-f30a0a06bcac"),
- .name = u"Groq"_s,
- .base_url = u"https://api.groq.com/openai/v1/"_s,
+ .id = QUuid("20f963dc-1f99-441e-ad80-f30a0a06bcac"),
+ .name = u"Groq"_s,
+ .icon = u"qrc:/gpt4all/icons/groq.svg"_s,
+ .base_url = u"https://api.groq.com/openai/v1/"_s,
+ .model_whitelist = MODEL_WHITELIST_GROQ,
},
BuiltinProviderData {
- .id = QUuid("6f874c3a-f1ad-47f7-9129-755c5477146c"),
- .name = u"OpenAI"_s,
- .base_url = u"https://api.openai.com/v1/"_s,
+ .id = QUuid("6f874c3a-f1ad-47f7-9129-755c5477146c"),
+ .name = u"OpenAI"_s,
+ .icon = u"qrc:/gpt4all/icons/openai.svg"_s,
+ .base_url = u"https://api.openai.com/v1/"_s,
+ .model_whitelist = MODEL_WHITELIST_OPENAI,
},
BuiltinProviderData {
- .id = QUuid("7ae617b3-c0b2-4d2c-9ff2-bc3f049494cc"),
- .name = u"Mistral"_s,
- .base_url = u"https://api.mistral.ai/v1/"_s,
+ .id = QUuid("7ae617b3-c0b2-4d2c-9ff2-bc3f049494cc"),
+ .name = u"Mistral"_s,
+ .icon = u"qrc:/gpt4all/icons/mistral.svg"_s,
+ .base_url = u"https://api.mistral.ai/v1/"_s,
+ .model_whitelist = MODEL_WHITELIST_MISTRAL,
},
};
diff --git a/gpt4all-chat/src/main.cpp b/gpt4all-chat/src/main.cpp
index cf782ead..9d8810be 100644
--- a/gpt4all-chat/src/main.cpp
+++ b/gpt4all-chat/src/main.cpp
@@ -52,6 +52,9 @@
using namespace Qt::Literals::StringLiterals;
+namespace gpt4all::ui {
+
+
static void raiseWindow(QWindow *window)
{
#ifdef Q_OS_WINDOWS
@@ -70,8 +73,19 @@ static void raiseWindow(QWindow *window)
#endif
}
+Q_GLOBAL_STATIC(QNetworkAccessManager, globalNetworkAccessManager)
+
+QNetworkAccessManager *networkAccessManager()
+{ return globalNetworkAccessManager(); }
+
+
+} // namespace gpt4all::ui
+
+
int main(int argc, char *argv[])
{
+ using namespace gpt4all::ui;
+
#ifndef GPT4ALL_USE_QTPDF
FPDF_InitLibrary();
#endif
diff --git a/gpt4all-chat/src/main.h b/gpt4all-chat/src/main.h
new file mode 100644
index 00000000..84d6af08
--- /dev/null
+++ b/gpt4all-chat/src/main.h
@@ -0,0 +1,12 @@
+#pragma once
+
+class QNetworkAccessManager;
+
+
+namespace gpt4all::ui {
+
+
+QNetworkAccessManager *networkAccessManager();
+
+
+} // namespace gpt4all::ui
diff --git a/gpt4all-chat/src/modellist.cpp b/gpt4all-chat/src/modellist.cpp
index 218efdad..8c87f332 100644
--- a/gpt4all-chat/src/modellist.cpp
+++ b/gpt4all-chat/src/modellist.cpp
@@ -2365,56 +2365,3 @@ void ModelList::handleDiscoveryItemErrorOccurred(QNetworkReply::NetworkError cod
qWarning() << u"ERROR: Discovery item failed with error code \"%1-%2\""_s
.arg(code).arg(reply->errorString()).toStdString();
}
-
-QStringList ModelList::remoteModelList(const QString &apiKey, const QUrl &baseUrl)
-{
- QStringList modelList;
-
- // Create the request
- QNetworkRequest request;
- request.setUrl(baseUrl.resolved(QUrl("models")));
- request.setHeader(QNetworkRequest::ContentTypeHeader, "application/json");
-
- // Add the Authorization header
- const QString bearerToken = QString("Bearer %1").arg(apiKey);
- request.setRawHeader("Authorization", bearerToken.toUtf8());
-
- // Make the GET request
- QNetworkReply *reply = m_networkManager.get(request);
-
- // We use a local event loop to wait for the request to complete
- QEventLoop loop;
- connect(reply, &QNetworkReply::finished, &loop, &QEventLoop::quit);
- loop.exec();
-
- // Check for errors
- if (reply->error() == QNetworkReply::NoError) {
- // Parse the JSON response
- const QByteArray responseData = reply->readAll();
- const QJsonDocument jsonDoc = QJsonDocument::fromJson(responseData);
-
- if (!jsonDoc.isNull() && jsonDoc.isObject()) {
- QJsonObject rootObj = jsonDoc.object();
- QJsonValue dataValue = rootObj.value("data");
-
- if (dataValue.isArray()) {
- QJsonArray dataArray = dataValue.toArray();
- for (const QJsonValue &val : dataArray) {
- if (val.isObject()) {
- QJsonObject obj = val.toObject();
- const QString modelId = obj.value("id").toString();
- modelList.append(modelId);
- }
- }
- }
- }
- } else {
- // Handle network error (e.g. print it to qDebug)
- qWarning() << "Error retrieving models:" << reply->errorString();
- }
-
- // Clean up
- reply->deleteLater();
-
- return modelList;
-}
diff --git a/gpt4all-chat/src/modellist.h b/gpt4all-chat/src/modellist.h
index 36a84871..1f9a5b3e 100644
--- a/gpt4all-chat/src/modellist.h
+++ b/gpt4all-chat/src/modellist.h
@@ -546,8 +546,6 @@ public:
Q_INVOKABLE void discoverSearch(const QString &discover);
- Q_INVOKABLE QStringList remoteModelList(const QString &apiKey, const QUrl &baseUrl);
-
Q_SIGNALS:
void countChanged();
void installedModelsChanged();
diff --git a/gpt4all-chat/src/store_base.cpp b/gpt4all-chat/src/store_base.cpp
index c084e605..294b9bc5 100644
--- a/gpt4all-chat/src/store_base.cpp
+++ b/gpt4all-chat/src/store_base.cpp
@@ -26,6 +26,18 @@ using namespace Qt::StringLiterals;
namespace gpt4all::ui {
+DataStoreError::DataStoreError(std::error_code e)
+ : m_error(e)
+ , m_errorString(QString::fromStdString(e.message()))
+ {}
+
+DataStoreError::DataStoreError(const sys::system_error &e)
+ : m_error(e.code())
+ , m_errorString(QString::fromUtf8(e.what()))
+{
+ Q_ASSERT(e.code());
+}
+
DataStoreError::DataStoreError(const QFileDevice *file)
: m_error(file->error())
, m_errorString(file->errorString())
@@ -33,13 +45,6 @@ DataStoreError::DataStoreError(const QFileDevice *file)
Q_ASSERT(file->error());
}
-DataStoreError::DataStoreError(const boost::system::system_error &e)
- : m_error(e.code())
- , m_errorString(QString::fromUtf8(e.what()))
-{
- Q_ASSERT(e.code());
-}
-
DataStoreError::DataStoreError(QString e)
: m_error()
, m_errorString(e)
@@ -48,9 +53,10 @@ DataStoreError::DataStoreError(QString e)
void DataStoreError::raise() const
{
std::visit(Overloaded {
- [&](QFileDevice::FileError e) { throw FileError(m_errorString, e); },
- [&](boost::system::error_code e) { throw std::runtime_error(m_errorString.toUtf8().constData()); },
- [&](std::monostate ) { throw std::runtime_error(m_errorString.toUtf8().constData()); },
+ [&](std::error_code e) { throw std::system_error(e); },
+ [&](sys::error_code e) { throw std::runtime_error(m_errorString.toUtf8().constData()); },
+ [&](QFileDevice::FileError e) { throw FileError(m_errorString, e); },
+ [&](std::monostate ) { throw std::runtime_error(m_errorString.toUtf8().constData()); },
}, m_error);
Q_UNREACHABLE();
}
@@ -63,7 +69,20 @@ auto DataStoreBase::reload() -> DataStoreResult<>
json::stream_parser parser;
QFile file;
- for (auto &entry : fs::directory_iterator(m_path)) {
+ fs::directory_iterator it;
+ try {
+ it = fs::directory_iterator(m_path);
+ } catch (const fs::filesystem_error &e) {
+ if (e.code() == std::errc::no_such_file_or_directory) {
+ fs::create_directories(m_path);
+ return {}; // brand new dir, nothing to load
+ }
+ throw;
+ }
+
+ for (auto &entry : it) {
+ if (!entry.is_regular_file())
+ continue; // skip directories and such
file.setFileName(entry.path());
if (!file.open(QFile::ReadOnly)) {
qWarning().noquote() << "skipping unopenable file:" << file.fileName();
@@ -71,7 +90,7 @@ auto DataStoreBase::reload() -> DataStoreResult<>
}
auto jv = read(file, parser);
if (!jv) {
- (qWarning().nospace() << "skipping " << file.fileName() << "because of read error: ").noquote()
+ (qWarning().nospace() << "skipping " << file.fileName() << " because of read error: ").noquote()
<< jv.error().errorString();
} else if (auto [unique, uuid] = cacheInsert(*jv); !unique)
qWarning() << "skipping duplicate data store entry:" << uuid;
@@ -89,7 +108,7 @@ auto DataStoreBase::setPath(fs::path path) -> DataStoreResult<>
return {};
}
-auto DataStoreBase::getFilePath(const QString &name) -> std::filesystem::path
+auto DataStoreBase::getFilePath(const QString &name) -> fs::path
{ return m_path / fmt::format("{}.json", QLatin1StringView(normalizeName(name))); }
auto DataStoreBase::openNew(const QString &name) -> DataStoreResult>
@@ -106,7 +125,7 @@ auto DataStoreBase::openNew(const QString &name) -> DataStoreResult DataStoreResult>
{
auto path = getFilePath(name);
- if (!QFile::exists(path))
+ if (!allowCreate && !QFile::exists(path))
return std::unexpected(sys::system_error(
std::make_error_code(std::errc::no_such_file_or_directory), path.string()
));
@@ -119,33 +138,81 @@ auto DataStoreBase::openExisting(const QString &name, bool allowCreate) -> DataS
return file;
}
-auto DataStoreBase::read(QFileDevice &file, boost::json::stream_parser &parser) -> DataStoreResult
+auto DataStoreBase::read(QFileDevice &file, json::stream_parser &parser) -> DataStoreResult
{
- for (;;) {
- auto chunk = file.read(JSON_BUFSIZ);
- if (file.error())
- return std::unexpected(&file);
- if (chunk.isEmpty()) {
- Q_ASSERT(file.atEnd());
- break;
+ // chunk stream
+    auto iterChunks = [&] -> tl::generator<DataStoreResult<QByteArray>> {
+ for (;;) {
+ auto chunk = file.read(JSON_BUFSIZ);
+ if (file.error()) {
+ DataStoreResult res(std::unexpect, &file);
+ co_yield res;
+ }
+ if (chunk.isEmpty()) {
+ Q_ASSERT(file.atEnd());
+ break;
+ }
+ DataStoreResult res(std::move(chunk));
+ co_yield res;
}
- parser.write(chunk.data(), chunk.size());
+ };
+
+ auto inner = [&] -> DataStoreResult<> {
+ bool partialRead = false;
+ auto chunkIt = iterChunks();
+ // read JSON data
+ for (auto &chunk : chunkIt) {
+ if (!chunk)
+ return std::unexpected(chunk.error());
+ size_t nRead = parser.write_some(chunk->data(), chunk->size());
+ // consume trailing whitespace in chunk
+ if (nRead < chunk->size()) {
+ auto rest = QByteArrayView(*chunk).slice(nRead);
+ if (!rest.trimmed().isEmpty())
+ return std::unexpected(u"unexpected data after json: \"%1\""_s.arg(QByteArray(rest)));
+ partialRead = true;
+ break;
+ }
+ }
+ // consume trailing whitespace in file
+ if (partialRead) {
+ for (auto &chunk : chunkIt) {
+ if (!chunk)
+ return std::unexpected(chunk.error());
+ if (!chunk->trimmed().isEmpty())
+ return std::unexpected(u"unexpected data after json: \"%1\""_s.arg(*chunk));
+ }
+ }
+ return {};
+ };
+
+ auto res = inner();
+ if (!res) {
+ parser.reset();
+ return std::unexpected(res.error());
}
return parser.release();
}
auto DataStoreBase::write(const json::value &value, QFileDevice &file) -> DataStoreResult<>
{
+ qint64 nWritten;
m_serializer.reset(&value);
    std::array<char, JSON_BUFSIZ> buf;
while (!m_serializer.done()) {
auto chunk = m_serializer.read(buf.data(), buf.size());
- qint64 nWritten = file.write(chunk.data(), chunk.size());
+ nWritten = file.write(chunk.data(), chunk.size());
if (nWritten < 0)
return std::unexpected(&file);
Q_ASSERT(nWritten == chunk.size());
}
+ // write trailing newline to make it a valid text file
+ nWritten = file.write("\n"_ba);
+ if (nWritten < 0)
+ return std::unexpected(&file);
+ Q_ASSERT(nWritten == 1);
+
if (!file.flush())
return std::unexpected(&file);
diff --git a/gpt4all-chat/src/store_base.h b/gpt4all-chat/src/store_base.h
index d9eb45e5..49bb9524 100644
--- a/gpt4all-chat/src/store_base.h
+++ b/gpt4all-chat/src/store_base.h
@@ -16,6 +16,7 @@
#include
#include
#include
+#include <system_error>
#include
#include
#include
@@ -31,13 +32,15 @@ namespace gpt4all::ui {
class DataStoreError {
public:
using ErrorCode = std::variant<
- QFileDevice::FileError,
+ std::monostate,
+ std::error_code,
boost::system::error_code,
- std::monostate
+ QFileDevice::FileError
>;
- DataStoreError(const QFileDevice *file);
+ DataStoreError(std::error_code e);
DataStoreError(const boost::system::system_error &e);
+ DataStoreError(const QFileDevice *file);
DataStoreError(QString e);
[[nodiscard]] const ErrorCode &error () const { return m_error; }
@@ -94,8 +97,7 @@ public:
explicit DataStore(std::filesystem::path path);
auto list() -> tl::generator;
- auto setData(T data) -> DataStoreResult<>;
- auto createOrSetData(T data, const QString &name) -> DataStoreResult<>;
+    auto setData(T data, std::optional<QString> createName = {}) -> DataStoreResult<>;
auto remove(const QUuid &id) -> DataStoreResult<>;
auto acquire(QUuid id) -> DataStoreResult>;
diff --git a/gpt4all-chat/src/store_base.inl b/gpt4all-chat/src/store_base.inl
index 641ad905..c81e2870 100644
--- a/gpt4all-chat/src/store_base.inl
+++ b/gpt4all-chat/src/store_base.inl
@@ -6,6 +6,8 @@
#include
#include
+#include
+
namespace gpt4all::ui {
@@ -49,38 +51,19 @@ auto DataStore::createImpl(T data, const QString &name) -> DataStoreResult
-auto DataStore::setData(T data) -> DataStoreResult<>
+auto DataStore<T>::setData(T data, std::optional<QString> createName) -> DataStoreResult<>
{
+ const QString *openName;
auto name_it = m_names.find(data.id);
- if (name_it == m_names.end())
+ if (name_it != m_names.end()) {
+ openName = &name_it->second;
+ } else if (createName) {
+ openName = &*createName;
+ } else
return std::unexpected(QStringLiteral("id not found: %1").arg(data.id.toString()));
// acquire path
- auto file = openExisting(name_it->second);
- if (!file)
- return std::unexpected(file.error());
-
- // serialize
- if (auto res = write(boost::json::value_from(data), **file); !res)
- return std::unexpected(res.error());
- if (!(*file)->commit())
- return std::unexpected(file->get());
-
- // update
- m_entries.at(data.id) = std::move(data);
- return {};
-}
-
-template
-auto DataStore::createOrSetData(T data, const QString &name) -> DataStoreResult<>
-{
- auto name_it = m_names.find(data.id);
- if (name_it != m_names.end() && name_it->second != name)
- return std::unexpected(QStringLiteral("name conflict for id %1: old=%2, new=%3")
- .arg(data.id.toString(), name_it->second, name));
-
- // acquire path
- auto file = openExisting(name, /*allowCreate*/ true);
+ auto file = openExisting(*openName, !!createName);
if (!file)
return std::unexpected(file.error());
@@ -92,8 +75,19 @@ auto DataStore::createOrSetData(T data, const QString &name) -> DataStoreResu
// update
m_entries[data.id] = std::move(data);
- if (name_it == m_names.end())
- m_names.emplace(data.id, name);
+
+ // rename if necessary
+ if (name_it == m_names.end()) {
+ m_names.emplace(data.id, std::move(*createName));
+    } else if (createName && *createName != name_it->second) {
+ std::error_code ec;
+ auto newPath = getFilePath(*createName);
+ std::filesystem::rename(getFilePath(name_it->second), newPath, ec);
+ if (ec)
+ return std::unexpected(ec);
+ m_names.at(data.id) = std::move(*createName);
+ }
+
return {};
}