diff --git a/gpt4all-chat/qml/AddRemoteModelView.qml b/gpt4all-chat/qml/AddRemoteModelView.qml
index 07df00c0..83e7a0dd 100644
--- a/gpt4all-chat/qml/AddRemoteModelView.qml
+++ b/gpt4all-chat/qml/AddRemoteModelView.qml
@@ -13,6 +13,7 @@ import download
 import modellist
 import network
 import gpt4all
+import gpt4all.store_provider
 import mysettings
 import localdocs
 
@@ -55,28 +56,38 @@ ColumnLayout {
                     height: parent.childHeight
                     provider: modelData
                     providerName: provider.name
-                    providerImage: provider.icon
-                    providerDesc: ({
-                        '{20f963dc-1f99-441e-ad80-f30a0a06bcac}': qsTr(
-                            'Groq offers a high-performance AI inference engine designed for low-latency and ' +
-                            'efficient processing. Optimized for real-time applications, Groq’s technology is ideal ' +
-                            'for users who need fast responses from open large language models and other AI ' +
-                            'workloads.<br><br>Get your API key: ' +
-                            '<a href="https://console.groq.com/keys">https://groq.com/</a>'
-                        ),
-                        '{6f874c3a-f1ad-47f7-9129-755c5477146c}': qsTr(
-                            'OpenAI provides access to advanced AI models, including GPT-4 supporting a wide range ' +
-                            'of applications, from conversational AI to content generation and code completion.' +
-                            '<br><br>Get your API key: ' +
-                            '<a href="https://platform.openai.com/signup">https://openai.com/</a>'
-                        ),
-                        '{7ae617b3-c0b2-4d2c-9ff2-bc3f049494cc}': qsTr(
-                            'Mistral AI specializes in efficient, open-weight language models optimized for various ' +
-                            'natural language processing tasks. Their models are designed for flexibility and ' +
-                            'performance, making them a solid option for applications requiring scalable AI ' +
-                            'solutions.<br><br>Get your API key: <a href="https://mistral.ai/">https://mistral.ai/</a>'
-                        ),
-                    })[provider.id.toString()]
+                    providerImage: "icon" in provider ? provider.icon : "qrc:/gpt4all/icons/antenna_3.svg"
+                    providerDesc: {
+                        if (!provider.isBuiltin) {
+                            switch (provider.type) {
+                            case ProviderStore.ProviderType.openai:
+                                return qsTr("A custom OpenAI provider.");
+                            case ProviderStore.ProviderType.ollama:
+                                return qsTr("A custom Ollama provider.");
+                            }
+                        }
+                        return ({
+                            '{20f963dc-1f99-441e-ad80-f30a0a06bcac}': qsTr(
+                                'Groq offers a high-performance AI inference engine designed for low-latency and ' +
+                                'efficient processing. Optimized for real-time applications, Groq’s technology is ideal ' +
+                                'for users who need fast responses from open large language models and other AI ' +
+                                'workloads.<br><br>Get your API key: ' +
+                                '<a href="https://console.groq.com/keys">https://groq.com/</a>'
+                            ),
+                            '{6f874c3a-f1ad-47f7-9129-755c5477146c}': qsTr(
+                                'OpenAI provides access to advanced AI models, including GPT-4 supporting a wide range ' +
+                                'of applications, from conversational AI to content generation and code completion.' +
+                                '<br><br>Get your API key: ' +
+                                '<a href="https://platform.openai.com/signup">https://openai.com/</a>'
+                            ),
+                            '{7ae617b3-c0b2-4d2c-9ff2-bc3f049494cc}': qsTr(
+                                'Mistral AI specializes in efficient, open-weight language models optimized for various ' +
+                                'natural language processing tasks. Their models are designed for flexibility and ' +
+                                'performance, making them a solid option for applications requiring scalable AI ' +
+                                'solutions.<br><br>Get your API key: <a href="https://mistral.ai/">https://mistral.ai/</a>'
+                            ),
+                        })[provider.id.toString()];
+                    }
                 }
             }
         }
diff --git a/gpt4all-chat/qml/RemoteModelCard.qml b/gpt4all-chat/qml/RemoteModelCard.qml
index 9c853ecb..7b53a4f3 100644
--- a/gpt4all-chat/qml/RemoteModelCard.qml
+++ b/gpt4all-chat/qml/RemoteModelCard.qml
@@ -208,7 +208,7 @@ Rectangle {
             property string apiKeyText: apiKeyField.text.trim()
             property string modelNameText: myModelList.currentText.trim()
 
-            enabled: baseUrlGood && apiKeyGood && modelNameText !== ""
+            enabled: nameField.ok && baseUrlField.ok && apiKeyField.ok && modelNameText !== ""
 
             onClicked: {
                 Download.installCompatibleModel(
diff --git a/gpt4all-chat/src/llmodel_ollama.cpp b/gpt4all-chat/src/llmodel_ollama.cpp
index 42e89c6a..6e256120 100644
--- a/gpt4all-chat/src/llmodel_ollama.cpp
+++ b/gpt4all-chat/src/llmodel_ollama.cpp
@@ -64,12 +64,6 @@ auto OllamaProvider::listModels() -> QCoro::Task<backend::DataOrRespErr<QStringL
     co_return res;
 }
 
-QCoro::QmlTask OllamaProvider::statusQml()
-{ return wrapQmlTask(this, &OllamaProvider::status, u"OllamaProvider::status"_s); }
-
-QCoro::QmlTask OllamaProvider::listModelsQml()
-{ return wrapQmlTask(this, &OllamaProvider::listModels, u"OllamaProvider::listModels"_s); }
-
 auto OllamaProvider::newModel(const QByteArray &modelHash) const -> std::shared_ptr<OllamaModelDescription>
 { return std::static_pointer_cast<OllamaModelDescription>(newModelImpl(modelHash)); }
 
diff --git a/gpt4all-chat/src/llmodel_ollama.h b/gpt4all-chat/src/llmodel_ollama.h
index a3b128e2..bb0061cc 100644
--- a/gpt4all-chat/src/llmodel_ollama.h
+++ b/gpt4all-chat/src/llmodel_ollama.h
@@ -46,8 +46,9 @@ protected:
 
 class OllamaProvider : public QObject, public virtual ModelProvider {
     Q_OBJECT
-    Q_PROPERTY(QUuid id        READ id        CONSTANT)
-    Q_PROPERTY(bool  isBuiltin READ isBuiltin CONSTANT)
+    Q_PROPERTY(QUuid        id        READ id        CONSTANT)
+    Q_PROPERTY(bool         isBuiltin READ isBuiltin CONSTANT)
+    Q_PROPERTY(ProviderType type      READ type      CONSTANT)
 
 protected:
     explicit OllamaProvider();
@@ -58,6 +59,8 @@ public:
           QObject *asQObject()       override { return this; }
     const QObject *asQObject() const override { return this; }
 
+    ProviderType type() const final { return ProviderType::ollama; }
+
     auto supportedGenerationParams() const -> QSet<GenerationParam> override;
     auto makeGenerationParams(const QMap<GenerationParam, QVariant> &values) const -> OllamaGenerationParams * override;
 
@@ -65,9 +68,9 @@ public:
     auto status    () -> QCoro::Task<ProviderStatus                     > override;
     auto listModels() -> QCoro::Task<backend::DataOrRespErr<QStringList>> override;
 
-    // QML wrapped endpoints
-    Q_INVOKABLE QCoro::QmlTask statusQml    ();
-    Q_INVOKABLE QCoro::QmlTask listModelsQml();
+    // QML endpoints
+    Q_INVOKABLE QCoro::QmlTask statusQml    () { return ModelProvider::statusQml    (); }
+    Q_INVOKABLE QCoro::QmlTask listModelsQml() { return ModelProvider::listModelsQml(); }
 
     [[nodiscard]] auto newModel(const QByteArray &modelHash) const -> std::shared_ptr<OllamaModelDescription>;
 
@@ -103,6 +106,10 @@ public:
     /// Create a new OllamaProvider on disk.
     explicit OllamaProviderCustom(protected_t p, ProviderStore *store, QString name, QUrl baseUrl);
 
+    // QML setters
+    Q_INVOKABLE bool setNameQml   (QString value) { return ModelProviderCustom::setNameQml   (std::move(value)); }
+    Q_INVOKABLE bool setBaseUrlQml(QString value) { return ModelProviderCustom::setBaseUrlQml(std::move(value)); }
+
 Q_SIGNALS:
     void nameChanged   (const QString &value);
     void baseUrlChanged(const QUrl    &value);
diff --git a/gpt4all-chat/src/llmodel_openai.cpp b/gpt4all-chat/src/llmodel_openai.cpp
index 7065cd15..e748c3ab 100644
--- a/gpt4all-chat/src/llmodel_openai.cpp
+++ b/gpt4all-chat/src/llmodel_openai.cpp
@@ -155,12 +155,6 @@ auto OpenaiProvider::listModels() -> QCoro::Task<backend::DataOrRespErr<QStringL
     co_return models;
 }
 
-QCoro::QmlTask OpenaiProvider::statusQml()
-{ return wrapQmlTask(this, &OpenaiProvider::status, u"OpenaiProvider::status"_s); }
-
-QCoro::QmlTask OpenaiProvider::listModelsQml()
-{ return wrapQmlTask(this, &OpenaiProvider::listModels, u"OpenaiProvider::listModels"_s); }
-
 auto OpenaiProvider::newModel(const QString &modelName) const -> std::shared_ptr<OpenaiModelDescription>
 { return std::static_pointer_cast<OpenaiModelDescription>(newModelImpl(modelName)); }
 
diff --git a/gpt4all-chat/src/llmodel_openai.h b/gpt4all-chat/src/llmodel_openai.h
index 26753ebd..982fc84e 100644
--- a/gpt4all-chat/src/llmodel_openai.h
+++ b/gpt4all-chat/src/llmodel_openai.h
@@ -50,9 +50,10 @@ protected:
 
 class OpenaiProvider : public QObject, public virtual ModelProvider {
     Q_OBJECT
-    Q_PROPERTY(QUuid   id        READ id        CONSTANT            )
-    Q_PROPERTY(QString apiKey    READ apiKey    NOTIFY apiKeyChanged)
-    Q_PROPERTY(bool    isBuiltin READ isBuiltin CONSTANT            )
+    Q_PROPERTY(QUuid        id        READ id        CONSTANT            )
+    Q_PROPERTY(QString      apiKey    READ apiKey    NOTIFY apiKeyChanged)
+    Q_PROPERTY(bool         isBuiltin READ isBuiltin CONSTANT            )
+    Q_PROPERTY(ProviderType type      READ type      CONSTANT            )
 
 protected:
     explicit OpenaiProvider();
@@ -64,6 +65,8 @@ public:
           QObject *asQObject()       override { return this; }
     const QObject *asQObject() const override { return this; }
 
+    ProviderType type() const final { return ProviderType::openai; }
+
     [[nodiscard]] const QString &apiKey() const { return m_apiKey; }
 
     [[nodiscard]] virtual DataStoreResult<> setApiKey(QString value) = 0;
@@ -77,9 +80,9 @@ public:
     auto status    () -> QCoro::Task<ProviderStatus                     > override;
     auto listModels() -> QCoro::Task<backend::DataOrRespErr<QStringList>> override;
 
-    // QML wrapped endpoints
-    Q_INVOKABLE QCoro::QmlTask statusQml    ();
-    Q_INVOKABLE QCoro::QmlTask listModelsQml();
+    // QML endpoints
+    Q_INVOKABLE QCoro::QmlTask statusQml    () { return ModelProvider::statusQml    (); }
+    Q_INVOKABLE QCoro::QmlTask listModelsQml() { return ModelProvider::listModelsQml(); }
 
     [[nodiscard]] auto newModel(const QString &modelName) const -> std::shared_ptr<OpenaiModelDescription>;
 
@@ -140,6 +143,10 @@ public:
     [[nodiscard]] DataStoreResult<> setApiKey(QString value) override
     { return setMemberProp<QString>(&OpenaiProviderCustom::m_apiKey, "apiKey", std::move(value)); }
 
+    // QML setters
+    Q_INVOKABLE bool setNameQml   (QString value) { return ModelProviderCustom::setNameQml   (std::move(value)); }
+    Q_INVOKABLE bool setBaseUrlQml(QString value) { return ModelProviderCustom::setBaseUrlQml(std::move(value)); }
+
 Q_SIGNALS:
     void nameChanged   (const QString &value);
     void baseUrlChanged(const QUrl    &value);
diff --git a/gpt4all-chat/src/llmodel_provider.h b/gpt4all-chat/src/llmodel_provider.h
index 58aaf673..c452980a 100644
--- a/gpt4all-chat/src/llmodel_provider.h
+++ b/gpt4all-chat/src/llmodel_provider.h
@@ -5,6 +5,7 @@
 #include "qmlsharedptr.h" // IWYU pragma: keep
 #include "utils.h" // IWYU pragma: keep
 
+#include <QCoro/QCoroQmlTask> // IWYU pragma: keep
 #include <gpt4all-backend/ollama-client.h>
 
 #include <QAbstractListModel>
@@ -34,15 +35,12 @@ class QJSEngine;
 template <typename Key, typename T> class QHash;
 namespace QCoro {
     template <typename T> class Task;
-    struct QmlTask;
 }
 
 
 namespace gpt4all::ui {
 
 
-Q_NAMESPACE
-
 class ModelDescription;
 
 namespace detail {
@@ -71,6 +69,10 @@ QCoro::QmlTask wrapQmlTask(C *obj, F f, QString prefix, Args &&...args);
 template <typename C, typename F, typename... Args>
 bool wrapQmlFunc(C *obj, F &&f, QStringView prefix, Args &&...args);
 
+inline namespace llmodel_provider {
+
+Q_NAMESPACE
+
 enum class GenerationParam  {
     NPredict,
     Temperature,
@@ -82,6 +84,8 @@ enum class GenerationParam  {
 };
 Q_ENUM_NS(GenerationParam)
 
+} // inline namespace llmodel_provider
+
 class GenerationParams {
 public:
     virtual ~GenerationParams() noexcept = 0;
@@ -128,7 +132,8 @@ public:
     virtual       QObject *asQObject() = 0;
     virtual const QObject *asQObject() const = 0;
 
-    virtual bool isBuiltin() const = 0;
+    virtual bool         isBuiltin() const = 0;
+    virtual ProviderType type     () const = 0;
 
     // getters
     [[nodiscard]] const QUuid   &id     () const { return m_id;      }
@@ -142,6 +147,12 @@ public:
     virtual auto status    () -> QCoro::Task<ProviderStatus                     > = 0;
     virtual auto listModels() -> QCoro::Task<backend::DataOrRespErr<QStringList>> = 0;
 
+    // QML endpoints
+    QCoro::QmlTask statusQml()
+    { return wrapQmlTask(this, &ModelProvider::status,     QStringLiteral("ModelProvider::status")    ); }
+    QCoro::QmlTask listModelsQml()
+    { return wrapQmlTask(this, &ModelProvider::listModels, QStringLiteral("ModelProvider::listModels")); }
+
     /// create a model using this provider
     [[nodiscard]] auto newModel(const QVariant &key) const -> std::shared_ptr<ModelDescription>;
 
@@ -207,9 +218,9 @@ public:
     { return setMemberProp<QUrl   >(&ModelProviderCustom::m_baseUrl, "baseUrl", std::move(value)); }
 
     // QML setters
-    Q_INVOKABLE bool setNameQml   (QString value)
+    bool setNameQml   (QString value)
     { return wrapQmlFunc(this, &ModelProviderCustom::setName,    u"setName",    std::move(value)); }
-    Q_INVOKABLE bool setBaseUrlQml(QString value)
+    bool setBaseUrlQml(QString value)
     { return wrapQmlFunc(this, &ModelProviderCustom::setBaseUrl, u"setBaseUrl", std::move(value)); }
 
     [[nodiscard]] auto persist() -> DataStoreResult<>;
diff --git a/gpt4all-chat/src/llmodel_provider.inl b/gpt4all-chat/src/llmodel_provider.inl
index 1b88ad73..447ba90c 100644
--- a/gpt4all-chat/src/llmodel_provider.inl
+++ b/gpt4all-chat/src/llmodel_provider.inl
@@ -1,7 +1,6 @@
 #include <fmt/format.h>
 
-#include <QCoro/QCoroQmlTask>
-#include <QCoro/QCoroTask>
+#include <QCoro/QCoroTask> // IWYU pragma: keep
 
 #include <QDebug>
 #include <QVariant>
diff --git a/gpt4all-chat/src/main.cpp b/gpt4all-chat/src/main.cpp
index 1be7b2bc..0d530cf6 100644
--- a/gpt4all-chat/src/main.cpp
+++ b/gpt4all-chat/src/main.cpp
@@ -157,6 +157,8 @@ int main(int argc, char *argv[])
     qmlRegisterSingletonInstance("gpt4all.ProviderRegistry", 1, 0, "ProviderRegistry", ProviderRegistry::globalInstance());
     qmlRegisterUncreatableMetaObject(ToolEnums::staticMetaObject, "toolenums", 1, 0, "ToolEnums", "Error: only enums");
     qmlRegisterUncreatableMetaObject(MySettingsEnums::staticMetaObject, "mysettingsenums", 1, 0, "MySettingsEnums", "Error: only enums");
+    qmlRegisterUncreatableMetaObject(gpt4all::ui::llmodel_provider::staticMetaObject, "gpt4all.llmodel_provider", 1, 0, "Provider", "Error: only enums");
+    qmlRegisterUncreatableMetaObject(gpt4all::ui::store_provider::staticMetaObject, "gpt4all.store_provider", 1, 0, "ProviderStore", "Error: only enums");
 
     {
         auto fixedFont = QFontDatabase::systemFont(QFontDatabase::FixedFont);
diff --git a/gpt4all-chat/src/store_provider.cpp b/gpt4all-chat/src/store_provider.cpp
index 6edd32db..1da8f451 100644
--- a/gpt4all-chat/src/store_provider.cpp
+++ b/gpt4all-chat/src/store_provider.cpp
@@ -42,7 +42,7 @@ auto tag_invoke(const boost::json::value_to_tag<ModelProviderData> &, const boos
             json::value_to<QString>(jv.at("name"    )),
             json::value_to<QString>(jv.at("base_url")),
         });
-    ModelProviderData::ProviderDetails provider_details;
+    ModelProviderData::ProviderDetails provider_details = std::monostate();
     switch (type) {
         using enum ProviderType;
     case openai:
diff --git a/gpt4all-chat/src/store_provider.h b/gpt4all-chat/src/store_provider.h
index b64dc343..ddc73a85 100644
--- a/gpt4all-chat/src/store_provider.h
+++ b/gpt4all-chat/src/store_provider.h
@@ -6,6 +6,7 @@
 #include <boost/describe/enum.hpp>
 #include <boost/json.hpp> // IWYU pragma: keep
 
+#include <QObject>
 #include <QString>
 #include <QUrl>
 #include <QUuid>
@@ -14,12 +15,19 @@
 namespace gpt4all::ui {
 
 
+inline namespace store_provider {
+
+Q_NAMESPACE
+
 // indices of this enum should be consistent with indices of ProviderDetails
 enum class ProviderType {
     openai = 0,
     ollama = 1,
 };
 BOOST_DESCRIBE_ENUM(ProviderType, openai, ollama)
+Q_ENUM_NS(ProviderType)
+
+} // inline namespace store_provider
 
 struct CustomProviderDetails {
     QString name;