diff --git a/gpt4all-chat/chatllm.cpp b/gpt4all-chat/chatllm.cpp
index bf3f6253..750e8548 100644
--- a/gpt4all-chat/chatllm.cpp
+++ b/gpt4all-chat/chatllm.cpp
@@ -274,16 +274,6 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo)
// Store the file info in the modelInfo in case we have an error loading
m_llModelInfo.fileInfo = fileInfo;
- // Check if we've previously tried to load this file and failed/crashed
- if (MySettings::globalInstance()->attemptModelLoad() == filePath) {
- MySettings::globalInstance()->setAttemptModelLoad(QString()); // clear the flag
- if (!m_isServer)
- LLModelStore::globalInstance()->releaseModel(m_llModelInfo); // release back into the store
- m_llModelInfo = LLModelInfo();
- emit modelLoadingError(QString("Previous attempt to load model resulted in crash for `%1` most likely due to insufficient memory. You should either remove this model or decrease your system RAM usage by closing other applications.").arg(modelInfo.filename()));
- return false;
- }
-
if (fileInfo.exists()) {
if (isChatGPT) {
QString apiKey;
@@ -319,9 +309,6 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo)
return m_shouldBeLoaded;
});
- // Update the settings that a model is being loaded and update the device list
- MySettings::globalInstance()->setAttemptModelLoad(filePath);
-
// Pick the best match for the device
QString actualDevice = m_llModelInfo.model->implementation().buildVariant() == "metal" ? "Metal" : "CPU";
const QString requestedDevice = MySettings::globalInstance()->device();
@@ -373,7 +360,6 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo)
emit reportFallbackReason("<br>model or quant has no GPU support");
}
- MySettings::globalInstance()->setAttemptModelLoad(QString());
if (!success) {
delete m_llModelInfo.model;
m_llModelInfo.model = nullptr;
diff --git a/gpt4all-chat/mysettings.cpp b/gpt4all-chat/mysettings.cpp
index f9774bde..9e5cdad0 100644
--- a/gpt4all-chat/mysettings.cpp
+++ b/gpt4all-chat/mysettings.cpp
@@ -717,24 +717,3 @@ void MySettings::setNetworkUsageStatsActive(bool b)
setting.sync();
emit networkUsageStatsActiveChanged();
}
-
-QString MySettings::attemptModelLoad() const
-{
- QSettings setting;
- setting.sync();
- return setting.value("attemptModelLoad", QString()).toString();
-}
-
-void MySettings::setAttemptModelLoad(const QString &modelFile)
-{
- if (attemptModelLoad() == modelFile)
- return;
-
- QSettings setting;
- if (modelFile.isEmpty())
- setting.remove("attemptModelLoad");
- else
- setting.setValue("attemptModelLoad", modelFile);
- setting.sync();
- emit attemptModelLoadChanged();
-}
diff --git a/gpt4all-chat/mysettings.h b/gpt4all-chat/mysettings.h
index 4bfbef6b..c5019b91 100644
--- a/gpt4all-chat/mysettings.h
+++ b/gpt4all-chat/mysettings.h
@@ -110,8 +110,6 @@ public:
bool networkUsageStatsActive() const;
void setNetworkUsageStatsActive(bool b);
- QString attemptModelLoad() const;
- void setAttemptModelLoad(const QString &modelFile);
QVector<QString> deviceList() const;
void setDeviceList(const QVector<QString> &deviceList);