diff --git a/gpt4all-chat/CHANGELOG.md b/gpt4all-chat/CHANGELOG.md
index e888e2fa..96881f1a 100644
--- a/gpt4all-chat/CHANGELOG.md
+++ b/gpt4all-chat/CHANGELOG.md
@@ -9,6 +9,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/).
### Added
- Support DeepSeek-R1 Qwen models ([#3431](https://github.com/nomic-ai/gpt4all/pull/3431))
- Support for think tags in the GUI ([#3440](https://github.com/nomic-ai/gpt4all/pull/3440))
+- Support specifying SHA256 hash in models3.json instead of MD5 ([#3437](https://github.com/nomic-ai/gpt4all/pull/3437))

### Changed
- Use minja instead of Jinja2Cpp for significantly improved template compatibility ([#3433](https://github.com/nomic-ai/gpt4all/pull/3433))
diff --git a/gpt4all-chat/metadata/models3.json b/gpt4all-chat/metadata/models3.json
index d829fad0..654fe18d 100644
--- a/gpt4all-chat/metadata/models3.json
+++ b/gpt4all-chat/metadata/models3.json
@@ -32,6 +32,66 @@
"systemPrompt": "",
"chatTemplate": "{%- set loop_messages = messages %}\n{%- for message in loop_messages %}\n {%- set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n'+ message['content'] | trim + '<|eot_id|>' %}\n {{- content }}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{%- endif %}"
},
+ {
+ "order": "aa1",
+ "sha256sum": "5cd4ee65211770f1d99b4f6f4951780b9ef40e29314bd6542bb5bd0ad0bc29d1",
+ "name": "DeepSeek-R1-Distill-Qwen-7B",
+ "filename": "DeepSeek-R1-Distill-Qwen-7B-Q4_0.gguf",
+ "filesize": "4444121056",
+ "requires": "3.8.0",
+ "ramrequired": "8",
+ "parameters": "7 billion",
+ "quant": "q4_0",
+ "type": "deepseek",
+ "description": "<p>The official Qwen2.5-Math-7B distillation of DeepSeek-R1.</p><ul><li>License: MIT</li><li>No restrictions on commercial use</li><li>#reasoning</li></ul>",
+ "url": "https://huggingface.co/bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF/resolve/main/DeepSeek-R1-Distill-Qwen-7B-Q4_0.gguf",
+ "chatTemplate": "{%- if not add_generation_prompt is defined %}\n {%- set add_generation_prompt = false %}\n{%- endif %}\n{%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n{%- endif %}\n{%- for message in messages %}\n {%- if message['role'] == 'user' %}\n {{- '<|User|>' + message['content'] }}\n {%- endif %}\n {%- if message['role'] == 'assistant' %}\n {%- set content = message['content'] | regex_replace('^[\\\\s\\\\S]*', '') %}\n {{- '<|Assistant|>' + content + '<|end▁of▁sentence|>' }}\n {%- endif %}\n{%- endfor -%}\n{%- if add_generation_prompt %}\n {{- '<|Assistant|>' }}\n{%- endif %}"
+ },
+ {
+ "order": "aa2",
+ "sha256sum": "906b3382f2680f4ce845459b4a122e904002b075238080307586bcffcde49eef",
+ "name": "DeepSeek-R1-Distill-Qwen-14B",
+ "filename": "DeepSeek-R1-Distill-Qwen-14B-Q4_0.gguf",
+ "filesize": "8544267680",
+ "requires": "3.8.0",
+ "ramrequired": "16",
+ "parameters": "14 billion",
+ "quant": "q4_0",
+ "type": "deepseek",
+ "description": "<p>The official Qwen2.5-14B distillation of DeepSeek-R1.</p><ul><li>License: MIT</li><li>No restrictions on commercial use</li><li>#reasoning</li></ul>",
+ "url": "https://huggingface.co/bartowski/DeepSeek-R1-Distill-Qwen-14B-GGUF/resolve/main/DeepSeek-R1-Distill-Qwen-14B-Q4_0.gguf",
+ "chatTemplate": "{%- if not add_generation_prompt is defined %}\n {%- set add_generation_prompt = false %}\n{%- endif %}\n{%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n{%- endif %}\n{%- for message in messages %}\n {%- if message['role'] == 'user' %}\n {{- '<|User|>' + message['content'] }}\n {%- endif %}\n {%- if message['role'] == 'assistant' %}\n {%- set content = message['content'] | regex_replace('^[\\\\s\\\\S]*', '') %}\n {{- '<|Assistant|>' + content + '<|end▁of▁sentence|>' }}\n {%- endif %}\n{%- endfor -%}\n{%- if add_generation_prompt %}\n {{- '<|Assistant|>' }}\n{%- endif %}"
+ },
+ {
+ "order": "aa3",
+ "sha256sum": "0eb93e436ac8beec18aceb958c120d282cb2cf5451b23185e7be268fe9d375cc",
+ "name": "DeepSeek-R1-Distill-Llama-8B",
+ "filename": "DeepSeek-R1-Distill-Llama-8B-Q4_0.gguf",
+ "filesize": "4675894112",
+ "requires": "3.8.0",
+ "ramrequired": "8",
+ "parameters": "8 billion",
+ "quant": "q4_0",
+ "type": "deepseek",
+ "description": "<p>The official Llama-3.1-8B distillation of DeepSeek-R1.</p><ul><li>License: MIT</li><li>No restrictions on commercial use</li><li>#reasoning</li></ul>",
+ "url": "https://huggingface.co/bartowski/DeepSeek-R1-Distill-Llama-8B-GGUF/resolve/main/DeepSeek-R1-Distill-Llama-8B-Q4_0.gguf",
+ "chatTemplate": "{%- if not add_generation_prompt is defined %}\n {%- set add_generation_prompt = false %}\n{%- endif %}\n{%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n{%- endif %}\n{%- for message in messages %}\n {%- if message['role'] == 'user' %}\n {{- '<|User|>' + message['content'] }}\n {%- endif %}\n {%- if message['role'] == 'assistant' %}\n {%- set content = message['content'] | regex_replace('^[\\\\s\\\\S]*', '') %}\n {{- '<|Assistant|>' + content + '<|end▁of▁sentence|>' }}\n {%- endif %}\n{%- endfor -%}\n{%- if add_generation_prompt %}\n {{- '<|Assistant|>' }}\n{%- endif %}"
+ },
+ {
+ "order": "aa4",
+ "sha256sum": "b3af887d0a015b39fab2395e4faf682c1a81a6a3fd09a43f0d4292f7d94bf4d0",
+ "name": "DeepSeek-R1-Distill-Qwen-1.5B",
+ "filename": "DeepSeek-R1-Distill-Qwen-1.5B-Q4_0.gguf",
+ "filesize": "1068807776",
+ "requires": "3.8.0",
+ "ramrequired": "3",
+ "parameters": "1.5 billion",
+ "quant": "q4_0",
+ "type": "deepseek",
+ "description": "<p>The official Qwen2.5-Math-1.5B distillation of DeepSeek-R1.</p><ul><li>License: MIT</li><li>No restrictions on commercial use</li><li>#reasoning</li></ul>",
+ "url": "https://huggingface.co/bartowski/DeepSeek-R1-Distill-Qwen-1.5B-GGUF/resolve/main/DeepSeek-R1-Distill-Qwen-1.5B-Q4_0.gguf",
+ "chatTemplate": "{%- if not add_generation_prompt is defined %}\n {%- set add_generation_prompt = false %}\n{%- endif %}\n{%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n{%- endif %}\n{%- for message in messages %}\n {%- if message['role'] == 'user' %}\n {{- '<|User|>' + message['content'] }}\n {%- endif %}\n {%- if message['role'] == 'assistant' %}\n {%- set content = message['content'] | regex_replace('^[\\\\s\\\\S]*', '') %}\n {{- '<|Assistant|>' + content + '<|end▁of▁sentence|>' }}\n {%- endif %}\n{%- endfor -%}\n{%- if add_generation_prompt %}\n {{- '<|Assistant|>' }}\n{%- endif %}"
+ },
{
"order": "b",
"md5sum": "27b44e8ae1817525164ddf4f8dae8af4",
@@ -472,7 +532,7 @@
"filename": "qwen2-1_5b-instruct-q4_0.gguf",
"filesize": "937532800",
"requires": "3.0",
- "ramrequired": "4",
+ "ramrequired": "3",
"parameters": "1.5 billion",
"quant": "q4_0",
"type": "qwen2",
diff --git a/gpt4all-chat/src/modellist.cpp b/gpt4all-chat/src/modellist.cpp
index 63b00b35..d075b770 100644
--- a/gpt4all-chat/src/modellist.cpp
+++ b/gpt4all-chat/src/modellist.cpp
@@ -1621,7 +1621,6 @@ void ModelList::parseModelsJsonFile(const QByteArray &jsonData, bool save)
QString requiresVersion = obj["requires"].toString();
QString versionRemoved = obj["removedIn"].toString();
QString url = obj["url"].toString();
- QByteArray modelHash = obj["md5sum"].toString().toLatin1();
bool isDefault = obj.contains("isDefault") && obj["isDefault"] == u"true"_s;
bool disableGUI = obj.contains("disableGUI") && obj["disableGUI"] == u"true"_s;
QString description = obj["description"].toString();
@@ -1632,6 +1631,16 @@ void ModelList::parseModelsJsonFile(const QByteArray &jsonData, bool save)
QString type = obj["type"].toString();
bool isEmbeddingModel = obj["embeddingModel"].toBool();

+ QByteArray modelHash;
+ ModelInfo::HashAlgorithm hashAlgorithm;
+ if (auto it = obj.find("sha256sum"_L1); it != obj.end()) {
+ modelHash = it->toString().toLatin1();
+ hashAlgorithm = ModelInfo::Sha256;
+ } else {
+ modelHash = obj["md5sum"].toString().toLatin1();
+ hashAlgorithm = ModelInfo::Md5;
+ }
+
// Some models aren't supported in the GUI at all
if (disableGUI)
continue;
@@ -1660,7 +1669,7 @@ void ModelList::parseModelsJsonFile(const QByteArray &jsonData, bool save)
{ ModelList::FilenameRole, modelFilename },
{ ModelList::FilesizeRole, modelFilesize },
{ ModelList::HashRole, modelHash },
- { ModelList::HashAlgorithmRole, ModelInfo::Md5 },
+ { ModelList::HashAlgorithmRole, hashAlgorithm },
{ ModelList::DefaultRole, isDefault },
{ ModelList::DescriptionRole, description },
{ ModelList::RequiresVersionRole, requiresVersion },