Mirror of https://github.com/nomic-ai/gpt4all.git (synced 2025-08-02 08:08:29 +00:00)
Add falcon to our models.json
This commit is contained in: parent d3b8234106, commit 924efd9e25
gpt4all-chat/download.cpp
@@ -13,6 +13,8 @@
 #include <QStandardPaths>
 #include <QSettings>
 
+#define USE_LOCAL_MODELSJSON
+
 class MyDownload: public Download { };
 Q_GLOBAL_STATIC(MyDownload, downloadInstance)
 Download *Download::globalInstance()
@@ -93,7 +95,11 @@ bool Download::isFirstStart() const
 
 void Download::updateModelList()
 {
+#if defined(USE_LOCAL_MODELSJSON)
+    QUrl jsonUrl("file://" + QDir::homePath() + "/dev/large_language_models/gpt4all/gpt4all-chat/metadata/models.json");
+#else
     QUrl jsonUrl("http://gpt4all.io/models/models.json");
+#endif
     QNetworkRequest request(jsonUrl);
     QSslConfiguration conf = request.sslConfiguration();
     conf.setPeerVerifyMode(QSslSocket::VerifyNone);
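Aside (not part of the commit): the new USE_LOCAL_MODELSJSON switch only changes which URL updateModelList() reads the model list from; the request handling after it is unchanged. Below is a minimal standalone sketch of that compile-time toggle. QUrl, QDir, QCoreApplication, and qDebug are standard Qt APIs; the main() wrapper and the debug output are illustrative assumptions, not code from this commit.

// Sketch only: the same compile-time URL selection as in the diff above,
// wrapped in a tiny program so it can be built and run on its own.
#include <QCoreApplication>
#include <QDir>
#include <QUrl>
#include <QDebug>

//#define USE_LOCAL_MODELSJSON   // define this to point at a local checkout

int main(int argc, char *argv[])
{
    QCoreApplication app(argc, argv);
#if defined(USE_LOCAL_MODELSJSON)
    // Local path copied from the diff; adjust to wherever the repo is checked out.
    QUrl jsonUrl("file://" + QDir::homePath() + "/dev/large_language_models/gpt4all/gpt4all-chat/metadata/models.json");
#else
    QUrl jsonUrl("http://gpt4all.io/models/models.json");
#endif
    qDebug() << "model list source:" << jsonUrl.toString();
    return 0;
}

Defining the macro in a local build points the chat client at a models.json from a working checkout instead of the hosted copy, which is what makes it possible to test a new entry such as Falcon before it is published.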
gpt4all-chat/metadata/models.json
@@ -15,16 +15,16 @@
     },
     {
         "order": "b",
-        "md5sum": "756249d3d6abe23bde3b1ae272628640",
-        "name": "MPT Chat",
-        "filename": "ggml-mpt-7b-chat.bin",
-        "filesize": "4854401050",
-        "requires": "2.4.1",
+        "md5sum": "725f148218a65ce8ebcc724e52f31b49",
+        "name": "Falcon",
+        "filename": "ggml-model-gpt4all-falcon-q4_0.bin",
+        "filesize": "4061641216",
         "ramrequired": "8",
         "parameters": "7 billion",
         "quant": "q4_0",
-        "type": "MPT",
-        "description": "<strong>Best overall smaller model</strong><br><ul><li>Fast responses<li>Chat based<li>Trained by Mosaic ML<li>Cannot be used commercially</ul>"
+        "type": "Falcon",
+        "description": "<strong>Best overall smaller model</strong><br><ul><li>Fast responses</li><li>Instruction based</li><li>Trained by TII<li>Finetuned by Nomic AI<li>Licensed for commercial use</ul>",
+        "url": "https://huggingface.co/nomic-ai/gpt4all-falcon-ggml/resolve/main/ggml-model-gpt4all-falcon-q4_0.bin"
     },
     {
         "order": "c",
@@ -36,10 +36,10 @@
         "parameters": "7 billion",
         "quant": "q4_0",
         "type": "GPT-J",
-        "description": "<strong>Best overall for commercial usage</strong><br><ul><li>Fast responses<li>Creative responses</li><li>Instruction based</li><li>Trained by Nomic ML<li>Licensed for commercial use</ul>"
+        "description": "<strong>Creative model can be used for commercial purposes</strong><br><ul><li>Fast responses<li>Creative responses</li><li>Instruction based</li><li>Trained by Nomic AI<li>Licensed for commercial use</ul>"
     },
     {
-        "order": "d",
+        "order": "e",
         "md5sum": "11d9f060ca24575a2c303bdc39952486",
         "name": "Snoozy",
         "filename": "GPT4All-13B-snoozy.ggmlv3.q4_0.bin",
@@ -53,7 +53,20 @@
         "url": "https://huggingface.co/TheBloke/GPT4All-13B-snoozy-GGML/resolve/main/GPT4All-13B-snoozy.ggmlv3.q4_0.bin"
     },
     {
-        "order": "e",
+        "order": "f",
+        "md5sum": "756249d3d6abe23bde3b1ae272628640",
+        "name": "MPT Chat",
+        "filename": "ggml-mpt-7b-chat.bin",
+        "filesize": "4854401050",
+        "requires": "2.4.1",
+        "ramrequired": "8",
+        "parameters": "7 billion",
+        "quant": "q4_0",
+        "type": "MPT",
+        "description": "<strong>Best overall smaller model</strong><br><ul><li>Fast responses<li>Chat based<li>Trained by Mosaic ML<li>Cannot be used commercially</ul>"
+    },
+    {
+        "order": "g",
         "md5sum": "e64e74375ce9d36a3d0af3db1523fd0a",
         "name": "Orca",
         "filename": "orca-mini-7b.ggmlv3.q4_0.bin",
@@ -67,7 +80,7 @@
         "url": "https://huggingface.co/TheBloke/orca_mini_7B-GGML/resolve/main/orca-mini-7b.ggmlv3.q4_0.bin"
     },
     {
-        "order": "f",
+        "order": "h",
         "md5sum": "6a087f7f4598fad0bb70e6cb4023645e",
         "name": "Orca (Small)",
         "filename": "orca-mini-3b.ggmlv3.q4_0.bin",
@@ -81,7 +94,7 @@
         "url": "https://huggingface.co/TheBloke/orca_mini_3B-GGML/resolve/main/orca-mini-3b.ggmlv3.q4_0.bin"
     },
     {
-        "order": "g",
+        "order": "i",
         "md5sum": "959b7f65b2d12fd1e3ff99e7493c7a3a",
         "name": "Orca (Large)",
         "filename": "orca-mini-13b.ggmlv3.q4_0.bin",
@@ -95,7 +108,7 @@
         "url": "https://huggingface.co/TheBloke/orca_mini_13B-GGML/resolve/main/orca-mini-13b.ggmlv3.q4_0.bin"
     },
     {
-        "order": "h",
+        "order": "j",
         "md5sum": "29119f8fa11712704c6b22ac5ab792ea",
         "name": "Vicuna",
         "filename": "ggml-vicuna-7b-1.1-q4_2.bin",
@@ -107,7 +120,7 @@
         "description": "<strong>Good small model - trained by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego</strong><br><ul><li>Instruction based<li>Cannot be used commercially</ul>"
     },
     {
-        "order": "i",
+        "order": "k",
         "md5sum": "95999b7b0699e2070af63bf5d34101a8",
         "name": "Vicuna (large)",
         "filename": "ggml-vicuna-13b-1.1-q4_2.bin",
@@ -119,7 +132,7 @@
         "description": "<strong>Good larger model - trained by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego</strong><br><ul><li>Instruction based<li>Cannot be used commercially</ul>"
     },
     {
-        "order": "j",
+        "order": "l",
         "md5sum": "99e6d129745a3f1fb1121abed747b05a",
         "name": "Wizard",
         "filename": "ggml-wizardLM-7B.q4_2.bin",
@@ -131,7 +144,7 @@
         "description": "<strong>Good small model - trained by by Microsoft and Peking University</strong><br><ul><li>Instruction based<li>Cannot be used commercially</ul>"
     },
     {
-        "order": "k",
+        "order": "m",
         "md5sum": "6cb4ee297537c9133bddab9692879de0",
         "name": "Stable Vicuna",
         "filename": "ggml-stable-vicuna-13B.q4_2.bin",
@@ -143,7 +156,7 @@
         "description": "<strong>Trained with RHLF by Stability AI</strong><br><ul><li>Instruction based<li>Cannot be used commercially</ul>"
     },
     {
-        "order": "l",
+        "order": "n",
         "md5sum": "1cfa4958f489f0a0d1ffdf6b37322809",
         "name": "MPT Instruct",
         "filename": "ggml-mpt-7b-instruct.bin",
@@ -156,7 +169,7 @@
         "description": "<strong>Mosaic's instruction model</strong><br><ul><li>Instruction based<li>Trained by Mosaic ML<li>Licensed for commercial use</ul>"
     },
     {
-        "order": "m",
+        "order": "o",
         "md5sum": "120c32a51d020066288df045ef5d52b9",
         "name": "MPT Base",
         "filename": "ggml-mpt-7b-base.bin",
@@ -169,7 +182,7 @@
         "description": "<strong>Trained for text completion with no assistant finetuning</strong><br><ul><li>Completion based<li>Trained by Mosaic ML<li>Licensed for commercial use</ul>"
     },
     {
-        "order": "n",
+        "order": "p",
         "md5sum": "d5eafd5b0bd0d615cfd5fd763f642dfe",
         "name": "Nous Vicuna",
         "filename": "ggml-nous-gpt4-vicuna-13b.bin",
@@ -181,7 +194,7 @@
         "description": "<strong>Trained on ~180,000 instructions</strong><br><ul><li>Instruction based<li>Trained by Nous Research<li>Cannot be used commercially</ul>"
     },
     {
-        "order": "o",
+        "order": "q",
         "md5sum": "489d21fd48840dcb31e5f92f453f3a20",
         "name": "Wizard Uncensored",
         "filename": "wizardLM-13B-Uncensored.ggmlv3.q4_0.bin",
@@ -195,7 +208,7 @@
         "url": "https://huggingface.co/TheBloke/WizardLM-13B-Uncensored-GGML/resolve/main/wizardLM-13B-Uncensored.ggmlv3.q4_0.bin"
     },
     {
-        "order": "p",
+        "order": "r",
         "md5sum": "615890cb571fcaa0f70b2f8d15ef809e",
         "disableGUI": "true",
         "name": "Replit",
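Aside (not part of the commit): each entry above uses the same flat schema (order, md5sum, name, filename, filesize, ramrequired, parameters, quant, type, description, and optionally requires, url, disableGUI). A minimal sketch of reading such entries with Qt's JSON classes follows; QJsonDocument, QJsonArray, and QJsonObject are standard Qt types, and the embedded sample is just the new Falcon entry trimmed down for illustration.

// Sketch only: parse a models.json-style array and print a few fields per entry.
#include <QJsonArray>
#include <QJsonDocument>
#include <QJsonObject>
#include <QDebug>

int main()
{
    // Sample data: the new Falcon entry from the diff, reduced to a few fields.
    const QByteArray sample = R"([
        {
            "order": "b",
            "md5sum": "725f148218a65ce8ebcc724e52f31b49",
            "name": "Falcon",
            "filename": "ggml-model-gpt4all-falcon-q4_0.bin",
            "filesize": "4061641216",
            "type": "Falcon"
        }
    ])";

    const QJsonArray models = QJsonDocument::fromJson(sample).array();
    for (const QJsonValue &v : models) {
        const QJsonObject m = v.toObject();
        qDebug() << m["order"].toString() << m["name"].toString()
                 << m["filename"].toString() << m["md5sum"].toString();
    }
    return 0;
}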