diff --git a/gpt4all-chat/download.cpp b/gpt4all-chat/download.cpp
index 9a68d495..e91e1406 100644
--- a/gpt4all-chat/download.cpp
+++ b/gpt4all-chat/download.cpp
@@ -13,6 +13,8 @@
 #include
 #include
 
+#define USE_LOCAL_MODELSJSON
+
 class MyDownload: public Download { };
 Q_GLOBAL_STATIC(MyDownload, downloadInstance)
 Download *Download::globalInstance()
@@ -93,7 +95,11 @@ bool Download::isFirstStart() const
 
 void Download::updateModelList()
 {
+#if defined(USE_LOCAL_MODELSJSON)
+    QUrl jsonUrl("file://" + QDir::homePath() + "/dev/large_language_models/gpt4all/gpt4all-chat/metadata/models.json");
+#else
     QUrl jsonUrl("http://gpt4all.io/models/models.json");
+#endif
     QNetworkRequest request(jsonUrl);
     QSslConfiguration conf = request.sslConfiguration();
     conf.setPeerVerifyMode(QSslSocket::VerifyNone);
diff --git a/gpt4all-chat/metadata/models.json b/gpt4all-chat/metadata/models.json
index 15854e03..22522167 100644
--- a/gpt4all-chat/metadata/models.json
+++ b/gpt4all-chat/metadata/models.json
@@ -15,16 +15,16 @@
   },
   {
     "order": "b",
-    "md5sum": "756249d3d6abe23bde3b1ae272628640",
-    "name": "MPT Chat",
-    "filename": "ggml-mpt-7b-chat.bin",
-    "filesize": "4854401050",
-    "requires": "2.4.1",
+    "md5sum": "725f148218a65ce8ebcc724e52f31b49",
+    "name": "Falcon",
+    "filename": "ggml-model-gpt4all-falcon-q4_0.bin",
+    "filesize": "4061641216",
     "ramrequired": "8",
     "parameters": "7 billion",
     "quant": "q4_0",
-    "type": "MPT",
-    "description": "Best overall smaller model"
+    "type": "Falcon",
+    "description": "Best overall smaller model",
+    "url": "https://huggingface.co/nomic-ai/gpt4all-falcon-ggml/resolve/main/ggml-model-gpt4all-falcon-q4_0.bin"
   },
   {
     "order": "c",
@@ -36,10 +36,10 @@
     "parameters": "7 billion",
     "quant": "q4_0",
     "type": "GPT-J",
-    "description": "Best overall for commercial usage"
+    "description": "Creative model can be used for commercial purposes"
   },
   {
-    "order": "d",
+    "order": "e",
     "md5sum": "11d9f060ca24575a2c303bdc39952486",
     "name": "Snoozy",
     "filename": "GPT4All-13B-snoozy.ggmlv3.q4_0.bin",
@@ -53,7 +53,20 @@
     "url": "https://huggingface.co/TheBloke/GPT4All-13B-snoozy-GGML/resolve/main/GPT4All-13B-snoozy.ggmlv3.q4_0.bin"
   },
   {
-    "order": "e",
+    "order": "f",
+    "md5sum": "756249d3d6abe23bde3b1ae272628640",
+    "name": "MPT Chat",
+    "filename": "ggml-mpt-7b-chat.bin",
+    "filesize": "4854401050",
+    "requires": "2.4.1",
+    "ramrequired": "8",
+    "parameters": "7 billion",
+    "quant": "q4_0",
+    "type": "MPT",
+    "description": "Best overall smaller model"
+  },
+  {
+    "order": "g",
     "md5sum": "e64e74375ce9d36a3d0af3db1523fd0a",
     "name": "Orca",
     "filename": "orca-mini-7b.ggmlv3.q4_0.bin",
@@ -67,7 +80,7 @@
     "url": "https://huggingface.co/TheBloke/orca_mini_7B-GGML/resolve/main/orca-mini-7b.ggmlv3.q4_0.bin"
   },
   {
-    "order": "f",
+    "order": "h",
     "md5sum": "6a087f7f4598fad0bb70e6cb4023645e",
     "name": "Orca (Small)",
     "filename": "orca-mini-3b.ggmlv3.q4_0.bin",
@@ -81,7 +94,7 @@
     "url": "https://huggingface.co/TheBloke/orca_mini_3B-GGML/resolve/main/orca-mini-3b.ggmlv3.q4_0.bin"
   },
   {
-    "order": "g",
+    "order": "i",
     "md5sum": "959b7f65b2d12fd1e3ff99e7493c7a3a",
     "name": "Orca (Large)",
     "filename": "orca-mini-13b.ggmlv3.q4_0.bin",
@@ -95,7 +108,7 @@
     "url": "https://huggingface.co/TheBloke/orca_mini_13B-GGML/resolve/main/orca-mini-13b.ggmlv3.q4_0.bin"
   },
   {
-    "order": "h",
+    "order": "j",
     "md5sum": "29119f8fa11712704c6b22ac5ab792ea",
     "name": "Vicuna",
     "filename": "ggml-vicuna-7b-1.1-q4_2.bin",
@@ -107,7 +120,7 @@
     "description": "Good small model - trained by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego"
   },
   {
-    "order": "i",
+    "order": "k",
     "md5sum": "95999b7b0699e2070af63bf5d34101a8",
     "name": "Vicuna (large)",
     "filename": "ggml-vicuna-13b-1.1-q4_2.bin",
@@ -119,7 +132,7 @@
     "description": "Good larger model - trained by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego"
   },
   {
-    "order": "j",
+    "order": "l",
     "md5sum": "99e6d129745a3f1fb1121abed747b05a",
     "name": "Wizard",
     "filename": "ggml-wizardLM-7B.q4_2.bin",
@@ -131,7 +144,7 @@
     "description": "Good small model - trained by by Microsoft and Peking University"
   },
   {
-    "order": "k",
+    "order": "m",
     "md5sum": "6cb4ee297537c9133bddab9692879de0",
     "name": "Stable Vicuna",
     "filename": "ggml-stable-vicuna-13B.q4_2.bin",
@@ -143,7 +156,7 @@
     "description": "Trained with RHLF by Stability AI"
   },
   {
-    "order": "l",
+    "order": "n",
     "md5sum": "1cfa4958f489f0a0d1ffdf6b37322809",
     "name": "MPT Instruct",
     "filename": "ggml-mpt-7b-instruct.bin",
@@ -156,7 +169,7 @@
     "description": "Mosaic's instruction model"
   },
   {
-    "order": "m",
+    "order": "o",
     "md5sum": "120c32a51d020066288df045ef5d52b9",
     "name": "MPT Base",
     "filename": "ggml-mpt-7b-base.bin",
@@ -169,7 +182,7 @@
     "description": "Trained for text completion with no assistant finetuning"
   },
   {
-    "order": "n",
+    "order": "p",
     "md5sum": "d5eafd5b0bd0d615cfd5fd763f642dfe",
     "name": "Nous Vicuna",
     "filename": "ggml-nous-gpt4-vicuna-13b.bin",
@@ -181,7 +194,7 @@
     "description": "Trained on ~180,000 instructions"
   },
   {
-    "order": "o",
+    "order": "q",
     "md5sum": "489d21fd48840dcb31e5f92f453f3a20",
     "name": "Wizard Uncensored",
     "filename": "wizardLM-13B-Uncensored.ggmlv3.q4_0.bin",
@@ -195,7 +208,7 @@
     "url": "https://huggingface.co/TheBloke/WizardLM-13B-Uncensored-GGML/resolve/main/wizardLM-13B-Uncensored.ggmlv3.q4_0.bin"
   },
   {
-    "order": "p",
+    "order": "r",
     "md5sum": "615890cb571fcaa0f70b2f8d15ef809e",
     "disableGUI": "true",
     "name": "Replit",