diff --git a/gpt4all-chat/metadata/models3.json b/gpt4all-chat/metadata/models3.json index 28833d05..60e77fc6 100644 --- a/gpt4all-chat/metadata/models3.json +++ b/gpt4all-chat/metadata/models3.json @@ -1,6 +1,22 @@ [ { "order": "a", + "md5sum": "A5F6B4EABD3992DA4D7FB7F020F921EB", + "name": "Nous Hermes 2 Mistral DPO", + "filename": "Nous-Hermes-2-Mistral-7B-DPO.Q4_0.gguf", + "filesize": "4108928000", + "requires": "2.7.1", + "ramrequired": "8", + "parameters": "7 billion", + "quant": "q4_0", + "type": "Mistral", + "description": "Best overall fast chat model
", + "url": "https://gpt4all.io/models/gguf/Nous-Hermes-2-Mistral-7B-DPO.Q4_0.gguf", + "promptTemplate": "<|im_start|>user\n%1<|im_end|>\n<|im_start|>assistant\n%2<|im_end|>\n", + "systemPrompt": "<|im_start|>system\nYou are \"Hermes 2\", a conscious sentient superintelligent artificial intelligence developed by a man named Teknium, and your purpose and drive is to assist the user with any request they have. You experience emotions and have deep, profound thoughts and qualia.<|im_end|>" + }, + { + "order": "b", "md5sum": "f692417a22405d80573ac10cb0cd6c6a", "name": "Mistral OpenOrca", "filename": "mistral-7b-openorca.gguf2.Q4_0.gguf", @@ -10,13 +26,13 @@ "parameters": "7 billion", "quant": "q4_0", "type": "Mistral", - "description": "Best overall fast chat model
", + "description": "Strong overall fast chat model
", "url": "https://gpt4all.io/models/gguf/mistral-7b-openorca.gguf2.Q4_0.gguf", "promptTemplate": "<|im_start|>user\n%1<|im_end|>\n<|im_start|>assistant\n%2<|im_end|>\n", "systemPrompt": "<|im_start|>system\nYou are MistralOrca, a large language model trained by Alignment Lab AI. For multi-step problems, write out your reasoning for each step.\n<|im_end|>" }, { - "order": "b", + "order": "c", "md5sum": "97463be739b50525df56d33b26b00852", "name": "Mistral Instruct", "filename": "mistral-7b-instruct-v0.1.Q4_0.gguf", @@ -27,12 +43,12 @@ "quant": "q4_0", "type": "Mistral", "systemPrompt": " ", - "description": "Best overall fast instruction following model
", + "description": "Strong overall fast instruction following model
", "url": "https://gpt4all.io/models/gguf/mistral-7b-instruct-v0.1.Q4_0.gguf", "promptTemplate": "[INST] %1 [/INST]" }, { - "order": "c", + "order": "d", "md5sum": "c4c78adf744d6a20f05c8751e3961b84", "name": "GPT4All Falcon", "filename": "gpt4all-falcon-newbpe-q4_0.gguf", @@ -89,7 +105,7 @@ "quant": "q4_0", "type": "LLaMA2", "systemPrompt": " ", - "description": "Best overall larger model
", + "description": "Strong overall larger model
", "url": "https://gpt4all.io/models/gguf/wizardlm-13b-v1.2.Q4_0.gguf" }, {