mirror of
https://github.com/nomic-ai/gpt4all.git
synced 2025-09-04 01:54:49 +00:00
llamamodel: add gemma model support
Signed-off-by: Jared Van Bortel <jared@nomic.ai>
This commit is contained in:
Submodule gpt4all-backend/llama.cpp-mainline updated: 822a9c894e...7d4ced8505
@@ -519,8 +519,8 @@ DLL_EXPORT bool magic_match(const char *fname) {
     bool valid = true;

     static const std::vector<const char *> known_arches {
-        "baichuan", "bloom", "codeshell", "falcon", "gpt2", "llama", "mpt", "orion", "persimmon", "phi2", "plamo",
-        "qwen", "qwen2", "refact", "stablelm", "starcoder"
+        "baichuan", "bloom", "codeshell", "falcon", "gemma", "gpt2", "llama", "mpt", "orion", "persimmon", "phi2",
+        "plamo", "qwen", "qwen2", "refact", "stablelm", "starcoder"
     };

     if (std::find(known_arches.begin(), known_arches.end(), arch) == known_arches.end()) {
Reference in New Issue
Block a user