mirror of
https://github.com/nomic-ai/gpt4all.git
synced 2025-06-25 23:13:06 +00:00
add falcon to chatllm::serialize
This commit is contained in:
parent
198b5e4832
commit
e22dd164d8
@@ -19,6 +19,7 @@
|
|||||||
#define GPTJ_INTERNAL_STATE_VERSION 0
|
#define GPTJ_INTERNAL_STATE_VERSION 0
|
||||||
#define REPLIT_INTERNAL_STATE_VERSION 0
|
#define REPLIT_INTERNAL_STATE_VERSION 0
|
||||||
#define LLAMA_INTERNAL_STATE_VERSION 0
|
#define LLAMA_INTERNAL_STATE_VERSION 0
|
||||||
|
#define FALCON_INTERNAL_STATE_VERSION 0
|
||||||
|
|
||||||
class LLModelStore {
|
class LLModelStore {
|
||||||
public:
|
public:
|
||||||
@@ -570,6 +571,7 @@ bool ChatLLM::serialize(QDataStream &stream, int version)
|
|||||||
case MPT_: stream << MPT_INTERNAL_STATE_VERSION; break;
|
case MPT_: stream << MPT_INTERNAL_STATE_VERSION; break;
|
||||||
case GPTJ_: stream << GPTJ_INTERNAL_STATE_VERSION; break;
|
case GPTJ_: stream << GPTJ_INTERNAL_STATE_VERSION; break;
|
||||||
case LLAMA_: stream << LLAMA_INTERNAL_STATE_VERSION; break;
|
case LLAMA_: stream << LLAMA_INTERNAL_STATE_VERSION; break;
|
||||||
|
case FALCON_: stream << LLAMA_INTERNAL_STATE_VERSION; break;
|
||||||
default: Q_UNREACHABLE();
|
default: Q_UNREACHABLE();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@@ -15,7 +15,7 @@ enum LLModelType {
|
|||||||
LLAMA_,
|
LLAMA_,
|
||||||
CHATGPT_,
|
CHATGPT_,
|
||||||
REPLIT_,
|
REPLIT_,
|
||||||
FALCON_
|
FALCON_,
|
||||||
};
|
};
|
||||||
|
|
||||||
struct LLModelInfo {
|
struct LLModelInfo {
|
||||||
|
Loading…
Reference in New Issue
Block a user