From e22dd164d8d663d945d71bc630a8824efcfd2e44 Mon Sep 17 00:00:00 2001
From: Aaron Miller
Date: Tue, 27 Jun 2023 08:19:33 -0700
Subject: [PATCH] add falcon to chatllm::serialize

---
 gpt4all-chat/chatllm.cpp | 2 ++
 gpt4all-chat/chatllm.h   | 2 +-
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/gpt4all-chat/chatllm.cpp b/gpt4all-chat/chatllm.cpp
index 8a006469..544949e3 100644
--- a/gpt4all-chat/chatllm.cpp
+++ b/gpt4all-chat/chatllm.cpp
@@ -19,6 +19,7 @@
 #define GPTJ_INTERNAL_STATE_VERSION 0
 #define REPLIT_INTERNAL_STATE_VERSION 0
 #define LLAMA_INTERNAL_STATE_VERSION 0
+#define FALCON_INTERNAL_STATE_VERSION 0
 
 class LLModelStore {
 public:
@@ -570,6 +571,7 @@ bool ChatLLM::serialize(QDataStream &stream, int version)
         case MPT_: stream << MPT_INTERNAL_STATE_VERSION; break;
         case GPTJ_: stream << GPTJ_INTERNAL_STATE_VERSION; break;
         case LLAMA_: stream << LLAMA_INTERNAL_STATE_VERSION; break;
+        case FALCON_: stream << FALCON_INTERNAL_STATE_VERSION; break;
         default: Q_UNREACHABLE();
         }
     }
diff --git a/gpt4all-chat/chatllm.h b/gpt4all-chat/chatllm.h
index b3fe9b49..6f9c8ea6 100644
--- a/gpt4all-chat/chatllm.h
+++ b/gpt4all-chat/chatllm.h
@@ -15,7 +15,7 @@ enum LLModelType {
     LLAMA_,
     CHATGPT_,
     REPLIT_,
-    FALCON_
+    FALCON_,
 };
 
 struct LLModelInfo {
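
Below is a minimal, self-contained sketch of the stamp-then-check pattern this patch extends: the writer records the model type followed by that model's internal state version, so a reader can detect state written under a newer layout and bail out instead of misparsing it. This is not gpt4all code; readModelState() and the bare main() are hypothetical names used only for illustration.

    // Hedged sketch, not the actual ChatLLM implementation.
    #include <QByteArray>
    #include <QDataStream>
    #include <QDebug>
    #include <QIODevice>

    enum LLModelType { MPT_, GPTJ_, LLAMA_, CHATGPT_, REPLIT_, FALCON_ };

    #define FALCON_INTERNAL_STATE_VERSION 0

    // Hypothetical reader: consumes the model type and per-model state
    // version that the writer stamped into the stream.
    static bool readModelState(QDataStream &stream)
    {
        int modelType = 0;
        int stateVersion = 0;
        stream >> modelType >> stateVersion;

        // Refuse state written under a newer (unknown) layout for this model.
        if (modelType == FALCON_ && stateVersion > FALCON_INTERNAL_STATE_VERSION) {
            qWarning() << "unsupported falcon state version" << stateVersion;
            return false;
        }
        return true;
    }

    int main()
    {
        QByteArray blob;
        QDataStream out(&blob, QIODevice::WriteOnly);
        out << int(FALCON_) << FALCON_INTERNAL_STATE_VERSION; // mimic the writer side

        QDataStream in(&blob, QIODevice::ReadOnly);
        return readModelState(in) ? 0 : 1;
    }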