Load models from filepath only.

Adam Treat 2023-04-28 20:15:10 -04:00
parent ca2af100cd
commit 69f92d8ea8
6 changed files with 5 additions and 18 deletions


@@ -91,14 +91,14 @@ bool LLMObject::loadModelPrivate(const QString &modelName)
     if (info.exists()) {
 
         auto fin = std::ifstream(filePath.toStdString(), std::ios::binary);
         uint32_t magic;
         fin.read((char *) &magic, sizeof(magic));
         fin.seekg(0);
-
+        fin.close();
         isGPTJ = magic == 0x67676d6c;
         if (isGPTJ) {
             m_llmodel = new GPTJ;
-            m_llmodel->loadModel(modelName.toStdString(), fin);
+            m_llmodel->loadModel(filePath.toStdString());
         } else {
             m_llmodel = new LLamaModel;
             m_llmodel->loadModel(filePath.toStdString());


@@ -645,16 +645,12 @@ GPTJ::GPTJ()
     d_ptr->modelLoaded = false;
 }
 
-bool GPTJ::loadModel(const std::string &modelPath)
-{
-    std::cerr << "GPTJ ERROR: loading gpt model from file unsupported!\n";
-    return false;
-}
-
-bool GPTJ::loadModel(const std::string &modelPath, std::istream &fin) {
+bool GPTJ::loadModel(const std::string &modelPath) {
     std::mt19937 rng(time(NULL));
     d_ptr->rng = rng;
 
+    auto fin = std::ifstream(modelPath, std::ios::binary);
+
     // load the model
     if (!gptj_model_load(modelPath, fin, d_ptr->model, d_ptr->vocab)) {
         std::cerr << "GPT-J ERROR: failed to load model from " << modelPath;
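
With the stream parameter gone, GPTJ::loadModel opens its own std::ifstream from the path, which is why the caller in the first hunk can close its probe stream right after reading the magic. Note the new body does not check that the stream opened before calling gptj_model_load; a defensive variant might fail early, sketched here with a hypothetical stand-in for the real parser:

    #include <fstream>
    #include <iostream>
    #include <string>

    // Stand-in for gptj_model_load; hypothetical, for illustration only.
    static bool parseModel(std::istream &fin) { return fin.good(); }

    bool loadModelFromPath(const std::string &modelPath)
    {
        std::ifstream fin(modelPath, std::ios::binary);
        if (!fin) { // fail before handing a bad stream to the parser
            std::cerr << "ERROR: failed to open " << modelPath << '\n';
            return false;
        }
        return parseModel(fin);
    }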


@@ -13,7 +13,6 @@ public:
     ~GPTJ();
 
     bool loadModel(const std::string &modelPath) override;
-    bool loadModel(const std::string &modelPath, std::istream &fin) override;
     bool isModelLoaded() const override;
     void prompt(const std::string &prompt,
         std::function<bool(int32_t)> promptCallback,


@@ -31,12 +31,6 @@ LLamaModel::LLamaModel()
     d_ptr->modelLoaded = false;
 }
 
-bool LLamaModel::loadModel(const std::string &modelPath, std::istream &fin)
-{
-    std::cerr << "LLAMA ERROR: loading llama model from stream unsupported!\n";
-    return false;
-}
-
 bool LLamaModel::loadModel(const std::string &modelPath)
 {
     // load the model


@@ -13,7 +13,6 @@ public:
     ~LLamaModel();
 
     bool loadModel(const std::string &modelPath) override;
-    bool loadModel(const std::string &modelPath, std::istream &fin) override;
     bool isModelLoaded() const override;
     void prompt(const std::string &prompt,
         std::function<bool(int32_t)> promptCallback,


@@ -11,7 +11,6 @@ public:
     virtual ~LLModel() {}
 
     virtual bool loadModel(const std::string &modelPath) = 0;
-    virtual bool loadModel(const std::string &modelPath, std::istream &fin) = 0;
     virtual bool isModelLoaded() const = 0;
     struct PromptContext {
         std::vector<float> logits; // logits of current context
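
The net effect on the interface: the pure-virtual stream overload is gone, so both backends are loaded identically through the base class. A short usage sketch of the trimmed interface (the caller is hypothetical; the class shape matches the hunk above):

    #include <string>

    // The abstract interface as it stands after this commit.
    class LLModel {
    public:
        virtual ~LLModel() {}
        virtual bool loadModel(const std::string &modelPath) = 0;
        virtual bool isModelLoaded() const = 0;
    };

    // Hypothetical caller: GPTJ and LLamaModel are interchangeable here,
    // since both now take only a filepath.
    bool loadAndCheck(LLModel &model, const std::string &path)
    {
        return model.loadModel(path) && model.isModelLoaded();
    }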