Mirror of https://github.com/nomic-ai/gpt4all.git, synced 2025-09-06 11:00:48 +00:00

Dlopen better implementation management (Version 2)

@@ -1,23 +1,35 @@
 #ifndef LLMODEL_H
 #define LLMODEL_H
+#include "dlhandle.h"
+
 #include <string>
 #include <functional>
 #include <vector>
+#include <string_view>
+#include <fstream>
 #include <cstdint>
 
+
 class LLModel {
 public:
-    explicit LLModel() {}
-    virtual ~LLModel() {}
+    class Implementation {
+        LLModel *(*construct_)();
 
-    static LLModel *construct(const std::string &modelPath, std::string buildVariant = "default");
+    public:
+        Implementation(Dlhandle&&);
 
-    virtual bool loadModel(const std::string &modelPath) = 0;
-    virtual bool isModelLoaded() const = 0;
-    virtual size_t stateSize() const { return 0; }
-    virtual size_t saveState(uint8_t */*dest*/) const { return 0; }
-    virtual size_t restoreState(const uint8_t */*src*/) { return 0; }
+        static bool isImplementation(const Dlhandle&);
 
+        std::string_view modelType, buildVariant;
+        bool (*magicMatch)(std::ifstream& f);
+        Dlhandle dlhandle;
+
+        LLModel *construct() const {
+            auto fres = construct_();
+            fres->implementation = this;
+            return fres;
+        }
+    };
     struct PromptContext {
         std::vector<float> logits;      // logits of current context
         std::vector<int32_t> tokens;    // current tokens in the context window
@@ -31,8 +43,17 @@ public:
         float repeat_penalty = 1.10f;
         int32_t repeat_last_n = 64;     // last n tokens to penalize
         float contextErase = 0.75f;     // percent of context to erase if we exceed the context
-                                        // window
+                                        // window
     };
+
+    explicit LLModel() {}
+    virtual ~LLModel() {}
+
+    virtual bool loadModel(const std::string &modelPath) = 0;
+    virtual bool isModelLoaded() const = 0;
+    virtual size_t stateSize() const { return 0; }
+    virtual size_t saveState(uint8_t */*dest*/) const { return 0; }
+    virtual size_t restoreState(const uint8_t */*src*/) { return 0; }
     virtual void prompt(const std::string &prompt,
                         std::function<bool(int32_t)> promptCallback,
                         std::function<bool(int32_t, const std::string&)> responseCallback,
@@ -41,15 +62,18 @@ public:
     virtual void setThreadCount(int32_t /*n_threads*/) {}
     virtual int32_t threadCount() const { return 1; }
 
-    const char *getModelType() const {
-        return modelType;
+    const Implementation& getImplementation() const {
+        return *implementation;
     }
 
+    static const std::vector<Implementation>& getImplementationList();
+    static const Implementation *getImplementation(std::ifstream& f, const std::string& buildVariant);
+    static LLModel *construct(const std::string &modelPath, std::string buildVariant = "default");
+
 protected:
+    const Implementation *implementation;
+
     virtual void recalculateContext(PromptContext &promptCtx,
                                     std::function<bool(bool)> recalculate) = 0;
-
-    const char *modelType;
 };
-
 #endif // LLMODEL_H
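
Read on its own, the new interface is a small plugin registry: each Implementation wraps a Dlhandle plus the metadata needed to pick a backend for a given model file (modelType, buildVariant, magicMatch), and the static LLModel::construct() and getImplementation() helpers walk that registry. The caller-side sketch below is not part of the commit; it uses only the declarations visible in the header above, a placeholder model path, and two assumptions: that LLModel::construct() returns nullptr when no implementation matches, and that it returns an unloaded instance (the header does not show whether it also loads, so loadModel() is called explicitly).

#include "llmodel.h"

#include <iostream>
#include <string>

int main() {
    const std::string modelPath = "/path/to/model.bin"; // placeholder, not a real file

    // List whatever backends the loader has registered via dlopen.
    for (const auto &impl : LLModel::getImplementationList())
        std::cout << impl.modelType << " (" << impl.buildVariant << ")\n";

    // Ask the registry to pick a backend for this file and construct an instance.
    // Assumption: nullptr means no implementation matched.
    LLModel *model = LLModel::construct(modelPath, "default");
    if (!model || !model->loadModel(modelPath)) {
        std::cerr << "no matching implementation, or the model failed to load\n";
        delete model;
        return 1;
    }

    // The instance remembers which Implementation produced it.
    const LLModel::Implementation &impl = model->getImplementation();
    std::cout << "loaded with backend: " << impl.modelType
              << ", build variant: " << impl.buildVariant << '\n';

    delete model;
    return 0;
}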