mirror of https://github.com/nomic-ai/gpt4all.git
add requiredMem method to llmodel impls
Most of these can just shortcut out of the model loading logic. llama is a bit worse to deal with because we submodule it, so I have to at least parse the hparams; then I just use the size on disk as an estimate for the mem size (which seems reasonable, since we mmap() the llama files anyway).
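For illustration, here is a minimal sketch of the size-on-disk estimate described above, written as a standalone C++ function. requiredMemEstimate is a hypothetical name, not the repository's code, and the real llama path also parses the hparams before falling back to the file size:

    #include <cstddef>
    #include <filesystem>
    #include <string>
    #include <system_error>

    // Estimate the memory a model will need by its size on disk.
    // Since the llama files are mmap()'d, the on-disk size is a
    // reasonable proxy for the memory the weights will occupy.
    std::size_t requiredMemEstimate(const std::string &modelPath)
    {
        std::error_code ec;
        const auto bytes = std::filesystem::file_size(modelPath, ec);
        if (ec)
            return 0; // unreadable path: no estimate available
        return static_cast<std::size_t>(bytes);
    }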
@@ -20,6 +20,12 @@ ChatGPT::ChatGPT()
 {
 }
 
+size_t ChatGPT::requiredMem(const std::string &modelPath)
+{
+    Q_UNUSED(modelPath);
+    return 0;
+}
+
 bool ChatGPT::loadModel(const std::string &modelPath)
 {
     Q_UNUSED(modelPath);
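ChatGPT is an API-backed model with no local weights, so its override can shortcut the loading logic entirely and return 0. A sketch of how a caller might use the new method, with a minimal stand-in for the LLModel interface (tryLoad and availableBytes are hypothetical names, not the repository's API):

    #include <cstddef>
    #include <string>

    // Minimal stand-in for the LLModel interface touched by this commit.
    struct LLModel {
        virtual ~LLModel() = default;
        virtual bool loadModel(const std::string &modelPath) = 0;
        virtual size_t requiredMem(const std::string &modelPath) = 0;
    };

    // requiredMem is cheap (no full load), so a caller can reject a model
    // that would not fit before paying the cost of loadModel.
    bool tryLoad(LLModel &model, const std::string &modelPath,
                 std::size_t availableBytes)
    {
        if (model.requiredMem(modelPath) > availableBytes)
            return false;
        return model.loadModel(modelPath);
    }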
@@ -16,6 +16,7 @@ public:
 
     bool loadModel(const std::string &modelPath) override;
     bool isModelLoaded() const override;
+    size_t requiredMem(const std::string &modelPath) override;
     size_t stateSize() const override;
     size_t saveState(uint8_t *dest) const override;
     size_t restoreState(const uint8_t *src) override;