implement local Nomic Embed via llama.cpp (#2086)
Signed-off-by: Jared Van Bortel <jared@nomic.ai>
@@ -11,15 +11,18 @@
 #include "llmodel.h"
 
 struct LLamaPrivate;
+struct EmbModelSpec;
 
 class LLamaModel : public LLModel {
 public:
     LLamaModel();
     ~LLamaModel();
 
-    bool supportsEmbedding() const override { return false; }
-    bool supportsCompletion() const override { return true; }
+    bool supportsEmbedding() const override { return m_supportsEmbedding; }
+    bool supportsCompletion() const override { return m_supportsCompletion; }
     bool loadModel(const std::string &modelPath, int n_ctx, int ngl) override;
-    bool isModelBlacklisted(const std::string &modelPath) override;
+    bool isModelBlacklisted(const std::string &modelPath) const override;
+    bool isEmbeddingModel(const std::string &modelPath) const override;
     bool isModelLoaded() const override;
     size_t requiredMem(const std::string &modelPath, int n_ctx, int ngl) override;
     size_t stateSize() const override;
@@ -29,12 +32,22 @@ public:
     int32_t threadCount() const override;
     std::vector<GPUDevice> availableGPUDevices(size_t memoryRequired) const override;
     bool initializeGPUDevice(size_t memoryRequired, const std::string &name) const override;
-    bool initializeGPUDevice(int device, std::string *unavail_reason) const override;
+    bool initializeGPUDevice(int device, std::string *unavail_reason = nullptr) const override;
     bool hasGPUDevice() override;
    bool usingGPUDevice() override;
 
+    size_t embeddingSize() const override;
+    // user-specified prefix
+    void embed(const std::vector<std::string> &texts, float *embeddings, std::optional<std::string> prefix,
+               int dimensionality = -1, bool doMean = true, bool atlas = false) override;
+    // automatic prefix
+    void embed(const std::vector<std::string> &texts, float *embeddings, bool isRetrieval, int dimensionality = -1,
+               bool doMean = true, bool atlas = false) override;
+
 private:
     std::unique_ptr<LLamaPrivate> d_ptr;
+    bool m_supportsEmbedding = false;
+    bool m_supportsCompletion = false;
 
 protected:
     std::vector<Token> tokenize(PromptContext &ctx, const std::string &str, bool special) const override;
@@ -44,9 +57,11 @@ protected:
     int32_t contextLength() const override;
     const std::vector<Token> &endTokens() const override;
     bool shouldAddBOS() const override;
+
     int32_t maxContextLength(std::string const &modelPath) const override;
     int32_t layerCount(std::string const &modelPath) const override;
 
+    void embedInternal(const std::vector<std::string> &texts, float *embeddings, std::string prefix, int dimensionality,
+                       bool doMean, bool atlas, const EmbModelSpec *spec);
 };
 
 #endif // LLAMAMODEL_H
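With the diff applied, the public embedding surface is small: detect an embedding model, load it, size an output buffer from embeddingSize(), and call one of the embed() overloads. A minimal caller sketch, inferred only from the declarations above; the header name llamamodel.h, the model file name, and the n_ctx/ngl values are assumptions for illustration, not part of this commit:

#include "llamamodel.h"  // assumed name of the header shown above

#include <iostream>
#include <string>
#include <vector>

int main() {
    LLamaModel model;
    std::string path = "nomic-embed-text-v1.f16.gguf";  // hypothetical local GGUF file

    // Embedding support is per-model; isEmbeddingModel() and the
    // m_supportsEmbedding-backed supportsEmbedding() are new in this commit.
    if (!model.isEmbeddingModel(path))
        return 1;
    if (!model.loadModel(path, /*n_ctx*/ 2048, /*ngl*/ 100) || !model.supportsEmbedding())
        return 1;

    std::vector<std::string> texts { "What is llama.cpp?", "GGUF is a file format for ggml models." };
    std::vector<float> out(texts.size() * model.embeddingSize());

    // Automatic-prefix overload: isRetrieval = true marks the texts as search
    // queries, false as documents to be searched. dimensionality, doMean, and
    // atlas keep their declared defaults (-1, true, false).
    model.embed(texts, out.data(), /*isRetrieval*/ true);

    std::cout << "embedded " << texts.size() << " texts, "
              << model.embeddingSize() << " floats each\n";
    return 0;
}

The other overload takes the prefix explicitly, e.g. model.embed(texts, out.data(), std::optional<std::string>("search_query")). Note the wrapper is needed: a bare string literal would convert to bool via the standard pointer-to-bool conversion and select the isRetrieval overload instead. Which prefix strings a given model accepts is defined by EmbModelSpec, which this header only forward-declares.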