mirror of https://github.com/nomic-ai/gpt4all.git
Handle edge cases when generating embeddings (#1215)
* Handle edge cases when generating embeddings

* Improve Python handling & add llmodel_c.h note

  - In the Python bindings, fail fast with a ValueError when the text is empty
  - Advise authors of other bindings to do likewise in llmodel_c.h
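As a rough illustration of the Python-side check described above, here is a minimal sketch. The function name generate_embedding and the stubbed-out native call are assumptions for illustration, not necessarily the bindings' exact API.

from typing import List


def generate_embedding(text: str) -> List[float]:
    """Hypothetical binding-side wrapper showing the fail-fast check."""
    if not text:
        # Reject None/empty input in Python rather than letting the
        # C layer return nullptr later on.
        raise ValueError("Text must not be None or empty")
    # A real binding would call llmodel_embedding() here; stubbed out
    # so the sketch stays self-contained.
    return []


# Both of these raise ValueError before any native code would run.
for bad in ("", None):
    try:
        generate_embedding(bad)
    except ValueError as err:
        print(f"rejected {bad!r}: {err}")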
@@ -168,10 +168,14 @@ void llmodel_prompt(llmodel_model model, const char *prompt,
 
 float *llmodel_embedding(llmodel_model model, const char *text, size_t *embedding_size)
 {
+    if (model == nullptr || text == nullptr || !strlen(text)) {
+        *embedding_size = 0;
+        return nullptr;
+    }
     LLModelWrapper *wrapper = reinterpret_cast<LLModelWrapper*>(model);
     std::vector<float> embeddingVector = wrapper->llModel->embedding(text);
     float *embedding = (float *)malloc(embeddingVector.size() * sizeof(float));
-    if(embedding == nullptr) {
+    if (embedding == nullptr) {
         *embedding_size = 0;
         return nullptr;
     }
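For binding authors consuming this C API, a hedged ctypes sketch of the caller side follows. The shared-library filename, the bare model handle, and the llmodel_free_embedding cleanup call are assumptions about the build rather than the exact gpt4all Python binding.

import ctypes

# Hypothetical shared-library path; a real binding locates the backend itself.
lib = ctypes.CDLL("./libllmodel.so")

lib.llmodel_embedding.restype = ctypes.POINTER(ctypes.c_float)
lib.llmodel_embedding.argtypes = [
    ctypes.c_void_p,                  # llmodel_model
    ctypes.c_char_p,                  # const char *text
    ctypes.POINTER(ctypes.c_size_t),  # size_t *embedding_size
]
lib.llmodel_free_embedding.argtypes = [ctypes.POINTER(ctypes.c_float)]


def embed(model: ctypes.c_void_p, text: str) -> list:
    # Empty text should already have been rejected with a ValueError
    # (see the sketch above); the C layer now also guards against it.
    size = ctypes.c_size_t(0)
    ptr = lib.llmodel_embedding(model, text.encode("utf-8"), ctypes.byref(size))
    if not ptr or size.value == 0:
        # llmodel_embedding signals failure with nullptr and size 0.
        raise RuntimeError("embedding generation failed")
    try:
        return ptr[:size.value]           # copy the floats into a Python list
    finally:
        lib.llmodel_free_embedding(ptr)   # release the malloc'd buffer

Checking both the returned pointer and *embedding_size mirrors the edge-case handling added in the diff above.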