1
0
mirror of https://github.com/nomic-ai/gpt4all.git synced 2025-05-07 07:57:23 +00:00

embllm: stop embedding loop on exit

This is especially important on CPU, when we may spend many hours
embedding a collection of documents.

Signed-off-by: Jared Van Bortel <jared@nomic.ai>
This commit is contained in:
Jared Van Bortel 2025-02-05 18:55:55 -05:00
parent 4e18c81107
commit 1731e7929c

View File

@ -281,6 +281,9 @@ void EmbeddingLLMWorker::docEmbeddingsRequested(const QVector<EmbeddingChunk> &c
std::vector<float> result;
result.resize(chunks.size() * m_model->embeddingSize());
for (int j = 0; j < chunks.size(); j += BATCH_SIZE) {
if (m_stopGenerating)
return;
QMutexLocker locker(&m_mutex);
std::vector batchTexts(texts.begin() + j, texts.begin() + std::min(j + BATCH_SIZE, int(texts.size())));
try {