mirror of
https://github.com/nomic-ai/gpt4all.git
synced 2025-05-07 07:57:23 +00:00
embllm: stop embedding loop on exit
This is especially important on CPU, when we may spend many hours embedding a collection of documents. Signed-off-by: Jared Van Bortel <jared@nomic.ai>
This commit is contained in:
parent
4e18c81107
commit
1731e7929c
@ -281,6 +281,9 @@ void EmbeddingLLMWorker::docEmbeddingsRequested(const QVector<EmbeddingChunk> &c
|
||||
std::vector<float> result;
|
||||
result.resize(chunks.size() * m_model->embeddingSize());
|
||||
for (int j = 0; j < chunks.size(); j += BATCH_SIZE) {
|
||||
if (m_stopGenerating)
|
||||
return;
|
||||
|
||||
QMutexLocker locker(&m_mutex);
|
||||
std::vector batchTexts(texts.begin() + j, texts.begin() + std::min(j + BATCH_SIZE, int(texts.size())));
|
||||
try {
|
||||
|
Loading…
Reference in New Issue
Block a user