mirror of
https://github.com/nomic-ai/gpt4all.git
synced 2025-06-19 20:24:41 +00:00
chatllm: do not pass nullptr as response callback (#2995)
Signed-off-by: Jared Van Bortel <jared@nomic.ai>
This commit is contained in:
parent
50949d304e
commit
364d9772e4
@@ -9,6 +9,9 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/).

### Added

- Add bm25 hybrid search to localdocs ([#2969](https://github.com/nomic-ai/gpt4all/pull/2969))
### Fixed

- Fix a crash when attempting to continue a chat loaded from disk ([#2995](https://github.com/nomic-ai/gpt4all/pull/2995))
## [3.3.0] - 2024-09-20

### Added
@@ -1314,8 +1314,16 @@ void ChatLLM::processRestoreStateFromText()
         auto &response = *it++;
         Q_ASSERT(response.first != "Prompt: ");

-        m_llModelInfo.model->prompt(prompt.second.toStdString(), promptTemplate.toStdString(), promptFunc, nullptr,
-                                    /*allowContextShift*/ true, m_ctx, false, response.second.toUtf8().constData());
+        // FIXME(jared): this doesn't work well with the "regenerate" button since we are not incrementing
+        // m_promptTokens or m_promptResponseTokens
+        m_llModelInfo.model->prompt(
+            prompt.second.toStdString(), promptTemplate.toStdString(),
+            promptFunc, /*responseFunc*/ [](auto &&...) { return true; },
+            /*allowContextShift*/ true,
+            m_ctx,
+            /*special*/ false,
+            response.second.toUtf8().constData()
+        );
     }

     if (!m_stopGenerating) {
|
Loading…
Reference in New Issue
Block a user