Fix spacing issues with deepseek models (#3470)

Signed-off-by: Adam Treat <treat.adam@gmail.com>
Signed-off-by: AT <manyoso@users.noreply.github.com>
AT 2025-02-06 12:04:32 -05:00 committed by GitHub
parent 22ebd42c32
commit 5e7e4b3f78
3 changed files with 6 additions and 3 deletions

@@ -8,6 +8,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/).
 ### Fixed
 - Fix several potential crashes ([#3465](https://github.com/nomic-ai/gpt4all/pull/3465))
+- Fix visual spacing issues with deepseek models ([#3470](https://github.com/nomic-ai/gpt4all/pull/3470))
 
 ## [3.9.0] - 2025-02-04

@@ -198,6 +198,7 @@ GridLayout {
             isError: false
             isThinking: true
             thinkingTime: modelData.thinkingTime
+            visible: modelData.content !== ""
         }
     }
 }

@@ -976,7 +976,8 @@ public:
     {
         Q_UNUSED(bufferIdx)
         try {
-            m_cllm->m_chatModel->setResponseValue(response);
+            QString r = response;
+            m_cllm->m_chatModel->setResponseValue(removeLeadingWhitespace(r));
         } catch (const std::exception &e) {
             // We have a try/catch here because the main thread might have removed the response from
             // the chatmodel by erasing the conversation during the response... the main thread sets
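
removeLeadingWhitespace is an existing helper that the diff calls but never shows. A minimal sketch of a helper consistent with these call sites, assuming it strips only from the front and mutates its argument in place (the project's actual definition may differ):

    #include <QString>

    // Hypothetical sketch of a removeLeadingWhitespace helper matching the
    // call sites above; not the project's actual definition.
    static QString &removeLeadingWhitespace(QString &s)
    {
        qsizetype i = 0;
        while (i < s.size() && s.at(i).isSpace())
            ++i;
        s.remove(0, i); // no-op when the string has no leading whitespace
        return s;
    }

The local copy `r` suggests the helper does mutate its argument, which would be why `response` (presumably passed by const reference) is copied before the call.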
@@ -991,7 +992,7 @@ public:
     bool onRegularResponse() override
     {
         auto respStr = QString::fromUtf8(m_result->response);
-        return onBufferResponse(removeLeadingWhitespace(respStr), 0);
+        return onBufferResponse(respStr, 0);
     }
 
     bool getStopGenerating() const override
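
This hunk moves leading-whitespace stripping out of the regular-response path and into onBufferResponse, so every buffer is normalized in one place. A small self-contained illustration of why that matters for models that stream a separate reasoning buffer (the names and sample data here are made up):

    #include <QString>
    #include <QStringList>

    // Stand-in for the helper sketched above.
    static QString removeLeading(QString s)
    {
        while (!s.isEmpty() && s.front().isSpace())
            s.remove(0, 1);
        return s;
    }

    int main()
    {
        // A deepseek-style reply can arrive as two buffers, each of which the
        // model may begin with a stray newline; normalizing both through one
        // shared path keeps the chat view from rendering blank space before
        // either part.
        QStringList buffers{QStringLiteral("\n<think>reasoning...</think>"),
                            QStringLiteral("\nThe answer is 42.")};
        for (QString &buf : buffers)
            buf = removeLeading(buf);
        Q_ASSERT(buffers[0].startsWith(u'<'));
        Q_ASSERT(buffers[1].startsWith(u'T'));
        return 0;
    }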
@@ -1078,7 +1079,7 @@ auto ChatLLM::promptInternal(
     auto respStr = QString::fromUtf8(result.response);
     if (!respStr.isEmpty() && (std::as_const(respStr).back().isSpace() || finalBuffers.size() > 1)) {
         if (finalBuffers.size() > 1)
-            m_chatModel->setResponseValue(finalBuffers.last());
+            m_chatModel->setResponseValue(finalBuffers.last().trimmed());
         else
             m_chatModel->setResponseValue(respStr.trimmed());
         emit responseChanged();
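
The last hunk makes end-of-generation trimming consistent: when more than one buffer exists, the visible answer lives in the last buffer, so that is the one that must lose its trailing whitespace. A sketch of that decision with hypothetical data, plus a note on std::as_const (the const overload of QString::back() reads the last character without detaching the implicitly shared buffer):

    #include <QString>
    #include <QStringList>
    #include <utility>

    int main()
    {
        // Hypothetical final state: a reasoning buffer plus the visible answer.
        QStringList finalBuffers{QStringLiteral("reasoning..."),
                                 QStringLiteral("The answer is 42. \n")};
        QString respStr = finalBuffers.last();

        // std::as_const selects the const back(), so peeking at the last
        // character cannot force a copy of the shared string data.
        if (!respStr.isEmpty()
            && (std::as_const(respStr).back().isSpace() || finalBuffers.size() > 1)) {
            const QString shown = finalBuffers.size() > 1
                ? finalBuffers.last().trimmed()
                : respStr.trimmed();
            Q_ASSERT(shown == QStringLiteral("The answer is 42."));
        }
        return 0;
    }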