mirror of
https://github.com/nomic-ai/gpt4all.git
synced 2025-08-31 07:59:29 +00:00
Remove these as they are mitigated by repeat penalty, and models really should train this out.
This commit is contained in:
llm.cpp (5 lines changed)

@@ -297,11 +297,6 @@ bool LLMObject::handleResponse(int32_t token, const std::string &response)
     Q_ASSERT(!response.empty());
     m_response.append(response);
     emit responseChanged();
-
-    // Stop generation if we encounter prompt or response tokens
-    QString r = QString::fromStdString(m_response);
-    if (r.contains("### Prompt:") || r.contains("### Response:"))
-        return false;
     return !m_stopGenerating;
 }
|
Reference in New Issue
Block a user