From 037a9a6ec5807297bd729ec85ee27660fad94427 Mon Sep 17 00:00:00 2001
From: Adam Treat
Date: Sun, 30 Apr 2023 08:02:20 -0400
Subject: [PATCH] Remove these as it is mitigated by repeat penalty and models
 really should train this out.

---
 llm.cpp | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/llm.cpp b/llm.cpp
index e771e716..b7336f47 100644
--- a/llm.cpp
+++ b/llm.cpp
@@ -297,11 +297,6 @@ bool LLMObject::handleResponse(int32_t token, const std::string &response)
     Q_ASSERT(!response.empty());
     m_response.append(response);
     emit responseChanged();
-
-    // Stop generation if we encounter prompt or response tokens
-    QString r = QString::fromStdString(m_response);
-    if (r.contains("### Prompt:") || r.contains("### Response:"))
-        return false;
     return !m_stopGenerating;
 }
 
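
Note: the removed code stopped generation by matching the literal strings
"### Prompt:" / "### Response:" in the accumulated response. The commit
message points to the repeat penalty as the mitigation: tokens the model has
recently emitted get their logits penalized before sampling, which discourages
the model from regurgitating the prompt template. For reference, a minimal
sketch of a llama.cpp-style repeat penalty over a plain logits vector; the
function and parameter names here are illustrative, not taken from llm.cpp:

    #include <cstddef>
    #include <cstdint>
    #include <unordered_set>
    #include <vector>

    // Penalize every token id seen in the recent window so it is less likely
    // to be sampled again. `penalty` > 1.0 (e.g. 1.1) strengthens the effect.
    void apply_repeat_penalty(std::vector<float> &logits,
                              const std::vector<int32_t> &recent_tokens,
                              float penalty)
    {
        std::unordered_set<int32_t> seen(recent_tokens.begin(),
                                         recent_tokens.end());
        for (int32_t id : seen) {
            if (id < 0 || static_cast<size_t>(id) >= logits.size())
                continue;
            float &l = logits[id];
            // Divide positive logits and multiply negative ones, so the
            // token becomes less probable regardless of sign (the convention
            // used by llama.cpp-style samplers).
            l = (l > 0.0f) ? l / penalty : l * penalty;
        }
    }

Handling the two signs asymmetrically keeps the penalty monotone: a plain
division would make already-negative logits *more* likely, which is why
samplers in this family branch on the sign.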