Mirror of https://github.com/nomic-ai/gpt4all.git (synced 2025-06-28 16:27:31 +00:00)
Add debug output for chatllm model loading and fix the order of removing the
dummy chat when no models are restored.

parent 3a039c8dc1
commit fb464bb60e
@@ -231,8 +231,13 @@ void ChatListModel::restoreChat(Chat *chat)
 void ChatListModel::chatsRestoredFinished()
 {
     if (m_dummyChat) {
-        removeChat(m_dummyChat);
+        beginResetModel();
+        Chat *dummy = m_dummyChat;
         m_dummyChat = nullptr;
+        m_chats.clear();
+        addChat();
+        delete dummy;
+        endResetModel();
     }
 
     if (m_chats.isEmpty())
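The fix replaces the single removeChat() call with a full model reset: the chat list is rebuilt inside one beginResetModel()/endResetModel() pair, m_dummyChat is cleared, and the dummy chat is only deleted after it is no longer reachable through the model. A minimal sketch of that Qt reset pattern, using a hypothetical ItemListModel rather than the repository's ChatListModel:

```cpp
#include <QAbstractListModel>
#include <QStringList>

// Hypothetical model, used only to illustrate the beginResetModel()/endResetModel()
// pattern the ChatListModel fix relies on; it is not part of the gpt4all codebase.
class ItemListModel : public QAbstractListModel
{
public:
    int rowCount(const QModelIndex &parent = QModelIndex()) const override
    {
        return parent.isValid() ? 0 : int(m_items.count());
    }

    QVariant data(const QModelIndex &index, int role = Qt::DisplayRole) const override
    {
        if (!index.isValid() || role != Qt::DisplayRole)
            return QVariant();
        return m_items.at(index.row());
    }

    // Replace a placeholder entry with a fresh default entry in one reset:
    // views detach from the old rows at beginResetModel() and re-read the
    // model only after endResetModel(), so they never see stale rows.
    void replacePlaceholder()
    {
        beginResetModel();
        m_items.clear();
        m_items.append(QStringLiteral("New Item"));
        endResetModel();
    }

private:
    QStringList m_items;
};
```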
chatllm.cpp (29 changed lines)
@@ -100,6 +100,13 @@ bool ChatLLM::loadModel(const QString &modelName)
         m_llmodel->loadModel(filePath.toStdString());
     }
 
+    restoreState();
+
+#if defined(DEBUG)
+    qDebug() << "chatllm modelLoadedChanged" << m_chat->id();
+    fflush(stdout);
+#endif
+
     emit isModelLoadedChanged();
 
     if (isFirstLoad)
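Each of the chatllm.cpp hunks adds the same kind of trace: a qDebug() call wrapped in #if defined(DEBUG), so the logging compiles away entirely unless the build defines DEBUG. A small standalone sketch of that guard (traceChatEvent is a hypothetical helper, not part of the repository):

```cpp
#include <QDebug>
#include <QString>
#include <cstdio>

// Hypothetical helper showing the compile-time guarded tracing used in the
// chatllm.cpp hunks: with DEBUG undefined the whole body compiles away.
static void traceChatEvent(const QString &event, const QString &chatId)
{
#if defined(DEBUG)
    qDebug() << "chatllm" << event << chatId;
    fflush(stdout);   // the hunks flush stdout so traces appear immediately
#else
    Q_UNUSED(event)
    Q_UNUSED(chatId)
#endif
}
```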
@@ -200,6 +207,9 @@ bool ChatLLM::handlePrompt(int32_t token)
 {
     // m_promptResponseTokens and m_responseLogits are related to last prompt/response not
     // the entire context window which we can reset on regenerate prompt
+#if defined(DEBUG)
+    qDebug() << "chatllm prompt process" << m_chat->id() << token;
+#endif
     ++m_promptResponseTokens;
     return !m_stopGenerating;
 }
@@ -280,6 +290,9 @@ bool ChatLLM::prompt(const QString &prompt, const QString &prompt_template, int3
 
 void ChatLLM::unloadModel()
 {
+#if defined(DEBUG)
+    qDebug() << "chatllm unloadModel" << m_chat->id();
+#endif
     saveState();
     delete m_llmodel;
     m_llmodel = nullptr;
@@ -288,12 +301,14 @@ void ChatLLM::unloadModel()
 
 void ChatLLM::reloadModel(const QString &modelName)
 {
+#if defined(DEBUG)
+    qDebug() << "chatllm reloadModel" << m_chat->id();
+#endif
     if (modelName.isEmpty()) {
         loadDefaultModel();
     } else {
         loadModel(modelName);
     }
-    restoreState();
 }
 
 void ChatLLM::generateName()
@@ -367,6 +382,9 @@ bool ChatLLM::serialize(QDataStream &stream)
     saveState();
     QByteArray compressed = qCompress(m_state);
     stream << compressed;
+#if defined(DEBUG)
+    qDebug() << "chatllm serialize" << m_chat->id() << m_state.size();
+#endif
     return stream.status() == QDataStream::Ok;
 }
 
@@ -392,6 +410,9 @@ bool ChatLLM::deserialize(QDataStream &stream)
     QByteArray compressed;
     stream >> compressed;
     m_state = qUncompress(compressed);
+#if defined(DEBUG)
+    qDebug() << "chatllm deserialize" << m_chat->id();
+#endif
     return stream.status() == QDataStream::Ok;
 }
 
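serialize() and deserialize() above move the captured model state through the QDataStream as a single qCompress()-ed QByteArray. A minimal, self-contained sketch of that round trip, assuming only Qt Core (the state contents here are made up):

```cpp
#include <QByteArray>
#include <QDataStream>
#include <QDebug>
#include <QIODevice>

int main()
{
    const QByteArray state(1024, 'x');        // stand-in for the saved model state

    // Serialize: compress the state and stream it as one QByteArray.
    QByteArray blob;
    {
        QDataStream out(&blob, QIODevice::WriteOnly);
        out << qCompress(state);
    }

    // Deserialize: read the compressed payload back and expand it.
    QDataStream in(&blob, QIODevice::ReadOnly);
    QByteArray compressed;
    in >> compressed;
    const QByteArray restored = qUncompress(compressed);

    qDebug() << "round trip ok:" << (restored == state)
             << "stream ok:" << (in.status() == QDataStream::Ok);
    return 0;
}
```

Streaming the compressed blob as one QByteArray is what lets deserialize() read it back with a single operator>> and then check stream.status(), exactly as the hunks do.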
@@ -402,6 +423,9 @@ void ChatLLM::saveState()
 
     const size_t stateSize = m_llmodel->stateSize();
     m_state.resize(stateSize);
+#if defined(DEBUG)
+    qDebug() << "chatllm saveState" << m_chat->id() << "size:" << m_state.size();
+#endif
     m_llmodel->saveState(static_cast<uint8_t*>(reinterpret_cast<void*>(m_state.data())));
 }
 
@@ -410,5 +434,8 @@ void ChatLLM::restoreState()
     if (!isModelLoaded() || m_state.isEmpty())
         return;
 
+#if defined(DEBUG)
+    qDebug() << "chatllm restoreState" << m_chat->id() << "size:" << m_state.size();
+#endif
     m_llmodel->restoreState(static_cast<const uint8_t*>(reinterpret_cast<void*>(m_state.data())));
 }
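The saveState()/restoreState() hunks also show how the QByteArray buffer is handed to the backend: QByteArray::data() returns char*, so the code routes through void* to obtain the uint8_t* the backend expects. A small sketch of that buffer handling against a stand-in backend (Backend, saveRaw, and restoreRaw are hypothetical, not the real LLModel API):

```cpp
#include <QByteArray>
#include <cstdint>
#include <cstring>
#include <string>

// Hypothetical stand-in for the backend: exposes its state size and copies raw
// bytes in and out, roughly like the stateSize()/saveState()/restoreState()
// calls used in the hunks above.
struct Backend {
    size_t stateSize() const { return m_blob.size(); }
    void saveRaw(uint8_t *dest) const { std::memcpy(dest, m_blob.data(), m_blob.size()); }
    void restoreRaw(const uint8_t *src) { std::memcpy(&m_blob[0], src, m_blob.size()); }
    std::string m_blob = "opaque-model-state";
};

int main()
{
    Backend backend;

    QByteArray state;
    state.resize(int(backend.stateSize()));   // size the buffer first, as saveState() does

    // char* -> void* -> uint8_t*: the same cast chain used in chatllm.cpp.
    backend.saveRaw(static_cast<uint8_t*>(reinterpret_cast<void*>(state.data())));
    backend.restoreRaw(static_cast<const uint8_t*>(reinterpret_cast<const void*>(state.constData())));
    return 0;
}
```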
@@ -60,7 +60,6 @@ struct gptj_buffer {
     }
 
     ~gptj_buffer() {
-        std::cout << "yes we are cleaning up" << std::endl;
         fflush(stdout);
         delete[] addr;
     }