Mirror of https://github.com/nomic-ai/gpt4all.git, synced 2025-09-08 03:49:10 +00:00.
Commit: Implement configurable context length (#1749)
This commit is contained in:
@@ -90,6 +90,7 @@ void MySettings::restoreModelDefaults(const ModelInfo &model)
|
||||
setModelTopK(model, model.m_topK);;
|
||||
setModelMaxLength(model, model.m_maxLength);
|
||||
setModelPromptBatchSize(model, model.m_promptBatchSize);
|
||||
setModelContextLength(model, model.m_contextLength);
|
||||
setModelRepeatPenalty(model, model.m_repeatPenalty);
|
||||
setModelRepeatPenaltyTokens(model, model.m_repeatPenaltyTokens);
|
||||
setModelPromptTemplate(model, model.m_promptTemplate);
|
||||
@@ -280,6 +281,28 @@ void MySettings::setModelPromptBatchSize(const ModelInfo &m, int s, bool force)
|
||||
emit promptBatchSizeChanged(m);
|
||||
}
|
||||
|
||||
int MySettings::modelContextLength(const ModelInfo &m) const
|
||||
{
|
||||
QSettings setting;
|
||||
setting.sync();
|
||||
return setting.value(QString("model-%1").arg(m.id()) + "/contextLength", m.m_contextLength).toInt();
|
||||
}
|
||||
|
||||
void MySettings::setModelContextLength(const ModelInfo &m, int l, bool force)
|
||||
{
|
||||
if (modelContextLength(m) == l && !force)
|
||||
return;
|
||||
|
||||
QSettings setting;
|
||||
if (m.m_contextLength == l && !m.isClone)
|
||||
setting.remove(QString("model-%1").arg(m.id()) + "/contextLength");
|
||||
else
|
||||
setting.setValue(QString("model-%1").arg(m.id()) + "/contextLength", l);
|
||||
setting.sync();
|
||||
if (!force)
|
||||
emit contextLengthChanged(m);
|
||||
}
|
||||
|
||||
double MySettings::modelRepeatPenalty(const ModelInfo &m) const
|
||||
{
|
||||
QSettings setting;
|
||||
|
Reference in New Issue
Block a user