Mirror of https://github.com/nomic-ai/gpt4all.git
qml: improve chats with missing models and model settings layout (#2520)
Signed-off-by: Jared Van Bortel <jared@nomic.ai>
@@ -303,48 +303,58 @@ MySettingsTab {
             helpText: qsTr("Number of input and output tokens the model sees.")
             Layout.row: 0
             Layout.column: 0
+            Layout.maximumWidth: 300 * theme.fontScale
         }
-        MyTextField {
-            id: contextLengthField
-            visible: !root.currentModelInfo.isOnline
-            text: root.currentModelInfo.contextLength
-            font.pixelSize: theme.fontSizeLarge
-            color: theme.textColor
-            ToolTip.text: qsTr("Maximum combined prompt/response tokens before information is lost.\nUsing more context than the model was trained on will yield poor results.\nNOTE: Does not take effect until you reload the model.")
-            ToolTip.visible: hovered
-            Layout.row: 0
-            Layout.column: 1
-            Connections {
-                target: MySettings
-                function onContextLengthChanged() {
-                    contextLengthField.text = root.currentModelInfo.contextLength;
-                }
-            }
-            Connections {
-                target: root
-                function onCurrentModelInfoChanged() {
-                    contextLengthField.text = root.currentModelInfo.contextLength;
-                }
-            }
-            onEditingFinished: {
-                var val = parseInt(text)
-                if (isNaN(val)) {
-                    text = root.currentModelInfo.contextLength
-                } else {
-                    if (val < 8) {
-                        val = 8
-                        contextLengthField.text = val
-                    } else if (val > root.currentModelInfo.maxContextLength) {
-                        val = root.currentModelInfo.maxContextLength
-                        contextLengthField.text = val
-                    }
-                    MySettings.setModelContextLength(root.currentModelInfo, val)
-                    focus = false
-                }
-            }
-            Accessible.role: Accessible.EditableText
-            Accessible.name: contextLengthLabel.text
-            Accessible.description: ToolTip.text
-        }
+        Item {
+            Layout.row: 0
+            Layout.column: 1
+            Layout.fillWidth: true
+            Layout.maximumWidth: 200
+            Layout.margins: 0
+            height: contextLengthField.height
+
+            MyTextField {
+                id: contextLengthField
+                anchors.left: parent.left
+                anchors.verticalCenter: parent.verticalCenter
+                visible: !root.currentModelInfo.isOnline
+                text: root.currentModelInfo.contextLength
+                font.pixelSize: theme.fontSizeLarge
+                color: theme.textColor
+                ToolTip.text: qsTr("Maximum combined prompt/response tokens before information is lost.\nUsing more context than the model was trained on will yield poor results.\nNOTE: Does not take effect until you reload the model.")
+                ToolTip.visible: hovered
+                Connections {
+                    target: MySettings
+                    function onContextLengthChanged() {
+                        contextLengthField.text = root.currentModelInfo.contextLength;
+                    }
+                }
+                Connections {
+                    target: root
+                    function onCurrentModelInfoChanged() {
+                        contextLengthField.text = root.currentModelInfo.contextLength;
+                    }
+                }
+                onEditingFinished: {
+                    var val = parseInt(text)
+                    if (isNaN(val)) {
+                        text = root.currentModelInfo.contextLength
+                    } else {
+                        if (val < 8) {
+                            val = 8
+                            contextLengthField.text = val
+                        } else if (val > root.currentModelInfo.maxContextLength) {
+                            val = root.currentModelInfo.maxContextLength
+                            contextLengthField.text = val
+                        }
+                        MySettings.setModelContextLength(root.currentModelInfo, val)
+                        focus = false
+                    }
+                }
+                Accessible.role: Accessible.EditableText
+                Accessible.name: contextLengthLabel.text
+                Accessible.description: ToolTip.text
+            }
+        }
 
         MySettingsLabel {
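The hunk above fixes the layout by moving the Layout.* attached properties off the free-standing MyTextField and onto a wrapper Item: the wrapper claims the grid cell and caps its width, while the field merely anchors inside it, so the cell can flex without stretching the text field itself. A minimal sketch of the pattern, using stock Qt Quick controls in place of GPT4All's MyTextField:

    import QtQuick
    import QtQuick.Controls
    import QtQuick.Layouts

    GridLayout {
        columns: 2

        Label { text: "Context Length" }

        // The Item owns the cell geometry; the field only anchors inside it,
        // so the cell can grow up to 200px without resizing the field.
        Item {
            Layout.fillWidth: true
            Layout.maximumWidth: 200
            height: field.height

            TextField {
                id: field
                anchors.left: parent.left
                anchors.verticalCenter: parent.verticalCenter
            }
        }
    }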
@@ -353,6 +363,7 @@ MySettingsTab {
             helpText: qsTr("Randomness of model output. Higher -> more variation.")
             Layout.row: 1
             Layout.column: 2
+            Layout.maximumWidth: 300 * theme.fontScale
         }
 
         MyTextField {
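This hunk, and every remaining hunk in the commit, applies the same one-line change: the settings label gets Layout.maximumWidth: 300 * theme.fontScale, so the cap scales with the user's font size and long help text wraps instead of widening the whole grid column. A sketch of the idea, with a hypothetical theme object standing in for GPT4All's theme singleton:

    import QtQuick
    import QtQuick.Controls
    import QtQuick.Layouts

    ColumnLayout {
        // Hypothetical stand-in for the app-wide theme singleton.
        QtObject {
            id: theme
            property real fontScale: 1.5
        }

        Label {
            text: "Temperature: randomness of model output"
            wrapMode: Text.Wrap
            Layout.fillWidth: true
            // A fixed 300px cap would crowd the text at large font scales;
            // scaling the cap keeps the label readable at any size.
            Layout.maximumWidth: 300 * theme.fontScale
        }
    }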
@@ -398,6 +409,7 @@ MySettingsTab {
             helpText: qsTr("Nucleus Sampling factor. Lower -> more predictable.")
             Layout.row: 2
             Layout.column: 0
+            Layout.maximumWidth: 300 * theme.fontScale
         }
         MyTextField {
             id: topPField
@@ -442,6 +454,7 @@ MySettingsTab {
             helpText: qsTr("Minimum token probability. Higher -> more predictable.")
             Layout.row: 3
             Layout.column: 0
+            Layout.maximumWidth: 300 * theme.fontScale
         }
         MyTextField {
             id: minPField
@@ -488,6 +501,7 @@ MySettingsTab {
             helpText: qsTr("Size of selection pool for tokens.")
             Layout.row: 2
             Layout.column: 2
+            Layout.maximumWidth: 300 * theme.fontScale
         }
         MyTextField {
             id: topKField
@@ -534,6 +548,7 @@ MySettingsTab {
             helpText: qsTr("Maximum response length, in tokens.")
             Layout.row: 0
             Layout.column: 2
+            Layout.maximumWidth: 300 * theme.fontScale
         }
         MyTextField {
             id: maxLengthField
@@ -579,6 +594,7 @@ MySettingsTab {
             helpText: qsTr("The batch size used for prompt processing.")
             Layout.row: 1
             Layout.column: 0
+            Layout.maximumWidth: 300 * theme.fontScale
         }
         MyTextField {
             id: batchSizeField
@@ -625,6 +641,7 @@ MySettingsTab {
             helpText: qsTr("Repetition penalty factor. Set to 1 to disable.")
             Layout.row: 4
             Layout.column: 2
+            Layout.maximumWidth: 300 * theme.fontScale
         }
         MyTextField {
             id: repeatPenaltyField
@@ -669,6 +686,7 @@ MySettingsTab {
             helpText: qsTr("Number of previous tokens used for penalty.")
             Layout.row: 3
             Layout.column: 2
+            Layout.maximumWidth: 300 * theme.fontScale
         }
         MyTextField {
             id: repeatPenaltyTokenField
@@ -714,6 +732,7 @@ MySettingsTab {
             helpText: qsTr("Number of model layers to load into VRAM.")
             Layout.row: 4
             Layout.column: 0
+            Layout.maximumWidth: 300 * theme.fontScale
         }
         MyTextField {
             id: gpuLayersField
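For reference, the editing handler carried over into the new Item wrapper in the first hunk clamps typed values to a valid range before persisting them. A condensed sketch of that logic (the 8-token floor comes from the diff; maxValue stands in for root.currentModelInfo.maxContextLength, and the value shown is illustrative):

    TextField {
        id: contextField
        property int minValue: 8      // floor used by the handler in the diff
        property int maxValue: 4096   // stand-in for maxContextLength

        onEditingFinished: {
            var val = parseInt(text)
            if (isNaN(val)) {
                text = minValue       // non-numeric input: restore a valid value
            } else {
                val = Math.min(Math.max(val, minValue), maxValue)
                text = val            // reflect the clamped value in the field
                // persisting (MySettings.setModelContextLength) would happen here
                focus = false
            }
        }
    }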