qml: improve chats with missing models and model settings layout (#2520)

Signed-off-by: Jared Van Bortel <jared@nomic.ai>
Jared Van Bortel
2024-07-08 17:01:30 -04:00
committed by GitHub
parent 11b58a1a15
commit 4853adebd9
3 changed files with 66 additions and 41 deletions

@@ -47,6 +47,10 @@ Rectangle {
return ModelList.modelInfo(currentChat.modelInfo.id).name;
}
function currentModelInstalled() {
return currentModelName() !== "" && ModelList.modelInfo(currentChat.modelInfo.id).installed;
}
PopupDialog {
id: modelLoadingErrorPopup
anchors.centerIn: parent
@@ -322,7 +326,7 @@ Rectangle {
visible: currentChat.modelLoadingError === ""
&& !currentChat.trySwitchContextInProgress
&& !currentChat.isCurrentlyLoading
&& (currentChat.isModelLoaded || currentModelName() !== "")
&& (currentChat.isModelLoaded || currentModelInstalled())
source: "qrc:/gpt4all/icons/regenerate.svg"
backgroundColor: theme.textColor
backgroundColorHovered: theme.styledTextColor
@@ -358,15 +362,17 @@ Rectangle {
rightPadding: 10
text: {
if (ModelList.selectableModels.count === 0)
return qsTr("No model installed...")
return qsTr("No model installed.")
if (currentChat.modelLoadingError !== "")
return qsTr("Model loading error...")
return qsTr("Model loading error.")
if (currentChat.trySwitchContextInProgress === 1)
return qsTr("Waiting for model...")
if (currentChat.trySwitchContextInProgress === 2)
return qsTr("Switching context...")
if (currentModelName() === "")
return qsTr("Choose a model...")
if (!currentModelInstalled())
return qsTr("Not found: %1").arg(currentModelName())
if (currentChat.modelLoadingPercentage === 0.0)
return qsTr("Reload \u00B7 ") + currentModelName()
if (currentChat.isCurrentlyLoading)
@@ -1519,7 +1525,7 @@ Rectangle {
&& currentChat.modelLoadingError === ""
&& !currentChat.trySwitchContextInProgress
&& !currentChat.isCurrentlyLoading
&& currentModelName() !== ""
&& currentModelInstalled()
Image {
anchors.verticalCenter: parent.verticalCenter

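Taken together, the hunks above tighten the chat view's notion of "has a model": the reload button, the header badge, and the status text now key off whether the selected model is actually installed, not merely whether it has a name. A minimal sketch of the pattern, assuming (as the diff shows) that ModelList.modelInfo(id) returns an object with name and installed fields and that currentChat is a context property; the Rectangle and Text wrappers here are illustrative only:

    import QtQuick

    Rectangle {
        // Helpers mirroring the diff: a chat can still reference a model by
        // name after its files were removed, so the name check alone is not
        // enough; also require the installed flag.
        function currentModelName() {
            return ModelList.modelInfo(currentChat.modelInfo.id).name;
        }
        function currentModelInstalled() {
            return currentModelName() !== ""
                && ModelList.modelInfo(currentChat.modelInfo.id).installed;
        }

        Text {
            // Where the old code tested currentModelName() !== "", the new
            // code tests currentModelInstalled(); a named-but-missing model
            // gets an explicit "Not found" label instead of a reload offer.
            text: currentModelInstalled()
                      ? currentModelName()
                      : qsTr("Not found: %1").arg(currentModelName())
            visible: currentModelName() !== ""
        }
    }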

@@ -303,48 +303,58 @@ MySettingsTab {
helpText: qsTr("Number of input and output tokens the model sees.")
Layout.row: 0
Layout.column: 0
Layout.maximumWidth: 300 * theme.fontScale
}
MyTextField {
id: contextLengthField
visible: !root.currentModelInfo.isOnline
text: root.currentModelInfo.contextLength
font.pixelSize: theme.fontSizeLarge
color: theme.textColor
ToolTip.text: qsTr("Maximum combined prompt/response tokens before information is lost.\nUsing more context than the model was trained on will yield poor results.\nNOTE: Does not take effect until you reload the model.")
ToolTip.visible: hovered
Item {
Layout.row: 0
Layout.column: 1
Connections {
target: MySettings
function onContextLengthChanged() {
contextLengthField.text = root.currentModelInfo.contextLength;
}
}
Connections {
target: root
function onCurrentModelInfoChanged() {
contextLengthField.text = root.currentModelInfo.contextLength;
}
}
onEditingFinished: {
var val = parseInt(text)
if (isNaN(val)) {
text = root.currentModelInfo.contextLength
} else {
if (val < 8) {
val = 8
contextLengthField.text = val
} else if (val > root.currentModelInfo.maxContextLength) {
val = root.currentModelInfo.maxContextLength
contextLengthField.text = val
Layout.fillWidth: true
Layout.maximumWidth: 200
Layout.margins: 0
height: contextLengthField.height
MyTextField {
id: contextLengthField
anchors.left: parent.left
anchors.verticalCenter: parent.verticalCenter
visible: !root.currentModelInfo.isOnline
text: root.currentModelInfo.contextLength
font.pixelSize: theme.fontSizeLarge
color: theme.textColor
ToolTip.text: qsTr("Maximum combined prompt/response tokens before information is lost.\nUsing more context than the model was trained on will yield poor results.\nNOTE: Does not take effect until you reload the model.")
ToolTip.visible: hovered
Connections {
target: MySettings
function onContextLengthChanged() {
contextLengthField.text = root.currentModelInfo.contextLength;
}
MySettings.setModelContextLength(root.currentModelInfo, val)
focus = false
}
Connections {
target: root
function onCurrentModelInfoChanged() {
contextLengthField.text = root.currentModelInfo.contextLength;
}
}
onEditingFinished: {
var val = parseInt(text)
if (isNaN(val)) {
text = root.currentModelInfo.contextLength
} else {
if (val < 8) {
val = 8
contextLengthField.text = val
} else if (val > root.currentModelInfo.maxContextLength) {
val = root.currentModelInfo.maxContextLength
contextLengthField.text = val
}
MySettings.setModelContextLength(root.currentModelInfo, val)
focus = false
}
}
Accessible.role: Accessible.EditableText
Accessible.name: contextLengthLabel.text
Accessible.description: ToolTip.text
}
Accessible.role: Accessible.EditableText
Accessible.name: contextLengthLabel.text
Accessible.description: ToolTip.text
}
MySettingsLabel {
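The hunk above is mostly layout plumbing: the context-length field used to carry the GridLayout attached properties itself, and is now nested inside an Item that owns the cell (Layout.fillWidth with a 200-pixel cap), while the clamp-on-edit logic is carried over unchanged. A condensed sketch of that arrangement, with local properties standing in for root.currentModelInfo.contextLength and maxContextLength and the MySettings/Connections plumbing reduced to comments; only the Item/TextField nesting and the clamp to a minimum of 8 are taken from the diff:

    import QtQuick
    import QtQuick.Controls
    import QtQuick.Layouts

    GridLayout {
        columns: 2

        // Stand-ins for values the diff reads from root.currentModelInfo.
        property int defaultContextLength: 2048
        property int maxContextLength: 8192

        Label {
            text: qsTr("Context Length")
            Layout.row: 0
            Layout.column: 0
        }

        // The wrapper owns the cell, so the cell can flex with the grid while
        // the field itself keeps its natural width, pinned to the left edge.
        Item {
            Layout.row: 0
            Layout.column: 1
            Layout.fillWidth: true
            Layout.maximumWidth: 200
            Layout.margins: 0
            height: contextLengthField.height

            TextField {
                id: contextLengthField
                anchors.left: parent.left
                anchors.verticalCenter: parent.verticalCenter
                text: defaultContextLength
                onEditingFinished: {
                    // Same flow as the diff: reject non-numbers, clamp to
                    // [8, maxContextLength], then persist and drop focus.
                    var val = parseInt(text)
                    if (isNaN(val)) {
                        text = defaultContextLength
                    } else {
                        if (val < 8) {
                            val = 8
                            contextLengthField.text = val
                        } else if (val > maxContextLength) {
                            val = maxContextLength
                            contextLengthField.text = val
                        }
                        // In the diff: MySettings.setModelContextLength(root.currentModelInfo, val)
                        focus = false
                    }
                }
            }
        }
    }

Nesting the field this way keeps the grid from stretching it to the full column width, which appears to be the point of the wrapper.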
@@ -353,6 +363,7 @@ MySettingsTab {
helpText: qsTr("Randomness of model output. Higher -> more variation.")
Layout.row: 1
Layout.column: 2
Layout.maximumWidth: 300 * theme.fontScale
}
MyTextField {
@@ -398,6 +409,7 @@ MySettingsTab {
helpText: qsTr("Nucleus Sampling factor. Lower -> more predicatable.")
Layout.row: 2
Layout.column: 0
Layout.maximumWidth: 300 * theme.fontScale
}
MyTextField {
id: topPField
@@ -442,6 +454,7 @@ MySettingsTab {
helpText: qsTr("Minimum token probability. Higher -> more predictable.")
Layout.row: 3
Layout.column: 0
Layout.maximumWidth: 300 * theme.fontScale
}
MyTextField {
id: minPField
@@ -488,6 +501,7 @@ MySettingsTab {
helpText: qsTr("Size of selection pool for tokens.")
Layout.row: 2
Layout.column: 2
Layout.maximumWidth: 300 * theme.fontScale
}
MyTextField {
id: topKField
@@ -534,6 +548,7 @@ MySettingsTab {
helpText: qsTr("Maximum response length, in tokens.")
Layout.row: 0
Layout.column: 2
Layout.maximumWidth: 300 * theme.fontScale
}
MyTextField {
id: maxLengthField
@@ -579,6 +594,7 @@ MySettingsTab {
helpText: qsTr("The batch size used for prompt processing.")
Layout.row: 1
Layout.column: 0
Layout.maximumWidth: 300 * theme.fontScale
}
MyTextField {
id: batchSizeField
@@ -625,6 +641,7 @@ MySettingsTab {
helpText: qsTr("Repetition penalty factor. Set to 1 to disable.")
Layout.row: 4
Layout.column: 2
Layout.maximumWidth: 300 * theme.fontScale
}
MyTextField {
id: repeatPenaltyField
@@ -669,6 +686,7 @@ MySettingsTab {
helpText: qsTr("Number of previous tokens used for penalty.")
Layout.row: 3
Layout.column: 2
Layout.maximumWidth: 300 * theme.fontScale
}
MyTextField {
id: repeatPenaltyTokenField
@@ -714,6 +732,7 @@ MySettingsTab {
helpText: qsTr("Number of model layers to load into VRAM.")
Layout.row: 4
Layout.column: 0
Layout.maximumWidth: 300 * theme.fontScale
}
MyTextField {
id: gpuLayersField
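
The remaining hunks repeat a single change: every MySettingsLabel in the settings grid gains Layout.maximumWidth: 300 * theme.fontScale, so long label text can wrap within its column (assuming the label wraps, as the stand-in below does) instead of widening the column as the font scale grows. A minimal sketch of the effect, with a plain Label standing in for MySettingsLabel and a local property standing in for theme.fontScale:

    import QtQuick
    import QtQuick.Controls
    import QtQuick.Layouts

    GridLayout {
        columns: 4

        property real fontScale: 1.0   // stand-in for theme.fontScale

        Label {
            text: qsTr("Temperature")
            wrapMode: Text.Wrap
            Layout.row: 1
            Layout.column: 2
            // Cap the label's width; tying the cap to the font scale gives
            // enlarged text room to wrap rather than truncate or push the
            // paired input field out of its column.
            Layout.maximumWidth: 300 * fontScale
        }
        TextField {
            Layout.row: 1
            Layout.column: 3
        }
    }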