Feat: Integrated Local LLM (Llama 3.2 1B) for Intelligent Correction -- New Core: Added LLMEngine utilizing llama-cpp-python for local private text post-processing. -- Forensic Protocol: Engineered strict system prompts to prevent LLM refusals, censorship, or assistant chatter. -- Three Modes: Grammar, Standard, Rewrite. -- Start/Stop Logic: Consolidated conflicting recording methods. -- Hotkeys: Added dedicated F9 (Correct) vs F8 (Transcribe). -- UI: Updated Settings. -- Build: Updated portable_build.py. -- Docs: Updated README.

This commit is contained in:
Your Name
2026-01-31 01:02:24 +02:00
parent 3137770742
commit baa5e2e69e
10 changed files with 601 additions and 61 deletions

View File

@@ -110,6 +110,7 @@ class UIBridge(QObject):
logAppended = Signal(str) # Emits new log line
settingChanged = Signal(str, 'QVariant') # Carries a setting name plus its new value (QVariant)
modelStatesChanged = Signal() # Notify UI to re-check isModelDownloaded
llmDownloadRequested = Signal() # Emitted by downloadLLM(); the app layer performs the actual fetch
def __init__(self, parent=None):
super().__init__(parent)
@@ -356,11 +357,7 @@ class UIBridge(QObject):
except Exception as e:
logging.error(f"Failed to preload audio devices: {e}")
@Slot()
def toggle_recording(self):
    """Called by UI elements to trigger the app's recording logic.

    Deliberate no-op stub: the main application wires this bridge slot to
    its own start/stop recording handling at runtime.
    """
    # This will be connected to the main app's toggle logic
    pass
@Property(bool, notify=isDownloadingChanged)
def isDownloading(self):
    """Whether a model download is currently in progress (mirrors _is_downloading)."""
    return self._is_downloading
@@ -400,6 +397,16 @@ class UIBridge(QObject):
logging.error(f"Error checking model status: {e}")
return False
@Slot(result=bool)
def isLLMModelDownloaded(self):
    """Return True if the local Llama 3.2 1B GGUF model file exists on disk.

    Never raises: any failure (import error, bad path, I/O) is reported as
    "not downloaded" so QML callers can poll this safely.
    """
    try:
        from src.core.paths import get_models_path
        # Hardcoded check for the 1B model we support
        model_file = (
            get_models_path()
            / "llm"
            / "llama-3.2-1b-instruct"
            / "llama-3.2-1b-instruct-q4_k_m.gguf"
        )
        return model_file.exists()
    except Exception as e:
        # A bare `except:` would also swallow SystemExit/KeyboardInterrupt and
        # hide real failures; narrow to Exception and log for diagnosability.
        logging.debug(f"LLM model status check failed: {e}")
        return False
@Slot(str)
def downloadModel(self, size):
    """Forward a model-download request from the UI to the backend.

    Args:
        size: Model size identifier selected in the settings UI.
    """
    # Relay only — the actual download is handled by whatever is
    # connected to downloadRequested.
    self.downloadRequested.emit(size)
@@ -407,3 +414,7 @@ class UIBridge(QObject):
@Slot()
def notifyModelStatesChanged(self):
    """Re-broadcast modelStatesChanged so QML re-checks model availability."""
    self.modelStatesChanged.emit()
@Slot()
def downloadLLM(self):
    """Emit llmDownloadRequested; the connected app logic performs the fetch."""
    self.llmDownloadRequested.emit()

View File

@@ -315,7 +315,7 @@ Window {
ModernSettingsItem {
label: "Global Hotkey (Transcribe)"
// Exactly one description binding: the diff render left both the old and the
// new line in place, and QML rejects duplicate property assignments.
description: "Standard: Raw transcription"
control: ModernKeySequenceRecorder {
implicitWidth: 240
currentSequence: ui.getSetting("hotkey")
@@ -323,6 +323,16 @@ Window {
}
}
// Hotkey for the enhanced flow: transcription followed by LLM correction
// (dedicated F9 per the commit message — confirm default elsewhere).
ModernSettingsItem {
    label: "Global Hotkey (Correct)"
    description: "Enhanced: Transcribe + AI Correction"
    control: ModernKeySequenceRecorder {
        implicitWidth: 240
        // Persisted under its own key, separate from the plain "hotkey" setting.
        currentSequence: ui.getSetting("hotkey_correct")
        onSequenceChanged: (seq) => ui.setSetting("hotkey_correct", seq)
    }
}
ModernSettingsItem {
label: "Global Hotkey (Translate)"
description: "Press to record a new shortcut (e.g. F10)"
@@ -359,8 +369,8 @@ Window {
showSeparator: false
control: ModernSlider {
Layout.preferredWidth: 200
// Single range/step binding set: the rendered diff left the superseded
// values (to: 6000 / stepSize: 10) alongside the new ones, which would be
// duplicate property assignments in QML. Keep the new range.
from: 10; to: 20000
stepSize: 100
snapMode: Slider.SnapAlways
value: ui.getSetting("typing_speed")
onMoved: ui.setSetting("typing_speed", value)
@@ -845,6 +855,137 @@ Window {
}
}
ModernSettingsSection {
title: "Correction & Rewriting"
Layout.margins: 32
Layout.topMargin: 0
content: ColumnLayout {
width: parent.width
spacing: 0
// Master toggle for LLM post-processing of transcribed text.
ModernSettingsItem {
    label: "Enable Correction"
    description: "Post-process text with Llama 3.2 1B (Adds latency)"
    control: ModernSwitch {
        checked: ui.getSetting("llm_enabled")
        onToggled: ui.setSetting("llm_enabled", checked)
    }
}
// Correction-mode selector; only shown while LLM correction is enabled.
ModernSettingsItem {
    label: "Correction Mode"
    description: "Grammar Fix vs. Complete Rewrite"
    visible: ui.getSetting("llm_enabled")
    control: ModernComboBox {
        width: 140
        model: ["Grammar", "Standard", "Rewrite"]
        // indexOf yields -1 (no selection) when the stored llm_mode is unset
        // or stale; clamp to 0 so the combo always shows a valid mode.
        currentIndex: Math.max(0, model.indexOf(ui.getSetting("llm_mode")))
        onActivated: ui.setSetting("llm_mode", currentText)
    }
}
// LLM Model Status Card
// Status card for the local LLM model: shows whether the GGUF file exists on
// disk and offers a download button plus an inline progress bar.
Rectangle {
    // Explicit id: Timer and Connections are NOT Items — they have no
    // `visible` or `parent` properties of their own, so the original
    // unqualified `visible` / `parent.checkStatus()` bindings did not
    // resolve to this card. All inner references go through llmCard.
    id: llmCard
    Layout.fillWidth: true
    Layout.margins: 12
    Layout.topMargin: 0
    Layout.bottomMargin: 16
    height: 54
    color: "#0a0a0f"
    visible: ui.getSetting("llm_enabled")
    radius: 6
    border.color: SettingsStyle.borderSubtle
    border.width: 1

    // Cached result of ui.isLLMModelDownloaded(); refreshed by the poll timer.
    property bool isDownloaded: false
    // Heuristic: the active download is "ours" when the status text mentions "LLM".
    property bool isDownloading: ui.isDownloading && ui.statusText.indexOf("LLM") !== -1

    function checkStatus() {
        isDownloaded = ui.isLLMModelDownloaded()
    }
    Component.onCompleted: checkStatus()

    // Poll while the card is visible: the model file can appear/disappear on
    // disk outside of our own download flow.
    Timer {
        interval: 2000
        running: llmCard.visible
        repeat: true
        onTriggered: llmCard.checkStatus()
    }
    Connections {
        target: ui
        function onModelStatesChanged() { llmCard.checkStatus() }
        function onIsDownloadingChanged() { llmCard.checkStatus() }
    }

    RowLayout {
        anchors.fill: parent
        anchors.leftMargin: 12
        anchors.rightMargin: 12
        spacing: 12
        Image {
            source: "smart_toy.svg"
            sourceSize: Qt.size(16, 16)
            layer.enabled: true
            layer.effect: MultiEffect {
                colorization: 1.0
                // Accent tint when the model is present, grey otherwise.
                colorizationColor: llmCard.isDownloaded ? SettingsStyle.accent : "#808080"
            }
        }
        ColumnLayout {
            Layout.fillWidth: true
            spacing: 2
            Text {
                text: "Llama 3.2 1B (Instruct)"
                color: "#ffffff"
                font.family: "JetBrains Mono"; font.bold: true
                font.pixelSize: 11
            }
            Text {
                text: llmCard.isDownloaded ? "Ready." : "Model missing (~1.2GB)"
                color: SettingsStyle.textSecondary
                font.family: "JetBrains Mono"; font.pixelSize: 10
            }
        }
        Button {
            id: dlBtn
            text: "Download"
            visible: !llmCard.isDownloaded && !llmCard.isDownloading
            Layout.preferredHeight: 24
            Layout.preferredWidth: 80
            contentItem: Text {
                text: "DOWNLOAD"
                font.pixelSize: 10; font.bold: true; color: "#000000"
                horizontalAlignment: Text.AlignHCenter; verticalAlignment: Text.AlignVCenter
            }
            background: Rectangle {
                color: dlBtn.hovered ? "#ffffff" : SettingsStyle.accent; radius: 4
            }
            onClicked: ui.downloadLLM()
        }
        // Download progress bar driven by ui.downloadProgress (0-100).
        Rectangle {
            visible: llmCard.isDownloading
            Layout.fillWidth: true
            height: 4
            color: "#30ffffff"
            Rectangle {
                width: parent.width * (ui.downloadProgress / 100)
                height: parent.height
                color: SettingsStyle.accent
            }
        }
    }
}
}
}
ModernSettingsSection {
title: "Advanced Decoding"
Layout.margins: 32