v1.0.2 - style prompting, drop grammar correction
@@ -15,6 +15,11 @@ import numpy as np
 from src.core.config import ConfigManager
 from src.core.paths import get_models_path
 
+try:
+    import torch
+except ImportError:
+    torch = None
+
 # Import directly - valid since we are now running in the full environment
 from faster_whisper import WhisperModel
 
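Note on the guarded import added above: it keeps the module importable on machines without torch, which means torch may be None at runtime, so every later CUDA call must be guarded. A minimal sketch of the resulting pattern (the helper name is illustrative, not from this commit):

try:
    import torch
except ImportError:
    torch = None  # optional dependency missing

def cuda_available() -> bool:
    # Hypothetical helper, not in the diff: False when torch is absent
    return torch is not None and torch.cuda.is_available()
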
@@ -153,7 +158,14 @@ class WhisperTranscriber:
             for segment in segments:
                 text_result += segment.text + " "
 
-            return text_result.strip()
+            text_result = text_result.strip()
+
+            # Low VRAM Mode: Unload Whisper Model immediately
+            if self.config.get("unload_models_after_use"):
+                self.unload_model()
+
+            logging.info(f"Final Transcription Output: '{text_result}'")
+            return text_result
 
         except Exception as e:
             logging.error(f"Transcription failed: {e}")
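The unload_models_after_use flag checked above is read through ConfigManager; its real API isn't shown in this diff, but a dict-backed .get() stand-in like the following would satisfy the call site (the key name comes from the diff, everything else is assumed):

# Hypothetical, minimal stand-in for src.core.config.ConfigManager;
# only the .get("unload_models_after_use") call site is confirmed here.
class ConfigManager:
    def __init__(self, settings=None):
        self._settings = settings or {}

    def get(self, key, default=None):
        return self._settings.get(key, default)

config = ConfigManager({"unload_models_after_use": True})  # Low VRAM Mode on
assert config.get("unload_models_after_use") is True
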
@@ -172,3 +184,21 @@ class WhisperTranscriber:
             return True
 
         return False
+
+    def unload_model(self):
+        """
+        Unloads the model to free memory.
+        """
+        if self.model:
+            del self.model
+
+        self.model = None
+        self.current_model_size = None
+
+        # Force garbage collection and release cached GPU memory;
+        # torch may be None when the optional dependency is missing
+        import gc
+        gc.collect()
+        if torch is not None and torch.cuda.is_available():
+            torch.cuda.empty_cache()
+
+        logging.info("Whisper Model unloaded (Low VRAM Mode).")