Phase 2: Core transcription pipeline and audio playback
- Implement faster-whisper TranscribeService with word-level timestamps, progress reporting, and hardware auto-detection
- Wire up Rust SidecarManager for Python process lifecycle (spawn, IPC, shutdown)
- Add transcribe_file Tauri command bridging frontend to Python sidecar
- Integrate wavesurfer.js WaveformPlayer with play/pause, skip, seek controls
- Build TranscriptEditor with word-level click-to-seek and active highlighting
- Connect file import flow: prompt → asset load → transcribe → display
- Add typed tauri-bridge service with TranscriptionResult interface
- Add Python tests for hardware detection and transcription result formatting

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
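The diff below covers only the hardware-detection module; the progress-reporting and IPC items are not shown. As a minimal sketch of one plausible wire format, assuming newline-delimited JSON over stdout (the emit_progress helper and the event fields are illustrative, not from this commit):

# Hypothetical sketch, not part of this diff: one JSON progress event
# per stdout line, so a Rust parent process can parse output incrementally.
import json
import sys


def emit_progress(done: float, message: str) -> None:
    """Write a single newline-delimited JSON progress event to stdout."""
    event = {"type": "progress", "done": done, "message": message}
    sys.stdout.write(json.dumps(event) + "\n")
    sys.stdout.flush()


emit_progress(0.5, "transcribing segment 12/24")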
@@ -2,8 +2,74 @@
from __future__ import annotations

# Hardware detection:
# - Check torch.cuda.is_available()
# - Detect VRAM size
# - Detect CPU cores and available RAM
# - Return recommended model configuration
import os
import sys
from dataclasses import dataclass


@dataclass
class HardwareInfo:
    """Detected hardware capabilities."""

    has_cuda: bool = False
    cuda_device_name: str = ""
    vram_mb: int = 0
    ram_mb: int = 0
    cpu_cores: int = 0
    recommended_model: str = "base"
    recommended_device: str = "cpu"
    recommended_compute_type: str = "int8"


def detect_hardware() -> HardwareInfo:
    """Detect available hardware and recommend a model configuration."""
    info = HardwareInfo()

    # CPU info
    info.cpu_cores = os.cpu_count() or 1

    # RAM info (/proc/meminfo exists only on Linux; elsewhere this
    # falls through and leaves ram_mb at 0)
    try:
        with open("/proc/meminfo") as f:
            for line in f:
                if line.startswith("MemTotal:"):
                    # Value is in kB
                    info.ram_mb = int(line.split()[1]) // 1024
                    break
    except (FileNotFoundError, ValueError):
        pass

    # CUDA detection
    try:
        import torch

        if torch.cuda.is_available():
            info.has_cuda = True
            info.cuda_device_name = torch.cuda.get_device_name(0)
            info.vram_mb = torch.cuda.get_device_properties(0).total_memory // (1024 * 1024)
    except ImportError:
        print("[sidecar] torch not available, GPU detection skipped", file=sys.stderr, flush=True)

    # Model recommendation based on hardware
    if info.has_cuda and info.vram_mb >= 8000:
        info.recommended_model = "large-v3-turbo"
        info.recommended_device = "cuda"
        info.recommended_compute_type = "int8"
    elif info.has_cuda and info.vram_mb >= 4000:
        info.recommended_model = "medium"
        info.recommended_device = "cuda"
        info.recommended_compute_type = "int8"
    elif info.ram_mb >= 16000:
        info.recommended_model = "medium"
        info.recommended_device = "cpu"
        info.recommended_compute_type = "int8"
    elif info.ram_mb >= 8000:
        info.recommended_model = "small"
        info.recommended_device = "cpu"
        info.recommended_compute_type = "int8"
    else:
        info.recommended_model = "base"
        info.recommended_device = "cpu"
        info.recommended_compute_type = "int8"

    return info
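As a usage note, here is a hedged sketch of how the recommendation might feed faster-whisper inside the TranscribeService. WhisperModel, word_timestamps, and the per-word start/end/word fields are real faster-whisper API; the transcribe_with_detection wrapper and the returned dict shape are illustrative assumptions, not part of this diff.

# Illustrative wrapper (assumed, not in this commit): consume the
# detected hardware profile when constructing the Whisper model.
from faster_whisper import WhisperModel


def transcribe_with_detection(audio_path: str) -> list[dict]:
    info = detect_hardware()
    model = WhisperModel(
        info.recommended_model,
        device=info.recommended_device,
        compute_type=info.recommended_compute_type,
    )
    # word_timestamps=True yields per-word start/end times, which is
    # what the TranscriptEditor's click-to-seek needs.
    segments, _ = model.transcribe(audio_path, word_timestamps=True)
    return [
        {"word": w.word, "start": w.start, "end": w.end}
        for seg in segments
        for w in (seg.words or [])
    ]

Note that every branch of detect_hardware() recommends int8 compute, which keeps memory use bounded on both CPU and modest GPUs at some cost in precision.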