Cross-platform distribution, UI improvements, and performance optimizations

- PyInstaller frozen sidecar: spec file, build script, and ffmpeg path resolver
  for self-contained distribution without Python prerequisites
- Dual-mode sidecar launcher: frozen binary (production) with dev mode fallback
- Parallel transcription + diarization pipeline (~30-40% faster)
- GPU auto-detection for diarization (CUDA when available)
- Async run_pipeline command for real-time progress event delivery
- Web Audio API backend for instant playback and seeking (see the sketch after this list)
- OpenAI-compatible provider replacing LiteLLM client-side routing
- Cross-platform RAM detection (Linux/macOS/Windows)
- Settings: speaker count hint, token reveal toggles, dark dropdown styling
- Loading splash screen, flexbox layout fix for viewport overflow
- Gitea Actions CI/CD pipeline (Linux, Windows, macOS ARM)
- Updated README and CLAUDE.md documentation
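
To make the Web Audio API item above concrete, the sketch below shows the general approach: decode the file once into an AudioBuffer, then treat every seek as restarting a cheap AudioBufferSourceNode at an offset. This is an illustration under assumptions, not code from this commit; the class and member names (AudioPlayer, loadFromBytes) are invented, and it assumes the raw audio bytes were already fetched (for example via a Tauri command).

// Minimal sketch: decode once up front, then seek by restarting a source node.
// AudioPlayer and loadFromBytes are illustrative names, not from this repo.
class AudioPlayer {
  private ctx = new AudioContext();
  private buffer: AudioBuffer | null = null;
  private source: AudioBufferSourceNode | null = null;
  private startedAt = 0; // ctx.currentTime when playback last started
  private offset = 0;    // position within the buffer, in seconds

  async loadFromBytes(bytes: ArrayBuffer): Promise<void> {
    // One decode per file; afterwards seeking involves no I/O at all.
    this.buffer = await this.ctx.decodeAudioData(bytes);
  }

  play(from = this.offset): void {
    if (!this.buffer) return;
    this.stop();
    void this.ctx.resume(); // contexts can start suspended until user interaction
    this.source = this.ctx.createBufferSource();
    this.source.buffer = this.buffer;
    this.source.connect(this.ctx.destination);
    this.source.start(0, from); // begin immediately at the requested offset
    this.startedAt = this.ctx.currentTime;
    this.offset = from;
  }

  seek(seconds: number): void {
    // Instant: just restart a new source node at the new offset.
    this.play(seconds);
  }

  stop(): void {
    this.source?.stop();
    this.source?.disconnect();
    this.source = null;
  }

  get currentTime(): number {
    return this.source ? this.offset + (this.ctx.currentTime - this.startedAt) : this.offset;
  }
}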

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Author: Claude
Date: 2026-03-20 21:33:43 -07:00
parent 42ccd3e21d
commit 58faa83cb3
27 changed files with 1301 additions and 283 deletions

@@ -8,6 +8,8 @@ export interface AppSettings {
  openai_model: string;
  anthropic_model: string;
  litellm_model: string;
  litellm_api_key: string;
  litellm_api_base: string;
  local_model_path: string;
  local_binary_path: string;
  transcription_model: string;
@@ -15,6 +17,7 @@ export interface AppSettings {
  transcription_language: string;
  skip_diarization: boolean;
  hf_token: string;
  num_speakers: number | null;
}

const defaults: AppSettings = {
@@ -24,6 +27,8 @@ const defaults: AppSettings = {
  openai_model: 'gpt-4o-mini',
  anthropic_model: 'claude-sonnet-4-6',
  litellm_model: 'gpt-4o-mini',
  litellm_api_key: '',
  litellm_api_base: '',
  local_model_path: '',
  local_binary_path: 'llama-server',
  transcription_model: 'base',
@@ -31,6 +36,7 @@ const defaults: AppSettings = {
  transcription_language: '',
  skip_diarization: false,
  hf_token: '',
  num_speakers: null,
};

export const settings = writable<AppSettings>({ ...defaults });
@@ -47,4 +53,20 @@ export async function loadSettings(): Promise<void> {
export async function saveSettings(s: AppSettings): Promise<void> {
  settings.set(s);
  await invoke('save_settings', { settings: s });

  // Configure the AI provider in the Python sidecar
  const configMap: Record<string, Record<string, string>> = {
    openai: { api_key: s.openai_api_key, model: s.openai_model },
    anthropic: { api_key: s.anthropic_api_key, model: s.anthropic_model },
    litellm: { api_key: s.litellm_api_key, api_base: s.litellm_api_base, model: s.litellm_model },
    local: { model: s.local_model_path, base_url: 'http://localhost:8080' },
  };
  const config = configMap[s.ai_provider];
  if (config) {
    try {
      await invoke('ai_configure', { provider: s.ai_provider, config });
    } catch {
      // Sidecar may not be running yet — provider will be configured on first use
    }
  }
}
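
For context, a hypothetical call site for the new saveSettings flow could look like the sketch below. The import path, the function name applyLiteLLMGateway, and the endpoint and key values are assumptions for illustration only; the provider key 'litellm' matches the configMap keys above.

import { get } from 'svelte/store';
// The store module path is assumed; adjust to wherever the settings store lives.
import { settings, saveSettings } from '$lib/settings';

async function applyLiteLLMGateway(): Promise<void> {
  const next = {
    ...get(settings),
    ai_provider: 'litellm',
    litellm_api_base: 'https://llm-gateway.example.com/v1', // hypothetical OpenAI-compatible endpoint
    litellm_api_key: 'sk-example',                          // placeholder credential
    num_speakers: 2,                                        // optional diarization hint
  };
  // Persists via the Tauri backend and, when the sidecar is up, pushes the provider config to it.
  await saveSettings(next);
}

Note that saveSettings intentionally swallows ai_configure failures, so settings still persist locally even when the sidecar has not started yet; the provider is then configured on first use.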