Fix Ollama settings on startup + video extraction UX
All checks were successful
Release / Bump version and tag (push) Successful in 3s
Release / Build App (macOS) (push) Successful in 1m18s
Release / Build App (Linux) (push) Successful in 3m44s
Release / Build App (Windows) (push) Successful in 3m57s

AI provider:
- Extract configureAIProvider() from saveSettings for reuse
- Call it on app startup after sidecar is ready (was only called on Save)
- Call it after first-time sidecar download completes
- Sidecar now receives correct Ollama URL/model immediately

Video extraction:
- Hide ffmpeg console window on Windows (CREATE_NO_WINDOW flag)
- Show "Extracting audio from video..." overlay with spinner during extraction
- UI stays responsive while ffmpeg runs

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
Claude
2026-03-23 05:30:12 -07:00
parent 8faa336cbc
commit aa319eb823
3 changed files with 74 additions and 10 deletions

View File

@@ -1,6 +1,9 @@
use std::path::PathBuf; use std::path::PathBuf;
use std::process::Command; use std::process::Command;
#[cfg(target_os = "windows")]
use std::os::windows::process::CommandExt;
/// Extract audio from a video file to a WAV file using ffmpeg. /// Extract audio from a video file to a WAV file using ffmpeg.
/// Returns the path to the extracted audio file. /// Returns the path to the extracted audio file.
#[tauri::command] #[tauri::command]
@@ -23,8 +26,8 @@ pub fn extract_audio(file_path: String) -> Result<String, String> {
// Find ffmpeg — check sidecar extract dir first, then system PATH // Find ffmpeg — check sidecar extract dir first, then system PATH
let ffmpeg = find_ffmpeg().ok_or("ffmpeg not found. Install ffmpeg or ensure it's in PATH.")?; let ffmpeg = find_ffmpeg().ok_or("ffmpeg not found. Install ffmpeg or ensure it's in PATH.")?;
let status = Command::new(&ffmpeg) let mut cmd = Command::new(&ffmpeg);
.args([ cmd.args([
"-y", // Overwrite output "-y", // Overwrite output
"-i", "-i",
&file_path, &file_path,
@@ -38,7 +41,13 @@ pub fn extract_audio(file_path: String) -> Result<String, String> {
]) ])
.arg(output.to_str().unwrap()) .arg(output.to_str().unwrap())
.stdout(std::process::Stdio::null()) .stdout(std::process::Stdio::null())
.stderr(std::process::Stdio::piped()) .stderr(std::process::Stdio::piped());
// Hide the console window on Windows (CREATE_NO_WINDOW = 0x08000000)
#[cfg(target_os = "windows")]
cmd.creation_flags(0x08000000);
let status = cmd
.status() .status()
.map_err(|e| format!("Failed to run ffmpeg: {e}"))?; .map_err(|e| format!("Failed to run ffmpeg: {e}"))?;

View File

@@ -52,11 +52,7 @@ export async function loadSettings(): Promise<void> {
} }
} }
export async function saveSettings(s: AppSettings): Promise<void> { export async function configureAIProvider(s: AppSettings): Promise<void> {
settings.set(s);
await invoke('save_settings', { settings: s });
// Configure the AI provider in the Python sidecar
const configMap: Record<string, Record<string, string>> = { const configMap: Record<string, Record<string, string>> = {
openai: { api_key: s.openai_api_key, model: s.openai_model }, openai: { api_key: s.openai_api_key, model: s.openai_model },
anthropic: { api_key: s.anthropic_api_key, model: s.anthropic_model }, anthropic: { api_key: s.anthropic_api_key, model: s.anthropic_model },
@@ -68,7 +64,15 @@ export async function saveSettings(s: AppSettings): Promise<void> {
try { try {
await invoke('ai_configure', { provider: s.ai_provider, config }); await invoke('ai_configure', { provider: s.ai_provider, config });
} catch { } catch {
// Sidecar may not be running yet — provider will be configured on first use // Sidecar may not be running yet
} }
} }
} }
/**
 * Persist the given settings and immediately apply the AI provider
 * configuration.
 *
 * Stores `s` in the local `settings` store, saves it on the backend via the
 * `save_settings` Tauri command, then forwards the provider config to the
 * Python sidecar through `configureAIProvider` (which swallows the error if
 * the sidecar is not running yet).
 *
 * @param s - The complete application settings to save and apply.
 */
export async function saveSettings(s: AppSettings): Promise<void> {
settings.set(s);
await invoke('save_settings', { settings: s });
// Configure the AI provider in the Python sidecar
await configureAIProvider(s);
}

View File

@@ -10,7 +10,7 @@
import SettingsModal from '$lib/components/SettingsModal.svelte'; import SettingsModal from '$lib/components/SettingsModal.svelte';
import SidecarSetup from '$lib/components/SidecarSetup.svelte'; import SidecarSetup from '$lib/components/SidecarSetup.svelte';
import { segments, speakers } from '$lib/stores/transcript'; import { segments, speakers } from '$lib/stores/transcript';
import { settings, loadSettings } from '$lib/stores/settings'; import { settings, loadSettings, configureAIProvider } from '$lib/stores/settings';
import type { Segment, Speaker } from '$lib/types/transcript'; import type { Segment, Speaker } from '$lib/types/transcript';
import { onMount, tick } from 'svelte'; import { onMount, tick } from 'svelte';
@@ -54,6 +54,7 @@
function handleSidecarSetupComplete() { function handleSidecarSetupComplete() {
sidecarReady = true; sidecarReady = true;
configureAIProvider($settings);
checkSidecarUpdate(); checkSidecarUpdate();
} }
@@ -71,6 +72,7 @@
}); });
checkSidecar().then(() => { checkSidecar().then(() => {
if (sidecarReady) { if (sidecarReady) {
configureAIProvider($settings);
checkSidecarUpdate(); checkSidecarUpdate();
} }
}); });
@@ -120,6 +122,7 @@
let transcriptionProgress = $state(0); let transcriptionProgress = $state(0);
let transcriptionStage = $state(''); let transcriptionStage = $state('');
let transcriptionMessage = $state(''); let transcriptionMessage = $state('');
let extractingAudio = $state(false);
// Speaker color palette for auto-assignment // Speaker color palette for auto-assignment
const speakerColors = ['#e94560', '#4ecdc4', '#ffe66d', '#a8e6cf', '#ff8b94', '#c7ceea', '#ffd93d', '#6bcb77']; const speakerColors = ['#e94560', '#4ecdc4', '#ffe66d', '#a8e6cf', '#ff8b94', '#c7ceea', '#ffd93d', '#6bcb77'];
@@ -271,6 +274,8 @@
const ext = filePath.split('.').pop()?.toLowerCase() ?? ''; const ext = filePath.split('.').pop()?.toLowerCase() ?? '';
let audioPath = filePath; let audioPath = filePath;
if (VIDEO_EXTENSIONS.includes(ext)) { if (VIDEO_EXTENSIONS.includes(ext)) {
extractingAudio = true;
await tick();
try { try {
audioPath = await invoke<string>('extract_audio', { filePath }); audioPath = await invoke<string>('extract_audio', { filePath });
} catch (err) { } catch (err) {
@@ -289,6 +294,8 @@
alert(`Failed to extract audio from video: ${msg}`); alert(`Failed to extract audio from video: ${msg}`);
} }
return; return;
} finally {
extractingAudio = false;
} }
} }
@@ -602,6 +609,15 @@
message={transcriptionMessage} message={transcriptionMessage}
/> />
{#if extractingAudio}
<div class="extraction-overlay">
<div class="extraction-card">
<div class="extraction-spinner"></div>
<p>Extracting audio from video...</p>
</div>
</div>
{/if}
<SettingsModal <SettingsModal
visible={showSettings} visible={showSettings}
onClose={() => showSettings = false} onClose={() => showSettings = false}
@@ -808,4 +824,39 @@
.update-dismiss:hover { .update-dismiss:hover {
color: #e0e0e0; color: #e0e0e0;
} }
/* Audio extraction overlay */
.extraction-overlay {
position: fixed;
inset: 0;
background: rgba(0, 0, 0, 0.8);
display: flex;
align-items: center;
justify-content: center;
z-index: 9999;
}
.extraction-card {
background: #16213e;
padding: 2rem 2.5rem;
border-radius: 12px;
color: #e0e0e0;
border: 1px solid #2a3a5e;
box-shadow: 0 8px 32px rgba(0, 0, 0, 0.5);
display: flex;
flex-direction: column;
align-items: center;
gap: 1rem;
}
.extraction-card p {
margin: 0;
font-size: 1rem;
}
.extraction-spinner {
width: 32px;
height: 32px;
border: 3px solid #2a3a5e;
border-top-color: #e94560;
border-radius: 50%;
animation: spin 0.8s linear infinite;
}
</style> </style>