Phase 6: Llama-server manager, settings UI, packaging, and polish
- Implement LlamaManager in Rust for the llama-server lifecycle: spawn with port allocation, health check, clean shutdown on Drop, model listing (a hedged sketch follows this message)
- Add llama_start/stop/status/list_models Tauri commands
- Add load_settings/save_settings commands with JSON persistence (see the sketch after the diff)
- Build SettingsModal with tabs for Transcription, AI Provider, and Local AI settings (model size, device, language, API keys, provider selection)
- Wire settings into pipeline calls (model, device, language, skip diarization)
- Configure Tauri packaging: asset protocol for local audio files, CSP policy, bundle metadata, Linux .deb/.AppImage and Windows .msi config
- Add keyboard shortcuts: Space (play/pause), Ctrl+O (import), Ctrl+, (settings), Escape (close menus/modals)
- Close export dropdown on outside click
- Tests: 30 Python, 6 Rust, 0 Svelte errors

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
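A minimal sketch of the LlamaManager lifecycle named in the first bullet: OS-assigned port allocation, a TCP health-check poll, and kill-on-Drop. The struct shape, field names, and timeout values are illustrative assumptions, not the project's actual code.

use std::io;
use std::net::{TcpListener, TcpStream};
use std::path::PathBuf;
use std::process::{Child, Command};
use std::thread::sleep;
use std::time::Duration;

pub struct LlamaManager {
    child: Child,
    pub port: u16,
}

impl LlamaManager {
    /// Spawn llama-server on a free port and wait until it accepts connections.
    pub fn start(model_path: PathBuf) -> io::Result<Self> {
        // Bind port 0 so the OS picks a free port, then release it for the child.
        // (Small race: another process could grab it before llama-server binds.)
        let port = TcpListener::bind("127.0.0.1:0")?.local_addr()?.port();

        let child = Command::new("llama-server")
            .arg("--model")
            .arg(&model_path)
            .arg("--port")
            .arg(port.to_string())
            .spawn()?;

        let mut mgr = LlamaManager { child, port };

        // Health check: poll until the server accepts a TCP connection (~10 s cap).
        for _ in 0..50 {
            if TcpStream::connect(("127.0.0.1", port)).is_ok() {
                return Ok(mgr);
            }
            sleep(Duration::from_millis(200));
        }
        let _ = mgr.child.kill();
        Err(io::Error::new(
            io::ErrorKind::TimedOut,
            "llama-server never became reachable",
        ))
    }

    /// True while the child process is still alive.
    pub fn is_running(&mut self) -> bool {
        matches!(self.child.try_wait(), Ok(None))
    }
}

impl Drop for LlamaManager {
    // Clean shutdown on Drop: kill the child and reap it so no zombie is left.
    fn drop(&mut self) {
        let _ = self.child.kill();
        let _ = self.child.wait();
    }
}

Putting the shutdown in Drop means the server dies even on early returns and panics, which is what makes the "clean shutdown on Drop" guarantee cheap to uphold.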
@@ -1,11 +1,14 @@
pub mod commands;
pub mod db;
pub mod llama;
pub mod sidecar;
pub mod state;

use commands::ai::{ai_chat, ai_configure, ai_list_providers};
use commands::export::export_transcript;
use commands::project::{create_project, get_project, list_projects};
use commands::settings::{load_settings, save_settings};
use commands::system::{get_data_dir, llama_list_models, llama_start, llama_status, llama_stop};
use commands::transcribe::{run_pipeline, transcribe_file};

#[cfg_attr(mobile, tauri::mobile_entry_point)]
@@ -23,6 +26,13 @@ pub fn run() {
            ai_chat,
            ai_list_providers,
            ai_configure,
            llama_start,
            llama_stop,
            llama_status,
            llama_list_models,
            get_data_dir,
            load_settings,
            save_settings,
        ])
        .run(tauri::generate_context!())
        .expect("error while running tauri application");
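For the load_settings/save_settings commands registered above, here is a hedged sketch of JSON persistence under the app data directory. It assumes Tauri v2 (consistent with the mobile_entry_point attribute in this diff) and its app.path().app_data_dir() resolver; the Settings fields are guesses reconstructed from the commit message, not the project's real schema.

use serde::{Deserialize, Serialize};
use std::fs;
use std::path::PathBuf;
use tauri::Manager;

// Hypothetical settings shape, inferred from the commit message bullets.
#[derive(Serialize, Deserialize, Default)]
pub struct Settings {
    pub model_size: String,       // e.g. Whisper model size
    pub device: String,           // e.g. "cpu" or "cuda"
    pub language: Option<String>, // None = auto-detect
    pub skip_diarization: bool,
}

fn settings_path(app: &tauri::AppHandle) -> Result<PathBuf, String> {
    let dir = app.path().app_data_dir().map_err(|e| e.to_string())?;
    fs::create_dir_all(&dir).map_err(|e| e.to_string())?;
    Ok(dir.join("settings.json"))
}

#[tauri::command]
pub fn load_settings(app: tauri::AppHandle) -> Result<Settings, String> {
    let path = settings_path(&app)?;
    if !path.exists() {
        return Ok(Settings::default()); // first run: fall back to defaults
    }
    let raw = fs::read_to_string(path).map_err(|e| e.to_string())?;
    serde_json::from_str(&raw).map_err(|e| e.to_string())
}

#[tauri::command]
pub fn save_settings(app: tauri::AppHandle, settings: Settings) -> Result<(), String> {
    let path = settings_path(&app)?;
    let raw = serde_json::to_string_pretty(&settings).map_err(|e| e.to_string())?;
    fs::write(path, raw).map_err(|e| e.to_string())
}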