- Implement LlamaManager in Rust for llama-server lifecycle: spawn with port allocation, health check, clean shutdown on Drop (see the sketch after this list), model listing
- Add llama_start/stop/status/list_models Tauri commands
- Add load_settings/save_settings commands with JSON persistence
- Build SettingsModal with tabs for Transcription, AI Provider, and Local AI settings (model size, device, language, API keys, provider selection)
- Wire settings into pipeline calls (model, device, language, skip diarization)
- Configure Tauri packaging: asset protocol for local audio files, CSP policy, bundle metadata, Linux .deb/.AppImage and Windows .msi config
- Add keyboard shortcuts: Space (play/pause), Ctrl+O (import), Ctrl+, (settings), Escape (close menus/modals)
- Close export dropdown on outside click
- Tests: 30 Python, 6 Rust, 0 Svelte errors

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
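The "clean shutdown on Drop" item means the spawned llama-server child process is killed and reaped when the manager is dropped. A minimal sketch of that pattern, assuming the manager holds the child behind a Mutex; the field and type layout here are illustrative, not the actual implementation:

use std::process::Child;
use std::sync::Mutex;

pub struct LlamaManager {
    // Hypothetical field; the real struct lives in crate::llama.
    child: Mutex<Option<Child>>,
}

impl Drop for LlamaManager {
    fn drop(&mut self) {
        // Best-effort kill and reap; teardown errors are ignored.
        if let Ok(slot) = self.child.get_mut() {
            if let Some(mut child) = slot.take() {
                let _ = child.kill();
                let _ = child.wait();
            }
        }
    }
}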
Rust · 65 lines · 1.7 KiB
use std::path::PathBuf;
use std::sync::OnceLock;

use serde_json::{json, Value};

use crate::llama::{LlamaConfig, LlamaManager, LlamaStatus};

/// Global llama manager — persists across command invocations.
fn llama_manager() -> &'static LlamaManager {
    static INSTANCE: OnceLock<LlamaManager> = OnceLock::new();
    INSTANCE.get_or_init(LlamaManager::new)
}
/// Start the local llama-server with a GGUF model.
#[tauri::command]
pub fn llama_start(
    model_path: String,
    binary_path: Option<String>,
    port: Option<u16>,
    n_gpu_layers: Option<i32>,
    context_size: Option<u32>,
    threads: Option<u32>,
) -> Result<LlamaStatus, String> {
    let config = LlamaConfig {
        binary_path: PathBuf::from(
            binary_path.unwrap_or_else(|| "llama-server".to_string()),
        ),
        model_path: PathBuf::from(model_path),
        port: port.unwrap_or(0),
        n_gpu_layers: n_gpu_layers.unwrap_or(0),
        context_size: context_size.unwrap_or(4096),
        threads: threads.unwrap_or(4),
    };

    llama_manager().start(&config)
}
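For reference, the shape of crate::llama::LlamaConfig can be read off the struct literal above. A sketch, with field meanings inferred from the commit message; in particular, the reading that port 0 lets the manager pick a free port is an assumption based on the "spawn with port allocation" item:

use std::path::PathBuf;

// Inferred from the fields constructed in llama_start; the real
// definition lives in crate::llama and may differ.
pub struct LlamaConfig {
    pub binary_path: PathBuf, // llama-server executable
    pub model_path: PathBuf,  // GGUF model to load
    pub port: u16,            // assumed: 0 lets the manager allocate a free port
    pub n_gpu_layers: i32,    // layers offloaded to GPU; 0 = CPU-only
    pub context_size: u32,    // context window in tokens
    pub threads: u32,         // CPU threads for inference
}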
/// Stop the local llama-server.
#[tauri::command]
pub fn llama_stop() -> Result<(), String> {
    llama_manager().stop()
}
/// Get the status of the local llama-server.
#[tauri::command]
pub fn llama_status() -> LlamaStatus {
    llama_manager().status()
}
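LlamaStatus is returned straight to the frontend, so it must be serializable. Its actual fields are not visible in this file; a purely hypothetical shape, consistent with the health-check and port-allocation items in the commit message:

use serde::Serialize;

// Hypothetical; the real type lives in crate::llama.
#[derive(Serialize, Clone)]
pub struct LlamaStatus {
    pub running: bool,
    pub port: Option<u16>,     // port the server was spawned on
    pub healthy: bool,         // last health-check result
    pub model: Option<String>, // loaded model path
}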
/// List available GGUF models in the models directory.
#[tauri::command]
pub fn llama_list_models() -> Value {
    let models = LlamaManager::list_models();
    json!({
        "models": models,
        "models_dir": LlamaManager::models_dir().to_string_lossy(),
    })
}
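LlamaManager::list_models is defined elsewhere; given the doc comment above, a minimal sketch of a directory scan for .gguf files, under the assumption that it simply returns file names (the function name and behavior here are assumptions):

use std::fs;
use std::path::Path;

// Sketch only; the real implementation lives in crate::llama.
fn list_gguf_models(models_dir: &Path) -> Vec<String> {
    let mut models: Vec<String> = fs::read_dir(models_dir)
        .into_iter() // Ok(ReadDir) yields one iterator, Err yields none
        .flatten()   // iterate the io::Result<DirEntry> items
        .flatten()   // skip entries that failed to read
        .filter_map(|entry| {
            let name = entry.file_name().to_string_lossy().into_owned();
            name.ends_with(".gguf").then_some(name)
        })
        .collect();
    models.sort();
    models
}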
/// Get the app data directory path.
#[tauri::command]
pub fn get_data_dir() -> String {
    LlamaManager::data_dir().to_string_lossy().to_string()
}
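None of these commands are callable from the frontend until they are registered with the Tauri builder. The wiring would look roughly like this in main.rs; the commands module path is an assumption, and the load_settings/save_settings commands named in the commit message are defined elsewhere, so they are omitted here:

// A sketch of the registration these commands need (in main.rs).
fn main() {
    tauri::Builder::default()
        .invoke_handler(tauri::generate_handler![
            commands::llama_start,
            commands::llama_stop,
            commands::llama_status,
            commands::llama_list_models,
            commands::get_data_dir,
        ])
        .run(tauri::generate_context!())
        .expect("error while running tauri application");
}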