// voice-to-notes/src-tauri/src/commands/system.rs
//
// Tauri command handlers for managing a local llama-server instance
// (start/stop/status, model discovery, and app data paths).
use serde_json::{json, Value};
use crate::llama::{LlamaConfig, LlamaManager, LlamaStatus};
use std::path::PathBuf;
use std::sync::OnceLock;
/// Returns the process-wide `LlamaManager` singleton.
///
/// The manager is created lazily on first access and then lives for the
/// lifetime of the process, so server state persists across Tauri
/// command invocations.
fn llama_manager() -> &'static LlamaManager {
    static MANAGER: OnceLock<LlamaManager> = OnceLock::new();
    MANAGER.get_or_init(LlamaManager::new)
}
/// Start the local llama-server with a GGUF model.
///
/// Every optional setting falls back to a default when omitted:
/// the `llama-server` binary resolved from `PATH`, port `0`
/// (presumably "let the manager/OS pick one" — confirm in `LlamaManager`),
/// `0` GPU layers (CPU-only), a 4096-token context, and 4 threads.
///
/// Returns the resulting server status, or an error string on failure.
#[tauri::command]
pub fn llama_start(
    model_path: String,
    binary_path: Option<String>,
    port: Option<u16>,
    n_gpu_layers: Option<i32>,
    context_size: Option<u32>,
    threads: Option<u32>,
) -> Result<LlamaStatus, String> {
    // Resolve the server binary: an explicit path wins, otherwise rely
    // on `PATH` lookup of the bare executable name.
    let binary = binary_path
        .map(PathBuf::from)
        .unwrap_or_else(|| PathBuf::from("llama-server"));

    let config = LlamaConfig {
        binary_path: binary,
        model_path: model_path.into(),
        port: port.unwrap_or(0),
        n_gpu_layers: n_gpu_layers.unwrap_or(0),
        context_size: context_size.unwrap_or(4096),
        threads: threads.unwrap_or(4),
    };
    llama_manager().start(&config)
}
/// Stop the local llama-server, if one is running.
///
/// Delegates to the global manager; returns an error string if the
/// shutdown fails.
#[tauri::command]
pub fn llama_stop() -> Result<(), String> {
    let manager = llama_manager();
    manager.stop()
}
/// Report the current status of the local llama-server.
///
/// Infallible: the manager always has a status to return, even when no
/// server process is running.
#[tauri::command]
pub fn llama_status() -> LlamaStatus {
    let manager = llama_manager();
    manager.status()
}
/// List available GGUF models in the models directory.
///
/// Returns a JSON object with two keys:
/// - `"models"`: the models discovered by [`LlamaManager::list_models`]
/// - `"models_dir"`: the directory that was scanned, as a lossy UTF-8 string
#[tauri::command]
pub fn llama_list_models() -> Value {
    let dir = LlamaManager::models_dir();
    json!({
        "models": LlamaManager::list_models(),
        "models_dir": dir.to_string_lossy(),
    })
}
/// Get the app data directory path as a string.
///
/// Non-UTF-8 path components (possible on some platforms) are replaced
/// with U+FFFD by the lossy conversion rather than causing an error.
#[tauri::command]
pub fn get_data_dir() -> String {
    // `into_owned()` converts the `Cow<str>` directly into a `String`,
    // avoiding the redundant re-allocation of `.to_string()` when the
    // path was already valid UTF-8 (borrowed case still allocates once).
    LlamaManager::data_dir().to_string_lossy().into_owned()
}