Fix Rust backend: secrets to keychain, status recovery, shutdown, dedup

- Move git_token and Bedrock credentials to OS keychain instead of
  storing in plaintext projects.json via skip_serializing + keyring
- Fix project status stuck in Starting on container creation failure
  by resetting to Stopped on any error path
- Add granular store methods to reduce TOCTOU race window
- Add auth_mode, project path, and bedrock config change detection
  to container_needs_recreation with label-based fingerprinting
- Fix mutex held across async Docker API call in exec resize by
  cloning exec_id under lock then releasing before API call
- Add graceful shutdown via on_window_event to clean up exec sessions
- Extract compute_env_fingerprint and merge_claude_instructions helpers
  to eliminate code duplication in container.rs
- Remove unused thiserror dependency
- Return error instead of falling back to CWD when data dir unavailable

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-02-28 20:42:55 +00:00
parent 82f159d2a9
commit 03e0590631
10 changed files with 316 additions and 99 deletions

View File

@@ -4512,7 +4512,6 @@ dependencies = [
"tauri-plugin-dialog", "tauri-plugin-dialog",
"tauri-plugin-opener", "tauri-plugin-opener",
"tauri-plugin-store", "tauri-plugin-store",
"thiserror 2.0.18",
"tokio", "tokio",
"uuid", "uuid",
] ]

View File

@@ -24,7 +24,6 @@ tokio = { version = "1", features = ["full"] }
futures-util = "0.3" futures-util = "0.3"
uuid = { version = "1", features = ["v4"] } uuid = { version = "1", features = ["v4"] }
chrono = { version = "0.4", features = ["serde"] } chrono = { version = "0.4", features = ["serde"] }
thiserror = "2"
dirs = "6" dirs = "6"
log = "0.4" log = "0.4"
env_logger = "0.11" env_logger = "0.11"

View File

@@ -5,6 +5,44 @@ use crate::models::{container_config, AuthMode, Project, ProjectStatus};
use crate::storage::secure; use crate::storage::secure;
use crate::AppState; use crate::AppState;
/// Extract secret fields from a project and store them in the OS keychain.
///
/// Only fields that are currently `Some` are written; `None` fields are left
/// untouched.
// NOTE(review): clearing a secret (setting it back to None) does not remove
// the old keychain entry, so load_secrets_for_project may resurrect it —
// confirm whether a per-key delete is needed on the None path.
fn store_secrets_for_project(project: &Project) -> Result<(), String> {
    let bedrock = project.bedrock_config.as_ref();
    // (keychain key name, current value) pairs for every secret field.
    let entries: [(&str, Option<&String>); 5] = [
        ("git-token", project.git_token.as_ref()),
        ("aws-access-key-id", bedrock.and_then(|b| b.aws_access_key_id.as_ref())),
        ("aws-secret-access-key", bedrock.and_then(|b| b.aws_secret_access_key.as_ref())),
        ("aws-session-token", bedrock.and_then(|b| b.aws_session_token.as_ref())),
        ("aws-bearer-token", bedrock.and_then(|b| b.aws_bearer_token.as_ref())),
    ];
    for (key_name, value) in entries {
        if let Some(v) = value {
            secure::store_project_secret(&project.id, key_name, v)?;
        }
    }
    Ok(())
}
/// Populate secret fields on a project struct from the OS keychain.
fn load_secrets_for_project(project: &mut Project) {
project.git_token = secure::get_project_secret(&project.id, "git-token")
.unwrap_or(None);
if let Some(ref mut bedrock) = project.bedrock_config {
bedrock.aws_access_key_id = secure::get_project_secret(&project.id, "aws-access-key-id")
.unwrap_or(None);
bedrock.aws_secret_access_key = secure::get_project_secret(&project.id, "aws-secret-access-key")
.unwrap_or(None);
bedrock.aws_session_token = secure::get_project_secret(&project.id, "aws-session-token")
.unwrap_or(None);
bedrock.aws_bearer_token = secure::get_project_secret(&project.id, "aws-bearer-token")
.unwrap_or(None);
}
}
#[tauri::command] #[tauri::command]
pub async fn list_projects(state: State<'_, AppState>) -> Result<Vec<Project>, String> { pub async fn list_projects(state: State<'_, AppState>) -> Result<Vec<Project>, String> {
Ok(state.projects_store.list()) Ok(state.projects_store.list())
@@ -17,6 +55,7 @@ pub async fn add_project(
state: State<'_, AppState>, state: State<'_, AppState>,
) -> Result<Project, String> { ) -> Result<Project, String> {
let project = Project::new(name, path); let project = Project::new(name, path);
store_secrets_for_project(&project)?;
state.projects_store.add(project) state.projects_store.add(project)
} }
@@ -34,6 +73,11 @@ pub async fn remove_project(
} }
} }
// Clean up keychain secrets for this project
if let Err(e) = secure::delete_project_secrets(&project_id) {
log::warn!("Failed to delete keychain secrets for project {}: {}", project_id, e);
}
state.projects_store.remove(&project_id) state.projects_store.remove(&project_id)
} }
@@ -42,6 +86,7 @@ pub async fn update_project(
project: Project, project: Project,
state: State<'_, AppState>, state: State<'_, AppState>,
) -> Result<Project, String> { ) -> Result<Project, String> {
store_secrets_for_project(&project)?;
state.projects_store.update(project) state.projects_store.update(project)
} }
@@ -55,6 +100,10 @@ pub async fn start_project_container(
.get(&project_id) .get(&project_id)
.ok_or_else(|| format!("Project {} not found", project_id))?; .ok_or_else(|| format!("Project {} not found", project_id))?;
// Populate secret fields from the OS keychain so they are available
// in memory when building environment variables for the container.
load_secrets_for_project(&mut project);
// Load settings for image resolution and global AWS // Load settings for image resolution and global AWS
let settings = state.settings_store.get(); let settings = state.settings_store.get();
let image_name = container_config::resolve_image_name(&settings.image_source, &settings.custom_image_name); let image_name = container_config::resolve_image_name(&settings.image_source, &settings.custom_image_name);
@@ -83,39 +132,51 @@ pub async fn start_project_container(
// Update status to starting // Update status to starting
state.projects_store.update_status(&project_id, ProjectStatus::Starting)?; state.projects_store.update_status(&project_id, ProjectStatus::Starting)?;
// Ensure image exists // Wrap container operations so that any failure resets status to Stopped.
if !docker::image_exists(&image_name).await? { let result: Result<String, String> = async {
state.projects_store.update_status(&project_id, ProjectStatus::Stopped)?; // Ensure image exists
return Err(format!("Docker image '{}' not found. Please pull or build the image first.", image_name)); if !docker::image_exists(&image_name).await? {
} return Err(format!("Docker image '{}' not found. Please pull or build the image first.", image_name));
}
// Determine docker socket path // Determine docker socket path
let docker_socket = settings.docker_socket_path let docker_socket = settings.docker_socket_path
.as_deref() .as_deref()
.map(|s| s.to_string()) .map(|s| s.to_string())
.unwrap_or_else(|| default_docker_socket()); .unwrap_or_else(|| default_docker_socket());
// AWS config path from global settings // AWS config path from global settings
let aws_config_path = settings.global_aws.aws_config_path.clone(); let aws_config_path = settings.global_aws.aws_config_path.clone();
// Check for existing container // Check for existing container
let container_id = if let Some(existing_id) = docker::find_existing_container(&project).await? { let container_id = if let Some(existing_id) = docker::find_existing_container(&project).await? {
// Compare the running container's configuration (mounts, env vars) let needs_recreation = docker::container_needs_recreation(
// against the current project settings. If anything changed (SSH key &existing_id,
// path, git config, docker socket, etc.) we recreate the container. &project,
// Safe to recreate: the claude config named volume is keyed by settings.global_claude_instructions.as_deref(),
// project ID (not container ID) so it persists across recreation. )
let needs_recreation = docker::container_needs_recreation( .await
&existing_id, .unwrap_or(false);
&project, if needs_recreation {
settings.global_claude_instructions.as_deref(), log::info!("Container config changed, recreating container for project {}", project.id);
) let _ = docker::stop_container(&existing_id).await;
.await docker::remove_container(&existing_id).await?;
.unwrap_or(false); let new_id = docker::create_container(
if needs_recreation { &project,
log::info!("Container config changed, recreating container for project {}", project.id); api_key.as_deref(),
let _ = docker::stop_container(&existing_id).await; &docker_socket,
docker::remove_container(&existing_id).await?; &image_name,
aws_config_path.as_deref(),
&settings.global_aws,
settings.global_claude_instructions.as_deref(),
).await?;
docker::start_container(&new_id).await?;
new_id
} else {
docker::start_container(&existing_id).await?;
existing_id
}
} else {
let new_id = docker::create_container( let new_id = docker::create_container(
&project, &project,
api_key.as_deref(), api_key.as_deref(),
@@ -127,27 +188,20 @@ pub async fn start_project_container(
).await?; ).await?;
docker::start_container(&new_id).await?; docker::start_container(&new_id).await?;
new_id new_id
} else { };
// Start existing container as-is
docker::start_container(&existing_id).await?;
existing_id
}
} else {
// Create new container
let new_id = docker::create_container(
&project,
api_key.as_deref(),
&docker_socket,
&image_name,
aws_config_path.as_deref(),
&settings.global_aws,
settings.global_claude_instructions.as_deref(),
).await?;
docker::start_container(&new_id).await?;
new_id
};
// Update project with container info Ok(container_id)
}.await;
// On failure, reset status to Stopped so the project doesn't get stuck.
if let Err(ref e) = result {
log::error!("Failed to start container for project {}: {}", project_id, e);
let _ = state.projects_store.update_status(&project_id, ProjectStatus::Stopped);
}
let container_id = result?;
// Update project with container info using granular methods (Issue 14: TOCTOU)
state.projects_store.set_container_id(&project_id, Some(container_id.clone()))?; state.projects_store.set_container_id(&project_id, Some(container_id.clone()))?;
state.projects_store.update_status(&project_id, ProjectStatus::Running)?; state.projects_store.update_status(&project_id, ProjectStatus::Running)?;

View File

@@ -4,9 +4,63 @@ use bollard::container::{
}; };
use bollard::models::{ContainerSummary, HostConfig, Mount, MountTypeEnum}; use bollard::models::{ContainerSummary, HostConfig, Mount, MountTypeEnum};
use std::collections::HashMap; use std::collections::HashMap;
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
use super::client::get_docker; use super::client::get_docker;
use crate::models::{AuthMode, BedrockAuthMethod, ContainerInfo, GlobalAwsSettings, Project}; use crate::models::{AuthMode, BedrockAuthMethod, ContainerInfo, EnvVar, GlobalAwsSettings, Project};
/// Compute a fingerprint string for the custom environment variables.
///
/// Keys that are empty (after trimming) or that start with a reserved prefix
/// are excluded. Entries are sorted alphabetically so that mere reordering
/// does not cause spurious container recreation.
fn compute_env_fingerprint(custom_env_vars: &[EnvVar]) -> String {
    const RESERVED_PREFIXES: [&str; 6] =
        ["ANTHROPIC_", "AWS_", "GIT_", "HOST_", "CLAUDE_", "TRIPLE_C_"];
    let mut parts: Vec<String> = custom_env_vars
        .iter()
        .filter_map(|env_var| {
            let key = env_var.key.trim();
            if key.is_empty() {
                return None;
            }
            let upper = key.to_uppercase();
            if RESERVED_PREFIXES.iter().any(|p| upper.starts_with(p)) {
                return None;
            }
            Some(format!("{}={}", key, env_var.value))
        })
        .collect();
    parts.sort();
    parts.join(",")
}
/// Merge global and per-project Claude instructions into a single string.
///
/// When both are present they are joined with a blank line (global first);
/// when only one is present it is returned as-is; `None` when neither is set.
fn merge_claude_instructions(
    global_instructions: Option<&str>,
    project_instructions: Option<&str>,
) -> Option<String> {
    let present: Vec<&str> = [global_instructions, project_instructions]
        .into_iter()
        .flatten()
        .collect();
    if present.is_empty() {
        None
    } else {
        Some(present.join("\n\n"))
    }
}
/// Compute a stable fingerprint for the Bedrock configuration so changes can
/// be detected by comparing against the value persisted in a container label.
///
/// Uses a fixed FNV-1a hash rather than `DefaultHasher`: the std hasher's
/// algorithm is explicitly unspecified and may change between Rust releases,
/// which would make every persisted label mismatch and force a spurious
/// container recreation after a toolchain upgrade.
///
/// Returns an empty string when the project has no Bedrock configuration.
// NOTE(review): the fingerprint covers secret values (access key, session
// token, bearer token) and is stored in a Docker label; a 64-bit
// non-cryptographic hash of a low-entropy secret could be brute-forced from
// the label — confirm this exposure is acceptable.
fn compute_bedrock_fingerprint(project: &Project) -> String {
    let bedrock = match project.bedrock_config {
        Some(ref b) => b,
        None => return String::new(),
    };
    // Debug formatting quotes strings and spells out Some/None, so the '|'
    // separators cannot collide with field contents and field boundaries
    // stay unambiguous.
    let material = format!(
        "{:?}|{:?}|{:?}|{:?}|{:?}|{:?}|{:?}|{:?}|{:?}",
        bedrock.auth_method,
        bedrock.aws_region,
        bedrock.aws_access_key_id,
        bedrock.aws_secret_access_key,
        bedrock.aws_session_token,
        bedrock.aws_profile,
        bedrock.aws_bearer_token,
        bedrock.model_id,
        bedrock.disable_prompt_caching,
    );
    // FNV-1a, 64-bit: deterministic across builds and Rust versions.
    let mut hash: u64 = 0xcbf2_9ce4_8422_2325;
    for byte in material.as_bytes() {
        hash ^= u64::from(*byte);
        hash = hash.wrapping_mul(0x0000_0100_0000_01b3);
    }
    format!("{:x}", hash)
}
pub async fn find_existing_container(project: &Project) -> Result<Option<String>, String> { pub async fn find_existing_container(project: &Project) -> Result<Option<String>, String> {
let docker = get_docker()?; let docker = get_docker()?;
@@ -153,7 +207,6 @@ pub async fn create_container(
// Custom environment variables // Custom environment variables
let reserved_prefixes = ["ANTHROPIC_", "AWS_", "GIT_", "HOST_", "CLAUDE_", "TRIPLE_C_"]; let reserved_prefixes = ["ANTHROPIC_", "AWS_", "GIT_", "HOST_", "CLAUDE_", "TRIPLE_C_"];
let mut custom_env_fingerprint_parts: Vec<String> = Vec::new();
for env_var in &project.custom_env_vars { for env_var in &project.custom_env_vars {
let key = env_var.key.trim(); let key = env_var.key.trim();
if key.is_empty() { if key.is_empty() {
@@ -165,19 +218,15 @@ pub async fn create_container(
continue; continue;
} }
env_vars.push(format!("{}={}", key, env_var.value)); env_vars.push(format!("{}={}", key, env_var.value));
custom_env_fingerprint_parts.push(format!("{}={}", key, env_var.value));
} }
custom_env_fingerprint_parts.sort(); let custom_env_fingerprint = compute_env_fingerprint(&project.custom_env_vars);
let custom_env_fingerprint = custom_env_fingerprint_parts.join(",");
env_vars.push(format!("TRIPLE_C_CUSTOM_ENV={}", custom_env_fingerprint)); env_vars.push(format!("TRIPLE_C_CUSTOM_ENV={}", custom_env_fingerprint));
// Claude instructions (global + per-project) // Claude instructions (global + per-project)
let combined_instructions = match (global_claude_instructions, project.claude_instructions.as_deref()) { let combined_instructions = merge_claude_instructions(
(Some(g), Some(p)) => Some(format!("{}\n\n{}", g, p)), global_claude_instructions,
(Some(g), None) => Some(g.to_string()), project.claude_instructions.as_deref(),
(None, Some(p)) => Some(p.to_string()), );
(None, None) => None,
};
if let Some(ref instructions) = combined_instructions { if let Some(ref instructions) = combined_instructions {
env_vars.push(format!("CLAUDE_INSTRUCTIONS={}", instructions)); env_vars.push(format!("CLAUDE_INSTRUCTIONS={}", instructions));
} }
@@ -265,6 +314,10 @@ pub async fn create_container(
labels.insert("triple-c.managed".to_string(), "true".to_string()); labels.insert("triple-c.managed".to_string(), "true".to_string());
labels.insert("triple-c.project-id".to_string(), project.id.clone()); labels.insert("triple-c.project-id".to_string(), project.id.clone());
labels.insert("triple-c.project-name".to_string(), project.name.clone()); labels.insert("triple-c.project-name".to_string(), project.name.clone());
labels.insert("triple-c.auth-mode".to_string(), format!("{:?}", project.auth_mode));
labels.insert("triple-c.project-path".to_string(), project.path.clone());
labels.insert("triple-c.bedrock-fingerprint".to_string(), compute_bedrock_fingerprint(project));
labels.insert("triple-c.image".to_string(), image_name.to_string());
let host_config = HostConfig { let host_config = HostConfig {
mounts: Some(mounts), mounts: Some(mounts),
@@ -343,6 +396,15 @@ pub async fn container_needs_recreation(
.await .await
.map_err(|e| format!("Failed to inspect container: {}", e))?; .map_err(|e| format!("Failed to inspect container: {}", e))?;
let labels = info
.config
.as_ref()
.and_then(|c| c.labels.as_ref());
let get_label = |name: &str| -> Option<String> {
labels.and_then(|l| l.get(name).cloned())
};
let mounts = info let mounts = info
.host_config .host_config
.as_ref() .as_ref()
@@ -354,6 +416,50 @@ pub async fn container_needs_recreation(
// Code settings stored in the named volume). The change takes effect // Code settings stored in the named volume). The change takes effect
// on the next explicit rebuild instead. // on the next explicit rebuild instead.
// ── Auth mode ────────────────────────────────────────────────────────
let current_auth_mode = format!("{:?}", project.auth_mode);
if let Some(container_auth_mode) = get_label("triple-c.auth-mode") {
if container_auth_mode != current_auth_mode {
log::info!("Auth mode mismatch (container={:?}, project={:?})", container_auth_mode, current_auth_mode);
return Ok(true);
}
}
// ── Project path ─────────────────────────────────────────────────────
if let Some(container_path) = get_label("triple-c.project-path") {
if container_path != project.path {
log::info!("Project path mismatch (container={:?}, project={:?})", container_path, project.path);
return Ok(true);
}
}
// ── Bedrock config fingerprint ───────────────────────────────────────
let expected_bedrock_fp = compute_bedrock_fingerprint(project);
let container_bedrock_fp = get_label("triple-c.bedrock-fingerprint").unwrap_or_default();
if container_bedrock_fp != expected_bedrock_fp {
log::info!("Bedrock config mismatch");
return Ok(true);
}
// ── Image ────────────────────────────────────────────────────────────
// The image label is set at creation time; if the user changed the
// configured image we need to recreate. We only compare when the
// label exists (containers created before this change won't have it).
if let Some(container_image) = get_label("triple-c.image") {
// The caller doesn't pass the image name, but we can read the
// container's actual image from Docker inspect.
let actual_image = info
.config
.as_ref()
.and_then(|c| c.image.as_ref());
if let Some(actual) = actual_image {
if *actual != container_image {
log::info!("Image mismatch (actual={:?}, label={:?})", actual, container_image);
return Ok(true);
}
}
}
// ── SSH key path mount ─────────────────────────────────────────────── // ── SSH key path mount ───────────────────────────────────────────────
let ssh_mount_source = mounts let ssh_mount_source = mounts
.and_then(|m| { .and_then(|m| {
@@ -403,21 +509,7 @@ pub async fn container_needs_recreation(
} }
// ── Custom environment variables ────────────────────────────────────── // ── Custom environment variables ──────────────────────────────────────
let reserved_prefixes = ["ANTHROPIC_", "AWS_", "GIT_", "HOST_", "CLAUDE_", "TRIPLE_C_"]; let expected_fingerprint = compute_env_fingerprint(&project.custom_env_vars);
let mut expected_parts: Vec<String> = Vec::new();
for env_var in &project.custom_env_vars {
let key = env_var.key.trim();
if key.is_empty() {
continue;
}
let is_reserved = reserved_prefixes.iter().any(|p| key.to_uppercase().starts_with(p));
if is_reserved {
continue;
}
expected_parts.push(format!("{}={}", key, env_var.value));
}
expected_parts.sort();
let expected_fingerprint = expected_parts.join(",");
let container_fingerprint = get_env("TRIPLE_C_CUSTOM_ENV").unwrap_or_default(); let container_fingerprint = get_env("TRIPLE_C_CUSTOM_ENV").unwrap_or_default();
if container_fingerprint != expected_fingerprint { if container_fingerprint != expected_fingerprint {
log::info!("Custom env vars mismatch (container={:?}, expected={:?})", container_fingerprint, expected_fingerprint); log::info!("Custom env vars mismatch (container={:?}, expected={:?})", container_fingerprint, expected_fingerprint);
@@ -425,12 +517,10 @@ pub async fn container_needs_recreation(
} }
// ── Claude instructions ─────────────────────────────────────────────── // ── Claude instructions ───────────────────────────────────────────────
let expected_instructions = match (global_claude_instructions, project.claude_instructions.as_deref()) { let expected_instructions = merge_claude_instructions(
(Some(g), Some(p)) => Some(format!("{}\n\n{}", g, p)), global_claude_instructions,
(Some(g), None) => Some(g.to_string()), project.claude_instructions.as_deref(),
(None, Some(p)) => Some(p.to_string()), );
(None, None) => None,
};
let container_instructions = get_env("CLAUDE_INSTRUCTIONS"); let container_instructions = get_env("CLAUDE_INSTRUCTIONS");
if container_instructions.as_deref() != expected_instructions.as_deref() { if container_instructions.as_deref() != expected_instructions.as_deref() {
log::info!("CLAUDE_INSTRUCTIONS mismatch"); log::info!("CLAUDE_INSTRUCTIONS mismatch");

View File

@@ -163,11 +163,26 @@ impl ExecSessionManager {
} }
pub async fn resize(&self, session_id: &str, cols: u16, rows: u16) -> Result<(), String> { pub async fn resize(&self, session_id: &str, cols: u16, rows: u16) -> Result<(), String> {
let sessions = self.sessions.lock().await; // Clone the exec_id under the lock, then drop the lock before the
let session = sessions // async Docker API call to avoid holding the mutex across await.
.get(session_id) let exec_id = {
.ok_or_else(|| format!("Session {} not found", session_id))?; let sessions = self.sessions.lock().await;
session.resize(cols, rows).await let session = sessions
.get(session_id)
.ok_or_else(|| format!("Session {} not found", session_id))?;
session.exec_id.clone()
};
let docker = get_docker()?;
docker
.resize_exec(
&exec_id,
ResizeExecOptions {
width: cols,
height: rows,
},
)
.await
.map_err(|e| format!("Failed to resize exec: {}", e))
} }
pub async fn close_session(&self, session_id: &str) { pub async fn close_session(&self, session_id: &str) {

View File

@@ -6,6 +6,7 @@ mod storage;
use docker::exec::ExecSessionManager; use docker::exec::ExecSessionManager;
use storage::projects_store::ProjectsStore; use storage::projects_store::ProjectsStore;
use storage::settings_store::SettingsStore; use storage::settings_store::SettingsStore;
use tauri::Manager;
pub struct AppState { pub struct AppState {
pub projects_store: ProjectsStore, pub projects_store: ProjectsStore,
@@ -21,10 +22,18 @@ pub fn run() {
.plugin(tauri_plugin_dialog::init()) .plugin(tauri_plugin_dialog::init())
.plugin(tauri_plugin_opener::init()) .plugin(tauri_plugin_opener::init())
.manage(AppState { .manage(AppState {
projects_store: ProjectsStore::new(), projects_store: ProjectsStore::new().expect("Failed to initialize projects store"),
settings_store: SettingsStore::new(), settings_store: SettingsStore::new().expect("Failed to initialize settings store"),
exec_manager: ExecSessionManager::new(), exec_manager: ExecSessionManager::new(),
}) })
.on_window_event(|window, event| {
if let tauri::WindowEvent::CloseRequested { .. } = event {
let state = window.state::<AppState>();
tauri::async_runtime::block_on(async {
state.exec_manager.close_all_sessions().await;
});
}
})
.invoke_handler(tauri::generate_handler![ .invoke_handler(tauri::generate_handler![
// Docker // Docker
commands::docker_commands::check_docker, commands::docker_commands::check_docker,

View File

@@ -17,6 +17,7 @@ pub struct Project {
pub bedrock_config: Option<BedrockConfig>, pub bedrock_config: Option<BedrockConfig>,
pub allow_docker_access: bool, pub allow_docker_access: bool,
pub ssh_key_path: Option<String>, pub ssh_key_path: Option<String>,
#[serde(skip_serializing)]
pub git_token: Option<String>, pub git_token: Option<String>,
pub git_user_name: Option<String>, pub git_user_name: Option<String>,
pub git_user_email: Option<String>, pub git_user_email: Option<String>,
@@ -76,10 +77,14 @@ impl Default for BedrockAuthMethod {
pub struct BedrockConfig { pub struct BedrockConfig {
pub auth_method: BedrockAuthMethod, pub auth_method: BedrockAuthMethod,
pub aws_region: String, pub aws_region: String,
#[serde(skip_serializing)]
pub aws_access_key_id: Option<String>, pub aws_access_key_id: Option<String>,
#[serde(skip_serializing)]
pub aws_secret_access_key: Option<String>, pub aws_secret_access_key: Option<String>,
#[serde(skip_serializing)]
pub aws_session_token: Option<String>, pub aws_session_token: Option<String>,
pub aws_profile: Option<String>, pub aws_profile: Option<String>,
#[serde(skip_serializing)]
pub aws_bearer_token: Option<String>, pub aws_bearer_token: Option<String>,
pub model_id: Option<String>, pub model_id: Option<String>,
pub disable_prompt_caching: bool, pub disable_prompt_caching: bool,

View File

@@ -10,9 +10,9 @@ pub struct ProjectsStore {
} }
impl ProjectsStore { impl ProjectsStore {
pub fn new() -> Self { pub fn new() -> Result<Self, String> {
let data_dir = dirs::data_dir() let data_dir = dirs::data_dir()
.unwrap_or_else(|| PathBuf::from(".")) .ok_or_else(|| "Could not determine data directory. Set XDG_DATA_HOME on Linux.".to_string())?
.join("triple-c"); .join("triple-c");
fs::create_dir_all(&data_dir).ok(); fs::create_dir_all(&data_dir).ok();
@@ -42,10 +42,10 @@ impl ProjectsStore {
Vec::new() Vec::new()
}; };
Self { Ok(Self {
projects: Mutex::new(projects), projects: Mutex::new(projects),
file_path, file_path,
} })
} }
fn lock(&self) -> std::sync::MutexGuard<'_, Vec<Project>> { fn lock(&self) -> std::sync::MutexGuard<'_, Vec<Project>> {

View File

@@ -36,3 +36,49 @@ pub fn has_api_key() -> Result<bool, String> {
Err(e) => Err(e), Err(e) => Err(e),
} }
} }
/// Store a per-project secret in the OS keychain.
///
/// The keychain service name is derived from the project id and the logical
/// key name, so each secret lives in its own entry.
pub fn store_project_secret(project_id: &str, key_name: &str, value: &str) -> Result<(), String> {
    keyring::Entry::new(
        &format!("triple-c-project-{}-{}", project_id, key_name),
        "secret",
    )
    .map_err(|e| format!("Keyring error: {}", e))?
    .set_password(value)
    .map_err(|e| format!("Failed to store project secret '{}': {}", key_name, e))
}
/// Retrieve a per-project secret from the OS keychain.
pub fn get_project_secret(project_id: &str, key_name: &str) -> Result<Option<String>, String> {
let service = format!("triple-c-project-{}-{}", project_id, key_name);
let entry = keyring::Entry::new(&service, "secret")
.map_err(|e| format!("Keyring error: {}", e))?;
match entry.get_password() {
Ok(value) => Ok(Some(value)),
Err(keyring::Error::NoEntry) => Ok(None),
Err(e) => Err(format!("Failed to retrieve project secret '{}': {}", key_name, e)),
}
}
/// Delete all known secrets for a project from the OS keychain.
pub fn delete_project_secrets(project_id: &str) -> Result<(), String> {
let secret_keys = [
"git-token",
"aws-access-key-id",
"aws-secret-access-key",
"aws-session-token",
"aws-bearer-token",
];
for key_name in &secret_keys {
let service = format!("triple-c-project-{}-{}", project_id, key_name);
let entry = keyring::Entry::new(&service, "secret")
.map_err(|e| format!("Keyring error: {}", e))?;
match entry.delete_credential() {
Ok(()) => {}
Err(keyring::Error::NoEntry) => {}
Err(e) => {
log::warn!("Failed to delete project secret '{}': {}", key_name, e);
}
}
}
Ok(())
}

View File

@@ -10,9 +10,9 @@ pub struct SettingsStore {
} }
impl SettingsStore { impl SettingsStore {
pub fn new() -> Self { pub fn new() -> Result<Self, String> {
let data_dir = dirs::data_dir() let data_dir = dirs::data_dir()
.unwrap_or_else(|| PathBuf::from(".")) .ok_or_else(|| "Could not determine data directory. Set XDG_DATA_HOME on Linux.".to_string())?
.join("triple-c"); .join("triple-c");
fs::create_dir_all(&data_dir).ok(); fs::create_dir_all(&data_dir).ok();
@@ -41,10 +41,10 @@ impl SettingsStore {
AppSettings::default() AppSettings::default()
}; };
Self { Ok(Self {
settings: Mutex::new(settings), settings: Mutex::new(settings),
file_path, file_path,
} })
} }
fn lock(&self) -> std::sync::MutexGuard<'_, AppSettings> { fn lock(&self) -> std::sync::MutexGuard<'_, AppSettings> {