Add container registry pull, image source settings, and global AWS config
All checks were successful
Build Container / build-container (push) Successful in 1m59s

Support pulling images from registry (default: repo.anhonesthost.net/cybercovellc/triple-c/triple-c-sandbox:latest),
local builds, or custom images via a new settings UI. Add global AWS configuration
(config-path auto-detect, profile picker, region) whose values serve as defaults
that can be overridden per-project for Bedrock auth.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-02-27 15:22:49 +00:00
parent 6e68374604
commit 0f188783e1
22 changed files with 772 additions and 87 deletions

View File

@@ -1,7 +1,7 @@
use tauri::State;
use crate::docker;
use crate::models::ContainerInfo;
use crate::models::{container_config, ContainerInfo};
use crate::AppState;
#[tauri::command]
@@ -10,8 +10,10 @@ pub async fn check_docker() -> Result<bool, String> {
}
#[tauri::command]
pub async fn check_image_exists() -> Result<bool, String> {
docker::image_exists().await
/// Tauri command: reports whether the Docker image selected by the current
/// settings (registry / local build / custom) is present on the host.
pub async fn check_image_exists(state: State<'_, AppState>) -> Result<bool, String> {
    // Resolve the effective image reference from the stored settings, then
    // ask the Docker daemon whether that image exists locally.
    let settings = state.settings_store.get();
    let image = container_config::resolve_image_name(&settings.image_source, &settings.custom_image_name);
    docker::image_exists(&image).await
}
#[tauri::command]

View File

@@ -1,7 +1,7 @@
use tauri::State;
use crate::docker;
use crate::models::{AuthMode, Project, ProjectStatus};
use crate::models::{container_config, AuthMode, Project, ProjectStatus};
use crate::storage::secure;
use crate::AppState;
@@ -57,6 +57,10 @@ pub async fn start_project_container(
.get(&project_id)
.ok_or_else(|| format!("Project {} not found", project_id))?;
// Load settings for image resolution and global AWS
let settings = state.settings_store.get();
let image_name = container_config::resolve_image_name(&settings.image_source, &settings.custom_image_name);
// Get API key only if auth mode requires it
let api_key = match project.auth_mode {
AuthMode::ApiKey => {
@@ -65,16 +69,14 @@ pub async fn start_project_container(
Some(key)
}
AuthMode::Login => {
// Login mode: no API key needed, user runs `claude login` in the container.
// Auth state persists in the .claude config volume.
None
}
AuthMode::Bedrock => {
// Bedrock mode: no Anthropic API key needed, uses AWS credentials.
let bedrock = project.bedrock_config.as_ref()
.ok_or_else(|| "Bedrock auth mode selected but no Bedrock configuration found.".to_string())?;
if bedrock.aws_region.is_empty() {
return Err("AWS region is required for Bedrock auth mode.".to_string());
// Region can come from per-project or global
if bedrock.aws_region.is_empty() && settings.global_aws.aws_region.is_none() {
return Err("AWS region is required for Bedrock auth mode. Set it per-project or in global AWS settings.".to_string());
}
None
}
@@ -84,12 +86,19 @@ pub async fn start_project_container(
state.projects_store.update_status(&project_id, ProjectStatus::Starting)?;
// Ensure image exists
if !docker::image_exists().await? {
return Err("Docker image not built. Please build the image first.".to_string());
if !docker::image_exists(&image_name).await? {
state.projects_store.update_status(&project_id, ProjectStatus::Stopped)?;
return Err(format!("Docker image '{}' not found. Please pull or build the image first.", image_name));
}
// Determine docker socket path
let docker_socket = default_docker_socket();
let docker_socket = settings.docker_socket_path
.as_deref()
.map(|s| s.to_string())
.unwrap_or_else(|| default_docker_socket());
// AWS config path from global settings
let aws_config_path = settings.global_aws.aws_config_path.clone();
// Check for existing container
let container_id = if let Some(existing_id) = docker::find_existing_container(&project).await? {
@@ -98,7 +107,14 @@ pub async fn start_project_container(
existing_id
} else {
// Create new container
let new_id = docker::create_container(&project, api_key.as_deref(), &docker_socket).await?;
let new_id = docker::create_container(
&project,
api_key.as_deref(),
&docker_socket,
&image_name,
aws_config_path.as_deref(),
&settings.global_aws,
).await?;
docker::start_container(&new_id).await?;
new_id
};

View File

@@ -1,4 +1,9 @@
use tauri::State;
use crate::docker;
use crate::models::AppSettings;
use crate::storage::secure;
use crate::AppState;
#[tauri::command]
pub async fn set_api_key(key: String) -> Result<(), String> {
@@ -14,3 +19,88 @@ pub async fn has_api_key() -> Result<bool, String> {
pub async fn delete_api_key() -> Result<(), String> {
secure::delete_api_key()
}
/// Tauri command: return a snapshot of the persisted application settings.
#[tauri::command]
pub async fn get_settings(state: State<'_, AppState>) -> Result<AppSettings, String> {
    let snapshot = state.settings_store.get();
    Ok(snapshot)
}
/// Tauri command: replace the stored application settings with `settings`,
/// persisting them via the settings store, and echo the saved value back.
#[tauri::command]
pub async fn update_settings(
    settings: AppSettings,
    state: State<'_, AppState>,
) -> Result<AppSettings, String> {
    let store = &state.settings_store;
    store.update(settings)
}
/// Tauri command: pull `image_name` from its registry, forwarding every
/// progress line to the frontend as an `image-pull-progress` event.
#[tauri::command]
pub async fn pull_image(
    image_name: String,
    app_handle: tauri::AppHandle,
) -> Result<(), String> {
    use tauri::Emitter;
    // Progress callback: relay each status line to the UI. A failed emit is
    // deliberately ignored — the pull itself keeps going.
    let forward = move |msg: String| {
        let _ = app_handle.emit("image-pull-progress", msg);
    };
    docker::pull_image(&image_name, forward).await
}
/// Tauri command: locate the user's `~/.aws` directory.
///
/// Returns `Some(path)` when the directory exists, `None` when the home
/// directory cannot be determined or `~/.aws` is absent. Never errors.
#[tauri::command]
pub async fn detect_aws_config() -> Result<Option<String>, String> {
    let detected = dirs::home_dir()
        .map(|home| home.join(".aws"))
        .filter(|dir| dir.exists())
        .map(|dir| dir.to_string_lossy().to_string());
    Ok(detected)
}
/// Tauri command: enumerate AWS profile names from `~/.aws/credentials` and
/// `~/.aws/config`, de-duplicated, in file order.
///
/// Best effort: returns an empty list when the home directory cannot be
/// determined, and silently skips files that are missing or unreadable.
#[tauri::command]
pub async fn list_aws_profiles() -> Result<Vec<String>, String> {
    let mut profiles = Vec::new();
    let home = match dirs::home_dir() {
        Some(h) => h,
        None => return Ok(profiles),
    };

    // ~/.aws/credentials: every `[section]` header is a profile name.
    collect_ini_profiles(
        &home.join(".aws").join("credentials"),
        |section| Some(section.to_string()),
        &mut profiles,
    );

    // ~/.aws/config: profiles are `[default]` or `[profile <name>]`. Other
    // section kinds (e.g. `[sso-session <name>]`, `[services <name>]`) are
    // NOT profiles and must not be listed.
    collect_ini_profiles(
        &home.join(".aws").join("config"),
        |section| {
            if section == "default" {
                Some(section.to_string())
            } else {
                section.strip_prefix("profile ").map(|name| name.to_string())
            }
        },
        &mut profiles,
    );

    Ok(profiles)
}

/// Read `path` (if it exists and is readable) and feed each INI `[section]`
/// header through `to_profile`; push every mapped, non-empty name that is
/// not already in `out`.
fn collect_ini_profiles(
    path: &std::path::Path,
    to_profile: impl Fn(&str) -> Option<String>,
    out: &mut Vec<String>,
) {
    let contents = match std::fs::read_to_string(path) {
        Ok(c) => c,
        // Missing or unreadable file: nothing to collect (best effort).
        Err(_) => return,
    };
    for line in contents.lines() {
        let trimmed = line.trim();
        // `[name]` → `name`; anything else (keys, comments) is skipped.
        let section = trimmed
            .strip_prefix('[')
            .and_then(|rest| rest.strip_suffix(']'));
        if let Some(section) = section {
            if let Some(profile) = to_profile(section) {
                // Skip empty `[]` headers and duplicates across the two files.
                if !profile.is_empty() && !out.contains(&profile) {
                    out.push(profile);
                }
            }
        }
    }
}

View File

@@ -6,7 +6,7 @@ use bollard::models::{ContainerSummary, HostConfig, Mount, MountTypeEnum};
use std::collections::HashMap;
use super::client::get_docker;
use crate::models::{container_config, AuthMode, BedrockAuthMethod, ContainerInfo, Project};
use crate::models::{AuthMode, BedrockAuthMethod, ContainerInfo, GlobalAwsSettings, Project};
pub async fn find_existing_container(project: &Project) -> Result<Option<String>, String> {
let docker = get_docker()?;
@@ -42,10 +42,12 @@ pub async fn create_container(
project: &Project,
api_key: Option<&str>,
docker_socket_path: &str,
image_name: &str,
aws_config_path: Option<&str>,
global_aws: &GlobalAwsSettings,
) -> Result<String, String> {
let docker = get_docker()?;
let container_name = project.container_name();
let image = container_config::full_image_name();
let mut env_vars: Vec<String> = Vec::new();
@@ -55,14 +57,32 @@ pub async fn create_container(
let uid = std::process::Command::new("id").arg("-u").output();
let gid = std::process::Command::new("id").arg("-g").output();
if let Ok(out) = uid {
let val = String::from_utf8_lossy(&out.stdout).trim().to_string();
env_vars.push(format!("HOST_UID={}", val));
if out.status.success() {
let val = String::from_utf8_lossy(&out.stdout).trim().to_string();
if !val.is_empty() {
log::debug!("Host UID detected: {}", val);
env_vars.push(format!("HOST_UID={}", val));
}
} else {
log::debug!("Failed to detect host UID (exit code {:?})", out.status.code());
}
}
if let Ok(out) = gid {
let val = String::from_utf8_lossy(&out.stdout).trim().to_string();
env_vars.push(format!("HOST_GID={}", val));
if out.status.success() {
let val = String::from_utf8_lossy(&out.stdout).trim().to_string();
if !val.is_empty() {
log::debug!("Host GID detected: {}", val);
env_vars.push(format!("HOST_GID={}", val));
}
} else {
log::debug!("Failed to detect host GID (exit code {:?})", out.status.code());
}
}
}
#[cfg(windows)]
{
log::debug!("Skipping HOST_UID/HOST_GID on Windows — Docker Desktop's Linux VM handles user mapping");
}
if let Some(key) = api_key {
env_vars.push(format!("ANTHROPIC_API_KEY={}", key));
@@ -82,7 +102,16 @@ pub async fn create_container(
if project.auth_mode == AuthMode::Bedrock {
if let Some(ref bedrock) = project.bedrock_config {
env_vars.push("CLAUDE_CODE_USE_BEDROCK=1".to_string());
env_vars.push(format!("AWS_REGION={}", bedrock.aws_region));
// AWS region: per-project overrides global
let region = if !bedrock.aws_region.is_empty() {
Some(bedrock.aws_region.clone())
} else {
global_aws.aws_region.clone()
};
if let Some(ref r) = region {
env_vars.push(format!("AWS_REGION={}", r));
}
match bedrock.auth_method {
BedrockAuthMethod::StaticCredentials => {
@@ -97,8 +126,11 @@ pub async fn create_container(
}
}
BedrockAuthMethod::Profile => {
if let Some(ref profile) = bedrock.aws_profile {
env_vars.push(format!("AWS_PROFILE={}", profile));
// Per-project profile overrides global
let profile = bedrock.aws_profile.as_ref()
.or(global_aws.aws_profile.as_ref());
if let Some(p) = profile {
env_vars.push(format!("AWS_PROFILE={}", p));
}
}
BedrockAuthMethod::BearerToken => {
@@ -148,22 +180,32 @@ pub async fn create_container(
});
}
// AWS config mount (read-only, for profile-based auth)
if project.auth_mode == AuthMode::Bedrock {
// AWS config mount (read-only)
// Mount if: Bedrock profile auth needs it, OR a global aws_config_path is set
let should_mount_aws = if project.auth_mode == AuthMode::Bedrock {
if let Some(ref bedrock) = project.bedrock_config {
if bedrock.auth_method == BedrockAuthMethod::Profile {
if let Some(home) = dirs::home_dir() {
let aws_dir = home.join(".aws");
if aws_dir.exists() {
mounts.push(Mount {
target: Some("/home/claude/.aws".to_string()),
source: Some(aws_dir.to_string_lossy().to_string()),
typ: Some(MountTypeEnum::BIND),
read_only: Some(true),
..Default::default()
});
}
}
bedrock.auth_method == BedrockAuthMethod::Profile
} else {
false
}
} else {
false
};
if should_mount_aws || aws_config_path.is_some() {
let aws_dir = aws_config_path
.map(|p| std::path::PathBuf::from(p))
.or_else(|| dirs::home_dir().map(|h| h.join(".aws")));
if let Some(ref aws_path) = aws_dir {
if aws_path.exists() {
mounts.push(Mount {
target: Some("/home/claude/.aws".to_string()),
source: Some(aws_path.to_string_lossy().to_string()),
typ: Some(MountTypeEnum::BIND),
read_only: Some(true),
..Default::default()
});
}
}
}
@@ -190,7 +232,7 @@ pub async fn create_container(
};
let config = Config {
image: Some(image),
image: Some(image_name.to_string()),
hostname: Some("triple-c".to_string()),
env: Some(env_vars),
labels: Some(labels),
@@ -257,11 +299,17 @@ pub async fn get_container_info(project: &Project) -> Result<Option<ContainerInf
.map(|s| format!("{:?}", s))
.unwrap_or_else(|| "unknown".to_string());
// Read actual image from Docker inspect
let image = info
.config
.and_then(|c| c.image)
.unwrap_or_else(|| "unknown".to_string());
Ok(Some(ContainerInfo {
container_id: container_id.clone(),
project_id: project.id.clone(),
status,
image: container_config::full_image_name(),
image,
}))
}
Err(_) => Ok(None),
@@ -282,7 +330,6 @@ pub async fn list_sibling_containers() -> Result<Vec<ContainerSummary>, String>
.await
.map_err(|e| format!("Failed to list containers: {}", e))?;
// Filter out Triple-C managed containers
let siblings: Vec<ContainerSummary> = all_containers
.into_iter()
.filter(|c| {

View File

@@ -1,4 +1,4 @@
use bollard::image::{BuildImageOptions, ListImagesOptions};
use bollard::image::{BuildImageOptions, CreateImageOptions, ListImagesOptions};
use bollard::models::ImageSummary;
use futures_util::StreamExt;
use std::collections::HashMap;
@@ -10,13 +10,12 @@ use crate::models::container_config;
const DOCKERFILE: &str = include_str!("../../../../container/Dockerfile");
const ENTRYPOINT: &str = include_str!("../../../../container/entrypoint.sh");
pub async fn image_exists() -> Result<bool, String> {
pub async fn image_exists(image_name: &str) -> Result<bool, String> {
let docker = get_docker()?;
let full_name = container_config::full_image_name();
let filters: HashMap<String, Vec<String>> = HashMap::from([(
"reference".to_string(),
vec![full_name],
vec![image_name.to_string()],
)]);
let images: Vec<ImageSummary> = docker
@@ -30,14 +29,65 @@ pub async fn image_exists() -> Result<bool, String> {
Ok(!images.is_empty())
}
/// Pull `image_name` from its registry through the Docker daemon, invoking
/// `on_progress` with a human-readable line for each status update.
///
/// `image_name` may be `repo`, `repo:tag`, `host:port/repo[:tag]`, or a
/// digest reference `repo@sha256:...`. A missing tag defaults to `latest`.
///
/// # Errors
/// Returns `Err` when the daemon is unreachable, the pull stream fails, or
/// the stream reports a pull error (e.g. image not found, auth failure).
pub async fn pull_image<F>(image_name: &str, on_progress: F) -> Result<(), String>
where
    F: Fn(String) + Send + 'static,
{
    let docker = get_docker()?;

    let (from_image, tag) = split_image_reference(image_name);

    let options = CreateImageOptions {
        from_image,
        tag,
        ..Default::default()
    };

    let mut stream = docker.create_image(Some(options), None, None);

    while let Some(result) = stream.next().await {
        match result {
            Ok(info) => {
                // Surface a reported error before forwarding its status line,
                // so the failure is not masked by a trailing progress message.
                if let Some(ref error) = info.error {
                    return Err(format!("Pull error: {}", error));
                }
                let mut msg_parts = Vec::new();
                if let Some(ref status) = info.status {
                    msg_parts.push(status.clone());
                }
                if let Some(ref progress) = info.progress {
                    msg_parts.push(progress.clone());
                }
                if !msg_parts.is_empty() {
                    on_progress(msg_parts.join(" "));
                }
            }
            Err(e) => return Err(format!("Pull stream error: {}", e)),
        }
    }

    Ok(())
}

/// Split an image reference into `(from_image, tag)` for the Docker
/// create-image API.
///
/// Digest references (`repo@sha256:...`) are passed through whole with an
/// empty tag — the daemon resolves the digest itself; the previous logic
/// mistook the digest's colon for a tag separator. Otherwise the last colon
/// separates the tag unless it belongs to a registry port (`host:port/repo`),
/// and a missing tag defaults to `latest`.
fn split_image_reference(image_name: &str) -> (&str, &str) {
    // Digest-pinned reference: never treat the `sha256:` colon as a tag.
    if image_name.contains('@') {
        return (image_name, "");
    }
    match image_name.rfind(':') {
        // A `/` after the colon means it was a port, e.g. `host:5000/repo`.
        Some(pos) if !image_name[pos + 1..].contains('/') => {
            (&image_name[..pos], &image_name[pos + 1..])
        }
        _ => (image_name, "latest"),
    }
}
pub async fn build_image<F>(on_progress: F) -> Result<(), String>
where
F: Fn(String) + Send + 'static,
{
let docker = get_docker()?;
let full_name = container_config::full_image_name();
let full_name = container_config::local_build_image_name();
// Create a tar archive in memory containing Dockerfile and entrypoint.sh
let tar_bytes = create_build_context().map_err(|e| format!("Failed to create build context: {}", e))?;
let options = BuildImageOptions {
@@ -71,7 +121,6 @@ fn create_build_context() -> Result<Vec<u8>, std::io::Error> {
{
let mut archive = tar::Builder::new(&mut buf);
// Add Dockerfile
let dockerfile_bytes = DOCKERFILE.as_bytes();
let mut header = tar::Header::new_gnu();
header.set_size(dockerfile_bytes.len() as u64);
@@ -79,7 +128,6 @@ fn create_build_context() -> Result<Vec<u8>, std::io::Error> {
header.set_cksum();
archive.append_data(&mut header, "Dockerfile", dockerfile_bytes)?;
// Add entrypoint.sh
let entrypoint_bytes = ENTRYPOINT.as_bytes();
let mut header = tar::Header::new_gnu();
header.set_size(entrypoint_bytes.len() as u64);
@@ -90,7 +138,6 @@ fn create_build_context() -> Result<Vec<u8>, std::io::Error> {
archive.finish()?;
}
// Flush to make sure all data is written
let _ = buf.flush();
Ok(buf)
}

View File

@@ -5,9 +5,11 @@ mod storage;
use docker::exec::ExecSessionManager;
use storage::projects_store::ProjectsStore;
use storage::settings_store::SettingsStore;
/// Shared application state managed by Tauri and injected into commands via
/// `tauri::State<AppState>`.
pub struct AppState {
// Persisted per-project records.
pub projects_store: ProjectsStore,
// Persisted global settings (image source, global AWS config, etc.).
pub settings_store: SettingsStore,
// Live exec/terminal sessions attached to containers.
pub exec_manager: ExecSessionManager,
}
@@ -20,6 +22,7 @@ pub fn run() {
.plugin(tauri_plugin_opener::init())
.manage(AppState {
projects_store: ProjectsStore::new(),
settings_store: SettingsStore::new(),
exec_manager: ExecSessionManager::new(),
})
.invoke_handler(tauri::generate_handler![
@@ -41,6 +44,11 @@ pub fn run() {
commands::settings_commands::set_api_key,
commands::settings_commands::has_api_key,
commands::settings_commands::delete_api_key,
commands::settings_commands::get_settings,
commands::settings_commands::update_settings,
commands::settings_commands::pull_image,
commands::settings_commands::detect_aws_config,
commands::settings_commands::list_aws_profiles,
// Terminal
commands::terminal_commands::open_terminal_session,
commands::terminal_commands::terminal_input,

View File

@@ -1,11 +1,55 @@
use serde::{Deserialize, Serialize};
/// Where the sandbox container image comes from.
///
/// Fieldless enum, so `Copy` and `Eq` are derived in addition to the
/// original traits (backward compatible).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum ImageSource {
    /// Pull the prebuilt image from the project registry (the default).
    Registry,
    /// Use the image produced by the in-app local Docker build.
    LocalBuild,
    /// Use a user-supplied image reference (`custom_image_name`).
    Custom,
}

impl Default for ImageSource {
    fn default() -> Self {
        Self::Registry
    }
}
/// Global AWS defaults applied to every project unless overridden by the
/// project's own Bedrock configuration.
///
/// Every field defaults to `None`, so `Default` is derived instead of the
/// previous hand-written impl (which was byte-equivalent to the derive).
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct GlobalAwsSettings {
    /// Host path to the AWS config directory to mount into containers
    /// (when unset, `~/.aws` is used as a fallback where applicable).
    #[serde(default)]
    pub aws_config_path: Option<String>,
    /// Default AWS profile name; a per-project profile takes precedence.
    #[serde(default)]
    pub aws_profile: Option<String>,
    /// Default AWS region; a per-project region takes precedence.
    #[serde(default)]
    pub aws_region: Option<String>,
}
/// Persisted global application settings.
///
/// Every field carries `#[serde(default)]` so settings files written by
/// older versions (missing newer fields) still deserialize cleanly.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AppSettings {
// Presumably the default SSH key offered to new projects — confirm against project-creation code.
#[serde(default)]
pub default_ssh_key_path: Option<String>,
// Presumably the default git identity applied to new projects — confirm against caller.
#[serde(default)]
pub default_git_user_name: Option<String>,
#[serde(default)]
pub default_git_user_email: Option<String>,
// Overrides the auto-detected Docker socket path when starting containers.
#[serde(default)]
pub docker_socket_path: Option<String>,
// Which image to run: registry pull (default), local build, or custom.
#[serde(default)]
pub image_source: ImageSource,
// Image reference used when `image_source` is `Custom`; empty/None falls back to the registry image.
#[serde(default)]
pub custom_image_name: Option<String>,
// Global AWS defaults (config path, profile, region), overridable per-project for Bedrock auth.
#[serde(default)]
pub global_aws: GlobalAwsSettings,
}
impl Default for AppSettings {
@@ -15,6 +59,9 @@ impl Default for AppSettings {
default_git_user_name: None,
default_git_user_email: None,
docker_socket_path: None,
image_source: ImageSource::default(),
custom_image_name: None,
global_aws: GlobalAwsSettings::default(),
}
}
}

View File

@@ -1,5 +1,7 @@
use serde::{Deserialize, Serialize};
use super::app_settings::ImageSource;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ContainerInfo {
pub container_id: String,
@@ -8,9 +10,22 @@ pub struct ContainerInfo {
pub image: String,
}
pub const IMAGE_NAME: &str = "triple-c";
pub const LOCAL_IMAGE_NAME: &str = "triple-c";
pub const IMAGE_TAG: &str = "latest";
pub const REGISTRY_IMAGE: &str = "repo.anhonesthost.net/cybercovellc/triple-c/triple-c-sandbox:latest";
pub fn full_image_name() -> String {
format!("{IMAGE_NAME}:{IMAGE_TAG}")
/// Full `name:tag` reference for the locally built sandbox image.
pub fn local_build_image_name() -> String {
    format!("{}:{}", LOCAL_IMAGE_NAME, IMAGE_TAG)
}
/// Resolve the effective container image reference for the chosen source.
///
/// `Custom` falls back to the registry image when no non-empty custom name
/// has been configured.
pub fn resolve_image_name(source: &ImageSource, custom: &Option<String>) -> String {
    match source {
        ImageSource::LocalBuild => local_build_image_name(),
        ImageSource::Custom => match custom.as_deref() {
            Some(name) if !name.is_empty() => name.to_string(),
            _ => REGISTRY_IMAGE.to_string(),
        },
        ImageSource::Registry => REGISTRY_IMAGE.to_string(),
    }
}

View File

@@ -1,5 +1,7 @@
pub mod projects_store;
pub mod secure;
pub mod settings_store;
pub use projects_store::*;
pub use secure::*;
pub use settings_store::*;

View File

@@ -0,0 +1,76 @@
use std::fs;
use std::path::PathBuf;
use std::sync::Mutex;
use crate::models::AppSettings;
/// Thread-safe, file-backed store for `AppSettings`.
pub struct SettingsStore {
// In-memory copy of the settings, guarded for cross-thread command access.
settings: Mutex<AppSettings>,
// Absolute path of the backing `settings.json`.
file_path: PathBuf,
}
impl SettingsStore {
pub fn new() -> Self {
let data_dir = dirs::data_dir()
.unwrap_or_else(|| PathBuf::from("."))
.join("triple-c");
fs::create_dir_all(&data_dir).ok();
let file_path = data_dir.join("settings.json");
let settings = if file_path.exists() {
match fs::read_to_string(&file_path) {
Ok(data) => match serde_json::from_str(&data) {
Ok(parsed) => parsed,
Err(e) => {
log::error!("Failed to parse settings.json: {}. Using defaults.", e);
let backup = file_path.with_extension("json.bak");
if let Err(be) = fs::copy(&file_path, &backup) {
log::error!("Failed to back up corrupted settings.json: {}", be);
}
AppSettings::default()
}
},
Err(e) => {
log::error!("Failed to read settings.json: {}", e);
AppSettings::default()
}
}
} else {
AppSettings::default()
};
Self {
settings: Mutex::new(settings),
file_path,
}
}
fn lock(&self) -> std::sync::MutexGuard<'_, AppSettings> {
self.settings.lock().unwrap_or_else(|e| e.into_inner())
}
fn save(&self, settings: &AppSettings) -> Result<(), String> {
let data = serde_json::to_string_pretty(settings)
.map_err(|e| format!("Failed to serialize settings: {}", e))?;
let tmp_path = self.file_path.with_extension("json.tmp");
fs::write(&tmp_path, data)
.map_err(|e| format!("Failed to write temp settings file: {}", e))?;
fs::rename(&tmp_path, &self.file_path)
.map_err(|e| format!("Failed to rename settings file: {}", e))?;
Ok(())
}
pub fn get(&self) -> AppSettings {
self.lock().clone()
}
pub fn update(&self, new_settings: AppSettings) -> Result<AppSettings, String> {
let mut settings = self.lock();
*settings = new_settings.clone();
self.save(&settings)?;
Ok(new_settings)
}
}