Initial commit: Triple-C app, container, and CI

Tauri v2 desktop app (React/TypeScript + Rust) for managing
containerized Claude Code environments. Includes Gitea Actions
workflow for building and pushing the sandbox container image,
and a BUILDING.md guide for manual app builds on Linux and Windows.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-02-27 04:29:51 +00:00
commit 97a0745ead
65 changed files with 17202 additions and 0 deletions

5771
app/src-tauri/Cargo.lock generated Normal file

File diff suppressed because it is too large Load Diff

38
app/src-tauri/Cargo.toml Normal file
View File

@@ -0,0 +1,38 @@
# Tauri v2 desktop backend for Triple-C.
[package]
name = "triple-c"
version = "0.1.0"
edition = "2021"
# Library target consumed by the `triple-c` binary below; cdylib/staticlib
# variants are emitted alongside the plain rlib.
[lib]
name = "triple_c_lib"
crate-type = ["lib", "cdylib", "staticlib"]
[[bin]]
name = "triple-c"
path = "src/main.rs"
[dependencies]
# App shell + plugins: persistent key-value store, native dialogs, URL opener.
tauri = { version = "2", features = [] }
tauri-plugin-store = "2"
tauri-plugin-dialog = "2"
tauri-plugin-opener = "2"
serde = { version = "1", features = ["derive"] }
serde_json = "1"
# Docker Engine API client used to manage the sandbox containers.
bollard = "0.18"
# OS keychain storage for the Anthropic API key.
keyring = { version = "3", features = ["apple-native", "windows-native", "linux-native"] }
tokio = { version = "1", features = ["full"] }
futures-util = "0.3"
uuid = { version = "1", features = ["v4"] }
chrono = { version = "0.4", features = ["serde"] }
thiserror = "2"
dirs = "6"
log = "0.4"
env_logger = "0.11"
# Builds the in-memory tar archive sent as the Docker build context.
tar = "0.4"
[build-dependencies]
tauri-build = { version = "2", features = [] }
[features]
# custom-protocol is required for production (bundled-frontend) builds.
default = ["custom-protocol"]
custom-protocol = ["tauri/custom-protocol"]

3
app/src-tauri/build.rs Normal file
View File

@@ -0,0 +1,3 @@
// Build script: runs Tauri's code generation (config validation, resource
// embedding) before the crate itself is compiled.
fn main() {
    tauri_build::build()
}

View File

@@ -0,0 +1,33 @@
{
"identifier": "default",
"description": "Default capabilities for Triple-C",
"windows": ["main"],
"permissions": [
"core:default",
"core:event:default",
"core:event:allow-emit",
"core:event:allow-listen",
"core:event:allow-unlisten",
"core:event:allow-emit-to",
"dialog:default",
"dialog:allow-open",
"dialog:allow-save",
"dialog:allow-message",
"dialog:allow-ask",
"dialog:allow-confirm",
"store:default",
"store:allow-get",
"store:allow-set",
"store:allow-delete",
"store:allow-keys",
"store:allow-values",
"store:allow-entries",
"store:allow-length",
"store:allow-load",
"store:allow-reset",
"store:allow-save",
"store:allow-clear",
"opener:default",
"opener:allow-open-url"
]
}

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1 @@
{"default":{"identifier":"default","description":"Default capabilities for Triple-C","local":true,"windows":["main"],"permissions":["core:default","core:event:default","core:event:allow-emit","core:event:allow-listen","core:event:allow-unlisten","core:event:allow-emit-to","dialog:default","dialog:allow-open","dialog:allow-save","dialog:allow-message","dialog:allow-ask","dialog:allow-confirm","store:default","store:allow-get","store:allow-set","store:allow-delete","store:allow-keys","store:allow-values","store:allow-entries","store:allow-length","store:allow-load","store:allow-reset","store:allow-save","store:allow-clear","opener:default","opener:allow-open-url"]}}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

Binary file not shown.

After

Width:  |  Height:  |  Size: 372 B

Binary file not shown.

After

Width:  |  Height:  |  Size: 914 B

Binary file not shown.

After

Width:  |  Height:  |  Size: 100 B

View File

@@ -0,0 +1,54 @@
use tauri::State;
use crate::docker;
use crate::models::ContainerInfo;
use crate::AppState;
/// Returns Ok(true) when the Docker daemon answers a ping; Err otherwise.
#[tauri::command]
pub async fn check_docker() -> Result<bool, String> {
    docker::check_docker_available().await
}
/// Whether the Triple-C sandbox image is present in the local Docker store.
#[tauri::command]
pub async fn check_image_exists() -> Result<bool, String> {
    docker::image_exists().await
}
/// Build the sandbox image, streaming each build-log line to the frontend
/// via `image-build-progress` events.
#[tauri::command]
pub async fn build_image(app_handle: tauri::AppHandle) -> Result<(), String> {
    use tauri::Emitter;
    docker::build_image(move |msg| {
        // Emission failures are ignored: progress is best-effort UI feedback.
        let _ = app_handle.emit("image-build-progress", msg);
    })
    .await
}
/// Inspect the container associated with `project_id`.
/// Errors when the project id is unknown; Ok(None) when no container exists.
#[tauri::command]
pub async fn get_container_info(
    project_id: String,
    state: State<'_, AppState>,
) -> Result<Option<ContainerInfo>, String> {
    // Resolve the project first; an unknown id is a hard error.
    let Some(project) = state.projects_store.get(&project_id) else {
        return Err(format!("Project {} not found", project_id));
    };
    docker::get_container_info(&project).await
}
/// List non-Triple-C containers on the host, projected into plain JSON
/// objects (id, names, image, state, status) for the frontend.
#[tauri::command]
pub async fn list_sibling_containers() -> Result<Vec<serde_json::Value>, String> {
    let mut result = Vec::new();
    for c in docker::list_sibling_containers().await? {
        result.push(serde_json::json!({
            "id": c.id,
            "names": c.names,
            "image": c.image,
            "state": c.state,
            "status": c.status,
        }));
    }
    Ok(result)
}

View File

@@ -0,0 +1,4 @@
// Tauri command modules, grouped by domain; all registered in lib.rs.
pub mod docker_commands;
pub mod project_commands;
pub mod settings_commands;
pub mod terminal_commands;

View File

@@ -0,0 +1,157 @@
use tauri::State;
use crate::docker;
use crate::models::{AuthMode, Project, ProjectStatus};
use crate::storage::secure;
use crate::AppState;
/// Return every persisted project.
#[tauri::command]
pub async fn list_projects(state: State<'_, AppState>) -> Result<Vec<Project>, String> {
    Ok(state.projects_store.list())
}
/// Create and persist a new project for the given host directory.
/// No container is created here; that happens on first start.
#[tauri::command]
pub async fn add_project(
    name: String,
    path: String,
    state: State<'_, AppState>,
) -> Result<Project, String> {
    let project = Project::new(name, path);
    state.projects_store.add(project)
}
/// Delete a project, tearing down its container first.
#[tauri::command]
pub async fn remove_project(
    project_id: String,
    state: State<'_, AppState>,
) -> Result<(), String> {
    // Stop and remove container if it exists. Both calls are best-effort
    // (errors discarded) so the project record can still be removed even if
    // the container is already gone.
    if let Some(project) = state.projects_store.get(&project_id) {
        if let Some(ref container_id) = project.container_id {
            let _ = docker::stop_container(container_id).await;
            let _ = docker::remove_container(container_id).await;
        }
    }
    // Close any exec sessions.
    // NOTE(review): this closes terminal sessions for ALL projects, not just
    // this one — the exec manager has no per-project scoping. Confirm intended.
    state.exec_manager.close_all_sessions().await;
    state.projects_store.remove(&project_id)
}
/// Persist the project exactly as supplied by the frontend (all fields,
/// including timestamps, are taken verbatim).
#[tauri::command]
pub async fn update_project(
    project: Project,
    state: State<'_, AppState>,
) -> Result<Project, String> {
    state.projects_store.update(project)
}
#[tauri::command]
pub async fn start_project_container(
project_id: String,
state: State<'_, AppState>,
) -> Result<Project, String> {
let mut project = state
.projects_store
.get(&project_id)
.ok_or_else(|| format!("Project {} not found", project_id))?;
// Get API key only if auth mode requires it
let api_key = match project.auth_mode {
AuthMode::ApiKey => {
let key = secure::get_api_key()?
.ok_or_else(|| "No API key configured. Please set your Anthropic API key in Settings.".to_string())?;
Some(key)
}
AuthMode::Login => {
// Login mode: no API key needed, user runs `claude login` in the container.
// Auth state persists in the .claude config volume.
None
}
};
// Update status to starting
state.projects_store.update_status(&project_id, ProjectStatus::Starting)?;
// Ensure image exists
if !docker::image_exists().await? {
return Err("Docker image not built. Please build the image first.".to_string());
}
// Determine docker socket path
let docker_socket = default_docker_socket();
// Check for existing container
let container_id = if let Some(existing_id) = docker::find_existing_container(&project).await? {
// Start existing container
docker::start_container(&existing_id).await?;
existing_id
} else {
// Create new container
let new_id = docker::create_container(&project, api_key.as_deref(), &docker_socket).await?;
docker::start_container(&new_id).await?;
new_id
};
// Update project with container info
state.projects_store.set_container_id(&project_id, Some(container_id.clone()))?;
state.projects_store.update_status(&project_id, ProjectStatus::Running)?;
project.container_id = Some(container_id);
project.status = ProjectStatus::Running;
Ok(project)
}
#[tauri::command]
pub async fn stop_project_container(
project_id: String,
state: State<'_, AppState>,
) -> Result<(), String> {
let project = state
.projects_store
.get(&project_id)
.ok_or_else(|| format!("Project {} not found", project_id))?;
if let Some(ref container_id) = project.container_id {
state.projects_store.update_status(&project_id, ProjectStatus::Stopping)?;
// Close exec sessions for this project
state.exec_manager.close_all_sessions().await;
docker::stop_container(container_id).await?;
state.projects_store.update_status(&project_id, ProjectStatus::Stopped)?;
}
Ok(())
}
/// Destroy the project's current container (if any) and start a fresh one.
/// The `.claude` config volume is keyed by project id, so auth state survives.
#[tauri::command]
pub async fn rebuild_project_container(
    project_id: String,
    state: State<'_, AppState>,
) -> Result<Project, String> {
    let project = state
        .projects_store
        .get(&project_id)
        .ok_or_else(|| format!("Project {} not found", project_id))?;
    // Remove existing container
    if let Some(ref container_id) = project.container_id {
        // NOTE(review): closes terminal sessions for every project, not just this one.
        state.exec_manager.close_all_sessions().await;
        // Stop is best-effort; removal uses force, so a still-running container goes away too.
        let _ = docker::stop_container(container_id).await;
        docker::remove_container(container_id).await?;
        state.projects_store.set_container_id(&project_id, None)?;
    }
    // Start fresh
    start_project_container(project_id, state).await
}
/// Platform-default path of the Docker daemon endpoint: the named pipe on
/// Windows, the Unix socket everywhere else.
fn default_docker_socket() -> String {
    let path = if cfg!(target_os = "windows") {
        "//./pipe/docker_engine"
    } else {
        "/var/run/docker.sock"
    };
    path.to_string()
}

View File

@@ -0,0 +1,16 @@
use crate::storage::secure;
/// Store the Anthropic API key in the OS keychain.
#[tauri::command]
pub async fn set_api_key(key: String) -> Result<(), String> {
    secure::store_api_key(&key)
}
/// Whether an Anthropic API key is present in the OS keychain.
#[tauri::command]
pub async fn has_api_key() -> Result<bool, String> {
    secure::has_api_key()
}
/// Remove the Anthropic API key from the OS keychain (no-op if absent).
#[tauri::command]
pub async fn delete_api_key() -> Result<(), String> {
    secure::delete_api_key()
}

View File

@@ -0,0 +1,74 @@
use tauri::{AppHandle, Emitter, State};
use crate::AppState;
/// Open an interactive Claude Code terminal inside the project's container.
///
/// Output and exit notifications are emitted on per-session events
/// (`terminal-output-<id>` / `terminal-exit-<id>`) that the frontend
/// subscribes to.
#[tauri::command]
pub async fn open_terminal_session(
    project_id: String,
    session_id: String,
    app_handle: AppHandle,
    state: State<'_, AppState>,
) -> Result<(), String> {
    let Some(project) = state.projects_store.get(&project_id) else {
        return Err(format!("Project {} not found", project_id));
    };
    let Some(ref container_id) = project.container_id else {
        return Err("Container not running".to_string());
    };
    // Command executed in the container for this terminal.
    let cmd: Vec<String> = ["claude", "--dangerously-skip-permissions"]
        .iter()
        .map(|s| s.to_string())
        .collect();
    let output_event = format!("terminal-output-{}", session_id);
    let exit_event = format!("terminal-exit-{}", session_id);
    let output_handle = app_handle.clone();
    let exit_handle = app_handle;
    state
        .exec_manager
        .create_session(
            container_id,
            &session_id,
            cmd,
            move |data| {
                // Best-effort: a failed emit just drops that chunk of output.
                let _ = output_handle.emit(&output_event, data);
            },
            Box::new(move || {
                let _ = exit_handle.emit(&exit_event, ());
            }),
        )
        .await
}
/// Forward raw bytes typed in the frontend terminal to the session's stdin.
#[tauri::command]
pub async fn terminal_input(
    session_id: String,
    data: Vec<u8>,
    state: State<'_, AppState>,
) -> Result<(), String> {
    state.exec_manager.send_input(&session_id, data).await
}
/// Resize the session's pseudo-terminal to match the frontend terminal size.
#[tauri::command]
pub async fn terminal_resize(
    session_id: String,
    cols: u16,
    rows: u16,
    state: State<'_, AppState>,
) -> Result<(), String> {
    state.exec_manager.resize(&session_id, cols, rows).await
}
/// Close a terminal session; succeeds even when the session no longer exists.
#[tauri::command]
pub async fn close_terminal_session(
    session_id: String,
    state: State<'_, AppState>,
) -> Result<(), String> {
    state.exec_manager.close_session(&session_id).await;
    Ok(())
}

View File

@@ -0,0 +1,23 @@
use bollard::Docker;
use std::sync::OnceLock;
/// Process-wide Docker client, created on first use.
///
/// Bug fix: the previous version cached `Result<Docker, String>`, so a failed
/// first connection (e.g. a malformed DOCKER_HOST) was cached forever and
/// every later call failed until the app restarted. Only a *successful*
/// client is cached now; failures are returned and the next call retries.
static DOCKER: OnceLock<Docker> = OnceLock::new();
pub fn get_docker() -> Result<&'static Docker, String> {
    if let Some(docker) = DOCKER.get() {
        return Ok(docker);
    }
    let docker = Docker::connect_with_local_defaults()
        .map_err(|e| format!("Failed to connect to Docker daemon: {}", e))?;
    // If another thread raced us, get_or_init keeps whichever client was
    // stored first and drops the duplicate — both are valid handles.
    Ok(DOCKER.get_or_init(|| docker))
}
/// Ping the Docker daemon: Ok(true) when it responds, Err with detail otherwise.
pub async fn check_docker_available() -> Result<bool, String> {
    let docker = get_docker()?;
    docker
        .ping()
        .await
        .map(|_| true)
        .map_err(|e| format!("Docker daemon not responding: {}", e))
}

View File

@@ -0,0 +1,223 @@
use bollard::container::{
Config, CreateContainerOptions, ListContainersOptions, RemoveContainerOptions,
StartContainerOptions, StopContainerOptions,
};
use bollard::models::{ContainerSummary, HostConfig, Mount, MountTypeEnum};
use std::collections::HashMap;
use super::client::get_docker;
use crate::models::{container_config, ContainerInfo, Project};
/// Look up the id of a previously created container for this project, by its
/// deterministic name. Returns Ok(None) when no such container exists.
pub async fn find_existing_container(project: &Project) -> Result<Option<String>, String> {
    let docker = get_docker()?;
    let container_name = project.container_name();
    // Server-side name filter narrows the candidates; Docker's name filter is
    // a substring match, so the exact name is verified below.
    let mut filters: HashMap<String, Vec<String>> = HashMap::new();
    filters.insert("name".to_string(), vec![container_name.clone()]);
    let containers: Vec<ContainerSummary> = docker
        .list_containers(Some(ListContainersOptions {
            all: true,
            filters,
            ..Default::default()
        }))
        .await
        .map_err(|e| format!("Failed to list containers: {}", e))?;
    // Docker reports names with a leading slash.
    let expected = format!("/{}", container_name);
    for c in &containers {
        let name_matches = c
            .names
            .as_ref()
            .map_or(false, |names| names.iter().any(|n| n == &expected));
        if name_matches {
            return Ok(c.id.clone());
        }
    }
    Ok(None)
}
/// Create (but do not start) the sandbox container for a project.
///
/// Wiring derived from the project:
/// - project dir        -> /workspace (read-write bind mount)
/// - per-project volume -> /home/claude/.claude (persists Claude auth/config)
/// - ssh_key_path       -> /home/claude/.ssh (read-only bind, optional)
/// - host Docker socket -> /var/run/docker.sock (optional, gated by `allow_docker_access`)
///
/// SECURITY: the API key and git token are injected as plain environment
/// variables, so they are visible via `docker inspect` on the host.
pub async fn create_container(
    project: &Project,
    api_key: Option<&str>,
    docker_socket_path: &str,
) -> Result<String, String> {
    let docker = get_docker()?;
    let container_name = project.container_name();
    let image = container_config::full_image_name();
    // Only set the env vars the project actually provides.
    let mut env_vars: Vec<String> = Vec::new();
    if let Some(key) = api_key {
        env_vars.push(format!("ANTHROPIC_API_KEY={}", key));
    }
    if let Some(ref token) = project.git_token {
        env_vars.push(format!("GIT_TOKEN={}", token));
    }
    if let Some(ref name) = project.git_user_name {
        env_vars.push(format!("GIT_USER_NAME={}", name));
    }
    if let Some(ref email) = project.git_user_email {
        env_vars.push(format!("GIT_USER_EMAIL={}", email));
    }
    let mut mounts = vec![
        // Project directory -> /workspace
        Mount {
            target: Some("/workspace".to_string()),
            source: Some(project.path.clone()),
            typ: Some(MountTypeEnum::BIND),
            read_only: Some(false),
            ..Default::default()
        },
        // Named volume for claude config persistence (one per project id)
        Mount {
            target: Some("/home/claude/.claude".to_string()),
            source: Some(format!("triple-c-claude-config-{}", project.id)),
            typ: Some(MountTypeEnum::VOLUME),
            read_only: Some(false),
            ..Default::default()
        },
    ];
    // SSH keys mount (read-only)
    if let Some(ref ssh_path) = project.ssh_key_path {
        mounts.push(Mount {
            target: Some("/home/claude/.ssh".to_string()),
            source: Some(ssh_path.clone()),
            typ: Some(MountTypeEnum::BIND),
            read_only: Some(true),
            ..Default::default()
        });
    }
    // Docker socket (only if allowed — this effectively grants the container
    // control over the host's Docker daemon)
    if project.allow_docker_access {
        mounts.push(Mount {
            target: Some("/var/run/docker.sock".to_string()),
            source: Some(docker_socket_path.to_string()),
            typ: Some(MountTypeEnum::BIND),
            read_only: Some(false),
            ..Default::default()
        });
    }
    // Labels let the app recognize its own containers (see list_sibling_containers).
    let mut labels = HashMap::new();
    labels.insert("triple-c.managed".to_string(), "true".to_string());
    labels.insert("triple-c.project-id".to_string(), project.id.clone());
    labels.insert("triple-c.project-name".to_string(), project.name.clone());
    let host_config = HostConfig {
        mounts: Some(mounts),
        ..Default::default()
    };
    let config = Config {
        image: Some(image),
        hostname: Some("triple-c".to_string()),
        env: Some(env_vars),
        labels: Some(labels),
        working_dir: Some("/workspace".to_string()),
        host_config: Some(host_config),
        // tty: presumably keeps the image's default process alive so exec
        // sessions can attach — TODO confirm against the container entrypoint.
        tty: Some(true),
        ..Default::default()
    };
    let options = CreateContainerOptions {
        name: container_name,
        ..Default::default()
    };
    let response = docker
        .create_container(Some(options), config)
        .await
        .map_err(|e| format!("Failed to create container: {}", e))?;
    Ok(response.id)
}
/// Start a created (or previously stopped) container by id.
pub async fn start_container(container_id: &str) -> Result<(), String> {
    let docker = get_docker()?;
    match docker
        .start_container(container_id, None::<StartContainerOptions<String>>)
        .await
    {
        Ok(()) => Ok(()),
        Err(e) => Err(format!("Failed to start container: {}", e)),
    }
}
/// Stop a running container, allowing 10 seconds for a graceful shutdown
/// before Docker kills it.
pub async fn stop_container(container_id: &str) -> Result<(), String> {
    let docker = get_docker()?;
    let opts = StopContainerOptions { t: 10 };
    docker
        .stop_container(container_id, Some(opts))
        .await
        .map_err(|e| format!("Failed to stop container: {}", e))
}
/// Remove a container by id. Uses force, so a still-running container is
/// killed and deleted in one call.
pub async fn remove_container(container_id: &str) -> Result<(), String> {
    let docker = get_docker()?;
    let opts = RemoveContainerOptions {
        force: true,
        ..Default::default()
    };
    docker
        .remove_container(container_id, Some(opts))
        .await
        .map_err(|e| format!("Failed to remove container: {}", e))
}
/// Inspect the project's container. Returns Ok(None) when the project has no
/// container id recorded, or when inspection fails (e.g. the container was
/// removed outside the app).
pub async fn get_container_info(project: &Project) -> Result<Option<ContainerInfo>, String> {
    let Some(ref container_id) = project.container_id else {
        return Ok(None);
    };
    let docker = get_docker()?;
    let Ok(info) = docker.inspect_container(container_id, None).await else {
        // Inspect failure is treated as "no container" rather than an error.
        return Ok(None);
    };
    let status = match info.state.and_then(|s| s.status) {
        Some(s) => format!("{:?}", s),
        None => "unknown".to_string(),
    };
    Ok(Some(ContainerInfo {
        container_id: container_id.clone(),
        project_id: project.id.clone(),
        status,
        image: container_config::full_image_name(),
    }))
}
/// List every container on the host that is NOT managed by Triple-C
/// (i.e. lacks the `triple-c.managed` label).
pub async fn list_sibling_containers() -> Result<Vec<ContainerSummary>, String> {
    let docker = get_docker()?;
    let mut containers: Vec<ContainerSummary> = docker
        .list_containers(Some(ListContainersOptions::<String> {
            all: true,
            ..Default::default()
        }))
        .await
        .map_err(|e| format!("Failed to list containers: {}", e))?;
    // Drop our own containers; ones without labels are kept.
    containers.retain(|c| {
        !c.labels
            .as_ref()
            .map_or(false, |labels| labels.contains_key("triple-c.managed"))
    });
    Ok(containers)
}

View File

@@ -0,0 +1,183 @@
use bollard::exec::{CreateExecOptions, ResizeExecOptions, StartExecResults};
use futures_util::StreamExt;
use std::collections::HashMap;
use std::sync::Arc;
use tokio::io::AsyncWriteExt;
use tokio::sync::{mpsc, Mutex};
use super::client::get_docker;
/// Handle to one attached `docker exec` session (an interactive terminal).
pub struct ExecSession {
    // Docker exec instance id, needed for resize calls.
    pub exec_id: String,
    // Feeds bytes to the stdin-writer task spawned in create_session.
    pub input_tx: mpsc::UnboundedSender<Vec<u8>>,
    // Signals the output-reader task to stop.
    shutdown_tx: mpsc::Sender<()>,
}
impl ExecSession {
    /// Queue bytes for the exec's stdin. Fails only when the input channel is
    /// closed (the writer task has exited).
    pub async fn send_input(&self, data: Vec<u8>) -> Result<(), String> {
        self.input_tx
            .send(data)
            .map_err(|e| format!("Failed to send input: {}", e))
    }
    /// Resize the remote pseudo-terminal to `cols` x `rows`.
    pub async fn resize(&self, cols: u16, rows: u16) -> Result<(), String> {
        let docker = get_docker()?;
        docker
            .resize_exec(
                &self.exec_id,
                ResizeExecOptions {
                    width: cols,
                    height: rows,
                },
            )
            .await
            .map_err(|e| format!("Failed to resize exec: {}", e))
    }
    /// Ask the output-reader task to stop. Best-effort: a full or closed
    /// channel is ignored (try_send never blocks).
    pub fn shutdown(&self) {
        let _ = self.shutdown_tx.try_send(());
    }
}
/// Registry of live exec sessions, keyed by frontend-chosen session id.
pub struct ExecSessionManager {
    // Async mutex: held across awaits by the session methods below.
    sessions: Arc<Mutex<HashMap<String, ExecSession>>>,
}
impl ExecSessionManager {
    /// Empty session registry.
    pub fn new() -> Self {
        Self {
            sessions: Arc::new(Mutex::new(HashMap::new())),
        }
    }
    /// Create an interactive (TTY) exec running `cmd` in `container_id`, wire
    /// its I/O to the callbacks, and register it under `session_id`.
    ///
    /// Two tasks are spawned: an output reader that pumps exec output into
    /// `on_output` and calls `on_exit` when the stream ends or shutdown is
    /// requested, and an input writer that drains the input channel into the
    /// exec's stdin.
    ///
    /// NOTE(review): sessions are not removed from the map when the exec
    /// exits on its own; the stale entry lingers until close_session /
    /// close_all_sessions is called for it.
    pub async fn create_session<F>(
        &self,
        container_id: &str,
        session_id: &str,
        cmd: Vec<String>,
        on_output: F,
        on_exit: Box<dyn FnOnce() + Send>,
    ) -> Result<(), String>
    where
        F: Fn(Vec<u8>) + Send + 'static,
    {
        let docker = get_docker()?;
        let exec = docker
            .create_exec(
                container_id,
                CreateExecOptions {
                    attach_stdin: Some(true),
                    attach_stdout: Some(true),
                    attach_stderr: Some(true),
                    tty: Some(true),
                    cmd: Some(cmd),
                    working_dir: Some("/workspace".to_string()),
                    ..Default::default()
                },
            )
            .await
            .map_err(|e| format!("Failed to create exec: {}", e))?;
        let exec_id = exec.id.clone();
        let result = docker
            .start_exec(&exec_id, None)
            .await
            .map_err(|e| format!("Failed to start exec: {}", e))?;
        // Unbounded is fine here: terminal keystrokes are small and bursty.
        let (input_tx, mut input_rx) = mpsc::unbounded_channel::<Vec<u8>>();
        let (shutdown_tx, mut shutdown_rx) = mpsc::channel::<()>(1);
        match result {
            StartExecResults::Attached { mut output, mut input } => {
                // Output reader task
                let session_id_clone = session_id.to_string();
                let shutdown_tx_clone = shutdown_tx.clone();
                tokio::spawn(async move {
                    loop {
                        tokio::select! {
                            msg = output.next() => {
                                match msg {
                                    Some(Ok(output)) => {
                                        on_output(output.into_bytes().to_vec());
                                    }
                                    Some(Err(e)) => {
                                        log::error!("Exec output error for {}: {}", session_id_clone, e);
                                        break;
                                    }
                                    None => {
                                        // Stream closed: the exec'd process exited.
                                        log::info!("Exec output stream ended for {}", session_id_clone);
                                        break;
                                    }
                                }
                            }
                            _ = shutdown_rx.recv() => {
                                log::info!("Exec session {} shutting down", session_id_clone);
                                break;
                            }
                        }
                    }
                    on_exit();
                    // Keep a sender alive for the task's whole lifetime so
                    // recv() above cannot resolve (with None) merely because
                    // the ExecSession handle — the other sender — was dropped.
                    let _ = shutdown_tx_clone;
                });
                // Input writer task
                tokio::spawn(async move {
                    while let Some(data) = input_rx.recv().await {
                        if let Err(e) = input.write_all(&data).await {
                            log::error!("Failed to write to exec stdin: {}", e);
                            break;
                        }
                    }
                });
            }
            StartExecResults::Detached => {
                // Should not happen: attached stdio was requested above.
                return Err("Exec started in detached mode".to_string());
            }
        }
        let session = ExecSession {
            exec_id,
            input_tx,
            shutdown_tx,
        };
        self.sessions
            .lock()
            .await
            .insert(session_id.to_string(), session);
        Ok(())
    }
    /// Route input bytes to the named session's stdin.
    pub async fn send_input(&self, session_id: &str, data: Vec<u8>) -> Result<(), String> {
        let sessions = self.sessions.lock().await;
        let session = sessions
            .get(session_id)
            .ok_or_else(|| format!("Session {} not found", session_id))?;
        session.send_input(data).await
    }
    /// Resize the named session's pseudo-terminal.
    pub async fn resize(&self, session_id: &str, cols: u16, rows: u16) -> Result<(), String> {
        let sessions = self.sessions.lock().await;
        let session = sessions
            .get(session_id)
            .ok_or_else(|| format!("Session {} not found", session_id))?;
        session.resize(cols, rows).await
    }
    /// Remove one session and signal its reader task to stop (no-op if absent).
    pub async fn close_session(&self, session_id: &str) {
        let mut sessions = self.sessions.lock().await;
        if let Some(session) = sessions.remove(session_id) {
            session.shutdown();
        }
    }
    /// Remove and shut down every session (used on project stop/removal).
    pub async fn close_all_sessions(&self) {
        let mut sessions = self.sessions.lock().await;
        for (_, session) in sessions.drain() {
            session.shutdown();
        }
    }
}

View File

@@ -0,0 +1,96 @@
use bollard::image::{BuildImageOptions, ListImagesOptions};
use bollard::models::ImageSummary;
use futures_util::StreamExt;
use std::collections::HashMap;
use std::io::Write;
use super::client::get_docker;
use crate::models::container_config;
// Container build sources embedded at compile time, so the app can build the
// sandbox image without shipping the files separately.
const DOCKERFILE: &str = include_str!("../../../../container/Dockerfile");
const ENTRYPOINT: &str = include_str!("../../../../container/entrypoint.sh");
/// Whether the Triple-C image (name:tag reference) is present locally.
pub async fn image_exists() -> Result<bool, String> {
    let docker = get_docker()?;
    // Filter images server-side by the exact reference.
    let mut filters: HashMap<String, Vec<String>> = HashMap::new();
    filters.insert(
        "reference".to_string(),
        vec![container_config::full_image_name()],
    );
    let images: Vec<ImageSummary> = docker
        .list_images(Some(ListImagesOptions {
            filters,
            ..Default::default()
        }))
        .await
        .map_err(|e| format!("Failed to list images: {}", e))?;
    Ok(!images.is_empty())
}
/// Build the sandbox image from the embedded Dockerfile/entrypoint, streaming
/// build-log lines to `on_progress`. Returns Err on the first build error.
pub async fn build_image<F>(on_progress: F) -> Result<(), String>
where
    F: Fn(String) + Send + 'static,
{
    let docker = get_docker()?;
    let full_name = container_config::full_image_name();
    // The Docker build API consumes the build context as a tar archive.
    let tar_bytes = create_build_context()
        .map_err(|e| format!("Failed to create build context: {}", e))?;
    let options = BuildImageOptions {
        t: full_name.as_str(),
        rm: true,
        forcerm: true,
        ..Default::default()
    };
    let mut stream = docker.build_image(options, None, Some(tar_bytes.into()));
    while let Some(result) = stream.next().await {
        let output = result.map_err(|e| format!("Build stream error: {}", e))?;
        // A single message can carry both a log line and an error.
        if let Some(line) = output.stream {
            on_progress(line);
        }
        if let Some(error) = output.error {
            return Err(format!("Build error: {}", error));
        }
    }
    Ok(())
}
/// Build an in-memory tar archive containing the Dockerfile and entrypoint
/// script, suitable for the Docker image-build API.
///
/// Bug fix: the previous version called `buf.flush()` "to make sure all data
/// is written" — `Write::flush` on a `Vec<u8>` is a no-op, so the call (and
/// the misleading comment) are gone. Finishing the archive and dropping the
/// builder is what completes the data.
fn create_build_context() -> Result<Vec<u8>, std::io::Error> {
    let mut buf = Vec::new();
    {
        let mut archive = tar::Builder::new(&mut buf);
        append_file(&mut archive, "Dockerfile", DOCKERFILE.as_bytes(), 0o644)?;
        // entrypoint.sh must be executable inside the image.
        append_file(&mut archive, "entrypoint.sh", ENTRYPOINT.as_bytes(), 0o755)?;
        // Writes the trailing end-of-archive blocks.
        archive.finish()?;
    }
    Ok(buf)
}
/// Append one in-memory file to the tar archive with the given permission mode.
fn append_file<W: std::io::Write>(
    archive: &mut tar::Builder<W>,
    name: &str,
    data: &[u8],
    mode: u32,
) -> Result<(), std::io::Error> {
    let mut header = tar::Header::new_gnu();
    header.set_size(data.len() as u64);
    header.set_mode(mode);
    header.set_cksum();
    archive.append_data(&mut header, name, data)
}

View File

@@ -0,0 +1,9 @@
// Docker integration layer: shared client, container lifecycle, image build,
// and interactive exec sessions. Everything is re-exported flat so callers
// use `crate::docker::*`.
pub mod client;
pub mod container;
pub mod image;
pub mod exec;
pub use client::*;
pub use container::*;
pub use image::*;
pub use exec::*;

52
app/src-tauri/src/lib.rs Normal file
View File

@@ -0,0 +1,52 @@
mod commands;
mod docker;
mod models;
mod storage;
use docker::exec::ExecSessionManager;
use storage::projects_store::ProjectsStore;
/// Shared application state managed by Tauri and injected into every command.
pub struct AppState {
    /// Persistent project list (JSON file on disk).
    pub projects_store: ProjectsStore,
    /// Live terminal (docker exec) sessions.
    pub exec_manager: ExecSessionManager,
}
/// App entry point: initializes logging, registers plugins, shared state, and
/// every Tauri command, then runs the event loop until the app exits.
pub fn run() {
    env_logger::init();
    tauri::Builder::default()
        .plugin(tauri_plugin_store::Builder::default().build())
        .plugin(tauri_plugin_dialog::init())
        .plugin(tauri_plugin_opener::init())
        .manage(AppState {
            projects_store: ProjectsStore::new(),
            exec_manager: ExecSessionManager::new(),
        })
        .invoke_handler(tauri::generate_handler![
            // Docker
            commands::docker_commands::check_docker,
            commands::docker_commands::check_image_exists,
            commands::docker_commands::build_image,
            commands::docker_commands::get_container_info,
            commands::docker_commands::list_sibling_containers,
            // Projects
            commands::project_commands::list_projects,
            commands::project_commands::add_project,
            commands::project_commands::remove_project,
            commands::project_commands::update_project,
            commands::project_commands::start_project_container,
            commands::project_commands::stop_project_container,
            commands::project_commands::rebuild_project_container,
            // Settings
            commands::settings_commands::set_api_key,
            commands::settings_commands::has_api_key,
            commands::settings_commands::delete_api_key,
            // Terminal
            commands::terminal_commands::open_terminal_session,
            commands::terminal_commands::terminal_input,
            commands::terminal_commands::terminal_resize,
            commands::terminal_commands::close_terminal_session,
        ])
        .run(tauri::generate_context!())
        .expect("error while running tauri application");
}

View File

@@ -0,0 +1,6 @@
// Prevents additional console window on Windows in release
#![cfg_attr(not(debug_assertions), windows_subsystem = "windows")]
fn main() {
    // All app logic lives in the library crate; the binary just delegates.
    triple_c_lib::run()
}

View File

@@ -0,0 +1,20 @@
use serde::{Deserialize, Serialize};
/// App-wide defaults applied when configuring new projects. Every field
/// defaults to `None` ("not configured"), so `Default` is derived instead of
/// the previous hand-written impl that duplicated it.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct AppSettings {
    /// Default SSH key path offered for new projects, if any.
    pub default_ssh_key_path: Option<String>,
    pub default_git_user_name: Option<String>,
    pub default_git_user_email: Option<String>,
    /// Override for the Docker socket path; the platform default is used when `None`.
    pub docker_socket_path: Option<String>,
}

View File

@@ -0,0 +1,16 @@
use serde::{Deserialize, Serialize};
/// Snapshot of a project's container state, returned to the frontend.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ContainerInfo {
    pub container_id: String,
    pub project_id: String,
    /// Debug-formatted container state from Docker; "unknown" when absent.
    pub status: String,
    /// Image reference (name:tag) the app manages.
    pub image: String,
}
/// Repository name of the sandbox image.
pub const IMAGE_NAME: &str = "triple-c";
/// Tag used when building and looking up the sandbox image.
pub const IMAGE_TAG: &str = "latest";
/// Full `name:tag` image reference, e.g. "triple-c:latest".
pub fn full_image_name() -> String {
    format!("{}:{}", IMAGE_NAME, IMAGE_TAG)
}

View File

@@ -0,0 +1,7 @@
// Data-model types shared by the commands, docker, and storage layers.
pub mod project;
pub mod container_config;
pub mod app_settings;
pub use project::*;
pub use container_config::*;
pub use app_settings::*;

View File

@@ -0,0 +1,69 @@
use serde::{Deserialize, Serialize};
/// A managed project: a host directory paired with one sandbox container.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Project {
    /// UUID v4 assigned at creation; also embedded in the container name.
    pub id: String,
    /// Display name.
    pub name: String,
    /// Host directory bind-mounted at /workspace inside the container.
    pub path: String,
    /// Docker container id, once a container has been created.
    pub container_id: Option<String>,
    /// Current lifecycle state of the container.
    pub status: ProjectStatus,
    /// How Claude authenticates (interactive login vs. keychain API key).
    pub auth_mode: AuthMode,
    /// When true, the host Docker socket is mounted into the container.
    pub allow_docker_access: bool,
    /// Host path mounted read-only at /home/claude/.ssh when set.
    pub ssh_key_path: Option<String>,
    /// Injected into the container as GIT_TOKEN when set.
    pub git_token: Option<String>,
    /// Injected into the container as GIT_USER_NAME when set.
    pub git_user_name: Option<String>,
    /// Injected into the container as GIT_USER_EMAIL when set.
    pub git_user_email: Option<String>,
    /// RFC 3339 UTC timestamp set at creation.
    pub created_at: String,
    /// RFC 3339 UTC timestamp refreshed by store mutations.
    pub updated_at: String,
}
/// Lifecycle state of a project's container, driven by the start/stop
/// commands. Serialized lowercase (e.g. "running") for the frontend.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum ProjectStatus {
    Stopped,
    Starting,
    Running,
    Stopping,
    Error,
}
/// How the project authenticates with Claude.
/// - `Login`: User runs `claude login` inside the container (OAuth, persisted via config volume)
/// - `ApiKey`: Uses the API key stored in the OS keychain
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum AuthMode {
    /// Serialized as "login".
    Login,
    /// Serialized as "api_key"; the key is injected as ANTHROPIC_API_KEY.
    ApiKey,
}
impl Default for AuthMode {
    /// New projects default to interactive login (no API key required).
    fn default() -> Self {
        Self::Login
    }
}
impl Project {
    /// Create a stopped project with a fresh UUID, default auth mode (Login),
    /// and both timestamps set to now (RFC 3339 UTC).
    pub fn new(name: String, path: String) -> Self {
        let timestamp = chrono::Utc::now().to_rfc3339();
        Self {
            id: uuid::Uuid::new_v4().to_string(),
            name,
            path,
            container_id: None,
            status: ProjectStatus::Stopped,
            auth_mode: AuthMode::default(),
            allow_docker_access: false,
            ssh_key_path: None,
            git_token: None,
            git_user_name: None,
            git_user_email: None,
            created_at: timestamp.clone(),
            updated_at: timestamp,
        }
    }
    /// Deterministic Docker container name for this project.
    pub fn container_name(&self) -> String {
        ["triple-c-", self.id.as_str()].concat()
    }
}

View File

@@ -0,0 +1,5 @@
// Persistence layer: JSON-backed project list and OS-keychain secret storage.
pub mod projects_store;
pub mod secure;
pub use projects_store::*;
pub use secure::*;

View File

@@ -0,0 +1,129 @@
use std::fs;
use std::path::PathBuf;
use std::sync::Mutex;
use crate::models::Project;
/// Thread-safe, file-backed project list. Every mutation rewrites the whole
/// JSON file under the lock.
pub struct ProjectsStore {
    // In-memory source of truth, guarded for concurrent command access.
    projects: Mutex<Vec<Project>>,
    // Location of projects.json under the platform data directory.
    file_path: PathBuf,
}
impl ProjectsStore {
pub fn new() -> Self {
let data_dir = dirs::data_dir()
.unwrap_or_else(|| PathBuf::from("."))
.join("triple-c");
fs::create_dir_all(&data_dir).ok();
let file_path = data_dir.join("projects.json");
let projects = if file_path.exists() {
match fs::read_to_string(&file_path) {
Ok(data) => match serde_json::from_str(&data) {
Ok(parsed) => parsed,
Err(e) => {
log::error!("Failed to parse projects.json: {}. Starting with empty list.", e);
// Back up the corrupted file
let backup = file_path.with_extension("json.bak");
if let Err(be) = fs::copy(&file_path, &backup) {
log::error!("Failed to back up corrupted projects.json: {}", be);
}
Vec::new()
}
},
Err(e) => {
log::error!("Failed to read projects.json: {}", e);
Vec::new()
}
}
} else {
Vec::new()
};
Self {
projects: Mutex::new(projects),
file_path,
}
}
fn lock(&self) -> std::sync::MutexGuard<'_, Vec<Project>> {
self.projects.lock().unwrap_or_else(|e| e.into_inner())
}
fn save(&self, projects: &[Project]) -> Result<(), String> {
let data = serde_json::to_string_pretty(projects)
.map_err(|e| format!("Failed to serialize projects: {}", e))?;
// Atomic write: write to temp file, then rename
let tmp_path = self.file_path.with_extension("json.tmp");
fs::write(&tmp_path, data)
.map_err(|e| format!("Failed to write temp projects file: {}", e))?;
fs::rename(&tmp_path, &self.file_path)
.map_err(|e| format!("Failed to rename projects file: {}", e))?;
Ok(())
}
pub fn list(&self) -> Vec<Project> {
self.lock().clone()
}
pub fn get(&self, id: &str) -> Option<Project> {
self.lock().iter().find(|p| p.id == id).cloned()
}
pub fn add(&self, project: Project) -> Result<Project, String> {
let mut projects = self.lock();
let cloned = project.clone();
projects.push(project);
self.save(&projects)?;
Ok(cloned)
}
pub fn update(&self, updated: Project) -> Result<Project, String> {
let mut projects = self.lock();
if let Some(p) = projects.iter_mut().find(|p| p.id == updated.id) {
*p = updated.clone();
self.save(&projects)?;
Ok(updated)
} else {
Err(format!("Project {} not found", updated.id))
}
}
pub fn remove(&self, id: &str) -> Result<(), String> {
let mut projects = self.lock();
let initial_len = projects.len();
projects.retain(|p| p.id != id);
if projects.len() == initial_len {
return Err(format!("Project {} not found", id));
}
self.save(&projects)?;
Ok(())
}
pub fn update_status(&self, id: &str, status: crate::models::ProjectStatus) -> Result<(), String> {
let mut projects = self.lock();
if let Some(p) = projects.iter_mut().find(|p| p.id == id) {
p.status = status;
p.updated_at = chrono::Utc::now().to_rfc3339();
self.save(&projects)?;
Ok(())
} else {
Err(format!("Project {} not found", id))
}
}
pub fn set_container_id(&self, project_id: &str, container_id: Option<String>) -> Result<(), String> {
let mut projects = self.lock();
if let Some(p) = projects.iter_mut().find(|p| p.id == project_id) {
p.container_id = container_id;
p.updated_at = chrono::Utc::now().to_rfc3339();
self.save(&projects)?;
Ok(())
} else {
Err(format!("Project {} not found", project_id))
}
}
}

View File

@@ -0,0 +1,38 @@
const SERVICE_NAME: &str = "triple-c";
const API_KEY_USER: &str = "anthropic-api-key";
pub fn store_api_key(key: &str) -> Result<(), String> {
let entry = keyring::Entry::new(SERVICE_NAME, API_KEY_USER)
.map_err(|e| format!("Keyring error: {}", e))?;
entry
.set_password(key)
.map_err(|e| format!("Failed to store API key: {}", e))
}
pub fn get_api_key() -> Result<Option<String>, String> {
let entry = keyring::Entry::new(SERVICE_NAME, API_KEY_USER)
.map_err(|e| format!("Keyring error: {}", e))?;
match entry.get_password() {
Ok(key) => Ok(Some(key)),
Err(keyring::Error::NoEntry) => Ok(None),
Err(e) => Err(format!("Failed to retrieve API key: {}", e)),
}
}
pub fn delete_api_key() -> Result<(), String> {
let entry = keyring::Entry::new(SERVICE_NAME, API_KEY_USER)
.map_err(|e| format!("Keyring error: {}", e))?;
match entry.delete_credential() {
Ok(()) => Ok(()),
Err(keyring::Error::NoEntry) => Ok(()),
Err(e) => Err(format!("Failed to delete API key: {}", e)),
}
}
pub fn has_api_key() -> Result<bool, String> {
match get_api_key() {
Ok(Some(_)) => Ok(true),
Ok(None) => Ok(false),
Err(e) => Err(e),
}
}

View File

@@ -0,0 +1,38 @@
{
"$schema": "https://raw.githubusercontent.com/tauri-apps/tauri/dev/crates/tauri-cli/schema.json",
"productName": "Triple-C",
"version": "0.1.0",
"identifier": "com.triple-c.app",
"build": {
"beforeDevCommand": "npm run dev",
"devUrl": "http://localhost:1420",
"beforeBuildCommand": "npm run build",
"frontendDist": "../dist"
},
"app": {
"windows": [
{
"title": "Triple-C",
"width": 1200,
"height": 800,
"resizable": true,
"fullscreen": false,
"minWidth": 800,
"minHeight": 600
}
],
"security": {
"csp": null
}
},
"bundle": {
"active": true,
"targets": "all",
"icon": [
"icons/32x32.png",
"icons/128x128.png",
"icons/128x128@2x.png"
]
},
"plugins": {}
}