Compare commits

...

5 Commits

Author SHA1 Message Date
96f8acc40d Fix Docker socket mount failing on Windows
All checks were successful
Build App / build-linux (push) Successful in 3m24s
Build App / build-windows (push) Successful in 3m51s
The Windows named pipe (//./pipe/docker_engine) cannot be bind-mounted
into a Linux container. Use /var/run/docker.sock as the mount source
on Windows, which Docker Desktop exposes for container mounts.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-27 15:49:00 -08:00
b77b9679b1 Auto-increment app version using git commit count in CI builds
All checks were successful
Build App / build-windows (push) Successful in 3m11s
Build App / build-linux (push) Successful in 4m7s
Version is computed as 0.1.{commit_count} and patched into
tauri.conf.json, package.json, and Cargo.toml at build time.
Release tags now use v0.1.N format instead of build-{sha}.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-27 12:12:24 -08:00
0a4f207556 Fix stopping one project killing all projects' terminal sessions
All checks were successful
Build App / build-windows (push) Successful in 3m11s
Build App / build-linux (push) Successful in 6m15s
close_all_sessions() was called when stopping/removing/rebuilding a
project, which shut down exec sessions for every project. Track
container_id per session and use close_sessions_for_container() to
only close sessions belonging to the target project.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-27 19:55:38 +00:00
839dd9f105 Update project documentation with architecture and recent changes
Expand Triple-C.md from a one-liner to comprehensive docs covering
architecture, container lifecycle, mounts, auth modes, sibling
containers, Docker socket handling, key files, and CSS/styling notes.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-27 11:50:41 -08:00
df3d434877 Fix SSH keys, git config, and HTTPS token not being applied on container restart
All checks were successful
Build App / build-linux (push) Successful in 2m26s
Build App / build-windows (push) Successful in 3m17s
Recreate the container when SSH key path, git name, git email, or git
HTTPS token change — not just when the docker socket toggle changes.
The claude config named volume persists across recreation so no data
is lost.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-27 19:37:06 +00:00
5 changed files with 247 additions and 34 deletions

View File

@@ -22,6 +22,24 @@ jobs:
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Compute version
id: version
run: |
COMMIT_COUNT=$(git rev-list --count HEAD)
VERSION="0.1.${COMMIT_COUNT}"
echo "VERSION=${VERSION}" >> $GITHUB_OUTPUT
echo "Computed version: ${VERSION}"
- name: Set app version
run: |
VERSION="${{ steps.version.outputs.VERSION }}"
sed -i "s/\"version\": \".*\"/\"version\": \"${VERSION}\"/" app/src-tauri/tauri.conf.json
sed -i "s/\"version\": \".*\"/\"version\": \"${VERSION}\"/" app/package.json
sed -i "s/^version = \".*\"/version = \"${VERSION}\"/" app/src-tauri/Cargo.toml
echo "Patched version to ${VERSION}"
- name: Install system dependencies - name: Install system dependencies
run: | run: |
@@ -80,12 +98,12 @@ jobs:
env: env:
TOKEN: ${{ secrets.REGISTRY_TOKEN }} TOKEN: ${{ secrets.REGISTRY_TOKEN }}
run: | run: |
TAG="build-$(echo ${{ gitea.sha }} | cut -c1-7)" TAG="v${{ steps.version.outputs.VERSION }}"
# Create release # Create release
curl -s -X POST \ curl -s -X POST \
-H "Authorization: token ${TOKEN}" \ -H "Authorization: token ${TOKEN}" \
-H "Content-Type: application/json" \ -H "Content-Type: application/json" \
-d "{\"tag_name\": \"${TAG}\", \"name\": \"Linux Build ${TAG}\", \"body\": \"Automated build from commit ${{ gitea.sha }}\"}" \ -d "{\"tag_name\": \"${TAG}\", \"name\": \"Triple-C ${TAG} (Linux)\", \"body\": \"Automated build from commit ${{ gitea.sha }}\"}" \
"${GITEA_URL}/api/v1/repos/${REPO}/releases" > release.json "${GITEA_URL}/api/v1/repos/${REPO}/releases" > release.json
RELEASE_ID=$(cat release.json | grep -o '"id":[0-9]*' | head -1 | grep -o '[0-9]*') RELEASE_ID=$(cat release.json | grep -o '"id":[0-9]*' | head -1 | grep -o '[0-9]*')
echo "Release ID: ${RELEASE_ID}" echo "Release ID: ${RELEASE_ID}"
@@ -109,6 +127,25 @@ jobs:
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Compute version
id: version
run: |
for /f %%i in ('git rev-list --count HEAD') do set "COMMIT_COUNT=%%i"
set "VERSION=0.1.%COMMIT_COUNT%"
echo VERSION=%VERSION%>> %GITHUB_OUTPUT%
echo Computed version: %VERSION%
- name: Set app version
shell: powershell
run: |
$version = "${{ steps.version.outputs.VERSION }}"
(Get-Content app/src-tauri/tauri.conf.json) -replace '"version": ".*?"', "`"version`": `"$version`"" | Set-Content app/src-tauri/tauri.conf.json
(Get-Content app/package.json) -replace '"version": ".*?"', "`"version`": `"$version`"" | Set-Content app/package.json
(Get-Content app/src-tauri/Cargo.toml) -replace '^version = ".*?"', "version = `"$version`"" | Set-Content app/src-tauri/Cargo.toml
Write-Host "Patched version to $version"
- name: Install Rust stable - name: Install Rust stable
run: | run: |
@@ -186,9 +223,9 @@ jobs:
TOKEN: ${{ secrets.REGISTRY_TOKEN }} TOKEN: ${{ secrets.REGISTRY_TOKEN }}
COMMIT_SHA: ${{ gitea.sha }} COMMIT_SHA: ${{ gitea.sha }}
run: | run: |
set "TAG=build-win-%COMMIT_SHA:~0,7%" set "TAG=v${{ steps.version.outputs.VERSION }}-win"
echo Creating release %TAG%... echo Creating release %TAG%...
curl -s -X POST -H "Authorization: token %TOKEN%" -H "Content-Type: application/json" -d "{\"tag_name\": \"%TAG%\", \"name\": \"Windows Build %TAG%\", \"body\": \"Automated build from commit %COMMIT_SHA%\"}" "%GITEA_URL%/api/v1/repos/%REPO%/releases" > release.json curl -s -X POST -H "Authorization: token %TOKEN%" -H "Content-Type: application/json" -d "{\"tag_name\": \"%TAG%\", \"name\": \"Triple-C v${{ steps.version.outputs.VERSION }} (Windows)\", \"body\": \"Automated build from commit %COMMIT_SHA%\"}" "%GITEA_URL%/api/v1/repos/%REPO%/releases" > release.json
for /f "tokens=2 delims=:," %%a in ('findstr /c:"\"id\"" release.json') do set "RELEASE_ID=%%a" & goto :found for /f "tokens=2 delims=:," %%a in ('findstr /c:"\"id\"" release.json') do set "RELEASE_ID=%%a" & goto :found
:found :found
echo Release ID: %RELEASE_ID% echo Release ID: %RELEASE_ID%

View File

@@ -1,3 +1,104 @@
# Triple-C (Claude-Code-Container) # Triple-C (Claude-Code-Container)
Triple C is a container intended to limit what files Claude Code has access to, so when you run with `--dangerously-skip-permissions` Claude only has access to files/projects you provide to it. Triple-C is a cross-platform desktop application that sandboxes Claude Code inside Docker containers. When running with `--dangerously-skip-permissions`, Claude only has access to the files and projects you explicitly provide to it.
## Architecture
- **Frontend**: React 19 + TypeScript + Tailwind CSS v4 + Zustand state management
- **Backend**: Rust (Tauri v2 framework)
- **Terminal**: xterm.js with WebGL rendering
- **Docker API**: bollard (pure Rust Docker client)
### Layout Structure
```
┌─────────────────────────────────────────────────────┐
│ TopBar (terminal tabs + Docker/Image status) │
├────────────┬────────────────────────────────────────┤
│ Sidebar │ Main Content (terminal views) │
│ (25% w, │ │
│ responsive│ │
│ min/max) │ │
├────────────┴────────────────────────────────────────┤
│ StatusBar (project/terminal counts) │
└─────────────────────────────────────────────────────┘
```
### Container Lifecycle
1. **Create**: New container created with bind mounts, env vars, and labels
2. **Start**: Container started, entrypoint remaps UID/GID, sets up SSH, configures Docker group
3. **Terminal**: `docker exec` launches Claude Code with a PTY
4. **Stop**: Container halted (filesystem persists in named volume)
5. **Restart**: Existing container restarted; recreated if settings changed (e.g., Docker access toggled)
6. **Reset**: Container removed and recreated from scratch (named volume preserved)
### Mounts
| Target in Container | Source | Type | Notes |
|---|---|---|---|
| `/workspace` | Project directory | Bind | Read-write |
| `/home/claude/.claude` | `triple-c-claude-config-{projectId}` | Named Volume | Persists across container recreation |
| `/tmp/.host-ssh` | SSH key directory | Bind | Read-only; entrypoint copies to `~/.ssh` |
| `/home/claude/.aws` | AWS config directory | Bind | Read-only; for Bedrock auth |
| `/var/run/docker.sock` | Host Docker socket | Bind | Only if "Allow container spawning" is ON |
### Authentication Modes
Each project can independently use one of:
- **`/login`** (OAuth): User runs `claude login` inside the terminal. Token persisted in the config volume.
- **API Key**: Stored in the OS keychain, injected as `ANTHROPIC_API_KEY` env var.
- **AWS Bedrock**: Per-project AWS credentials (static keys, profile, or bearer token).
### Container Spawning (Sibling Containers)
When "Allow container spawning" is enabled per-project, the host Docker socket is bind-mounted into the container. This allows Claude Code to create **sibling containers** (not nested Docker-in-Docker) that are visible to the host. The entrypoint detects the socket's GID and adds the `claude` user to the matching group.
If the Docker access setting is toggled after a container already exists, the container is automatically recreated on next start to apply the mount change. The named config volume (keyed by project ID) is preserved across recreation.
### Docker Socket Path
The socket path is OS-aware:
- **Linux/macOS**: `/var/run/docker.sock`
- **Windows**: `//./pipe/docker_engine`
Users can override this in Settings via the global `docker_socket_path` option.
## Key Files
| File | Purpose |
|---|---|
| `app/src/App.tsx` | Root layout (TopBar + Sidebar + Main + StatusBar) |
| `app/src/index.css` | Global CSS variables, dark theme, `color-scheme: dark` |
| `app/src/components/layout/TopBar.tsx` | Terminal tabs + Docker/Image status indicators |
| `app/src/components/layout/Sidebar.tsx` | Responsive sidebar (25% width, min 224px, max 320px) |
| `app/src/components/layout/StatusBar.tsx` | Running project/terminal counts |
| `app/src/components/projects/ProjectCard.tsx` | Project config, auth mode, action buttons |
| `app/src/components/projects/ProjectList.tsx` | Project list in sidebar |
| `app/src/components/settings/SettingsPanel.tsx` | API key, Docker, AWS settings |
| `app/src/components/terminal/TerminalView.tsx` | xterm.js terminal with WebGL, URL detection |
| `app/src/components/terminal/TerminalTabs.tsx` | Tab bar for multiple terminal sessions |
| `app/src-tauri/src/docker/container.rs` | Container creation, mounts, env vars, inspection |
| `app/src-tauri/src/docker/exec.rs` | PTY exec sessions for terminal interaction |
| `app/src-tauri/src/docker/image.rs` | Image building/pulling |
| `app/src-tauri/src/commands/project_commands.rs` | Start/stop/rebuild Tauri command handlers |
| `app/src-tauri/src/models/project.rs` | Project struct (auth mode, Docker access, etc.) |
| `app/src-tauri/src/models/app_settings.rs` | Global settings (image source, Docker socket, AWS) |
| `container/Dockerfile` | Ubuntu 24.04 sandbox image with Claude Code + dev tools |
| `container/entrypoint.sh` | UID/GID remap, SSH setup, Docker group config |
## CSS / Styling Notes
- Uses **Tailwind CSS v4** with the Vite plugin (`@tailwindcss/vite`)
- All colors use CSS custom properties defined in `index.css` `:root`
- `color-scheme: dark` is set on `:root` so native form controls (select dropdowns, scrollbars) render in dark mode
- **Do not** add a global `* { padding: 0 }` reset — Tailwind v4 uses CSS `@layer`, and unlayered CSS overrides all layered utilities. Tailwind's built-in Preflight handles resets.
## Container Image
**Base**: Ubuntu 24.04
**Pre-installed tools**: Claude Code, Node.js 22 LTS + pnpm, Python 3.12 + uv + ruff, Rust (stable), Docker CLI, git + gh, AWS CLI v2, ripgrep, openssh-client, build-essential
**Default user**: `claude` (UID/GID 1000, remapped by entrypoint to match host)

View File

@@ -28,14 +28,12 @@ pub async fn remove_project(
// Stop and remove container if it exists // Stop and remove container if it exists
if let Some(project) = state.projects_store.get(&project_id) { if let Some(project) = state.projects_store.get(&project_id) {
if let Some(ref container_id) = project.container_id { if let Some(ref container_id) = project.container_id {
state.exec_manager.close_sessions_for_container(container_id).await;
let _ = docker::stop_container(container_id).await; let _ = docker::stop_container(container_id).await;
let _ = docker::remove_container(container_id).await; let _ = docker::remove_container(container_id).await;
} }
} }
// Close any exec sessions
state.exec_manager.close_all_sessions().await;
state.projects_store.remove(&project_id) state.projects_store.remove(&project_id)
} }
@@ -102,20 +100,16 @@ pub async fn start_project_container(
// Check for existing container // Check for existing container
let container_id = if let Some(existing_id) = docker::find_existing_container(&project).await? { let container_id = if let Some(existing_id) = docker::find_existing_container(&project).await? {
// Check if docker socket mount matches the current project setting. // Compare the running container's configuration (mounts, env vars)
// If the user toggled "Allow container spawning" after the container was // against the current project settings. If anything changed (SSH key
// created, we need to recreate the container for the mount change to take // path, git config, docker socket, etc.) we recreate the container.
// effect. // Safe to recreate: the claude config named volume is keyed by
let has_socket = docker::container_has_docker_socket(&existing_id).await.unwrap_or(false); // project ID (not container ID) so it persists across recreation.
if has_socket != project.allow_docker_access { let needs_recreation = docker::container_needs_recreation(&existing_id, &project)
log::info!( .await
"Docker socket mismatch (container has_socket={}, project wants={}), recreating container", .unwrap_or(false);
has_socket, project.allow_docker_access if needs_recreation {
); log::info!("Container config changed, recreating container for project {}", project.id);
// Safe to remove and recreate: the claude config named volume is
// keyed by project ID (not container ID) so it persists across
// container recreation. Bind mounts (workspace, SSH, AWS) are
// host paths and are unaffected.
let _ = docker::stop_container(&existing_id).await; let _ = docker::stop_container(&existing_id).await;
docker::remove_container(&existing_id).await?; docker::remove_container(&existing_id).await?;
let new_id = docker::create_container( let new_id = docker::create_container(
@@ -170,7 +164,7 @@ pub async fn stop_project_container(
state.projects_store.update_status(&project_id, ProjectStatus::Stopping)?; state.projects_store.update_status(&project_id, ProjectStatus::Stopping)?;
// Close exec sessions for this project // Close exec sessions for this project
state.exec_manager.close_all_sessions().await; state.exec_manager.close_sessions_for_container(container_id).await;
docker::stop_container(container_id).await?; docker::stop_container(container_id).await?;
state.projects_store.update_status(&project_id, ProjectStatus::Stopped)?; state.projects_store.update_status(&project_id, ProjectStatus::Stopped)?;
@@ -191,7 +185,7 @@ pub async fn rebuild_project_container(
// Remove existing container // Remove existing container
if let Some(ref container_id) = project.container_id { if let Some(ref container_id) = project.container_id {
state.exec_manager.close_all_sessions().await; state.exec_manager.close_sessions_for_container(container_id).await;
let _ = docker::stop_container(container_id).await; let _ = docker::stop_container(container_id).await;
docker::remove_container(container_id).await?; docker::remove_container(container_id).await?;
state.projects_store.set_container_id(&project_id, None)?; state.projects_store.set_container_id(&project_id, None)?;

View File

@@ -212,9 +212,17 @@ pub async fn create_container(
// Docker socket (only if allowed) // Docker socket (only if allowed)
if project.allow_docker_access { if project.allow_docker_access {
// On Windows, the named pipe (//./pipe/docker_engine) cannot be
// bind-mounted into a Linux container. Docker Desktop exposes the
// daemon socket as /var/run/docker.sock for container mounts.
let mount_source = if docker_socket_path == "//./pipe/docker_engine" {
"/var/run/docker.sock".to_string()
} else {
docker_socket_path.to_string()
};
mounts.push(Mount { mounts.push(Mount {
target: Some("/var/run/docker.sock".to_string()), target: Some("/var/run/docker.sock".to_string()),
source: Some(docker_socket_path.to_string()), source: Some(mount_source),
typ: Some(MountTypeEnum::BIND), typ: Some(MountTypeEnum::BIND),
read_only: Some(false), read_only: Some(false),
..Default::default() ..Default::default()
@@ -288,25 +296,82 @@ pub async fn remove_container(container_id: &str) -> Result<(), String> {
.map_err(|e| format!("Failed to remove container: {}", e)) .map_err(|e| format!("Failed to remove container: {}", e))
} }
/// Check whether an existing container has docker socket mounted. /// Check whether the existing container's configuration still matches the
pub async fn container_has_docker_socket(container_id: &str) -> Result<bool, String> { /// current project settings. Returns `true` when the container must be
/// recreated (mounts or env vars differ).
pub async fn container_needs_recreation(container_id: &str, project: &Project) -> Result<bool, String> {
let docker = get_docker()?; let docker = get_docker()?;
let info = docker let info = docker
.inspect_container(container_id, None) .inspect_container(container_id, None)
.await .await
.map_err(|e| format!("Failed to inspect container: {}", e))?; .map_err(|e| format!("Failed to inspect container: {}", e))?;
let has_socket = info let mounts = info
.host_config .host_config
.and_then(|hc| hc.mounts) .as_ref()
.map(|mounts| { .and_then(|hc| hc.mounts.as_ref());
mounts.iter().any(|m| {
m.target.as_deref() == Some("/var/run/docker.sock") // ── Docker socket mount ──────────────────────────────────────────────
}) let has_socket = mounts
.map(|m| {
m.iter()
.any(|mount| mount.target.as_deref() == Some("/var/run/docker.sock"))
}) })
.unwrap_or(false); .unwrap_or(false);
if has_socket != project.allow_docker_access {
log::info!("Docker socket mismatch (container={}, project={})", has_socket, project.allow_docker_access);
return Ok(true);
}
Ok(has_socket) // ── SSH key path mount ───────────────────────────────────────────────
let ssh_mount_source = mounts
.and_then(|m| {
m.iter()
.find(|mount| mount.target.as_deref() == Some("/tmp/.host-ssh"))
})
.and_then(|mount| mount.source.as_deref());
let project_ssh = project.ssh_key_path.as_deref();
if ssh_mount_source != project_ssh {
log::info!(
"SSH key path mismatch (container={:?}, project={:?})",
ssh_mount_source,
project_ssh
);
return Ok(true);
}
// ── Git environment variables ────────────────────────────────────────
let env_vars = info
.config
.as_ref()
.and_then(|c| c.env.as_ref());
let get_env = |name: &str| -> Option<String> {
env_vars.and_then(|vars| {
vars.iter()
.find(|v| v.starts_with(&format!("{}=", name)))
.map(|v| v[name.len() + 1..].to_string())
})
};
let container_git_name = get_env("GIT_USER_NAME");
let container_git_email = get_env("GIT_USER_EMAIL");
let container_git_token = get_env("GIT_TOKEN");
if container_git_name.as_deref() != project.git_user_name.as_deref() {
log::info!("GIT_USER_NAME mismatch (container={:?}, project={:?})", container_git_name, project.git_user_name);
return Ok(true);
}
if container_git_email.as_deref() != project.git_user_email.as_deref() {
log::info!("GIT_USER_EMAIL mismatch (container={:?}, project={:?})", container_git_email, project.git_user_email);
return Ok(true);
}
if container_git_token.as_deref() != project.git_token.as_deref() {
log::info!("GIT_TOKEN mismatch");
return Ok(true);
}
Ok(false)
} }
pub async fn get_container_info(project: &Project) -> Result<Option<ContainerInfo>, String> { pub async fn get_container_info(project: &Project) -> Result<Option<ContainerInfo>, String> {

View File

@@ -9,6 +9,7 @@ use super::client::get_docker;
pub struct ExecSession { pub struct ExecSession {
pub exec_id: String, pub exec_id: String,
pub container_id: String,
pub input_tx: mpsc::UnboundedSender<Vec<u8>>, pub input_tx: mpsc::UnboundedSender<Vec<u8>>,
shutdown_tx: mpsc::Sender<()>, shutdown_tx: mpsc::Sender<()>,
} }
@@ -140,6 +141,7 @@ impl ExecSessionManager {
let session = ExecSession { let session = ExecSession {
exec_id, exec_id,
container_id: container_id.to_string(),
input_tx, input_tx,
shutdown_tx, shutdown_tx,
}; };
@@ -175,6 +177,20 @@ impl ExecSessionManager {
} }
} }
pub async fn close_sessions_for_container(&self, container_id: &str) {
let mut sessions = self.sessions.lock().await;
let ids_to_close: Vec<String> = sessions
.iter()
.filter(|(_, s)| s.container_id == container_id)
.map(|(id, _)| id.clone())
.collect();
for id in ids_to_close {
if let Some(session) = sessions.remove(&id) {
session.shutdown();
}
}
}
pub async fn close_all_sessions(&self) { pub async fn close_all_sessions(&self) {
let mut sessions = self.sessions.lock().await; let mut sessions = self.sessions.lock().await;
for (_, session) in sessions.drain() { for (_, session) in sessions.drain() {