Compare commits
17 Commits
v0.1.54
...
v0.1.71-mac
| Author | SHA1 | Date | |
|---|---|---|---|
| 3344ce1cbf | |||
| d642cc64de | |||
| e3502876eb | |||
| 4f41f0d98b | |||
| c9dc232fc4 | |||
| 2d4fce935f | |||
| e739f6aaff | |||
| 550159fc63 | |||
| e3c874bc75 | |||
| 6cae0e7feb | |||
| b566446b75 | |||
| 601a2db3cf | |||
| b795e27251 | |||
| 19d4cbce27 | |||
| 946ea03956 | |||
| ba4cb4176d | |||
| 4b56610ff5 |
84
.gitea/workflows/backfill-releases.yml
Normal file
84
.gitea/workflows/backfill-releases.yml
Normal file
@@ -0,0 +1,84 @@
|
||||
name: Backfill Releases to GitHub
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
backfill:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Backfill all Gitea releases to GitHub
|
||||
env:
|
||||
GH_PAT: ${{ secrets.GH_PAT }}
|
||||
GITEA_TOKEN: ${{ secrets.REGISTRY_TOKEN }}
|
||||
GITEA_API: https://repo.anhonesthost.net/api/v1
|
||||
GITEA_REPO: cybercovellc/triple-c
|
||||
GITHUB_REPO: shadowdao/triple-c
|
||||
run: |
|
||||
set -e
|
||||
|
||||
echo "==> Fetching releases from Gitea..."
|
||||
RELEASES=$(curl -sf \
|
||||
-H "Authorization: token $GITEA_TOKEN" \
|
||||
"$GITEA_API/repos/$GITEA_REPO/releases?limit=50")
|
||||
|
||||
echo "$RELEASES" | jq -c '.[]' | while read release; do
|
||||
TAG=$(echo "$release" | jq -r '.tag_name')
|
||||
NAME=$(echo "$release" | jq -r '.name')
|
||||
BODY=$(echo "$release" | jq -r '.body')
|
||||
IS_PRERELEASE=$(echo "$release" | jq -r '.prerelease')
|
||||
IS_DRAFT=$(echo "$release" | jq -r '.draft')
|
||||
|
||||
EXISTS=$(curl -sf \
|
||||
-H "Authorization: Bearer $GH_PAT" \
|
||||
-H "Accept: application/vnd.github+json" \
|
||||
"https://api.github.com/repos/$GITHUB_REPO/releases/tags/$TAG" \
|
||||
-o /dev/null -w "%{http_code}" || true)
|
||||
|
||||
if [ "$EXISTS" = "200" ]; then
|
||||
echo "==> Skipping $TAG (already exists on GitHub)"
|
||||
continue
|
||||
fi
|
||||
|
||||
echo "==> Creating release $TAG..."
|
||||
RESPONSE=$(curl -sf -X POST \
|
||||
-H "Authorization: Bearer $GH_PAT" \
|
||||
-H "Accept: application/vnd.github+json" \
|
||||
-H "Content-Type: application/json" \
|
||||
https://api.github.com/repos/$GITHUB_REPO/releases \
|
||||
-d "{
|
||||
\"tag_name\": \"$TAG\",
|
||||
\"name\": \"$NAME\",
|
||||
\"body\": $(echo "$BODY" | jq -Rs .),
|
||||
\"draft\": $IS_DRAFT,
|
||||
\"prerelease\": $IS_PRERELEASE
|
||||
}")
|
||||
|
||||
UPLOAD_URL=$(echo "$RESPONSE" | jq -r '.upload_url' | sed 's/{?name,label}//')
|
||||
|
||||
echo "$release" | jq -c '.assets[]?' | while read asset; do
|
||||
ASSET_NAME=$(echo "$asset" | jq -r '.name')
|
||||
ASSET_ID=$(echo "$asset" | jq -r '.id')
|
||||
|
||||
echo " ==> Downloading $ASSET_NAME..."
|
||||
DOWNLOAD_URL=$(echo "$asset" | jq -r '.browser_download_url')
|
||||
curl -sfL -o "/tmp/$ASSET_NAME" \
|
||||
-H "Authorization: token $GITEA_TOKEN" \
|
||||
"$DOWNLOAD_URL"
|
||||
|
||||
echo " ==> Uploading $ASSET_NAME to GitHub..."
|
||||
ENCODED_NAME=$(python3 -c "import urllib.parse, sys; print(urllib.parse.quote(sys.argv[1]))" "$ASSET_NAME")
|
||||
curl -sf -X POST \
|
||||
-H "Authorization: Bearer $GH_PAT" \
|
||||
-H "Accept: application/vnd.github+json" \
|
||||
-H "Content-Type: application/octet-stream" \
|
||||
--data-binary "@/tmp/$ASSET_NAME" \
|
||||
"$UPLOAD_URL?name=$ENCODED_NAME"
|
||||
|
||||
echo " Uploaded: $ASSET_NAME"
|
||||
done
|
||||
|
||||
echo "==> Done: $TAG"
|
||||
done
|
||||
|
||||
echo "==> Backfill complete."
|
||||
@@ -20,6 +20,32 @@ jobs:
|
||||
build-linux:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Install Node.js 22
|
||||
run: |
|
||||
NEED_INSTALL=false
|
||||
if command -v node >/dev/null 2>&1; then
|
||||
NODE_MAJOR=$(node --version | sed 's/v\([0-9]*\).*/\1/')
|
||||
OLD_NODE_DIR=$(dirname "$(which node)")
|
||||
echo "Found Node.js $(node --version) at $(which node) (major: ${NODE_MAJOR})"
|
||||
if [ "$NODE_MAJOR" -lt 22 ]; then
|
||||
echo "Node.js ${NODE_MAJOR} is too old, removing before installing 22..."
|
||||
sudo rm -f "${OLD_NODE_DIR}/node" "${OLD_NODE_DIR}/npm" "${OLD_NODE_DIR}/npx" "${OLD_NODE_DIR}/corepack"
|
||||
hash -r
|
||||
NEED_INSTALL=true
|
||||
fi
|
||||
else
|
||||
echo "Node.js not found, installing 22..."
|
||||
NEED_INSTALL=true
|
||||
fi
|
||||
if [ "$NEED_INSTALL" = true ]; then
|
||||
curl -fsSL https://deb.nodesource.com/setup_22.x | sudo -E bash -
|
||||
sudo apt-get install -y nodejs
|
||||
hash -r
|
||||
fi
|
||||
echo "Node.js at: $(which node)"
|
||||
node --version
|
||||
npm --version
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
@@ -61,29 +87,35 @@ jobs:
|
||||
xdg-utils
|
||||
|
||||
- name: Install Rust stable
|
||||
uses: dtolnay/rust-toolchain@stable
|
||||
|
||||
- name: Rust cache
|
||||
uses: swatinem/rust-cache@v2
|
||||
with:
|
||||
workspaces: "./app/src-tauri -> target"
|
||||
|
||||
- name: Install Node.js
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: "22"
|
||||
run: |
|
||||
if command -v rustup >/dev/null 2>&1; then
|
||||
echo "Rust already installed: $(rustc --version)"
|
||||
rustup update stable
|
||||
rustup default stable
|
||||
else
|
||||
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain stable
|
||||
fi
|
||||
export PATH="$HOME/.cargo/bin:$PATH"
|
||||
rustc --version
|
||||
cargo --version
|
||||
|
||||
- name: Install frontend dependencies
|
||||
working-directory: ./app
|
||||
run: npm ci
|
||||
run: |
|
||||
rm -rf node_modules package-lock.json
|
||||
npm install
|
||||
|
||||
- name: Install Tauri CLI
|
||||
working-directory: ./app
|
||||
run: npx tauri --version || npm install @tauri-apps/cli
|
||||
run: |
|
||||
export PATH="$HOME/.cargo/bin:$PATH"
|
||||
npx tauri --version || npm install @tauri-apps/cli
|
||||
|
||||
- name: Build Tauri app
|
||||
working-directory: ./app
|
||||
run: npx tauri build
|
||||
run: |
|
||||
export PATH="$HOME/.cargo/bin:$PATH"
|
||||
npx tauri build
|
||||
|
||||
- name: Collect artifacts
|
||||
run: |
|
||||
@@ -119,6 +151,116 @@ jobs:
|
||||
"${GITEA_URL}/api/v1/repos/${REPO}/releases/${RELEASE_ID}/assets?name=${filename}"
|
||||
done
|
||||
|
||||
build-macos:
|
||||
runs-on: macos-latest
|
||||
steps:
|
||||
- name: Install Node.js 22
|
||||
run: |
|
||||
NEED_INSTALL=false
|
||||
if command -v node >/dev/null 2>&1; then
|
||||
NODE_MAJOR=$(node --version | sed 's/v\([0-9]*\).*/\1/')
|
||||
echo "Found Node.js $(node --version) (major: ${NODE_MAJOR})"
|
||||
if [ "$NODE_MAJOR" -lt 22 ]; then
|
||||
echo "Node.js ${NODE_MAJOR} is too old, upgrading to 22..."
|
||||
NEED_INSTALL=true
|
||||
fi
|
||||
else
|
||||
echo "Node.js not found, installing 22..."
|
||||
NEED_INSTALL=true
|
||||
fi
|
||||
if [ "$NEED_INSTALL" = true ]; then
|
||||
brew install node@22
|
||||
brew link --overwrite node@22
|
||||
fi
|
||||
node --version
|
||||
npm --version
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Compute version
|
||||
id: version
|
||||
run: |
|
||||
COMMIT_COUNT=$(git rev-list --count HEAD)
|
||||
VERSION="0.1.${COMMIT_COUNT}"
|
||||
echo "VERSION=${VERSION}" >> $GITHUB_OUTPUT
|
||||
echo "Computed version: ${VERSION}"
|
||||
|
||||
- name: Set app version
|
||||
run: |
|
||||
VERSION="${{ steps.version.outputs.VERSION }}"
|
||||
sed -i '' "s/\"version\": \".*\"/\"version\": \"${VERSION}\"/" app/src-tauri/tauri.conf.json
|
||||
sed -i '' "s/\"version\": \".*\"/\"version\": \"${VERSION}\"/" app/package.json
|
||||
sed -i '' "s/^version = \".*\"/version = \"${VERSION}\"/" app/src-tauri/Cargo.toml
|
||||
echo "Patched version to ${VERSION}"
|
||||
|
||||
- name: Install Rust stable
|
||||
run: |
|
||||
if command -v rustup >/dev/null 2>&1; then
|
||||
echo "Rust already installed: $(rustc --version)"
|
||||
rustup update stable
|
||||
rustup default stable
|
||||
else
|
||||
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain stable
|
||||
fi
|
||||
export PATH="$HOME/.cargo/bin:$PATH"
|
||||
rustup target add aarch64-apple-darwin x86_64-apple-darwin
|
||||
rustc --version
|
||||
cargo --version
|
||||
|
||||
- name: Install frontend dependencies
|
||||
working-directory: ./app
|
||||
run: |
|
||||
rm -rf node_modules
|
||||
npm install
|
||||
|
||||
- name: Install Tauri CLI
|
||||
working-directory: ./app
|
||||
run: |
|
||||
export PATH="$HOME/.cargo/bin:$PATH"
|
||||
npx tauri --version || npm install @tauri-apps/cli
|
||||
|
||||
- name: Build Tauri app (universal)
|
||||
working-directory: ./app
|
||||
run: |
|
||||
export PATH="$HOME/.cargo/bin:$PATH"
|
||||
npx tauri build --target universal-apple-darwin
|
||||
|
||||
- name: Collect artifacts
|
||||
run: |
|
||||
mkdir -p artifacts
|
||||
cp app/src-tauri/target/universal-apple-darwin/release/bundle/dmg/*.dmg artifacts/ 2>/dev/null || true
|
||||
cp app/src-tauri/target/universal-apple-darwin/release/bundle/macos/*.app.tar.gz artifacts/ 2>/dev/null || true
|
||||
ls -la artifacts/
|
||||
|
||||
- name: Upload to Gitea release
|
||||
if: gitea.event_name == 'push'
|
||||
env:
|
||||
TOKEN: ${{ secrets.REGISTRY_TOKEN }}
|
||||
run: |
|
||||
TAG="v${{ steps.version.outputs.VERSION }}-mac"
|
||||
# Create release
|
||||
curl -s -X POST \
|
||||
-H "Authorization: token ${TOKEN}" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d "{\"tag_name\": \"${TAG}\", \"name\": \"Triple-C v${{ steps.version.outputs.VERSION }} (macOS)\", \"body\": \"Automated build from commit ${{ gitea.sha }}\"}" \
|
||||
"${GITEA_URL}/api/v1/repos/${REPO}/releases" > release.json
|
||||
RELEASE_ID=$(cat release.json | grep -o '"id":[0-9]*' | head -1 | grep -o '[0-9]*')
|
||||
echo "Release ID: ${RELEASE_ID}"
|
||||
# Upload each artifact
|
||||
for file in artifacts/*; do
|
||||
[ -f "$file" ] || continue
|
||||
filename=$(basename "$file")
|
||||
echo "Uploading ${filename}..."
|
||||
curl -s -X POST \
|
||||
-H "Authorization: token ${TOKEN}" \
|
||||
-H "Content-Type: application/octet-stream" \
|
||||
--data-binary "@${file}" \
|
||||
"${GITEA_URL}/api/v1/repos/${REPO}/releases/${RELEASE_ID}/assets?name=${filename}"
|
||||
done
|
||||
|
||||
build-windows:
|
||||
runs-on: windows-latest
|
||||
defaults:
|
||||
|
||||
@@ -21,6 +21,9 @@ jobs:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
@@ -36,6 +39,7 @@ jobs:
|
||||
with:
|
||||
context: ./container
|
||||
file: ./container/Dockerfile
|
||||
platforms: linux/amd64,linux/arm64
|
||||
push: ${{ gitea.event_name == 'push' }}
|
||||
tags: |
|
||||
${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest
|
||||
|
||||
60
.gitea/workflows/sync-release.yml
Normal file
60
.gitea/workflows/sync-release.yml
Normal file
@@ -0,0 +1,60 @@
|
||||
name: Sync Release to GitHub
|
||||
|
||||
on:
|
||||
release:
|
||||
types: [published]
|
||||
|
||||
jobs:
|
||||
sync-release:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Mirror release to GitHub
|
||||
env:
|
||||
GH_PAT: ${{ secrets.GH_PAT }}
|
||||
GITHUB_REPO: shadowdao/triple-c
|
||||
RELEASE_TAG: ${{ gitea.event.release.tag_name }}
|
||||
RELEASE_NAME: ${{ gitea.event.release.name }}
|
||||
RELEASE_BODY: ${{ gitea.event.release.body }}
|
||||
IS_PRERELEASE: ${{ gitea.event.release.prerelease }}
|
||||
IS_DRAFT: ${{ gitea.event.release.draft }}
|
||||
run: |
|
||||
set -e
|
||||
|
||||
echo "==> Creating release $RELEASE_TAG on GitHub..."
|
||||
|
||||
RESPONSE=$(curl -sf -X POST \
|
||||
-H "Authorization: Bearer $GH_PAT" \
|
||||
-H "Accept: application/vnd.github+json" \
|
||||
-H "Content-Type: application/json" \
|
||||
https://api.github.com/repos/$GITHUB_REPO/releases \
|
||||
-d "{
|
||||
\"tag_name\": \"$RELEASE_TAG\",
|
||||
\"name\": \"$RELEASE_NAME\",
|
||||
\"body\": $(echo "$RELEASE_BODY" | jq -Rs .),
|
||||
\"draft\": $IS_DRAFT,
|
||||
\"prerelease\": $IS_PRERELEASE
|
||||
}")
|
||||
|
||||
UPLOAD_URL=$(echo "$RESPONSE" | jq -r '.upload_url' | sed 's/{?name,label}//')
|
||||
echo "Release created. Upload URL: $UPLOAD_URL"
|
||||
|
||||
echo '${{ toJSON(gitea.event.release.assets) }}' | jq -c '.[]' | while read asset; do
|
||||
ASSET_NAME=$(echo "$asset" | jq -r '.name')
|
||||
ASSET_URL=$(echo "$asset" | jq -r '.browser_download_url')
|
||||
|
||||
echo "==> Downloading asset: $ASSET_NAME"
|
||||
curl -sfL -o "/tmp/$ASSET_NAME" "$ASSET_URL"
|
||||
|
||||
echo "==> Uploading $ASSET_NAME to GitHub..."
|
||||
ENCODED_NAME=$(python3 -c "import urllib.parse, sys; print(urllib.parse.quote(sys.argv[1]))" "$ASSET_NAME")
|
||||
curl -sf -X POST \
|
||||
-H "Authorization: Bearer $GH_PAT" \
|
||||
-H "Accept: application/vnd.github+json" \
|
||||
-H "Content-Type: application/octet-stream" \
|
||||
--data-binary "@/tmp/$ASSET_NAME" \
|
||||
"$UPLOAD_URL?name=$ENCODED_NAME"
|
||||
|
||||
echo " Uploaded: $ASSET_NAME"
|
||||
done
|
||||
|
||||
echo "==> Release sync complete."
|
||||
53
BUILDING.md
53
BUILDING.md
@@ -1,6 +1,6 @@
|
||||
# Building Triple-C
|
||||
|
||||
Triple-C is a Tauri v2 desktop application with a React/TypeScript frontend and a Rust backend. This guide covers building the app from source on Linux and Windows.
|
||||
Triple-C is a Tauri v2 desktop application with a React/TypeScript frontend and a Rust backend. This guide covers building the app from source on Linux, macOS, and Windows.
|
||||
|
||||
## Prerequisites (All Platforms)
|
||||
|
||||
@@ -79,6 +79,57 @@ Build artifacts are located in `app/src-tauri/target/release/bundle/`:
|
||||
| Debian pkg | `deb/*.deb` |
|
||||
| RPM pkg | `rpm/*.rpm` |
|
||||
|
||||
## macOS
|
||||
|
||||
### 1. Install prerequisites
|
||||
|
||||
- **Xcode Command Line Tools** — required for the C/C++ toolchain and system headers:
|
||||
|
||||
```bash
|
||||
xcode-select --install
|
||||
```
|
||||
|
||||
No additional system libraries are needed — macOS includes WebKit natively.
|
||||
|
||||
### 2. Install Rust targets (universal binary)
|
||||
|
||||
To build a universal binary that runs on both Apple Silicon and Intel Macs:
|
||||
|
||||
```bash
|
||||
rustup target add aarch64-apple-darwin x86_64-apple-darwin
|
||||
```
|
||||
|
||||
### 3. Install frontend dependencies
|
||||
|
||||
```bash
|
||||
cd app
|
||||
npm ci
|
||||
```
|
||||
|
||||
### 4. Build
|
||||
|
||||
For a universal binary (recommended for distribution):
|
||||
|
||||
```bash
|
||||
npx tauri build --target universal-apple-darwin
|
||||
```
|
||||
|
||||
For the current architecture only (faster, for local development):
|
||||
|
||||
```bash
|
||||
npx tauri build
|
||||
```
|
||||
|
||||
Build artifacts are located in `app/src-tauri/target/universal-apple-darwin/release/bundle/` (or `target/release/bundle/` for single-arch builds):
|
||||
|
||||
| Format | Path |
|
||||
|--------|------|
|
||||
| DMG | `dmg/*.dmg` |
|
||||
| macOS App | `macos/*.app` |
|
||||
| macOS App (compressed) | `macos/*.app.tar.gz` |
|
||||
|
||||
> **Note:** The app is not signed or notarized. On first launch, macOS Gatekeeper may block it. Right-click the app and select "Open" to bypass, or remove the quarantine attribute: `xattr -cr /Applications/Triple-C.app`
|
||||
|
||||
## Windows
|
||||
|
||||
### 1. Install prerequisites
|
||||
|
||||
115
CLAUDE.md
Normal file
115
CLAUDE.md
Normal file
@@ -0,0 +1,115 @@
|
||||
# CLAUDE.md
|
||||
|
||||
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
|
||||
|
||||
## Project Overview
|
||||
|
||||
Triple-C (Claude-Code-Container) is a Tauri v2 desktop application that sandboxes Claude Code inside Docker containers. It has two main parts: a React/TypeScript frontend, a Rust backend, and a Docker container image definition.
|
||||
|
||||
## Build & Development Commands
|
||||
|
||||
All frontend/tauri commands run from the `app/` directory:
|
||||
|
||||
```bash
|
||||
cd app
|
||||
npm ci # Install dependencies (required first time)
|
||||
npx tauri dev # Launch app in dev mode with hot reload (Vite on port 1420)
|
||||
npx tauri build # Production build (outputs to src-tauri/target/release/bundle/)
|
||||
npm run build # Frontend-only build (tsc + vite)
|
||||
npm run test # Run Vitest once
|
||||
npm run test:watch # Run Vitest in watch mode
|
||||
```
|
||||
|
||||
Rust backend is compiled automatically by `tauri dev`/`tauri build`. To check Rust independently:
|
||||
```bash
|
||||
cd app/src-tauri
|
||||
cargo check # Type-check without full build
|
||||
cargo build # Build Rust backend only
|
||||
```
|
||||
|
||||
Container image:
|
||||
```bash
|
||||
docker build -t triple-c-sandbox ./container
|
||||
```
|
||||
|
||||
### Linux Build Dependencies (Ubuntu/Debian)
|
||||
```bash
|
||||
sudo apt-get install -y libgtk-3-dev libwebkit2gtk-4.1-dev libayatana-appindicator3-dev librsvg2-dev libsoup-3.0-dev patchelf libssl-dev pkg-config build-essential
|
||||
```
|
||||
|
||||
## Architecture
|
||||
|
||||
### Two-Process Model (Tauri IPC)
|
||||
|
||||
- **React frontend** (`app/src/`) renders UI in the OS webview
|
||||
- **Rust backend** (`app/src-tauri/src/`) handles Docker API, credential storage, and terminal I/O
|
||||
- Communication uses two patterns:
|
||||
- `invoke()` — request/response for discrete operations (CRUD, start/stop containers)
|
||||
- `emit()`/`listen()` — event streaming for continuous data (terminal I/O)
|
||||
|
||||
### Terminal I/O Flow
|
||||
|
||||
```
|
||||
User keystroke → xterm.js onData() → invoke("terminal_input") → mpsc channel → docker exec stdin
|
||||
docker exec stdout → tokio task → emit("terminal-output-{sessionId}") → listen() → xterm.js write()
|
||||
```
|
||||
|
||||
### Frontend Structure (`app/src/`)
|
||||
|
||||
- **`store/appState.ts`** — Single Zustand store for all app state (projects, sessions, UI)
|
||||
- **`hooks/`** — All Tauri IPC calls are encapsulated in hooks (`useTerminal`, `useProjects`, `useDocker`, `useSettings`)
|
||||
- **`lib/tauri-commands.ts`** — Typed `invoke()` wrappers; TypeScript types in `lib/types.ts` must match Rust models
|
||||
- **`components/terminal/TerminalView.tsx`** — xterm.js integration with WebGL rendering, URL detection for OAuth flow
|
||||
- **`components/layout/`** — TopBar (tabs + status), Sidebar (project list), StatusBar
|
||||
- **`components/projects/`** — ProjectCard, ProjectList, AddProjectDialog
|
||||
- **`components/settings/`** — Settings panels for API keys, Docker, AWS
|
||||
|
||||
### Backend Structure (`app/src-tauri/src/`)
|
||||
|
||||
- **`commands/`** — Tauri command handlers (docker, project, settings, terminal). These are the IPC entry points called by `invoke()`.
|
||||
- **`docker/`** — Docker API layer using bollard:
|
||||
- `client.rs` — Singleton Docker connection via `OnceLock`
|
||||
- `container.rs` — Container lifecycle (create, start, stop, remove, inspect)
|
||||
- `exec.rs` — PTY exec sessions with bidirectional stdin/stdout streaming
|
||||
- `image.rs` — Image build/pull with progress streaming
|
||||
- **`models/`** — Serde structs (`Project`, `AuthMode`, `BedrockConfig`, `ContainerInfo`, `AppSettings`). These define the IPC contract with the frontend.
|
||||
- **`storage/`** — Persistence: `projects_store.rs` (JSON file with atomic writes), `secure.rs` (OS keychain via `keyring` crate), `settings_store.rs`
|
||||
|
||||
### Container (`container/`)
|
||||
|
||||
- **`Dockerfile`** — Ubuntu 24.04 base with Claude Code, Node.js 22, Python 3.12, Rust, Docker CLI, git, gh, AWS CLI v2, ripgrep, pnpm, uv, ruff pre-installed
|
||||
- **`entrypoint.sh`** — UID/GID remapping to match host user, SSH key setup, git config, docker socket permissions, then `sleep infinity`
|
||||
- **`triple-c-scheduler`** — Bash-based scheduled task system for recurring Claude Code invocations
|
||||
|
||||
### Container Lifecycle
|
||||
|
||||
Containers use a **stop/start** model (not create/destroy). Installed packages persist across stops. The `.claude` config dir uses a named Docker volume (`triple-c-claude-config-{projectId}`) so OAuth tokens survive even container resets.
|
||||
|
||||
### Authentication
|
||||
|
||||
Per-project, independently configured:
|
||||
- **Anthropic (OAuth)** — `claude login` in terminal, token persists in config volume
|
||||
- **AWS Bedrock** — Static keys, profile, or bearer token injected as env vars
|
||||
|
||||
## Styling
|
||||
|
||||
- **Tailwind CSS v4** with the Vite plugin (`@tailwindcss/vite`). No separate tailwind config file.
|
||||
- All colors use CSS custom properties in `index.css` `:root` (e.g., `--bg-primary`, `--text-secondary`, `--accent`)
|
||||
- `color-scheme: dark` is set on `:root` for native dark-mode controls
|
||||
- **Do not** add a global `* { padding: 0 }` reset — Tailwind v4 uses CSS `@layer`, and unlayered CSS overrides all layered utilities
|
||||
|
||||
## Key Conventions
|
||||
|
||||
- Frontend types in `lib/types.ts` must stay in sync with Rust structs in `models/`
|
||||
- Tauri commands are registered in `lib.rs` via `.invoke_handler(tauri::generate_handler![...])`
|
||||
- Tauri v2 permissions are declared in `capabilities/default.json` — new IPC commands need permission grants there
|
||||
- The `projects.json` file uses atomic writes (write to `.tmp`, then `rename()`). Corrupted files are backed up to `.bak`.
|
||||
- Cross-platform paths: Docker socket is `/var/run/docker.sock` on Linux/macOS, `//./pipe/docker_engine` on Windows
|
||||
|
||||
## Testing
|
||||
|
||||
Frontend tests use Vitest with jsdom environment and React Testing Library. Setup file at `src/test/setup.ts`. Run a single test file:
|
||||
```bash
|
||||
cd app
|
||||
npx vitest run src/path/to/test.test.ts
|
||||
```
|
||||
397
HOW-TO-USE.md
Normal file
397
HOW-TO-USE.md
Normal file
@@ -0,0 +1,397 @@
|
||||
# How to Use Triple-C
|
||||
|
||||
Triple-C (Claude-Code-Container) is a desktop application that runs Claude Code inside isolated Docker containers. Each project gets its own sandboxed environment with bind-mounted directories, so Claude only has access to the files you explicitly provide.
|
||||
|
||||
---
|
||||
|
||||
## Prerequisites
|
||||
|
||||
### Docker
|
||||
|
||||
Triple-C requires a running Docker daemon. Install one of the following:
|
||||
|
||||
| Platform | Option | Link |
|
||||
|----------|--------|------|
|
||||
| **Windows** | Docker Desktop | https://docs.docker.com/desktop/install/windows-install/ |
|
||||
| **macOS** | Docker Desktop | https://docs.docker.com/desktop/install/mac-install/ |
|
||||
| **Linux** | Docker Engine | https://docs.docker.com/engine/install/ |
|
||||
| **Linux** | Docker Desktop (alternative) | https://docs.docker.com/desktop/install/linux/ |
|
||||
|
||||
After installation, verify Docker is running:
|
||||
|
||||
```bash
|
||||
docker info
|
||||
```
|
||||
|
||||
> **Windows note:** Docker Desktop must be running before launching Triple-C. The app communicates with Docker through the named pipe at `//./pipe/docker_engine`.
|
||||
|
||||
> **Linux note:** Your user must have permission to access the Docker socket (`/var/run/docker.sock`). Either add your user to the `docker` group (`sudo usermod -aG docker $USER`, then log out and back in) or run Docker in rootless mode.
|
||||
|
||||
### Claude Code Account
|
||||
|
||||
You need access to Claude Code through one of:
|
||||
|
||||
- **Anthropic account** — Sign up at https://claude.ai and use `claude login` (OAuth) inside the terminal
|
||||
- **AWS Bedrock** — An AWS account with Bedrock access and Claude models enabled
|
||||
|
||||
---
|
||||
|
||||
## First Launch
|
||||
|
||||
### 1. Get the Container Image
|
||||
|
||||
When you first open Triple-C, go to the **Settings** tab in the sidebar. Under **Docker**, you'll see:
|
||||
|
||||
- **Docker Status** — Should show "Connected" (green). If it shows "Not Available", make sure Docker is running.
|
||||
- **Image Status** — Will show "Not Found" on first launch.
|
||||
|
||||
Choose an **Image Source**:
|
||||
|
||||
| Source | Description | When to Use |
|
||||
|--------|-------------|-------------|
|
||||
| **Registry** | Pulls the pre-built image from `repo.anhonesthost.net` | Fastest setup — recommended for most users |
|
||||
| **Local Build** | Builds the image locally from the embedded Dockerfile | If you can't reach the registry, or want a custom build |
|
||||
| **Custom** | Use any Docker image you specify | Advanced — bring your own sandbox image |
|
||||
|
||||
Click **Pull Image** (for Registry/Custom) or **Build Image** (for Local Build). A progress log will stream below the button. When complete, the status changes to "Ready" (green).
|
||||
|
||||
### 2. Create Your First Project
|
||||
|
||||
Switch to the **Projects** tab in the sidebar and click the **+** button.
|
||||
|
||||
1. **Project Name** — Give it a meaningful name (e.g., "my-web-app").
|
||||
2. **Folders** — Click **Browse** to select a directory on your host machine. This directory will be mounted into the container at `/workspace/<folder-name>`. You can add multiple folders with the **+** button at the bottom of the folder list.
|
||||
3. Click **Add Project**.
|
||||
|
||||
### 3. Start the Container
|
||||
|
||||
Select your project in the sidebar and click **Start**. The status dot changes from gray (stopped) to orange (starting) to green (running).
|
||||
|
||||
### 4. Open a Terminal
|
||||
|
||||
Click the **Terminal** button (highlighted in accent color) to open an interactive terminal session. A new tab appears in the top bar and an xterm.js terminal loads in the main area.
|
||||
|
||||
Claude Code launches automatically with `--dangerously-skip-permissions` inside the sandboxed container.
|
||||
|
||||
### 5. Authenticate
|
||||
|
||||
**Anthropic (OAuth) — default:**
|
||||
|
||||
1. Type `claude login` or `/login` in the terminal.
|
||||
2. Claude prints an OAuth URL. Triple-C detects long URLs and shows a clickable toast at the top of the terminal — click **Open** to open it in your browser.
|
||||
3. Complete the login in your browser. The token is saved and persists across container stops and resets.
|
||||
|
||||
**AWS Bedrock:**
|
||||
|
||||
1. Stop the container first (settings can only be changed while stopped).
|
||||
2. In the project card, switch the auth mode to **Bedrock**.
|
||||
3. Expand the **Config** panel and fill in your AWS credentials (see [AWS Bedrock Configuration](#aws-bedrock-configuration) below).
|
||||
4. Start the container again.
|
||||
|
||||
---
|
||||
|
||||
## The Interface
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────┐
|
||||
│ TopBar [ Terminal Tabs ] Docker ● Image ●│
|
||||
├────────────┬────────────────────────────────────────┤
|
||||
│ Sidebar │ │
|
||||
│ │ Terminal View │
|
||||
│ Projects │ (xterm.js) │
|
||||
│ Settings │ │
|
||||
│ │ │
|
||||
├────────────┴────────────────────────────────────────┤
|
||||
│ StatusBar X projects · X running · X terminals │
|
||||
└─────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
- **TopBar** — Terminal tabs for switching between sessions. Status dots on the right show Docker connection (green = connected) and image availability (green = ready).
|
||||
- **Sidebar** — Toggle between the **Projects** list and **Settings** panel.
|
||||
- **Terminal View** — Interactive terminal powered by xterm.js with WebGL rendering.
|
||||
- **StatusBar** — Counts of total projects, running containers, and open terminal sessions.
|
||||
|
||||
---
|
||||
|
||||
## Project Management
|
||||
|
||||
### Project Status
|
||||
|
||||
Each project shows a colored status dot:
|
||||
|
||||
| Color | Status | Meaning |
|
||||
|-------|--------|---------|
|
||||
| Gray | Stopped | Container is not running |
|
||||
| Orange | Starting / Stopping | Container is transitioning |
|
||||
| Green | Running | Container is active, ready for terminals |
|
||||
| Red | Error | Something went wrong (check error message) |
|
||||
|
||||
### Project Actions
|
||||
|
||||
Select a project in the sidebar to see its action buttons:
|
||||
|
||||
| Button | When Available | What It Does |
|
||||
|--------|---------------|--------------|
|
||||
| **Start** | Stopped | Creates (if needed) and starts the container |
|
||||
| **Stop** | Running | Stops the container but preserves its state |
|
||||
| **Terminal** | Running | Opens a new terminal session in this container |
|
||||
| **Reset** | Stopped | Destroys and recreates the container from scratch |
|
||||
| **Config** | Always | Toggles the configuration panel |
|
||||
| **Remove** | Stopped | Deletes the project and its container (with confirmation) |
|
||||
|
||||
### Container Lifecycle
|
||||
|
||||
Containers use a **stop/start** model. When you stop a container, everything inside it is preserved — installed packages, modified files, downloaded tools. Starting it again resumes where you left off.
|
||||
|
||||
**Reset** removes the container and creates a fresh one. However, your Claude Code configuration (including OAuth tokens from `claude login`) is stored in a separate Docker volume and survives resets.
|
||||
|
||||
Only **Remove** deletes everything, including the config volume and any stored credentials.
|
||||
|
||||
---
|
||||
|
||||
## Project Configuration
|
||||
|
||||
Click **Config** on a selected project to expand the configuration panel. Settings can only be changed when the container is **stopped** (an orange warning box appears if the container is running).
|
||||
|
||||
### Mounted Folders
|
||||
|
||||
Each project mounts one or more host directories into the container. The mount appears at `/workspace/<mount-name>` inside the container.
|
||||
|
||||
- Click **Browse** ("...") to change the host path
|
||||
- Edit the mount name to control where it appears inside `/workspace/`
|
||||
- Click **+** to add more folders, or **x** to remove one
|
||||
- Mount names must be unique and use only letters, numbers, dashes, underscores, and dots
|
||||
|
||||
### SSH Keys
|
||||
|
||||
Specify the path to your SSH key directory (typically `~/.ssh`). Keys are mounted read-only and copied into the container with correct permissions. This enables `git clone` via SSH inside the container.
|
||||
|
||||
### Git Configuration
|
||||
|
||||
- **Git Name / Email** — Sets `git config user.name` and `user.email` inside the container.
|
||||
- **Git HTTPS Token** — A personal access token (e.g., from GitHub) for HTTPS git operations. Stored securely in your OS keychain — never written to disk in plaintext.
|
||||
|
||||
### Allow Container Spawning
|
||||
|
||||
When enabled, the host Docker socket is mounted into the container so Claude Code can create sibling containers (e.g., for running databases, test environments). This is **off by default** for security.
|
||||
|
||||
> Toggling this requires stopping and restarting the container to take effect.
|
||||
|
||||
### Environment Variables
|
||||
|
||||
Click **Edit** to open the environment variables modal. Add key-value pairs that will be injected into the container. Per-project variables override global variables with the same key.
|
||||
|
||||
> Reserved prefixes (`ANTHROPIC_`, `AWS_`, `GIT_`, `HOST_`, `CLAUDE_`, `TRIPLE_C_`) are filtered out to prevent conflicts with internal variables.
|
||||
|
||||
### Port Mappings
|
||||
|
||||
Click **Edit** to map host ports to container ports. This is useful when Claude Code starts a web server or other service inside the container and you want to access it from your host browser.
|
||||
|
||||
Each mapping specifies:
|
||||
- **Host Port** — The port on your machine (1–65535)
|
||||
- **Container Port** — The port inside the container (1–65535)
|
||||
- **Protocol** — TCP (default) or UDP
|
||||
|
||||
### Claude Instructions
|
||||
|
||||
Click **Edit** to write per-project instructions for Claude Code. These are written to `~/.claude/CLAUDE.md` inside the container and provide project-specific context. If you also have global instructions (in Settings), the global instructions come first, followed by the per-project instructions.
|
||||
|
||||
---
|
||||
|
||||
## AWS Bedrock Configuration
|
||||
|
||||
To use Claude via AWS Bedrock instead of Anthropic's API, switch the auth mode to **Bedrock** on the project card.
|
||||
|
||||
### Authentication Methods
|
||||
|
||||
| Method | Fields | Use Case |
|
||||
|--------|--------|----------|
|
||||
| **Keys** | Access Key ID, Secret Access Key, Session Token (optional) | Direct credentials — simplest setup |
|
||||
| **Profile** | AWS Profile name | Uses `~/.aws/config` and `~/.aws/credentials` on the host |
|
||||
| **Token** | Bearer Token | Temporary bearer token authentication |
|
||||
|
||||
### Additional Bedrock Settings
|
||||
|
||||
- **AWS Region** — Required. The region where your Bedrock models are deployed (e.g., `us-east-1`).
|
||||
- **Model ID** — Optional. Override the default Claude model (e.g., `anthropic.claude-sonnet-4-20250514-v1:0`).
|
||||
|
||||
### Global AWS Defaults
|
||||
|
||||
In **Settings > AWS Configuration**, you can set defaults that apply to all Bedrock projects:
|
||||
|
||||
- **AWS Config Path** — Path to your `~/.aws` directory. Click **Detect** to auto-find it.
|
||||
- **Default Profile** — Select from profiles found in your AWS config.
|
||||
- **Default Region** — Fallback region for projects that don't specify one.
|
||||
|
||||
Per-project settings always override these global defaults.
|
||||
|
||||
---
|
||||
|
||||
## Settings
|
||||
|
||||
Access global settings via the **Settings** tab in the sidebar.
|
||||
|
||||
### Docker Settings
|
||||
|
||||
- **Docker Status** — Connection status to the Docker daemon.
|
||||
- **Image Source** — Where to get the sandbox container image (Registry, Local Build, or Custom).
|
||||
- **Pull / Build Image** — Download or build the image. Progress streams in real time.
|
||||
- **Refresh** — Re-check Docker and image status.
|
||||
|
||||
### Container Timezone
|
||||
|
||||
Set the timezone for all containers (IANA format, e.g., `America/New_York`, `Europe/London`, `UTC`). Auto-detected from your host on first launch. This affects scheduled task timing inside containers.
|
||||
|
||||
### Global Claude Instructions
|
||||
|
||||
Instructions applied to **all** projects. Written to `~/.claude/CLAUDE.md` in every container, before any per-project instructions.
|
||||
|
||||
### Global Environment Variables
|
||||
|
||||
Environment variables applied to **all** project containers. Per-project variables with the same key take precedence.
|
||||
|
||||
### Updates
|
||||
|
||||
- **Current Version** — The installed version of Triple-C.
|
||||
- **Auto-check** — Toggle automatic update checks (every 24 hours).
|
||||
- **Check now** — Manually check for updates.
|
||||
|
||||
When an update is available, a pulsing **Update** button appears in the top bar. Click it to see release notes and download links.
|
||||
|
||||
---
|
||||
|
||||
## Terminal Features
|
||||
|
||||
### Multiple Sessions
|
||||
|
||||
You can open multiple terminal sessions (even for the same project). Each session gets its own tab in the top bar. Click a tab to switch, or click the **x** on a tab to close it.
|
||||
|
||||
### URL Detection
|
||||
|
||||
When Claude Code prints a long URL (e.g., during `claude login`), Triple-C detects it and shows a toast notification at the top of the terminal with an **Open** button. Clicking it opens the URL in your default browser. The toast auto-dismisses after 30 seconds.
|
||||
|
||||
Shorter URLs in terminal output are also clickable directly.
|
||||
|
||||
### Image Paste
|
||||
|
||||
You can paste images from your clipboard into the terminal (Ctrl+V / Cmd+V). The image is uploaded to the container and the file path is injected into the terminal input so Claude Code can reference it.
|
||||
|
||||
### Terminal Rendering
|
||||
|
||||
The terminal uses WebGL for hardware-accelerated rendering of the active tab. Inactive tabs fall back to canvas rendering to conserve GPU resources. The terminal automatically resizes when you resize the window.
|
||||
|
||||
---
|
||||
|
||||
## Scheduled Tasks (Inside the Container)
|
||||
|
||||
Once inside a running container terminal, you can set up recurring or one-time tasks using `triple-c-scheduler`. Tasks run as separate Claude Code sessions.
|
||||
|
||||
### Create a Recurring Task
|
||||
|
||||
```bash
|
||||
triple-c-scheduler add --name "daily-review" --schedule "0 9 * * *" --prompt "Review open issues and summarize"
|
||||
```
|
||||
|
||||
### Create a One-Time Task
|
||||
|
||||
```bash
|
||||
triple-c-scheduler add --name "migrate-db" --at "2026-03-05 14:00" --prompt "Run database migrations"
|
||||
```
|
||||
|
||||
One-time tasks automatically remove themselves after execution.
|
||||
|
||||
### Manage Tasks
|
||||
|
||||
```bash
|
||||
triple-c-scheduler list # List all tasks
|
||||
triple-c-scheduler enable --id abc123 # Enable a task
|
||||
triple-c-scheduler disable --id abc123 # Disable a task
|
||||
triple-c-scheduler remove --id abc123 # Delete a task
|
||||
triple-c-scheduler run --id abc123 # Trigger a task immediately
|
||||
triple-c-scheduler logs --id abc123 # View logs for a task
|
||||
triple-c-scheduler logs --tail 20 # View last 20 log entries (all tasks)
|
||||
triple-c-scheduler notifications # View completion notifications
|
||||
triple-c-scheduler notifications --clear # Clear notifications
|
||||
```
|
||||
|
||||
### Cron Schedule Format
|
||||
|
||||
Standard 5-field cron: `minute hour day-of-month month day-of-week`
|
||||
|
||||
| Example | Meaning |
|
||||
|---------|---------|
|
||||
| `*/30 * * * *` | Every 30 minutes |
|
||||
| `0 9 * * 1-5` | 9:00 AM on weekdays |
|
||||
| `0 */2 * * *` | Every 2 hours |
|
||||
| `0 0 1 * *` | Midnight on the 1st of each month |
|
||||
|
||||
### Working Directory
|
||||
|
||||
By default, tasks run in `/workspace`. Use `--working-dir` to specify a different directory:
|
||||
|
||||
```bash
|
||||
triple-c-scheduler add --name "test" --schedule "0 */6 * * *" --prompt "Run tests" --working-dir /workspace/my-project
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## What's Inside the Container
|
||||
|
||||
The sandbox container (Ubuntu 24.04) comes pre-installed with:
|
||||
|
||||
| Tool | Version | Purpose |
|
||||
|------|---------|---------|
|
||||
| Claude Code | Latest | AI coding assistant (the tool being sandboxed) |
|
||||
| Node.js | 22 LTS | JavaScript/TypeScript development |
|
||||
| pnpm | Latest | Fast Node.js package manager |
|
||||
| Python | 3.12 | Python development |
|
||||
| uv | Latest | Fast Python package manager |
|
||||
| ruff | Latest | Python linter/formatter |
|
||||
| Rust | Stable | Rust development (via rustup) |
|
||||
| Docker CLI | Latest | Container management (when spawning is enabled) |
|
||||
| git | Latest | Version control |
|
||||
| GitHub CLI (gh) | Latest | GitHub integration |
|
||||
| AWS CLI | v2 | AWS services and Bedrock |
|
||||
| ripgrep | Latest | Fast code search |
|
||||
| build-essential | — | C/C++ compiler toolchain |
|
||||
| openssh-client | — | SSH for git and remote access |
|
||||
|
||||
You can install additional tools at runtime with `sudo apt install`, `pip install`, `npm install -g`, etc. Installed packages persist across container stops (but not across resets).
|
||||
|
||||
---
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Docker is "Not Available"
|
||||
|
||||
- **Is Docker running?** Start Docker Desktop or the Docker daemon (`sudo systemctl start docker`).
|
||||
- **Permissions?** On Linux, ensure your user is in the `docker` group or the socket is accessible.
|
||||
- **Custom socket path?** If your Docker socket is not at the default location, set it in Settings. The app expects `/var/run/docker.sock` on Linux/macOS or `//./pipe/docker_engine` on Windows.
|
||||
|
||||
### Image is "Not Found"
|
||||
|
||||
- Click **Pull Image** or **Build Image** in Settings > Docker.
|
||||
- If pulling fails, check your network connection and whether you can reach the registry.
|
||||
- Try switching to **Local Build** as an alternative.
|
||||
|
||||
### Container Won't Start
|
||||
|
||||
- Check that the Docker image is "Ready" in Settings.
|
||||
- Verify that the mounted folder paths exist on your host.
|
||||
- Look at the error message displayed in red below the project card.
|
||||
|
||||
### OAuth Login URL Not Opening
|
||||
|
||||
- Triple-C detects long URLs printed by `claude login` and shows a toast with an **Open** button.
|
||||
- If the toast doesn't appear, try scrolling up in the terminal — the URL may have already been printed.
|
||||
- You can also manually copy the URL from the terminal output and paste it into your browser.
|
||||
|
||||
### File Permission Issues
|
||||
|
||||
- Triple-C automatically remaps the container user's UID/GID to match your host user, so files created inside the container should have the correct ownership on your host.
|
||||
- If you see permission errors, try resetting the container (stop, then click **Reset**).
|
||||
|
||||
### Settings Won't Save
|
||||
|
||||
- Most project settings can only be changed when the container is **stopped**. Stop the container first, make your changes, then start it again.
|
||||
- Some changes (like toggling Docker access or changing mounted folders) trigger an automatic container recreation on the next start.
|
||||
60
TODO.md
Normal file
60
TODO.md
Normal file
@@ -0,0 +1,60 @@
|
||||
# TODO / Future Improvements
|
||||
|
||||
## In-App Auto-Update via `tauri-plugin-updater`
|
||||
|
||||
**Priority:** High
|
||||
**Status:** Planned
|
||||
|
||||
Currently the app detects available updates via the Gitea API (`check_for_updates` command) but cannot apply them. Users must manually download and install the new version. On macOS and Linux this is a poor experience compared to Windows (where NSIS handles upgrades cleanly).
|
||||
|
||||
### Recommended approach: `tauri-plugin-updater`
|
||||
|
||||
Full in-app auto-update: detects, downloads, verifies, and applies updates seamlessly on all platforms. The user clicks "Update" and the app restarts with the new version.
|
||||
|
||||
### Requirements
|
||||
|
||||
1. **Generate a Tauri update signing key pair** (this is Tauri's own Ed25519 key, not OS code signing):
|
||||
```bash
|
||||
npx @tauri-apps/cli signer generate -w ~/.tauri/triple-c.key
|
||||
```
|
||||
Set `TAURI_SIGNING_PRIVATE_KEY` and `TAURI_SIGNING_PRIVATE_KEY_PASSWORD` in CI.
|
||||
|
||||
2. **Add `tauri-plugin-updater`** to Rust and JS dependencies.
|
||||
|
||||
3. **Create an update endpoint** that returns Tauri's expected JSON format:
|
||||
```json
|
||||
{
|
||||
"version": "v0.1.100",
|
||||
"notes": "Changelog here",
|
||||
"pub_date": "2026-03-01T00:00:00Z",
|
||||
"platforms": {
|
||||
"darwin-x86_64": { "signature": "...", "url": "https://..." },
|
||||
"darwin-aarch64": { "signature": "...", "url": "https://..." },
|
||||
"linux-x86_64": { "signature": "...", "url": "https://..." },
|
||||
"windows-x86_64": { "signature": "...", "url": "https://..." }
|
||||
}
|
||||
}
|
||||
```
|
||||
This could be a static JSON file uploaded alongside release assets, or a small API that reads from Gitea releases and reformats.
|
||||
|
||||
4. **Configure the updater** in `tauri.conf.json`:
|
||||
```json
|
||||
"plugins": {
|
||||
"updater": {
|
||||
"endpoints": ["https://repo.anhonesthost.net/...update-endpoint..."],
|
||||
"pubkey": "<public key from step 1>"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
5. **Add frontend UI** for the update prompt (replace or enhance the existing update check flow).
|
||||
|
||||
6. **Update CI pipeline** to:
|
||||
- Sign bundles with the Tauri key during build
|
||||
- Upload `.sig` files alongside installers
|
||||
- Generate/upload the update endpoint JSON
|
||||
|
||||
### References
|
||||
- https://v2.tauri.app/plugin/updater/
|
||||
- Existing update check code: `app/src-tauri/src/commands/update_commands.rs`
|
||||
- Existing models: `app/src-tauri/src/models/update_info.rs`
|
||||
@@ -40,6 +40,42 @@ After tasks run, check notifications with `triple-c-scheduler notifications` and
|
||||
### Timezone
|
||||
Scheduled times use the container's configured timezone (check with `date`). If no timezone is configured, UTC is used."#;
|
||||
|
||||
/// Build the full CLAUDE_INSTRUCTIONS value by merging global + project
|
||||
/// instructions, appending port mapping docs, and appending scheduler docs.
|
||||
/// Used by both create_container() and container_needs_recreation() to ensure
|
||||
/// the same value is produced in both paths.
|
||||
fn build_claude_instructions(
|
||||
global_instructions: Option<&str>,
|
||||
project_instructions: Option<&str>,
|
||||
port_mappings: &[PortMapping],
|
||||
) -> Option<String> {
|
||||
let mut combined = merge_claude_instructions(global_instructions, project_instructions);
|
||||
|
||||
if !port_mappings.is_empty() {
|
||||
let mut port_lines: Vec<String> = Vec::new();
|
||||
port_lines.push("## Available Port Mappings".to_string());
|
||||
port_lines.push("The following ports are mapped from the host to this container. Use these container ports when starting services that need to be accessible from the host:".to_string());
|
||||
for pm in port_mappings {
|
||||
port_lines.push(format!(
|
||||
"- Host port {} -> Container port {} ({})",
|
||||
pm.host_port, pm.container_port, pm.protocol
|
||||
));
|
||||
}
|
||||
let port_info = port_lines.join("\n");
|
||||
combined = Some(match combined {
|
||||
Some(existing) => format!("{}\n\n{}", existing, port_info),
|
||||
None => port_info,
|
||||
});
|
||||
}
|
||||
|
||||
combined = Some(match combined {
|
||||
Some(existing) => format!("{}\n\n{}", existing, SCHEDULER_INSTRUCTIONS),
|
||||
None => SCHEDULER_INSTRUCTIONS.to_string(),
|
||||
});
|
||||
|
||||
combined
|
||||
}
|
||||
|
||||
/// Compute a fingerprint string for the custom environment variables.
|
||||
/// Sorted alphabetically so order changes do not cause spurious recreation.
|
||||
fn compute_env_fingerprint(custom_env_vars: &[EnvVar]) -> String {
|
||||
@@ -307,33 +343,12 @@ pub async fn create_container(
|
||||
}
|
||||
}
|
||||
|
||||
// Claude instructions (global + per-project, plus port mapping info)
|
||||
let mut combined_instructions = merge_claude_instructions(
|
||||
// Claude instructions (global + per-project, plus port mapping info + scheduler docs)
|
||||
let combined_instructions = build_claude_instructions(
|
||||
global_claude_instructions,
|
||||
project.claude_instructions.as_deref(),
|
||||
&project.port_mappings,
|
||||
);
|
||||
if !project.port_mappings.is_empty() {
|
||||
let mut port_lines: Vec<String> = Vec::new();
|
||||
port_lines.push("## Available Port Mappings".to_string());
|
||||
port_lines.push("The following ports are mapped from the host to this container. Use these container ports when starting services that need to be accessible from the host:".to_string());
|
||||
for pm in &project.port_mappings {
|
||||
port_lines.push(format!(
|
||||
"- Host port {} -> Container port {} ({})",
|
||||
pm.host_port, pm.container_port, pm.protocol
|
||||
));
|
||||
}
|
||||
let port_info = port_lines.join("\n");
|
||||
combined_instructions = Some(match combined_instructions {
|
||||
Some(existing) => format!("{}\n\n{}", existing, port_info),
|
||||
None => port_info,
|
||||
});
|
||||
}
|
||||
// Scheduler instructions (always appended so all containers get scheduling docs)
|
||||
let scheduler_docs = SCHEDULER_INSTRUCTIONS;
|
||||
combined_instructions = Some(match combined_instructions {
|
||||
Some(existing) => format!("{}\n\n{}", existing, scheduler_docs),
|
||||
None => scheduler_docs.to_string(),
|
||||
});
|
||||
|
||||
if let Some(ref instructions) = combined_instructions {
|
||||
env_vars.push(format!("CLAUDE_INSTRUCTIONS={}", instructions));
|
||||
@@ -685,9 +700,10 @@ pub async fn container_needs_recreation(
|
||||
}
|
||||
|
||||
// ── Claude instructions ───────────────────────────────────────────────
|
||||
let expected_instructions = merge_claude_instructions(
|
||||
let expected_instructions = build_claude_instructions(
|
||||
global_claude_instructions,
|
||||
project.claude_instructions.as_deref(),
|
||||
&project.port_mappings,
|
||||
);
|
||||
let container_instructions = get_env("CLAUDE_INSTRUCTIONS");
|
||||
if container_instructions.as_deref() != expected_instructions.as_deref() {
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
FROM ubuntu:24.04
|
||||
|
||||
# Multi-arch: builds for linux/amd64 and linux/arm64 (Apple Silicon)
|
||||
# Avoid interactive prompts during package install
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
|
||||
Reference in New Issue
Block a user