Compare commits
42 Commits
v0.2.101-w
...
v0.3.143-m
| Author | SHA1 | Date | |
|---|---|---|---|
| d60124f1bd | |||
| 4f23951379 | |||
| d6ac3ae6c6 | |||
| ef67b447b3 | |||
| 15b03173a5 | |||
| a0b4dca0bd | |||
| 17c5d699f9 | |||
| e62af502d3 | |||
| 3e9053946f | |||
| 3bbd7fd55f | |||
| 49d09e4447 | |||
| 702ebb7247 | |||
| caf3e26816 | |||
| 765ba91d7b | |||
| 532de77927 | |||
| 8301fd3690 | |||
| 2dffef0767 | |||
| 57a7cee544 | |||
| 6369f7e0a8 | |||
| 9ee0d34c19 | |||
| 922543cc04 | |||
| 13038989b8 | |||
| b55de8d75e | |||
| 8512ca615d | |||
| ebae39026f | |||
| d34e8e2c6d | |||
| 3935104cb5 | |||
| b17c759bd6 | |||
| bab1df1c57 | |||
| b952b8e8de | |||
| d7d7a83aec | |||
| 879322bc9a | |||
| ecaa42fa77 | |||
| 280358166a | |||
| 4732feb33e | |||
| 5977024953 | |||
| 27007b90e3 | |||
| 38e65619e9 | |||
| d2c1c2108a | |||
| cc163e6650 | |||
| 38082059a5 | |||
| beae0942a1 |
@@ -5,11 +5,13 @@ on:
|
||||
branches: [main]
|
||||
paths:
|
||||
- "app/**"
|
||||
- "VERSION"
|
||||
- ".gitea/workflows/build-app.yml"
|
||||
pull_request:
|
||||
branches: [main]
|
||||
paths:
|
||||
- "app/**"
|
||||
- "VERSION"
|
||||
- ".gitea/workflows/build-app.yml"
|
||||
workflow_dispatch:
|
||||
|
||||
@@ -18,10 +20,44 @@ env:
|
||||
REPO: ${{ gitea.repository }}
|
||||
|
||||
jobs:
|
||||
build-linux:
|
||||
compute-version:
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
version: ${{ steps.version.outputs.VERSION }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Fetch all tags
|
||||
run: git fetch --tags
|
||||
|
||||
- name: Compute version from VERSION file and tags
|
||||
id: version
|
||||
run: |
|
||||
MAJOR_MINOR=$(cat VERSION | tr -d '[:space:]')
|
||||
echo "Major.Minor: ${MAJOR_MINOR}"
|
||||
|
||||
# Find the latest tag matching v{MAJOR_MINOR}.N (exclude -mac, -win suffixes)
|
||||
# `|| true` so an empty grep result doesn't fail the step under pipefail.
|
||||
LATEST_TAG=$(git tag -l "v${MAJOR_MINOR}.*" --sort=-v:refname | grep -E "^v${MAJOR_MINOR}\.[0-9]+$" | head -1 || true)
|
||||
|
||||
if [ -n "$LATEST_TAG" ]; then
|
||||
echo "Latest matching tag: ${LATEST_TAG}"
|
||||
PATCH=$(git rev-list --count "${LATEST_TAG}..HEAD")
|
||||
else
|
||||
echo "No matching tag found for v${MAJOR_MINOR}.*, using total commit count"
|
||||
PATCH=$(git rev-list --count HEAD)
|
||||
fi
|
||||
|
||||
VERSION="${MAJOR_MINOR}.${PATCH}"
|
||||
echo "VERSION=${VERSION}" >> $GITHUB_OUTPUT
|
||||
echo "Computed version: ${VERSION}"
|
||||
|
||||
build-linux:
|
||||
runs-on: ubuntu-latest
|
||||
needs: [compute-version]
|
||||
steps:
|
||||
- name: Install Node.js 22
|
||||
run: |
|
||||
@@ -54,17 +90,9 @@ jobs:
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Compute version
|
||||
id: version
|
||||
run: |
|
||||
COMMIT_COUNT=$(git rev-list --count HEAD)
|
||||
VERSION="0.2.${COMMIT_COUNT}"
|
||||
echo "VERSION=${VERSION}" >> $GITHUB_OUTPUT
|
||||
echo "Computed version: ${VERSION}"
|
||||
|
||||
- name: Set app version
|
||||
run: |
|
||||
VERSION="${{ steps.version.outputs.VERSION }}"
|
||||
VERSION="${{ needs.compute-version.outputs.version }}"
|
||||
sed -i "s/\"version\": \".*\"/\"version\": \"${VERSION}\"/" app/src-tauri/tauri.conf.json
|
||||
sed -i "s/\"version\": \".*\"/\"version\": \"${VERSION}\"/" app/package.json
|
||||
sed -i "s/^version = \".*\"/version = \"${VERSION}\"/" app/src-tauri/Cargo.toml
|
||||
@@ -133,7 +161,7 @@ jobs:
|
||||
env:
|
||||
TOKEN: ${{ secrets.REGISTRY_TOKEN }}
|
||||
run: |
|
||||
TAG="v${{ steps.version.outputs.VERSION }}"
|
||||
TAG="v${{ needs.compute-version.outputs.version }}"
|
||||
# Create release
|
||||
curl -s -X POST \
|
||||
-H "Authorization: token ${TOKEN}" \
|
||||
@@ -156,6 +184,7 @@ jobs:
|
||||
|
||||
build-macos:
|
||||
runs-on: macos-latest
|
||||
needs: [compute-version]
|
||||
steps:
|
||||
- name: Install Node.js 22
|
||||
run: |
|
||||
@@ -183,17 +212,9 @@ jobs:
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Compute version
|
||||
id: version
|
||||
run: |
|
||||
COMMIT_COUNT=$(git rev-list --count HEAD)
|
||||
VERSION="0.2.${COMMIT_COUNT}"
|
||||
echo "VERSION=${VERSION}" >> $GITHUB_OUTPUT
|
||||
echo "Computed version: ${VERSION}"
|
||||
|
||||
- name: Set app version
|
||||
run: |
|
||||
VERSION="${{ steps.version.outputs.VERSION }}"
|
||||
VERSION="${{ needs.compute-version.outputs.version }}"
|
||||
sed -i '' "s/\"version\": \".*\"/\"version\": \"${VERSION}\"/" app/src-tauri/tauri.conf.json
|
||||
sed -i '' "s/\"version\": \".*\"/\"version\": \"${VERSION}\"/" app/package.json
|
||||
sed -i '' "s/^version = \".*\"/version = \"${VERSION}\"/" app/src-tauri/Cargo.toml
|
||||
@@ -243,12 +264,12 @@ jobs:
|
||||
env:
|
||||
TOKEN: ${{ secrets.REGISTRY_TOKEN }}
|
||||
run: |
|
||||
TAG="v${{ steps.version.outputs.VERSION }}-mac"
|
||||
TAG="v${{ needs.compute-version.outputs.version }}-mac"
|
||||
# Create release
|
||||
curl -s -X POST \
|
||||
-H "Authorization: token ${TOKEN}" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d "{\"tag_name\": \"${TAG}\", \"name\": \"Triple-C v${{ steps.version.outputs.VERSION }} (macOS)\", \"body\": \"Automated build from commit ${{ gitea.sha }}\"}" \
|
||||
-d "{\"tag_name\": \"${TAG}\", \"name\": \"Triple-C v${{ needs.compute-version.outputs.version }} (macOS)\", \"body\": \"Automated build from commit ${{ gitea.sha }}\"}" \
|
||||
"${GITEA_URL}/api/v1/repos/${REPO}/releases" > release.json
|
||||
RELEASE_ID=$(cat release.json | grep -o '"id":[0-9]*' | head -1 | grep -o '[0-9]*')
|
||||
echo "Release ID: ${RELEASE_ID}"
|
||||
@@ -266,6 +287,7 @@ jobs:
|
||||
|
||||
build-windows:
|
||||
runs-on: windows-latest
|
||||
needs: [compute-version]
|
||||
defaults:
|
||||
run:
|
||||
shell: cmd
|
||||
@@ -275,18 +297,10 @@ jobs:
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Compute version
|
||||
id: version
|
||||
run: |
|
||||
for /f %%i in ('git rev-list --count HEAD') do set "COMMIT_COUNT=%%i"
|
||||
set "VERSION=0.2.%COMMIT_COUNT%"
|
||||
echo VERSION=%VERSION%>> %GITHUB_OUTPUT%
|
||||
echo Computed version: %VERSION%
|
||||
|
||||
- name: Set app version
|
||||
shell: powershell
|
||||
run: |
|
||||
$version = "${{ steps.version.outputs.VERSION }}"
|
||||
$version = "${{ needs.compute-version.outputs.version }}"
|
||||
(Get-Content app/src-tauri/tauri.conf.json) -replace '"version": ".*?"', "`"version`": `"$version`"" | Set-Content app/src-tauri/tauri.conf.json
|
||||
(Get-Content app/package.json) -replace '"version": ".*?"', "`"version`": `"$version`"" | Set-Content app/package.json
|
||||
(Get-Content app/src-tauri/Cargo.toml) -replace '^version = ".*?"', "version = `"$version`"" | Set-Content app/src-tauri/Cargo.toml
|
||||
@@ -367,9 +381,9 @@ jobs:
|
||||
TOKEN: ${{ secrets.REGISTRY_TOKEN }}
|
||||
COMMIT_SHA: ${{ gitea.sha }}
|
||||
run: |
|
||||
set "TAG=v${{ steps.version.outputs.VERSION }}-win"
|
||||
set "TAG=v${{ needs.compute-version.outputs.version }}-win"
|
||||
echo Creating release %TAG%...
|
||||
curl -s -X POST -H "Authorization: token %TOKEN%" -H "Content-Type: application/json" -d "{\"tag_name\": \"%TAG%\", \"name\": \"Triple-C v${{ steps.version.outputs.VERSION }} (Windows)\", \"body\": \"Automated build from commit %COMMIT_SHA%\"}" "%GITEA_URL%/api/v1/repos/%REPO%/releases" > release.json
|
||||
curl -s -X POST -H "Authorization: token %TOKEN%" -H "Content-Type: application/json" -d "{\"tag_name\": \"%TAG%\", \"name\": \"Triple-C v${{ needs.compute-version.outputs.version }} (Windows)\", \"body\": \"Automated build from commit %COMMIT_SHA%\"}" "%GITEA_URL%/api/v1/repos/%REPO%/releases" > release.json
|
||||
for /f "tokens=2 delims=:," %%a in ('findstr /c:"\"id\"" release.json') do set "RELEASE_ID=%%a" & goto :found
|
||||
:found
|
||||
echo Release ID: %RELEASE_ID%
|
||||
@@ -378,9 +392,36 @@ jobs:
|
||||
curl -s -X POST -H "Authorization: token %TOKEN%" -H "Content-Type: application/octet-stream" --data-binary "@%%f" "%GITEA_URL%/api/v1/repos/%REPO%/releases/%RELEASE_ID%/assets?name=%%~nxf"
|
||||
)
|
||||
|
||||
create-tag:
|
||||
runs-on: ubuntu-latest
|
||||
needs: [compute-version, build-linux, build-macos, build-windows]
|
||||
if: gitea.event_name == 'push'
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Create version tag
|
||||
env:
|
||||
TOKEN: ${{ secrets.REGISTRY_TOKEN }}
|
||||
run: |
|
||||
VERSION="${{ needs.compute-version.outputs.version }}"
|
||||
TAG="v${VERSION}"
|
||||
echo "Creating tag ${TAG}..."
|
||||
|
||||
# Create annotated tag via Gitea API
|
||||
curl -s -X POST \
|
||||
-H "Authorization: token ${TOKEN}" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d "{\"tag_name\": \"${TAG}\", \"target\": \"${{ gitea.sha }}\", \"message\": \"Release ${TAG}\"}" \
|
||||
"${GITEA_URL}/api/v1/repos/${REPO}/tags" || echo "Tag may already exist (created by release)"
|
||||
|
||||
echo "Tag ${TAG} created successfully"
|
||||
|
||||
sync-to-github:
|
||||
runs-on: ubuntu-latest
|
||||
needs: [build-linux, build-macos, build-windows]
|
||||
needs: [compute-version, build-linux, build-macos, build-windows]
|
||||
if: gitea.event_name == 'push'
|
||||
env:
|
||||
GH_PAT: ${{ secrets.GH_PAT }}
|
||||
@@ -389,7 +430,7 @@ jobs:
|
||||
- name: Download artifacts from Gitea releases
|
||||
env:
|
||||
TOKEN: ${{ secrets.REGISTRY_TOKEN }}
|
||||
VERSION: ${{ needs.build-linux.outputs.version }}
|
||||
VERSION: ${{ needs.compute-version.outputs.version }}
|
||||
run: |
|
||||
set -e
|
||||
mkdir -p artifacts
|
||||
@@ -418,7 +459,7 @@ jobs:
|
||||
|
||||
- name: Create GitHub release and upload artifacts
|
||||
env:
|
||||
VERSION: ${{ needs.build-linux.outputs.version }}
|
||||
VERSION: ${{ needs.compute-version.outputs.version }}
|
||||
COMMIT_SHA: ${{ gitea.sha }}
|
||||
run: |
|
||||
set -e
|
||||
|
||||
59
.gitea/workflows/build-stt.yml
Normal file
59
.gitea/workflows/build-stt.yml
Normal file
@@ -0,0 +1,59 @@
|
||||
name: Build STT Container
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main]
|
||||
paths:
|
||||
- "stt-container/**"
|
||||
- ".gitea/workflows/build-stt.yml"
|
||||
pull_request:
|
||||
branches: [main]
|
||||
paths:
|
||||
- "stt-container/**"
|
||||
- ".gitea/workflows/build-stt.yml"
|
||||
|
||||
env:
|
||||
REGISTRY: repo.anhonesthost.net
|
||||
IMAGE_NAME: cybercovellc/triple-c/triple-c-stt
|
||||
|
||||
jobs:
|
||||
build-stt-container:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Login to Gitea Container Registry
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ${{ env.REGISTRY }}
|
||||
username: ${{ gitea.actor }}
|
||||
password: ${{ secrets.REGISTRY_TOKEN }}
|
||||
|
||||
- name: Login to GitHub Container Registry
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: shadowdao
|
||||
password: ${{ secrets.GH_PAT }}
|
||||
|
||||
- name: Build and push STT container image
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
context: ./stt-container
|
||||
file: ./stt-container/Dockerfile
|
||||
platforms: linux/amd64,linux/arm64
|
||||
push: ${{ gitea.event_name == 'push' }}
|
||||
tags: |
|
||||
${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest
|
||||
${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ gitea.sha }}
|
||||
ghcr.io/shadowdao/triple-c-stt:latest
|
||||
ghcr.io/shadowdao/triple-c-stt:${{ gitea.sha }}
|
||||
cache-from: type=gha
|
||||
cache-to: type=gha,mode=max
|
||||
@@ -36,6 +36,13 @@ jobs:
|
||||
username: ${{ gitea.actor }}
|
||||
password: ${{ secrets.REGISTRY_TOKEN }}
|
||||
|
||||
- name: Login to GitHub Container Registry
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: shadowdao
|
||||
password: ${{ secrets.GH_PAT }}
|
||||
|
||||
- name: Build and push container image
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
@@ -46,5 +53,7 @@ jobs:
|
||||
tags: |
|
||||
${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest
|
||||
${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ gitea.sha }}
|
||||
ghcr.io/shadowdao/triple-c-sandbox:latest
|
||||
ghcr.io/shadowdao/triple-c-sandbox:${{ gitea.sha }}
|
||||
cache-from: type=gha
|
||||
cache-to: type=gha,mode=max
|
||||
|
||||
193
.gitea/workflows/cleanup-releases.yml
Normal file
193
.gitea/workflows/cleanup-releases.yml
Normal file
@@ -0,0 +1,193 @@
|
||||
name: Cleanup Old Releases
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
keep_versions:
|
||||
description: "Number of recent versions to keep (each version has 3 releases: Linux, macOS, Windows)"
|
||||
required: true
|
||||
default: "5"
|
||||
dry_run:
|
||||
description: "Dry run - list what would be deleted without actually deleting"
|
||||
required: true
|
||||
default: "true"
|
||||
type: choice
|
||||
options:
|
||||
- "true"
|
||||
- "false"
|
||||
|
||||
env:
|
||||
GITEA_URL: ${{ gitea.server_url }}
|
||||
REPO: ${{ gitea.repository }}
|
||||
|
||||
jobs:
|
||||
cleanup:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Cleanup old releases
|
||||
env:
|
||||
TOKEN: ${{ secrets.REGISTRY_TOKEN }}
|
||||
GH_PAT: ${{ secrets.GH_PAT }}
|
||||
GITHUB_REPO: shadowdao/triple-c
|
||||
KEEP_VERSIONS: ${{ gitea.event.inputs.keep_versions }}
|
||||
DRY_RUN: ${{ gitea.event.inputs.dry_run }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
echo "==> Configuration"
|
||||
echo " Keep versions: ${KEEP_VERSIONS}"
|
||||
echo " Dry run: ${DRY_RUN}"
|
||||
echo ""
|
||||
|
||||
# ── Fetch all Gitea releases (paginated) ──
|
||||
ALL_RELEASES="[]"
|
||||
PAGE=1
|
||||
while true; do
|
||||
BATCH=$(curl -sf \
|
||||
-H "Authorization: token ${TOKEN}" \
|
||||
"${GITEA_URL}/api/v1/repos/${REPO}/releases?limit=50&page=${PAGE}")
|
||||
COUNT=$(echo "$BATCH" | jq 'length')
|
||||
[ "$COUNT" -eq 0 ] && break
|
||||
ALL_RELEASES=$(echo "$ALL_RELEASES" "$BATCH" | jq -s '.[0] + .[1]')
|
||||
PAGE=$((PAGE + 1))
|
||||
done
|
||||
|
||||
TOTAL=$(echo "$ALL_RELEASES" | jq 'length')
|
||||
echo "==> Found ${TOTAL} total Gitea releases"
|
||||
|
||||
# ── Extract unique version numbers and sort them ──
|
||||
# Tags are like: v0.2.26, v0.2.26-mac, v0.2.26-win, build-xxx
|
||||
# Extract the base version (strip -mac, -win suffixes)
|
||||
VERSIONS=$(echo "$ALL_RELEASES" | jq -r '.[].tag_name' \
|
||||
| sed 's/-mac$//' | sed 's/-win$//' \
|
||||
| grep -E '^v[0-9]+\.[0-9]+\.[0-9]+$' \
|
||||
| sort -t. -k1,1V -k2,2n -k3,3n \
|
||||
| uniq)
|
||||
|
||||
VERSION_COUNT=$(echo "$VERSIONS" | wc -l)
|
||||
echo "==> Found ${VERSION_COUNT} unique versions"
|
||||
echo ""
|
||||
|
||||
# ── Determine which versions to keep and which to delete ──
|
||||
KEEP=$(echo "$VERSIONS" | tail -n "${KEEP_VERSIONS}")
|
||||
DELETE=$(echo "$VERSIONS" | head -n -"${KEEP_VERSIONS}")
|
||||
|
||||
DELETE_COUNT=$(echo "$DELETE" | grep -c . || true)
|
||||
if [ "$DELETE_COUNT" -eq 0 ]; then
|
||||
echo "==> Nothing to clean up. Only ${VERSION_COUNT} versions exist, keeping ${KEEP_VERSIONS}."
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo "==> Keeping ${KEEP_VERSIONS} most recent versions:"
|
||||
echo "$KEEP" | sed 's/^/ /'
|
||||
echo ""
|
||||
echo "==> Will delete ${DELETE_COUNT} older versions ($(echo "$DELETE" | head -1) through $(echo "$DELETE" | tail -1)):"
|
||||
echo "$DELETE" | sed 's/^/ /'
|
||||
echo ""
|
||||
|
||||
# ── Delete releases ──
|
||||
DELETED_GITEA=0
|
||||
DELETED_GITHUB=0
|
||||
DELETED_TAGS=0
|
||||
|
||||
for VERSION in $DELETE; do
|
||||
# Each version can have up to 3 releases: base, -mac, -win
|
||||
for SUFFIX in "" "-mac" "-win"; do
|
||||
TAG="${VERSION}${SUFFIX}"
|
||||
|
||||
# Find the Gitea release ID for this tag
|
||||
RELEASE_ID=$(echo "$ALL_RELEASES" | jq -r --arg tag "$TAG" '.[] | select(.tag_name == $tag) | .id // empty')
|
||||
|
||||
if [ -n "$RELEASE_ID" ]; then
|
||||
if [ "$DRY_RUN" = "true" ]; then
|
||||
echo " [DRY RUN] Would delete Gitea release: ${TAG} (id: ${RELEASE_ID})"
|
||||
else
|
||||
echo " Deleting Gitea release: ${TAG} (id: ${RELEASE_ID})..."
|
||||
curl -sf -X DELETE \
|
||||
-H "Authorization: token ${TOKEN}" \
|
||||
"${GITEA_URL}/api/v1/repos/${REPO}/releases/${RELEASE_ID}" || echo " Warning: failed to delete Gitea release ${TAG}"
|
||||
DELETED_GITEA=$((DELETED_GITEA + 1))
|
||||
fi
|
||||
fi
|
||||
|
||||
# Delete the Gitea tag
|
||||
if [ "$DRY_RUN" = "true" ]; then
|
||||
echo " [DRY RUN] Would delete Gitea tag: ${TAG}"
|
||||
else
|
||||
curl -sf -X DELETE \
|
||||
-H "Authorization: token ${TOKEN}" \
|
||||
"${GITEA_URL}/api/v1/repos/${REPO}/tags/${TAG}" 2>/dev/null && DELETED_TAGS=$((DELETED_TAGS + 1)) || true
|
||||
fi
|
||||
done
|
||||
|
||||
# Delete the unified GitHub release (single tag per version, no suffix)
|
||||
if [ -n "$GH_PAT" ]; then
|
||||
GH_RELEASE=$(curl -sf \
|
||||
-H "Authorization: Bearer ${GH_PAT}" \
|
||||
-H "Accept: application/vnd.github+json" \
|
||||
"https://api.github.com/repos/${GITHUB_REPO}/releases/tags/${VERSION}" 2>/dev/null || echo "{}")
|
||||
GH_RELEASE_ID=$(echo "$GH_RELEASE" | jq -r '.id // empty')
|
||||
|
||||
if [ -n "$GH_RELEASE_ID" ]; then
|
||||
if [ "$DRY_RUN" = "true" ]; then
|
||||
echo " [DRY RUN] Would delete GitHub release: ${VERSION} (id: ${GH_RELEASE_ID})"
|
||||
else
|
||||
echo " Deleting GitHub release: ${VERSION} (id: ${GH_RELEASE_ID})..."
|
||||
curl -sf -X DELETE \
|
||||
-H "Authorization: Bearer ${GH_PAT}" \
|
||||
-H "Accept: application/vnd.github+json" \
|
||||
"https://api.github.com/repos/${GITHUB_REPO}/releases/${GH_RELEASE_ID}" || echo " Warning: failed to delete GitHub release ${VERSION}"
|
||||
DELETED_GITHUB=$((DELETED_GITHUB + 1))
|
||||
fi
|
||||
fi
|
||||
|
||||
# Delete the GitHub tag
|
||||
if [ "$DRY_RUN" = "true" ]; then
|
||||
echo " [DRY RUN] Would delete GitHub tag: ${VERSION}"
|
||||
else
|
||||
curl -sf -X DELETE \
|
||||
-H "Authorization: Bearer ${GH_PAT}" \
|
||||
-H "Accept: application/vnd.github+json" \
|
||||
"https://api.github.com/repos/${GITHUB_REPO}/git/refs/tags/${VERSION}" 2>/dev/null || true
|
||||
fi
|
||||
fi
|
||||
|
||||
echo ""
|
||||
done
|
||||
|
||||
# ── Also clean up any legacy non-semver releases (e.g., build-xxx) ──
|
||||
LEGACY_RELEASES=$(echo "$ALL_RELEASES" | jq -r '.[] | select(.tag_name | test("^v[0-9]") | not) | "\(.id) \(.tag_name)"')
|
||||
LEGACY_COUNT=$(echo "$LEGACY_RELEASES" | grep -c . || true)
|
||||
|
||||
if [ "$LEGACY_COUNT" -gt 0 ]; then
|
||||
echo "==> Found ${LEGACY_COUNT} legacy (non-versioned) releases to clean up:"
|
||||
echo "$LEGACY_RELEASES" | while read -r ID TAG; do
|
||||
[ -z "$ID" ] && continue
|
||||
if [ "$DRY_RUN" = "true" ]; then
|
||||
echo " [DRY RUN] Would delete legacy release: ${TAG} (id: ${ID})"
|
||||
else
|
||||
echo " Deleting legacy release: ${TAG} (id: ${ID})..."
|
||||
curl -sf -X DELETE \
|
||||
-H "Authorization: token ${TOKEN}" \
|
||||
"${GITEA_URL}/api/v1/repos/${REPO}/releases/${ID}" || echo " Warning: failed to delete ${TAG}"
|
||||
# Delete the tag too
|
||||
curl -sf -X DELETE \
|
||||
-H "Authorization: token ${TOKEN}" \
|
||||
"${GITEA_URL}/api/v1/repos/${REPO}/tags/${TAG}" 2>/dev/null || true
|
||||
DELETED_GITEA=$((DELETED_GITEA + 1))
|
||||
fi
|
||||
done
|
||||
echo ""
|
||||
fi
|
||||
|
||||
# ── Summary ──
|
||||
echo "==> Cleanup complete"
|
||||
if [ "$DRY_RUN" = "true" ]; then
|
||||
echo " Mode: DRY RUN (no changes made)"
|
||||
echo " Would delete: ${DELETE_COUNT} versions (up to $((DELETE_COUNT * 3)) Gitea releases + GitHub releases)"
|
||||
[ "$LEGACY_COUNT" -gt 0 ] && echo " Would also delete: ${LEGACY_COUNT} legacy releases"
|
||||
else
|
||||
echo " Gitea releases deleted: ${DELETED_GITEA}"
|
||||
echo " GitHub releases deleted: ${DELETED_GITHUB}"
|
||||
echo " Tags deleted: ${DELETED_TAGS}"
|
||||
fi
|
||||
12
CLAUDE.md
12
CLAUDE.md
@@ -62,7 +62,7 @@ docker exec stdout → tokio task → emit("terminal-output-{sessionId}") → li
|
||||
- **`components/terminal/TerminalView.tsx`** — xterm.js integration with WebGL rendering, URL detection for OAuth flow
|
||||
- **`components/layout/`** — TopBar (tabs + status), Sidebar (project list), StatusBar
|
||||
- **`components/projects/`** — ProjectCard, ProjectList, AddProjectDialog
|
||||
- **`components/settings/`** — Settings panels for API keys, Docker, AWS
|
||||
- **`components/settings/`** — Settings panels for API keys, Docker, AWS, Web Terminal
|
||||
|
||||
### Backend Structure (`app/src-tauri/src/`)
|
||||
|
||||
@@ -72,13 +72,17 @@ docker exec stdout → tokio task → emit("terminal-output-{sessionId}") → li
|
||||
- `container.rs` — Container lifecycle (create, start, stop, remove, inspect)
|
||||
- `exec.rs` — PTY exec sessions with bidirectional stdin/stdout streaming
|
||||
- `image.rs` — Image build/pull with progress streaming
|
||||
- **`models/`** — Serde structs (`Project`, `AuthMode`, `BedrockConfig`, `OllamaConfig`, `LiteLlmConfig`, `ContainerInfo`, `AppSettings`). These define the IPC contract with the frontend.
|
||||
- **`web_terminal/`** — Remote terminal access via axum HTTP+WebSocket server:
|
||||
- `server.rs` — Axum server lifecycle (start/stop), serves embedded HTML and handles WS upgrades
|
||||
- `ws_handler.rs` — Per-connection WebSocket handler with JSON protocol, session management, cleanup on disconnect
|
||||
- `terminal.html` — Self-contained xterm.js web UI embedded via `include_str!()`
|
||||
- **`models/`** — Serde structs (`Project`, `Backend`, `BedrockConfig`, `OllamaConfig`, `OpenAiCompatibleConfig`, `ClaudeCodeSettings`, `ContainerInfo`, `AppSettings`, `WebTerminalSettings`). These define the IPC contract with the frontend.
|
||||
- **`storage/`** — Persistence: `projects_store.rs` (JSON file with atomic writes), `secure.rs` (OS keychain via `keyring` crate), `settings_store.rs`
|
||||
|
||||
### Container (`container/`)
|
||||
|
||||
- **`Dockerfile`** — Ubuntu 24.04 base with Claude Code, Node.js 22, Python 3.12, Rust, Docker CLI, git, gh, AWS CLI v2, ripgrep, pnpm, uv, ruff pre-installed
|
||||
- **`entrypoint.sh`** — UID/GID remapping to match host user, SSH key setup, git config, docker socket permissions, then `sleep infinity`
|
||||
- **`entrypoint.sh`** — UID/GID remapping to match host user, SSH key setup, git config, docker socket permissions, Claude Code settings.json injection, then `sleep infinity`
|
||||
- **`triple-c-scheduler`** — Bash-based scheduled task system for recurring Claude Code invocations
|
||||
|
||||
### Container Lifecycle
|
||||
@@ -91,7 +95,7 @@ Per-project, independently configured:
|
||||
- **Anthropic (OAuth)** — `claude login` in terminal, token persists in config volume
|
||||
- **AWS Bedrock** — Static keys, profile, or bearer token injected as env vars
|
||||
- **Ollama** — Connect to a local or remote Ollama server via `ANTHROPIC_BASE_URL` (e.g., `http://host.docker.internal:11434`)
|
||||
- **LiteLLM** — Connect through a LiteLLM proxy gateway via `ANTHROPIC_BASE_URL` + `ANTHROPIC_AUTH_TOKEN` to access 100+ model providers
|
||||
- **OpenAI Compatible** — Connect through any OpenAI API-compatible endpoint (LiteLLM, OpenRouter, vLLM, etc.) via `ANTHROPIC_BASE_URL` + `ANTHROPIC_AUTH_TOKEN`
|
||||
|
||||
## Styling
|
||||
|
||||
|
||||
194
HOW-TO-USE.md
194
HOW-TO-USE.md
@@ -4,6 +4,26 @@ Triple-C (Claude-Code-Container) is a desktop application that runs Claude Code
|
||||
|
||||
---
|
||||
|
||||
## Table of Contents
|
||||
|
||||
- [Prerequisites](#prerequisites)
|
||||
- [First Launch](#first-launch)
|
||||
- [The Interface](#the-interface)
|
||||
- [Project Management](#project-management)
|
||||
- [Project Configuration](#project-configuration)
|
||||
- [MCP Servers (Beta)](#mcp-servers-beta)
|
||||
- [AWS Bedrock Configuration](#aws-bedrock-configuration)
|
||||
- [Ollama Configuration](#ollama-configuration)
|
||||
- [OpenAI Compatible Configuration](#openai-compatible-configuration)
|
||||
- [Settings](#settings)
|
||||
- [Web Terminal (Remote Access)](#web-terminal-remote-access)
|
||||
- [Terminal Features](#terminal-features)
|
||||
- [Scheduled Tasks (Inside the Container)](#scheduled-tasks-inside-the-container)
|
||||
- [What's Inside the Container](#whats-inside-the-container)
|
||||
- [Troubleshooting](#troubleshooting)
|
||||
|
||||
---
|
||||
|
||||
## Prerequisites
|
||||
|
||||
### Docker
|
||||
@@ -34,7 +54,7 @@ You need access to Claude Code through one of:
|
||||
- **Anthropic account** — Sign up at https://claude.ai and use `claude login` (OAuth) inside the terminal
|
||||
- **AWS Bedrock** — An AWS account with Bedrock access and Claude models enabled
|
||||
- **Ollama** — A local or remote Ollama server running an Anthropic-compatible model (best-effort support)
|
||||
- **LiteLLM** — A LiteLLM proxy gateway providing access to 100+ model providers (best-effort support)
|
||||
- **OpenAI Compatible** — Any OpenAI API-compatible endpoint (LiteLLM, OpenRouter, vLLM, text-generation-inference, LocalAI, etc.) (best-effort support)
|
||||
|
||||
---
|
||||
|
||||
@@ -51,7 +71,7 @@ Choose an **Image Source**:
|
||||
|
||||
| Source | Description | When to Use |
|
||||
|--------|-------------|-------------|
|
||||
| **Registry** | Pulls the pre-built image from `repo.anhonesthost.net` | Fastest setup — recommended for most users |
|
||||
| **Registry** | Pulls the pre-built image from `ghcr.io` | Fastest setup — recommended for most users |
|
||||
| **Local Build** | Builds the image locally from the embedded Dockerfile | If you can't reach the registry, or want a custom build |
|
||||
| **Custom** | Use any Docker image you specify | Advanced — bring your own sandbox image |
|
||||
|
||||
@@ -73,7 +93,7 @@ Select your project in the sidebar and click **Start**. A progress modal appears
|
||||
|
||||
Click the **Terminal** button to open an interactive terminal session. A new tab appears in the top bar and an xterm.js terminal loads in the main area.
|
||||
|
||||
Claude Code launches automatically with `--dangerously-skip-permissions` inside the sandboxed container.
|
||||
Claude Code launches automatically. By default, it runs in standard permission mode and will ask for your approval before executing commands or editing files. To enable auto-approval of all actions within the sandbox, enable **Full Permissions** in the project configuration.
|
||||
|
||||
### 5. Authenticate
|
||||
|
||||
@@ -86,22 +106,23 @@ Claude Code launches automatically with `--dangerously-skip-permissions` inside
|
||||
**AWS Bedrock:**
|
||||
|
||||
1. Stop the container first (settings can only be changed while stopped).
|
||||
2. In the project card, switch the auth mode to **Bedrock**.
|
||||
2. In the project card, switch the backend to **Bedrock**.
|
||||
3. Expand the **Config** panel and fill in your AWS credentials (see [AWS Bedrock Configuration](#aws-bedrock-configuration) below).
|
||||
4. Start the container again.
|
||||
|
||||
**Ollama:**
|
||||
|
||||
1. Stop the container first (settings can only be changed while stopped).
|
||||
2. In the project card, switch the auth mode to **Ollama**.
|
||||
3. Expand the **Config** panel and set the base URL of your Ollama server (defaults to `http://host.docker.internal:11434` for a local instance). Optionally set a model ID.
|
||||
4. Start the container again.
|
||||
2. In the project card, switch the backend to **Ollama**.
|
||||
3. Expand the **Config** panel and set the base URL of your Ollama server (defaults to `http://host.docker.internal:11434` for a local instance). Set the **Model ID** to the model you want to use (required).
|
||||
4. Make sure the model has been pulled in Ollama (e.g., `ollama pull qwen3.5:27b`) or used via Ollama cloud before starting.
|
||||
5. Start the container again.
|
||||
|
||||
**LiteLLM:**
|
||||
**OpenAI Compatible:**
|
||||
|
||||
1. Stop the container first (settings can only be changed while stopped).
|
||||
2. In the project card, switch the auth mode to **LiteLLM**.
|
||||
3. Expand the **Config** panel and set the base URL of your LiteLLM proxy (defaults to `http://host.docker.internal:4000`). Optionally set an API key and model ID.
|
||||
2. In the project card, switch the backend to **OpenAI Compatible**.
|
||||
3. Expand the **Config** panel and set the base URL of your OpenAI-compatible endpoint (defaults to `http://host.docker.internal:4000` as an example). Optionally set an API key and model ID.
|
||||
4. Start the container again.
|
||||
|
||||
---
|
||||
@@ -205,22 +226,34 @@ When enabled, the host Docker socket is mounted into the container so Claude Cod
|
||||
|
||||
### Mission Control
|
||||
|
||||
Toggle **Mission Control** to integrate [Flight Control](https://github.com/msieurthenardier/mission-control) — an AI-first development methodology — into the project. When enabled:
|
||||
Toggle **Mission Control** to integrate Flight Control — an AI-first development methodology bundled with Triple-C — into the project. When enabled:
|
||||
|
||||
- The Flight Control repository is automatically cloned into the container
|
||||
- The bundled Flight Control files are installed into the container
|
||||
- Flight Control skills are installed to Claude Code's skill directory (`~/.claude/skills/`)
|
||||
- Project instructions are appended with Flight Control workflow guidance
|
||||
- The repository is symlinked at `/workspace/mission-control`
|
||||
- The files are symlinked at `/workspace/mission-control`
|
||||
|
||||
Available skills include `/mission`, `/flight`, `/leg`, `/agentic-workflow`, `/flight-debrief`, `/mission-debrief`, `/daily-briefing`, and `/init-project`.
|
||||
|
||||
> This setting can only be changed when the container is stopped. Toggling it triggers a container recreation on the next start.
|
||||
|
||||
### Full Permissions
|
||||
|
||||
Toggle **Full Permissions** to allow Claude Code to run with `--dangerously-skip-permissions` inside the container. This is **off by default**.
|
||||
|
||||
When **enabled**, Claude auto-approves all tool calls (file edits, shell commands, etc.) without prompting you. This is the fastest workflow since you won't be interrupted for approvals, and the Docker container provides isolation.
|
||||
|
||||
When **disabled** (default), Claude prompts you for approval before executing each action, giving you fine-grained control over what it does.
|
||||
|
||||
> **CAUTION:** Enabling full permissions means Claude can execute any command inside the container without asking. While the container sandbox limits the blast radius, make sure you understand the implications — especially if the container has Docker socket access or network connectivity.
|
||||
|
||||
> This setting can only be changed when the container is stopped. It takes effect the next time you open a terminal session.
|
||||
|
||||
### Environment Variables
|
||||
|
||||
Click **Edit** to open the environment variables modal. Add key-value pairs that will be injected into the container. Per-project variables override global variables with the same key.
|
||||
|
||||
> Reserved prefixes (`ANTHROPIC_`, `AWS_`, `GIT_`, `HOST_`, `CLAUDE_`, `TRIPLE_C_`) are filtered out to prevent conflicts with internal variables.
|
||||
> Reserved prefixes (`ANTHROPIC_`, `AWS_`, `GIT_`, `HOST_`, `TRIPLE_C_`) and specific internal variables (`CLAUDE_INSTRUCTIONS`, `MCP_SERVERS_JSON`, etc.) are filtered out to prevent conflicts. `CLAUDE_CODE_*` variables are now allowed, so you can set Claude Code feature flags directly (e.g., `CLAUDE_CODE_DISABLE_TERMINAL_TITLE=1`).
|
||||
|
||||
### Port Mappings
|
||||
|
||||
@@ -235,6 +268,25 @@ Each mapping specifies:
|
||||
|
||||
Click **Edit** to write per-project instructions for Claude Code. These are written to `~/.claude/CLAUDE.md` inside the container and provide project-specific context. If you also have global instructions (in Settings), the global instructions come first, followed by the per-project instructions.
|
||||
|
||||
### Claude Code Settings
|
||||
|
||||
Click **Edit** next to "Claude Code Settings" to configure Claude Code CLI behavior for this project. These settings control how Claude Code operates inside the container:
|
||||
|
||||
| Setting | What It Does |
|
||||
|---------|-------------|
|
||||
| **TUI Mode** | Set to **Fullscreen** for flicker-free alt-screen rendering (uses `CLAUDE_CODE_NO_FLICKER=1`) |
|
||||
| **Effort Level** | Controls reasoning depth: **Low** (fast, less thorough), **Medium** (balanced), **High** (deep reasoning) |
|
||||
| **Focus Mode** | Collapses tool output to one-line summaries, showing only the prompt and final response |
|
||||
| **Thinking Summaries** | Shows Claude's thinking process as summaries during responses |
|
||||
| **Session Recap** | Provides context when returning to a session after being away |
|
||||
| **Auto-Scroll Disabled** | Disables auto-scroll when in fullscreen TUI mode |
|
||||
| **Env Scrub** | Strips credentials from subprocess environments for security |
|
||||
| **Prompt Caching (1h)** | Enables 1-hour prompt cache TTL instead of the default 5 minutes |
|
||||
|
||||
Per-project settings override global defaults set in Settings. If all settings are at their defaults, no configuration is injected.
|
||||
|
||||
> These settings map to Claude Code environment variables and `~/.claude/settings.json` entries. Changes require stopping and restarting the container to take effect.
|
||||
|
||||
---
|
||||
|
||||
## MCP Servers (Beta)
|
||||
@@ -361,7 +413,7 @@ MCP server configuration is tracked via SHA-256 fingerprints stored as Docker la
|
||||
|
||||
## AWS Bedrock Configuration
|
||||
|
||||
To use Claude via AWS Bedrock instead of Anthropic's API, switch the auth mode to **Bedrock** on the project card.
|
||||
To use Claude via AWS Bedrock instead of Anthropic's API, switch the backend to **Bedrock** on the project card.
|
||||
|
||||
### Authentication Methods
|
||||
|
||||
@@ -390,12 +442,12 @@ Per-project settings always override these global defaults.
|
||||
|
||||
## Ollama Configuration
|
||||
|
||||
To use Claude Code with a local or remote Ollama server, switch the auth mode to **Ollama** on the project card.
|
||||
To use Claude Code with a local or remote Ollama server, switch the backend to **Ollama** on the project card.
|
||||
|
||||
### Settings
|
||||
|
||||
- **Base URL** — The URL of your Ollama server. Defaults to `http://host.docker.internal:11434`, which reaches a locally running Ollama instance from inside the container. For a remote server, use its IP or hostname (e.g., `http://192.168.1.100:11434`).
|
||||
- **Model ID** — Optional. Override the model to use (e.g., `qwen3.5:27b`).
|
||||
- **Model ID** — **Required.** The model to use (e.g., `qwen3.5:27b`). The model must be pulled in Ollama before use — run `ollama pull <model>`, or if using Ollama's cloud service, ensure the model has been used at least once so it is cached and available when the container starts.
|
||||
|
||||
### How It Works
|
||||
|
||||
@@ -403,23 +455,25 @@ Triple-C sets `ANTHROPIC_BASE_URL` to point Claude Code at your Ollama server in
|
||||
|
||||
> **Note:** Ollama support is best-effort. Claude Code is designed for Anthropic models, so some features (tool use, extended thinking, prompt caching, etc.) may not work as expected with non-Anthropic models.
|
||||
|
||||
> **Important:** The model must already be available in Ollama before starting the container. If using a local Ollama instance, pull the model first with `ollama pull <model-name>`. If using Ollama's cloud service, ensure the model has been used at least once so it is cached.
|
||||
|
||||
---
|
||||
|
||||
## LiteLLM Configuration
|
||||
## OpenAI Compatible Configuration
|
||||
|
||||
To use Claude Code through a [LiteLLM](https://docs.litellm.ai/) proxy gateway, switch the auth mode to **LiteLLM** on the project card. LiteLLM supports 100+ model providers (OpenAI, Gemini, Anthropic, and more) through a single proxy.
|
||||
To use Claude Code through any OpenAI API-compatible endpoint, switch the backend to **OpenAI Compatible** on the project card. This works with any server that exposes an OpenAI-compatible API, including LiteLLM, OpenRouter, vLLM, text-generation-inference, LocalAI, and others.
|
||||
|
||||
### Settings
|
||||
|
||||
- **Base URL** — The URL of your LiteLLM proxy. Defaults to `http://host.docker.internal:4000` for a locally running proxy.
|
||||
- **API Key** — Optional. The API key for your LiteLLM proxy, if authentication is required. Stored securely in your OS keychain.
|
||||
- **Base URL** — The URL of your OpenAI-compatible endpoint. Defaults to `http://host.docker.internal:4000` as an example (adjust to match your server's address and port).
|
||||
- **API Key** — Optional. The API key for your endpoint, if authentication is required. Stored securely in your OS keychain.
|
||||
- **Model ID** — Optional. Override the model to use.
|
||||
|
||||
### How It Works
|
||||
|
||||
Triple-C sets `ANTHROPIC_BASE_URL` to point Claude Code at your LiteLLM proxy. If an API key is provided, it is set as `ANTHROPIC_AUTH_TOKEN`.
|
||||
Triple-C sets `ANTHROPIC_BASE_URL` to point Claude Code at your OpenAI-compatible endpoint. If an API key is provided, it is set as `ANTHROPIC_AUTH_TOKEN`.
|
||||
|
||||
> **Note:** LiteLLM support is best-effort. Claude Code is designed for Anthropic models, so some features (tool use, extended thinking, prompt caching, etc.) may not work as expected when routing to non-Anthropic models through the proxy.
|
||||
> **Note:** OpenAI Compatible support is best-effort. Claude Code is designed for Anthropic models, so some features (tool use, extended thinking, prompt caching, etc.) may not work as expected when routing to non-Anthropic models through the endpoint.
|
||||
|
||||
---
|
||||
|
||||
@@ -446,6 +500,29 @@ Instructions applied to **all** projects. Written to `~/.claude/CLAUDE.md` in ev
|
||||
|
||||
Environment variables applied to **all** project containers. Per-project variables with the same key take precedence.
|
||||
|
||||
### Default SSH Key Directory
|
||||
|
||||
Path to your SSH key directory (typically `~/.ssh`). This is mounted into **all** containers that don't have a per-project SSH path set. Per-project SSH paths take precedence.
|
||||
|
||||
### Default Git Name / Email
|
||||
|
||||
Sets `git user.name` and `git user.email` inside all containers. Per-project Git Name / Email settings take precedence. This is useful so you don't have to set the same name and email on every project.
|
||||
|
||||
### Claude Code Settings (Global Defaults)
|
||||
|
||||
Default Claude Code CLI settings applied to all projects. See [Claude Code Settings](#claude-code-settings) in the Project Configuration section for a description of each setting. Per-project settings override these global defaults.
|
||||
|
||||
### Web Terminal
|
||||
|
||||
Enable remote access to your project terminals from any device on the local network (tablets, phones, other computers).
|
||||
|
||||
- **Toggle** — Click ON/OFF to start or stop the web terminal server.
|
||||
- **URL** — When running, shows the full URL including the access token. Click **Copy URL** to copy it to your clipboard, then open it in a browser on your tablet or phone.
|
||||
- **Token** — An access token is auto-generated on first enable. Click **Copy** to copy the token, or **Regenerate** to create a new one (this disconnects existing web sessions).
|
||||
- **Port** — Defaults to 7681. Configurable in `settings.json` if needed.
|
||||
|
||||
The web terminal server auto-starts on app launch if it was previously enabled, and stops when the app closes.
|
||||
|
||||
### Updates
|
||||
|
||||
- **Current Version** — The installed version of Triple-C.
|
||||
@@ -456,11 +533,48 @@ When an update is available, a pulsing **Update** button appears in the top bar.
|
||||
|
||||
---
|
||||
|
||||
## Web Terminal (Remote Access)
|
||||
|
||||
The web terminal lets you access your running project terminals from a tablet, phone, or any other device on the local network — no app installation required, just a web browser.
|
||||
|
||||
### Setup
|
||||
|
||||
1. Go to **Settings** in the sidebar.
|
||||
2. Find the **Web Terminal** section and click the toggle to **ON**.
|
||||
3. A URL appears (e.g., `http://192.168.1.100:7681?token=...`). Click **Copy URL**.
|
||||
4. Open the URL in a browser on your tablet or other device.
|
||||
|
||||
### Using the Web Terminal
|
||||
|
||||
The web terminal UI mirrors the desktop app's terminal experience:
|
||||
|
||||
- **Project picker** — Select a running project from the dropdown at the top.
|
||||
- **Claude / Bash buttons** — Open a new Claude Code or bash session for the selected project.
|
||||
- **Tab bar** — Switch between multiple open sessions. Click the **x** on a tab to close it.
|
||||
- **Input bar** — A text input at the bottom optimized for mobile/tablet keyboards. Characters are sent immediately without waiting for autocomplete. Helper buttons for **Enter**, **Tab**, and **^C** (Ctrl+C) are provided for keys that are awkward on virtual keyboards.
|
||||
- **Scroll to bottom** — A floating arrow button appears when you scroll up, letting you jump back to the latest output.
|
||||
|
||||
### Security
|
||||
|
||||
- Access requires a token in the URL query string. Without the correct token, connections are rejected.
|
||||
- The token is auto-generated (32 bytes, base64url-encoded) and can be regenerated at any time from Settings.
|
||||
- The server only listens on port 7681 (configurable) — make sure this port is not exposed to the public internet.
|
||||
- All sessions opened from a browser tab are automatically cleaned up when the tab is closed or the WebSocket disconnects.
|
||||
|
||||
### Tips
|
||||
|
||||
- **Bookmark the URL** on your tablet for quick access.
|
||||
- The web terminal works best in landscape orientation on tablets.
|
||||
- If the connection drops (e.g., Wi-Fi interruption), the web terminal auto-reconnects after 2 seconds.
|
||||
- Regenerating the token invalidates all existing browser sessions — you'll need to update bookmarks with the new URL.
|
||||
|
||||
---
|
||||
|
||||
## Terminal Features
|
||||
|
||||
### Multiple Sessions
|
||||
|
||||
You can open multiple terminal sessions (even for the same project). Each session gets its own tab in the top bar. Click a tab to switch, or click the **x** on a tab to close it. Tabs show the project name, with a "(bash)" suffix for shell sessions.
|
||||
You can open multiple terminal sessions (even for the same project). Each session gets its own tab in the top bar. Click a tab to switch, or click the **x** on a tab to close it. Tabs show the project name (or custom session name if provided), with a "(bash)" suffix for shell sessions.
|
||||
|
||||
### Bash Shell Sessions
|
||||
|
||||
@@ -472,6 +586,10 @@ When Claude Code prints a long URL (e.g., during `claude login`), Triple-C detec
|
||||
|
||||
Shorter URLs in terminal output are also clickable directly.
|
||||
|
||||
### Copying and Pasting
|
||||
|
||||
Use **Ctrl+Shift+C** (or **Cmd+C** on macOS) to copy selected text from the terminal, and **Ctrl+Shift+V** (or **Cmd+V** on macOS) to paste. This follows standard terminal emulator conventions since Ctrl+C is reserved for sending SIGINT.
|
||||
|
||||
### Clipboard Support (OSC 52)
|
||||
|
||||
Programs inside the container can copy text to your host clipboard. When a container program uses `xclip`, `xsel`, or `pbcopy`, the text is transparently forwarded to your host clipboard via OSC 52 escape sequences. No additional configuration is required — this works out of the box.
|
||||
@@ -581,6 +699,24 @@ You can install additional tools at runtime with `sudo apt install`, `pip instal
|
||||
|
||||
---
|
||||
|
||||
## Claude Code Tips
|
||||
|
||||
These features are built into Claude Code and work inside Triple-C containers with no extra configuration:
|
||||
|
||||
| Feature | How to Use |
|
||||
|---------|-----------|
|
||||
| **Focus Mode** | Run `/focus` or press `Ctrl+O` in the terminal to toggle collapsed tool output |
|
||||
| **Session Recap** | Run `/recap` to get a summary of what happened in the current session |
|
||||
| **Session Color** | Run `/color red` (or any color) to color-code your terminal prompt bar |
|
||||
| **Recurring Tasks** | Run `/loop 5m check the deploy` to repeat a prompt every 5 minutes |
|
||||
| **Interactive Lessons** | Run `/powerup` to learn Claude Code features with animated demos |
|
||||
| **Team Onboarding** | Run `/team-onboarding` to generate a teammate ramp-up guide |
|
||||
| **Bedrock Setup** | Select "3rd-party platform" on the login screen for an interactive Bedrock setup wizard |
|
||||
| **Vertex AI Setup** | Select "3rd-party platform" on the login screen for an interactive Vertex AI setup wizard |
|
||||
| **MCP Elicitation** | MCP servers can now request structured user input mid-task — works automatically |
|
||||
|
||||
---
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Docker is "Not Available"
|
||||
@@ -622,3 +758,13 @@ You can install additional tools at runtime with `sudo apt install`, `pip instal
|
||||
- Ensure the Docker image for the MCP server exists (pull it first if needed).
|
||||
- Check that Docker socket access is available (stdio + Docker MCP servers auto-enable this).
|
||||
- Try resetting the project container to force a clean recreation.
|
||||
|
||||
### "Failed to install Anthropic marketplace" Error
|
||||
|
||||
If Claude Code shows **"Failed to install Anthropic marketplace - Will retry on next startup"** repeatedly, the marketplace metadata in `~/.claude.json` may be corrupted. To fix this, open a **Shell** session in the project and run:
|
||||
|
||||
```bash
|
||||
cp ~/.claude.json ~/.claude.json.bak && jq 'with_entries(select(.key | startswith("officialMarketplace") | not))' ~/.claude.json.bak > ~/.claude.json
|
||||
```
|
||||
|
||||
This backs up your config and removes the corrupted marketplace entries. Claude Code will re-download them cleanly on the next startup.
|
||||
|
||||
106
MISSION-CONTROL-SETUP.md
Normal file
106
MISSION-CONTROL-SETUP.md
Normal file
@@ -0,0 +1,106 @@
|
||||
# Mission Control Setup Instructions
|
||||
|
||||
Reference document for adding Flight Control methodology to any project.
|
||||
|
||||
## How Triple-C Installs Mission Control
|
||||
|
||||
When Mission Control is enabled for a project in Triple-C:
|
||||
|
||||
1. **Bundled files install**: The Mission Control files bundled with Triple-C are copied to `/home/claude/mission-control/` (persisted in the config volume)
|
||||
2. **Skills install**: All skills from the bundled `.claude/skills/` are copied to `~/.claude/skills/` so Claude Code discovers them automatically as `/slash-commands`
|
||||
3. **Workspace symlink**: `/workspace/mission-control/` symlinks to the installed copy for methodology doc access
|
||||
4. **Global instructions**: Mission Control usage instructions are injected into `~/.claude/CLAUDE.md`
|
||||
|
||||
This happens automatically on every container start, so skill updates from new Triple-C releases are picked up on restart.
|
||||
|
||||
## Two Pieces Needed Per Project
|
||||
|
||||
1. **Global CLAUDE.md** — Handled automatically by Triple-C when Mission Control is enabled
|
||||
2. **Project CLAUDE.md** — Add the Flight Operations section to each project's `CLAUDE.md`
|
||||
|
||||
Then run `/init-project` to create the `.flightops/` directory.
|
||||
|
||||
---
|
||||
|
||||
## 1. Global CLAUDE.md Instructions
|
||||
|
||||
These are **automatically injected by Triple-C** when Mission Control is enabled. For reference, the injected content is:
|
||||
|
||||
```markdown
|
||||
## Mission Control
|
||||
|
||||
The `/workspace/mission-control/` directory contains **Flight Control** — an AI-first development methodology for structured project management. Use it for all project work.
|
||||
|
||||
### How It Works
|
||||
|
||||
- **Mission Control is a tool, not a project.** It provides skills and methodology for managing other projects.
|
||||
- All Flight Control skills are installed as personal skills in `~/.claude/skills/` and are automatically available as `/slash-commands`
|
||||
- The methodology docs and project registry live in `/workspace/mission-control/`
|
||||
|
||||
### When to Use
|
||||
|
||||
When working on any project that has a `.flightops/` directory, follow the Flight Control methodology:
|
||||
1. Read the project's `.flightops/ARTIFACTS.md` to understand artifact storage
|
||||
2. Read `.flightops/FLIGHT_OPERATIONS.md` for the implementation workflow
|
||||
3. Use Mission Control skills for planning and execution
|
||||
|
||||
### Available Skills
|
||||
|
||||
| Skill | When to Use |
|
||||
|-------|-------------|
|
||||
| `/init-project` | Setting up a new project for Flight Control |
|
||||
| `/mission` | Defining new work outcomes (days-to-weeks scope) |
|
||||
| `/flight` | Creating technical specs from missions (hours-to-days scope) |
|
||||
| `/leg` | Generating implementation steps from flights (minutes-to-hours scope) |
|
||||
| `/agentic-workflow` | Executing legs with multi-agent workflow (implement, review, commit) |
|
||||
| `/flight-debrief` | Post-flight analysis after a flight lands |
|
||||
| `/mission-debrief` | Post-mission retrospective after completion |
|
||||
| `/daily-briefing` | Cross-project status report |
|
||||
|
||||
### Key Rules
|
||||
|
||||
- **Planning skills produce artifacts only** — never modify source code directly
|
||||
- **Phase gates require human confirmation** — missions before flights, flights before legs
|
||||
- **Legs are immutable once in-flight** — create new ones instead of modifying
|
||||
- **`/agentic-workflow` orchestrates implementation** — it spawns separate Developer and Reviewer agents
|
||||
- **Artifacts live in the target project** — not in mission-control
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 2. Project CLAUDE.md Instructions
|
||||
|
||||
Add this section to each project's `CLAUDE.md`:
|
||||
|
||||
```markdown
|
||||
## Flight Operations
|
||||
|
||||
This project uses Flight Control (bundled with Triple-C) for structured development.
|
||||
|
||||
**Before any mission/flight/leg work, read these files in order:**
|
||||
1. `.flightops/README.md` — What the flightops directory contains
|
||||
2. `.flightops/FLIGHT_OPERATIONS.md` — **The workflow you MUST follow**
|
||||
3. `.flightops/ARTIFACTS.md` — Where all artifacts are stored
|
||||
4. `.flightops/agent-crews/` — Project crew definitions for each phase (read the relevant crew file)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 3. Initialize the Project
|
||||
|
||||
After adding the CLAUDE.md sections, run `/init-project` from mission-control to:
|
||||
|
||||
1. Create the `.flightops/` directory with methodology references
|
||||
2. Configure the artifact system (files or Jira)
|
||||
3. Set up agent crew definitions
|
||||
4. Register the project in `/workspace/mission-control/projects.md`
|
||||
|
||||
---
|
||||
|
||||
## Quick Checklist for New Projects
|
||||
|
||||
- [ ] Enable Mission Control for the project in Triple-C (auto-installs skills to `~/.claude/skills/`)
|
||||
- [ ] Add Flight Operations section to the project's `CLAUDE.md`
|
||||
- [ ] Run `/init-project` from mission-control
|
||||
- [ ] Add the project to `/workspace/mission-control/projects.md`
|
||||
- [ ] Add `.flightops/` to the project's `.gitignore` (if artifacts should not be committed) or commit it (if they should)
|
||||
62
README.md
62
README.md
@@ -1,6 +1,6 @@
|
||||
# Triple-C (Claude-Code-Container)
|
||||
|
||||
Triple-C is a cross-platform desktop application that sandboxes Claude Code inside Docker containers. When running with `--dangerously-skip-permissions`, Claude only has access to the files and projects you explicitly provide to it.
|
||||
Triple-C is a cross-platform desktop application that sandboxes Claude Code inside Docker containers. Each project can optionally enable full permissions mode (`--dangerously-skip-permissions`), giving Claude unrestricted access within the sandbox.
|
||||
|
||||
## Architecture
|
||||
|
||||
@@ -27,7 +27,7 @@ Triple-C is a cross-platform desktop application that sandboxes Claude Code insi
|
||||
### Container Lifecycle
|
||||
|
||||
1. **Create**: New container created with bind mounts, env vars, and labels
|
||||
2. **Start**: Container started, entrypoint remaps UID/GID, sets up SSH, configures Docker group, sets up MCP servers
|
||||
2. **Start**: Container started, entrypoint remaps UID/GID, sets up SSH, configures Docker group, sets up MCP servers, injects Claude Code settings
|
||||
3. **Terminal**: `docker exec` launches Claude Code (or bash shell) with a PTY
|
||||
4. **Stop**: Container halted (filesystem persists in named volume); MCP containers stopped
|
||||
5. **Restart**: Existing container restarted; recreated if settings changed (detected via SHA-256 fingerprint)
|
||||
@@ -49,10 +49,10 @@ Each project can independently use one of:
|
||||
|
||||
- **Anthropic** (OAuth): User runs `claude login` inside the terminal on first use. Token persisted in the config volume across restarts and resets.
|
||||
- **AWS Bedrock**: Per-project AWS credentials (static keys, profile, or bearer token). SSO sessions are validated before launching Claude for Profile auth.
|
||||
- **Ollama**: Connect to a local or remote Ollama server via `ANTHROPIC_BASE_URL` (e.g., `http://host.docker.internal:11434`). Optional model override.
|
||||
- **LiteLLM**: Connect through a LiteLLM proxy gateway via `ANTHROPIC_BASE_URL` + `ANTHROPIC_AUTH_TOKEN` to access 100+ model providers. API key stored securely in OS keychain.
|
||||
- **Ollama**: Connect to a local or remote Ollama server via `ANTHROPIC_BASE_URL` (e.g., `http://host.docker.internal:11434`). Requires a model ID, and the model must be pulled (or used via Ollama cloud) before starting the container.
|
||||
- **OpenAI Compatible**: Connect through any OpenAI API-compatible endpoint (LiteLLM, OpenRouter, vLLM, text-generation-inference, LocalAI, etc.) via `ANTHROPIC_BASE_URL` + `ANTHROPIC_AUTH_TOKEN`. API key stored securely in OS keychain.
|
||||
|
||||
> **Note:** Ollama and LiteLLM support is best-effort. Claude Code is designed for Anthropic models, so some features (tool use, extended thinking, prompt caching, etc.) may not work as expected with non-Anthropic models behind these backends.
|
||||
> **Note:** Ollama and OpenAI Compatible support is best-effort. Claude Code is designed for Anthropic models, so some features (tool use, extended thinking, prompt caching, etc.) may not work as expected with non-Anthropic models behind these backends.
|
||||
|
||||
### Container Spawning (Sibling Containers)
|
||||
|
||||
@@ -83,7 +83,32 @@ Triple-C supports [Model Context Protocol (MCP)](https://modelcontextprotocol.io
|
||||
|
||||
### Mission Control Integration
|
||||
|
||||
Optional per-project integration with [Flight Control](https://github.com/msieurthenardier/mission-control) — an AI-first development methodology. When enabled, the repo is cloned into the container, skills are installed, and workflow instructions are injected into CLAUDE.md.
|
||||
Optional per-project integration with Flight Control — an AI-first development methodology bundled with Triple-C. When enabled, the bundled files are installed into the container, skills are installed, and workflow instructions are injected into CLAUDE.md.
|
||||
|
||||
### Web Terminal (Remote Access)
|
||||
|
||||
Triple-C includes an optional web terminal server for accessing project terminals from tablets, phones, or other devices on the local network. When enabled in Settings, an axum HTTP+WebSocket server starts inside the Tauri process, serving a standalone xterm.js-based terminal UI.
|
||||
|
||||
- **URL**: `http://<LAN_IP>:7681?token=...` (port configurable)
|
||||
- **Authentication**: Token-based (auto-generated, copyable from Settings)
|
||||
- **Protocol**: JSON over WebSocket with base64-encoded terminal data
|
||||
- **Features**: Project picker, multiple tabs (Claude + bash sessions), mobile-optimized input bar, scroll-to-bottom button
|
||||
- **Session cleanup**: All terminal sessions are closed when the browser disconnects
|
||||
|
||||
The web terminal shares the existing `ExecSessionManager` via `Arc`-wrapped stores — same Docker exec sessions, different transport (WebSocket instead of Tauri IPC events).
|
||||
|
||||
### Speech-to-Text (Voice Mode)
|
||||
|
||||
Triple-C includes optional speech-to-text powered by [Faster Whisper](https://github.com/SYSTRAN/faster-whisper) running in a separate Docker container. When enabled, a microphone button appears in the bottom-left corner of each terminal view.
|
||||
|
||||
- **Hotkey**: `Ctrl+Shift+M` to toggle recording
|
||||
- **Models**: `tiny`, `small`, or `medium` (configurable in Settings)
|
||||
- **Port**: Default `9876` (configurable)
|
||||
- **Language**: Optional language hint for transcription
|
||||
- **Auto-start**: When STT is enabled in Settings, the container starts automatically with the app — no need to manually start it after each restart
|
||||
- **On-demand fallback**: If not auto-started, the container starts automatically when you first click the mic button
|
||||
|
||||
**How it works**: Audio is captured in the browser via the Web Audio API, encoded as WAV, and sent to the Faster Whisper container's `/transcribe` endpoint. The transcribed text is inserted directly into the active terminal. The STT container uses a named Docker volume (`triple-c-stt-model-cache`) to cache Whisper models across restarts.
|
||||
|
||||
### Docker Socket Path
|
||||
|
||||
@@ -102,19 +127,23 @@ Users can override this in Settings via the global `docker_socket_path` option.
|
||||
| `app/src/components/layout/TopBar.tsx` | Terminal tabs + Docker/Image status indicators |
|
||||
| `app/src/components/layout/Sidebar.tsx` | Responsive sidebar (25% width, min 224px, max 320px) |
|
||||
| `app/src/components/layout/StatusBar.tsx` | Running project/terminal counts |
|
||||
| `app/src/components/projects/ProjectCard.tsx` | Project config, auth mode, action buttons |
|
||||
| `app/src/components/projects/ProjectCard.tsx` | Project config, backend selector, action buttons |
|
||||
| `app/src/components/projects/ClaudeCodeSettingsModal.tsx` | Claude Code CLI settings modal (TUI mode, effort, focus, caching) |
|
||||
| `app/src/components/projects/ProjectList.tsx` | Project list in sidebar |
|
||||
| `app/src/components/projects/FileManagerModal.tsx` | File browser modal (browse, download, upload) |
|
||||
| `app/src/components/projects/ContainerProgressModal.tsx` | Real-time container operation progress |
|
||||
| `app/src/components/mcp/McpPanel.tsx` | MCP server library (global configuration) |
|
||||
| `app/src/components/mcp/McpServerCard.tsx` | Individual MCP server configuration card |
|
||||
| `app/src/components/settings/SettingsPanel.tsx` | Docker, AWS, timezone, and global settings |
|
||||
| `app/src/components/settings/SettingsPanel.tsx` | Docker, AWS, timezone, web terminal, and global settings |
|
||||
| `app/src/components/settings/WebTerminalSettings.tsx` | Web terminal toggle, URL, token management |
|
||||
| `app/src/components/settings/SttSettings.tsx` | STT settings panel (model, port, language, container controls) |
|
||||
| `app/src/components/terminal/TerminalView.tsx` | xterm.js terminal with WebGL, URL detection, OSC 52 clipboard, image paste |
|
||||
| `app/src/components/terminal/SttButton.tsx` | Mic button overlay with on-demand container start |
|
||||
| `app/src/components/terminal/TerminalTabs.tsx` | Tab bar for multiple terminal sessions (claude + bash) |
|
||||
| `app/src/hooks/useTerminal.ts` | Terminal session management (claude and bash modes) |
|
||||
| `app/src/hooks/useFileManager.ts` | File manager operations (list, download, upload) |
|
||||
| `app/src/hooks/useMcpServers.ts` | MCP server CRUD operations |
|
||||
| `app/src/hooks/useVoice.ts` | Voice mode audio capture (currently hidden) |
|
||||
| `app/src/hooks/useSTT.ts` | Speech-to-text recording, transcription, and container management |
|
||||
| `app/src-tauri/src/docker/container.rs` | Container creation, mounts, env vars, MCP injection, fingerprinting |
|
||||
| `app/src-tauri/src/docker/exec.rs` | PTY exec sessions, file upload/download via tar |
|
||||
| `app/src-tauri/src/docker/image.rs` | Image building/pulling |
|
||||
@@ -122,12 +151,21 @@ Users can override this in Settings via the global `docker_socket_path` option.
|
||||
| `app/src-tauri/src/commands/project_commands.rs` | Start/stop/rebuild Tauri command handlers |
|
||||
| `app/src-tauri/src/commands/file_commands.rs` | File manager Tauri commands (list, download, upload) |
|
||||
| `app/src-tauri/src/commands/mcp_commands.rs` | MCP server CRUD Tauri commands |
|
||||
| `app/src-tauri/src/models/project.rs` | Project struct (auth mode, Docker access, MCP servers, Mission Control) |
|
||||
| `app/src-tauri/src/models/project.rs` | Project struct (backend, Docker access, Claude Code settings, MCP servers, Mission Control) |
|
||||
| `app/src-tauri/src/models/mcp_server.rs` | MCP server struct (transport, Docker image, env vars) |
|
||||
| `app/src-tauri/src/models/app_settings.rs` | Global settings (image source, Docker socket, AWS, microphone) |
|
||||
| `app/src-tauri/src/models/app_settings.rs` | Global settings (image source, Docker socket, AWS, Claude Code settings, web terminal, STT) |
|
||||
| `app/src-tauri/src/web_terminal/server.rs` | Axum HTTP+WS server for remote terminal access |
|
||||
| `app/src-tauri/src/web_terminal/ws_handler.rs` | WebSocket connection handler and session management |
|
||||
| `app/src-tauri/src/web_terminal/terminal.html` | Embedded web UI (xterm.js, project picker, tabs) |
|
||||
| `app/src-tauri/src/commands/stt_commands.rs` | STT start/stop/transcribe Tauri commands |
|
||||
| `app/src-tauri/src/commands/web_terminal_commands.rs` | Web terminal start/stop/status Tauri commands |
|
||||
| `app/src-tauri/src/storage/mcp_store.rs` | MCP server persistence (JSON with atomic writes) |
|
||||
| `app/src-tauri/src/docker/stt.rs` | STT Docker container lifecycle (create, start, stop, build, pull) |
|
||||
| `app/src/lib/wav.ts` | WAV audio encoding for STT transcription |
|
||||
| `stt-container/Dockerfile` | Faster Whisper STT container image (Python 3.11 + FastAPI) |
|
||||
| `stt-container/server.py` | STT HTTP server (POST /transcribe endpoint) |
|
||||
| `container/Dockerfile` | Ubuntu 24.04 sandbox image with Claude Code + dev tools + clipboard/audio shims |
|
||||
| `container/entrypoint.sh` | UID/GID remap, SSH setup, Docker group config, MCP injection, Mission Control setup |
|
||||
| `container/entrypoint.sh` | UID/GID remap, SSH setup, Docker group config, MCP injection, Claude Code settings injection, Mission Control setup |
|
||||
| `container/osc52-clipboard` | Clipboard shim (xclip/xsel/pbcopy via OSC 52) |
|
||||
| `container/audio-shim` | Audio capture shim (rec/arecord via FIFO) for voice mode |
|
||||
|
||||
|
||||
25
TECHNICAL.md
25
TECHNICAL.md
@@ -100,9 +100,13 @@ Tauri uses a Rust backend paired with a web-based frontend rendered by the OS-na
|
||||
│ │ Project Management │◄─┤ ProjectsStore │ │
|
||||
│ │ Settings UI │ │ bollard Docker Client │ │
|
||||
│ │ │ │ keyring Credential Mgr │ │
|
||||
│ └───────────┬───────────┘ └────────────┬─────────────┘ │
|
||||
│ └───────────┬───────────┘ │ Web Terminal Server │ │
|
||||
│ │ └────────────┬─────────────┘ │
|
||||
│ │ Tauri IPC (invoke/emit) │ │
|
||||
│ └───────────┬───────────────┘ │
|
||||
│ ▲ │
|
||||
│ axum HTTP+WS│(port 7681) │
|
||||
│ │ │
|
||||
└──────────────────────────┼───────────────────────────────┘
|
||||
│ Docker Socket
|
||||
▼
|
||||
@@ -129,6 +133,8 @@ The application uses two IPC mechanisms between the React frontend and Rust back
|
||||
|
||||
**Request/Response** (`invoke()`): Used for discrete operations — starting containers, saving settings, listing projects. The frontend calls `invoke("command_name", { args })` and awaits a typed result.
|
||||
|
||||
**WebSocket Streaming** (Web Terminal): Used for remote terminal access from browsers on the local network. An axum HTTP+WebSocket server runs inside the Tauri process, sharing the same `ExecSessionManager` via `Arc`-wrapped stores. The WebSocket uses a JSON protocol with base64-encoded terminal data. Each browser connection can open multiple terminal sessions; all sessions are cleaned up when the WebSocket disconnects.
|
||||
|
||||
**Event Streaming** (`emit()`/`listen()`): Used for continuous data — terminal I/O. When a terminal session is opened, the Rust backend spawns two tokio tasks:
|
||||
1. **Output reader** — Reads from the Docker exec stdout stream and emits `terminal-output-{sessionId}` events to the frontend.
|
||||
2. **Input writer** — Listens on an `mpsc::unbounded_channel` for data sent from the frontend via `invoke("terminal_input")` and writes it to the Docker exec stdin.
|
||||
@@ -263,7 +269,7 @@ triple-c/
|
||||
│ ├── projects/ # ProjectCard, ProjectList, AddProjectDialog,
|
||||
│ │ # FileManagerModal, ContainerProgressModal, modals
|
||||
│ ├── settings/ # SettingsPanel, DockerSettings, AwsSettings,
|
||||
│ │ # UpdateDialog
|
||||
│ │ # WebTerminalSettings, UpdateDialog
|
||||
│ └── terminal/ # TerminalView (xterm.js), TerminalTabs, UrlToast
|
||||
│
|
||||
└── src-tauri/ # Rust backend
|
||||
@@ -282,7 +288,13 @@ triple-c/
|
||||
│ ├── project_commands.rs # Start/stop/rebuild containers
|
||||
│ ├── settings_commands.rs # Settings CRUD
|
||||
│ ├── terminal_commands.rs # Terminal I/O, resize
|
||||
│ └── update_commands.rs # App update checking
|
||||
│ ├── update_commands.rs # App update checking
|
||||
│ └── web_terminal_commands.rs # Web terminal start/stop/status
|
||||
├── web_terminal/ # Remote terminal access
|
||||
│ ├── mod.rs # Module root
|
||||
│ ├── server.rs # Axum HTTP+WS server lifecycle
|
||||
│ ├── ws_handler.rs # WebSocket connection handler
|
||||
│ └── terminal.html # Embedded xterm.js web UI
|
||||
├── docker/ # Docker API layer
|
||||
│ ├── client.rs # bollard singleton connection
|
||||
│ ├── container.rs # Create, start, stop, remove, fingerprinting
|
||||
@@ -290,7 +302,7 @@ triple-c/
|
||||
│ ├── image.rs # Build from Dockerfile, pull from registry
|
||||
│ └── network.rs # Per-project bridge networks for MCP
|
||||
├── models/ # Data structures
|
||||
│ ├── project.rs # Project, AuthMode, BedrockConfig
|
||||
│ ├── project.rs # Project, Backend, BedrockConfig
|
||||
│ ├── mcp_server.rs # MCP server configuration
|
||||
│ ├── app_settings.rs # Global settings (image source, AWS, etc.)
|
||||
│ ├── container_config.rs # Image name resolution
|
||||
@@ -323,6 +335,11 @@ triple-c/
|
||||
| `tar` | 0.4 | In-memory tar archives for Docker build context |
|
||||
| `dirs` | 6.x | Cross-platform app data directory paths |
|
||||
| `serde` / `serde_json` | 1.x | Serialization for IPC and persistence |
|
||||
| `axum` | 0.8 | HTTP+WebSocket server for web terminal |
|
||||
| `tower-http` | 0.6 | CORS middleware for web terminal |
|
||||
| `base64` | 0.22 | Terminal data encoding over WebSocket |
|
||||
| `rand` | 0.9 | Access token generation |
|
||||
| `local-ip-address` | 0.6 | LAN IP detection for web terminal URL |
|
||||
|
||||
### JavaScript (Frontend)
|
||||
|
||||
|
||||
10
app/package-lock.json
generated
10
app/package-lock.json
generated
@@ -9,7 +9,7 @@
|
||||
"version": "0.2.0",
|
||||
"dependencies": {
|
||||
"@tauri-apps/api": "^2",
|
||||
"@tauri-apps/plugin-dialog": "^2",
|
||||
"@tauri-apps/plugin-dialog": "^2.7.0",
|
||||
"@tauri-apps/plugin-opener": "^2.5.3",
|
||||
"@tauri-apps/plugin-store": "^2",
|
||||
"@xterm/addon-fit": "^0.10",
|
||||
@@ -1984,12 +1984,12 @@
|
||||
}
|
||||
},
|
||||
"node_modules/@tauri-apps/plugin-dialog": {
|
||||
"version": "2.6.0",
|
||||
"resolved": "https://registry.npmjs.org/@tauri-apps/plugin-dialog/-/plugin-dialog-2.6.0.tgz",
|
||||
"integrity": "sha512-q4Uq3eY87TdcYzXACiYSPhmpBA76shgmQswGkSVio4C82Sz2W4iehe9TnKYwbq7weHiL88Yw19XZm7v28+Micg==",
|
||||
"version": "2.7.0",
|
||||
"resolved": "https://registry.npmjs.org/@tauri-apps/plugin-dialog/-/plugin-dialog-2.7.0.tgz",
|
||||
"integrity": "sha512-4nS/hfGMGCXiAS3LtVjH9AgsSAPJeG/7R+q8agTFqytjnMa4Zq95Bq8WzVDkckpanX+yyRHXnRtrKXkANKDHvw==",
|
||||
"license": "MIT OR Apache-2.0",
|
||||
"dependencies": {
|
||||
"@tauri-apps/api": "^2.8.0"
|
||||
"@tauri-apps/api": "^2.10.1"
|
||||
}
|
||||
},
|
||||
"node_modules/@tauri-apps/plugin-opener": {
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
{
|
||||
"name": "triple-c",
|
||||
"private": true,
|
||||
"version": "0.2.0",
|
||||
"version": "0.3.0",
|
||||
"type": "module",
|
||||
"scripts": {
|
||||
"dev": "vite",
|
||||
@@ -13,7 +13,7 @@
|
||||
},
|
||||
"dependencies": {
|
||||
"@tauri-apps/api": "^2",
|
||||
"@tauri-apps/plugin-dialog": "^2",
|
||||
"@tauri-apps/plugin-dialog": "^2.7.0",
|
||||
"@tauri-apps/plugin-opener": "^2.5.3",
|
||||
"@tauri-apps/plugin-store": "^2",
|
||||
"@xterm/addon-fit": "^0.10",
|
||||
|
||||
339
app/src-tauri/Cargo.lock
generated
339
app/src-tauri/Cargo.lock
generated
@@ -213,6 +213,61 @@ version = "1.5.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8"
|
||||
|
||||
[[package]]
|
||||
name = "axum"
|
||||
version = "0.8.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8b52af3cb4058c895d37317bb27508dccc8e5f2d39454016b297bf4a400597b8"
|
||||
dependencies = [
|
||||
"axum-core",
|
||||
"base64 0.22.1",
|
||||
"bytes",
|
||||
"form_urlencoded",
|
||||
"futures-util",
|
||||
"http",
|
||||
"http-body",
|
||||
"http-body-util",
|
||||
"hyper",
|
||||
"hyper-util",
|
||||
"itoa",
|
||||
"matchit",
|
||||
"memchr",
|
||||
"mime",
|
||||
"percent-encoding",
|
||||
"pin-project-lite",
|
||||
"serde_core",
|
||||
"serde_json",
|
||||
"serde_path_to_error",
|
||||
"serde_urlencoded",
|
||||
"sha1",
|
||||
"sync_wrapper",
|
||||
"tokio",
|
||||
"tokio-tungstenite",
|
||||
"tower",
|
||||
"tower-layer",
|
||||
"tower-service",
|
||||
"tracing",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "axum-core"
|
||||
version = "0.5.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "08c78f31d7b1291f7ee735c1c6780ccde7785daae9a9206026862dab7d8792d1"
|
||||
dependencies = [
|
||||
"bytes",
|
||||
"futures-core",
|
||||
"http",
|
||||
"http-body",
|
||||
"http-body-util",
|
||||
"mime",
|
||||
"pin-project-lite",
|
||||
"sync_wrapper",
|
||||
"tower-layer",
|
||||
"tower-service",
|
||||
"tracing",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "base64"
|
||||
version = "0.21.7"
|
||||
@@ -664,14 +719,38 @@ dependencies = [
|
||||
"syn 2.0.117",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "darling"
|
||||
version = "0.20.11"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee"
|
||||
dependencies = [
|
||||
"darling_core 0.20.11",
|
||||
"darling_macro 0.20.11",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "darling"
|
||||
version = "0.21.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9cdf337090841a411e2a7f3deb9187445851f91b309c0c0a29e05f74a00a48c0"
|
||||
dependencies = [
|
||||
"darling_core",
|
||||
"darling_macro",
|
||||
"darling_core 0.21.3",
|
||||
"darling_macro 0.21.3",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "darling_core"
|
||||
version = "0.20.11"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0d00b9596d185e565c2207a0b01f8bd1a135483d02d9b7b0a54b11da8d53412e"
|
||||
dependencies = [
|
||||
"fnv",
|
||||
"ident_case",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"strsim",
|
||||
"syn 2.0.117",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -688,17 +767,34 @@ dependencies = [
|
||||
"syn 2.0.117",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "darling_macro"
|
||||
version = "0.20.11"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead"
|
||||
dependencies = [
|
||||
"darling_core 0.20.11",
|
||||
"quote",
|
||||
"syn 2.0.117",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "darling_macro"
|
||||
version = "0.21.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81"
|
||||
dependencies = [
|
||||
"darling_core",
|
||||
"darling_core 0.21.3",
|
||||
"quote",
|
||||
"syn 2.0.117",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "data-encoding"
|
||||
version = "2.10.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d7a1e2f27636f116493b8b860f5546edb47c8d8f8ea73e1d2a20be88e28d1fea"
|
||||
|
||||
[[package]]
|
||||
name = "deranged"
|
||||
version = "0.5.8"
|
||||
@@ -709,6 +805,37 @@ dependencies = [
|
||||
"serde_core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "derive_builder"
|
||||
version = "0.20.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "507dfb09ea8b7fa618fcf76e953f4f5e192547945816d5358edffe39f6f94947"
|
||||
dependencies = [
|
||||
"derive_builder_macro",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "derive_builder_core"
|
||||
version = "0.20.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2d5bcf7b024d6835cfb3d473887cd966994907effbe9227e8c8219824d06c4e8"
|
||||
dependencies = [
|
||||
"darling 0.20.11",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.117",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "derive_builder_macro"
|
||||
version = "0.20.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c"
|
||||
dependencies = [
|
||||
"derive_builder_core",
|
||||
"syn 2.0.117",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "derive_more"
|
||||
version = "0.99.20"
|
||||
@@ -841,6 +968,12 @@ version = "1.0.20"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555"
|
||||
|
||||
[[package]]
|
||||
name = "either"
|
||||
version = "1.15.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719"
|
||||
|
||||
[[package]]
|
||||
name = "embed-resource"
|
||||
version = "3.0.6"
|
||||
@@ -912,7 +1045,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"windows-sys 0.61.2",
|
||||
"windows-sys 0.52.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1309,6 +1442,18 @@ dependencies = [
|
||||
"wasip3",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "getset"
|
||||
version = "0.1.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9cf0fc11e47561d47397154977bc219f4cf809b2974facc3ccb3b89e2436f912"
|
||||
dependencies = [
|
||||
"proc-macro-error2",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.117",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "gio"
|
||||
version = "0.18.4"
|
||||
@@ -1812,6 +1957,25 @@ dependencies = [
|
||||
"png 0.18.1",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "include_dir"
|
||||
version = "0.7.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "923d117408f1e49d914f1a379a309cffe4f18c05cf4e3d12e613a15fc81bd0dd"
|
||||
dependencies = [
|
||||
"include_dir_macros",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "include_dir_macros"
|
||||
version = "0.7.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7cab85a7ed0bd5f0e76d93846e0147172bed2e2d3f859bcc33a8d9699cad1a75"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "indexmap"
|
||||
version = "1.9.3"
|
||||
@@ -2085,6 +2249,17 @@ version = "0.8.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77"
|
||||
|
||||
[[package]]
|
||||
name = "local-ip-address"
|
||||
version = "0.6.10"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "79ef8c257c92ade496781a32a581d43e3d512cf8ce714ecf04ea80f93ed0ff4a"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"neli",
|
||||
"windows-sys 0.61.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "lock_api"
|
||||
version = "0.4.14"
|
||||
@@ -2143,6 +2318,12 @@ version = "0.1.10"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5"
|
||||
|
||||
[[package]]
|
||||
name = "matchit"
|
||||
version = "0.8.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "47e1ffaa40ddd1f3ed91f717a33c8c0ee23fff369e3aa8772b9605cc1d22f4c3"
|
||||
|
||||
[[package]]
|
||||
name = "memchr"
|
||||
version = "2.8.0"
|
||||
@@ -2164,6 +2345,16 @@ version = "0.3.17"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a"
|
||||
|
||||
[[package]]
|
||||
name = "mime_guess"
|
||||
version = "2.0.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f7c44f8e672c00fe5308fa235f821cb4198414e1c77935c1ab6948d3fd78550e"
|
||||
dependencies = [
|
||||
"mime",
|
||||
"unicase",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "miniz_oxide"
|
||||
version = "0.8.9"
|
||||
@@ -2246,6 +2437,35 @@ dependencies = [
|
||||
"jni-sys",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "neli"
|
||||
version = "0.7.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "22f9786d56d972959e1408b6a93be6af13b9c1392036c5c1fafa08a1b0c6ee87"
|
||||
dependencies = [
|
||||
"bitflags 2.11.0",
|
||||
"byteorder",
|
||||
"derive_builder",
|
||||
"getset",
|
||||
"libc",
|
||||
"log",
|
||||
"neli-proc-macros",
|
||||
"parking_lot",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "neli-proc-macros"
|
||||
version = "0.2.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "05d8d08c6e98f20a62417478ebf7be8e1425ec9acecc6f63e22da633f6b71609"
|
||||
dependencies = [
|
||||
"either",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"serde",
|
||||
"syn 2.0.117",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "new_debug_unreachable"
|
||||
version = "1.0.6"
|
||||
@@ -2916,6 +3136,28 @@ dependencies = [
|
||||
"version_check",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "proc-macro-error-attr2"
|
||||
version = "2.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "proc-macro-error2"
|
||||
version = "2.0.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802"
|
||||
dependencies = [
|
||||
"proc-macro-error-attr2",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.117",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "proc-macro-hack"
|
||||
version = "0.5.20+deprecated"
|
||||
@@ -3001,7 +3243,7 @@ dependencies = [
|
||||
"once_cell",
|
||||
"socket2",
|
||||
"tracing",
|
||||
"windows-sys 0.60.2",
|
||||
"windows-sys 0.52.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -3222,6 +3464,7 @@ dependencies = [
|
||||
"base64 0.22.1",
|
||||
"bytes",
|
||||
"futures-core",
|
||||
"futures-util",
|
||||
"http",
|
||||
"http-body",
|
||||
"http-body-util",
|
||||
@@ -3230,6 +3473,7 @@ dependencies = [
|
||||
"hyper-util",
|
||||
"js-sys",
|
||||
"log",
|
||||
"mime_guess",
|
||||
"percent-encoding",
|
||||
"pin-project-lite",
|
||||
"quinn",
|
||||
@@ -3348,7 +3592,7 @@ dependencies = [
|
||||
"errno",
|
||||
"libc",
|
||||
"linux-raw-sys",
|
||||
"windows-sys 0.61.2",
|
||||
"windows-sys 0.52.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -3594,6 +3838,17 @@ dependencies = [
|
||||
"zmij",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "serde_path_to_error"
|
||||
version = "0.1.20"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "10a9ff822e371bb5403e391ecd83e182e0e77ba7f6fe0160b795797109d1b457"
|
||||
dependencies = [
|
||||
"itoa",
|
||||
"serde",
|
||||
"serde_core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "serde_repr"
|
||||
version = "0.1.20"
|
||||
@@ -3660,7 +3915,7 @@ version = "3.17.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a6d4e30573c8cb306ed6ab1dca8423eec9a463ea0e155f45399455e0368b27e0"
|
||||
dependencies = [
|
||||
"darling",
|
||||
"darling 0.21.3",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.117",
|
||||
@@ -3698,6 +3953,17 @@ dependencies = [
|
||||
"stable_deref_trait",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "sha1"
|
||||
version = "0.10.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"cpufeatures",
|
||||
"digest",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "sha2"
|
||||
version = "0.10.9"
|
||||
@@ -4124,9 +4390,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "tauri-plugin-dialog"
|
||||
version = "2.6.0"
|
||||
version = "2.7.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9204b425d9be8d12aa60c2a83a289cf7d1caae40f57f336ed1155b3a5c0e359b"
|
||||
checksum = "a1fa4150c95ae391946cc8b8f905ab14797427caba3a8a2f79628e956da91809"
|
||||
dependencies = [
|
||||
"log",
|
||||
"raw-window-handle",
|
||||
@@ -4142,13 +4408,15 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "tauri-plugin-fs"
|
||||
version = "2.4.5"
|
||||
version = "2.5.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ed390cc669f937afeb8b28032ce837bac8ea023d975a2e207375ec05afaf1804"
|
||||
checksum = "36e1ec28b79f3d0683f4507e1615c36292c0ea6716668770d4396b9b39871ed8"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"dunce",
|
||||
"glob",
|
||||
"log",
|
||||
"objc2-foundation",
|
||||
"percent-encoding",
|
||||
"schemars 0.8.22",
|
||||
"serde",
|
||||
@@ -4311,7 +4579,7 @@ dependencies = [
|
||||
"getrandom 0.4.1",
|
||||
"once_cell",
|
||||
"rustix",
|
||||
"windows-sys 0.61.2",
|
||||
"windows-sys 0.52.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -4459,6 +4727,18 @@ dependencies = [
|
||||
"tokio",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tokio-tungstenite"
|
||||
version = "0.28.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d25a406cddcc431a75d3d9afc6a7c0f7428d4891dd973e4d54c56b46127bf857"
|
||||
dependencies = [
|
||||
"futures-util",
|
||||
"log",
|
||||
"tokio",
|
||||
"tungstenite",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tokio-util"
|
||||
version = "0.7.18"
|
||||
@@ -4581,6 +4861,7 @@ dependencies = [
|
||||
"tokio",
|
||||
"tower-layer",
|
||||
"tower-service",
|
||||
"tracing",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -4619,6 +4900,7 @@ version = "0.1.44"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "63e71662fa4b2a2c3a26f570f037eb95bb1f85397f3cd8076caed2f026a6d100"
|
||||
dependencies = [
|
||||
"log",
|
||||
"pin-project-lite",
|
||||
"tracing-attributes",
|
||||
"tracing-core",
|
||||
@@ -4668,16 +4950,21 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "triple-c"
|
||||
version = "0.2.0"
|
||||
version = "0.3.0"
|
||||
dependencies = [
|
||||
"axum",
|
||||
"base64 0.22.1",
|
||||
"bollard",
|
||||
"chrono",
|
||||
"dirs",
|
||||
"fern",
|
||||
"futures-util",
|
||||
"iana-time-zone",
|
||||
"include_dir",
|
||||
"keyring",
|
||||
"local-ip-address",
|
||||
"log",
|
||||
"rand 0.9.2",
|
||||
"reqwest 0.12.28",
|
||||
"serde",
|
||||
"serde_json",
|
||||
@@ -4689,6 +4976,7 @@ dependencies = [
|
||||
"tauri-plugin-opener",
|
||||
"tauri-plugin-store",
|
||||
"tokio",
|
||||
"tower-http",
|
||||
"uuid",
|
||||
]
|
||||
|
||||
@@ -4698,6 +4986,23 @@ version = "0.2.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b"
|
||||
|
||||
[[package]]
|
||||
name = "tungstenite"
|
||||
version = "0.28.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8628dcc84e5a09eb3d8423d6cb682965dea9133204e8fb3efee74c2a0c259442"
|
||||
dependencies = [
|
||||
"bytes",
|
||||
"data-encoding",
|
||||
"http",
|
||||
"httparse",
|
||||
"log",
|
||||
"rand 0.9.2",
|
||||
"sha1",
|
||||
"thiserror 2.0.18",
|
||||
"utf-8",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "typeid"
|
||||
version = "1.0.3"
|
||||
@@ -4762,6 +5067,12 @@ dependencies = [
|
||||
"unic-common",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "unicase"
|
||||
version = "2.9.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "dbc4bc3a9f746d862c45cb89d705aa10f187bb96c76001afab07a0d35ce60142"
|
||||
|
||||
[[package]]
|
||||
name = "unicode-ident"
|
||||
version = "1.0.24"
|
||||
@@ -5153,7 +5464,7 @@ version = "0.1.11"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22"
|
||||
dependencies = [
|
||||
"windows-sys 0.61.2",
|
||||
"windows-sys 0.52.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "triple-c"
|
||||
version = "0.2.0"
|
||||
version = "0.3.0"
|
||||
edition = "2021"
|
||||
|
||||
[lib]
|
||||
@@ -28,9 +28,15 @@ dirs = "6"
|
||||
log = "0.4"
|
||||
fern = { version = "0.7", features = ["date-based"] }
|
||||
tar = "0.4"
|
||||
reqwest = { version = "0.12", default-features = false, features = ["json", "rustls-tls"] }
|
||||
include_dir = "0.7"
|
||||
reqwest = { version = "0.12", default-features = false, features = ["json", "rustls-tls", "multipart"] }
|
||||
iana-time-zone = "0.1"
|
||||
sha2 = "0.10"
|
||||
axum = { version = "0.8", features = ["ws"] }
|
||||
tower-http = { version = "0.6", features = ["cors"] }
|
||||
base64 = "0.22"
|
||||
rand = "0.9"
|
||||
local-ip-address = "0.6"
|
||||
|
||||
[build-dependencies]
|
||||
tauri-build = { version = "2", features = [] }
|
||||
|
||||
@@ -1,23 +1,58 @@
|
||||
use tauri::State;
|
||||
|
||||
use crate::models::Project;
|
||||
use crate::AppState;
|
||||
|
||||
#[tauri::command]
|
||||
pub async fn aws_sso_refresh(
|
||||
project_id: String,
|
||||
state: State<'_, AppState>,
|
||||
) -> Result<(), String> {
|
||||
let project = state.projects_store.get(&project_id)
|
||||
.ok_or_else(|| format!("Project {} not found", project_id))?;
|
||||
|
||||
let profile = project.bedrock_config.as_ref()
|
||||
/// Resolve AWS profile: project-level → global settings → "default".
|
||||
pub fn resolve_profile_for_project(project: &Project, global_profile: Option<&str>) -> String {
|
||||
project
|
||||
.bedrock_config
|
||||
.as_ref()
|
||||
.and_then(|b| b.aws_profile.clone())
|
||||
.or_else(|| state.settings_store.get().global_aws.aws_profile.clone())
|
||||
.unwrap_or_else(|| "default".to_string());
|
||||
.or_else(|| global_profile.map(|s| s.to_string()))
|
||||
.unwrap_or_else(|| "default".to_string())
|
||||
}
|
||||
|
||||
/// Check if the AWS session is valid for the given profile on the host.
|
||||
/// Returns `Ok(true)` if valid, `Ok(false)` if expired/invalid.
|
||||
pub async fn check_sso_session(profile: &str) -> Result<bool, String> {
|
||||
let output = tokio::process::Command::new("aws")
|
||||
.args(["sts", "get-caller-identity", "--profile", profile])
|
||||
.output()
|
||||
.await
|
||||
.map_err(|e| format!("Failed to run aws sts get-caller-identity: {}", e))?;
|
||||
Ok(output.status.success())
|
||||
}
|
||||
|
||||
/// Check if the given AWS profile uses SSO (has sso_start_url or sso_session configured).
|
||||
pub async fn is_sso_profile(profile: &str) -> Result<bool, String> {
|
||||
let check_start_url = tokio::process::Command::new("aws")
|
||||
.args(["configure", "get", "sso_start_url", "--profile", profile])
|
||||
.output()
|
||||
.await;
|
||||
if let Ok(out) = check_start_url {
|
||||
if out.status.success() {
|
||||
return Ok(true);
|
||||
}
|
||||
}
|
||||
let check_session = tokio::process::Command::new("aws")
|
||||
.args(["configure", "get", "sso_session", "--profile", profile])
|
||||
.output()
|
||||
.await;
|
||||
if let Ok(out) = check_session {
|
||||
if out.status.success() {
|
||||
return Ok(true);
|
||||
}
|
||||
}
|
||||
Ok(false)
|
||||
}
|
||||
|
||||
/// Run `aws sso login --profile X` on the host. This is interactive (opens a browser).
|
||||
pub async fn run_sso_login(profile: &str) -> Result<(), String> {
|
||||
log::info!("Running host-side AWS SSO login for profile '{}'", profile);
|
||||
|
||||
let status = tokio::process::Command::new("aws")
|
||||
.args(["sso", "login", "--profile", &profile])
|
||||
.args(["sso", "login", "--profile", profile])
|
||||
.status()
|
||||
.await
|
||||
.map_err(|e| format!("Failed to run aws sso login: {}", e))?;
|
||||
@@ -28,3 +63,19 @@ pub async fn aws_sso_refresh(
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tauri::command]
|
||||
pub async fn aws_sso_refresh(
|
||||
project_id: String,
|
||||
state: State<'_, AppState>,
|
||||
) -> Result<(), String> {
|
||||
let project = state.projects_store.get(&project_id)
|
||||
.ok_or_else(|| format!("Project {} not found", project_id))?;
|
||||
|
||||
let profile = resolve_profile_for_project(
|
||||
&project,
|
||||
state.settings_store.get().global_aws.aws_profile.as_deref(),
|
||||
);
|
||||
|
||||
run_sso_login(&profile).await
|
||||
}
|
||||
|
||||
60
app/src-tauri/src/commands/help_commands.rs
Normal file
60
app/src-tauri/src/commands/help_commands.rs
Normal file
@@ -0,0 +1,60 @@
|
||||
use std::sync::OnceLock;
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
const HELP_URL: &str =
|
||||
"https://raw.githubusercontent.com/shadowdao/triple-c/main/HOW-TO-USE.md";
|
||||
|
||||
const EMBEDDED_HELP: &str = include_str!("../../../../HOW-TO-USE.md");
|
||||
|
||||
/// Cached help content fetched from the remote repo (or `None` if not yet fetched).
|
||||
static CACHED_HELP: OnceLock<Mutex<Option<String>>> = OnceLock::new();
|
||||
|
||||
/// Return the help markdown content.
|
||||
///
|
||||
/// On the first call, tries to fetch the latest version from the gitea repo.
|
||||
/// If that fails (network error, timeout, etc.), falls back to the version
|
||||
/// embedded at compile time. The result is cached for the rest of the session.
|
||||
#[tauri::command]
|
||||
pub async fn get_help_content() -> Result<String, String> {
|
||||
let mutex = CACHED_HELP.get_or_init(|| Mutex::new(None));
|
||||
let mut guard = mutex.lock().await;
|
||||
|
||||
if let Some(ref cached) = *guard {
|
||||
return Ok(cached.clone());
|
||||
}
|
||||
|
||||
let content = match fetch_remote_help().await {
|
||||
Ok(md) => {
|
||||
log::info!("Loaded help content from remote repo");
|
||||
md
|
||||
}
|
||||
Err(e) => {
|
||||
log::info!("Using embedded help content (remote fetch failed: {})", e);
|
||||
EMBEDDED_HELP.to_string()
|
||||
}
|
||||
};
|
||||
|
||||
*guard = Some(content.clone());
|
||||
Ok(content)
|
||||
}
|
||||
|
||||
async fn fetch_remote_help() -> Result<String, String> {
|
||||
let client = reqwest::Client::builder()
|
||||
.timeout(std::time::Duration::from_secs(10))
|
||||
.build()
|
||||
.map_err(|e| format!("Failed to create HTTP client: {}", e))?;
|
||||
|
||||
let resp = client
|
||||
.get(HELP_URL)
|
||||
.send()
|
||||
.await
|
||||
.map_err(|e| format!("Failed to fetch help content: {}", e))?;
|
||||
|
||||
if !resp.status().is_success() {
|
||||
return Err(format!("Remote returned status {}", resp.status()));
|
||||
}
|
||||
|
||||
resp.text()
|
||||
.await
|
||||
.map_err(|e| format!("Failed to read response body: {}", e))
|
||||
}
|
||||
@@ -1,8 +1,11 @@
|
||||
pub mod aws_commands;
|
||||
pub mod docker_commands;
|
||||
pub mod file_commands;
|
||||
pub mod help_commands;
|
||||
pub mod mcp_commands;
|
||||
pub mod project_commands;
|
||||
pub mod settings_commands;
|
||||
pub mod stt_commands;
|
||||
pub mod terminal_commands;
|
||||
pub mod update_commands;
|
||||
pub mod web_terminal_commands;
|
||||
|
||||
@@ -1,7 +1,8 @@
|
||||
use tauri::{Emitter, State};
|
||||
|
||||
use crate::commands::aws_commands;
|
||||
use crate::docker;
|
||||
use crate::models::{container_config, AuthMode, McpServer, Project, ProjectPath, ProjectStatus};
|
||||
use crate::models::{container_config, Backend, BedrockAuthMethod, McpServer, Project, ProjectPath, ProjectStatus};
|
||||
use crate::storage::secure;
|
||||
use crate::AppState;
|
||||
|
||||
@@ -34,9 +35,9 @@ fn store_secrets_for_project(project: &Project) -> Result<(), String> {
|
||||
secure::store_project_secret(&project.id, "aws-bearer-token", v)?;
|
||||
}
|
||||
}
|
||||
if let Some(ref litellm) = project.litellm_config {
|
||||
if let Some(ref v) = litellm.api_key {
|
||||
secure::store_project_secret(&project.id, "litellm-api-key", v)?;
|
||||
if let Some(ref oai_config) = project.openai_compatible_config {
|
||||
if let Some(ref v) = oai_config.api_key {
|
||||
secure::store_project_secret(&project.id, "openai-compatible-api-key", v)?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
@@ -56,8 +57,8 @@ fn load_secrets_for_project(project: &mut Project) {
|
||||
bedrock.aws_bearer_token = secure::get_project_secret(&project.id, "aws-bearer-token")
|
||||
.unwrap_or(None);
|
||||
}
|
||||
if let Some(ref mut litellm) = project.litellm_config {
|
||||
litellm.api_key = secure::get_project_secret(&project.id, "litellm-api-key")
|
||||
if let Some(ref mut oai_config) = project.openai_compatible_config {
|
||||
oai_config.api_key = secure::get_project_secret(&project.id, "openai-compatible-api-key")
|
||||
.unwrap_or(None);
|
||||
}
|
||||
}
|
||||
@@ -179,35 +180,105 @@ pub async fn start_project_container(
|
||||
// Resolve enabled MCP servers for this project
|
||||
let (enabled_mcp, docker_mcp) = resolve_mcp_servers(&project, &state);
|
||||
|
||||
// Validate auth mode requirements
|
||||
if project.auth_mode == AuthMode::Bedrock {
|
||||
// Validate backend requirements
|
||||
if project.backend == Backend::Bedrock {
|
||||
let bedrock = project.bedrock_config.as_ref()
|
||||
.ok_or_else(|| "Bedrock auth mode selected but no Bedrock configuration found.".to_string())?;
|
||||
.ok_or_else(|| "Bedrock backend selected but no Bedrock configuration found.".to_string())?;
|
||||
// Region can come from per-project or global
|
||||
if bedrock.aws_region.is_empty() && settings.global_aws.aws_region.is_none() {
|
||||
return Err("AWS region is required for Bedrock auth mode. Set it per-project or in global AWS settings.".to_string());
|
||||
return Err("AWS region is required for Bedrock backend. Set it per-project or in global AWS settings.".to_string());
|
||||
}
|
||||
}
|
||||
|
||||
if project.auth_mode == AuthMode::Ollama {
|
||||
if project.backend == Backend::Ollama {
|
||||
let ollama = project.ollama_config.as_ref()
|
||||
.ok_or_else(|| "Ollama auth mode selected but no Ollama configuration found.".to_string())?;
|
||||
.ok_or_else(|| "Ollama backend selected but no Ollama configuration found.".to_string())?;
|
||||
if ollama.base_url.is_empty() {
|
||||
return Err("Ollama base URL is required.".to_string());
|
||||
}
|
||||
}
|
||||
|
||||
if project.auth_mode == AuthMode::LiteLlm {
|
||||
let litellm = project.litellm_config.as_ref()
|
||||
.ok_or_else(|| "LiteLLM auth mode selected but no LiteLLM configuration found.".to_string())?;
|
||||
if litellm.base_url.is_empty() {
|
||||
return Err("LiteLLM base URL is required.".to_string());
|
||||
if project.backend == Backend::OpenAiCompatible {
|
||||
let oai_config = project.openai_compatible_config.as_ref()
|
||||
.ok_or_else(|| "OpenAI Compatible backend selected but no configuration found.".to_string())?;
|
||||
if oai_config.base_url.is_empty() {
|
||||
return Err("OpenAI Compatible base URL is required.".to_string());
|
||||
}
|
||||
}
|
||||
|
||||
// Update status to starting
|
||||
state.projects_store.update_status(&project_id, ProjectStatus::Starting)?;
|
||||
|
||||
// Pre-validate AWS SSO session on the host for Bedrock Profile projects.
|
||||
// If the session is expired, trigger `aws sso login` before starting the container
|
||||
// so the entrypoint copies already-fresh credentials from the host mount.
|
||||
if project.backend == Backend::Bedrock {
|
||||
if let Some(ref bedrock) = project.bedrock_config {
|
||||
if bedrock.auth_method == BedrockAuthMethod::Profile {
|
||||
let profile = aws_commands::resolve_profile_for_project(
|
||||
&project,
|
||||
settings.global_aws.aws_profile.as_deref(),
|
||||
);
|
||||
|
||||
emit_progress(&app_handle, &project_id, "Validating AWS session...");
|
||||
|
||||
let session_valid = tokio::time::timeout(
|
||||
std::time::Duration::from_secs(10),
|
||||
aws_commands::check_sso_session(&profile),
|
||||
)
|
||||
.await;
|
||||
|
||||
match session_valid {
|
||||
Ok(Ok(true)) => {
|
||||
emit_progress(&app_handle, &project_id, "AWS session valid.");
|
||||
}
|
||||
Ok(Ok(false)) => {
|
||||
// Session expired — check if this is an SSO profile
|
||||
if aws_commands::is_sso_profile(&profile).await.unwrap_or(false) {
|
||||
emit_progress(
|
||||
&app_handle,
|
||||
&project_id,
|
||||
"AWS session expired. Starting SSO login (check your browser)...",
|
||||
);
|
||||
match aws_commands::run_sso_login(&profile).await {
|
||||
Ok(()) => {
|
||||
emit_progress(
|
||||
&app_handle,
|
||||
&project_id,
|
||||
"SSO login successful.",
|
||||
);
|
||||
}
|
||||
Err(e) => {
|
||||
log::warn!(
|
||||
"SSO login failed for profile '{}': {} — continuing anyway",
|
||||
profile,
|
||||
e
|
||||
);
|
||||
emit_progress(
|
||||
&app_handle,
|
||||
&project_id,
|
||||
"SSO login failed or cancelled. Continuing...",
|
||||
);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
log::warn!(
|
||||
"AWS session invalid for profile '{}' (not SSO). Continuing...",
|
||||
profile
|
||||
);
|
||||
}
|
||||
}
|
||||
Ok(Err(e)) => {
|
||||
log::warn!("Failed to check AWS session: {} — continuing anyway", e);
|
||||
}
|
||||
Err(_) => {
|
||||
log::warn!("AWS session check timed out — continuing anyway");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Wrap container operations so that any failure resets status to Stopped.
|
||||
let result: Result<String, String> = async {
|
||||
// Ensure image exists
|
||||
@@ -267,6 +338,10 @@ pub async fn start_project_container(
|
||||
&settings.global_custom_env_vars,
|
||||
settings.timezone.as_deref(),
|
||||
&enabled_mcp,
|
||||
settings.global_claude_code_settings.as_ref(),
|
||||
settings.default_ssh_key_path.as_deref(),
|
||||
settings.default_git_user_name.as_deref(),
|
||||
settings.default_git_user_email.as_deref(),
|
||||
).await.unwrap_or(false);
|
||||
|
||||
if needs_recreate {
|
||||
@@ -299,6 +374,10 @@ pub async fn start_project_container(
|
||||
settings.timezone.as_deref(),
|
||||
&enabled_mcp,
|
||||
network_name.as_deref(),
|
||||
settings.global_claude_code_settings.as_ref(),
|
||||
settings.default_ssh_key_path.as_deref(),
|
||||
settings.default_git_user_name.as_deref(),
|
||||
settings.default_git_user_email.as_deref(),
|
||||
).await?;
|
||||
emit_progress(&app_handle, &project_id, "Starting container...");
|
||||
docker::start_container(&new_id).await?;
|
||||
@@ -332,6 +411,10 @@ pub async fn start_project_container(
|
||||
settings.timezone.as_deref(),
|
||||
&enabled_mcp,
|
||||
network_name.as_deref(),
|
||||
settings.global_claude_code_settings.as_ref(),
|
||||
settings.default_ssh_key_path.as_deref(),
|
||||
settings.default_git_user_name.as_deref(),
|
||||
settings.default_git_user_email.as_deref(),
|
||||
).await?;
|
||||
emit_progress(&app_handle, &project_id, "Starting container...");
|
||||
docker::start_container(&new_id).await?;
|
||||
|
||||
92
app/src-tauri/src/commands/stt_commands.rs
Normal file
92
app/src-tauri/src/commands/stt_commands.rs
Normal file
@@ -0,0 +1,92 @@
|
||||
use tauri::{AppHandle, Emitter, State};
|
||||
|
||||
use crate::docker::stt;
|
||||
use crate::models::app_settings::SttStatus;
|
||||
use crate::AppState;
|
||||
|
||||
#[tauri::command]
|
||||
pub async fn get_stt_status(state: State<'_, AppState>) -> Result<SttStatus, String> {
|
||||
let settings = state.settings_store.get();
|
||||
stt::get_stt_status(&settings.stt).await
|
||||
}
|
||||
|
||||
#[tauri::command]
|
||||
pub async fn start_stt(state: State<'_, AppState>) -> Result<SttStatus, String> {
|
||||
let settings = state.settings_store.get();
|
||||
stt::ensure_stt_running(&settings.stt).await
|
||||
}
|
||||
|
||||
#[tauri::command]
|
||||
pub async fn stop_stt() -> Result<(), String> {
|
||||
stt::stop_stt_container().await
|
||||
}
|
||||
|
||||
#[tauri::command]
|
||||
pub async fn build_stt_image(app_handle: AppHandle) -> Result<(), String> {
|
||||
stt::build_stt_image(move |msg| {
|
||||
let _ = app_handle.emit("stt-build-progress", &msg);
|
||||
})
|
||||
.await
|
||||
}
|
||||
|
||||
#[tauri::command]
|
||||
pub async fn pull_stt_image(app_handle: AppHandle) -> Result<(), String> {
|
||||
stt::pull_stt_image(move |msg| {
|
||||
let _ = app_handle.emit("stt-pull-progress", &msg);
|
||||
})
|
||||
.await
|
||||
}
|
||||
|
||||
#[tauri::command]
|
||||
pub async fn transcribe_audio(
|
||||
audio_data: Vec<u8>,
|
||||
state: State<'_, AppState>,
|
||||
) -> Result<String, String> {
|
||||
let settings = state.settings_store.get();
|
||||
if !settings.stt.enabled {
|
||||
return Err("STT is not enabled".to_string());
|
||||
}
|
||||
|
||||
let url = format!("http://127.0.0.1:{}/transcribe", settings.stt.port);
|
||||
|
||||
let file_part = reqwest::multipart::Part::bytes(audio_data)
|
||||
.file_name("recording.wav")
|
||||
.mime_str("audio/wav")
|
||||
.map_err(|e| format!("Failed to create multipart: {}", e))?;
|
||||
|
||||
let mut form = reqwest::multipart::Form::new().part("file", file_part);
|
||||
|
||||
if let Some(ref lang) = settings.stt.language {
|
||||
form = form.text("language", lang.clone());
|
||||
}
|
||||
|
||||
let client = reqwest::Client::new();
|
||||
let response = client
|
||||
.post(&url)
|
||||
.multipart(form)
|
||||
.send()
|
||||
.await
|
||||
.map_err(|e| {
|
||||
if e.is_connect() {
|
||||
"STT container is not running. Start it from Settings.".to_string()
|
||||
} else {
|
||||
format!("Transcription request failed: {}", e)
|
||||
}
|
||||
})?;
|
||||
|
||||
if !response.status().is_success() {
|
||||
let status = response.status();
|
||||
let body = response.text().await.unwrap_or_default();
|
||||
return Err(format!("Transcription failed ({}): {}", status, body));
|
||||
}
|
||||
|
||||
let result: serde_json::Value = response
|
||||
.json()
|
||||
.await
|
||||
.map_err(|e| format!("Failed to parse transcription response: {}", e))?;
|
||||
|
||||
result["text"]
|
||||
.as_str()
|
||||
.map(|s| s.to_string())
|
||||
.ok_or_else(|| "No text in transcription response".to_string())
|
||||
}
|
||||
@@ -1,6 +1,7 @@
|
||||
use tauri::{AppHandle, Emitter, State};
|
||||
|
||||
use crate::models::{AuthMode, BedrockAuthMethod, Project};
|
||||
use crate::commands::aws_commands;
|
||||
use crate::models::{Backend, BedrockAuthMethod, Project};
|
||||
use crate::AppState;
|
||||
|
||||
/// Build the command to run in the container terminal.
|
||||
@@ -8,8 +9,8 @@ use crate::AppState;
|
||||
/// For Bedrock Profile projects, wraps `claude` in a bash script that validates
|
||||
/// the AWS session first. If the SSO session is expired, runs `aws sso login`
|
||||
/// so the user can re-authenticate (the URL is clickable via xterm.js WebLinksAddon).
|
||||
fn build_terminal_cmd(project: &Project, state: &AppState) -> Vec<String> {
|
||||
let is_bedrock_profile = project.auth_mode == AuthMode::Bedrock
|
||||
fn build_terminal_cmd(project: &Project, state: &AppState, session_name: Option<&str>) -> Vec<String> {
|
||||
let is_bedrock_profile = project.backend == Backend::Bedrock
|
||||
&& project
|
||||
.bedrock_config
|
||||
.as_ref()
|
||||
@@ -17,22 +18,36 @@ fn build_terminal_cmd(project: &Project, state: &AppState) -> Vec<String> {
|
||||
.unwrap_or(false);
|
||||
|
||||
if !is_bedrock_profile {
|
||||
return vec![
|
||||
"claude".to_string(),
|
||||
"--dangerously-skip-permissions".to_string(),
|
||||
];
|
||||
let mut cmd = vec!["claude".to_string()];
|
||||
if project.full_permissions {
|
||||
cmd.push("--dangerously-skip-permissions".to_string());
|
||||
}
|
||||
if let Some(name) = session_name {
|
||||
if !name.is_empty() {
|
||||
cmd.push("-n".to_string());
|
||||
cmd.push(name.to_string());
|
||||
}
|
||||
}
|
||||
return cmd;
|
||||
}
|
||||
|
||||
// Resolve AWS profile: project-level → global settings → "default"
|
||||
let profile = project
|
||||
.bedrock_config
|
||||
.as_ref()
|
||||
.and_then(|b| b.aws_profile.clone())
|
||||
.or_else(|| state.settings_store.get().global_aws.aws_profile.clone())
|
||||
.unwrap_or_else(|| "default".to_string());
|
||||
let profile = aws_commands::resolve_profile_for_project(
|
||||
project,
|
||||
state.settings_store.get().global_aws.aws_profile.as_deref(),
|
||||
);
|
||||
|
||||
// Build a bash wrapper that validates credentials, re-auths if needed,
|
||||
// then exec's into claude.
|
||||
let name_flag = session_name
|
||||
.filter(|n| !n.is_empty())
|
||||
.map(|n| format!(" -n '{}'", n.replace('\'', "'\\''")))
|
||||
.unwrap_or_default();
|
||||
let claude_cmd = if project.full_permissions {
|
||||
format!("exec claude --dangerously-skip-permissions{}", name_flag)
|
||||
} else {
|
||||
format!("exec claude{}", name_flag)
|
||||
};
|
||||
|
||||
let script = format!(
|
||||
r#"
|
||||
echo "Validating AWS session for profile '{profile}'..."
|
||||
@@ -58,9 +73,10 @@ else
|
||||
echo ""
|
||||
fi
|
||||
fi
|
||||
exec claude --dangerously-skip-permissions
|
||||
{claude_cmd}
|
||||
"#,
|
||||
profile = profile
|
||||
profile = profile,
|
||||
claude_cmd = claude_cmd
|
||||
);
|
||||
|
||||
vec![
|
||||
@@ -75,6 +91,7 @@ pub async fn open_terminal_session(
|
||||
project_id: String,
|
||||
session_id: String,
|
||||
session_type: Option<String>,
|
||||
session_name: Option<String>,
|
||||
app_handle: AppHandle,
|
||||
state: State<'_, AppState>,
|
||||
) -> Result<(), String> {
|
||||
@@ -90,7 +107,7 @@ pub async fn open_terminal_session(
|
||||
|
||||
let cmd = match session_type.as_deref() {
|
||||
Some("bash") => vec!["bash".to_string(), "-l".to_string()],
|
||||
_ => build_terminal_cmd(&project, &state),
|
||||
_ => build_terminal_cmd(&project, &state, session_name.as_deref()),
|
||||
};
|
||||
|
||||
let output_event = format!("terminal-output-{}", session_id);
|
||||
|
||||
@@ -1,7 +1,20 @@
|
||||
use crate::models::{GiteaRelease, ReleaseAsset, UpdateInfo};
|
||||
use serde::Deserialize;
|
||||
use tauri::State;
|
||||
|
||||
use crate::docker;
|
||||
use crate::models::{container_config, GitHubRelease, ImageUpdateInfo, ReleaseAsset, UpdateInfo};
|
||||
use crate::AppState;
|
||||
|
||||
const RELEASES_URL: &str =
|
||||
"https://repo.anhonesthost.net/api/v1/repos/cybercovellc/triple-c/releases";
|
||||
"https://api.github.com/repos/shadowdao/triple-c/releases";
|
||||
|
||||
/// GHCR container-registry API base (OCI distribution spec).
|
||||
const REGISTRY_API_BASE: &str =
|
||||
"https://ghcr.io/v2/shadowdao/triple-c-sandbox";
|
||||
|
||||
/// GHCR token endpoint for anonymous pull access.
|
||||
const GHCR_TOKEN_URL: &str =
|
||||
"https://ghcr.io/token?scope=repository:shadowdao/triple-c-sandbox:pull";
|
||||
|
||||
#[tauri::command]
|
||||
pub fn get_app_version() -> String {
|
||||
@@ -15,9 +28,10 @@ pub async fn check_for_updates() -> Result<Option<UpdateInfo>, String> {
|
||||
.build()
|
||||
.map_err(|e| format!("Failed to create HTTP client: {}", e))?;
|
||||
|
||||
let releases: Vec<GiteaRelease> = client
|
||||
let releases: Vec<GitHubRelease> = client
|
||||
.get(RELEASES_URL)
|
||||
.header("Accept", "application/json")
|
||||
.header("User-Agent", "triple-c-updater")
|
||||
.send()
|
||||
.await
|
||||
.map_err(|e| format!("Failed to fetch releases: {}", e))?
|
||||
@@ -26,30 +40,34 @@ pub async fn check_for_updates() -> Result<Option<UpdateInfo>, String> {
|
||||
.map_err(|e| format!("Failed to parse releases: {}", e))?;
|
||||
|
||||
let current_version = env!("CARGO_PKG_VERSION");
|
||||
let is_windows = cfg!(target_os = "windows");
|
||||
let current_semver = parse_semver(current_version).unwrap_or((0, 0, 0));
|
||||
|
||||
// Filter releases by platform tag suffix
|
||||
let platform_releases: Vec<&GiteaRelease> = releases
|
||||
// Determine platform-specific asset extensions
|
||||
let platform_extensions: &[&str] = if cfg!(target_os = "windows") {
|
||||
&[".msi", ".exe"]
|
||||
} else if cfg!(target_os = "macos") {
|
||||
&[".dmg", ".app.tar.gz"]
|
||||
} else {
|
||||
&[".AppImage", ".deb", ".rpm"]
|
||||
};
|
||||
|
||||
// Filter releases that have at least one asset matching the current platform
|
||||
let platform_releases: Vec<&GitHubRelease> = releases
|
||||
.iter()
|
||||
.filter(|r| {
|
||||
if is_windows {
|
||||
r.tag_name.ends_with("-win")
|
||||
} else {
|
||||
!r.tag_name.ends_with("-win")
|
||||
}
|
||||
r.assets.iter().any(|a| {
|
||||
platform_extensions.iter().any(|ext| a.name.ends_with(ext))
|
||||
})
|
||||
})
|
||||
.collect();
|
||||
|
||||
// Find the latest release with a higher patch version
|
||||
// Version format: 0.1.X or v0.1.X (tag may have prefix/suffix)
|
||||
let current_patch = parse_patch_version(current_version).unwrap_or(0);
|
||||
|
||||
let mut best: Option<(&GiteaRelease, u32)> = None;
|
||||
// Find the latest release with a higher semver version
|
||||
let mut best: Option<(&GitHubRelease, (u32, u32, u32))> = None;
|
||||
for release in &platform_releases {
|
||||
if let Some(patch) = parse_patch_from_tag(&release.tag_name) {
|
||||
if patch > current_patch {
|
||||
if best.is_none() || patch > best.unwrap().1 {
|
||||
best = Some((release, patch));
|
||||
if let Some(ver) = parse_semver_from_tag(&release.tag_name) {
|
||||
if ver > current_semver {
|
||||
if best.is_none() || ver > best.unwrap().1 {
|
||||
best = Some((release, ver));
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -57,9 +75,13 @@ pub async fn check_for_updates() -> Result<Option<UpdateInfo>, String> {
|
||||
|
||||
match best {
|
||||
Some((release, _)) => {
|
||||
// Only include assets matching the current platform
|
||||
let assets = release
|
||||
.assets
|
||||
.iter()
|
||||
.filter(|a| {
|
||||
platform_extensions.iter().any(|ext| a.name.ends_with(ext))
|
||||
})
|
||||
.map(|a| ReleaseAsset {
|
||||
name: a.name.clone(),
|
||||
browser_download_url: a.browser_download_url.clone(),
|
||||
@@ -67,7 +89,6 @@ pub async fn check_for_updates() -> Result<Option<UpdateInfo>, String> {
|
||||
})
|
||||
.collect();
|
||||
|
||||
// Reconstruct version string from tag
|
||||
let version = extract_version_from_tag(&release.tag_name)
|
||||
.unwrap_or_else(|| release.tag_name.clone());
|
||||
|
||||
@@ -84,34 +105,152 @@ pub async fn check_for_updates() -> Result<Option<UpdateInfo>, String> {
|
||||
}
|
||||
}
|
||||
|
||||
/// Parse patch version from a semver string like "0.1.5" -> 5
|
||||
fn parse_patch_version(version: &str) -> Option<u32> {
|
||||
/// Parse a semver string like "0.2.5" -> (0, 2, 5)
|
||||
fn parse_semver(version: &str) -> Option<(u32, u32, u32)> {
|
||||
let clean = version.trim_start_matches('v');
|
||||
let parts: Vec<&str> = clean.split('.').collect();
|
||||
if parts.len() >= 3 {
|
||||
parts[2].parse().ok()
|
||||
let major = parts[0].parse().ok()?;
|
||||
let minor = parts[1].parse().ok()?;
|
||||
let patch = parts[2].parse().ok()?;
|
||||
Some((major, minor, patch))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Parse patch version from a tag like "v0.1.5", "v0.1.5-win", "0.1.5" -> 5
|
||||
fn parse_patch_from_tag(tag: &str) -> Option<u32> {
|
||||
/// Parse semver from a tag like "v0.2.5" -> (0, 2, 5)
|
||||
fn parse_semver_from_tag(tag: &str) -> Option<(u32, u32, u32)> {
|
||||
let clean = tag.trim_start_matches('v');
|
||||
// Remove platform suffix
|
||||
let clean = clean.strip_suffix("-win").unwrap_or(clean);
|
||||
parse_patch_version(clean)
|
||||
parse_semver(clean)
|
||||
}
|
||||
|
||||
/// Extract a clean version string from a tag like "v0.1.5-win" -> "0.1.5"
|
||||
/// Extract a clean version string from a tag like "v0.2.5" -> "0.2.5"
|
||||
fn extract_version_from_tag(tag: &str) -> Option<String> {
|
||||
let clean = tag.trim_start_matches('v');
|
||||
let clean = clean.strip_suffix("-win").unwrap_or(clean);
|
||||
// Validate it looks like a version
|
||||
let parts: Vec<&str> = clean.split('.').collect();
|
||||
if parts.len() >= 3 && parts.iter().all(|p| p.parse::<u32>().is_ok()) {
|
||||
Some(clean.to_string())
|
||||
} else {
|
||||
None
|
||||
let (major, minor, patch) = parse_semver_from_tag(tag)?;
|
||||
Some(format!("{}.{}.{}", major, minor, patch))
|
||||
}
|
||||
|
||||
/// Check whether a newer container image is available in the registry.
|
||||
///
|
||||
/// Compares the local image digest with the remote registry digest using the
|
||||
/// Docker Registry HTTP API v2. Only applies when the image source is
|
||||
/// "registry" (the default); for local builds or custom images we cannot
|
||||
/// meaningfully check for remote updates.
|
||||
#[tauri::command]
|
||||
pub async fn check_image_update(
|
||||
state: State<'_, AppState>,
|
||||
) -> Result<Option<ImageUpdateInfo>, String> {
|
||||
let settings = state.settings_store.get();
|
||||
|
||||
// Only check for registry images
|
||||
if settings.image_source != crate::models::app_settings::ImageSource::Registry {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
let image_name =
|
||||
container_config::resolve_image_name(&settings.image_source, &settings.custom_image_name);
|
||||
|
||||
// 1. Get local image digest via Docker
|
||||
let local_digest = docker::get_local_image_digest(&image_name).await.ok().flatten();
|
||||
|
||||
// 2. Get remote digest from the GHCR container registry (OCI distribution spec)
|
||||
let remote_digest = fetch_remote_digest("latest").await?;
|
||||
|
||||
// No remote digest available — nothing to compare
|
||||
let remote_digest = match remote_digest {
|
||||
Some(d) => d,
|
||||
None => return Ok(None),
|
||||
};
|
||||
|
||||
// If local digest matches remote, no update
|
||||
if let Some(ref local) = local_digest {
|
||||
if *local == remote_digest {
|
||||
return Ok(None);
|
||||
}
|
||||
}
|
||||
|
||||
// There's a difference (or no local image at all)
|
||||
Ok(Some(ImageUpdateInfo {
|
||||
remote_digest,
|
||||
local_digest,
|
||||
remote_updated_at: None,
|
||||
}))
|
||||
}
|
||||
|
||||
/// Fetch the digest of a tag from GHCR using the OCI / Docker Registry HTTP API v2.
|
||||
///
|
||||
/// GHCR requires authentication even for public images, so we first obtain an
|
||||
/// anonymous token, then issue a HEAD request to /v2/<repo>/manifests/<tag>
|
||||
/// and read the `Docker-Content-Digest` header.
|
||||
async fn fetch_remote_digest(tag: &str) -> Result<Option<String>, String> {
|
||||
let client = reqwest::Client::builder()
|
||||
.timeout(std::time::Duration::from_secs(15))
|
||||
.build()
|
||||
.map_err(|e| format!("Failed to create HTTP client: {}", e))?;
|
||||
|
||||
// 1. Obtain anonymous bearer token from GHCR
|
||||
let token = match fetch_ghcr_token(&client).await {
|
||||
Ok(t) => t,
|
||||
Err(e) => {
|
||||
log::warn!("Failed to obtain GHCR token: {}", e);
|
||||
return Ok(None);
|
||||
}
|
||||
};
|
||||
|
||||
// 2. HEAD the manifest with the token
|
||||
let url = format!("{}/manifests/{}", REGISTRY_API_BASE, tag);
|
||||
|
||||
let response = client
|
||||
.head(&url)
|
||||
.header(
|
||||
"Accept",
|
||||
"application/vnd.docker.distribution.manifest.v2+json, application/vnd.oci.image.index.v1+json",
|
||||
)
|
||||
.header("Authorization", format!("Bearer {}", token))
|
||||
.send()
|
||||
.await;
|
||||
|
||||
match response {
|
||||
Ok(resp) => {
|
||||
if !resp.status().is_success() {
|
||||
log::warn!(
|
||||
"Registry returned status {} when checking image digest",
|
||||
resp.status()
|
||||
);
|
||||
return Ok(None);
|
||||
}
|
||||
// The digest is returned in the Docker-Content-Digest header
|
||||
if let Some(digest) = resp.headers().get("docker-content-digest") {
|
||||
if let Ok(val) = digest.to_str() {
|
||||
return Ok(Some(val.to_string()));
|
||||
}
|
||||
}
|
||||
Ok(None)
|
||||
}
|
||||
Err(e) => {
|
||||
log::warn!("Failed to check registry for image update: {}", e);
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Fetch an anonymous bearer token from GHCR for pulling public images.
|
||||
async fn fetch_ghcr_token(client: &reqwest::Client) -> Result<String, String> {
|
||||
#[derive(Deserialize)]
|
||||
struct TokenResponse {
|
||||
token: String,
|
||||
}
|
||||
|
||||
let resp: TokenResponse = client
|
||||
.get(GHCR_TOKEN_URL)
|
||||
.header("User-Agent", "triple-c-updater")
|
||||
.send()
|
||||
.await
|
||||
.map_err(|e| format!("GHCR token request failed: {}", e))?
|
||||
.json()
|
||||
.await
|
||||
.map_err(|e| format!("Failed to parse GHCR token response: {}", e))?;
|
||||
|
||||
Ok(resp.token)
|
||||
}
|
||||
|
||||
143
app/src-tauri/src/commands/web_terminal_commands.rs
Normal file
143
app/src-tauri/src/commands/web_terminal_commands.rs
Normal file
@@ -0,0 +1,143 @@
|
||||
use serde::Serialize;
|
||||
use tauri::State;
|
||||
|
||||
use crate::web_terminal::WebTerminalServer;
|
||||
use crate::AppState;
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub struct WebTerminalInfo {
|
||||
pub running: bool,
|
||||
pub port: u16,
|
||||
pub access_token: String,
|
||||
pub local_ip: Option<String>,
|
||||
pub url: Option<String>,
|
||||
}
|
||||
|
||||
fn generate_token() -> String {
|
||||
use rand::Rng;
|
||||
let mut rng = rand::rng();
|
||||
let bytes: Vec<u8> = (0..32).map(|_| rng.random::<u8>()).collect();
|
||||
use base64::Engine;
|
||||
base64::engine::general_purpose::URL_SAFE_NO_PAD.encode(&bytes)
|
||||
}
|
||||
|
||||
fn get_local_ip() -> Option<String> {
|
||||
local_ip_address::local_ip().ok().map(|ip| ip.to_string())
|
||||
}
|
||||
|
||||
fn build_info(running: bool, port: u16, token: &str) -> WebTerminalInfo {
|
||||
let local_ip = get_local_ip();
|
||||
let url = if running {
|
||||
local_ip
|
||||
.as_ref()
|
||||
.map(|ip| format!("http://{}:{}?token={}", ip, port, token))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
WebTerminalInfo {
|
||||
running,
|
||||
port,
|
||||
access_token: token.to_string(),
|
||||
local_ip,
|
||||
url,
|
||||
}
|
||||
}
|
||||
|
||||
#[tauri::command]
|
||||
pub async fn start_web_terminal(state: State<'_, AppState>) -> Result<WebTerminalInfo, String> {
|
||||
let mut server_guard = state.web_terminal_server.lock().await;
|
||||
if server_guard.is_some() {
|
||||
return Err("Web terminal server is already running".to_string());
|
||||
}
|
||||
|
||||
let mut settings = state.settings_store.get();
|
||||
|
||||
// Auto-generate token if not set
|
||||
if settings.web_terminal.access_token.is_none() {
|
||||
settings.web_terminal.access_token = Some(generate_token());
|
||||
settings.web_terminal.enabled = true;
|
||||
state.settings_store.update(settings.clone()).map_err(|e| format!("Failed to save settings: {}", e))?;
|
||||
}
|
||||
|
||||
let token = settings.web_terminal.access_token.clone().unwrap_or_default();
|
||||
let port = settings.web_terminal.port;
|
||||
|
||||
let server = WebTerminalServer::start(
|
||||
port,
|
||||
token.clone(),
|
||||
state.exec_manager.clone(),
|
||||
state.projects_store.clone(),
|
||||
state.settings_store.clone(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
*server_guard = Some(server);
|
||||
|
||||
// Mark as enabled in settings
|
||||
if !settings.web_terminal.enabled {
|
||||
settings.web_terminal.enabled = true;
|
||||
let _ = state.settings_store.update(settings);
|
||||
}
|
||||
|
||||
Ok(build_info(true, port, &token))
|
||||
}
|
||||
|
||||
#[tauri::command]
|
||||
pub async fn stop_web_terminal(state: State<'_, AppState>) -> Result<(), String> {
|
||||
let mut server_guard = state.web_terminal_server.lock().await;
|
||||
if let Some(server) = server_guard.take() {
|
||||
server.stop();
|
||||
}
|
||||
|
||||
// Mark as disabled in settings
|
||||
let mut settings = state.settings_store.get();
|
||||
if settings.web_terminal.enabled {
|
||||
settings.web_terminal.enabled = false;
|
||||
let _ = state.settings_store.update(settings);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tauri::command]
|
||||
pub async fn get_web_terminal_status(state: State<'_, AppState>) -> Result<WebTerminalInfo, String> {
|
||||
let server_guard = state.web_terminal_server.lock().await;
|
||||
let settings = state.settings_store.get();
|
||||
let token = settings.web_terminal.access_token.clone().unwrap_or_default();
|
||||
let running = server_guard.is_some();
|
||||
Ok(build_info(running, settings.web_terminal.port, &token))
|
||||
}
|
||||
|
||||
#[tauri::command]
|
||||
pub async fn regenerate_web_terminal_token(state: State<'_, AppState>) -> Result<WebTerminalInfo, String> {
|
||||
// Stop current server if running
|
||||
{
|
||||
let mut server_guard = state.web_terminal_server.lock().await;
|
||||
if let Some(server) = server_guard.take() {
|
||||
server.stop();
|
||||
}
|
||||
}
|
||||
|
||||
// Generate new token and save
|
||||
let new_token = generate_token();
|
||||
let mut settings = state.settings_store.get();
|
||||
settings.web_terminal.access_token = Some(new_token.clone());
|
||||
state.settings_store.update(settings.clone()).map_err(|e| format!("Failed to save settings: {}", e))?;
|
||||
|
||||
// Restart if was enabled
|
||||
if settings.web_terminal.enabled {
|
||||
let server = WebTerminalServer::start(
|
||||
settings.web_terminal.port,
|
||||
new_token.clone(),
|
||||
state.exec_manager.clone(),
|
||||
state.projects_store.clone(),
|
||||
state.settings_store.clone(),
|
||||
)
|
||||
.await?;
|
||||
let mut server_guard = state.web_terminal_server.lock().await;
|
||||
*server_guard = Some(server);
|
||||
return Ok(build_info(true, settings.web_terminal.port, &new_token));
|
||||
}
|
||||
|
||||
Ok(build_info(false, settings.web_terminal.port, &new_token))
|
||||
}
|
||||
@@ -8,7 +8,7 @@ use std::collections::HashMap;
|
||||
use sha2::{Sha256, Digest};
|
||||
|
||||
use super::client::get_docker;
|
||||
use crate::models::{AuthMode, BedrockAuthMethod, ContainerInfo, EnvVar, GlobalAwsSettings, McpServer, McpTransportType, PortMapping, Project, ProjectPath};
|
||||
use crate::models::{Backend, BedrockAuthMethod, ClaudeCodeSettings, ContainerInfo, EnvVar, GlobalAwsSettings, McpServer, McpTransportType, PortMapping, Project, ProjectPath};
|
||||
|
||||
const SCHEDULER_INSTRUCTIONS: &str = r#"## Scheduled Tasks
|
||||
|
||||
@@ -80,7 +80,7 @@ When working on any project that has a `.flightops/` directory, follow the Fligh
|
||||
|
||||
const MISSION_CONTROL_PROJECT_INSTRUCTIONS: &str = r#"## Flight Operations
|
||||
|
||||
This project uses [Flight Control](https://github.com/msieurthenardier/mission-control) for structured development.
|
||||
This project uses **Flight Control** (bundled with Triple-C) for structured development.
|
||||
|
||||
**Before any mission/flight/leg work, read these files in order:**
|
||||
1. `.flightops/README.md` — What the flightops directory contains
|
||||
@@ -132,14 +132,17 @@ fn build_claude_instructions(
|
||||
/// Compute a fingerprint string for the custom environment variables.
|
||||
/// Sorted alphabetically so order changes do not cause spurious recreation.
|
||||
fn compute_env_fingerprint(custom_env_vars: &[EnvVar]) -> String {
|
||||
let reserved_prefixes = ["ANTHROPIC_", "AWS_", "GIT_", "HOST_", "CLAUDE_", "TRIPLE_C_"];
|
||||
let reserved_prefixes = ["ANTHROPIC_", "AWS_", "GIT_", "HOST_", "TRIPLE_C_"];
|
||||
let reserved_exact = ["CLAUDE_INSTRUCTIONS", "MCP_SERVERS_JSON", "CLAUDE_CODE_SETTINGS_JSON", "MISSION_CONTROL_ENABLED"];
|
||||
let mut parts: Vec<String> = Vec::new();
|
||||
for env_var in custom_env_vars {
|
||||
let key = env_var.key.trim();
|
||||
if key.is_empty() {
|
||||
continue;
|
||||
}
|
||||
let is_reserved = reserved_prefixes.iter().any(|p| key.to_uppercase().starts_with(p));
|
||||
let upper = key.to_uppercase();
|
||||
let is_reserved = reserved_prefixes.iter().any(|p| upper.starts_with(p))
|
||||
|| reserved_exact.iter().any(|e| upper == *e);
|
||||
if is_reserved {
|
||||
continue;
|
||||
}
|
||||
@@ -244,13 +247,13 @@ fn compute_ollama_fingerprint(project: &Project) -> String {
|
||||
}
|
||||
}
|
||||
|
||||
/// Compute a fingerprint for the LiteLLM configuration so we can detect changes.
|
||||
fn compute_litellm_fingerprint(project: &Project) -> String {
|
||||
if let Some(ref litellm) = project.litellm_config {
|
||||
/// Compute a fingerprint for the OpenAI Compatible configuration so we can detect changes.
|
||||
fn compute_openai_compatible_fingerprint(project: &Project) -> String {
|
||||
if let Some(ref config) = project.openai_compatible_config {
|
||||
let parts = vec![
|
||||
litellm.base_url.clone(),
|
||||
litellm.api_key.as_deref().unwrap_or("").to_string(),
|
||||
litellm.model_id.as_deref().unwrap_or("").to_string(),
|
||||
config.base_url.clone(),
|
||||
config.api_key.as_deref().unwrap_or("").to_string(),
|
||||
config.model_id.as_deref().unwrap_or("").to_string(),
|
||||
];
|
||||
sha256_hex(&parts.join("|"))
|
||||
} else {
|
||||
@@ -282,6 +285,80 @@ fn compute_ports_fingerprint(port_mappings: &[PortMapping]) -> String {
|
||||
sha256_hex(&joined)
|
||||
}
|
||||
|
||||
/// Merge global and per-project ClaudeCodeSettings.
|
||||
/// Per-project fields override global fields when set (non-default).
|
||||
fn merge_claude_code_settings(
|
||||
global: Option<&ClaudeCodeSettings>,
|
||||
project: Option<&ClaudeCodeSettings>,
|
||||
) -> Option<ClaudeCodeSettings> {
|
||||
match (global, project) {
|
||||
(None, None) => None,
|
||||
(Some(g), None) => Some(g.clone()),
|
||||
(None, Some(p)) => Some(p.clone()),
|
||||
(Some(g), Some(p)) => {
|
||||
// Project overrides global for each field when the project value is non-default
|
||||
Some(ClaudeCodeSettings {
|
||||
tui_mode: p.tui_mode.clone().or_else(|| g.tui_mode.clone()),
|
||||
effort: p.effort.clone().or_else(|| g.effort.clone()),
|
||||
auto_scroll_disabled: if p.auto_scroll_disabled { true } else { g.auto_scroll_disabled },
|
||||
focus_mode: if p.focus_mode { true } else { g.focus_mode },
|
||||
show_thinking_summaries: if p.show_thinking_summaries { true } else { g.show_thinking_summaries },
|
||||
enable_session_recap: if p.enable_session_recap { true } else { g.enable_session_recap },
|
||||
env_scrub: if p.env_scrub { true } else { g.env_scrub },
|
||||
prompt_caching_1h: if p.prompt_caching_1h { true } else { g.prompt_caching_1h },
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Compute a fingerprint for the Claude Code settings so we can detect changes.
|
||||
fn compute_claude_code_settings_fingerprint(settings: Option<&ClaudeCodeSettings>) -> String {
|
||||
match settings {
|
||||
None => String::new(),
|
||||
Some(s) => {
|
||||
let parts = vec![
|
||||
s.tui_mode.as_deref().unwrap_or("").to_string(),
|
||||
s.effort.as_deref().unwrap_or("").to_string(),
|
||||
format!("{}", s.auto_scroll_disabled),
|
||||
format!("{}", s.focus_mode),
|
||||
format!("{}", s.show_thinking_summaries),
|
||||
format!("{}", s.enable_session_recap),
|
||||
format!("{}", s.env_scrub),
|
||||
format!("{}", s.prompt_caching_1h),
|
||||
];
|
||||
sha256_hex(&parts.join("|"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Build the settings.json content for Claude Code from ClaudeCodeSettings.
|
||||
/// Returns a JSON string of the settings to be written to ~/.claude/settings.json.
|
||||
fn build_claude_code_settings_json(settings: &ClaudeCodeSettings) -> Option<String> {
|
||||
let mut map = serde_json::Map::new();
|
||||
|
||||
if let Some(ref tui) = settings.tui_mode {
|
||||
map.insert("tui".to_string(), serde_json::json!(tui));
|
||||
}
|
||||
if let Some(ref effort) = settings.effort {
|
||||
map.insert("effort".to_string(), serde_json::json!(effort));
|
||||
}
|
||||
if settings.auto_scroll_disabled {
|
||||
map.insert("autoScrollEnabled".to_string(), serde_json::json!(false));
|
||||
}
|
||||
if settings.focus_mode {
|
||||
map.insert("focusMode".to_string(), serde_json::json!(true));
|
||||
}
|
||||
if settings.show_thinking_summaries {
|
||||
map.insert("showThinkingSummaries".to_string(), serde_json::json!(true));
|
||||
}
|
||||
|
||||
if map.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(serde_json::Value::Object(map).to_string())
|
||||
}
|
||||
}
|
||||
|
||||
/// Build the JSON value for MCP servers config to be injected into ~/.claude.json.
|
||||
/// Produces `{"mcpServers": {"name": {"type": "stdio", ...}, ...}}`.
|
||||
///
|
||||
@@ -400,6 +477,10 @@ pub async fn create_container(
|
||||
timezone: Option<&str>,
|
||||
mcp_servers: &[McpServer],
|
||||
network_name: Option<&str>,
|
||||
global_claude_code_settings: Option<&ClaudeCodeSettings>,
|
||||
default_ssh_key_path: Option<&str>,
|
||||
default_git_user_name: Option<&str>,
|
||||
default_git_user_email: Option<&str>,
|
||||
) -> Result<String, String> {
|
||||
let docker = get_docker()?;
|
||||
let container_name = project.container_name();
|
||||
@@ -445,15 +526,18 @@ pub async fn create_container(
|
||||
if let Some(ref token) = project.git_token {
|
||||
env_vars.push(format!("GIT_TOKEN={}", token));
|
||||
}
|
||||
if let Some(ref name) = project.git_user_name {
|
||||
// Per-project git user overrides global defaults
|
||||
let effective_git_name = project.git_user_name.as_deref().or(default_git_user_name);
|
||||
let effective_git_email = project.git_user_email.as_deref().or(default_git_user_email);
|
||||
if let Some(name) = effective_git_name {
|
||||
env_vars.push(format!("GIT_USER_NAME={}", name));
|
||||
}
|
||||
if let Some(ref email) = project.git_user_email {
|
||||
if let Some(email) = effective_git_email {
|
||||
env_vars.push(format!("GIT_USER_EMAIL={}", email));
|
||||
}
|
||||
|
||||
// Bedrock configuration
|
||||
if project.auth_mode == AuthMode::Bedrock {
|
||||
if project.backend == Backend::Bedrock {
|
||||
if let Some(ref bedrock) = project.bedrock_config {
|
||||
env_vars.push("CLAUDE_CODE_USE_BEDROCK=1".to_string());
|
||||
|
||||
@@ -506,7 +590,7 @@ pub async fn create_container(
|
||||
}
|
||||
|
||||
// Ollama configuration
|
||||
if project.auth_mode == AuthMode::Ollama {
|
||||
if project.backend == Backend::Ollama {
|
||||
if let Some(ref ollama) = project.ollama_config {
|
||||
env_vars.push(format!("ANTHROPIC_BASE_URL={}", ollama.base_url));
|
||||
env_vars.push("ANTHROPIC_AUTH_TOKEN=ollama".to_string());
|
||||
@@ -516,14 +600,14 @@ pub async fn create_container(
|
||||
}
|
||||
}
|
||||
|
||||
// LiteLLM configuration
|
||||
if project.auth_mode == AuthMode::LiteLlm {
|
||||
if let Some(ref litellm) = project.litellm_config {
|
||||
env_vars.push(format!("ANTHROPIC_BASE_URL={}", litellm.base_url));
|
||||
if let Some(ref key) = litellm.api_key {
|
||||
// OpenAI Compatible configuration
|
||||
if project.backend == Backend::OpenAiCompatible {
|
||||
if let Some(ref config) = project.openai_compatible_config {
|
||||
env_vars.push(format!("ANTHROPIC_BASE_URL={}", config.base_url));
|
||||
if let Some(ref key) = config.api_key {
|
||||
env_vars.push(format!("ANTHROPIC_AUTH_TOKEN={}", key));
|
||||
}
|
||||
if let Some(ref model) = litellm.model_id {
|
||||
if let Some(ref model) = config.model_id {
|
||||
env_vars.push(format!("ANTHROPIC_MODEL={}", model));
|
||||
}
|
||||
}
|
||||
@@ -531,13 +615,16 @@ pub async fn create_container(
|
||||
|
||||
// Custom environment variables (global + per-project, project overrides global for same key)
|
||||
let merged_env = merge_custom_env_vars(global_custom_env_vars, &project.custom_env_vars);
|
||||
let reserved_prefixes = ["ANTHROPIC_", "AWS_", "GIT_", "HOST_", "CLAUDE_", "TRIPLE_C_"];
|
||||
let reserved_prefixes = ["ANTHROPIC_", "AWS_", "GIT_", "HOST_", "TRIPLE_C_"];
|
||||
let reserved_exact = ["CLAUDE_INSTRUCTIONS", "MCP_SERVERS_JSON", "CLAUDE_CODE_SETTINGS_JSON", "MISSION_CONTROL_ENABLED"];
|
||||
for env_var in &merged_env {
|
||||
let key = env_var.key.trim();
|
||||
if key.is_empty() {
|
||||
continue;
|
||||
}
|
||||
let is_reserved = reserved_prefixes.iter().any(|p| key.to_uppercase().starts_with(p));
|
||||
let upper = key.to_uppercase();
|
||||
let is_reserved = reserved_prefixes.iter().any(|p| upper.starts_with(p))
|
||||
|| reserved_exact.iter().any(|e| upper == *e);
|
||||
if is_reserved {
|
||||
log::warn!("Skipping reserved env var: {}", key);
|
||||
continue;
|
||||
@@ -577,6 +664,32 @@ pub async fn create_container(
|
||||
env_vars.push(format!("MCP_SERVERS_JSON={}", mcp_json));
|
||||
}
|
||||
|
||||
// Claude Code settings (global + per-project merged)
|
||||
let merged_cc_settings = merge_claude_code_settings(
|
||||
global_claude_code_settings,
|
||||
project.claude_code_settings.as_ref(),
|
||||
);
|
||||
if let Some(ref cc) = merged_cc_settings {
|
||||
// Env-var-based settings (these are read directly by Claude Code)
|
||||
if cc.tui_mode.as_deref() == Some("fullscreen") {
|
||||
env_vars.push("CLAUDE_CODE_NO_FLICKER=1".to_string());
|
||||
}
|
||||
if cc.enable_session_recap {
|
||||
env_vars.push("CLAUDE_CODE_ENABLE_AWAY_SUMMARY=1".to_string());
|
||||
}
|
||||
if cc.env_scrub {
|
||||
env_vars.push("CLAUDE_CODE_SUBPROCESS_ENV_SCRUB=1".to_string());
|
||||
}
|
||||
if cc.prompt_caching_1h {
|
||||
env_vars.push("ENABLE_PROMPT_CACHING_1H=1".to_string());
|
||||
}
|
||||
|
||||
// settings.json-based settings (written by the entrypoint)
|
||||
if let Some(settings_json) = build_claude_code_settings_json(cc) {
|
||||
env_vars.push(format!("CLAUDE_CODE_SETTINGS_JSON={}", settings_json));
|
||||
}
|
||||
}
|
||||
|
||||
let mut mounts: Vec<Mount> = Vec::new();
|
||||
|
||||
// Project directories -> /workspace/{mount_name}
|
||||
@@ -612,10 +725,12 @@ pub async fn create_container(
|
||||
});
|
||||
|
||||
// SSH keys mount (read-only staging; entrypoint copies to ~/.ssh with correct perms)
|
||||
if let Some(ref ssh_path) = project.ssh_key_path {
|
||||
// Per-project ssh_key_path overrides global default_ssh_key_path
|
||||
let effective_ssh_path = project.ssh_key_path.as_deref().or(default_ssh_key_path);
|
||||
if let Some(ssh_path) = effective_ssh_path {
|
||||
mounts.push(Mount {
|
||||
target: Some("/tmp/.host-ssh".to_string()),
|
||||
source: Some(ssh_path.clone()),
|
||||
source: Some(ssh_path.to_string()),
|
||||
typ: Some(MountTypeEnum::BIND),
|
||||
read_only: Some(true),
|
||||
..Default::default()
|
||||
@@ -624,7 +739,7 @@ pub async fn create_container(
|
||||
|
||||
// AWS config mount (read-only)
|
||||
// Mount if: Bedrock profile auth needs it, OR a global aws_config_path is set
|
||||
let should_mount_aws = if project.auth_mode == AuthMode::Bedrock {
|
||||
let should_mount_aws = if project.backend == Backend::Bedrock {
|
||||
if let Some(ref bedrock) = project.bedrock_config {
|
||||
bedrock.auth_method == BedrockAuthMethod::Profile
|
||||
} else {
|
||||
@@ -694,16 +809,25 @@ pub async fn create_container(
|
||||
labels.insert("triple-c.managed".to_string(), "true".to_string());
|
||||
labels.insert("triple-c.project-id".to_string(), project.id.clone());
|
||||
labels.insert("triple-c.project-name".to_string(), project.name.clone());
|
||||
labels.insert("triple-c.auth-mode".to_string(), format!("{:?}", project.auth_mode));
|
||||
labels.insert("triple-c.backend".to_string(), format!("{:?}", project.backend));
|
||||
labels.insert("triple-c.paths-fingerprint".to_string(), compute_paths_fingerprint(&project.paths));
|
||||
labels.insert("triple-c.bedrock-fingerprint".to_string(), compute_bedrock_fingerprint(project));
|
||||
labels.insert("triple-c.ollama-fingerprint".to_string(), compute_ollama_fingerprint(project));
|
||||
labels.insert("triple-c.litellm-fingerprint".to_string(), compute_litellm_fingerprint(project));
|
||||
labels.insert("triple-c.openai-compatible-fingerprint".to_string(), compute_openai_compatible_fingerprint(project));
|
||||
labels.insert("triple-c.ports-fingerprint".to_string(), compute_ports_fingerprint(&project.port_mappings));
|
||||
labels.insert("triple-c.image".to_string(), image_name.to_string());
|
||||
labels.insert("triple-c.timezone".to_string(), timezone.unwrap_or("").to_string());
|
||||
labels.insert("triple-c.mcp-fingerprint".to_string(), compute_mcp_fingerprint(mcp_servers));
|
||||
labels.insert("triple-c.mission-control".to_string(), project.mission_control_enabled.to_string());
|
||||
labels.insert("triple-c.custom-env-fingerprint".to_string(), custom_env_fingerprint.clone());
|
||||
labels.insert("triple-c.claude-code-settings-fingerprint".to_string(),
|
||||
compute_claude_code_settings_fingerprint(merged_cc_settings.as_ref()));
|
||||
labels.insert("triple-c.instructions-fingerprint".to_string(),
|
||||
combined_instructions.as_ref().map(|s| sha256_hex(s)).unwrap_or_default());
|
||||
labels.insert("triple-c.git-user-name".to_string(), effective_git_name.unwrap_or_default().to_string());
|
||||
labels.insert("triple-c.git-user-email".to_string(), effective_git_email.unwrap_or_default().to_string());
|
||||
labels.insert("triple-c.git-token-hash".to_string(),
|
||||
project.git_token.as_ref().map(|t| sha256_hex(t)).unwrap_or_default());
|
||||
|
||||
let host_config = HostConfig {
|
||||
mounts: Some(mounts),
|
||||
@@ -870,6 +994,10 @@ pub async fn container_needs_recreation(
|
||||
global_custom_env_vars: &[EnvVar],
|
||||
timezone: Option<&str>,
|
||||
mcp_servers: &[McpServer],
|
||||
global_claude_code_settings: Option<&ClaudeCodeSettings>,
|
||||
default_ssh_key_path: Option<&str>,
|
||||
default_git_user_name: Option<&str>,
|
||||
default_git_user_email: Option<&str>,
|
||||
) -> Result<bool, String> {
|
||||
let docker = get_docker()?;
|
||||
let info = docker
|
||||
@@ -897,11 +1025,13 @@ pub async fn container_needs_recreation(
|
||||
// Code settings stored in the named volume). The change takes effect
|
||||
// on the next explicit rebuild instead.
|
||||
|
||||
// ── Auth mode ────────────────────────────────────────────────────────
|
||||
let current_auth_mode = format!("{:?}", project.auth_mode);
|
||||
if let Some(container_auth_mode) = get_label("triple-c.auth-mode") {
|
||||
if container_auth_mode != current_auth_mode {
|
||||
log::info!("Auth mode mismatch (container={:?}, project={:?})", container_auth_mode, current_auth_mode);
|
||||
// ── Backend ──────────────────────────────────────────────────────────
|
||||
let current_backend = format!("{:?}", project.backend);
|
||||
// Check new label name, falling back to old "triple-c.auth-mode" for pre-rename containers
|
||||
let container_backend = get_label("triple-c.backend").or_else(|| get_label("triple-c.auth-mode"));
|
||||
if let Some(container_backend) = container_backend {
|
||||
if container_backend != current_backend {
|
||||
log::info!("Backend mismatch (container={:?}, project={:?})", container_backend, current_backend);
|
||||
return Ok(true);
|
||||
}
|
||||
}
|
||||
@@ -946,11 +1076,11 @@ pub async fn container_needs_recreation(
|
||||
return Ok(true);
|
||||
}
|
||||
|
||||
// ── LiteLLM config fingerprint ───────────────────────────────────────
|
||||
let expected_litellm_fp = compute_litellm_fingerprint(project);
|
||||
let container_litellm_fp = get_label("triple-c.litellm-fingerprint").unwrap_or_default();
|
||||
if container_litellm_fp != expected_litellm_fp {
|
||||
log::info!("LiteLLM config mismatch");
|
||||
// ── OpenAI Compatible config fingerprint ────────────────────────────
|
||||
let expected_oai_fp = compute_openai_compatible_fingerprint(project);
|
||||
let container_oai_fp = get_label("triple-c.openai-compatible-fingerprint").unwrap_or_default();
|
||||
if container_oai_fp != expected_oai_fp {
|
||||
log::info!("OpenAI Compatible config mismatch");
|
||||
return Ok(true);
|
||||
}
|
||||
|
||||
@@ -988,51 +1118,48 @@ pub async fn container_needs_recreation(
|
||||
.find(|mount| mount.target.as_deref() == Some("/tmp/.host-ssh"))
|
||||
})
|
||||
.and_then(|mount| mount.source.as_deref());
|
||||
let project_ssh = project.ssh_key_path.as_deref();
|
||||
if ssh_mount_source != project_ssh {
|
||||
let effective_ssh = project.ssh_key_path.as_deref().or(default_ssh_key_path);
|
||||
if ssh_mount_source != effective_ssh {
|
||||
log::info!(
|
||||
"SSH key path mismatch (container={:?}, project={:?})",
|
||||
"SSH key path mismatch (container={:?}, expected={:?})",
|
||||
ssh_mount_source,
|
||||
project_ssh
|
||||
effective_ssh
|
||||
);
|
||||
return Ok(true);
|
||||
}
|
||||
|
||||
// ── Git environment variables ────────────────────────────────────────
|
||||
let env_vars = info
|
||||
.config
|
||||
.as_ref()
|
||||
.and_then(|c| c.env.as_ref());
|
||||
|
||||
let get_env = |name: &str| -> Option<String> {
|
||||
env_vars.and_then(|vars| {
|
||||
vars.iter()
|
||||
.find(|v| v.starts_with(&format!("{}=", name)))
|
||||
.map(|v| v[name.len() + 1..].to_string())
|
||||
})
|
||||
};
|
||||
|
||||
let container_git_name = get_env("GIT_USER_NAME");
|
||||
let container_git_email = get_env("GIT_USER_EMAIL");
|
||||
let container_git_token = get_env("GIT_TOKEN");
|
||||
|
||||
if container_git_name.as_deref() != project.git_user_name.as_deref() {
|
||||
log::info!("GIT_USER_NAME mismatch (container={:?}, project={:?})", container_git_name, project.git_user_name);
|
||||
// ── Git settings (label-based to avoid stale snapshot env vars) ─────
|
||||
let expected_git_name = project.git_user_name.as_deref()
|
||||
.or(default_git_user_name)
|
||||
.unwrap_or_default()
|
||||
.to_string();
|
||||
let container_git_name = get_label("triple-c.git-user-name").unwrap_or_default();
|
||||
if container_git_name != expected_git_name {
|
||||
log::info!("GIT_USER_NAME mismatch (container={:?}, expected={:?})", container_git_name, expected_git_name);
|
||||
return Ok(true);
|
||||
}
|
||||
if container_git_email.as_deref() != project.git_user_email.as_deref() {
|
||||
log::info!("GIT_USER_EMAIL mismatch (container={:?}, project={:?})", container_git_email, project.git_user_email);
|
||||
|
||||
let expected_git_email = project.git_user_email.as_deref()
|
||||
.or(default_git_user_email)
|
||||
.unwrap_or_default()
|
||||
.to_string();
|
||||
let container_git_email = get_label("triple-c.git-user-email").unwrap_or_default();
|
||||
if container_git_email != expected_git_email {
|
||||
log::info!("GIT_USER_EMAIL mismatch (container={:?}, expected={:?})", container_git_email, expected_git_email);
|
||||
return Ok(true);
|
||||
}
|
||||
if container_git_token.as_deref() != project.git_token.as_deref() {
|
||||
|
||||
let expected_git_token_hash = project.git_token.as_ref().map(|t| sha256_hex(t)).unwrap_or_default();
|
||||
let container_git_token_hash = get_label("triple-c.git-token-hash").unwrap_or_default();
|
||||
if container_git_token_hash != expected_git_token_hash {
|
||||
log::info!("GIT_TOKEN mismatch");
|
||||
return Ok(true);
|
||||
}
|
||||
|
||||
// ── Custom environment variables ──────────────────────────────────────
|
||||
// ── Custom environment variables (label-based fingerprint) ──────────
|
||||
let merged_env = merge_custom_env_vars(global_custom_env_vars, &project.custom_env_vars);
|
||||
let expected_fingerprint = compute_env_fingerprint(&merged_env);
|
||||
let container_fingerprint = get_env("TRIPLE_C_CUSTOM_ENV").unwrap_or_default();
|
||||
let container_fingerprint = get_label("triple-c.custom-env-fingerprint").unwrap_or_default();
|
||||
if container_fingerprint != expected_fingerprint {
|
||||
log::info!("Custom env vars mismatch (container={:?}, expected={:?})", container_fingerprint, expected_fingerprint);
|
||||
return Ok(true);
|
||||
@@ -1046,19 +1173,32 @@ pub async fn container_needs_recreation(
|
||||
return Ok(true);
|
||||
}
|
||||
|
||||
// ── Claude instructions ───────────────────────────────────────────────
|
||||
// ── Claude instructions (label-based fingerprint) ─────────────────────
|
||||
let expected_instructions = build_claude_instructions(
|
||||
global_claude_instructions,
|
||||
project.claude_instructions.as_deref(),
|
||||
&project.port_mappings,
|
||||
project.mission_control_enabled,
|
||||
);
|
||||
let container_instructions = get_env("CLAUDE_INSTRUCTIONS");
|
||||
if container_instructions.as_deref() != expected_instructions.as_deref() {
|
||||
let expected_instructions_fp = expected_instructions.as_ref().map(|s| sha256_hex(s)).unwrap_or_default();
|
||||
let container_instructions_fp = get_label("triple-c.instructions-fingerprint").unwrap_or_default();
|
||||
if container_instructions_fp != expected_instructions_fp {
|
||||
log::info!("CLAUDE_INSTRUCTIONS mismatch");
|
||||
return Ok(true);
|
||||
}
|
||||
|
||||
// ── Claude Code settings fingerprint ───────────────────────────────
|
||||
let merged_cc = merge_claude_code_settings(
|
||||
global_claude_code_settings,
|
||||
project.claude_code_settings.as_ref(),
|
||||
);
|
||||
let expected_cc_fp = compute_claude_code_settings_fingerprint(merged_cc.as_ref());
|
||||
let container_cc_fp = get_label("triple-c.claude-code-settings-fingerprint").unwrap_or_default();
|
||||
if container_cc_fp != expected_cc_fp {
|
||||
log::info!("Claude Code settings mismatch (container={:?}, expected={:?})", container_cc_fp, expected_cc_fp);
|
||||
return Ok(true);
|
||||
}
|
||||
|
||||
// ── MCP servers fingerprint ─────────────────────────────────────────
|
||||
let expected_mcp_fp = compute_mcp_fingerprint(mcp_servers);
|
||||
let container_mcp_fp = get_label("triple-c.mcp-fingerprint").unwrap_or_default();
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
use bollard::image::{BuildImageOptions, CreateImageOptions, ListImagesOptions};
|
||||
use bollard::models::ImageSummary;
|
||||
use futures_util::StreamExt;
|
||||
use include_dir::{include_dir, Dir};
|
||||
use std::collections::HashMap;
|
||||
use std::io::Write;
|
||||
|
||||
@@ -11,6 +12,11 @@ const DOCKERFILE: &str = include_str!("../../../../container/Dockerfile");
|
||||
const ENTRYPOINT: &str = include_str!("../../../../container/entrypoint.sh");
|
||||
const SCHEDULER: &str = include_str!("../../../../container/triple-c-scheduler");
|
||||
const TASK_RUNNER: &str = include_str!("../../../../container/triple-c-task-runner");
|
||||
const OSC52_CLIPBOARD: &str = include_str!("../../../../container/osc52-clipboard");
|
||||
const AUDIO_SHIM: &str = include_str!("../../../../container/audio-shim");
|
||||
const SSO_REFRESH: &str = include_str!("../../../../container/triple-c-sso-refresh");
|
||||
|
||||
static MISSION_CONTROL_DIR: Dir = include_dir!("$CARGO_MANIFEST_DIR/../../container/mission-control");
|
||||
|
||||
pub async fn image_exists(image_name: &str) -> Result<bool, String> {
|
||||
let docker = get_docker()?;
|
||||
@@ -31,6 +37,38 @@ pub async fn image_exists(image_name: &str) -> Result<bool, String> {
|
||||
Ok(!images.is_empty())
|
||||
}
|
||||
|
||||
/// Returns the first repo digest (e.g. "sha256:abc...") for the given image,
|
||||
/// or None if the image doesn't exist locally or has no repo digests.
|
||||
pub async fn get_local_image_digest(image_name: &str) -> Result<Option<String>, String> {
|
||||
let docker = get_docker()?;
|
||||
|
||||
let filters: HashMap<String, Vec<String>> = HashMap::from([(
|
||||
"reference".to_string(),
|
||||
vec![image_name.to_string()],
|
||||
)]);
|
||||
|
||||
let images: Vec<ImageSummary> = docker
|
||||
.list_images(Some(ListImagesOptions {
|
||||
filters,
|
||||
..Default::default()
|
||||
}))
|
||||
.await
|
||||
.map_err(|e| format!("Failed to list images: {}", e))?;
|
||||
|
||||
if let Some(img) = images.first() {
|
||||
// RepoDigests contains entries like "registry/repo@sha256:abc..."
|
||||
if let Some(digest_str) = img.repo_digests.first() {
|
||||
// Extract the sha256:... part after '@'
|
||||
if let Some(pos) = digest_str.find('@') {
|
||||
return Ok(Some(digest_str[pos + 1..].to_string()));
|
||||
}
|
||||
return Ok(Some(digest_str.clone()));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
pub async fn pull_image<F>(image_name: &str, on_progress: F) -> Result<(), String>
|
||||
where
|
||||
F: Fn(String) + Send + 'static,
|
||||
@@ -118,38 +156,48 @@ where
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn append_file_to_archive(
|
||||
archive: &mut tar::Builder<&mut Vec<u8>>,
|
||||
path: &str,
|
||||
content: &[u8],
|
||||
mode: u32,
|
||||
) -> Result<(), std::io::Error> {
|
||||
let mut header = tar::Header::new_gnu();
|
||||
header.set_size(content.len() as u64);
|
||||
header.set_mode(mode);
|
||||
header.set_cksum();
|
||||
archive.append_data(&mut header, path, content)
|
||||
}
|
||||
|
||||
fn append_embedded_dir(
|
||||
archive: &mut tar::Builder<&mut Vec<u8>>,
|
||||
dir: &Dir,
|
||||
prefix: &str,
|
||||
) -> Result<(), std::io::Error> {
|
||||
for file in dir.files() {
|
||||
let path = format!("{}/{}", prefix, file.path().display());
|
||||
append_file_to_archive(archive, &path, file.contents(), 0o644)?;
|
||||
}
|
||||
for subdir in dir.dirs() {
|
||||
append_embedded_dir(archive, subdir, prefix)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn create_build_context() -> Result<Vec<u8>, std::io::Error> {
|
||||
let mut buf = Vec::new();
|
||||
{
|
||||
let mut archive = tar::Builder::new(&mut buf);
|
||||
|
||||
let dockerfile_bytes = DOCKERFILE.as_bytes();
|
||||
let mut header = tar::Header::new_gnu();
|
||||
header.set_size(dockerfile_bytes.len() as u64);
|
||||
header.set_mode(0o644);
|
||||
header.set_cksum();
|
||||
archive.append_data(&mut header, "Dockerfile", dockerfile_bytes)?;
|
||||
append_file_to_archive(&mut archive, "Dockerfile", DOCKERFILE.as_bytes(), 0o644)?;
|
||||
append_file_to_archive(&mut archive, "entrypoint.sh", ENTRYPOINT.as_bytes(), 0o755)?;
|
||||
append_file_to_archive(&mut archive, "triple-c-scheduler", SCHEDULER.as_bytes(), 0o755)?;
|
||||
append_file_to_archive(&mut archive, "triple-c-task-runner", TASK_RUNNER.as_bytes(), 0o755)?;
|
||||
append_file_to_archive(&mut archive, "osc52-clipboard", OSC52_CLIPBOARD.as_bytes(), 0o755)?;
|
||||
append_file_to_archive(&mut archive, "audio-shim", AUDIO_SHIM.as_bytes(), 0o755)?;
|
||||
append_file_to_archive(&mut archive, "triple-c-sso-refresh", SSO_REFRESH.as_bytes(), 0o755)?;
|
||||
|
||||
let entrypoint_bytes = ENTRYPOINT.as_bytes();
|
||||
let mut header = tar::Header::new_gnu();
|
||||
header.set_size(entrypoint_bytes.len() as u64);
|
||||
header.set_mode(0o755);
|
||||
header.set_cksum();
|
||||
archive.append_data(&mut header, "entrypoint.sh", entrypoint_bytes)?;
|
||||
|
||||
let scheduler_bytes = SCHEDULER.as_bytes();
|
||||
let mut header = tar::Header::new_gnu();
|
||||
header.set_size(scheduler_bytes.len() as u64);
|
||||
header.set_mode(0o755);
|
||||
header.set_cksum();
|
||||
archive.append_data(&mut header, "triple-c-scheduler", scheduler_bytes)?;
|
||||
|
||||
let task_runner_bytes = TASK_RUNNER.as_bytes();
|
||||
let mut header = tar::Header::new_gnu();
|
||||
header.set_size(task_runner_bytes.len() as u64);
|
||||
header.set_mode(0o755);
|
||||
header.set_cksum();
|
||||
archive.append_data(&mut header, "triple-c-task-runner", task_runner_bytes)?;
|
||||
append_embedded_dir(&mut archive, &MISSION_CONTROL_DIR, "mission-control")?;
|
||||
|
||||
archive.finish()?;
|
||||
}
|
||||
|
||||
@@ -3,7 +3,10 @@ pub mod container;
|
||||
pub mod image;
|
||||
pub mod exec;
|
||||
pub mod network;
|
||||
pub mod stt;
|
||||
|
||||
#[allow(unused_imports)]
|
||||
pub use stt::*;
|
||||
#[allow(unused_imports)]
|
||||
pub use client::*;
|
||||
#[allow(unused_imports)]
|
||||
|
||||
266
app/src-tauri/src/docker/stt.rs
Normal file
266
app/src-tauri/src/docker/stt.rs
Normal file
@@ -0,0 +1,266 @@
|
||||
use bollard::container::{
|
||||
Config, CreateContainerOptions, ListContainersOptions, RemoveContainerOptions,
|
||||
StartContainerOptions, StopContainerOptions,
|
||||
};
|
||||
use bollard::image::BuildImageOptions;
|
||||
use bollard::models::{HostConfig, Mount, MountTypeEnum, PortBinding};
|
||||
use futures_util::StreamExt;
|
||||
use std::collections::HashMap;
|
||||
use std::io::Write;
|
||||
|
||||
use super::client::get_docker;
|
||||
use crate::models::app_settings::{SttSettings, SttStatus};
|
||||
|
||||
const STT_CONTAINER_NAME: &str = "triple-c-stt";
|
||||
const STT_MODEL_VOLUME: &str = "triple-c-stt-model-cache";
|
||||
const STT_REGISTRY_IMAGE: &str = "ghcr.io/shadowdao/triple-c-stt:latest";
|
||||
const STT_LOCAL_IMAGE: &str = "triple-c-stt:latest";
|
||||
const STT_DOCKERFILE: &str = include_str!("../../../../stt-container/Dockerfile");
|
||||
const STT_SERVER: &str = include_str!("../../../../stt-container/server.py");
|
||||
|
||||
pub async fn get_stt_status(settings: &SttSettings) -> Result<SttStatus, String> {
|
||||
let image_exists = super::image::image_exists(STT_REGISTRY_IMAGE).await.unwrap_or(false)
|
||||
|| super::image::image_exists(STT_LOCAL_IMAGE).await.unwrap_or(false);
|
||||
|
||||
let (container_exists, running, model) = match find_stt_container().await? {
|
||||
Some((_, state, env_model)) => (true, state == "running", env_model),
|
||||
None => (false, false, settings.model.clone()),
|
||||
};
|
||||
|
||||
Ok(SttStatus {
|
||||
container_exists,
|
||||
running,
|
||||
port: settings.port,
|
||||
model,
|
||||
image_exists,
|
||||
})
|
||||
}
|
||||
|
||||
async fn find_stt_container() -> Result<Option<(String, String, String)>, String> {
|
||||
let docker = get_docker()?;
|
||||
|
||||
let filters: HashMap<String, Vec<String>> = HashMap::from([(
|
||||
"name".to_string(),
|
||||
vec![format!("/{}", STT_CONTAINER_NAME)],
|
||||
)]);
|
||||
|
||||
let containers = docker
|
||||
.list_containers(Some(ListContainersOptions {
|
||||
all: true,
|
||||
filters,
|
||||
..Default::default()
|
||||
}))
|
||||
.await
|
||||
.map_err(|e| format!("Failed to list containers: {}", e))?;
|
||||
|
||||
if let Some(container) = containers.first() {
|
||||
let id = container.id.clone().unwrap_or_default();
|
||||
let state = container.state.clone().unwrap_or_default();
|
||||
|
||||
// Extract WHISPER_MODEL from container env
|
||||
let model = container
|
||||
.labels
|
||||
.as_ref()
|
||||
.and_then(|l| l.get("triple-c.stt.model"))
|
||||
.cloned()
|
||||
.unwrap_or_else(|| "tiny".to_string());
|
||||
|
||||
return Ok(Some((id, state, model)));
|
||||
}
|
||||
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
async fn create_stt_container(settings: &SttSettings) -> Result<String, String> {
|
||||
let docker = get_docker()?;
|
||||
|
||||
// Try local image first, fall back to registry
|
||||
let image = if super::image::image_exists(STT_LOCAL_IMAGE).await.unwrap_or(false) {
|
||||
STT_LOCAL_IMAGE.to_string()
|
||||
} else if super::image::image_exists(STT_REGISTRY_IMAGE).await.unwrap_or(false) {
|
||||
STT_REGISTRY_IMAGE.to_string()
|
||||
} else {
|
||||
return Err("STT image not found. Please build or pull the image first.".to_string());
|
||||
};
|
||||
|
||||
let port_binding = PortBinding {
|
||||
host_ip: Some("127.0.0.1".to_string()),
|
||||
host_port: Some(settings.port.to_string()),
|
||||
};
|
||||
|
||||
let mut port_bindings = HashMap::new();
|
||||
port_bindings.insert(
|
||||
"9876/tcp".to_string(),
|
||||
Some(vec![port_binding]),
|
||||
);
|
||||
|
||||
let host_config = HostConfig {
|
||||
port_bindings: Some(port_bindings),
|
||||
mounts: Some(vec![Mount {
|
||||
target: Some("/root/.cache/huggingface".to_string()),
|
||||
source: Some(STT_MODEL_VOLUME.to_string()),
|
||||
typ: Some(MountTypeEnum::VOLUME),
|
||||
..Default::default()
|
||||
}]),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let mut labels = HashMap::new();
|
||||
labels.insert(
|
||||
"triple-c.stt.model".to_string(),
|
||||
settings.model.clone(),
|
||||
);
|
||||
labels.insert(
|
||||
"triple-c.stt.port".to_string(),
|
||||
settings.port.to_string(),
|
||||
);
|
||||
|
||||
let config = Config {
|
||||
image: Some(image),
|
||||
env: Some(vec![format!("WHISPER_MODEL={}", settings.model)]),
|
||||
host_config: Some(host_config),
|
||||
labels: Some(labels),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let options = CreateContainerOptions {
|
||||
name: STT_CONTAINER_NAME,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let response = docker
|
||||
.create_container(Some(options), config)
|
||||
.await
|
||||
.map_err(|e| format!("Failed to create STT container: {}", e))?;
|
||||
|
||||
Ok(response.id)
|
||||
}
|
||||
|
||||
pub async fn ensure_stt_running(settings: &SttSettings) -> Result<SttStatus, String> {
|
||||
let docker = get_docker()?;
|
||||
|
||||
// Check if container exists and if settings match
|
||||
if let Some((id, state, model)) = find_stt_container().await? {
|
||||
let needs_recreate = model != settings.model;
|
||||
|
||||
if needs_recreate {
|
||||
// Settings changed, recreate
|
||||
if state == "running" {
|
||||
docker
|
||||
.stop_container(&id, None::<StopContainerOptions>)
|
||||
.await
|
||||
.map_err(|e| format!("Failed to stop STT container: {}", e))?;
|
||||
}
|
||||
docker
|
||||
.remove_container(
|
||||
&id,
|
||||
Some(RemoveContainerOptions {
|
||||
force: true,
|
||||
..Default::default()
|
||||
}),
|
||||
)
|
||||
.await
|
||||
.map_err(|e| format!("Failed to remove STT container: {}", e))?;
|
||||
} else if state == "running" {
|
||||
return get_stt_status(settings).await;
|
||||
} else {
|
||||
// Container exists but stopped, start it
|
||||
docker
|
||||
.start_container(&id, None::<StartContainerOptions<String>>)
|
||||
.await
|
||||
.map_err(|e| format!("Failed to start STT container: {}", e))?;
|
||||
return get_stt_status(settings).await;
|
||||
}
|
||||
}
|
||||
|
||||
// Create and start new container
|
||||
let id = create_stt_container(settings).await?;
|
||||
docker
|
||||
.start_container(&id, None::<StartContainerOptions<String>>)
|
||||
.await
|
||||
.map_err(|e| format!("Failed to start STT container: {}", e))?;
|
||||
|
||||
get_stt_status(settings).await
|
||||
}
|
||||
|
||||
pub async fn stop_stt_container() -> Result<(), String> {
|
||||
let docker = get_docker()?;
|
||||
|
||||
if let Some((id, state, _)) = find_stt_container().await? {
|
||||
if state == "running" {
|
||||
docker
|
||||
.stop_container(&id, None::<StopContainerOptions>)
|
||||
.await
|
||||
.map_err(|e| format!("Failed to stop STT container: {}", e))?;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn pull_stt_image<F>(on_progress: F) -> Result<(), String>
|
||||
where
|
||||
F: Fn(String) + Send + 'static,
|
||||
{
|
||||
super::image::pull_image(STT_REGISTRY_IMAGE, on_progress).await
|
||||
}
|
||||
|
||||
pub async fn build_stt_image<F>(on_progress: F) -> Result<(), String>
|
||||
where
|
||||
F: Fn(String) + Send + 'static,
|
||||
{
|
||||
let docker = get_docker()?;
|
||||
|
||||
let tar_bytes = create_stt_build_context()
|
||||
.map_err(|e| format!("Failed to create STT build context: {}", e))?;
|
||||
|
||||
let options = BuildImageOptions {
|
||||
t: STT_LOCAL_IMAGE,
|
||||
rm: true,
|
||||
forcerm: true,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let mut stream = docker.build_image(options, None, Some(tar_bytes.into()));
|
||||
|
||||
while let Some(result) = stream.next().await {
|
||||
match result {
|
||||
Ok(output) => {
|
||||
if let Some(stream) = output.stream {
|
||||
on_progress(stream);
|
||||
}
|
||||
if let Some(error) = output.error {
|
||||
return Err(format!("Build error: {}", error));
|
||||
}
|
||||
}
|
||||
Err(e) => return Err(format!("Build stream error: {}", e)),
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn create_stt_build_context() -> Result<Vec<u8>, std::io::Error> {
|
||||
let mut buf = Vec::new();
|
||||
{
|
||||
let mut archive = tar::Builder::new(&mut buf);
|
||||
|
||||
let mut dockerfile_header = tar::Header::new_gnu();
|
||||
dockerfile_header.set_size(STT_DOCKERFILE.len() as u64);
|
||||
dockerfile_header.set_mode(0o644);
|
||||
dockerfile_header.set_cksum();
|
||||
archive.append_data(&mut dockerfile_header, "Dockerfile", STT_DOCKERFILE.as_bytes())?;
|
||||
|
||||
let mut server_header = tar::Header::new_gnu();
|
||||
server_header.set_size(STT_SERVER.len() as u64);
|
||||
server_header.set_mode(0o644);
|
||||
server_header.set_cksum();
|
||||
archive.append_data(&mut server_header, "server.py", STT_SERVER.as_bytes())?;
|
||||
|
||||
archive.finish()?;
|
||||
}
|
||||
|
||||
let _ = buf.flush();
|
||||
Ok(buf)
|
||||
}
|
||||
|
||||
@@ -3,44 +3,55 @@ mod docker;
|
||||
mod logging;
|
||||
mod models;
|
||||
mod storage;
|
||||
pub mod web_terminal;
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use docker::exec::ExecSessionManager;
|
||||
use storage::projects_store::ProjectsStore;
|
||||
use storage::settings_store::SettingsStore;
|
||||
use storage::mcp_store::McpStore;
|
||||
use tauri::Manager;
|
||||
use web_terminal::WebTerminalServer;
|
||||
|
||||
pub struct AppState {
|
||||
pub projects_store: ProjectsStore,
|
||||
pub settings_store: SettingsStore,
|
||||
pub mcp_store: McpStore,
|
||||
pub exec_manager: ExecSessionManager,
|
||||
pub projects_store: Arc<ProjectsStore>,
|
||||
pub settings_store: Arc<SettingsStore>,
|
||||
pub mcp_store: Arc<McpStore>,
|
||||
pub exec_manager: Arc<ExecSessionManager>,
|
||||
pub web_terminal_server: Arc<tokio::sync::Mutex<Option<WebTerminalServer>>>,
|
||||
}
|
||||
|
||||
pub fn run() {
|
||||
logging::init();
|
||||
|
||||
let projects_store = match ProjectsStore::new() {
|
||||
let projects_store = Arc::new(match ProjectsStore::new() {
|
||||
Ok(s) => s,
|
||||
Err(e) => {
|
||||
log::error!("Failed to initialize projects store: {}", e);
|
||||
panic!("Failed to initialize projects store: {}", e);
|
||||
}
|
||||
};
|
||||
let settings_store = match SettingsStore::new() {
|
||||
});
|
||||
let settings_store = Arc::new(match SettingsStore::new() {
|
||||
Ok(s) => s,
|
||||
Err(e) => {
|
||||
log::error!("Failed to initialize settings store: {}", e);
|
||||
panic!("Failed to initialize settings store: {}", e);
|
||||
}
|
||||
};
|
||||
let mcp_store = match McpStore::new() {
|
||||
});
|
||||
let mcp_store = Arc::new(match McpStore::new() {
|
||||
Ok(s) => s,
|
||||
Err(e) => {
|
||||
log::error!("Failed to initialize MCP store: {}", e);
|
||||
panic!("Failed to initialize MCP store: {}", e);
|
||||
}
|
||||
};
|
||||
});
|
||||
let exec_manager = Arc::new(ExecSessionManager::new());
|
||||
|
||||
// Clone Arcs for the setup closure (web terminal auto-start)
|
||||
let projects_store_setup = projects_store.clone();
|
||||
let settings_store_setup = settings_store.clone();
|
||||
let exec_manager_setup = exec_manager.clone();
|
||||
|
||||
tauri::Builder::default()
|
||||
.plugin(tauri_plugin_store::Builder::default().build())
|
||||
@@ -50,9 +61,10 @@ pub fn run() {
|
||||
projects_store,
|
||||
settings_store,
|
||||
mcp_store,
|
||||
exec_manager: ExecSessionManager::new(),
|
||||
exec_manager,
|
||||
web_terminal_server: Arc::new(tokio::sync::Mutex::new(None)),
|
||||
})
|
||||
.setup(|app| {
|
||||
.setup(move |app| {
|
||||
match tauri::image::Image::from_bytes(include_bytes!("../icons/icon.png")) {
|
||||
Ok(icon) => {
|
||||
if let Some(window) = app.get_webview_window("main") {
|
||||
@@ -63,12 +75,75 @@ pub fn run() {
|
||||
log::error!("Failed to load window icon: {}", e);
|
||||
}
|
||||
}
|
||||
|
||||
// Auto-start web terminal server if enabled in settings
|
||||
let settings = settings_store_setup.get();
|
||||
if settings.web_terminal.enabled {
|
||||
if let Some(token) = &settings.web_terminal.access_token {
|
||||
let token = token.clone();
|
||||
let port = settings.web_terminal.port;
|
||||
let exec_mgr = exec_manager_setup.clone();
|
||||
let proj_store = projects_store_setup.clone();
|
||||
let set_store = settings_store_setup.clone();
|
||||
let state = app.state::<AppState>();
|
||||
let web_server_mutex = state.web_terminal_server.clone();
|
||||
|
||||
tauri::async_runtime::spawn(async move {
|
||||
match WebTerminalServer::start(
|
||||
port,
|
||||
token,
|
||||
exec_mgr,
|
||||
proj_store,
|
||||
set_store,
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(server) => {
|
||||
let mut guard = web_server_mutex.lock().await;
|
||||
*guard = Some(server);
|
||||
log::info!("Web terminal auto-started on port {}", port);
|
||||
}
|
||||
Err(e) => {
|
||||
log::error!("Failed to auto-start web terminal: {}", e);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Auto-start STT container if enabled in settings
|
||||
if settings.stt.enabled {
|
||||
let stt_settings = settings.stt.clone();
|
||||
tauri::async_runtime::spawn(async move {
|
||||
match docker::stt::ensure_stt_running(&stt_settings).await {
|
||||
Ok(status) => {
|
||||
if status.running {
|
||||
log::info!("STT container auto-started on port {}", stt_settings.port);
|
||||
} else {
|
||||
log::warn!("STT auto-start: container not running after ensure_stt_running");
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
log::error!("Failed to auto-start STT container: {}", e);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
Ok(())
|
||||
})
|
||||
.on_window_event(|window, event| {
|
||||
if let tauri::WindowEvent::CloseRequested { .. } = event {
|
||||
let state = window.state::<AppState>();
|
||||
tauri::async_runtime::block_on(async {
|
||||
// Stop web terminal server
|
||||
let mut server_guard = state.web_terminal_server.lock().await;
|
||||
if let Some(server) = server_guard.take() {
|
||||
server.stop();
|
||||
}
|
||||
// Stop STT container
|
||||
let _ = docker::stt::stop_stt_container().await;
|
||||
// Close all exec sessions
|
||||
state.exec_manager.close_all_sessions().await;
|
||||
});
|
||||
}
|
||||
@@ -119,6 +194,21 @@ pub fn run() {
|
||||
// Updates
|
||||
commands::update_commands::get_app_version,
|
||||
commands::update_commands::check_for_updates,
|
||||
commands::update_commands::check_image_update,
|
||||
// Help
|
||||
commands::help_commands::get_help_content,
|
||||
// Web Terminal
|
||||
commands::web_terminal_commands::start_web_terminal,
|
||||
commands::web_terminal_commands::stop_web_terminal,
|
||||
commands::web_terminal_commands::get_web_terminal_status,
|
||||
commands::web_terminal_commands::regenerate_web_terminal_token,
|
||||
// STT
|
||||
commands::stt_commands::get_stt_status,
|
||||
commands::stt_commands::start_stt,
|
||||
commands::stt_commands::stop_stt,
|
||||
commands::stt_commands::build_stt_image,
|
||||
commands::stt_commands::pull_stt_image,
|
||||
commands::stt_commands::transcribe_audio,
|
||||
])
|
||||
.run(tauri::generate_context!())
|
||||
.expect("error while running tauri application");
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::project::EnvVar;
|
||||
use super::project::{ClaudeCodeSettings, EnvVar};
|
||||
|
||||
fn default_true() -> bool {
|
||||
true
|
||||
@@ -72,6 +72,78 @@ pub struct AppSettings {
|
||||
pub timezone: Option<String>,
|
||||
#[serde(default)]
|
||||
pub default_microphone: Option<String>,
|
||||
#[serde(default)]
|
||||
pub dismissed_image_digest: Option<String>,
|
||||
#[serde(default)]
|
||||
pub web_terminal: WebTerminalSettings,
|
||||
#[serde(default)]
|
||||
pub stt: SttSettings,
|
||||
#[serde(default)]
|
||||
pub global_claude_code_settings: Option<ClaudeCodeSettings>,
|
||||
}
|
||||
|
||||
fn default_stt_model() -> String {
|
||||
"tiny".to_string()
|
||||
}
|
||||
|
||||
fn default_stt_port() -> u16 {
|
||||
9876
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct SttSettings {
|
||||
#[serde(default)]
|
||||
pub enabled: bool,
|
||||
#[serde(default = "default_stt_model")]
|
||||
pub model: String,
|
||||
#[serde(default = "default_stt_port")]
|
||||
pub port: u16,
|
||||
#[serde(default)]
|
||||
pub language: Option<String>,
|
||||
}
|
||||
|
||||
impl Default for SttSettings {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
enabled: false,
|
||||
model: default_stt_model(),
|
||||
port: 9876,
|
||||
language: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct SttStatus {
|
||||
pub container_exists: bool,
|
||||
pub running: bool,
|
||||
pub port: u16,
|
||||
pub model: String,
|
||||
pub image_exists: bool,
|
||||
}
|
||||
|
||||
fn default_web_terminal_port() -> u16 {
|
||||
7681
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct WebTerminalSettings {
|
||||
#[serde(default)]
|
||||
pub enabled: bool,
|
||||
#[serde(default = "default_web_terminal_port")]
|
||||
pub port: u16,
|
||||
#[serde(default)]
|
||||
pub access_token: Option<String>,
|
||||
}
|
||||
|
||||
impl Default for WebTerminalSettings {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
enabled: false,
|
||||
port: 7681,
|
||||
access_token: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for AppSettings {
|
||||
@@ -90,6 +162,10 @@ impl Default for AppSettings {
|
||||
dismissed_update_version: None,
|
||||
timezone: None,
|
||||
default_microphone: None,
|
||||
dismissed_image_digest: None,
|
||||
web_terminal: WebTerminalSettings::default(),
|
||||
stt: SttSettings::default(),
|
||||
global_claude_code_settings: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,7 +12,7 @@ pub struct ContainerInfo {
|
||||
|
||||
pub const LOCAL_IMAGE_NAME: &str = "triple-c";
|
||||
pub const IMAGE_TAG: &str = "latest";
|
||||
pub const REGISTRY_IMAGE: &str = "repo.anhonesthost.net/cybercovellc/triple-c/triple-c-sandbox:latest";
|
||||
pub const REGISTRY_IMAGE: &str = "ghcr.io/shadowdao/triple-c-sandbox:latest";
|
||||
|
||||
pub fn local_build_image_name() -> String {
|
||||
format!("{LOCAL_IMAGE_NAME}:{IMAGE_TAG}")
|
||||
|
||||
@@ -24,6 +24,40 @@ fn default_protocol() -> String {
|
||||
"tcp".to_string()
|
||||
}
|
||||
|
||||
fn default_full_permissions() -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
/// Settings for Claude Code CLI behavior inside the container.
|
||||
/// These map to Claude Code env vars and ~/.claude/settings.json entries.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
|
||||
pub struct ClaudeCodeSettings {
|
||||
/// TUI rendering mode: None = default, Some("fullscreen") = flicker-free alt-screen
|
||||
#[serde(default)]
|
||||
pub tui_mode: Option<String>,
|
||||
/// Effort level: None = default, Some("low"|"medium"|"high")
|
||||
#[serde(default)]
|
||||
pub effort: Option<String>,
|
||||
/// Disable auto-scroll in fullscreen TUI mode
|
||||
#[serde(default)]
|
||||
pub auto_scroll_disabled: bool,
|
||||
/// Enable focus mode (collapsed tool output)
|
||||
#[serde(default)]
|
||||
pub focus_mode: bool,
|
||||
/// Show thinking summaries in responses
|
||||
#[serde(default)]
|
||||
pub show_thinking_summaries: bool,
|
||||
/// Enable session recap when returning to a session
|
||||
#[serde(default)]
|
||||
pub enable_session_recap: bool,
|
||||
/// Strip credentials from subprocess environments
|
||||
#[serde(default)]
|
||||
pub env_scrub: bool,
|
||||
/// Enable 1-hour prompt cache TTL (vs default 5-minute)
|
||||
#[serde(default)]
|
||||
pub prompt_caching_1h: bool,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct Project {
|
||||
pub id: String,
|
||||
@@ -31,13 +65,17 @@ pub struct Project {
|
||||
pub paths: Vec<ProjectPath>,
|
||||
pub container_id: Option<String>,
|
||||
pub status: ProjectStatus,
|
||||
pub auth_mode: AuthMode,
|
||||
#[serde(alias = "auth_mode")]
|
||||
pub backend: Backend,
|
||||
pub bedrock_config: Option<BedrockConfig>,
|
||||
pub ollama_config: Option<OllamaConfig>,
|
||||
pub litellm_config: Option<LiteLlmConfig>,
|
||||
#[serde(alias = "litellm_config")]
|
||||
pub openai_compatible_config: Option<OpenAiCompatibleConfig>,
|
||||
pub allow_docker_access: bool,
|
||||
#[serde(default)]
|
||||
pub mission_control_enabled: bool,
|
||||
#[serde(default = "default_full_permissions")]
|
||||
pub full_permissions: bool,
|
||||
pub ssh_key_path: Option<String>,
|
||||
#[serde(skip_serializing, default)]
|
||||
pub git_token: Option<String>,
|
||||
@@ -51,6 +89,8 @@ pub struct Project {
|
||||
pub claude_instructions: Option<String>,
|
||||
#[serde(default)]
|
||||
pub enabled_mcp_servers: Vec<String>,
|
||||
#[serde(default)]
|
||||
pub claude_code_settings: Option<ClaudeCodeSettings>,
|
||||
pub created_at: String,
|
||||
pub updated_at: String,
|
||||
}
|
||||
@@ -65,23 +105,24 @@ pub enum ProjectStatus {
|
||||
Error,
|
||||
}
|
||||
|
||||
/// How the project authenticates with Claude.
|
||||
/// - `Anthropic`: User runs `claude login` inside the container (OAuth via Anthropic Console,
|
||||
/// persisted in the config volume)
|
||||
/// - `Bedrock`: Uses AWS Bedrock with per-project AWS credentials
|
||||
/// Which AI model backend/provider the project uses.
|
||||
/// - `Anthropic`: Direct Anthropic API (user runs `claude login` inside the container)
|
||||
/// - `Bedrock`: AWS Bedrock with per-project AWS credentials
|
||||
/// - `Ollama`: Local or remote Ollama server
|
||||
/// - `OpenAiCompatible`: Any OpenAI API-compatible endpoint (e.g., LiteLLM, vLLM, etc.)
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum AuthMode {
|
||||
pub enum Backend {
|
||||
/// Backward compat: old projects stored as "login" or "api_key" map to Anthropic.
|
||||
#[serde(alias = "login", alias = "api_key")]
|
||||
Anthropic,
|
||||
Bedrock,
|
||||
Ollama,
|
||||
#[serde(alias = "litellm")]
|
||||
LiteLlm,
|
||||
#[serde(alias = "lite_llm", alias = "litellm")]
|
||||
OpenAiCompatible,
|
||||
}
|
||||
|
||||
impl Default for AuthMode {
|
||||
impl Default for Backend {
|
||||
fn default() -> Self {
|
||||
Self::Anthropic
|
||||
}
|
||||
@@ -130,13 +171,14 @@ pub struct OllamaConfig {
|
||||
pub model_id: Option<String>,
|
||||
}
|
||||
|
||||
/// LiteLLM gateway configuration for a project.
|
||||
/// LiteLLM translates Anthropic API calls to 100+ model providers.
|
||||
/// OpenAI Compatible endpoint configuration for a project.
|
||||
/// Routes Anthropic API calls through any OpenAI API-compatible endpoint
|
||||
/// (e.g., LiteLLM, vLLM, or other compatible gateways).
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct LiteLlmConfig {
|
||||
/// The base URL of the LiteLLM proxy (e.g., "http://host.docker.internal:4000" or "https://litellm.example.com")
|
||||
pub struct OpenAiCompatibleConfig {
|
||||
/// The base URL of the OpenAI-compatible endpoint (e.g., "http://host.docker.internal:4000" or "https://api.example.com")
|
||||
pub base_url: String,
|
||||
/// API key for the LiteLLM proxy
|
||||
/// API key for the OpenAI-compatible endpoint
|
||||
#[serde(skip_serializing, default)]
|
||||
pub api_key: Option<String>,
|
||||
/// Optional model override
|
||||
@@ -152,12 +194,13 @@ impl Project {
|
||||
paths,
|
||||
container_id: None,
|
||||
status: ProjectStatus::Stopped,
|
||||
auth_mode: AuthMode::default(),
|
||||
backend: Backend::default(),
|
||||
bedrock_config: None,
|
||||
ollama_config: None,
|
||||
litellm_config: None,
|
||||
openai_compatible_config: None,
|
||||
allow_docker_access: false,
|
||||
mission_control_enabled: false,
|
||||
full_permissions: false,
|
||||
ssh_key_path: None,
|
||||
git_token: None,
|
||||
git_user_name: None,
|
||||
@@ -166,6 +209,7 @@ impl Project {
|
||||
port_mappings: Vec::new(),
|
||||
claude_instructions: None,
|
||||
enabled_mcp_servers: Vec::new(),
|
||||
claude_code_settings: None,
|
||||
created_at: now.clone(),
|
||||
updated_at: now,
|
||||
}
|
||||
|
||||
@@ -18,20 +18,31 @@ pub struct ReleaseAsset {
|
||||
pub size: u64,
|
||||
}
|
||||
|
||||
/// Gitea API release response (internal).
|
||||
/// GitHub API release response (internal).
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
pub struct GiteaRelease {
|
||||
pub struct GitHubRelease {
|
||||
pub tag_name: String,
|
||||
pub html_url: String,
|
||||
pub body: String,
|
||||
pub assets: Vec<GiteaAsset>,
|
||||
pub assets: Vec<GitHubAsset>,
|
||||
pub published_at: String,
|
||||
}
|
||||
|
||||
/// Gitea API asset response (internal).
|
||||
/// GitHub API asset response (internal).
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
pub struct GiteaAsset {
|
||||
pub struct GitHubAsset {
|
||||
pub name: String,
|
||||
pub browser_download_url: String,
|
||||
pub size: u64,
|
||||
}
|
||||
|
||||
/// Info returned to the frontend about an available container image update.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct ImageUpdateInfo {
|
||||
/// The remote digest (e.g. sha256:abc...)
|
||||
pub remote_digest: String,
|
||||
/// The local digest, if available
|
||||
pub local_digest: Option<String>,
|
||||
/// When the remote image was last updated (if known)
|
||||
pub remote_updated_at: Option<String>,
|
||||
}
|
||||
|
||||
4
app/src-tauri/src/web_terminal/mod.rs
Normal file
4
app/src-tauri/src/web_terminal/mod.rs
Normal file
@@ -0,0 +1,4 @@
|
||||
pub mod server;
|
||||
mod ws_handler;
|
||||
|
||||
pub use server::WebTerminalServer;
|
||||
155
app/src-tauri/src/web_terminal/server.rs
Normal file
155
app/src-tauri/src/web_terminal/server.rs
Normal file
@@ -0,0 +1,155 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use axum::extract::{Query, State as AxumState, WebSocketUpgrade};
|
||||
use axum::response::{Html, IntoResponse};
|
||||
use axum::routing::get;
|
||||
use axum::Router;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tokio::sync::watch;
|
||||
use tower_http::cors::CorsLayer;
|
||||
|
||||
use crate::docker::exec::ExecSessionManager;
|
||||
use crate::storage::projects_store::ProjectsStore;
|
||||
use crate::storage::settings_store::SettingsStore;
|
||||
|
||||
use super::ws_handler;
|
||||
|
||||
/// Shared state passed to all axum handlers.
|
||||
pub struct WebTerminalState {
|
||||
pub exec_manager: Arc<ExecSessionManager>,
|
||||
pub projects_store: Arc<ProjectsStore>,
|
||||
pub settings_store: Arc<SettingsStore>,
|
||||
pub access_token: String,
|
||||
}
|
||||
|
||||
/// Manages the lifecycle of the axum HTTP+WS server.
|
||||
pub struct WebTerminalServer {
|
||||
shutdown_tx: watch::Sender<()>,
|
||||
port: u16,
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
pub struct TokenQuery {
|
||||
pub token: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
struct ProjectInfo {
|
||||
id: String,
|
||||
name: String,
|
||||
status: String,
|
||||
}
|
||||
|
||||
impl WebTerminalServer {
|
||||
/// Start the web terminal server on the given port.
|
||||
pub async fn start(
|
||||
port: u16,
|
||||
access_token: String,
|
||||
exec_manager: Arc<ExecSessionManager>,
|
||||
projects_store: Arc<ProjectsStore>,
|
||||
settings_store: Arc<SettingsStore>,
|
||||
) -> Result<Self, String> {
|
||||
let (shutdown_tx, shutdown_rx) = watch::channel(());
|
||||
|
||||
let shared_state = Arc::new(WebTerminalState {
|
||||
exec_manager,
|
||||
projects_store,
|
||||
settings_store,
|
||||
access_token,
|
||||
});
|
||||
|
||||
let app = Router::new()
|
||||
.route("/", get(serve_html))
|
||||
.route("/ws", get(ws_upgrade))
|
||||
.route("/api/projects", get(list_projects))
|
||||
.layer(CorsLayer::permissive())
|
||||
.with_state(shared_state);
|
||||
|
||||
let addr = format!("0.0.0.0:{}", port);
|
||||
let listener = tokio::net::TcpListener::bind(&addr)
|
||||
.await
|
||||
.map_err(|e| format!("Failed to bind web terminal to {}: {}", addr, e))?;
|
||||
|
||||
log::info!("Web terminal server listening on {}", addr);
|
||||
|
||||
let mut shutdown_rx_clone = shutdown_rx.clone();
|
||||
tokio::spawn(async move {
|
||||
axum::serve(listener, app)
|
||||
.with_graceful_shutdown(async move {
|
||||
let _ = shutdown_rx_clone.changed().await;
|
||||
})
|
||||
.await
|
||||
.unwrap_or_else(|e| {
|
||||
log::error!("Web terminal server error: {}", e);
|
||||
});
|
||||
log::info!("Web terminal server shut down");
|
||||
});
|
||||
|
||||
Ok(Self { shutdown_tx, port })
|
||||
}
|
||||
|
||||
/// Stop the server gracefully.
|
||||
pub fn stop(&self) {
|
||||
log::info!("Stopping web terminal server on port {}", self.port);
|
||||
let _ = self.shutdown_tx.send(());
|
||||
}
|
||||
|
||||
pub fn port(&self) -> u16 {
|
||||
self.port
|
||||
}
|
||||
}
|
||||
|
||||
/// Serve the embedded HTML page.
|
||||
async fn serve_html() -> Html<&'static str> {
|
||||
Html(include_str!("terminal.html"))
|
||||
}
|
||||
|
||||
/// Validate token from query params.
|
||||
fn validate_token(state: &WebTerminalState, token: &Option<String>) -> bool {
|
||||
match token {
|
||||
Some(t) => t == &state.access_token,
|
||||
None => false,
|
||||
}
|
||||
}
|
||||
|
||||
/// WebSocket upgrade handler.
|
||||
async fn ws_upgrade(
|
||||
ws: WebSocketUpgrade,
|
||||
AxumState(state): AxumState<Arc<WebTerminalState>>,
|
||||
Query(query): Query<TokenQuery>,
|
||||
) -> impl IntoResponse {
|
||||
if !validate_token(&state, &query.token) {
|
||||
return (axum::http::StatusCode::UNAUTHORIZED, "Invalid token").into_response();
|
||||
}
|
||||
ws.on_upgrade(move |socket| ws_handler::handle_connection(socket, state))
|
||||
.into_response()
|
||||
}
|
||||
|
||||
/// List running projects (REST endpoint).
|
||||
async fn list_projects(
|
||||
AxumState(state): AxumState<Arc<WebTerminalState>>,
|
||||
Query(query): Query<TokenQuery>,
|
||||
) -> impl IntoResponse {
|
||||
if !validate_token(&state, &query.token) {
|
||||
return (
|
||||
axum::http::StatusCode::UNAUTHORIZED,
|
||||
axum::Json(serde_json::json!({"error": "Invalid token"})),
|
||||
)
|
||||
.into_response();
|
||||
}
|
||||
|
||||
let projects = state.projects_store.list();
|
||||
let infos: Vec<ProjectInfo> = projects
|
||||
.into_iter()
|
||||
.map(|p| ProjectInfo {
|
||||
id: p.id,
|
||||
name: p.name,
|
||||
status: serde_json::to_value(&p.status)
|
||||
.ok()
|
||||
.and_then(|v| v.as_str().map(|s| s.to_string()))
|
||||
.unwrap_or_else(|| "unknown".to_string()),
|
||||
})
|
||||
.collect();
|
||||
|
||||
axum::Json(infos).into_response()
|
||||
}
|
||||
669
app/src-tauri/src/web_terminal/terminal.html
Normal file
669
app/src-tauri/src/web_terminal/terminal.html
Normal file
@@ -0,0 +1,669 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no">
|
||||
<title>Triple-C Web Terminal</title>
|
||||
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/@xterm/xterm@5.5.0/css/xterm.min.css">
|
||||
<script src="https://cdn.jsdelivr.net/npm/@xterm/xterm@5.5.0/lib/xterm.min.js"></script>
|
||||
<script src="https://cdn.jsdelivr.net/npm/@xterm/addon-fit@0.10.0/lib/addon-fit.min.js"></script>
|
||||
<script src="https://cdn.jsdelivr.net/npm/@xterm/addon-web-links@0.11.0/lib/addon-web-links.min.js"></script>
|
||||
<style>
|
||||
:root {
|
||||
--bg-primary: #1a1b26;
|
||||
--bg-secondary: #24283b;
|
||||
--bg-tertiary: #2f3347;
|
||||
--text-primary: #c0caf5;
|
||||
--text-secondary: #565f89;
|
||||
--accent: #7aa2f7;
|
||||
--accent-hover: #89b4fa;
|
||||
--border: #3b3f57;
|
||||
--success: #9ece6a;
|
||||
--warning: #e0af68;
|
||||
--error: #f7768e;
|
||||
}
|
||||
|
||||
* { margin: 0; padding: 0; box-sizing: border-box; }
|
||||
|
||||
body {
|
||||
background: var(--bg-primary);
|
||||
color: var(--text-primary);
|
||||
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
|
||||
height: 100vh;
|
||||
height: 100dvh; /* dynamic viewport height — shrinks when mobile keyboard opens */
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
overflow: hidden;
|
||||
-webkit-tap-highlight-color: transparent;
|
||||
}
|
||||
|
||||
/* ── Top Bar ─────────────────────────────── */
|
||||
.topbar {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 8px;
|
||||
padding: 6px 12px;
|
||||
background: var(--bg-secondary);
|
||||
border-bottom: 1px solid var(--border);
|
||||
flex-shrink: 0;
|
||||
min-height: 42px;
|
||||
position: sticky;
|
||||
top: 0;
|
||||
z-index: 20;
|
||||
}
|
||||
|
||||
.topbar-title {
|
||||
font-size: 13px;
|
||||
font-weight: 600;
|
||||
color: var(--accent);
|
||||
white-space: nowrap;
|
||||
margin-right: 8px;
|
||||
}
|
||||
|
||||
.status-dot {
|
||||
width: 8px;
|
||||
height: 8px;
|
||||
border-radius: 50%;
|
||||
background: var(--error);
|
||||
flex-shrink: 0;
|
||||
}
|
||||
.status-dot.connected { background: var(--success); }
|
||||
.status-dot.reconnecting { background: var(--warning); animation: pulse 1s infinite; }
|
||||
|
||||
@keyframes pulse { 0%,100% { opacity: 1; } 50% { opacity: 0.4; } }
|
||||
|
||||
select, button {
|
||||
font-size: 12px;
|
||||
padding: 4px 8px;
|
||||
background: var(--bg-tertiary);
|
||||
color: var(--text-primary);
|
||||
border: 1px solid var(--border);
|
||||
border-radius: 4px;
|
||||
cursor: pointer;
|
||||
touch-action: manipulation;
|
||||
}
|
||||
|
||||
select:focus, button:focus { outline: none; border-color: var(--accent); }
|
||||
button:hover { background: var(--border); }
|
||||
button:active { background: var(--accent); color: var(--bg-primary); }
|
||||
|
||||
.btn-new {
|
||||
font-weight: 600;
|
||||
min-width: 44px;
|
||||
min-height: 32px;
|
||||
}
|
||||
|
||||
/* ── Tab Bar ─────────────────────────────── */
|
||||
.tabbar {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 1px;
|
||||
padding: 0 8px;
|
||||
background: var(--bg-secondary);
|
||||
border-bottom: 1px solid var(--border);
|
||||
flex-shrink: 0;
|
||||
overflow-x: auto;
|
||||
-webkit-overflow-scrolling: touch;
|
||||
min-height: 32px;
|
||||
position: sticky;
|
||||
top: 42px; /* below .topbar min-height */
|
||||
z-index: 20;
|
||||
}
|
||||
|
||||
.tab {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 6px;
|
||||
padding: 6px 12px;
|
||||
font-size: 11px;
|
||||
color: var(--text-secondary);
|
||||
cursor: pointer;
|
||||
white-space: nowrap;
|
||||
border-bottom: 2px solid transparent;
|
||||
transition: all 0.15s;
|
||||
min-height: 32px;
|
||||
}
|
||||
|
||||
.tab:hover { color: var(--text-primary); }
|
||||
.tab.active {
|
||||
color: var(--text-primary);
|
||||
border-bottom-color: var(--accent);
|
||||
}
|
||||
|
||||
.tab-close {
|
||||
display: inline-flex;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
width: 16px;
|
||||
height: 16px;
|
||||
border-radius: 3px;
|
||||
font-size: 12px;
|
||||
line-height: 1;
|
||||
color: var(--text-secondary);
|
||||
background: none;
|
||||
border: none;
|
||||
padding: 0;
|
||||
min-width: unset;
|
||||
min-height: unset;
|
||||
}
|
||||
.tab-close:hover { background: var(--error); color: white; }
|
||||
|
||||
/* ── Terminal Area ───────────────────────── */
|
||||
.terminal-area {
|
||||
flex: 1;
|
||||
position: relative;
|
||||
overflow: hidden;
|
||||
}
|
||||
|
||||
.terminal-container {
|
||||
position: absolute;
|
||||
inset: 0;
|
||||
display: none;
|
||||
padding: 4px;
|
||||
}
|
||||
.terminal-container.active { display: block; }
|
||||
|
||||
/* ── Input Bar (mobile/tablet) ──────────── */
|
||||
.input-bar {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 4px;
|
||||
padding: 6px 8px;
|
||||
background: var(--bg-secondary);
|
||||
border-top: 1px solid var(--border);
|
||||
flex-shrink: 0;
|
||||
}
|
||||
|
||||
.input-bar input {
|
||||
flex: 1;
|
||||
min-width: 0;
|
||||
padding: 8px 10px;
|
||||
font-size: 16px; /* prevents iOS zoom on focus */
|
||||
font-family: 'Cascadia Code', 'Fira Code', 'JetBrains Mono', 'Menlo', monospace;
|
||||
background: var(--bg-primary);
|
||||
color: var(--text-primary);
|
||||
border: 1px solid var(--border);
|
||||
border-radius: 6px;
|
||||
outline: none;
|
||||
-webkit-appearance: none;
|
||||
}
|
||||
.input-bar input:focus { border-color: var(--accent); }
|
||||
|
||||
.input-bar .key-btn {
|
||||
padding: 8px 10px;
|
||||
font-size: 11px;
|
||||
font-weight: 600;
|
||||
min-width: 40px;
|
||||
min-height: 36px;
|
||||
border-radius: 6px;
|
||||
white-space: nowrap;
|
||||
}
|
||||
|
||||
/* ── Scroll-to-bottom FAB ──────────────── */
|
||||
.scroll-bottom-btn {
|
||||
position: absolute;
|
||||
bottom: 12px;
|
||||
right: 16px;
|
||||
width: 36px;
|
||||
height: 36px;
|
||||
border-radius: 50%;
|
||||
background: var(--accent);
|
||||
color: var(--bg-primary);
|
||||
border: none;
|
||||
font-size: 18px;
|
||||
font-weight: bold;
|
||||
display: none;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
cursor: pointer;
|
||||
box-shadow: 0 2px 8px rgba(0,0,0,0.4);
|
||||
z-index: 10;
|
||||
padding: 0;
|
||||
min-width: unset;
|
||||
min-height: unset;
|
||||
line-height: 1;
|
||||
}
|
||||
.scroll-bottom-btn:hover { background: var(--accent-hover); }
|
||||
.scroll-bottom-btn.visible { display: flex; }
|
||||
|
||||
/* ── Empty State ─────────────────────────── */
|
||||
.empty-state {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
height: 100%;
|
||||
color: var(--text-secondary);
|
||||
font-size: 14px;
|
||||
gap: 12px;
|
||||
}
|
||||
|
||||
.empty-state .hint {
|
||||
font-size: 12px;
|
||||
opacity: 0.7;
|
||||
}
|
||||
|
||||
/* ── Scrollbar ───────────────────────────── */
|
||||
::-webkit-scrollbar { width: 6px; height: 6px; }
|
||||
::-webkit-scrollbar-track { background: transparent; }
|
||||
::-webkit-scrollbar-thumb { background: var(--border); border-radius: 3px; }
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
|
||||
<!-- Top Bar -->
|
||||
<div class="topbar">
|
||||
<span class="topbar-title">Triple-C</span>
|
||||
<span class="status-dot" id="statusDot"></span>
|
||||
<select id="projectSelect" style="flex:1; max-width:240px;">
|
||||
<option value="">Select project...</option>
|
||||
</select>
|
||||
<button class="btn-new" id="btnClaude" title="New Claude session">Claude</button>
|
||||
<button class="btn-new" id="btnBash" title="New Bash session">Bash</button>
|
||||
</div>
|
||||
|
||||
<!-- Tab Bar -->
|
||||
<div class="tabbar" id="tabbar"></div>
|
||||
|
||||
<!-- Terminal Area -->
|
||||
<div class="terminal-area" id="terminalArea">
|
||||
<div class="empty-state" id="emptyState">
|
||||
<div>Select a project and open a terminal session</div>
|
||||
<div class="hint">Use the buttons above to start a Claude or Bash session</div>
|
||||
</div>
|
||||
<button class="scroll-bottom-btn" id="scrollBottomBtn" title="Scroll to bottom">↓</button>
|
||||
</div>
|
||||
|
||||
<!-- Input Bar for mobile/tablet -->
|
||||
<div class="input-bar" id="inputBar">
|
||||
<input type="text" id="mobileInput" placeholder="Type here..."
|
||||
autocomplete="off" autocorrect="off" autocapitalize="off" spellcheck="false"
|
||||
enterkeyhint="send" inputmode="text">
|
||||
<button class="key-btn" id="btnEnter">Enter</button>
|
||||
<button class="key-btn" id="btnTab">Tab</button>
|
||||
<button class="key-btn" id="btnCtrlC">^C</button>
|
||||
</div>
|
||||
|
||||
<script>
|
||||
(function() {
|
||||
'use strict';
|
||||
|
||||
// ── State ──────────────────────────────────
|
||||
const params = new URLSearchParams(window.location.search);
|
||||
const TOKEN = params.get('token') || '';
|
||||
let ws = null;
|
||||
let reconnectTimer = null;
|
||||
let sessions = {}; // { sessionId: { term, fitAddon, projectName, type, containerId } }
|
||||
let activeSessionId = null;
|
||||
|
||||
// ── DOM refs ───────────────────────────────
|
||||
const statusDot = document.getElementById('statusDot');
|
||||
const projectSelect = document.getElementById('projectSelect');
|
||||
const btnClaude = document.getElementById('btnClaude');
|
||||
const btnBash = document.getElementById('btnBash');
|
||||
const tabbar = document.getElementById('tabbar');
|
||||
const terminalArea = document.getElementById('terminalArea');
|
||||
const emptyState = document.getElementById('emptyState');
|
||||
const mobileInput = document.getElementById('mobileInput');
|
||||
const btnEnter = document.getElementById('btnEnter');
|
||||
const btnTab = document.getElementById('btnTab');
|
||||
const btnCtrlC = document.getElementById('btnCtrlC');
|
||||
const scrollBottomBtn = document.getElementById('scrollBottomBtn');
|
||||
|
||||
// ── WebSocket ──────────────────────────────
|
||||
function connect() {
|
||||
const proto = location.protocol === 'https:' ? 'wss:' : 'ws:';
|
||||
const url = `${proto}//${location.host}/ws?token=${encodeURIComponent(TOKEN)}`;
|
||||
ws = new WebSocket(url);
|
||||
|
||||
ws.onopen = () => {
|
||||
statusDot.className = 'status-dot connected';
|
||||
clearTimeout(reconnectTimer);
|
||||
send({ type: 'list_projects' });
|
||||
// Start keepalive
|
||||
ws._pingInterval = setInterval(() => send({ type: 'ping' }), 30000);
|
||||
};
|
||||
|
||||
ws.onmessage = (evt) => {
|
||||
try {
|
||||
const msg = JSON.parse(evt.data);
|
||||
handleMessage(msg);
|
||||
} catch (e) {
|
||||
console.error('Parse error:', e);
|
||||
}
|
||||
};
|
||||
|
||||
ws.onclose = () => {
|
||||
statusDot.className = 'status-dot reconnecting';
|
||||
if (ws && ws._pingInterval) clearInterval(ws._pingInterval);
|
||||
reconnectTimer = setTimeout(connect, 2000);
|
||||
};
|
||||
|
||||
ws.onerror = () => {
|
||||
ws.close();
|
||||
};
|
||||
}
|
||||
|
||||
function send(msg) {
|
||||
if (ws && ws.readyState === WebSocket.OPEN) {
|
||||
ws.send(JSON.stringify(msg));
|
||||
}
|
||||
}
|
||||
|
||||
// ── Message handling ───────────────────────
|
||||
function handleMessage(msg) {
|
||||
switch (msg.type) {
|
||||
case 'projects':
|
||||
updateProjectList(msg.projects);
|
||||
break;
|
||||
case 'opened':
|
||||
onSessionOpened(msg.session_id, msg.project_name);
|
||||
break;
|
||||
case 'output':
|
||||
onSessionOutput(msg.session_id, msg.data);
|
||||
break;
|
||||
case 'exit':
|
||||
onSessionExit(msg.session_id);
|
||||
break;
|
||||
case 'error':
|
||||
console.error('Server error:', msg.message);
|
||||
// Show in active terminal if available
|
||||
if (activeSessionId && sessions[activeSessionId]) {
|
||||
sessions[activeSessionId].term.writeln(`\r\n\x1b[31mError: ${msg.message}\x1b[0m`);
|
||||
}
|
||||
break;
|
||||
case 'pong':
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
function updateProjectList(projects) {
|
||||
const current = projectSelect.value;
|
||||
projectSelect.innerHTML = '<option value="">Select project...</option>';
|
||||
projects.forEach(p => {
|
||||
const opt = document.createElement('option');
|
||||
opt.value = p.id;
|
||||
opt.textContent = `${p.name} (${p.status})`;
|
||||
opt.disabled = p.status !== 'running';
|
||||
projectSelect.appendChild(opt);
|
||||
});
|
||||
// Restore selection if still valid
|
||||
if (current) projectSelect.value = current;
|
||||
}
|
||||
|
||||
// ── Session management ─────────────────────
|
||||
let pendingSessionType = null;
|
||||
|
||||
function openSession(type) {
|
||||
const projectId = projectSelect.value;
|
||||
if (!projectId) {
|
||||
alert('Please select a running project first.');
|
||||
return;
|
||||
}
|
||||
pendingSessionType = type;
|
||||
send({
|
||||
type: 'open',
|
||||
project_id: projectId,
|
||||
session_type: type,
|
||||
});
|
||||
}
|
||||
|
||||
function onSessionOpened(sessionId, projectName) {
|
||||
const sessionType = pendingSessionType || 'claude';
|
||||
pendingSessionType = null;
|
||||
|
||||
// Create terminal
|
||||
const term = new Terminal({
|
||||
theme: {
|
||||
background: '#1a1b26',
|
||||
foreground: '#c0caf5',
|
||||
cursor: '#c0caf5',
|
||||
selectionBackground: '#33467c',
|
||||
black: '#15161e',
|
||||
red: '#f7768e',
|
||||
green: '#9ece6a',
|
||||
yellow: '#e0af68',
|
||||
blue: '#7aa2f7',
|
||||
magenta: '#bb9af7',
|
||||
cyan: '#7dcfff',
|
||||
white: '#a9b1d6',
|
||||
brightBlack: '#414868',
|
||||
brightRed: '#f7768e',
|
||||
brightGreen: '#9ece6a',
|
||||
brightYellow: '#e0af68',
|
||||
brightBlue: '#7aa2f7',
|
||||
brightMagenta: '#bb9af7',
|
||||
brightCyan: '#7dcfff',
|
||||
brightWhite: '#c0caf5',
|
||||
},
|
||||
fontSize: 14,
|
||||
fontFamily: "'Cascadia Code', 'Fira Code', 'JetBrains Mono', 'Menlo', monospace",
|
||||
cursorBlink: true,
|
||||
allowProposedApi: true,
|
||||
});
|
||||
|
||||
const fitAddon = new FitAddon.FitAddon();
|
||||
term.loadAddon(fitAddon);
|
||||
|
||||
const webLinksAddon = new WebLinksAddon.WebLinksAddon();
|
||||
term.loadAddon(webLinksAddon);
|
||||
|
||||
// Create container div
|
||||
const container = document.createElement('div');
|
||||
container.className = 'terminal-container';
|
||||
container.id = `term-${sessionId}`;
|
||||
terminalArea.appendChild(container);
|
||||
|
||||
term.open(container);
|
||||
fitAddon.fit();
|
||||
|
||||
// Send initial resize
|
||||
send({
|
||||
type: 'resize',
|
||||
session_id: sessionId,
|
||||
cols: term.cols,
|
||||
rows: term.rows,
|
||||
});
|
||||
|
||||
// Handle user input
|
||||
term.onData(data => {
|
||||
const bytes = new TextEncoder().encode(data);
|
||||
const b64 = btoa(String.fromCharCode(...bytes));
|
||||
send({
|
||||
type: 'input',
|
||||
session_id: sessionId,
|
||||
data: b64,
|
||||
});
|
||||
});
|
||||
|
||||
// Track scroll position for scroll-to-bottom button
|
||||
term.onScroll(() => updateScrollButton());
|
||||
|
||||
// Store session
|
||||
sessions[sessionId] = { term, fitAddon, projectName, type: sessionType, container };
|
||||
|
||||
// Add tab and switch to it
|
||||
addTab(sessionId, projectName, sessionType);
|
||||
switchToSession(sessionId);
|
||||
|
||||
emptyState.style.display = 'none';
|
||||
}
|
||||
|
||||
function onSessionOutput(sessionId, b64data) {
|
||||
const session = sessions[sessionId];
|
||||
if (!session) return;
|
||||
const bytes = Uint8Array.from(atob(b64data), c => c.charCodeAt(0));
|
||||
session.term.write(bytes);
|
||||
// Update scroll button if this is the active session
|
||||
if (sessionId === activeSessionId) updateScrollButton();
|
||||
}
|
||||
|
||||
function onSessionExit(sessionId) {
|
||||
const session = sessions[sessionId];
|
||||
if (!session) return;
|
||||
session.term.writeln('\r\n\x1b[90m[Session ended]\x1b[0m');
|
||||
}
|
||||
|
||||
function closeSession(sessionId) {
|
||||
send({ type: 'close', session_id: sessionId });
|
||||
removeSession(sessionId);
|
||||
}
|
||||
|
||||
function removeSession(sessionId) {
|
||||
const session = sessions[sessionId];
|
||||
if (!session) return;
|
||||
|
||||
session.term.dispose();
|
||||
session.container.remove();
|
||||
delete sessions[sessionId];
|
||||
|
||||
// Remove tab
|
||||
const tab = document.getElementById(`tab-${sessionId}`);
|
||||
if (tab) tab.remove();
|
||||
|
||||
// Switch to another session or show empty state
|
||||
const remaining = Object.keys(sessions);
|
||||
if (remaining.length > 0) {
|
||||
switchToSession(remaining[remaining.length - 1]);
|
||||
} else {
|
||||
activeSessionId = null;
|
||||
emptyState.style.display = '';
|
||||
}
|
||||
}
|
||||
|
||||
// ── Tab bar ────────────────────────────────
|
||||
function addTab(sessionId, projectName, sessionType) {
|
||||
const tab = document.createElement('div');
|
||||
tab.className = 'tab';
|
||||
tab.id = `tab-${sessionId}`;
|
||||
|
||||
const label = document.createElement('span');
|
||||
label.textContent = `${projectName} (${sessionType})`;
|
||||
tab.appendChild(label);
|
||||
|
||||
const close = document.createElement('button');
|
||||
close.className = 'tab-close';
|
||||
close.textContent = '\u00d7';
|
||||
close.onclick = (e) => { e.stopPropagation(); closeSession(sessionId); };
|
||||
tab.appendChild(close);
|
||||
|
||||
tab.onclick = () => switchToSession(sessionId);
|
||||
tabbar.appendChild(tab);
|
||||
}
|
||||
|
||||
function switchToSession(sessionId) {
|
||||
activeSessionId = sessionId;
|
||||
|
||||
// Update tab styles
|
||||
document.querySelectorAll('.tab').forEach(t => t.classList.remove('active'));
|
||||
const tab = document.getElementById(`tab-${sessionId}`);
|
||||
if (tab) tab.classList.add('active');
|
||||
|
||||
// Show/hide terminal containers
|
||||
document.querySelectorAll('.terminal-container').forEach(c => c.classList.remove('active'));
|
||||
const container = document.getElementById(`term-${sessionId}`);
|
||||
if (container) {
|
||||
container.classList.add('active');
|
||||
const session = sessions[sessionId];
|
||||
if (session) {
|
||||
// Fit after making visible
|
||||
requestAnimationFrame(() => {
|
||||
session.fitAddon.fit();
|
||||
session.term.focus();
|
||||
updateScrollButton();
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ── Resize handling ────────────────────────
|
||||
function handleResize() {
|
||||
if (activeSessionId && sessions[activeSessionId]) {
|
||||
const session = sessions[activeSessionId];
|
||||
session.fitAddon.fit();
|
||||
send({
|
||||
type: 'resize',
|
||||
session_id: activeSessionId,
|
||||
cols: session.term.cols,
|
||||
rows: session.term.rows,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
let resizeTimeout;
|
||||
window.addEventListener('resize', () => {
|
||||
clearTimeout(resizeTimeout);
|
||||
resizeTimeout = setTimeout(handleResize, 100);
|
||||
});
|
||||
|
||||
// ── Send helper ─────────────────────────────
|
||||
function sendTerminalInput(str) {
|
||||
if (!activeSessionId) return;
|
||||
const bytes = new TextEncoder().encode(str);
|
||||
const b64 = btoa(String.fromCharCode(...bytes));
|
||||
send({
|
||||
type: 'input',
|
||||
session_id: activeSessionId,
|
||||
data: b64,
|
||||
});
|
||||
}
|
||||
|
||||
// ── Input bar (mobile/tablet) ──────────────
|
||||
// Send characters immediately, bypassing IME composition buffering.
|
||||
// Clearing value on each input event cancels any active composition.
|
||||
mobileInput.addEventListener('input', () => {
|
||||
const val = mobileInput.value;
|
||||
if (val) {
|
||||
sendTerminalInput(val);
|
||||
mobileInput.value = '';
|
||||
}
|
||||
});
|
||||
|
||||
// Catch Enter in the input field itself
|
||||
mobileInput.addEventListener('keydown', (e) => {
|
||||
if (e.key === 'Enter') {
|
||||
e.preventDefault();
|
||||
const val = mobileInput.value;
|
||||
if (val) {
|
||||
sendTerminalInput(val);
|
||||
mobileInput.value = '';
|
||||
}
|
||||
sendTerminalInput('\r');
|
||||
} else if (e.key === 'Tab') {
|
||||
e.preventDefault();
|
||||
sendTerminalInput('\t');
|
||||
}
|
||||
});
|
||||
|
||||
btnEnter.onclick = () => { sendTerminalInput('\r'); mobileInput.focus(); };
|
||||
btnTab.onclick = () => { sendTerminalInput('\t'); mobileInput.focus(); };
|
||||
btnCtrlC.onclick = () => { sendTerminalInput('\x03'); mobileInput.focus(); };
|
||||
|
||||
// ── Scroll to bottom ──────────────────────
|
||||
function updateScrollButton() {
|
||||
if (!activeSessionId || !sessions[activeSessionId]) {
|
||||
scrollBottomBtn.classList.remove('visible');
|
||||
return;
|
||||
}
|
||||
const term = sessions[activeSessionId].term;
|
||||
const isAtBottom = term.buffer.active.viewportY >= term.buffer.active.baseY;
|
||||
scrollBottomBtn.classList.toggle('visible', !isAtBottom);
|
||||
}
|
||||
|
||||
scrollBottomBtn.onclick = () => {
|
||||
if (activeSessionId && sessions[activeSessionId]) {
|
||||
sessions[activeSessionId].term.scrollToBottom();
|
||||
scrollBottomBtn.classList.remove('visible');
|
||||
}
|
||||
};
|
||||
|
||||
// ── Event listeners ────────────────────────
|
||||
btnClaude.onclick = () => openSession('claude');
|
||||
btnBash.onclick = () => openSession('bash');
|
||||
|
||||
// ── Init ───────────────────────────────────
|
||||
connect();
|
||||
})();
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
||||
323
app/src-tauri/src/web_terminal/ws_handler.rs
Normal file
323
app/src-tauri/src/web_terminal/ws_handler.rs
Normal file
@@ -0,0 +1,323 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use axum::extract::ws::{Message, WebSocket};
|
||||
use base64::engine::general_purpose::STANDARD as BASE64;
|
||||
use base64::Engine;
|
||||
use futures_util::{SinkExt, StreamExt};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tokio::sync::mpsc;
|
||||
|
||||
use crate::commands::aws_commands;
|
||||
use crate::models::{Backend, BedrockAuthMethod, Project, ProjectStatus};
|
||||
|
||||
use super::server::WebTerminalState;
|
||||
|
||||
// ── Wire protocol types ──────────────────────────────────────────────
|
||||
|
||||
#[derive(Deserialize)]
|
||||
#[serde(tag = "type", rename_all = "snake_case")]
|
||||
enum ClientMessage {
|
||||
ListProjects,
|
||||
Open {
|
||||
project_id: String,
|
||||
session_type: Option<String>,
|
||||
},
|
||||
Input {
|
||||
session_id: String,
|
||||
data: String, // base64
|
||||
},
|
||||
Resize {
|
||||
session_id: String,
|
||||
cols: u16,
|
||||
rows: u16,
|
||||
},
|
||||
Close {
|
||||
session_id: String,
|
||||
},
|
||||
Ping,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
#[serde(tag = "type", rename_all = "snake_case")]
|
||||
enum ServerMessage {
|
||||
Projects {
|
||||
projects: Vec<ProjectEntry>,
|
||||
},
|
||||
Opened {
|
||||
session_id: String,
|
||||
project_name: String,
|
||||
},
|
||||
Output {
|
||||
session_id: String,
|
||||
data: String, // base64
|
||||
},
|
||||
Exit {
|
||||
session_id: String,
|
||||
},
|
||||
Error {
|
||||
message: String,
|
||||
},
|
||||
Pong,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
struct ProjectEntry {
|
||||
id: String,
|
||||
name: String,
|
||||
status: String,
|
||||
}
|
||||
|
||||
// ── Connection handler ───────────────────────────────────────────────
|
||||
|
||||
pub async fn handle_connection(socket: WebSocket, state: Arc<WebTerminalState>) {
|
||||
let (mut ws_tx, mut ws_rx) = socket.split();
|
||||
|
||||
// Channel for sending messages from session output tasks → WS writer
|
||||
let (out_tx, mut out_rx) = mpsc::unbounded_channel::<ServerMessage>();
|
||||
|
||||
// Track session IDs owned by this connection for cleanup
|
||||
let owned_sessions: Arc<tokio::sync::Mutex<Vec<String>>> =
|
||||
Arc::new(tokio::sync::Mutex::new(Vec::new()));
|
||||
|
||||
// Writer task: serializes ServerMessages and sends as WS text frames
|
||||
let writer_handle = tokio::spawn(async move {
|
||||
while let Some(msg) = out_rx.recv().await {
|
||||
if let Ok(json) = serde_json::to_string(&msg) {
|
||||
if ws_tx.send(Message::Text(json.into())).await.is_err() {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// Reader loop: parse incoming messages and dispatch
|
||||
while let Some(Ok(msg)) = ws_rx.next().await {
|
||||
let text = match &msg {
|
||||
Message::Text(t) => t.to_string(),
|
||||
Message::Close(_) => break,
|
||||
_ => continue,
|
||||
};
|
||||
|
||||
let client_msg: ClientMessage = match serde_json::from_str(&text) {
|
||||
Ok(m) => m,
|
||||
Err(e) => {
|
||||
let _ = out_tx.send(ServerMessage::Error {
|
||||
message: format!("Invalid message: {}", e),
|
||||
});
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
match client_msg {
|
||||
ClientMessage::Ping => {
|
||||
let _ = out_tx.send(ServerMessage::Pong);
|
||||
}
|
||||
|
||||
ClientMessage::ListProjects => {
|
||||
let projects = state.projects_store.list();
|
||||
let entries: Vec<ProjectEntry> = projects
|
||||
.into_iter()
|
||||
.map(|p| ProjectEntry {
|
||||
id: p.id,
|
||||
name: p.name,
|
||||
status: serde_json::to_value(&p.status)
|
||||
.ok()
|
||||
.and_then(|v| v.as_str().map(|s| s.to_string()))
|
||||
.unwrap_or_else(|| "unknown".to_string()),
|
||||
})
|
||||
.collect();
|
||||
let _ = out_tx.send(ServerMessage::Projects { projects: entries });
|
||||
}
|
||||
|
||||
ClientMessage::Open {
|
||||
project_id,
|
||||
session_type,
|
||||
} => {
|
||||
let result = handle_open(
|
||||
&state,
|
||||
&project_id,
|
||||
session_type.as_deref(),
|
||||
&out_tx,
|
||||
&owned_sessions,
|
||||
)
|
||||
.await;
|
||||
if let Err(e) = result {
|
||||
let _ = out_tx.send(ServerMessage::Error { message: e });
|
||||
}
|
||||
}
|
||||
|
||||
ClientMessage::Input { session_id, data } => {
|
||||
match BASE64.decode(&data) {
|
||||
Ok(bytes) => {
|
||||
if let Err(e) = state.exec_manager.send_input(&session_id, bytes).await {
|
||||
let _ = out_tx.send(ServerMessage::Error {
|
||||
message: format!("Input error: {}", e),
|
||||
});
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
let _ = out_tx.send(ServerMessage::Error {
|
||||
message: format!("Base64 decode error: {}", e),
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ClientMessage::Resize {
|
||||
session_id,
|
||||
cols,
|
||||
rows,
|
||||
} => {
|
||||
if let Err(e) = state.exec_manager.resize(&session_id, cols, rows).await {
|
||||
let _ = out_tx.send(ServerMessage::Error {
|
||||
message: format!("Resize error: {}", e),
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
ClientMessage::Close { session_id } => {
|
||||
state.exec_manager.close_session(&session_id).await;
|
||||
// Remove from owned list
|
||||
owned_sessions
|
||||
.lock()
|
||||
.await
|
||||
.retain(|id| id != &session_id);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Connection closed — clean up all owned sessions
|
||||
log::info!("Web terminal WebSocket disconnected, cleaning up sessions");
|
||||
let sessions = owned_sessions.lock().await.clone();
|
||||
for session_id in sessions {
|
||||
state.exec_manager.close_session(&session_id).await;
|
||||
}
|
||||
|
||||
writer_handle.abort();
|
||||
}
|
||||
|
||||
/// Build the command for a terminal session, mirroring terminal_commands.rs logic.
|
||||
fn build_terminal_cmd(project: &Project, settings_store: &crate::storage::settings_store::SettingsStore) -> Vec<String> {
|
||||
let is_bedrock_profile = project.backend == Backend::Bedrock
|
||||
&& project
|
||||
.bedrock_config
|
||||
.as_ref()
|
||||
.map(|b| b.auth_method == BedrockAuthMethod::Profile)
|
||||
.unwrap_or(false);
|
||||
|
||||
if !is_bedrock_profile {
|
||||
let mut cmd = vec!["claude".to_string()];
|
||||
if project.full_permissions {
|
||||
cmd.push("--dangerously-skip-permissions".to_string());
|
||||
}
|
||||
return cmd;
|
||||
}
|
||||
|
||||
let profile = aws_commands::resolve_profile_for_project(
|
||||
project,
|
||||
settings_store.get().global_aws.aws_profile.as_deref(),
|
||||
);
|
||||
|
||||
let claude_cmd = if project.full_permissions {
|
||||
"exec claude --dangerously-skip-permissions"
|
||||
} else {
|
||||
"exec claude"
|
||||
};
|
||||
|
||||
let script = format!(
|
||||
r#"
|
||||
echo "Validating AWS session for profile '{profile}'..."
|
||||
if aws sts get-caller-identity --profile '{profile}' >/dev/null 2>&1; then
|
||||
echo "AWS session valid."
|
||||
else
|
||||
echo "AWS session expired or invalid."
|
||||
if aws configure get sso_start_url --profile '{profile}' >/dev/null 2>&1 || \
|
||||
aws configure get sso_session --profile '{profile}' >/dev/null 2>&1; then
|
||||
echo "Starting SSO login..."
|
||||
echo ""
|
||||
triple-c-sso-refresh
|
||||
if [ $? -ne 0 ]; then
|
||||
echo ""
|
||||
echo "SSO login failed or was cancelled. Starting Claude anyway..."
|
||||
echo "You may see authentication errors."
|
||||
echo ""
|
||||
fi
|
||||
else
|
||||
echo "Profile '{profile}' does not use SSO. Check your AWS credentials."
|
||||
echo "Starting Claude anyway..."
|
||||
echo ""
|
||||
fi
|
||||
fi
|
||||
{claude_cmd}
|
||||
"#,
|
||||
profile = profile,
|
||||
claude_cmd = claude_cmd
|
||||
);
|
||||
|
||||
vec!["bash".to_string(), "-c".to_string(), script]
|
||||
}
|
||||
|
||||
/// Open a new terminal session for a project.
|
||||
async fn handle_open(
|
||||
state: &WebTerminalState,
|
||||
project_id: &str,
|
||||
session_type: Option<&str>,
|
||||
out_tx: &mpsc::UnboundedSender<ServerMessage>,
|
||||
owned_sessions: &Arc<tokio::sync::Mutex<Vec<String>>>,
|
||||
) -> Result<(), String> {
|
||||
let project = state
|
||||
.projects_store
|
||||
.get(project_id)
|
||||
.ok_or_else(|| format!("Project {} not found", project_id))?;
|
||||
|
||||
if project.status != ProjectStatus::Running {
|
||||
return Err(format!("Project '{}' is not running", project.name));
|
||||
}
|
||||
|
||||
let container_id = project
|
||||
.container_id
|
||||
.as_ref()
|
||||
.ok_or_else(|| "Container not running".to_string())?;
|
||||
|
||||
let cmd = match session_type {
|
||||
Some("bash") => vec!["bash".to_string(), "-l".to_string()],
|
||||
_ => build_terminal_cmd(&project, &state.settings_store),
|
||||
};
|
||||
|
||||
let session_id = uuid::Uuid::new_v4().to_string();
|
||||
let project_name = project.name.clone();
|
||||
|
||||
// Set up output routing through the WS channel
|
||||
let out_tx_output = out_tx.clone();
|
||||
let session_id_output = session_id.clone();
|
||||
let on_output = move |data: Vec<u8>| {
|
||||
let encoded = BASE64.encode(&data);
|
||||
let _ = out_tx_output.send(ServerMessage::Output {
|
||||
session_id: session_id_output.clone(),
|
||||
data: encoded,
|
||||
});
|
||||
};
|
||||
|
||||
let out_tx_exit = out_tx.clone();
|
||||
let session_id_exit = session_id.clone();
|
||||
let on_exit = Box::new(move || {
|
||||
let _ = out_tx_exit.send(ServerMessage::Exit {
|
||||
session_id: session_id_exit,
|
||||
});
|
||||
});
|
||||
|
||||
state
|
||||
.exec_manager
|
||||
.create_session(container_id, &session_id, cmd, on_output, on_exit)
|
||||
.await?;
|
||||
|
||||
// Track this session for cleanup on disconnect
|
||||
owned_sessions.lock().await.push(session_id.clone());
|
||||
|
||||
let _ = out_tx.send(ServerMessage::Opened {
|
||||
session_id,
|
||||
project_name,
|
||||
});
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -1,7 +1,7 @@
|
||||
{
|
||||
"$schema": "https://raw.githubusercontent.com/tauri-apps/tauri/dev/crates/tauri-cli/schema.json",
|
||||
"productName": "Triple-C",
|
||||
"version": "0.2.0",
|
||||
"version": "0.3.0",
|
||||
"identifier": "com.triple-c.desktop",
|
||||
"build": {
|
||||
"beforeDevCommand": "npm run dev",
|
||||
|
||||
@@ -17,7 +17,7 @@ export default function App() {
|
||||
const { loadSettings } = useSettings();
|
||||
const { refresh } = useProjects();
|
||||
const { refresh: refreshMcp } = useMcpServers();
|
||||
const { loadVersion, checkForUpdates, startPeriodicCheck } = useUpdates();
|
||||
const { loadVersion, checkForUpdates, checkImageUpdate, startPeriodicCheck } = useUpdates();
|
||||
const { sessions, activeSessionId, setProjects } = useAppState(
|
||||
useShallow(s => ({ sessions: s.sessions, activeSessionId: s.activeSessionId, setProjects: s.setProjects }))
|
||||
);
|
||||
@@ -46,7 +46,10 @@ export default function App() {
|
||||
|
||||
// Update detection
|
||||
loadVersion();
|
||||
const updateTimer = setTimeout(() => checkForUpdates(), 3000);
|
||||
const updateTimer = setTimeout(() => {
|
||||
checkForUpdates();
|
||||
checkImageUpdate();
|
||||
}, 3000);
|
||||
const cleanup = startPeriodicCheck();
|
||||
return () => {
|
||||
clearTimeout(updateTimer);
|
||||
|
||||
218
app/src/components/layout/HelpDialog.tsx
Normal file
218
app/src/components/layout/HelpDialog.tsx
Normal file
@@ -0,0 +1,218 @@
|
||||
import { useEffect, useRef, useCallback, useState } from "react";
|
||||
import { getHelpContent } from "../../lib/tauri-commands";
|
||||
|
||||
interface Props {
|
||||
onClose: () => void;
|
||||
}
|
||||
|
||||
/** Convert header text to a URL-friendly slug for anchor links. */
|
||||
function slugify(text: string): string {
|
||||
return text
|
||||
.toLowerCase()
|
||||
.replace(/<[^>]+>/g, "") // strip HTML tags (e.g. from inline code)
|
||||
.replace(/[^\w\s-]/g, "") // remove non-word chars except spaces/dashes
|
||||
.replace(/\s+/g, "-") // spaces to dashes
|
||||
.replace(/-+/g, "-") // collapse consecutive dashes
|
||||
.replace(/^-|-$/g, ""); // trim leading/trailing dashes
|
||||
}
|
||||
|
||||
/** Simple markdown-to-HTML converter for the help content. */
|
||||
function renderMarkdown(md: string): string {
|
||||
let html = md;
|
||||
|
||||
// Normalize line endings
|
||||
html = html.replace(/\r\n/g, "\n");
|
||||
|
||||
// Escape HTML entities (but we'll re-introduce tags below)
|
||||
html = html.replace(/&/g, "&").replace(/</g, "<").replace(/>/g, ">");
|
||||
|
||||
// Fenced code blocks (```...```)
|
||||
html = html.replace(/```(\w*)\n([\s\S]*?)```/g, (_m, _lang, code) => {
|
||||
return `<pre class="help-code-block"><code>${code.trimEnd()}</code></pre>`;
|
||||
});
|
||||
|
||||
// Inline code (`...`)
|
||||
html = html.replace(/`([^`]+)`/g, '<code class="help-inline-code">$1</code>');
|
||||
|
||||
// Tables
|
||||
html = html.replace(
|
||||
/(?:^|\n)(\|.+\|)\n(\|[\s:|-]+\|)\n((?:\|.+\|\n?)+)/g,
|
||||
(_m, headerRow: string, _sep: string, bodyRows: string) => {
|
||||
const headers = headerRow
|
||||
.split("|")
|
||||
.slice(1, -1)
|
||||
.map((c: string) => `<th>${c.trim()}</th>`)
|
||||
.join("");
|
||||
const rows = bodyRows
|
||||
.trim()
|
||||
.split("\n")
|
||||
.map((row: string) => {
|
||||
const cells = row
|
||||
.split("|")
|
||||
.slice(1, -1)
|
||||
.map((c: string) => `<td>${c.trim()}</td>`)
|
||||
.join("");
|
||||
return `<tr>${cells}</tr>`;
|
||||
})
|
||||
.join("");
|
||||
return `<table class="help-table"><thead><tr>${headers}</tr></thead><tbody>${rows}</tbody></table>`;
|
||||
},
|
||||
);
|
||||
|
||||
// Blockquotes (> ...)
|
||||
html = html.replace(/(?:^|\n)> (.+)/g, '<blockquote class="help-blockquote">$1</blockquote>');
|
||||
// Merge adjacent blockquotes
|
||||
html = html.replace(/<\/blockquote>\s*<blockquote class="help-blockquote">/g, "<br/>");
|
||||
|
||||
// Horizontal rules
|
||||
html = html.replace(/\n---\n/g, '<hr class="help-hr"/>');
|
||||
|
||||
// Headers with id attributes for anchor navigation (process from h4 down to h1)
|
||||
html = html.replace(/^#### (.+)$/gm, (_m, title) => `<h4 class="help-h4" id="${slugify(title)}">${title}</h4>`);
|
||||
html = html.replace(/^### (.+)$/gm, (_m, title) => `<h3 class="help-h3" id="${slugify(title)}">${title}</h3>`);
|
||||
html = html.replace(/^## (.+)$/gm, (_m, title) => `<h2 class="help-h2" id="${slugify(title)}">${title}</h2>`);
|
||||
html = html.replace(/^# (.+)$/gm, (_m, title) => `<h1 class="help-h1" id="${slugify(title)}">${title}</h1>`);
|
||||
|
||||
// Bold (**...**)
|
||||
html = html.replace(/\*\*([^*]+)\*\*/g, "<strong>$1</strong>");
|
||||
|
||||
// Italic (*...*)
|
||||
html = html.replace(/\*([^*]+)\*/g, "<em>$1</em>");
|
||||
|
||||
// Markdown-style anchor links [text](#anchor)
|
||||
html = html.replace(
|
||||
/\[([^\]]+)\]\(#([^)]+)\)/g,
|
||||
'<a class="help-link" href="#$2">$1</a>',
|
||||
);
|
||||
|
||||
// Markdown-style external links [text](url)
|
||||
html = html.replace(
|
||||
/\[([^\]]+)\]\((https?:\/\/[^)]+)\)/g,
|
||||
'<a class="help-link" href="$2" target="_blank" rel="noopener noreferrer">$1</a>',
|
||||
);
|
||||
|
||||
// Unordered list items (- ...)
|
||||
// Group consecutive list items
|
||||
html = html.replace(/((?:^|\n)- .+(?:\n- .+)*)/g, (block) => {
|
||||
const items = block
|
||||
.trim()
|
||||
.split("\n")
|
||||
.map((line) => `<li>${line.replace(/^- /, "")}</li>`)
|
||||
.join("");
|
||||
return `<ul class="help-ul">${items}</ul>`;
|
||||
});
|
||||
|
||||
// Ordered list items (1. ...)
|
||||
html = html.replace(/((?:^|\n)\d+\. .+(?:\n\d+\. .+)*)/g, (block) => {
|
||||
const items = block
|
||||
.trim()
|
||||
.split("\n")
|
||||
.map((line) => `<li>${line.replace(/^\d+\. /, "")}</li>`)
|
||||
.join("");
|
||||
return `<ol class="help-ol">${items}</ol>`;
|
||||
});
|
||||
|
||||
// Links - convert bare URLs to clickable links (skip already-wrapped URLs)
|
||||
html = html.replace(
|
||||
/(?<!="|'>)(https?:\/\/[^\s<)]+)/g,
|
||||
'<a class="help-link" href="$1" target="_blank" rel="noopener noreferrer">$1</a>',
|
||||
);
|
||||
|
||||
// Wrap remaining loose text lines in paragraphs
|
||||
// Split by double newlines for paragraph breaks
|
||||
const blocks = html.split(/\n\n+/);
|
||||
html = blocks
|
||||
.map((block) => {
|
||||
const trimmed = block.trim();
|
||||
if (!trimmed) return "";
|
||||
// Don't wrap blocks that are already HTML elements
|
||||
if (
|
||||
/^<(h[1-4]|ul|ol|pre|table|blockquote|hr)/.test(trimmed)
|
||||
) {
|
||||
return trimmed;
|
||||
}
|
||||
// Wrap in paragraph, replacing single newlines with <br/>
|
||||
return `<p class="help-p">${trimmed.replace(/\n/g, "<br/>")}</p>`;
|
||||
})
|
||||
.join("\n");
|
||||
|
||||
return html;
|
||||
}
|
||||
|
||||
export default function HelpDialog({ onClose }: Props) {
|
||||
const overlayRef = useRef<HTMLDivElement>(null);
|
||||
const contentRef = useRef<HTMLDivElement>(null);
|
||||
const [markdown, setMarkdown] = useState<string | null>(null);
|
||||
const [error, setError] = useState<string | null>(null);
|
||||
|
||||
useEffect(() => {
|
||||
const handleKeyDown = (e: KeyboardEvent) => {
|
||||
if (e.key === "Escape") onClose();
|
||||
};
|
||||
document.addEventListener("keydown", handleKeyDown);
|
||||
return () => document.removeEventListener("keydown", handleKeyDown);
|
||||
}, [onClose]);
|
||||
|
||||
useEffect(() => {
|
||||
getHelpContent()
|
||||
.then(setMarkdown)
|
||||
.catch((e) => setError(String(e)));
|
||||
}, []);
|
||||
|
||||
const handleOverlayClick = useCallback(
|
||||
(e: React.MouseEvent<HTMLDivElement>) => {
|
||||
if (e.target === overlayRef.current) onClose();
|
||||
},
|
||||
[onClose],
|
||||
);
|
||||
|
||||
// Handle anchor link clicks to scroll within the dialog
|
||||
const handleContentClick = useCallback((e: React.MouseEvent<HTMLDivElement>) => {
|
||||
const target = e.target as HTMLElement;
|
||||
const anchor = target.closest("a");
|
||||
if (!anchor) return;
|
||||
const href = anchor.getAttribute("href");
|
||||
if (!href || !href.startsWith("#")) return;
|
||||
e.preventDefault();
|
||||
const el = contentRef.current?.querySelector(href);
|
||||
if (el) el.scrollIntoView({ behavior: "smooth" });
|
||||
}, []);
|
||||
|
||||
return (
|
||||
<div
|
||||
ref={overlayRef}
|
||||
onClick={handleOverlayClick}
|
||||
className="fixed inset-0 bg-black/50 flex items-center justify-center z-50"
|
||||
>
|
||||
<div className="bg-[var(--bg-secondary)] border border-[var(--border-color)] rounded-lg shadow-xl w-[48rem] max-w-[90vw] max-h-[85vh] flex flex-col">
|
||||
{/* Header */}
|
||||
<div className="flex items-center justify-between px-6 py-4 border-b border-[var(--border-color)] flex-shrink-0">
|
||||
<h2 className="text-lg font-semibold">How to Use Triple-C</h2>
|
||||
<button
|
||||
onClick={onClose}
|
||||
className="px-3 py-1.5 text-xs bg-[var(--bg-tertiary)] border border-[var(--border-color)] rounded hover:bg-[var(--border-color)] transition-colors"
|
||||
>
|
||||
Close
|
||||
</button>
|
||||
</div>
|
||||
|
||||
{/* Scrollable content */}
|
||||
<div
|
||||
ref={contentRef}
|
||||
onClick={handleContentClick}
|
||||
className="flex-1 overflow-y-auto px-6 py-4 help-content"
|
||||
>
|
||||
{error && (
|
||||
<p className="text-[var(--error)] text-sm">Failed to load help content: {error}</p>
|
||||
)}
|
||||
{!markdown && !error && (
|
||||
<p className="text-[var(--text-secondary)] text-sm">Loading...</p>
|
||||
)}
|
||||
{markdown && (
|
||||
<div dangerouslySetInnerHTML={{ __html: renderMarkdown(markdown) }} />
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
@@ -2,8 +2,8 @@ import { useShallow } from "zustand/react/shallow";
|
||||
import { useAppState } from "../../store/appState";
|
||||
|
||||
export default function StatusBar() {
|
||||
const { projects, sessions } = useAppState(
|
||||
useShallow(s => ({ projects: s.projects, sessions: s.sessions }))
|
||||
const { projects, sessions, terminalHasSelection } = useAppState(
|
||||
useShallow(s => ({ projects: s.projects, sessions: s.sessions, terminalHasSelection: s.terminalHasSelection }))
|
||||
);
|
||||
const running = projects.filter((p) => p.status === "running").length;
|
||||
|
||||
@@ -20,6 +20,12 @@ export default function StatusBar() {
|
||||
<span>
|
||||
{sessions.length} terminal{sessions.length !== 1 ? "s" : ""}
|
||||
</span>
|
||||
{terminalHasSelection && (
|
||||
<>
|
||||
<span className="mx-2">|</span>
|
||||
<span className="text-[var(--accent)]">Ctrl+Shift+C to copy</span>
|
||||
</>
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
@@ -4,19 +4,25 @@ import TerminalTabs from "../terminal/TerminalTabs";
|
||||
import { useAppState } from "../../store/appState";
|
||||
import { useSettings } from "../../hooks/useSettings";
|
||||
import UpdateDialog from "../settings/UpdateDialog";
|
||||
import ImageUpdateDialog from "../settings/ImageUpdateDialog";
|
||||
import HelpDialog from "./HelpDialog";
|
||||
|
||||
export default function TopBar() {
|
||||
const { dockerAvailable, imageExists, updateInfo, appVersion, setUpdateInfo } = useAppState(
|
||||
const { dockerAvailable, imageExists, updateInfo, imageUpdateInfo, appVersion, setUpdateInfo, setImageUpdateInfo } = useAppState(
|
||||
useShallow(s => ({
|
||||
dockerAvailable: s.dockerAvailable,
|
||||
imageExists: s.imageExists,
|
||||
updateInfo: s.updateInfo,
|
||||
imageUpdateInfo: s.imageUpdateInfo,
|
||||
appVersion: s.appVersion,
|
||||
setUpdateInfo: s.setUpdateInfo,
|
||||
setImageUpdateInfo: s.setImageUpdateInfo,
|
||||
}))
|
||||
);
|
||||
const { appSettings, saveSettings } = useSettings();
|
||||
const [showUpdateDialog, setShowUpdateDialog] = useState(false);
|
||||
const [showImageUpdateDialog, setShowImageUpdateDialog] = useState(false);
|
||||
const [showHelpDialog, setShowHelpDialog] = useState(false);
|
||||
|
||||
const handleDismiss = async () => {
|
||||
if (appSettings && updateInfo) {
|
||||
@@ -29,6 +35,17 @@ export default function TopBar() {
|
||||
setShowUpdateDialog(false);
|
||||
};
|
||||
|
||||
const handleImageUpdateDismiss = async () => {
|
||||
if (appSettings && imageUpdateInfo) {
|
||||
await saveSettings({
|
||||
...appSettings,
|
||||
dismissed_image_digest: imageUpdateInfo.remote_digest,
|
||||
});
|
||||
}
|
||||
setImageUpdateInfo(null);
|
||||
setShowImageUpdateDialog(false);
|
||||
};
|
||||
|
||||
return (
|
||||
<>
|
||||
<div className="flex items-center h-10 bg-[var(--bg-secondary)] border border-[var(--border-color)] rounded-lg overflow-hidden">
|
||||
@@ -44,8 +61,24 @@ export default function TopBar() {
|
||||
Update
|
||||
</button>
|
||||
)}
|
||||
{imageUpdateInfo && (
|
||||
<button
|
||||
onClick={() => setShowImageUpdateDialog(true)}
|
||||
className="px-2 py-0.5 rounded text-xs font-medium bg-[var(--warning,#f59e0b)] text-white hover:opacity-80 transition-colors"
|
||||
title="A newer container image is available"
|
||||
>
|
||||
Image Update
|
||||
</button>
|
||||
)}
|
||||
<StatusDot ok={dockerAvailable === true} label="Docker" />
|
||||
<StatusDot ok={imageExists === true} label="Image" />
|
||||
<button
|
||||
onClick={() => setShowHelpDialog(true)}
|
||||
title="Help"
|
||||
className="ml-1 w-5 h-5 flex items-center justify-center rounded-full border border-[var(--border-color)] text-[var(--text-secondary)] hover:text-[var(--text-primary)] hover:border-[var(--text-secondary)] transition-colors text-xs font-semibold leading-none"
|
||||
>
|
||||
?
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
{showUpdateDialog && updateInfo && (
|
||||
@@ -56,6 +89,16 @@ export default function TopBar() {
|
||||
onClose={() => setShowUpdateDialog(false)}
|
||||
/>
|
||||
)}
|
||||
{showImageUpdateDialog && imageUpdateInfo && (
|
||||
<ImageUpdateDialog
|
||||
imageUpdateInfo={imageUpdateInfo}
|
||||
onDismiss={handleImageUpdateDismiss}
|
||||
onClose={() => setShowImageUpdateDialog(false)}
|
||||
/>
|
||||
)}
|
||||
{showHelpDialog && (
|
||||
<HelpDialog onClose={() => setShowHelpDialog(false)} />
|
||||
)}
|
||||
</>
|
||||
);
|
||||
}
|
||||
|
||||
191
app/src/components/projects/ClaudeCodeSettingsModal.tsx
Normal file
191
app/src/components/projects/ClaudeCodeSettingsModal.tsx
Normal file
@@ -0,0 +1,191 @@
|
||||
import { useState, useEffect, useRef, useCallback } from "react";
|
||||
import type { ClaudeCodeSettings } from "../../lib/types";
|
||||
|
||||
interface Props {
|
||||
settings: ClaudeCodeSettings | null;
|
||||
disabled: boolean;
|
||||
onSave: (settings: ClaudeCodeSettings | null) => Promise<void>;
|
||||
onClose: () => void;
|
||||
}
|
||||
|
||||
const DEFAULTS: ClaudeCodeSettings = {
|
||||
tui_mode: null,
|
||||
effort: null,
|
||||
auto_scroll_disabled: false,
|
||||
focus_mode: false,
|
||||
show_thinking_summaries: false,
|
||||
enable_session_recap: false,
|
||||
env_scrub: false,
|
||||
prompt_caching_1h: false,
|
||||
};
|
||||
|
||||
function isAllDefaults(s: ClaudeCodeSettings): boolean {
|
||||
return (
|
||||
s.tui_mode === null &&
|
||||
s.effort === null &&
|
||||
s.auto_scroll_disabled === false &&
|
||||
s.focus_mode === false &&
|
||||
s.show_thinking_summaries === false &&
|
||||
s.enable_session_recap === false &&
|
||||
s.env_scrub === false &&
|
||||
s.prompt_caching_1h === false
|
||||
);
|
||||
}
|
||||
|
||||
export default function ClaudeCodeSettingsModal({ settings, disabled, onSave, onClose }: Props) {
|
||||
const [local, setLocal] = useState<ClaudeCodeSettings>(settings ?? { ...DEFAULTS });
|
||||
const overlayRef = useRef<HTMLDivElement>(null);
|
||||
|
||||
useEffect(() => {
|
||||
const handleKeyDown = (e: KeyboardEvent) => {
|
||||
if (e.key === "Escape") onClose();
|
||||
};
|
||||
document.addEventListener("keydown", handleKeyDown);
|
||||
return () => document.removeEventListener("keydown", handleKeyDown);
|
||||
}, [onClose]);
|
||||
|
||||
const handleOverlayClick = useCallback(
|
||||
(e: React.MouseEvent<HTMLDivElement>) => {
|
||||
if (e.target === overlayRef.current) onClose();
|
||||
},
|
||||
[onClose],
|
||||
);
|
||||
|
||||
const update = async (patch: Partial<ClaudeCodeSettings>) => {
|
||||
const next = { ...local, ...patch };
|
||||
setLocal(next);
|
||||
try {
|
||||
await onSave(isAllDefaults(next) ? null : next);
|
||||
} catch (err) {
|
||||
console.error("Failed to save Claude Code settings:", err);
|
||||
}
|
||||
};
|
||||
|
||||
const toggleButton = (label: string, description: string, value: boolean, onChange: (v: boolean) => void) => (
|
||||
<div className="flex items-center justify-between gap-4">
|
||||
<div className="min-w-0">
|
||||
<div className="text-sm font-medium text-[var(--text-primary)]">{label}</div>
|
||||
<div className="text-xs text-[var(--text-secondary)]">{description}</div>
|
||||
</div>
|
||||
<button
|
||||
onClick={() => onChange(!value)}
|
||||
disabled={disabled}
|
||||
className={`px-2 py-0.5 text-xs rounded transition-colors disabled:opacity-50 shrink-0 ${
|
||||
value
|
||||
? "bg-[var(--success)] text-white"
|
||||
: "bg-[var(--bg-primary)] border border-[var(--border-color)] text-[var(--text-secondary)]"
|
||||
}`}
|
||||
>
|
||||
{value ? "ON" : "OFF"}
|
||||
</button>
|
||||
</div>
|
||||
);
|
||||
|
||||
return (
|
||||
<div
|
||||
ref={overlayRef}
|
||||
onClick={handleOverlayClick}
|
||||
className="fixed inset-0 bg-black/50 flex items-center justify-center z-50"
|
||||
>
|
||||
<div className="bg-[var(--bg-secondary)] border border-[var(--border-color)] rounded-lg p-6 w-[32rem] shadow-xl max-h-[80vh] overflow-y-auto">
|
||||
<h2 className="text-lg font-semibold mb-4">Claude Code Settings</h2>
|
||||
|
||||
{disabled && (
|
||||
<div className="px-2 py-1.5 mb-3 bg-[var(--warning)]/15 border border-[var(--warning)]/30 rounded text-xs text-[var(--warning)]">
|
||||
Container must be stopped to change Claude Code settings.
|
||||
</div>
|
||||
)}
|
||||
|
||||
<div className="space-y-4 mb-6">
|
||||
{/* TUI Mode */}
|
||||
<div className="flex items-center justify-between gap-4">
|
||||
<div className="min-w-0">
|
||||
<div className="text-sm font-medium text-[var(--text-primary)]">TUI Mode</div>
|
||||
<div className="text-xs text-[var(--text-secondary)]">Enables flicker-free alt-screen rendering</div>
|
||||
</div>
|
||||
<select
|
||||
value={local.tui_mode ?? ""}
|
||||
onChange={(e) => update({ tui_mode: e.target.value || null })}
|
||||
disabled={disabled}
|
||||
className="px-2 py-1 bg-[var(--bg-primary)] border border-[var(--border-color)] rounded text-sm text-[var(--text-primary)] focus:outline-none focus:border-[var(--accent)] disabled:opacity-50 shrink-0"
|
||||
>
|
||||
<option value="">Default</option>
|
||||
<option value="fullscreen">Fullscreen</option>
|
||||
</select>
|
||||
</div>
|
||||
|
||||
{/* Effort Level */}
|
||||
<div className="flex items-center justify-between gap-4">
|
||||
<div className="min-w-0">
|
||||
<div className="text-sm font-medium text-[var(--text-primary)]">Effort Level</div>
|
||||
<div className="text-xs text-[var(--text-secondary)]">Controls how much reasoning Claude applies</div>
|
||||
</div>
|
||||
<select
|
||||
value={local.effort ?? ""}
|
||||
onChange={(e) => update({ effort: e.target.value || null })}
|
||||
disabled={disabled}
|
||||
className="px-2 py-1 bg-[var(--bg-primary)] border border-[var(--border-color)] rounded text-sm text-[var(--text-primary)] focus:outline-none focus:border-[var(--accent)] disabled:opacity-50 shrink-0"
|
||||
>
|
||||
<option value="">Default</option>
|
||||
<option value="low">Low</option>
|
||||
<option value="medium">Medium</option>
|
||||
<option value="high">High</option>
|
||||
</select>
|
||||
</div>
|
||||
|
||||
{/* Boolean toggles */}
|
||||
{toggleButton(
|
||||
"Focus Mode",
|
||||
"Collapses tool output to one-line summaries",
|
||||
local.focus_mode,
|
||||
(v) => update({ focus_mode: v }),
|
||||
)}
|
||||
|
||||
{toggleButton(
|
||||
"Thinking Summaries",
|
||||
"Shows thinking process as summaries",
|
||||
local.show_thinking_summaries,
|
||||
(v) => update({ show_thinking_summaries: v }),
|
||||
)}
|
||||
|
||||
{toggleButton(
|
||||
"Session Recap",
|
||||
"Provides context when returning to a session",
|
||||
local.enable_session_recap,
|
||||
(v) => update({ enable_session_recap: v }),
|
||||
)}
|
||||
|
||||
{toggleButton(
|
||||
"Auto-Scroll Disabled",
|
||||
"Disables auto-scroll when in fullscreen TUI mode",
|
||||
local.auto_scroll_disabled,
|
||||
(v) => update({ auto_scroll_disabled: v }),
|
||||
)}
|
||||
|
||||
{toggleButton(
|
||||
"Env Scrub",
|
||||
"Strips credentials from subprocess environments for security",
|
||||
local.env_scrub,
|
||||
(v) => update({ env_scrub: v }),
|
||||
)}
|
||||
|
||||
{toggleButton(
|
||||
"Prompt Caching (1h)",
|
||||
"Enables 1-hour prompt cache TTL instead of 5 minutes",
|
||||
local.prompt_caching_1h,
|
||||
(v) => update({ prompt_caching_1h: v }),
|
||||
)}
|
||||
</div>
|
||||
|
||||
<div className="flex justify-end">
|
||||
<button
|
||||
onClick={onClose}
|
||||
className="px-4 py-2 text-sm text-[var(--text-secondary)] hover:text-[var(--text-primary)] transition-colors"
|
||||
>
|
||||
Close
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
@@ -57,7 +57,7 @@ const mockProject: Project = {
|
||||
paths: [{ host_path: "/home/user/project", mount_name: "project" }],
|
||||
container_id: null,
|
||||
status: "stopped",
|
||||
auth_mode: "anthropic",
|
||||
backend: "anthropic",
|
||||
bedrock_config: null,
|
||||
allow_docker_access: false,
|
||||
ssh_key_path: null,
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import { useState, useEffect } from "react";
|
||||
import { open } from "@tauri-apps/plugin-dialog";
|
||||
import { listen } from "@tauri-apps/api/event";
|
||||
import type { Project, ProjectPath, AuthMode, BedrockConfig, BedrockAuthMethod, OllamaConfig, LiteLlmConfig } from "../../lib/types";
|
||||
import type { Project, ProjectPath, Backend, BedrockConfig, BedrockAuthMethod, OllamaConfig, OpenAiCompatibleConfig } from "../../lib/types";
|
||||
import { useProjects } from "../../hooks/useProjects";
|
||||
import { useMcpServers } from "../../hooks/useMcpServers";
|
||||
import { useTerminal } from "../../hooks/useTerminal";
|
||||
@@ -9,9 +9,11 @@ import { useAppState } from "../../store/appState";
|
||||
import EnvVarsModal from "./EnvVarsModal";
|
||||
import PortMappingsModal from "./PortMappingsModal";
|
||||
import ClaudeInstructionsModal from "./ClaudeInstructionsModal";
|
||||
import ClaudeCodeSettingsModal from "./ClaudeCodeSettingsModal";
|
||||
import ContainerProgressModal from "./ContainerProgressModal";
|
||||
import FileManagerModal from "./FileManagerModal";
|
||||
import ConfirmRemoveModal from "./ConfirmRemoveModal";
|
||||
import Tooltip from "../ui/Tooltip";
|
||||
|
||||
interface Props {
|
||||
project: Project;
|
||||
@@ -29,6 +31,7 @@ export default function ProjectCard({ project }: Props) {
|
||||
const [showEnvVarsModal, setShowEnvVarsModal] = useState(false);
|
||||
const [showPortMappingsModal, setShowPortMappingsModal] = useState(false);
|
||||
const [showClaudeInstructionsModal, setShowClaudeInstructionsModal] = useState(false);
|
||||
const [showClaudeCodeSettingsModal, setShowClaudeCodeSettingsModal] = useState(false);
|
||||
const [showFileManager, setShowFileManager] = useState(false);
|
||||
const [progressMsg, setProgressMsg] = useState<string | null>(null);
|
||||
const [activeOperation, setActiveOperation] = useState<"starting" | "stopping" | "resetting" | null>(null);
|
||||
@@ -62,10 +65,10 @@ export default function ProjectCard({ project }: Props) {
|
||||
const [ollamaBaseUrl, setOllamaBaseUrl] = useState(project.ollama_config?.base_url ?? "http://host.docker.internal:11434");
|
||||
const [ollamaModelId, setOllamaModelId] = useState(project.ollama_config?.model_id ?? "");
|
||||
|
||||
// LiteLLM local state
|
||||
const [litellmBaseUrl, setLitellmBaseUrl] = useState(project.litellm_config?.base_url ?? "http://host.docker.internal:4000");
|
||||
const [litellmApiKey, setLitellmApiKey] = useState(project.litellm_config?.api_key ?? "");
|
||||
const [litellmModelId, setLitellmModelId] = useState(project.litellm_config?.model_id ?? "");
|
||||
// OpenAI Compatible local state
|
||||
const [openaiCompatibleBaseUrl, setOpenaiCompatibleBaseUrl] = useState(project.openai_compatible_config?.base_url ?? "http://host.docker.internal:4000");
|
||||
const [openaiCompatibleApiKey, setOpenaiCompatibleApiKey] = useState(project.openai_compatible_config?.api_key ?? "");
|
||||
const [openaiCompatibleModelId, setOpenaiCompatibleModelId] = useState(project.openai_compatible_config?.model_id ?? "");
|
||||
|
||||
// Sync local state when project prop changes (e.g., after save or external update)
|
||||
useEffect(() => {
|
||||
@@ -87,9 +90,9 @@ export default function ProjectCard({ project }: Props) {
|
||||
setBedrockModelId(project.bedrock_config?.model_id ?? "");
|
||||
setOllamaBaseUrl(project.ollama_config?.base_url ?? "http://host.docker.internal:11434");
|
||||
setOllamaModelId(project.ollama_config?.model_id ?? "");
|
||||
setLitellmBaseUrl(project.litellm_config?.base_url ?? "http://host.docker.internal:4000");
|
||||
setLitellmApiKey(project.litellm_config?.api_key ?? "");
|
||||
setLitellmModelId(project.litellm_config?.model_id ?? "");
|
||||
setOpenaiCompatibleBaseUrl(project.openai_compatible_config?.base_url ?? "http://host.docker.internal:4000");
|
||||
setOpenaiCompatibleApiKey(project.openai_compatible_config?.api_key ?? "");
|
||||
setOpenaiCompatibleModelId(project.openai_compatible_config?.model_id ?? "");
|
||||
}, [project]);
|
||||
|
||||
// Listen for container progress events
|
||||
@@ -196,23 +199,23 @@ export default function ProjectCard({ project }: Props) {
|
||||
model_id: null,
|
||||
};
|
||||
|
||||
const defaultLiteLlmConfig: LiteLlmConfig = {
|
||||
const defaultOpenAiCompatibleConfig: OpenAiCompatibleConfig = {
|
||||
base_url: "http://host.docker.internal:4000",
|
||||
api_key: null,
|
||||
model_id: null,
|
||||
};
|
||||
|
||||
const handleAuthModeChange = async (mode: AuthMode) => {
|
||||
const handleBackendChange = async (mode: Backend) => {
|
||||
try {
|
||||
const updates: Partial<Project> = { auth_mode: mode };
|
||||
const updates: Partial<Project> = { backend: mode };
|
||||
if (mode === "bedrock" && !project.bedrock_config) {
|
||||
updates.bedrock_config = defaultBedrockConfig;
|
||||
}
|
||||
if (mode === "ollama" && !project.ollama_config) {
|
||||
updates.ollama_config = defaultOllamaConfig;
|
||||
}
|
||||
if (mode === "lit_llm" && !project.litellm_config) {
|
||||
updates.litellm_config = defaultLiteLlmConfig;
|
||||
if (mode === "open_ai_compatible" && !project.openai_compatible_config) {
|
||||
updates.openai_compatible_config = defaultOpenAiCompatibleConfig;
|
||||
}
|
||||
await update({ ...project, ...updates });
|
||||
} catch (e) {
|
||||
@@ -354,30 +357,30 @@ export default function ProjectCard({ project }: Props) {
|
||||
}
|
||||
};
|
||||
|
||||
const handleLitellmBaseUrlBlur = async () => {
|
||||
const handleOpenaiCompatibleBaseUrlBlur = async () => {
|
||||
try {
|
||||
const current = project.litellm_config ?? defaultLiteLlmConfig;
|
||||
await update({ ...project, litellm_config: { ...current, base_url: litellmBaseUrl } });
|
||||
const current = project.openai_compatible_config ?? defaultOpenAiCompatibleConfig;
|
||||
await update({ ...project, openai_compatible_config: { ...current, base_url: openaiCompatibleBaseUrl } });
|
||||
} catch (err) {
|
||||
console.error("Failed to update LiteLLM base URL:", err);
|
||||
console.error("Failed to update OpenAI Compatible base URL:", err);
|
||||
}
|
||||
};
|
||||
|
||||
const handleLitellmApiKeyBlur = async () => {
|
||||
const handleOpenaiCompatibleApiKeyBlur = async () => {
|
||||
try {
|
||||
const current = project.litellm_config ?? defaultLiteLlmConfig;
|
||||
await update({ ...project, litellm_config: { ...current, api_key: litellmApiKey || null } });
|
||||
const current = project.openai_compatible_config ?? defaultOpenAiCompatibleConfig;
|
||||
await update({ ...project, openai_compatible_config: { ...current, api_key: openaiCompatibleApiKey || null } });
|
||||
} catch (err) {
|
||||
console.error("Failed to update LiteLLM API key:", err);
|
||||
console.error("Failed to update OpenAI Compatible API key:", err);
|
||||
}
|
||||
};
|
||||
|
||||
const handleLitellmModelIdBlur = async () => {
|
||||
const handleOpenaiCompatibleModelIdBlur = async () => {
|
||||
try {
|
||||
const current = project.litellm_config ?? defaultLiteLlmConfig;
|
||||
await update({ ...project, litellm_config: { ...current, model_id: litellmModelId || null } });
|
||||
const current = project.openai_compatible_config ?? defaultOpenAiCompatibleConfig;
|
||||
await update({ ...project, openai_compatible_config: { ...current, model_id: openaiCompatibleModelId || null } });
|
||||
} catch (err) {
|
||||
console.error("Failed to update LiteLLM model ID:", err);
|
||||
console.error("Failed to update OpenAI Compatible model ID:", err);
|
||||
}
|
||||
};
|
||||
|
||||
@@ -446,12 +449,12 @@ export default function ProjectCard({ project }: Props) {
|
||||
|
||||
{isSelected && (
|
||||
<div className="mt-2 ml-4 space-y-2 min-w-0 overflow-hidden">
|
||||
{/* Auth mode selector */}
|
||||
{/* Backend selector */}
|
||||
<div className="flex items-center gap-1 text-xs">
|
||||
<span className="text-[var(--text-secondary)] mr-1">Auth:</span>
|
||||
<span className="text-[var(--text-secondary)] mr-1">Backend:<Tooltip text="Choose the AI model provider for this project. Anthropic: Connect directly to Claude via OAuth login (run 'claude login' in terminal). Bedrock: Route through AWS Bedrock using your AWS credentials. Ollama: Use locally-hosted open-source models (Llama, Mistral, etc.) via an Ollama server. OpenAI Compatible: Connect through any OpenAI API-compatible endpoint (LiteLLM, OpenRouter, vLLM, etc.) to access 100+ model providers." /></span>
|
||||
<select
|
||||
value={project.auth_mode}
|
||||
onChange={(e) => { e.stopPropagation(); handleAuthModeChange(e.target.value as AuthMode); }}
|
||||
value={project.backend}
|
||||
onChange={(e) => { e.stopPropagation(); handleBackendChange(e.target.value as Backend); }}
|
||||
onClick={(e) => e.stopPropagation()}
|
||||
disabled={!isStopped}
|
||||
className="px-2 py-0.5 rounded bg-[var(--bg-primary)] border border-[var(--border-color)] text-xs text-[var(--text-primary)] focus:outline-none focus:border-[var(--accent)] disabled:opacity-50"
|
||||
@@ -459,7 +462,7 @@ export default function ProjectCard({ project }: Props) {
|
||||
<option value="anthropic">Anthropic</option>
|
||||
<option value="bedrock">Bedrock</option>
|
||||
<option value="ollama">Ollama</option>
|
||||
<option value="lit_llm">LiteLLM</option>
|
||||
<option value="open_ai_compatible">OpenAI Compatible</option>
|
||||
</select>
|
||||
</div>
|
||||
|
||||
@@ -609,7 +612,7 @@ export default function ProjectCard({ project }: Props) {
|
||||
|
||||
{/* SSH Key */}
|
||||
<div>
|
||||
<label className="block text-xs text-[var(--text-secondary)] mb-0.5">SSH Key Directory</label>
|
||||
<label className="block text-xs text-[var(--text-secondary)] mb-0.5">SSH Key Directory<Tooltip text="Path to your .ssh directory. Mounted into the container so Claude can authenticate with Git remotes over SSH." /></label>
|
||||
<div className="flex gap-1">
|
||||
<input
|
||||
value={sshKeyPath}
|
||||
@@ -631,7 +634,7 @@ export default function ProjectCard({ project }: Props) {
|
||||
|
||||
{/* Git Name */}
|
||||
<div>
|
||||
<label className="block text-xs text-[var(--text-secondary)] mb-0.5">Git Name</label>
|
||||
<label className="block text-xs text-[var(--text-secondary)] mb-0.5">Git Name<Tooltip text="Sets git user.name inside the container for commit authorship." /></label>
|
||||
<input
|
||||
value={gitName}
|
||||
onChange={(e) => setGitName(e.target.value)}
|
||||
@@ -644,7 +647,7 @@ export default function ProjectCard({ project }: Props) {
|
||||
|
||||
{/* Git Email */}
|
||||
<div>
|
||||
<label className="block text-xs text-[var(--text-secondary)] mb-0.5">Git Email</label>
|
||||
<label className="block text-xs text-[var(--text-secondary)] mb-0.5">Git Email<Tooltip text="Sets git user.email inside the container for commit authorship." /></label>
|
||||
<input
|
||||
value={gitEmail}
|
||||
onChange={(e) => setGitEmail(e.target.value)}
|
||||
@@ -657,7 +660,7 @@ export default function ProjectCard({ project }: Props) {
|
||||
|
||||
{/* Git Token (HTTPS) */}
|
||||
<div>
|
||||
<label className="block text-xs text-[var(--text-secondary)] mb-0.5">Git HTTPS Token</label>
|
||||
<label className="block text-xs text-[var(--text-secondary)] mb-0.5">Git HTTPS Token<Tooltip text="A personal access token (e.g. GitHub PAT) for HTTPS git operations inside the container." /></label>
|
||||
<input
|
||||
type="password"
|
||||
value={gitToken}
|
||||
@@ -671,7 +674,7 @@ export default function ProjectCard({ project }: Props) {
|
||||
|
||||
{/* Docker access toggle */}
|
||||
<div className="flex items-center gap-2">
|
||||
<label className="text-xs text-[var(--text-secondary)]">Allow container spawning</label>
|
||||
<label className="text-xs text-[var(--text-secondary)]">Allow container spawning<Tooltip text="Mounts the Docker socket so Claude can build and run Docker containers from inside the sandbox." /></label>
|
||||
<button
|
||||
onClick={async () => {
|
||||
try { await update({ ...project, allow_docker_access: !project.allow_docker_access }); } catch (err) {
|
||||
@@ -691,7 +694,7 @@ export default function ProjectCard({ project }: Props) {
|
||||
|
||||
{/* Mission Control toggle */}
|
||||
<div className="flex items-center gap-2">
|
||||
<label className="text-xs text-[var(--text-secondary)]">Mission Control</label>
|
||||
<label className="text-xs text-[var(--text-secondary)]">Mission Control<Tooltip text="Enables a web dashboard for monitoring and managing Claude sessions remotely." /></label>
|
||||
<button
|
||||
onClick={async () => {
|
||||
try {
|
||||
@@ -711,10 +714,36 @@ export default function ProjectCard({ project }: Props) {
|
||||
</button>
|
||||
</div>
|
||||
|
||||
{/* Full Permissions toggle */}
|
||||
<div className="flex items-center gap-2">
|
||||
<label className="text-xs text-[var(--text-secondary)]">
|
||||
Full Permissions
|
||||
<span className="text-[var(--error)] font-semibold ml-1">(CAUTION)</span>
|
||||
<Tooltip text="When enabled, Claude runs with --dangerously-skip-permissions and auto-approves all tool calls without prompting. Only enable this if you trust the sandboxed environment to contain all actions. When disabled, Claude will ask for your approval before running commands, editing files, etc." />
|
||||
</label>
|
||||
<button
|
||||
onClick={async () => {
|
||||
try {
|
||||
await update({ ...project, full_permissions: !project.full_permissions });
|
||||
} catch (err) {
|
||||
console.error("Failed to update full permissions setting:", err);
|
||||
}
|
||||
}}
|
||||
disabled={!isStopped}
|
||||
className={`px-2 py-0.5 text-xs rounded transition-colors disabled:opacity-50 ${
|
||||
project.full_permissions
|
||||
? "bg-[var(--error)] text-white"
|
||||
: "bg-[var(--bg-primary)] border border-[var(--border-color)] text-[var(--text-secondary)]"
|
||||
}`}
|
||||
>
|
||||
{project.full_permissions ? "ON" : "OFF"}
|
||||
</button>
|
||||
</div>
|
||||
|
||||
{/* Environment Variables */}
|
||||
<div className="flex items-center justify-between">
|
||||
<label className="text-xs text-[var(--text-secondary)]">
|
||||
Environment Variables{envVars.length > 0 && ` (${envVars.length})`}
|
||||
Environment Variables{envVars.length > 0 && ` (${envVars.length})`}<Tooltip text="Custom env vars injected into this project's container. Useful for API keys or tool configuration." />
|
||||
</label>
|
||||
<button
|
||||
onClick={() => setShowEnvVarsModal(true)}
|
||||
@@ -727,7 +756,7 @@ export default function ProjectCard({ project }: Props) {
|
||||
{/* Port Mappings */}
|
||||
<div className="flex items-center justify-between">
|
||||
<label className="text-xs text-[var(--text-secondary)]">
|
||||
Port Mappings{portMappings.length > 0 && ` (${portMappings.length})`}
|
||||
Port Mappings{portMappings.length > 0 && ` (${portMappings.length})`}<Tooltip text="Map container ports to host ports so you can access dev servers running inside the container." />
|
||||
</label>
|
||||
<button
|
||||
onClick={() => setShowPortMappingsModal(true)}
|
||||
@@ -740,7 +769,7 @@ export default function ProjectCard({ project }: Props) {
|
||||
{/* Claude Instructions */}
|
||||
<div className="flex items-center justify-between">
|
||||
<label className="text-xs text-[var(--text-secondary)]">
|
||||
Claude Instructions{claudeInstructions ? " (set)" : ""}
|
||||
Claude Instructions{claudeInstructions ? " (set)" : ""}<Tooltip text="Project-specific instructions written to CLAUDE.md. Guides Claude's behavior for this project." />
|
||||
</label>
|
||||
<button
|
||||
onClick={() => setShowClaudeInstructionsModal(true)}
|
||||
@@ -750,10 +779,23 @@ export default function ProjectCard({ project }: Props) {
|
||||
</button>
|
||||
</div>
|
||||
|
||||
{/* Claude Code Settings */}
|
||||
<div className="flex items-center justify-between">
|
||||
<label className="text-xs text-[var(--text-secondary)]">
|
||||
Claude Code Settings{project.claude_code_settings ? " (set)" : ""}<Tooltip text="Configure Claude Code CLI behavior: TUI mode, effort level, focus mode, prompt caching, and more. These override global defaults for this project." />
|
||||
</label>
|
||||
<button
|
||||
onClick={() => setShowClaudeCodeSettingsModal(true)}
|
||||
className="text-xs px-2 py-0.5 text-[var(--accent)] hover:text-[var(--accent-hover)] hover:bg-[var(--bg-primary)] rounded transition-colors"
|
||||
>
|
||||
Edit
|
||||
</button>
|
||||
</div>
|
||||
|
||||
{/* MCP Servers */}
|
||||
{mcpServers.length > 0 && (
|
||||
<div>
|
||||
<label className="block text-xs text-[var(--text-secondary)] mb-1">MCP Servers</label>
|
||||
<label className="block text-xs text-[var(--text-secondary)] mb-1">MCP Servers<Tooltip text="Model Context Protocol servers give Claude access to external tools and data sources." /></label>
|
||||
<div className="space-y-1">
|
||||
{mcpServers.map((server) => {
|
||||
const enabled = project.enabled_mcp_servers.includes(server.id);
|
||||
@@ -794,7 +836,7 @@ export default function ProjectCard({ project }: Props) {
|
||||
)}
|
||||
|
||||
{/* Bedrock config */}
|
||||
{project.auth_mode === "bedrock" && (() => {
|
||||
{project.backend === "bedrock" && (() => {
|
||||
const bc = project.bedrock_config ?? defaultBedrockConfig;
|
||||
const inputCls = "w-full px-2 py-1 bg-[var(--bg-primary)] border border-[var(--border-color)] rounded text-xs text-[var(--text-primary)] focus:outline-none focus:border-[var(--accent)] disabled:opacity-50";
|
||||
return (
|
||||
@@ -819,7 +861,7 @@ export default function ProjectCard({ project }: Props) {
|
||||
|
||||
{/* AWS Region (always shown) */}
|
||||
<div>
|
||||
<label className="block text-xs text-[var(--text-secondary)] mb-0.5">AWS Region</label>
|
||||
<label className="block text-xs text-[var(--text-secondary)] mb-0.5">AWS Region<Tooltip text="The AWS region where your Bedrock endpoint is available (e.g. us-east-1)." /></label>
|
||||
<input
|
||||
value={bedrockRegion}
|
||||
onChange={(e) => setBedrockRegion(e.target.value)}
|
||||
@@ -834,7 +876,7 @@ export default function ProjectCard({ project }: Props) {
|
||||
{bc.auth_method === "static_credentials" && (
|
||||
<>
|
||||
<div>
|
||||
<label className="block text-xs text-[var(--text-secondary)] mb-0.5">Access Key ID</label>
|
||||
<label className="block text-xs text-[var(--text-secondary)] mb-0.5">Access Key ID<Tooltip text="Your AWS IAM access key ID for Bedrock API authentication." /></label>
|
||||
<input
|
||||
value={bedrockAccessKeyId}
|
||||
onChange={(e) => setBedrockAccessKeyId(e.target.value)}
|
||||
@@ -845,7 +887,7 @@ export default function ProjectCard({ project }: Props) {
|
||||
/>
|
||||
</div>
|
||||
<div>
|
||||
<label className="block text-xs text-[var(--text-secondary)] mb-0.5">Secret Access Key</label>
|
||||
<label className="block text-xs text-[var(--text-secondary)] mb-0.5">Secret Access Key<Tooltip text="Your AWS IAM secret key. Stored locally and injected as an env var into the container." /></label>
|
||||
<input
|
||||
type="password"
|
||||
value={bedrockSecretKey}
|
||||
@@ -856,7 +898,7 @@ export default function ProjectCard({ project }: Props) {
|
||||
/>
|
||||
</div>
|
||||
<div>
|
||||
<label className="block text-xs text-[var(--text-secondary)] mb-0.5">Session Token (optional)</label>
|
||||
<label className="block text-xs text-[var(--text-secondary)] mb-0.5">Session Token (optional)<Tooltip text="Temporary session token for assumed-role or MFA-based AWS credentials." /></label>
|
||||
<input
|
||||
type="password"
|
||||
value={bedrockSessionToken}
|
||||
@@ -872,7 +914,7 @@ export default function ProjectCard({ project }: Props) {
|
||||
{/* Profile field */}
|
||||
{bc.auth_method === "profile" && (
|
||||
<div>
|
||||
<label className="block text-xs text-[var(--text-secondary)] mb-0.5">AWS Profile</label>
|
||||
<label className="block text-xs text-[var(--text-secondary)] mb-0.5">AWS Profile<Tooltip text="Named profile from your AWS config/credentials files (e.g. 'default' or 'prod')." /></label>
|
||||
<input
|
||||
value={bedrockProfile}
|
||||
onChange={(e) => setBedrockProfile(e.target.value)}
|
||||
@@ -887,7 +929,7 @@ export default function ProjectCard({ project }: Props) {
|
||||
{/* Bearer token field */}
|
||||
{bc.auth_method === "bearer_token" && (
|
||||
<div>
|
||||
<label className="block text-xs text-[var(--text-secondary)] mb-0.5">Bearer Token</label>
|
||||
<label className="block text-xs text-[var(--text-secondary)] mb-0.5">Bearer Token<Tooltip text="An SSO or identity-center bearer token for Bedrock authentication." /></label>
|
||||
<input
|
||||
type="password"
|
||||
value={bedrockBearerToken}
|
||||
@@ -901,7 +943,7 @@ export default function ProjectCard({ project }: Props) {
|
||||
|
||||
{/* Model override */}
|
||||
<div>
|
||||
<label className="block text-xs text-[var(--text-secondary)] mb-0.5">Model ID (optional)</label>
|
||||
<label className="block text-xs text-[var(--text-secondary)] mb-0.5">Model ID (optional)<Tooltip text="Override the default Bedrock model. Leave blank to use Claude's default." /></label>
|
||||
<input
|
||||
value={bedrockModelId}
|
||||
onChange={(e) => setBedrockModelId(e.target.value)}
|
||||
@@ -916,7 +958,7 @@ export default function ProjectCard({ project }: Props) {
|
||||
})()}
|
||||
|
||||
{/* Ollama config */}
|
||||
{project.auth_mode === "ollama" && (() => {
|
||||
{project.backend === "ollama" && (() => {
|
||||
const inputCls = "w-full px-2 py-1 bg-[var(--bg-primary)] border border-[var(--border-color)] rounded text-xs text-[var(--text-primary)] focus:outline-none focus:border-[var(--accent)] disabled:opacity-50";
|
||||
return (
|
||||
<div className="space-y-2 pt-1 border-t border-[var(--border-color)]">
|
||||
@@ -926,7 +968,7 @@ export default function ProjectCard({ project }: Props) {
|
||||
</p>
|
||||
|
||||
<div>
|
||||
<label className="block text-xs text-[var(--text-secondary)] mb-0.5">Base URL</label>
|
||||
<label className="block text-xs text-[var(--text-secondary)] mb-0.5">Base URL<Tooltip text="URL of your Ollama server. Use host.docker.internal to reach the host machine from inside the container." /></label>
|
||||
<input
|
||||
value={ollamaBaseUrl}
|
||||
onChange={(e) => setOllamaBaseUrl(e.target.value)}
|
||||
@@ -941,7 +983,7 @@ export default function ProjectCard({ project }: Props) {
|
||||
</div>
|
||||
|
||||
<div>
|
||||
<label className="block text-xs text-[var(--text-secondary)] mb-0.5">Model (optional)</label>
|
||||
<label className="block text-xs text-[var(--text-secondary)] mb-0.5">Model (required)<Tooltip text="Ollama model name to use (e.g. qwen3.5:27b). The model must be pulled in Ollama before starting the container." /></label>
|
||||
<input
|
||||
value={ollamaModelId}
|
||||
onChange={(e) => setOllamaModelId(e.target.value)}
|
||||
@@ -955,38 +997,38 @@ export default function ProjectCard({ project }: Props) {
|
||||
);
|
||||
})()}
|
||||
|
||||
{/* LiteLLM config */}
|
||||
{project.auth_mode === "lit_llm" && (() => {
|
||||
{/* OpenAI Compatible config */}
|
||||
{project.backend === "open_ai_compatible" && (() => {
|
||||
const inputCls = "w-full px-2 py-1 bg-[var(--bg-primary)] border border-[var(--border-color)] rounded text-xs text-[var(--text-primary)] focus:outline-none focus:border-[var(--accent)] disabled:opacity-50";
|
||||
return (
|
||||
<div className="space-y-2 pt-1 border-t border-[var(--border-color)]">
|
||||
<label className="block text-xs font-medium text-[var(--text-primary)]">LiteLLM Gateway</label>
|
||||
<label className="block text-xs font-medium text-[var(--text-primary)]">OpenAI Compatible Endpoint</label>
|
||||
<p className="text-xs text-[var(--text-secondary)]">
|
||||
Connect through a LiteLLM proxy to use 100+ model providers.
|
||||
Connect through any OpenAI API-compatible endpoint (LiteLLM, OpenRouter, vLLM, etc.).
|
||||
</p>
|
||||
|
||||
<div>
|
||||
<label className="block text-xs text-[var(--text-secondary)] mb-0.5">Base URL</label>
|
||||
<label className="block text-xs text-[var(--text-secondary)] mb-0.5">Base URL<Tooltip text="URL of your OpenAI API-compatible server. Use host.docker.internal for a locally running service." /></label>
|
||||
<input
|
||||
value={litellmBaseUrl}
|
||||
onChange={(e) => setLitellmBaseUrl(e.target.value)}
|
||||
onBlur={handleLitellmBaseUrlBlur}
|
||||
value={openaiCompatibleBaseUrl}
|
||||
onChange={(e) => setOpenaiCompatibleBaseUrl(e.target.value)}
|
||||
onBlur={handleOpenaiCompatibleBaseUrlBlur}
|
||||
placeholder="http://host.docker.internal:4000"
|
||||
disabled={!isStopped}
|
||||
className={inputCls}
|
||||
/>
|
||||
<p className="text-xs text-[var(--text-secondary)] mt-0.5 opacity-70">
|
||||
Use host.docker.internal for local, or a URL for remote/containerized LiteLLM.
|
||||
Use host.docker.internal for local, or a URL for a remote OpenAI-compatible service.
|
||||
</p>
|
||||
</div>
|
||||
|
||||
<div>
|
||||
<label className="block text-xs text-[var(--text-secondary)] mb-0.5">API Key</label>
|
||||
<label className="block text-xs text-[var(--text-secondary)] mb-0.5">API Key<Tooltip text="Authentication key for your OpenAI-compatible endpoint, if required." /></label>
|
||||
<input
|
||||
type="password"
|
||||
value={litellmApiKey}
|
||||
onChange={(e) => setLitellmApiKey(e.target.value)}
|
||||
onBlur={handleLitellmApiKeyBlur}
|
||||
value={openaiCompatibleApiKey}
|
||||
onChange={(e) => setOpenaiCompatibleApiKey(e.target.value)}
|
||||
onBlur={handleOpenaiCompatibleApiKeyBlur}
|
||||
placeholder="sk-..."
|
||||
disabled={!isStopped}
|
||||
className={inputCls}
|
||||
@@ -994,11 +1036,11 @@ export default function ProjectCard({ project }: Props) {
|
||||
</div>
|
||||
|
||||
<div>
|
||||
<label className="block text-xs text-[var(--text-secondary)] mb-0.5">Model (optional)</label>
|
||||
<label className="block text-xs text-[var(--text-secondary)] mb-0.5">Model (optional)<Tooltip text="Model identifier as configured in your provider (e.g. gpt-4o, gemini-pro)." /></label>
|
||||
<input
|
||||
value={litellmModelId}
|
||||
onChange={(e) => setLitellmModelId(e.target.value)}
|
||||
onBlur={handleLitellmModelIdBlur}
|
||||
value={openaiCompatibleModelId}
|
||||
onChange={(e) => setOpenaiCompatibleModelId(e.target.value)}
|
||||
onBlur={handleOpenaiCompatibleModelIdBlur}
|
||||
placeholder="gpt-4o / gemini-pro / etc."
|
||||
disabled={!isStopped}
|
||||
className={inputCls}
|
||||
@@ -1052,6 +1094,17 @@ export default function ProjectCard({ project }: Props) {
|
||||
/>
|
||||
)}
|
||||
|
||||
{showClaudeCodeSettingsModal && (
|
||||
<ClaudeCodeSettingsModal
|
||||
settings={project.claude_code_settings}
|
||||
disabled={!isStopped}
|
||||
onSave={async (ccSettings) => {
|
||||
await update({ ...project, claude_code_settings: ccSettings });
|
||||
}}
|
||||
onClose={() => setShowClaudeCodeSettingsModal(false)}
|
||||
/>
|
||||
)}
|
||||
|
||||
{showFileManager && (
|
||||
<FileManagerModal
|
||||
projectId={project.id}
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
export default function ApiKeyInput() {
|
||||
return (
|
||||
<div>
|
||||
<label className="block text-sm font-medium mb-1">Authentication</label>
|
||||
<label className="block text-sm font-medium mb-1">Backend</label>
|
||||
<p className="text-xs text-[var(--text-secondary)] mb-3">
|
||||
Each project can use <strong>claude login</strong> (OAuth, run inside the terminal) or <strong>AWS Bedrock</strong>. Set auth mode per-project.
|
||||
Each project can use <strong>claude login</strong> (OAuth, run inside the terminal) or <strong>AWS Bedrock</strong>. Set backend per-project.
|
||||
</p>
|
||||
</div>
|
||||
);
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import { useState, useEffect } from "react";
|
||||
import { useSettings } from "../../hooks/useSettings";
|
||||
import * as commands from "../../lib/tauri-commands";
|
||||
import Tooltip from "../ui/Tooltip";
|
||||
|
||||
export default function AwsSettings() {
|
||||
const { appSettings, saveSettings } = useSettings();
|
||||
@@ -56,7 +57,7 @@ export default function AwsSettings() {
|
||||
|
||||
{/* AWS Config Path */}
|
||||
<div>
|
||||
<span className="text-[var(--text-secondary)] text-xs block mb-1">AWS Config Path</span>
|
||||
<span className="text-[var(--text-secondary)] text-xs block mb-1">AWS Config Path<Tooltip text="Path to your AWS config/credentials directory. Mounted into containers for Bedrock auth." /></span>
|
||||
<div className="flex gap-2">
|
||||
<input
|
||||
type="text"
|
||||
@@ -80,7 +81,7 @@ export default function AwsSettings() {
|
||||
|
||||
{/* AWS Profile */}
|
||||
<div>
|
||||
<span className="text-[var(--text-secondary)] text-xs block mb-1">Default Profile</span>
|
||||
<span className="text-[var(--text-secondary)] text-xs block mb-1">Default Profile<Tooltip text="AWS named profile to use by default. Per-project settings can override this." /></span>
|
||||
<select
|
||||
value={globalAws.aws_profile ?? ""}
|
||||
onChange={(e) => handleChange("aws_profile", e.target.value)}
|
||||
@@ -95,7 +96,7 @@ export default function AwsSettings() {
|
||||
|
||||
{/* AWS Region */}
|
||||
<div>
|
||||
<span className="text-[var(--text-secondary)] text-xs block mb-1">Default Region</span>
|
||||
<span className="text-[var(--text-secondary)] text-xs block mb-1">Default Region<Tooltip text="Default AWS region for Bedrock API calls (e.g. us-east-1). Can be overridden per project." /></span>
|
||||
<input
|
||||
type="text"
|
||||
value={globalAws.aws_region ?? ""}
|
||||
|
||||
@@ -2,8 +2,9 @@ import { useState } from "react";
|
||||
import { useDocker } from "../../hooks/useDocker";
|
||||
import { useSettings } from "../../hooks/useSettings";
|
||||
import type { ImageSource } from "../../lib/types";
|
||||
import Tooltip from "../ui/Tooltip";
|
||||
|
||||
const REGISTRY_IMAGE = "repo.anhonesthost.net/cybercovellc/triple-c/triple-c-sandbox:latest";
|
||||
const REGISTRY_IMAGE = "ghcr.io/shadowdao/triple-c-sandbox:latest";
|
||||
|
||||
const IMAGE_SOURCE_OPTIONS: { value: ImageSource; label: string; description: string }[] = [
|
||||
{ value: "registry", label: "Registry", description: "Pull from container registry" },
|
||||
@@ -87,7 +88,7 @@ export default function DockerSettings() {
|
||||
|
||||
{/* Image Source Selector */}
|
||||
<div>
|
||||
<span className="text-[var(--text-secondary)] text-xs block mb-1.5">Image Source</span>
|
||||
<span className="text-[var(--text-secondary)] text-xs block mb-1.5">Image Source<Tooltip text="Registry pulls the pre-built image. Local Build compiles from the bundled Dockerfile. Custom lets you specify any image." /></span>
|
||||
<div className="flex gap-1">
|
||||
{IMAGE_SOURCE_OPTIONS.map((opt) => (
|
||||
<button
|
||||
@@ -109,7 +110,7 @@ export default function DockerSettings() {
|
||||
{/* Custom image input */}
|
||||
{imageSource === "custom" && (
|
||||
<div>
|
||||
<span className="text-[var(--text-secondary)] text-xs block mb-1">Custom Image</span>
|
||||
<span className="text-[var(--text-secondary)] text-xs block mb-1">Custom Image<Tooltip text="Full image name including registry and tag (e.g. myregistry.com/image:tag)." /></span>
|
||||
<input
|
||||
type="text"
|
||||
value={customInput}
|
||||
@@ -121,9 +122,9 @@ export default function DockerSettings() {
|
||||
)}
|
||||
|
||||
{/* Resolved image display */}
|
||||
<div className="flex items-center justify-between">
|
||||
<div>
|
||||
<span className="text-[var(--text-secondary)]">Image</span>
|
||||
<span className="text-xs text-[var(--text-secondary)] truncate max-w-[200px]" title={resolvedImageName}>
|
||||
<span className="block text-xs text-[var(--text-secondary)] font-mono mt-0.5 truncate" title={resolvedImageName}>
|
||||
{resolvedImageName}
|
||||
</span>
|
||||
</div>
|
||||
|
||||
91
app/src/components/settings/ImageUpdateDialog.tsx
Normal file
91
app/src/components/settings/ImageUpdateDialog.tsx
Normal file
@@ -0,0 +1,91 @@
|
||||
import { useEffect, useRef, useCallback } from "react";
|
||||
import type { ImageUpdateInfo } from "../../lib/types";
|
||||
|
||||
interface Props {
|
||||
imageUpdateInfo: ImageUpdateInfo;
|
||||
onDismiss: () => void;
|
||||
onClose: () => void;
|
||||
}
|
||||
|
||||
export default function ImageUpdateDialog({
|
||||
imageUpdateInfo,
|
||||
onDismiss,
|
||||
onClose,
|
||||
}: Props) {
|
||||
const overlayRef = useRef<HTMLDivElement>(null);
|
||||
|
||||
useEffect(() => {
|
||||
const handleKeyDown = (e: KeyboardEvent) => {
|
||||
if (e.key === "Escape") onClose();
|
||||
};
|
||||
document.addEventListener("keydown", handleKeyDown);
|
||||
return () => document.removeEventListener("keydown", handleKeyDown);
|
||||
}, [onClose]);
|
||||
|
||||
const handleOverlayClick = useCallback(
|
||||
(e: React.MouseEvent<HTMLDivElement>) => {
|
||||
if (e.target === overlayRef.current) onClose();
|
||||
},
|
||||
[onClose],
|
||||
);
|
||||
|
||||
const shortDigest = (digest: string) => {
|
||||
// Show first 16 chars of the hash part (after "sha256:")
|
||||
const hash = digest.startsWith("sha256:") ? digest.slice(7) : digest;
|
||||
return hash.slice(0, 16);
|
||||
};
|
||||
|
||||
return (
|
||||
<div
|
||||
ref={overlayRef}
|
||||
onClick={handleOverlayClick}
|
||||
className="fixed inset-0 bg-black/50 flex items-center justify-center z-50"
|
||||
>
|
||||
<div className="bg-[var(--bg-secondary)] border border-[var(--border-color)] rounded-lg p-6 w-[28rem] max-h-[80vh] overflow-y-auto shadow-xl">
|
||||
<h2 className="text-lg font-semibold mb-3">Container Image Update</h2>
|
||||
|
||||
<p className="text-sm text-[var(--text-secondary)] mb-4">
|
||||
A newer version of the container image is available in the registry.
|
||||
Re-pull the image in Docker settings to get the latest tools and fixes.
|
||||
</p>
|
||||
|
||||
<div className="space-y-2 mb-4 text-xs bg-[var(--bg-primary)] rounded p-3 border border-[var(--border-color)]">
|
||||
{imageUpdateInfo.local_digest && (
|
||||
<div className="flex justify-between">
|
||||
<span className="text-[var(--text-secondary)]">Local digest</span>
|
||||
<span className="font-mono text-[var(--text-primary)]">
|
||||
{shortDigest(imageUpdateInfo.local_digest)}...
|
||||
</span>
|
||||
</div>
|
||||
)}
|
||||
<div className="flex justify-between">
|
||||
<span className="text-[var(--text-secondary)]">Remote digest</span>
|
||||
<span className="font-mono text-[var(--accent)]">
|
||||
{shortDigest(imageUpdateInfo.remote_digest)}...
|
||||
</span>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<p className="text-xs text-[var(--text-secondary)] mb-4">
|
||||
Go to Settings > Docker and click "Re-pull Image" to update.
|
||||
Running containers will not be affected until restarted.
|
||||
</p>
|
||||
|
||||
<div className="flex items-center justify-end gap-2">
|
||||
<button
|
||||
onClick={onDismiss}
|
||||
className="px-3 py-1.5 text-xs text-[var(--text-secondary)] hover:text-[var(--text-primary)] transition-colors"
|
||||
>
|
||||
Dismiss
|
||||
</button>
|
||||
<button
|
||||
onClick={onClose}
|
||||
className="px-3 py-1.5 text-xs bg-[var(--bg-tertiary)] border border-[var(--border-color)] rounded hover:bg-[var(--border-color)] transition-colors"
|
||||
>
|
||||
Close
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
@@ -1,30 +1,40 @@
|
||||
import { useState, useEffect } from "react";
|
||||
import ApiKeyInput from "./ApiKeyInput";
|
||||
import DockerSettings from "./DockerSettings";
|
||||
import AwsSettings from "./AwsSettings";
|
||||
import { useSettings } from "../../hooks/useSettings";
|
||||
import { useUpdates } from "../../hooks/useUpdates";
|
||||
import ClaudeInstructionsModal from "../projects/ClaudeInstructionsModal";
|
||||
import ClaudeCodeSettingsModal from "../projects/ClaudeCodeSettingsModal";
|
||||
import EnvVarsModal from "../projects/EnvVarsModal";
|
||||
import { detectHostTimezone } from "../../lib/tauri-commands";
|
||||
import type { EnvVar } from "../../lib/types";
|
||||
import Tooltip from "../ui/Tooltip";
|
||||
import WebTerminalSettings from "./WebTerminalSettings";
|
||||
import SttSettings from "./SttSettings";
|
||||
|
||||
export default function SettingsPanel() {
|
||||
const { appSettings, saveSettings } = useSettings();
|
||||
const { appVersion, checkForUpdates } = useUpdates();
|
||||
const { appVersion, imageUpdateInfo, checkForUpdates, checkImageUpdate } = useUpdates();
|
||||
const [globalInstructions, setGlobalInstructions] = useState(appSettings?.global_claude_instructions ?? "");
|
||||
const [globalEnvVars, setGlobalEnvVars] = useState<EnvVar[]>(appSettings?.global_custom_env_vars ?? []);
|
||||
const [checkingUpdates, setCheckingUpdates] = useState(false);
|
||||
const [timezone, setTimezone] = useState(appSettings?.timezone ?? "");
|
||||
const [sshKeyPath, setSshKeyPath] = useState(appSettings?.default_ssh_key_path ?? "");
|
||||
const [gitName, setGitName] = useState(appSettings?.default_git_user_name ?? "");
|
||||
const [gitEmail, setGitEmail] = useState(appSettings?.default_git_user_email ?? "");
|
||||
const [showInstructionsModal, setShowInstructionsModal] = useState(false);
|
||||
const [showEnvVarsModal, setShowEnvVarsModal] = useState(false);
|
||||
const [showClaudeCodeSettingsModal, setShowClaudeCodeSettingsModal] = useState(false);
|
||||
|
||||
// Sync local state when appSettings change
|
||||
useEffect(() => {
|
||||
setGlobalInstructions(appSettings?.global_claude_instructions ?? "");
|
||||
setGlobalEnvVars(appSettings?.global_custom_env_vars ?? []);
|
||||
setTimezone(appSettings?.timezone ?? "");
|
||||
}, [appSettings?.global_claude_instructions, appSettings?.global_custom_env_vars, appSettings?.timezone]);
|
||||
setSshKeyPath(appSettings?.default_ssh_key_path ?? "");
|
||||
setGitName(appSettings?.default_git_user_name ?? "");
|
||||
setGitEmail(appSettings?.default_git_user_email ?? "");
|
||||
}, [appSettings?.global_claude_instructions, appSettings?.global_custom_env_vars, appSettings?.timezone, appSettings?.default_ssh_key_path, appSettings?.default_git_user_name, appSettings?.default_git_user_email]);
|
||||
|
||||
// Auto-detect timezone on first load if not yet set
|
||||
useEffect(() => {
|
||||
@@ -39,7 +49,7 @@ export default function SettingsPanel() {
|
||||
const handleCheckNow = async () => {
|
||||
setCheckingUpdates(true);
|
||||
try {
|
||||
await checkForUpdates();
|
||||
await Promise.all([checkForUpdates(), checkImageUpdate()]);
|
||||
} finally {
|
||||
setCheckingUpdates(false);
|
||||
}
|
||||
@@ -55,13 +65,66 @@ export default function SettingsPanel() {
|
||||
<h2 className="text-xs font-semibold uppercase text-[var(--text-secondary)]">
|
||||
Settings
|
||||
</h2>
|
||||
<ApiKeyInput />
|
||||
<DockerSettings />
|
||||
<AwsSettings />
|
||||
|
||||
{/* Default SSH Key Directory */}
|
||||
<div>
|
||||
<label className="block text-sm font-medium mb-1">Default SSH Key Directory<Tooltip text="Global default SSH key directory. Mounted into containers that don't have a per-project SSH path set." /></label>
|
||||
<p className="text-xs text-[var(--text-secondary)] mb-1.5">
|
||||
Mounted into all containers unless overridden by a per-project setting.
|
||||
</p>
|
||||
<input
|
||||
type="text"
|
||||
value={sshKeyPath}
|
||||
onChange={(e) => setSshKeyPath(e.target.value)}
|
||||
onBlur={async () => {
|
||||
if (appSettings) {
|
||||
await saveSettings({ ...appSettings, default_ssh_key_path: sshKeyPath || null });
|
||||
}
|
||||
}}
|
||||
placeholder="~/.ssh"
|
||||
className="w-full px-2 py-1 text-sm bg-[var(--bg-primary)] border border-[var(--border-color)] rounded focus:outline-none focus:border-[var(--accent)]"
|
||||
/>
|
||||
</div>
|
||||
|
||||
{/* Default Git Name */}
|
||||
<div>
|
||||
<label className="block text-sm font-medium mb-1">Default Git Name<Tooltip text="Sets git user.name inside containers. Per-project Git Name takes precedence." /></label>
|
||||
<input
|
||||
type="text"
|
||||
value={gitName}
|
||||
onChange={(e) => setGitName(e.target.value)}
|
||||
onBlur={async () => {
|
||||
if (appSettings) {
|
||||
await saveSettings({ ...appSettings, default_git_user_name: gitName || null });
|
||||
}
|
||||
}}
|
||||
placeholder="Your Name"
|
||||
className="w-full px-2 py-1 text-sm bg-[var(--bg-primary)] border border-[var(--border-color)] rounded focus:outline-none focus:border-[var(--accent)]"
|
||||
/>
|
||||
</div>
|
||||
|
||||
{/* Default Git Email */}
|
||||
<div>
|
||||
<label className="block text-sm font-medium mb-1">Default Git Email<Tooltip text="Sets git user.email inside containers. Per-project Git Email takes precedence." /></label>
|
||||
<input
|
||||
type="text"
|
||||
value={gitEmail}
|
||||
onChange={(e) => setGitEmail(e.target.value)}
|
||||
onBlur={async () => {
|
||||
if (appSettings) {
|
||||
await saveSettings({ ...appSettings, default_git_user_email: gitEmail || null });
|
||||
}
|
||||
}}
|
||||
placeholder="you@example.com"
|
||||
className="w-full px-2 py-1 text-sm bg-[var(--bg-primary)] border border-[var(--border-color)] rounded focus:outline-none focus:border-[var(--accent)]"
|
||||
/>
|
||||
</div>
|
||||
|
||||
{/* Container Timezone */}
|
||||
<div>
|
||||
<label className="block text-sm font-medium mb-1">Container Timezone</label>
|
||||
<label className="block text-sm font-medium mb-1">Container Timezone<Tooltip text="Sets the timezone inside containers. Affects scheduled task timing and log timestamps." /></label>
|
||||
<p className="text-xs text-[var(--text-secondary)] mb-1.5">
|
||||
Timezone for containers — affects scheduled task timing (IANA format, e.g. America/New_York)
|
||||
</p>
|
||||
@@ -81,7 +144,7 @@ export default function SettingsPanel() {
|
||||
|
||||
{/* Global Claude Instructions */}
|
||||
<div>
|
||||
<label className="block text-sm font-medium mb-1">Claude Instructions</label>
|
||||
<label className="block text-sm font-medium mb-1">Claude Instructions<Tooltip text="Global instructions applied to all projects. Written to ~/.claude/CLAUDE.md in every container." /></label>
|
||||
<p className="text-xs text-[var(--text-secondary)] mb-1.5">
|
||||
Global instructions applied to all projects (written to ~/.claude/CLAUDE.md in containers)
|
||||
</p>
|
||||
@@ -100,7 +163,7 @@ export default function SettingsPanel() {
|
||||
|
||||
{/* Global Environment Variables */}
|
||||
<div>
|
||||
<label className="block text-sm font-medium mb-1">Global Environment Variables</label>
|
||||
<label className="block text-sm font-medium mb-1">Global Environment Variables<Tooltip text="Env vars injected into all containers. Per-project vars with the same key take precedence." /></label>
|
||||
<p className="text-xs text-[var(--text-secondary)] mb-1.5">
|
||||
Applied to all project containers. Per-project variables override global ones with the same key.
|
||||
</p>
|
||||
@@ -117,9 +180,34 @@ export default function SettingsPanel() {
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Global Claude Code Settings */}
|
||||
<div>
|
||||
<label className="block text-sm font-medium mb-1">Claude Code Settings<Tooltip text="Global defaults for Claude Code CLI behavior (TUI mode, effort, focus mode, caching, etc.). Per-project settings override these." /></label>
|
||||
<p className="text-xs text-[var(--text-secondary)] mb-1.5">
|
||||
Default Claude Code CLI settings applied to all projects. Per-project settings take precedence.
|
||||
</p>
|
||||
<div className="flex items-center justify-between">
|
||||
<span className="text-xs text-[var(--text-secondary)]">
|
||||
{appSettings?.global_claude_code_settings ? "Configured" : "Using defaults"}
|
||||
</span>
|
||||
<button
|
||||
onClick={() => setShowClaudeCodeSettingsModal(true)}
|
||||
className="text-xs px-2 py-0.5 text-[var(--accent)] hover:text-[var(--accent-hover)] hover:bg-[var(--bg-primary)] rounded transition-colors"
|
||||
>
|
||||
Edit
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Web Terminal */}
|
||||
<WebTerminalSettings />
|
||||
|
||||
{/* Speech to Text */}
|
||||
<SttSettings />
|
||||
|
||||
{/* Updates section */}
|
||||
<div>
|
||||
<label className="block text-sm font-medium mb-2">Updates</label>
|
||||
<label className="block text-sm font-medium mb-2">Updates<Tooltip text="Check for new versions of the Triple-C app and container image." /></label>
|
||||
<div className="space-y-2">
|
||||
{appVersion && (
|
||||
<p className="text-xs text-[var(--text-secondary)]">
|
||||
@@ -146,6 +234,12 @@ export default function SettingsPanel() {
|
||||
>
|
||||
{checkingUpdates ? "Checking..." : "Check now"}
|
||||
</button>
|
||||
{imageUpdateInfo && (
|
||||
<div className="flex items-center gap-2 px-3 py-2 text-xs bg-[var(--bg-primary)] border border-[var(--warning,#f59e0b)] rounded">
|
||||
<span className="inline-block w-2 h-2 rounded-full bg-[var(--warning,#f59e0b)]" />
|
||||
<span>A newer container image is available. Re-pull the image in Docker settings above to update.</span>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -176,6 +270,19 @@ export default function SettingsPanel() {
|
||||
onClose={() => setShowEnvVarsModal(false)}
|
||||
/>
|
||||
)}
|
||||
|
||||
{showClaudeCodeSettingsModal && (
|
||||
<ClaudeCodeSettingsModal
|
||||
settings={appSettings?.global_claude_code_settings ?? null}
|
||||
disabled={false}
|
||||
onSave={async (ccSettings) => {
|
||||
if (appSettings) {
|
||||
await saveSettings({ ...appSettings, global_claude_code_settings: ccSettings });
|
||||
}
|
||||
}}
|
||||
onClose={() => setShowClaudeCodeSettingsModal(false)}
|
||||
/>
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
249
app/src/components/settings/SttSettings.tsx
Normal file
249
app/src/components/settings/SttSettings.tsx
Normal file
@@ -0,0 +1,249 @@
|
||||
import { useState, useEffect } from "react";
|
||||
import { useSettings } from "../../hooks/useSettings";
|
||||
import { getSttStatus, startStt, stopStt, pullSttImage, buildSttImage } from "../../lib/tauri-commands";
|
||||
import { listen } from "@tauri-apps/api/event";
|
||||
import type { SttStatus } from "../../lib/types";
|
||||
import Tooltip from "../ui/Tooltip";
|
||||
|
||||
export default function SttSettings() {
|
||||
const { appSettings, saveSettings } = useSettings();
|
||||
const [status, setStatus] = useState<SttStatus | null>(null);
|
||||
const [loading, setLoading] = useState(false);
|
||||
const [pulling, setPulling] = useState(false);
|
||||
const [building, setBuilding] = useState(false);
|
||||
const [buildLog, setBuildLog] = useState<string | null>(null);
|
||||
const [model, setModel] = useState(appSettings?.stt?.model ?? "tiny");
|
||||
const [port, setPort] = useState(String(appSettings?.stt?.port ?? 9876));
|
||||
const [language, setLanguage] = useState(appSettings?.stt?.language ?? "");
|
||||
|
||||
useEffect(() => {
|
||||
setModel(appSettings?.stt?.model ?? "tiny");
|
||||
setPort(String(appSettings?.stt?.port ?? 9876));
|
||||
setLanguage(appSettings?.stt?.language ?? "");
|
||||
}, [appSettings?.stt?.model, appSettings?.stt?.port, appSettings?.stt?.language]);
|
||||
|
||||
useEffect(() => {
|
||||
refreshStatus();
|
||||
}, []);
|
||||
|
||||
const refreshStatus = () => {
|
||||
getSttStatus().then(setStatus).catch(console.error);
|
||||
};
|
||||
|
||||
const handleToggleEnabled = async () => {
|
||||
if (!appSettings) return;
|
||||
const newEnabled = !appSettings.stt.enabled;
|
||||
await saveSettings({
|
||||
...appSettings,
|
||||
stt: { ...appSettings.stt, enabled: newEnabled },
|
||||
});
|
||||
};
|
||||
|
||||
const handleSaveModel = async () => {
|
||||
if (!appSettings) return;
|
||||
await saveSettings({
|
||||
...appSettings,
|
||||
stt: { ...appSettings.stt, model },
|
||||
});
|
||||
};
|
||||
|
||||
const handleSavePort = async () => {
|
||||
if (!appSettings) return;
|
||||
const portNum = parseInt(port, 10);
|
||||
if (isNaN(portNum) || portNum < 1 || portNum > 65535) return;
|
||||
await saveSettings({
|
||||
...appSettings,
|
||||
stt: { ...appSettings.stt, port: portNum },
|
||||
});
|
||||
};
|
||||
|
||||
const handleSaveLanguage = async () => {
|
||||
if (!appSettings) return;
|
||||
await saveSettings({
|
||||
...appSettings,
|
||||
stt: { ...appSettings.stt, language: language || null },
|
||||
});
|
||||
};
|
||||
|
||||
const handleStartStop = async () => {
|
||||
setLoading(true);
|
||||
try {
|
||||
if (status?.running) {
|
||||
await stopStt();
|
||||
} else {
|
||||
await startStt();
|
||||
}
|
||||
refreshStatus();
|
||||
} catch (e) {
|
||||
console.error("STT toggle failed:", e);
|
||||
} finally {
|
||||
setLoading(false);
|
||||
}
|
||||
};
|
||||
|
||||
const handlePull = async () => {
|
||||
setPulling(true);
|
||||
setBuildLog(null);
|
||||
const unlisten = await listen<string>("stt-pull-progress", (event) => {
|
||||
setBuildLog(event.payload);
|
||||
});
|
||||
try {
|
||||
await pullSttImage();
|
||||
refreshStatus();
|
||||
} catch (e) {
|
||||
console.error("STT image pull failed:", e);
|
||||
setBuildLog(`Error: ${e}`);
|
||||
} finally {
|
||||
setPulling(false);
|
||||
unlisten();
|
||||
}
|
||||
};
|
||||
|
||||
const handleBuild = async () => {
|
||||
setBuilding(true);
|
||||
setBuildLog(null);
|
||||
const unlisten = await listen<string>("stt-build-progress", (event) => {
|
||||
setBuildLog(event.payload);
|
||||
});
|
||||
try {
|
||||
await buildSttImage();
|
||||
refreshStatus();
|
||||
} catch (e) {
|
||||
console.error("STT image build failed:", e);
|
||||
setBuildLog(`Error: ${e}`);
|
||||
} finally {
|
||||
setBuilding(false);
|
||||
unlisten();
|
||||
}
|
||||
};
|
||||
|
||||
return (
|
||||
<div>
|
||||
<label className="block text-sm font-medium mb-1">
|
||||
Speech to Text
|
||||
<Tooltip text="Transcribe speech to text using Faster Whisper in a Docker container. Adds a mic button to the terminal." />
|
||||
</label>
|
||||
<p className="text-xs text-[var(--text-secondary)] mb-2">
|
||||
Click the mic button in the terminal to dictate text via speech recognition.
|
||||
</p>
|
||||
|
||||
<div className="space-y-2">
|
||||
{/* Enable toggle */}
|
||||
<div className="flex items-center gap-2">
|
||||
<button
|
||||
onClick={handleToggleEnabled}
|
||||
className={`px-2 py-0.5 text-xs rounded transition-colors ${
|
||||
appSettings?.stt?.enabled
|
||||
? "bg-[var(--success)] text-white"
|
||||
: "bg-[var(--bg-primary)] border border-[var(--border-color)] text-[var(--text-secondary)]"
|
||||
}`}
|
||||
>
|
||||
{appSettings?.stt?.enabled ? "ON" : "OFF"}
|
||||
</button>
|
||||
<span className="text-xs text-[var(--text-secondary)]">
|
||||
{appSettings?.stt?.enabled ? "Enabled" : "Disabled"}
|
||||
</span>
|
||||
</div>
|
||||
|
||||
{appSettings?.stt?.enabled && (
|
||||
<>
|
||||
{/* Model selector */}
|
||||
<div>
|
||||
<label className="block text-xs text-[var(--text-secondary)] mb-1">Model</label>
|
||||
<select
|
||||
value={model}
|
||||
onChange={(e) => setModel(e.target.value)}
|
||||
onBlur={handleSaveModel}
|
||||
className="w-full px-2 py-1 text-sm bg-[var(--bg-primary)] border border-[var(--border-color)] rounded focus:outline-none focus:border-[var(--accent)]"
|
||||
>
|
||||
<option value="tiny">Tiny (fastest, ~75MB)</option>
|
||||
<option value="small">Small (balanced, ~500MB)</option>
|
||||
<option value="medium">Medium (most accurate, ~1.5GB)</option>
|
||||
</select>
|
||||
</div>
|
||||
|
||||
{/* Port */}
|
||||
<div>
|
||||
<label className="block text-xs text-[var(--text-secondary)] mb-1">Port</label>
|
||||
<input
|
||||
type="number"
|
||||
value={port}
|
||||
onChange={(e) => setPort(e.target.value)}
|
||||
onBlur={handleSavePort}
|
||||
min={1}
|
||||
max={65535}
|
||||
className="w-full px-2 py-1 text-sm bg-[var(--bg-primary)] border border-[var(--border-color)] rounded focus:outline-none focus:border-[var(--accent)]"
|
||||
/>
|
||||
</div>
|
||||
|
||||
{/* Language */}
|
||||
<div>
|
||||
<label className="block text-xs text-[var(--text-secondary)] mb-1">Language (optional)</label>
|
||||
<input
|
||||
type="text"
|
||||
value={language}
|
||||
onChange={(e) => setLanguage(e.target.value)}
|
||||
onBlur={handleSaveLanguage}
|
||||
placeholder="Auto-detect"
|
||||
className="w-full px-2 py-1 text-sm bg-[var(--bg-primary)] border border-[var(--border-color)] rounded focus:outline-none focus:border-[var(--accent)]"
|
||||
/>
|
||||
</div>
|
||||
|
||||
{/* Container status + controls */}
|
||||
<div className="pt-1">
|
||||
<label className="block text-xs text-[var(--text-secondary)] mb-1">STT Container</label>
|
||||
<div className="flex items-center gap-2 flex-wrap">
|
||||
<span className="text-xs text-[var(--text-secondary)]">
|
||||
{status?.image_exists
|
||||
? status.running
|
||||
? `Running (port ${status.port}, model: ${status.model})`
|
||||
: status.container_exists
|
||||
? "Stopped"
|
||||
: "Image ready"
|
||||
: "No image"}
|
||||
</span>
|
||||
{status?.image_exists && (
|
||||
<button
|
||||
onClick={handleStartStop}
|
||||
disabled={loading}
|
||||
className={`px-2 py-0.5 text-xs rounded transition-colors ${
|
||||
status?.running
|
||||
? "text-[var(--error)] hover:bg-[var(--bg-primary)]"
|
||||
: "text-[var(--success)] hover:bg-[var(--bg-primary)]"
|
||||
}`}
|
||||
>
|
||||
{loading ? "..." : status?.running ? "Stop" : "Start"}
|
||||
</button>
|
||||
)}
|
||||
</div>
|
||||
|
||||
{/* Image actions */}
|
||||
<div className="flex items-center gap-2 mt-2">
|
||||
<button
|
||||
onClick={handlePull}
|
||||
disabled={pulling || building}
|
||||
className="px-3 py-1 text-xs bg-[var(--bg-primary)] border border-[var(--border-color)] rounded hover:bg-[var(--border-color)] disabled:opacity-50 transition-colors"
|
||||
>
|
||||
{pulling ? "Pulling..." : "Pull Image"}
|
||||
</button>
|
||||
<button
|
||||
onClick={handleBuild}
|
||||
disabled={pulling || building}
|
||||
className="px-3 py-1 text-xs bg-[var(--bg-primary)] border border-[var(--border-color)] rounded hover:bg-[var(--border-color)] disabled:opacity-50 transition-colors"
|
||||
>
|
||||
{building ? "Building..." : "Build Locally"}
|
||||
</button>
|
||||
</div>
|
||||
|
||||
{buildLog && (
|
||||
<pre className="mt-2 text-[10px] text-[var(--text-secondary)] bg-[var(--bg-primary)] border border-[var(--border-color)] rounded px-2 py-1 max-h-20 overflow-y-auto whitespace-pre-wrap">
|
||||
{buildLog}
|
||||
</pre>
|
||||
)}
|
||||
</div>
|
||||
</>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
128
app/src/components/settings/WebTerminalSettings.tsx
Normal file
128
app/src/components/settings/WebTerminalSettings.tsx
Normal file
@@ -0,0 +1,128 @@
|
||||
import { useState, useEffect } from "react";
|
||||
import { startWebTerminal, stopWebTerminal, getWebTerminalStatus, regenerateWebTerminalToken } from "../../lib/tauri-commands";
|
||||
import type { WebTerminalInfo } from "../../lib/types";
|
||||
import Tooltip from "../ui/Tooltip";
|
||||
|
||||
export default function WebTerminalSettings() {
|
||||
const [info, setInfo] = useState<WebTerminalInfo | null>(null);
|
||||
const [loading, setLoading] = useState(false);
|
||||
const [copied, setCopied] = useState(false);
|
||||
|
||||
useEffect(() => {
|
||||
getWebTerminalStatus().then(setInfo).catch(console.error);
|
||||
}, []);
|
||||
|
||||
const handleToggle = async () => {
|
||||
setLoading(true);
|
||||
try {
|
||||
if (info?.running) {
|
||||
await stopWebTerminal();
|
||||
const updated = await getWebTerminalStatus();
|
||||
setInfo(updated);
|
||||
} else {
|
||||
const updated = await startWebTerminal();
|
||||
setInfo(updated);
|
||||
}
|
||||
} catch (e) {
|
||||
console.error("Web terminal toggle failed:", e);
|
||||
} finally {
|
||||
setLoading(false);
|
||||
}
|
||||
};
|
||||
|
||||
const handleRegenerate = async () => {
|
||||
try {
|
||||
const updated = await regenerateWebTerminalToken();
|
||||
setInfo(updated);
|
||||
} catch (e) {
|
||||
console.error("Token regeneration failed:", e);
|
||||
}
|
||||
};
|
||||
|
||||
const handleCopyUrl = async () => {
|
||||
if (info?.url) {
|
||||
await navigator.clipboard.writeText(info.url);
|
||||
setCopied(true);
|
||||
setTimeout(() => setCopied(false), 2000);
|
||||
}
|
||||
};
|
||||
|
||||
const handleCopyToken = async () => {
|
||||
if (info?.access_token) {
|
||||
await navigator.clipboard.writeText(info.access_token);
|
||||
setCopied(true);
|
||||
setTimeout(() => setCopied(false), 2000);
|
||||
}
|
||||
};
|
||||
|
||||
return (
|
||||
<div>
|
||||
<label className="block text-sm font-medium mb-1">
|
||||
Web Terminal
|
||||
<Tooltip text="Access your terminals from a tablet or phone on the local network via a web browser." />
|
||||
</label>
|
||||
<p className="text-xs text-[var(--text-secondary)] mb-2">
|
||||
Serves a browser-based terminal UI on your local network for remote access to running projects.
|
||||
</p>
|
||||
|
||||
<div className="space-y-2">
|
||||
{/* Toggle */}
|
||||
<div className="flex items-center gap-2">
|
||||
<button
|
||||
onClick={handleToggle}
|
||||
disabled={loading}
|
||||
className={`px-2 py-0.5 text-xs rounded transition-colors ${
|
||||
info?.running
|
||||
? "bg-[var(--success)] text-white"
|
||||
: "bg-[var(--bg-primary)] border border-[var(--border-color)] text-[var(--text-secondary)]"
|
||||
}`}
|
||||
>
|
||||
{loading ? "..." : info?.running ? "ON" : "OFF"}
|
||||
</button>
|
||||
<span className="text-xs text-[var(--text-secondary)]">
|
||||
{info?.running
|
||||
? `Running on port ${info.port}`
|
||||
: "Stopped"}
|
||||
</span>
|
||||
</div>
|
||||
|
||||
{/* URL + Copy */}
|
||||
{info?.running && info.url && (
|
||||
<div className="flex items-center gap-2">
|
||||
<code className="text-xs text-[var(--accent)] bg-[var(--bg-primary)] px-2 py-1 rounded border border-[var(--border-color)] truncate flex-1">
|
||||
{info.url}
|
||||
</code>
|
||||
<button
|
||||
onClick={handleCopyUrl}
|
||||
className="text-xs px-2 py-0.5 text-[var(--accent)] hover:text-[var(--accent-hover)] hover:bg-[var(--bg-primary)] rounded transition-colors"
|
||||
>
|
||||
{copied ? "Copied!" : "Copy URL"}
|
||||
</button>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Token */}
|
||||
{info && (
|
||||
<div className="flex items-center gap-2">
|
||||
<span className="text-xs text-[var(--text-secondary)]">Token:</span>
|
||||
<code className="text-xs text-[var(--text-primary)] bg-[var(--bg-primary)] px-2 py-0.5 rounded border border-[var(--border-color)] truncate max-w-[160px]">
|
||||
{info.access_token ? `${info.access_token.slice(0, 12)}...` : "None"}
|
||||
</code>
|
||||
<button
|
||||
onClick={handleCopyToken}
|
||||
className="text-xs px-2 py-0.5 text-[var(--accent)] hover:text-[var(--accent-hover)] hover:bg-[var(--bg-primary)] rounded transition-colors"
|
||||
>
|
||||
Copy
|
||||
</button>
|
||||
<button
|
||||
onClick={handleRegenerate}
|
||||
className="text-xs px-2 py-0.5 text-[var(--warning,#f59e0b)] hover:bg-[var(--bg-primary)] rounded transition-colors"
|
||||
>
|
||||
Regenerate
|
||||
</button>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
114
app/src/components/terminal/SttButton.tsx
Normal file
114
app/src/components/terminal/SttButton.tsx
Normal file
@@ -0,0 +1,114 @@
|
||||
import { useCallback, useEffect, useRef, useState } from "react";
|
||||
import type { SttState } from "../../hooks/useSTT";
|
||||
import * as commands from "../../lib/tauri-commands";
|
||||
|
||||
interface Props {
|
||||
state: SttState;
|
||||
error: string | null;
|
||||
onToggle: () => Promise<void>;
|
||||
onCancel: () => Promise<void>;
|
||||
}
|
||||
|
||||
export default function SttButton({ state, error, onToggle, onCancel }: Props) {
|
||||
const [elapsed, setElapsed] = useState(0);
|
||||
const [hovered, setHovered] = useState(false);
|
||||
const timerRef = useRef<ReturnType<typeof setInterval> | null>(null);
|
||||
|
||||
// Track recording duration
|
||||
useEffect(() => {
|
||||
if (state === "recording") {
|
||||
setElapsed(0);
|
||||
timerRef.current = setInterval(() => setElapsed((e) => e + 1), 1000);
|
||||
} else {
|
||||
if (timerRef.current) {
|
||||
clearInterval(timerRef.current);
|
||||
timerRef.current = null;
|
||||
}
|
||||
}
|
||||
return () => {
|
||||
if (timerRef.current) clearInterval(timerRef.current);
|
||||
};
|
||||
}, [state]);
|
||||
|
||||
const handleClick = useCallback(async () => {
|
||||
// Auto-start STT container if not running
|
||||
if (state === "idle") {
|
||||
try {
|
||||
const status = await commands.getSttStatus();
|
||||
if (!status.running) {
|
||||
await commands.startStt();
|
||||
}
|
||||
} catch {
|
||||
// Container start failed, toggle will still attempt transcription
|
||||
}
|
||||
}
|
||||
await onToggle();
|
||||
}, [state, onToggle]);
|
||||
|
||||
const handleContextMenu = useCallback(
|
||||
(e: React.MouseEvent) => {
|
||||
e.preventDefault();
|
||||
if (state === "recording") {
|
||||
onCancel();
|
||||
}
|
||||
},
|
||||
[state, onCancel],
|
||||
);
|
||||
|
||||
const formatTime = (seconds: number) => {
|
||||
const m = Math.floor(seconds / 60);
|
||||
const s = seconds % 60;
|
||||
return `${m}:${s.toString().padStart(2, "0")}`;
|
||||
};
|
||||
|
||||
return (
|
||||
<div className="absolute bottom-1 left-1 z-50 flex items-center gap-2">
|
||||
<div className="relative">
|
||||
<button
|
||||
onClick={handleClick}
|
||||
onContextMenu={handleContextMenu}
|
||||
onMouseDown={(e) => e.preventDefault()} // prevent stealing focus from terminal
|
||||
onMouseEnter={() => setHovered(true)}
|
||||
onMouseLeave={() => setHovered(false)}
|
||||
disabled={state === "transcribing"}
|
||||
className={`w-8 h-8 rounded-full flex items-center justify-center transition-all cursor-pointer ${
|
||||
state === "recording"
|
||||
? "bg-[#f85149] text-white shadow-lg animate-pulse"
|
||||
: state === "transcribing"
|
||||
? "bg-[#1f2937] text-[#58a6ff] border border-[#30363d] opacity-80"
|
||||
: "bg-[#1f2937]/80 text-[#8b949e] border border-[#30363d] hover:text-[#e6edf3] hover:bg-[#2d3748]"
|
||||
}`}
|
||||
>
|
||||
{state === "transcribing" ? (
|
||||
<svg className="w-4 h-4 animate-spin" viewBox="0 0 24 24" fill="none">
|
||||
<circle cx="12" cy="12" r="10" stroke="currentColor" strokeWidth="2" opacity="0.25" />
|
||||
<path d="M12 2a10 10 0 0 1 10 10" stroke="currentColor" strokeWidth="2" strokeLinecap="round" />
|
||||
</svg>
|
||||
) : (
|
||||
<svg className="w-4 h-4" viewBox="0 0 24 24" fill="currentColor">
|
||||
<path d="M12 14c1.66 0 3-1.34 3-3V5c0-1.66-1.34-3-3-3S9 3.34 9 5v6c0 1.66 1.34 3 3 3z" />
|
||||
<path d="M17 11c0 2.76-2.24 5-5 5s-5-2.24-5-5H5c0 3.53 2.61 6.43 6 6.92V21h2v-3.08c3.39-.49 6-3.39 6-6.92h-2z" />
|
||||
</svg>
|
||||
)}
|
||||
</button>
|
||||
{hovered && state !== "recording" && (
|
||||
<div className="absolute bottom-full left-0 mb-1.5 px-2 py-1 text-[11px] leading-snug text-[#e6edf3] bg-[#21262d] border border-[#30363d] rounded shadow-lg whitespace-nowrap pointer-events-none">
|
||||
{state === "transcribing" ? "Transcribing..." : (
|
||||
<>Speech to text <kbd className="ml-1 px-1 py-0.5 text-[10px] bg-[#0d1117] border border-[#30363d] rounded font-mono">Ctrl+Shift+M</kbd></>
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
{state === "recording" && (
|
||||
<span className="text-xs text-[#f85149] font-mono bg-[#1f2937] px-2 py-0.5 rounded border border-[#30363d]">
|
||||
{formatTime(elapsed)}
|
||||
</span>
|
||||
)}
|
||||
{state === "error" && error && (
|
||||
<span className="text-xs text-[#f85149] bg-[#1f2937] px-2 py-0.5 rounded border border-[#30363d] max-w-[200px] truncate">
|
||||
{error}
|
||||
</span>
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
}
|
||||
@@ -23,8 +23,8 @@ export default function TerminalTabs() {
|
||||
: "text-[var(--text-secondary)] hover:text-[var(--text-primary)]"
|
||||
}`}
|
||||
>
|
||||
<span className="truncate max-w-[120px]">
|
||||
{session.projectName}{session.sessionType === "bash" ? " (bash)" : ""}
|
||||
<span className="truncate max-w-[140px]">
|
||||
{session.sessionName ?? session.projectName}{session.sessionType === "bash" ? " (bash)" : ""}
|
||||
</span>
|
||||
<button
|
||||
onClick={(e) => {
|
||||
|
||||
@@ -7,6 +7,8 @@ import { openUrl } from "@tauri-apps/plugin-opener";
|
||||
import "@xterm/xterm/css/xterm.css";
|
||||
import { useTerminal } from "../../hooks/useTerminal";
|
||||
import { useAppState } from "../../store/appState";
|
||||
import { useSTT } from "../../hooks/useSTT";
|
||||
import SttButton from "./SttButton";
|
||||
import { awsSsoRefresh } from "../../lib/tauri-commands";
|
||||
import { UrlDetector } from "../../lib/urlDetector";
|
||||
import UrlToast from "./UrlToast";
|
||||
@@ -24,6 +26,11 @@ export default function TerminalView({ sessionId, active }: Props) {
|
||||
const webglRef = useRef<WebglAddon | null>(null);
|
||||
const detectorRef = useRef<UrlDetector | null>(null);
|
||||
const { sendInput, pasteImage, resize, onOutput, onExit } = useTerminal();
|
||||
const setTerminalHasSelection = useAppState(s => s.setTerminalHasSelection);
|
||||
const sttEnabled = useAppState(s => s.appSettings?.stt?.enabled);
|
||||
const stt = useSTT(sessionId, sendInput);
|
||||
const sttToggleRef = useRef(stt.toggle);
|
||||
sttToggleRef.current = stt.toggle;
|
||||
|
||||
const ssoBufferRef = useRef("");
|
||||
const ssoTriggeredRef = useRef(false);
|
||||
@@ -34,6 +41,12 @@ export default function TerminalView({ sessionId, active }: Props) {
|
||||
const [detectedUrl, setDetectedUrl] = useState<string | null>(null);
|
||||
const [imagePasteMsg, setImagePasteMsg] = useState<string | null>(null);
|
||||
const [isAtBottom, setIsAtBottom] = useState(true);
|
||||
const [isAutoFollow, setIsAutoFollow] = useState(true);
|
||||
const isAtBottomRef = useRef(true);
|
||||
// Tracks user intent to follow output — only set to false by explicit user
|
||||
// actions (mouse wheel up), not by xterm scroll events during writes.
|
||||
const autoFollowRef = useRef(true);
|
||||
const lastUserScrollTimeRef = useRef(0);
|
||||
|
||||
useEffect(() => {
|
||||
if (!containerRef.current) return;
|
||||
@@ -80,6 +93,27 @@ export default function TerminalView({ sessionId, active }: Props) {
|
||||
|
||||
term.open(containerRef.current);
|
||||
|
||||
// Ctrl+Shift+C copies selected terminal text to clipboard.
|
||||
// This prevents the keystroke from reaching the container (where
|
||||
// Ctrl+C would send SIGINT and cancel running work).
|
||||
term.attachCustomKeyEventHandler((event) => {
|
||||
if (event.type === "keydown" && event.ctrlKey && event.shiftKey && event.key === "C") {
|
||||
const sel = term.getSelection();
|
||||
if (sel) {
|
||||
navigator.clipboard.writeText(sel).catch((e) =>
|
||||
console.error("Ctrl+Shift+C clipboard write failed:", e),
|
||||
);
|
||||
}
|
||||
return false; // prevent xterm from processing this key
|
||||
}
|
||||
// Ctrl+Shift+M toggles speech-to-text recording
|
||||
if (event.type === "keydown" && event.ctrlKey && event.shiftKey && event.key === "M") {
|
||||
sttToggleRef.current();
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
});
|
||||
|
||||
// WebGL addon is loaded/disposed dynamically in the active effect
|
||||
// to avoid exhausting the browser's limited WebGL context pool.
|
||||
|
||||
@@ -114,10 +148,45 @@ export default function TerminalView({ sessionId, active }: Props) {
|
||||
sendInput(sessionId, data);
|
||||
});
|
||||
|
||||
// Track scroll position to show "Jump to Current" button
|
||||
// Detect user-initiated scroll-up (mouse wheel) to pause auto-follow.
|
||||
// Captured during capture phase so it fires before xterm's own handler.
|
||||
const handleWheel = (e: WheelEvent) => {
|
||||
lastUserScrollTimeRef.current = Date.now();
|
||||
if (e.deltaY < 0) {
|
||||
autoFollowRef.current = false;
|
||||
setIsAutoFollow(false);
|
||||
isAtBottomRef.current = false;
|
||||
setIsAtBottom(false);
|
||||
}
|
||||
};
|
||||
containerRef.current.addEventListener("wheel", handleWheel, { capture: true, passive: true });
|
||||
|
||||
// Track scroll position to show "Jump to Current" button.
|
||||
// Debounce state updates via rAF to avoid excessive re-renders during rapid output.
|
||||
let scrollStateRafId: number | null = null;
|
||||
const scrollDisposable = term.onScroll(() => {
|
||||
const buf = term.buffer.active;
|
||||
setIsAtBottom(buf.viewportY >= buf.baseY);
|
||||
const atBottom = buf.viewportY >= buf.baseY;
|
||||
isAtBottomRef.current = atBottom;
|
||||
|
||||
// Re-enable auto-follow only when USER scrolls to bottom (not write-triggered)
|
||||
const isUserScroll = (Date.now() - lastUserScrollTimeRef.current) < 300;
|
||||
if (atBottom && isUserScroll && !autoFollowRef.current) {
|
||||
autoFollowRef.current = true;
|
||||
setIsAutoFollow(true);
|
||||
}
|
||||
|
||||
if (scrollStateRafId === null) {
|
||||
scrollStateRafId = requestAnimationFrame(() => {
|
||||
scrollStateRafId = null;
|
||||
setIsAtBottom(isAtBottomRef.current);
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
// Track text selection to show copy hint in status bar
|
||||
const selectionDisposable = term.onSelectionChange(() => {
|
||||
setTerminalHasSelection(term.hasSelection());
|
||||
});
|
||||
|
||||
// Handle image paste: intercept paste events with image data,
|
||||
@@ -165,7 +234,15 @@ export default function TerminalView({ sessionId, active }: Props) {
|
||||
|
||||
const outputPromise = onOutput(sessionId, (data) => {
|
||||
if (aborted) return;
|
||||
term.write(data);
|
||||
term.write(data, () => {
|
||||
if (autoFollowRef.current) {
|
||||
term.scrollToBottom();
|
||||
if (!isAtBottomRef.current) {
|
||||
isAtBottomRef.current = true;
|
||||
setIsAtBottom(true);
|
||||
}
|
||||
}
|
||||
});
|
||||
detector.feed(data);
|
||||
|
||||
// Scan for SSO refresh marker in terminal output
|
||||
@@ -209,6 +286,9 @@ export default function TerminalView({ sessionId, active }: Props) {
|
||||
if (!containerRef.current || containerRef.current.offsetWidth === 0) return;
|
||||
fitAddon.fit();
|
||||
resize(sessionId, term.cols, term.rows);
|
||||
if (autoFollowRef.current) {
|
||||
term.scrollToBottom();
|
||||
}
|
||||
});
|
||||
});
|
||||
resizeObserver.observe(containerRef.current);
|
||||
@@ -222,9 +302,13 @@ export default function TerminalView({ sessionId, active }: Props) {
|
||||
osc52Disposable.dispose();
|
||||
inputDisposable.dispose();
|
||||
scrollDisposable.dispose();
|
||||
selectionDisposable.dispose();
|
||||
setTerminalHasSelection(false);
|
||||
containerRef.current?.removeEventListener("wheel", handleWheel, { capture: true });
|
||||
containerRef.current?.removeEventListener("paste", handlePaste, { capture: true });
|
||||
outputPromise.then((fn) => fn?.());
|
||||
exitPromise.then((fn) => fn?.());
|
||||
if (scrollStateRafId !== null) cancelAnimationFrame(scrollStateRafId);
|
||||
if (resizeRafId !== null) cancelAnimationFrame(resizeRafId);
|
||||
resizeObserver.disconnect();
|
||||
try { webglRef.current?.dispose(); } catch { /* may already be disposed */ }
|
||||
@@ -256,6 +340,9 @@ export default function TerminalView({ sessionId, active }: Props) {
|
||||
}
|
||||
}
|
||||
fitRef.current?.fit();
|
||||
if (autoFollowRef.current) {
|
||||
term.scrollToBottom();
|
||||
}
|
||||
term.focus();
|
||||
} else {
|
||||
// Release WebGL context for inactive terminals
|
||||
@@ -290,8 +377,30 @@ export default function TerminalView({ sessionId, active }: Props) {
|
||||
}, [detectedUrl]);
|
||||
|
||||
const handleScrollToBottom = useCallback(() => {
|
||||
termRef.current?.scrollToBottom();
|
||||
setIsAtBottom(true);
|
||||
const term = termRef.current;
|
||||
if (term) {
|
||||
autoFollowRef.current = true;
|
||||
setIsAutoFollow(true);
|
||||
fitRef.current?.fit();
|
||||
term.scrollToBottom();
|
||||
isAtBottomRef.current = true;
|
||||
setIsAtBottom(true);
|
||||
}
|
||||
}, []);
|
||||
|
||||
const handleToggleAutoFollow = useCallback(() => {
|
||||
const next = !autoFollowRef.current;
|
||||
autoFollowRef.current = next;
|
||||
setIsAutoFollow(next);
|
||||
if (next) {
|
||||
const term = termRef.current;
|
||||
if (term) {
|
||||
fitRef.current?.fit();
|
||||
term.scrollToBottom();
|
||||
isAtBottomRef.current = true;
|
||||
setIsAtBottom(true);
|
||||
}
|
||||
}
|
||||
}, []);
|
||||
|
||||
return (
|
||||
@@ -314,6 +423,21 @@ export default function TerminalView({ sessionId, active }: Props) {
|
||||
{imagePasteMsg}
|
||||
</div>
|
||||
)}
|
||||
{/* Auto-follow toggle - top right */}
|
||||
<button
|
||||
onClick={handleToggleAutoFollow}
|
||||
className={`absolute top-2 right-4 z-50 px-2 py-1 rounded text-[10px] font-medium border shadow-sm transition-colors cursor-pointer ${
|
||||
isAutoFollow
|
||||
? "bg-[#1a2332] text-[#3fb950] border-[#238636] hover:bg-[#1f2d3d]"
|
||||
: "bg-[#1f2937] text-[#8b949e] border-[#30363d] hover:bg-[#2d3748]"
|
||||
}`}
|
||||
title={isAutoFollow ? "Auto-scrolling to latest output (click to pause)" : "Auto-scroll paused (click to resume)"}
|
||||
>
|
||||
{isAutoFollow ? "▼ Following" : "▽ Paused"}
|
||||
</button>
|
||||
{/* STT mic button - bottom left */}
|
||||
{sttEnabled && <SttButton state={stt.state} error={stt.error} onToggle={stt.toggle} onCancel={stt.cancelRecording} />}
|
||||
{/* Jump to Current - bottom right, when scrolled up */}
|
||||
{!isAtBottom && (
|
||||
<button
|
||||
onClick={handleScrollToBottom}
|
||||
|
||||
78
app/src/components/ui/Tooltip.tsx
Normal file
78
app/src/components/ui/Tooltip.tsx
Normal file
@@ -0,0 +1,78 @@
|
||||
import { useState, useRef, useLayoutEffect, type ReactNode } from "react";
|
||||
import { createPortal } from "react-dom";
|
||||
|
||||
interface TooltipProps {
|
||||
text: string;
|
||||
children?: ReactNode;
|
||||
}
|
||||
|
||||
/**
|
||||
* A small circled question-mark icon that shows a tooltip on hover.
|
||||
* Uses a portal to render at `document.body` so the tooltip is never
|
||||
* clipped by ancestor `overflow: hidden` containers.
|
||||
*/
|
||||
export default function Tooltip({ text, children }: TooltipProps) {
|
||||
const [visible, setVisible] = useState(false);
|
||||
const [coords, setCoords] = useState({ top: 0, left: 0 });
|
||||
const [, setPlacement] = useState<"top" | "bottom">("top");
|
||||
const triggerRef = useRef<HTMLSpanElement>(null);
|
||||
const tooltipRef = useRef<HTMLDivElement>(null);
|
||||
|
||||
useLayoutEffect(() => {
|
||||
if (!visible || !triggerRef.current || !tooltipRef.current) return;
|
||||
|
||||
const trigger = triggerRef.current.getBoundingClientRect();
|
||||
const tooltip = tooltipRef.current.getBoundingClientRect();
|
||||
const gap = 6;
|
||||
|
||||
// Vertical: prefer above, fall back to below
|
||||
const above = trigger.top - tooltip.height - gap >= 4;
|
||||
const pos = above ? "top" : "bottom";
|
||||
setPlacement(pos);
|
||||
|
||||
const top =
|
||||
pos === "top"
|
||||
? trigger.top - tooltip.height - gap
|
||||
: trigger.bottom + gap;
|
||||
|
||||
// Horizontal: center on trigger, clamp to viewport
|
||||
let left = trigger.left + trigger.width / 2 - tooltip.width / 2;
|
||||
left = Math.max(4, Math.min(left, window.innerWidth - tooltip.width - 4));
|
||||
|
||||
setCoords({ top, left });
|
||||
}, [visible]);
|
||||
|
||||
return (
|
||||
<span
|
||||
ref={triggerRef}
|
||||
className="inline-flex items-center ml-1"
|
||||
onMouseEnter={() => setVisible(true)}
|
||||
onMouseLeave={() => setVisible(false)}
|
||||
>
|
||||
{children ?? (
|
||||
<span
|
||||
className="inline-flex items-center justify-center w-3.5 h-3.5 rounded-full border border-[var(--text-secondary)] text-[var(--text-secondary)] text-[9px] leading-none cursor-help select-none hover:border-[var(--accent)] hover:text-[var(--accent)] transition-colors"
|
||||
aria-label="Help"
|
||||
>
|
||||
?
|
||||
</span>
|
||||
)}
|
||||
{visible &&
|
||||
createPortal(
|
||||
<div
|
||||
ref={tooltipRef}
|
||||
style={{
|
||||
position: "fixed",
|
||||
top: coords.top,
|
||||
left: coords.left,
|
||||
zIndex: 9999,
|
||||
}}
|
||||
className={`px-2.5 py-1.5 text-[11px] leading-snug text-[var(--text-primary)] bg-[var(--bg-tertiary)] border border-[var(--border-color)] rounded shadow-lg whitespace-normal max-w-[280px] w-max pointer-events-none`}
|
||||
>
|
||||
{text}
|
||||
</div>,
|
||||
document.body
|
||||
)}
|
||||
</span>
|
||||
);
|
||||
}
|
||||
145
app/src/hooks/useSTT.ts
Normal file
145
app/src/hooks/useSTT.ts
Normal file
@@ -0,0 +1,145 @@
|
||||
import { useCallback, useRef, useState } from "react";
|
||||
import * as commands from "../lib/tauri-commands";
|
||||
import { encodeWav } from "../lib/wav";
|
||||
import { useAppState } from "../store/appState";
|
||||
|
||||
export type SttState = "idle" | "recording" | "transcribing" | "error";
|
||||
|
||||
export function useSTT(sessionId: string, sendInput: (sessionId: string, data: string) => Promise<void>) {
|
||||
const [state, setState] = useState<SttState>("idle");
|
||||
const [error, setError] = useState<string | null>(null);
|
||||
|
||||
const audioContextRef = useRef<AudioContext | null>(null);
|
||||
const streamRef = useRef<MediaStream | null>(null);
|
||||
const workletRef = useRef<AudioWorkletNode | null>(null);
|
||||
const chunksRef = useRef<Int16Array[]>([]);
|
||||
|
||||
const appSettings = useAppState((s) => s.appSettings);
|
||||
const deviceId = appSettings?.default_microphone;
|
||||
|
||||
const startRecording = useCallback(async () => {
|
||||
if (state === "recording" || state === "transcribing") return;
|
||||
setState("recording");
|
||||
setError(null);
|
||||
chunksRef.current = [];
|
||||
|
||||
try {
|
||||
const audioConstraints: MediaTrackConstraints = {
|
||||
channelCount: 1,
|
||||
echoCancellation: true,
|
||||
noiseSuppression: true,
|
||||
autoGainControl: true,
|
||||
};
|
||||
if (deviceId) {
|
||||
audioConstraints.deviceId = { exact: deviceId };
|
||||
}
|
||||
|
||||
const stream = await navigator.mediaDevices.getUserMedia({ audio: audioConstraints });
|
||||
streamRef.current = stream;
|
||||
|
||||
const audioContext = new AudioContext({ sampleRate: 16000 });
|
||||
audioContextRef.current = audioContext;
|
||||
|
||||
await audioContext.audioWorklet.addModule("/audio-capture-processor.js");
|
||||
|
||||
const source = audioContext.createMediaStreamSource(stream);
|
||||
const processor = new AudioWorkletNode(audioContext, "audio-capture-processor");
|
||||
workletRef.current = processor;
|
||||
|
||||
processor.port.onmessage = (event: MessageEvent<ArrayBuffer>) => {
|
||||
chunksRef.current.push(new Int16Array(event.data));
|
||||
};
|
||||
|
||||
source.connect(processor);
|
||||
processor.connect(audioContext.destination);
|
||||
} catch (e) {
|
||||
const msg = e instanceof Error ? e.message : String(e);
|
||||
setError(msg);
|
||||
setState("error");
|
||||
}
|
||||
}, [state, deviceId]);
|
||||
|
||||
const stopRecording = useCallback(async () => {
|
||||
if (state !== "recording") return;
|
||||
|
||||
// Stop audio capture
|
||||
workletRef.current?.disconnect();
|
||||
workletRef.current = null;
|
||||
|
||||
if (audioContextRef.current) {
|
||||
await audioContextRef.current.close().catch(() => {});
|
||||
audioContextRef.current = null;
|
||||
}
|
||||
|
||||
if (streamRef.current) {
|
||||
streamRef.current.getTracks().forEach((t) => t.stop());
|
||||
streamRef.current = null;
|
||||
}
|
||||
|
||||
// Concatenate PCM chunks
|
||||
const chunks = chunksRef.current;
|
||||
chunksRef.current = [];
|
||||
|
||||
if (chunks.length === 0) {
|
||||
setState("idle");
|
||||
return;
|
||||
}
|
||||
|
||||
const totalLength = chunks.reduce((sum, c) => sum + c.length, 0);
|
||||
const pcm = new Int16Array(totalLength);
|
||||
let offset = 0;
|
||||
for (const chunk of chunks) {
|
||||
pcm.set(chunk, offset);
|
||||
offset += chunk.length;
|
||||
}
|
||||
|
||||
// Encode to WAV and transcribe
|
||||
setState("transcribing");
|
||||
try {
|
||||
const wavBlob = encodeWav(pcm, 16000);
|
||||
const wavBuffer = await wavBlob.arrayBuffer();
|
||||
const audioData = Array.from(new Uint8Array(wavBuffer));
|
||||
|
||||
const text = await commands.transcribeAudio(audioData);
|
||||
if (text) {
|
||||
await sendInput(sessionId, text);
|
||||
}
|
||||
setState("idle");
|
||||
} catch (e) {
|
||||
const msg = e instanceof Error ? e.message : String(e);
|
||||
setError(msg);
|
||||
setState("error");
|
||||
// Reset to idle after a brief delay so the UI shows the error
|
||||
setTimeout(() => setState("idle"), 3000);
|
||||
}
|
||||
}, [state, sessionId, sendInput]);
|
||||
|
||||
const cancelRecording = useCallback(async () => {
|
||||
workletRef.current?.disconnect();
|
||||
workletRef.current = null;
|
||||
|
||||
if (audioContextRef.current) {
|
||||
await audioContextRef.current.close().catch(() => {});
|
||||
audioContextRef.current = null;
|
||||
}
|
||||
|
||||
if (streamRef.current) {
|
||||
streamRef.current.getTracks().forEach((t) => t.stop());
|
||||
streamRef.current = null;
|
||||
}
|
||||
|
||||
chunksRef.current = [];
|
||||
setState("idle");
|
||||
setError(null);
|
||||
}, []);
|
||||
|
||||
const toggle = useCallback(async () => {
|
||||
if (state === "recording") {
|
||||
await stopRecording();
|
||||
} else if (state === "idle" || state === "error") {
|
||||
await startRecording();
|
||||
}
|
||||
}, [state, startRecording, stopRecording]);
|
||||
|
||||
return { state, error, startRecording, stopRecording, cancelRecording, toggle };
|
||||
}
|
||||
@@ -17,10 +17,10 @@ export function useTerminal() {
|
||||
);
|
||||
|
||||
const open = useCallback(
|
||||
async (projectId: string, projectName: string, sessionType: "claude" | "bash" = "claude") => {
|
||||
async (projectId: string, projectName: string, sessionType: "claude" | "bash" = "claude", sessionName?: string) => {
|
||||
const sessionId = crypto.randomUUID();
|
||||
await commands.openTerminalSession(projectId, sessionId, sessionType);
|
||||
addSession({ id: sessionId, projectId, projectName, sessionType });
|
||||
await commands.openTerminalSession(projectId, sessionId, sessionType, sessionName);
|
||||
addSession({ id: sessionId, projectId, projectName, sessionType, sessionName: sessionName ?? null });
|
||||
return sessionId;
|
||||
},
|
||||
[addSession],
|
||||
|
||||
@@ -6,16 +6,25 @@ import * as commands from "../lib/tauri-commands";
|
||||
const CHECK_INTERVAL_MS = 24 * 60 * 60 * 1000; // 24 hours
|
||||
|
||||
export function useUpdates() {
|
||||
const { updateInfo, setUpdateInfo, appVersion, setAppVersion, appSettings } =
|
||||
useAppState(
|
||||
useShallow((s) => ({
|
||||
updateInfo: s.updateInfo,
|
||||
setUpdateInfo: s.setUpdateInfo,
|
||||
appVersion: s.appVersion,
|
||||
setAppVersion: s.setAppVersion,
|
||||
appSettings: s.appSettings,
|
||||
})),
|
||||
);
|
||||
const {
|
||||
updateInfo,
|
||||
setUpdateInfo,
|
||||
imageUpdateInfo,
|
||||
setImageUpdateInfo,
|
||||
appVersion,
|
||||
setAppVersion,
|
||||
appSettings,
|
||||
} = useAppState(
|
||||
useShallow((s) => ({
|
||||
updateInfo: s.updateInfo,
|
||||
setUpdateInfo: s.setUpdateInfo,
|
||||
imageUpdateInfo: s.imageUpdateInfo,
|
||||
setImageUpdateInfo: s.setImageUpdateInfo,
|
||||
appVersion: s.appVersion,
|
||||
setAppVersion: s.setAppVersion,
|
||||
appSettings: s.appSettings,
|
||||
})),
|
||||
);
|
||||
|
||||
const intervalRef = useRef<ReturnType<typeof setInterval> | null>(null);
|
||||
|
||||
@@ -47,11 +56,31 @@ export function useUpdates() {
|
||||
}
|
||||
}, [setUpdateInfo, appSettings?.dismissed_update_version]);
|
||||
|
||||
const checkImageUpdate = useCallback(async () => {
|
||||
try {
|
||||
const info = await commands.checkImageUpdate();
|
||||
if (info) {
|
||||
// Respect dismissed image digest
|
||||
const dismissed = appSettings?.dismissed_image_digest;
|
||||
if (dismissed && dismissed === info.remote_digest) {
|
||||
setImageUpdateInfo(null);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
setImageUpdateInfo(info);
|
||||
return info;
|
||||
} catch (e) {
|
||||
console.error("Failed to check for image updates:", e);
|
||||
return null;
|
||||
}
|
||||
}, [setImageUpdateInfo, appSettings?.dismissed_image_digest]);
|
||||
|
||||
const startPeriodicCheck = useCallback(() => {
|
||||
if (intervalRef.current) return;
|
||||
intervalRef.current = setInterval(() => {
|
||||
if (appSettings?.auto_check_updates !== false) {
|
||||
checkForUpdates();
|
||||
checkImageUpdate();
|
||||
}
|
||||
}, CHECK_INTERVAL_MS);
|
||||
return () => {
|
||||
@@ -60,13 +89,15 @@ export function useUpdates() {
|
||||
intervalRef.current = null;
|
||||
}
|
||||
};
|
||||
}, [checkForUpdates, appSettings?.auto_check_updates]);
|
||||
}, [checkForUpdates, checkImageUpdate, appSettings?.auto_check_updates]);
|
||||
|
||||
return {
|
||||
updateInfo,
|
||||
imageUpdateInfo,
|
||||
appVersion,
|
||||
loadVersion,
|
||||
checkForUpdates,
|
||||
checkImageUpdate,
|
||||
startPeriodicCheck,
|
||||
};
|
||||
}
|
||||
|
||||
@@ -53,3 +53,135 @@ body {
|
||||
to { opacity: 1; transform: translate(-50%, 0); }
|
||||
}
|
||||
.animate-slide-down { animation: slide-down 0.2s ease-out; }
|
||||
|
||||
/* Help dialog content styles */
|
||||
.help-content {
|
||||
font-size: 0.8125rem;
|
||||
line-height: 1.6;
|
||||
color: var(--text-primary);
|
||||
}
|
||||
|
||||
.help-content .help-h1 {
|
||||
font-size: 1.5rem;
|
||||
font-weight: 700;
|
||||
margin: 0 0 1rem 0;
|
||||
color: var(--text-primary);
|
||||
}
|
||||
|
||||
.help-content .help-h2 {
|
||||
font-size: 1.15rem;
|
||||
font-weight: 600;
|
||||
margin: 1.5rem 0 0.75rem 0;
|
||||
padding-bottom: 0.375rem;
|
||||
border-bottom: 1px solid var(--border-color);
|
||||
color: var(--text-primary);
|
||||
}
|
||||
|
||||
.help-content .help-h3 {
|
||||
font-size: 0.95rem;
|
||||
font-weight: 600;
|
||||
margin: 1.25rem 0 0.5rem 0;
|
||||
color: var(--text-primary);
|
||||
}
|
||||
|
||||
.help-content .help-h4 {
|
||||
font-size: 0.875rem;
|
||||
font-weight: 600;
|
||||
margin: 1rem 0 0.375rem 0;
|
||||
color: var(--text-secondary);
|
||||
}
|
||||
|
||||
.help-content .help-p {
|
||||
margin: 0.5rem 0;
|
||||
}
|
||||
|
||||
.help-content .help-ul,
|
||||
.help-content .help-ol {
|
||||
margin: 0.5rem 0;
|
||||
padding-left: 1.5rem;
|
||||
}
|
||||
|
||||
.help-content .help-ul {
|
||||
list-style-type: disc;
|
||||
}
|
||||
|
||||
.help-content .help-ol {
|
||||
list-style-type: decimal;
|
||||
}
|
||||
|
||||
.help-content .help-ul li,
|
||||
.help-content .help-ol li {
|
||||
margin: 0.25rem 0;
|
||||
}
|
||||
|
||||
.help-content .help-code-block {
|
||||
display: block;
|
||||
background: var(--bg-primary);
|
||||
border: 1px solid var(--border-color);
|
||||
border-radius: 6px;
|
||||
padding: 0.75rem 1rem;
|
||||
margin: 0.5rem 0;
|
||||
overflow-x: auto;
|
||||
font-family: "SFMono-Regular", Consolas, "Liberation Mono", Menlo, monospace;
|
||||
font-size: 0.75rem;
|
||||
line-height: 1.5;
|
||||
white-space: pre;
|
||||
}
|
||||
|
||||
.help-content .help-inline-code {
|
||||
background: var(--bg-tertiary);
|
||||
border: 1px solid var(--border-color);
|
||||
border-radius: 3px;
|
||||
padding: 0.125rem 0.375rem;
|
||||
font-family: "SFMono-Regular", Consolas, "Liberation Mono", Menlo, monospace;
|
||||
font-size: 0.75rem;
|
||||
}
|
||||
|
||||
.help-content .help-table {
|
||||
width: 100%;
|
||||
border-collapse: collapse;
|
||||
margin: 0.5rem 0;
|
||||
font-size: 0.75rem;
|
||||
}
|
||||
|
||||
.help-content .help-table th,
|
||||
.help-content .help-table td {
|
||||
border: 1px solid var(--border-color);
|
||||
padding: 0.375rem 0.625rem;
|
||||
text-align: left;
|
||||
}
|
||||
|
||||
.help-content .help-table th {
|
||||
background: var(--bg-tertiary);
|
||||
font-weight: 600;
|
||||
}
|
||||
|
||||
.help-content .help-table td {
|
||||
background: var(--bg-primary);
|
||||
}
|
||||
|
||||
.help-content .help-blockquote {
|
||||
border-left: 3px solid var(--accent);
|
||||
background: var(--bg-primary);
|
||||
margin: 0.5rem 0;
|
||||
padding: 0.5rem 0.75rem;
|
||||
border-radius: 0 4px 4px 0;
|
||||
color: var(--text-secondary);
|
||||
font-size: 0.75rem;
|
||||
}
|
||||
|
||||
.help-content .help-hr {
|
||||
border: none;
|
||||
border-top: 1px solid var(--border-color);
|
||||
margin: 1.5rem 0;
|
||||
}
|
||||
|
||||
.help-content .help-link {
|
||||
color: var(--accent);
|
||||
text-decoration: none;
|
||||
}
|
||||
|
||||
.help-content .help-link:hover {
|
||||
color: var(--accent-hover);
|
||||
text-decoration: underline;
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import { invoke } from "@tauri-apps/api/core";
|
||||
import type { Project, ProjectPath, ContainerInfo, SiblingContainer, AppSettings, UpdateInfo, McpServer, FileEntry } from "./types";
|
||||
import type { Project, ProjectPath, ContainerInfo, SiblingContainer, AppSettings, UpdateInfo, ImageUpdateInfo, McpServer, FileEntry, WebTerminalInfo, SttStatus } from "./types";
|
||||
|
||||
// Docker
|
||||
export const checkDocker = () => invoke<boolean>("check_docker");
|
||||
@@ -45,8 +45,8 @@ export const awsSsoRefresh = (projectId: string) =>
|
||||
invoke<void>("aws_sso_refresh", { projectId });
|
||||
|
||||
// Terminal
|
||||
export const openTerminalSession = (projectId: string, sessionId: string, sessionType?: string) =>
|
||||
invoke<void>("open_terminal_session", { projectId, sessionId, sessionType });
|
||||
export const openTerminalSession = (projectId: string, sessionId: string, sessionType?: string, sessionName?: string) =>
|
||||
invoke<void>("open_terminal_session", { projectId, sessionId, sessionType, sessionName });
|
||||
export const terminalInput = (sessionId: string, data: number[]) =>
|
||||
invoke<void>("terminal_input", { sessionId, data });
|
||||
export const terminalResize = (sessionId: string, cols: number, rows: number) =>
|
||||
@@ -83,3 +83,27 @@ export const uploadFileToContainer = (projectId: string, hostPath: string, conta
|
||||
export const getAppVersion = () => invoke<string>("get_app_version");
|
||||
export const checkForUpdates = () =>
|
||||
invoke<UpdateInfo | null>("check_for_updates");
|
||||
export const checkImageUpdate = () =>
|
||||
invoke<ImageUpdateInfo | null>("check_image_update");
|
||||
|
||||
// Help
|
||||
export const getHelpContent = () => invoke<string>("get_help_content");
|
||||
|
||||
// Web Terminal
|
||||
export const startWebTerminal = () =>
|
||||
invoke<WebTerminalInfo>("start_web_terminal");
|
||||
export const stopWebTerminal = () =>
|
||||
invoke<void>("stop_web_terminal");
|
||||
export const getWebTerminalStatus = () =>
|
||||
invoke<WebTerminalInfo>("get_web_terminal_status");
|
||||
export const regenerateWebTerminalToken = () =>
|
||||
invoke<WebTerminalInfo>("regenerate_web_terminal_token");
|
||||
|
||||
// STT
|
||||
export const getSttStatus = () => invoke<SttStatus>("get_stt_status");
|
||||
export const startStt = () => invoke<SttStatus>("start_stt");
|
||||
export const stopStt = () => invoke<void>("stop_stt");
|
||||
export const buildSttImage = () => invoke<void>("build_stt_image");
|
||||
export const pullSttImage = () => invoke<void>("pull_stt_image");
|
||||
export const transcribeAudio = (audioData: number[]) =>
|
||||
invoke<string>("transcribe_audio", { audioData });
|
||||
|
||||
@@ -20,12 +20,13 @@ export interface Project {
|
||||
paths: ProjectPath[];
|
||||
container_id: string | null;
|
||||
status: ProjectStatus;
|
||||
auth_mode: AuthMode;
|
||||
backend: Backend;
|
||||
bedrock_config: BedrockConfig | null;
|
||||
ollama_config: OllamaConfig | null;
|
||||
litellm_config: LiteLlmConfig | null;
|
||||
openai_compatible_config: OpenAiCompatibleConfig | null;
|
||||
allow_docker_access: boolean;
|
||||
mission_control_enabled: boolean;
|
||||
full_permissions: boolean;
|
||||
ssh_key_path: string | null;
|
||||
git_token: string | null;
|
||||
git_user_name: string | null;
|
||||
@@ -34,6 +35,7 @@ export interface Project {
|
||||
port_mappings: PortMapping[];
|
||||
claude_instructions: string | null;
|
||||
enabled_mcp_servers: string[];
|
||||
claude_code_settings: ClaudeCodeSettings | null;
|
||||
created_at: string;
|
||||
updated_at: string;
|
||||
}
|
||||
@@ -45,7 +47,7 @@ export type ProjectStatus =
|
||||
| "stopping"
|
||||
| "error";
|
||||
|
||||
export type AuthMode = "anthropic" | "bedrock" | "ollama" | "lit_llm";
|
||||
export type Backend = "anthropic" | "bedrock" | "ollama" | "open_ai_compatible";
|
||||
|
||||
export type BedrockAuthMethod = "static_credentials" | "profile" | "bearer_token";
|
||||
|
||||
@@ -66,12 +68,23 @@ export interface OllamaConfig {
|
||||
model_id: string | null;
|
||||
}
|
||||
|
||||
export interface LiteLlmConfig {
|
||||
export interface OpenAiCompatibleConfig {
|
||||
base_url: string;
|
||||
api_key: string | null;
|
||||
model_id: string | null;
|
||||
}
|
||||
|
||||
export interface ClaudeCodeSettings {
|
||||
tui_mode: string | null;
|
||||
effort: string | null;
|
||||
auto_scroll_disabled: boolean;
|
||||
focus_mode: boolean;
|
||||
show_thinking_summaries: boolean;
|
||||
enable_session_recap: boolean;
|
||||
env_scrub: boolean;
|
||||
prompt_caching_1h: boolean;
|
||||
}
|
||||
|
||||
export interface ContainerInfo {
|
||||
container_id: string;
|
||||
project_id: string;
|
||||
@@ -92,6 +105,7 @@ export interface TerminalSession {
|
||||
projectId: string;
|
||||
projectName: string;
|
||||
sessionType: "claude" | "bash";
|
||||
sessionName: string | null;
|
||||
}
|
||||
|
||||
export type ImageSource = "registry" | "local_build" | "custom";
|
||||
@@ -116,6 +130,39 @@ export interface AppSettings {
|
||||
dismissed_update_version: string | null;
|
||||
timezone: string | null;
|
||||
default_microphone: string | null;
|
||||
dismissed_image_digest: string | null;
|
||||
web_terminal: WebTerminalSettings;
|
||||
stt: SttSettings;
|
||||
global_claude_code_settings: ClaudeCodeSettings | null;
|
||||
}
|
||||
|
||||
export interface SttSettings {
|
||||
enabled: boolean;
|
||||
model: string;
|
||||
port: number;
|
||||
language: string | null;
|
||||
}
|
||||
|
||||
export interface SttStatus {
|
||||
container_exists: boolean;
|
||||
running: boolean;
|
||||
port: number;
|
||||
model: string;
|
||||
image_exists: boolean;
|
||||
}
|
||||
|
||||
export interface WebTerminalSettings {
|
||||
enabled: boolean;
|
||||
port: number;
|
||||
access_token: string | null;
|
||||
}
|
||||
|
||||
export interface WebTerminalInfo {
|
||||
running: boolean;
|
||||
port: number;
|
||||
access_token: string;
|
||||
local_ip: string | null;
|
||||
url: string | null;
|
||||
}
|
||||
|
||||
export interface UpdateInfo {
|
||||
@@ -133,6 +180,12 @@ export interface ReleaseAsset {
|
||||
size: number;
|
||||
}
|
||||
|
||||
export interface ImageUpdateInfo {
|
||||
remote_digest: string;
|
||||
local_digest: string | null;
|
||||
remote_updated_at: string | null;
|
||||
}
|
||||
|
||||
export type McpTransportType = "stdio" | "http";
|
||||
|
||||
export interface McpServer {
|
||||
|
||||
40
app/src/lib/wav.ts
Normal file
40
app/src/lib/wav.ts
Normal file
@@ -0,0 +1,40 @@
|
||||
/**
|
||||
* Encode PCM Int16 samples into a WAV file blob.
|
||||
* Assumes mono channel at the given sample rate.
|
||||
*/
|
||||
export function encodeWav(samples: Int16Array, sampleRate: number): Blob {
|
||||
const byteLength = samples.length * 2;
|
||||
const buffer = new ArrayBuffer(44 + byteLength);
|
||||
const view = new DataView(buffer);
|
||||
|
||||
// RIFF header
|
||||
writeString(view, 0, "RIFF");
|
||||
view.setUint32(4, 36 + byteLength, true);
|
||||
writeString(view, 8, "WAVE");
|
||||
|
||||
// fmt chunk
|
||||
writeString(view, 12, "fmt ");
|
||||
view.setUint32(16, 16, true); // chunk size
|
||||
view.setUint16(20, 1, true); // PCM format
|
||||
view.setUint16(22, 1, true); // mono
|
||||
view.setUint32(24, sampleRate, true);
|
||||
view.setUint32(28, sampleRate * 2, true); // byte rate
|
||||
view.setUint16(32, 2, true); // block align
|
||||
view.setUint16(34, 16, true); // bits per sample
|
||||
|
||||
// data chunk
|
||||
writeString(view, 36, "data");
|
||||
view.setUint32(40, byteLength, true);
|
||||
|
||||
// PCM samples
|
||||
const output = new Int16Array(buffer, 44);
|
||||
output.set(samples);
|
||||
|
||||
return new Blob([buffer], { type: "audio/wav" });
|
||||
}
|
||||
|
||||
function writeString(view: DataView, offset: number, str: string) {
|
||||
for (let i = 0; i < str.length; i++) {
|
||||
view.setUint8(offset + i, str.charCodeAt(i));
|
||||
}
|
||||
}
|
||||
@@ -1,5 +1,5 @@
|
||||
import { create } from "zustand";
|
||||
import type { Project, TerminalSession, AppSettings, UpdateInfo, McpServer } from "../lib/types";
|
||||
import type { Project, TerminalSession, AppSettings, UpdateInfo, ImageUpdateInfo, McpServer } from "../lib/types";
|
||||
|
||||
interface AppState {
|
||||
// Projects
|
||||
@@ -24,6 +24,8 @@ interface AppState {
|
||||
removeMcpServerFromList: (id: string) => void;
|
||||
|
||||
// UI state
|
||||
terminalHasSelection: boolean;
|
||||
setTerminalHasSelection: (has: boolean) => void;
|
||||
sidebarView: "projects" | "mcp" | "settings";
|
||||
setSidebarView: (view: "projects" | "mcp" | "settings") => void;
|
||||
dockerAvailable: boolean | null;
|
||||
@@ -39,6 +41,10 @@ interface AppState {
|
||||
setUpdateInfo: (info: UpdateInfo | null) => void;
|
||||
appVersion: string;
|
||||
setAppVersion: (version: string) => void;
|
||||
|
||||
// Image update info
|
||||
imageUpdateInfo: ImageUpdateInfo | null;
|
||||
setImageUpdateInfo: (info: ImageUpdateInfo | null) => void;
|
||||
}
|
||||
|
||||
export const useAppState = create<AppState>((set) => ({
|
||||
@@ -96,6 +102,8 @@ export const useAppState = create<AppState>((set) => ({
|
||||
})),
|
||||
|
||||
// UI state
|
||||
terminalHasSelection: false,
|
||||
setTerminalHasSelection: (has) => set({ terminalHasSelection: has }),
|
||||
sidebarView: "projects",
|
||||
setSidebarView: (view) => set({ sidebarView: view }),
|
||||
dockerAvailable: null,
|
||||
@@ -111,4 +119,8 @@ export const useAppState = create<AppState>((set) => ({
|
||||
setUpdateInfo: (info) => set({ updateInfo: info }),
|
||||
appVersion: "",
|
||||
setAppVersion: (version) => set({ appVersion: version }),
|
||||
|
||||
// Image update info
|
||||
imageUpdateInfo: null,
|
||||
setImageUpdateInfo: (info) => set({ imageUpdateInfo: info }),
|
||||
}));
|
||||
|
||||
@@ -5,7 +5,17 @@ FROM ubuntu:24.04
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
# ── System packages ──────────────────────────────────────────────────────────
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
# The shell retry loop handles transient mirror-sync failures where
|
||||
# archive.ubuntu.com returns stale Packages.gz files with mismatched hashes
|
||||
# during hourly resyncs. Clearing /var/lib/apt/lists/* between attempts
|
||||
# forces a fresh fetch.
|
||||
RUN for i in 1 2 3 4 5; do \
|
||||
apt-get -o Acquire::Retries=3 update && break; \
|
||||
echo "apt-get update failed (attempt $i), retrying in 10s..."; \
|
||||
rm -rf /var/lib/apt/lists/*; \
|
||||
sleep 10; \
|
||||
done \
|
||||
&& apt-get install -y --no-install-recommends \
|
||||
git \
|
||||
curl \
|
||||
wget \
|
||||
@@ -38,7 +48,13 @@ RUN curl -fsSL https://cli.github.com/packages/githubcli-archive-keyring.gpg \
|
||||
&& chmod go+r /usr/share/keyrings/githubcli-archive-keyring.gpg \
|
||||
&& echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" \
|
||||
> /etc/apt/sources.list.d/github-cli.list \
|
||||
&& apt-get update && apt-get install -y gh \
|
||||
&& for i in 1 2 3 4 5; do \
|
||||
apt-get -o Acquire::Retries=3 update && break; \
|
||||
echo "apt-get update failed (attempt $i), retrying in 10s..."; \
|
||||
rm -rf /var/lib/apt/lists/*; \
|
||||
sleep 10; \
|
||||
done \
|
||||
&& apt-get install -y gh \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# ── Node.js LTS (22.x) + pnpm ───────────────────────────────────────────────
|
||||
@@ -48,7 +64,7 @@ RUN curl -fsSL https://deb.nodesource.com/setup_22.x | bash - \
|
||||
&& npm install -g pnpm
|
||||
|
||||
# ── Python 3 + pip + uv + ruff ──────────────────────────────────────────────
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
RUN apt-get -o Acquire::Retries=3 update && apt-get install -y --no-install-recommends \
|
||||
python3 \
|
||||
python3-pip \
|
||||
python3-venv \
|
||||
@@ -61,7 +77,7 @@ RUN install -m 0755 -d /etc/apt/keyrings \
|
||||
&& chmod a+r /etc/apt/keyrings/docker.gpg \
|
||||
&& echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(. /etc/os-release && echo "$VERSION_CODENAME") stable" \
|
||||
> /etc/apt/sources.list.d/docker.list \
|
||||
&& apt-get update && apt-get install -y docker-ce-cli \
|
||||
&& apt-get -o Acquire::Retries=3 update && apt-get install -y docker-ce-cli \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# ── AWS CLI v2 ───────────────────────────────────────────────────────────────
|
||||
@@ -122,6 +138,8 @@ RUN chmod +x /usr/local/bin/audio-shim \
|
||||
COPY triple-c-sso-refresh /usr/local/bin/triple-c-sso-refresh
|
||||
RUN chmod +x /usr/local/bin/triple-c-sso-refresh
|
||||
|
||||
COPY mission-control /opt/mission-control
|
||||
|
||||
COPY entrypoint.sh /usr/local/bin/entrypoint.sh
|
||||
RUN chmod +x /usr/local/bin/entrypoint.sh
|
||||
COPY triple-c-scheduler /usr/local/bin/triple-c-scheduler
|
||||
|
||||
@@ -145,13 +145,12 @@ fi
|
||||
if [ "$MISSION_CONTROL_ENABLED" = "1" ]; then
|
||||
MC_HOME="/home/claude/mission-control"
|
||||
MC_LINK="/workspace/mission-control"
|
||||
if [ ! -d "$MC_HOME/.git" ]; then
|
||||
echo "entrypoint: cloning mission-control..."
|
||||
su -s /bin/bash claude -c \
|
||||
'git clone https://github.com/msieurthenardier/mission-control.git /home/claude/mission-control' \
|
||||
|| echo "entrypoint: warning — failed to clone mission-control"
|
||||
if [ ! -d "$MC_HOME" ]; then
|
||||
echo "entrypoint: installing mission-control..."
|
||||
cp -r /opt/mission-control "$MC_HOME"
|
||||
chown -R claude:claude "$MC_HOME"
|
||||
else
|
||||
echo "entrypoint: mission-control already present, skipping clone"
|
||||
echo "entrypoint: mission-control already present, skipping install"
|
||||
fi
|
||||
# Symlink into workspace so Claude sees it at /workspace/mission-control
|
||||
ln -sfn "$MC_HOME" "$MC_LINK"
|
||||
@@ -189,6 +188,29 @@ if [ -n "$MCP_SERVERS_JSON" ]; then
|
||||
unset MCP_SERVERS_JSON
|
||||
fi
|
||||
|
||||
# ── Claude Code settings ────────────────────────────────────────────────────
|
||||
# Merge Claude Code settings into ~/.claude/settings.json (preserves existing
|
||||
# keys). Creates the file if it doesn't exist. These control TUI mode, effort
|
||||
# level, focus mode, thinking summaries, and other CLI behavior.
|
||||
if [ -n "$CLAUDE_CODE_SETTINGS_JSON" ]; then
|
||||
SETTINGS_FILE="/home/claude/.claude/settings.json"
|
||||
mkdir -p /home/claude/.claude
|
||||
if [ -f "$SETTINGS_FILE" ]; then
|
||||
# Merge: existing settings + new settings (new keys override on conflict)
|
||||
MERGED=$(jq -s '.[0] * .[1]' "$SETTINGS_FILE" <(printf '%s' "$CLAUDE_CODE_SETTINGS_JSON") 2>/dev/null)
|
||||
if [ -n "$MERGED" ]; then
|
||||
printf '%s\n' "$MERGED" > "$SETTINGS_FILE"
|
||||
else
|
||||
echo "entrypoint: warning — failed to merge Claude Code settings into $SETTINGS_FILE"
|
||||
fi
|
||||
else
|
||||
printf '%s\n' "$CLAUDE_CODE_SETTINGS_JSON" > "$SETTINGS_FILE"
|
||||
fi
|
||||
chown claude:claude "$SETTINGS_FILE"
|
||||
chmod 600 "$SETTINGS_FILE"
|
||||
unset CLAUDE_CODE_SETTINGS_JSON
|
||||
fi
|
||||
|
||||
# ── AWS SSO auth refresh command ──────────────────────────────────────────────
|
||||
# When set, inject awsAuthRefresh into ~/.claude.json so Claude Code calls
|
||||
# triple-c-sso-refresh when AWS credentials expire mid-session.
|
||||
|
||||
@@ -0,0 +1,233 @@
|
||||
---
|
||||
name: agentic-workflow
|
||||
description: Active orchestrator for multi-agent flight execution. Drives the full leg cycle (design, implement, review, commit) using three separate Claude instances.
|
||||
---
|
||||
|
||||
# Agentic Workflow
|
||||
|
||||
Orchestrate multi-agent flight execution. You drive the full leg cycle — designing legs, spawning Developer and Reviewer agents, and managing git workflow — for a target project's flight.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Project must be initialized with `/init-project` (`.flightops/ARTIFACTS.md` must exist)
|
||||
- A mission must exist and be `active`
|
||||
- A flight must exist and be `ready` or `in-flight`
|
||||
|
||||
## Invocation
|
||||
|
||||
```
|
||||
/agentic-workflow flight {number} for {project-slug} mission {number}
|
||||
```
|
||||
|
||||
Example: `/agentic-workflow flight 03 for epipen mission 04`
|
||||
|
||||
## Phase 1: Context Loading
|
||||
|
||||
1. **Read `projects.md`** to find the target project's path
|
||||
2. **Read `{target-project}/.flightops/ARTIFACTS.md`** for artifact locations
|
||||
3. **Read `{target-project}/.flightops/agent-crews/leg-execution.md`** for project crew definitions, interaction protocol, and prompts (fall back to defaults at `.claude/skills/init-project/defaults/agent-crews/leg-execution.md`)
|
||||
- **Validate structure**: The phase file MUST contain `## Crew`, `## Interaction Protocol`, and `## Prompts` sections. Each prompt subsection MUST have a fenced code block.
|
||||
- **If the file exists but is malformed**: STOP. Tell the user: "Phase file `leg-execution.md` is missing required sections. Either fix it manually or re-run `/init-project` to reset to defaults." Do NOT improvise missing prompts — halt and get the file fixed.
|
||||
4. **Read the mission artifact** — outcomes, success criteria, constraints
|
||||
5. **Read the flight artifact** — objective, design decisions, leg list
|
||||
6. **Read the flight log** — ground truth from prior execution
|
||||
7. **Count total legs** from the flight spec — track progress throughout
|
||||
8. **Determine starting point** — which leg is next based on flight log and leg statuses
|
||||
9. **Read git strategy** from `{target-project}/.flightops/ARTIFACTS.md` `## Git Workflow` section. Default to `branch` if the section is absent.
|
||||
10. **Set `{working-directory}`** — `branch`: the target project root; `worktree`: the worktree path (see Git Workflow section below)
|
||||
|
||||
**Mark flight as in-flight**: After loading the flight artifact, if the flight status is `ready`, update it to `in-flight` before proceeding. If already `in-flight`, leave it as-is.
|
||||
|
||||
If resuming a flight already in progress, verify state consistency:
|
||||
- Flight log entries must match leg statuses
|
||||
- If discrepancies exist, remediate before proceeding
|
||||
|
||||
## Phase 2: Leg Cycle
|
||||
|
||||
Repeat for each leg in the flight.
|
||||
|
||||
### 2a: Leg Design
|
||||
|
||||
1. **Design the leg** using the `/leg` skill (if the Skill tool is unavailable, read `.claude/skills/leg/SKILL.md` and follow the workflow directly)
|
||||
- Read the flight spec, flight log, and relevant source code
|
||||
- Create the leg artifact with acceptance criteria
|
||||
2. **Spawn a Developer agent for design review** (Task tool, `subagent_type: "general-purpose"`)
|
||||
- Working directory: `{working-directory}`
|
||||
- Provide the "Review Leg Design" prompt from the leg-execution phase file's Prompts section
|
||||
- The Developer reads the leg artifact and cross-references against actual codebase state
|
||||
- The Developer provides a structured assessment: approve, approve with changes, or needs rework
|
||||
3. **Incorporate feedback** — update the leg artifact to address any issues raised
|
||||
- High-severity issues: must fix before proceeding
|
||||
- Medium-severity issues: fix unless there's a clear reason not to
|
||||
- Low-severity issues and suggestions: apply at discretion
|
||||
4. **Re-review if substantive changes were made** — spawn another Developer for a second pass
|
||||
- Skip if only minor/cosmetic fixes were applied
|
||||
- If the second review raises new high-severity issues, fix and re-review once more
|
||||
- **Max 2 design review cycles** — if issues persist after 2 rounds, escalate to human
|
||||
5. **Update leg status** to `ready`
|
||||
6. **Signal `[HANDOFF:review-needed]`** when the leg design is finalized
|
||||
|
||||
### 2b: Leg Implementation
|
||||
|
||||
**NEVER implement code directly.** Spawn a Developer agent via the Task tool.
|
||||
|
||||
**Interactive/UAT legs**: If the leg is a UAT, alignment, or other interactive leg (identified by slug like `uat-*`, `alignment-*`, or explicit marking in the flight spec), do NOT spawn agents to execute it autonomously. The human performs verification — the Flight Director guides them through it:
|
||||
1. **Design the leg** normally (2a), but keep it lightweight — the acceptance criteria are verification steps, not implementation tasks
|
||||
2. **Skip the autonomous implementation cycle** (no Developer/Reviewer agents)
|
||||
3. **Guide the human through verification steps one at a time** — present a single step, wait for the human to perform it and report results, then proceed to the next step
|
||||
4. **Fix issues inline** — if the human reports a failure, diagnose and fix it (spawning a Developer agent if code changes are needed), then re-verify that step before moving on
|
||||
5. **Commit when all steps pass** — spawn a Developer agent to update artifacts and commit
|
||||
|
||||
**Standard (autonomous) legs**: Follow the Developer/Reviewer cycle below.
|
||||
|
||||
1. **Spawn a Developer agent** (Task tool, `subagent_type: "general-purpose"`)
|
||||
- Working directory: `{working-directory}`
|
||||
- Provide the "Implement" prompt from the leg-execution phase file's Prompts section
|
||||
- The Developer updates leg status to `in-flight`, implements to acceptance criteria
|
||||
- When done, the Developer updates leg status to `landed`, updates flight log, and signals `[HANDOFF:review-needed]` — do NOT let it commit
|
||||
2. **Spawn a Reviewer agent** (Task tool, `subagent_type: "general-purpose"`)
|
||||
- Working directory: `{working-directory}`
|
||||
- Provide the "Review" prompt from the leg-execution phase file's Prompts section
|
||||
- The Reviewer evaluates ALL uncommitted changes against acceptance criteria and code quality
|
||||
- The Reviewer signals `[HANDOFF:confirmed]` or lists issues with severity
|
||||
3. **If issues found**, spawn a new Developer agent to fix them
|
||||
- Provide the "Fix Review Issues" prompt from the leg-execution phase file with the Reviewer's feedback
|
||||
- Loop review/fix until the Reviewer confirms
|
||||
4. **Spawn the Developer agent to commit** after review passes
|
||||
- Provide the "Commit" prompt from the leg-execution phase file's Prompts section
|
||||
- The commit must include code changes, updated flight log, and leg status updated to `completed`
|
||||
|
||||
### 2c: Leg Transition
|
||||
|
||||
After `[COMPLETE:leg]` (all git/PR operations run from `{working-directory}`):
|
||||
1. Increment `legs_completed`
|
||||
2. **Manage PR**:
|
||||
- **First leg**: Open a draft PR with the leg checklist in the body (see PR Body Format below), then check off the completed leg
|
||||
- **Subsequent legs**: Use `gh pr edit --body` to check off the newly completed leg in the existing PR body
|
||||
3. If more legs remain → return to 2a
|
||||
4. If all legs complete → proceed to Phase 3
|
||||
|
||||
## Phase 3: Flight Completion
|
||||
|
||||
1. **Verify all legs** show `completed` status
|
||||
2. **Verify flight log** has entries for all legs
|
||||
3. **Verify documentation** — check that CLAUDE.md, README, and other project docs reflect any new commands, endpoints, configuration, or APIs introduced during the flight. If not, spawn a Developer agent to update them.
|
||||
4. **Update flight status** to `landed`
|
||||
5. **Check off flight** in mission artifact
|
||||
6. **Clean up worktree** (worktree strategy only) — run `git worktree remove` after the PR is marked ready for review
|
||||
7. **Signal `[COMPLETE:flight]`**
|
||||
|
||||
The flight debrief is a separate step run via `/flight-debrief` after the flight lands. The debrief transitions the flight to `completed`.
|
||||
|
||||
## Architecture
|
||||
|
||||
The Flight Director (you) orchestrates according to this skill. Project crew composition, roles, models, and prompts are defined in `{target-project}/.flightops/agent-crews/leg-execution.md`.
|
||||
|
||||
**Separation is mandatory.** Project crew agents run in the target project and load its CLAUDE.md and conventions. The Reviewer has no knowledge of the Developer's reasoning — only the resulting changes. This provides objective review.
|
||||
|
||||
**Model selection:** Follow the model preferences in the phase file. MC may use Opus for complex planning. Never use Opus for the Reviewer.
|
||||
|
||||
## Handoff Signals
|
||||
|
||||
Signals are part of the Flight Control methodology and are NOT configurable per-project. All crew agents must use these exact signals:
|
||||
|
||||
| Signal | Emitted By | Meaning |
|
||||
|--------|-----------|---------|
|
||||
| `[HANDOFF:review-needed]` | Developer | Code/artifact ready for review |
|
||||
| `[HANDOFF:confirmed]` | Reviewer | Review passed |
|
||||
| `[BLOCKED:reason]` | Any crew agent | Cannot proceed, needs resolution |
|
||||
| `[COMPLETE:leg]` | Developer | Leg finished and committed |
|
||||
| `[COMPLETE:flight]` | Flight Director | Flight landed |
|
||||
|
||||
## Flight Director Decision Log
|
||||
|
||||
The Flight Director must maintain transparency about its own decisions. After each major orchestration step, log what happened and why in the flight log under a `### Flight Director Notes` subsection:
|
||||
|
||||
1. **Phase file loading** — Record which phase file was loaded (project or default fallback) and what crew was extracted
|
||||
2. **Agent spawning** — Record which agent was spawned, with what prompt, and what model
|
||||
3. **Review cycle decisions** — When incorporating feedback, note what was accepted/rejected and why
|
||||
4. **Escalation decisions** — When choosing between "fix and re-review" vs "escalate to human," note the reasoning
|
||||
5. **Signal interpretation** — When a crew agent's output is ambiguous, note how it was interpreted
|
||||
|
||||
This is not a separate file — it goes in the flight log alongside leg entries. The goal is that anyone reviewing the flight log can understand not just what the crew did, but why the Flight Director made the orchestration choices it did.
|
||||
|
||||
## Git Workflow
|
||||
|
||||
### Strategy Selection
|
||||
|
||||
Read the `## Git Workflow` section from `{target-project}/.flightops/ARTIFACTS.md`. The `Strategy` property determines which workflow to use. If the section is absent, default to `branch`.
|
||||
|
||||
### Shared Elements
|
||||
|
||||
Both strategies use the same branch naming, commit format, PR lifecycle, and PR body format.
|
||||
|
||||
**Branch naming**: `flight/{number}-{slug}`
|
||||
|
||||
**Commit message format:**
|
||||
```
|
||||
leg/{number}: {description}
|
||||
|
||||
Flight: {flight-number}
|
||||
Mission: {mission-number}
|
||||
```
|
||||
|
||||
**PR lifecycle:**
|
||||
|
||||
| Event | Action |
|
||||
|-------|--------|
|
||||
| First leg complete | Open draft PR with leg checklist in body |
|
||||
| Each leg complete | Commit code + artifacts, update PR checklist |
|
||||
| Flight landed | Mark PR ready for review |
|
||||
|
||||
**PR body format:**
|
||||
|
||||
```markdown
|
||||
## {Flight Title}
|
||||
|
||||
{Flight objective — one paragraph}
|
||||
|
||||
**Mission**: {Mission Title}
|
||||
|
||||
## Legs
|
||||
|
||||
- [ ] `{leg-slug}` — {brief description}
|
||||
- [ ] `{leg-slug}` — {brief description}
|
||||
```
|
||||
|
||||
### Strategy: Branch
|
||||
|
||||
The default single-checkout workflow. One flight at a time per working copy.
|
||||
|
||||
| Step | Command |
|
||||
|------|---------|
|
||||
| Flight start | `git checkout -b flight/{number}-{slug}` |
|
||||
| Set `{working-directory}` | Target project root |
|
||||
| Agents work in | Project root |
|
||||
| Flight landed | PR marked ready for review |
|
||||
|
||||
### Strategy: Worktree
|
||||
|
||||
Worktree isolation enables parallel flights on a single repo clone.
|
||||
|
||||
| Step | Command |
|
||||
|------|---------|
|
||||
| Flight start | `git worktree add .worktrees/flight-{number}-{slug} -b flight/{number}-{slug}` |
|
||||
| Set `{working-directory}` | `.worktrees/flight-{number}-{slug}` |
|
||||
| Orchestrator stays on | Main branch (does not checkout the flight branch) |
|
||||
| Agents work in | Worktree path |
|
||||
| Flight landed | PR marked ready for review, then `git worktree remove .worktrees/flight-{number}-{slug}` |
|
||||
|
||||
**Note:** The `.worktrees/` directory must be in `.gitignore` when using this strategy.
|
||||
|
||||
## Error Handling
|
||||
|
||||
| Situation | Action |
|
||||
|-----------|--------|
|
||||
| Developer agent fails mid-leg | Spawn new Developer with context of what failed |
|
||||
| Design review loops > 2 times | Escalate to human with unresolved design issues |
|
||||
| Code review loops > 3 times | Escalate to human |
|
||||
| Leg marked aborted | Escalate to human with abort details |
|
||||
| Artifact discrepancy | Remediate before proceeding |
|
||||
| Off the rails | Roll back to last leg commit, escalate |
|
||||
| Stale worktree (worktree strategy) | Run `git worktree prune`, recreate if needed |
|
||||
| Agent hangs on tests | Kill the agent, spawn new Developer to isolate and fix hanging tests |
|
||||
206
container/mission-control/.claude/skills/daily-briefing/SKILL.md
Normal file
206
container/mission-control/.claude/skills/daily-briefing/SKILL.md
Normal file
@@ -0,0 +1,206 @@
|
||||
---
|
||||
name: daily-briefing
|
||||
description: Cross-project status report with health assessment, stale work detection, and methodology insights. Use for a quick overview of all managed projects.
|
||||
---
|
||||
|
||||
# Daily Briefing
|
||||
|
||||
Generate a comprehensive status report across managed projects with health assessments, staleness detection, and cross-project methodology insights.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- `projects.md` must exist (run `/init-mission-control` first)
|
||||
|
||||
## Output
|
||||
|
||||
Daily briefings are saved to the `daily-briefings/` directory in the mission-control repository root, named by date: `daily-briefings/YYYY-MM-DD.md`. This directory is gitignored — briefings are local-only, ephemeral documents.
|
||||
|
||||
If a briefing already exists for today, append a sequence number: `YYYY-MM-DD-2.md`, `YYYY-MM-DD-3.md`, etc.
|
||||
|
||||
## Workflow
|
||||
|
||||
### Phase 1: Load Projects Registry
|
||||
|
||||
1. **Read `projects.md`** to get the full list of managed projects
|
||||
2. Extract each project's slug, description, path, and status
|
||||
|
||||
### Phase 2: Project Selection Interview
|
||||
|
||||
Present the user with a **checkbox list** (AskUserQuestion with `multiSelect: true`) of all registered projects so they can select which ones to include in the briefing.
|
||||
|
||||
**AskUserQuestion limits each question to 4 options.** Split projects across multiple questions in groups of 3, reserving the 4th option as "None from this group" (description: "Skip these projects") so the user can opt out of an entire group without blocking submission. Send all questions in a single AskUserQuestion call.
|
||||
|
||||
Format each project option as:
|
||||
- **Label**: Project slug
|
||||
- **Description**: Short project description from the registry
|
||||
|
||||
### Phase 3: Project Scanning
|
||||
|
||||
For each selected project, gather data by spawning **parallel Explore agents** (Task tool, `subagent_type: "Explore"`) — one per project. Each agent should:
|
||||
|
||||
1. **Check initialization status**
|
||||
- Check if `{project-path}/.flightops/ARTIFACTS.md` exists
|
||||
- If missing, note the project as "not initialized" and skip artifact scanning
|
||||
|
||||
2. **Read artifact configuration**
|
||||
- Read `{project-path}/.flightops/ARTIFACTS.md` for directory structure and naming conventions
|
||||
|
||||
3. **Discover all artifacts**
|
||||
- For filesystem-based projects, scan `{project-path}/missions/` for mission directories
|
||||
- For each mission, scan for flights; for each flight, scan for legs
|
||||
- Read all discovered artifacts and capture:
|
||||
- **Status fields** from each mission, flight, and leg
|
||||
- **Titles and objectives**
|
||||
- **Checklist completion** (count checked vs unchecked items)
|
||||
|
||||
4. **Read debriefs**
|
||||
- Read any flight debriefs and mission debriefs found
|
||||
- Extract key learnings, recommendations, and action items
|
||||
|
||||
5. **Check git activity**
|
||||
- Run `git log --oneline --since="7 days ago" -20` in the project directory
|
||||
- Capture recent commit activity as a proxy for momentum
|
||||
|
||||
6. **Return structured findings**
|
||||
The agent should return a structured summary including:
|
||||
- Project initialization status
|
||||
- List of all missions with status
|
||||
- List of all flights with status
|
||||
- List of all legs with status
|
||||
- Debrief summaries (key learnings and recommendations)
|
||||
- Recent git activity summary
|
||||
- Any anomalies (e.g., in-flight legs with no recent commits)
|
||||
|
||||
### Phase 4: Health Analysis
|
||||
|
||||
For each scanned project, assess health across these dimensions:
|
||||
|
||||
#### Activity Status
|
||||
- **Active**: Has in-flight work AND recent commits
|
||||
- **Stalled**: Has in-flight work but NO recent commits (7+ days)
|
||||
- **Idle**: No in-flight work
|
||||
- **Fresh**: Recently completed work (within 7 days)
|
||||
|
||||
#### Staleness Detection
|
||||
Flag artifacts that appear stale or abandoned:
|
||||
- **Missions** with status `active` or `planning` but no flight activity in 14+ days
|
||||
- **Flights** with status `in-flight` or `planning` but no leg progress in 7+ days
|
||||
- **Legs** with status `in-flight` or `planning` but no recent commits in 7+ days
|
||||
- **Open questions** that remain unresolved across any active artifacts
|
||||
|
||||
#### Completion Assessment
|
||||
- Missions nearing completion (most success criteria checked)
|
||||
- Flights with all legs completed but not yet marked `landed`
|
||||
- Orphaned artifacts (legs without flights, flights without missions)
|
||||
|
||||
### Phase 5: Cross-Project Insights
|
||||
|
||||
Analyze all debriefs (flight and mission) across selected projects to extract:
|
||||
|
||||
#### Common Patterns
|
||||
- Recurring recommendations across projects
|
||||
- Shared technical or process challenges
|
||||
- Patterns in what goes well vs what struggles
|
||||
|
||||
#### Mission Control Methodology Improvements
|
||||
- Feedback about the mission/flight/leg hierarchy itself
|
||||
- Skill effectiveness observations (were leg specs clear enough? were flight plans accurate?)
|
||||
- Suggestions for new skills, templates, or workflow changes
|
||||
|
||||
#### Project-Specific Recommendations
|
||||
- Per-project action items drawn from debrief insights
|
||||
- Suggested next steps based on current state
|
||||
|
||||
### Phase 6: Generate Briefing
|
||||
|
||||
Create the daily briefing file at `daily-briefings/YYYY-MM-DD.md`. Ensure the `daily-briefings/` directory exists first.
|
||||
|
||||
**Format:**
|
||||
|
||||
```markdown
|
||||
# Daily Briefing — {YYYY-MM-DD}
|
||||
|
||||
## Executive Summary
|
||||
{2-3 sentence overview of portfolio health — how many projects active, key highlights, top concerns}
|
||||
|
||||
---
|
||||
|
||||
## Project Reports
|
||||
|
||||
### {Project Slug}
|
||||
|
||||
**Health**: {Active | Stalled | Idle | Fresh} {optional: brief qualifier}
|
||||
**Recent Activity**: {X commits in last 7 days | No recent commits}
|
||||
|
||||
#### Current State
|
||||
| Level | Active | Completed | Stale | Total |
|
||||
|-------|--------|-----------|-------|-------|
|
||||
| Missions | {n} | {n} | {n} | {n} |
|
||||
| Flights | {n} | {n} | {n} | {n} |
|
||||
| Legs | {n} | {n} | {n} | {n} |
|
||||
|
||||
#### In Progress
|
||||
- **Mission**: {title} — {status summary}
|
||||
- **Flight**: {title} — {status}, {X/Y legs complete}
|
||||
- {Current/next leg and its status}
|
||||
|
||||
#### Staleness Alerts
|
||||
- {Description of stale artifact and how long it's been inactive}
|
||||
|
||||
#### Recommendations
|
||||
- {Actionable recommendation based on current state and debrief insights}
|
||||
|
||||
---
|
||||
|
||||
{Repeat for each selected project}
|
||||
|
||||
---
|
||||
|
||||
## Not Initialized
|
||||
{List any selected projects that lack `.flightops/` — suggest running `/init-project`}
|
||||
|
||||
---
|
||||
|
||||
## Cross-Project Insights
|
||||
|
||||
### Common Patterns
|
||||
{Themes observed across multiple project debriefs}
|
||||
|
||||
### Methodology Observations
|
||||
{Feedback about Flight Control itself — what's working, what could improve}
|
||||
|
||||
### Recommended Actions
|
||||
1. {Highest-priority cross-project action}
|
||||
2. {Second priority}
|
||||
3. {Third priority}
|
||||
```
|
||||
|
||||
### Phase 7: Present Summary
|
||||
|
||||
After writing the briefing file, present a **concise verbal summary** to the user:
|
||||
|
||||
1. The file path where the full briefing was saved
|
||||
2. Portfolio-level health (X active, Y stalled, Z idle)
|
||||
3. Top 3 items needing attention (stale work, approaching completions, blockers)
|
||||
4. Any cross-project methodology insights worth highlighting
|
||||
|
||||
Keep the verbal summary short — the detailed report is in the file.
|
||||
|
||||
## Guidelines
|
||||
|
||||
### Read-Only
|
||||
This skill only reads project artifacts and git history. It **never** modifies any project files, artifacts, or source code.
|
||||
|
||||
### Parallel Scanning
|
||||
Spawn project scanning agents in parallel for efficiency. Don't scan projects sequentially.
|
||||
|
||||
### Graceful Degradation
|
||||
- Projects without `.flightops/` get noted but don't block the report
|
||||
- Projects with no missions directory get reported as "no Flight Control artifacts"
|
||||
- Missing or malformed artifacts get flagged, not crashed on
|
||||
|
||||
### Honest Assessment
|
||||
Report what you find. Don't sugarcoat stale projects or inflate activity. The briefing is for situational awareness — accuracy matters more than optimism.
|
||||
|
||||
### Brevity in Verbal Summary
|
||||
The written briefing is the detailed artifact. The verbal summary should be 5-10 lines max — just the highlights and the file path.
|
||||
160
container/mission-control/.claude/skills/flight-debrief/SKILL.md
Normal file
160
container/mission-control/.claude/skills/flight-debrief/SKILL.md
Normal file
@@ -0,0 +1,160 @@
|
||||
---
|
||||
name: flight-debrief
|
||||
description: Post-flight analysis for continuous improvement. Use after a flight is completed to capture lessons learned and improve the methodology.
|
||||
---
|
||||
|
||||
# Flight Debrief
|
||||
|
||||
Perform comprehensive post-flight analysis for continuous improvement.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Project must be initialized with `/init-project` (`.flightops/ARTIFACTS.md` must exist)
|
||||
- A flight must have status `landed` before debriefing
|
||||
|
||||
## Workflow
|
||||
|
||||
### Phase 1: Context Loading
|
||||
|
||||
1. **Identify the target project**
|
||||
- Read `projects.md` to find the project's path
|
||||
|
||||
2. **Verify project is initialized**
|
||||
- Check if `{target-project}/.flightops/ARTIFACTS.md` exists
|
||||
- **If missing**: STOP and tell the user to run `/init-project` first
|
||||
- Do not proceed without the artifact configuration
|
||||
|
||||
3. **Read the artifact configuration**
|
||||
- Read `{target-project}/.flightops/ARTIFACTS.md` for artifact locations and formats
|
||||
|
||||
4. **Load flight documentation**
|
||||
- Read the mission for overall context and success criteria
|
||||
- Read the flight for objectives, design decisions, and checkpoints
|
||||
- Read ALL legs to understand the planned implementation
|
||||
- Read the complete flight log for ground truth on what happened
|
||||
|
||||
5. **Load project context**
|
||||
- Read the target project's `README.md` and `CLAUDE.md`
|
||||
- Identify key implementation files from leg outputs and flight log
|
||||
|
||||
6. **Examine actual implementation**
|
||||
- Read files created or modified during the flight
|
||||
- Compare intended vs actual implementation
|
||||
- Note deviations, workarounds, or unexpected discoveries
|
||||
|
||||
### Phase 2: Crew Debrief Interviews
|
||||
|
||||
Read `{target-project}/.flightops/agent-crews/flight-debrief.md` for crew definitions and prompts (fall back to defaults at `.claude/skills/init-project/defaults/agent-crews/flight-debrief.md`).
|
||||
|
||||
**Validate structure**: The phase file MUST contain `## Crew`, `## Interaction Protocol`, and `## Prompts` sections with fenced code blocks. If the file exists but is malformed, STOP and tell the user: "Phase file `flight-debrief.md` is missing required sections. Either fix it manually or re-run `/init-project` to reset to defaults."
|
||||
|
||||
#### Developer Interview
|
||||
1. **Spawn a Developer agent** in the target project context (Task tool, `subagent_type: "general-purpose"`)
|
||||
- Provide the "Debrief Interview" prompt from the flight-debrief phase file's Prompts section
|
||||
- The Developer examines code changes, test coverage, patterns, and technical debt
|
||||
- The Developer provides structured debrief input
|
||||
|
||||
#### Architect Interview
|
||||
1. **Spawn an Architect agent** in the target project context (Task tool, `subagent_type: "general-purpose"`)
|
||||
- Provide the "Debrief Design Review" prompt from the flight-debrief phase file's Prompts section
|
||||
- The Architect compares planned design decisions against actual implementation
|
||||
- The Architect evaluates whether the flight design held up and provides feedback for future flights
|
||||
- This closes the design feedback loop — the same role that reviewed the spec now evaluates the outcome
|
||||
|
||||
#### Human Interview
|
||||
Brief questions to capture insights documents may miss. Keep this lightweight — 2-3 questions max based on what you observed in the flight log.
|
||||
|
||||
- **On anomalies/deviations**: "The log mentions [X] — what drove that decision?"
|
||||
- **On leg quality**: "Were any leg specs unclear or missing key context?"
|
||||
- **On blockers**: "What slowed you down most? Was it predictable?"
|
||||
|
||||
Skip the human interview if the flight log is comprehensive and there are no obvious gaps.
|
||||
|
||||
### Phase 3: Deep Analysis
|
||||
|
||||
Synthesize Developer input, Architect input, human input, and document analysis across multiple dimensions:
|
||||
|
||||
#### Outcome Analysis
|
||||
- Did the flight achieve its objective?
|
||||
- Which mission success criteria did this flight advance?
|
||||
- Were all checkpoints met?
|
||||
- What value was delivered?
|
||||
|
||||
#### Process Analysis
|
||||
- How accurate were the leg specifications?
|
||||
- Were there gaps requiring improvisation?
|
||||
- Did the leg sequence make sense?
|
||||
- Were legs appropriately sized?
|
||||
- Did acceptance criteria prove verifiable?
|
||||
|
||||
#### Technical Analysis
|
||||
- What technical decisions were made during flight that weren't planned?
|
||||
- Were there architectural surprises?
|
||||
- What technical debt was introduced?
|
||||
- Does implementation align with project conventions?
|
||||
|
||||
#### Deviation Analysis
|
||||
- What deviations occurred and why?
|
||||
- Were deviations captured in the flight log?
|
||||
- Should any deviations become standard practice?
|
||||
|
||||
#### Knowledge Capture
|
||||
- What was learned that should be documented?
|
||||
- Are there reusable patterns that emerged?
|
||||
- Are README or CLAUDE.md updates needed?
|
||||
|
||||
### Phase 4: Skill Effectiveness Analysis
|
||||
|
||||
Evaluate whether the mission-control skills could be improved:
|
||||
|
||||
#### Mission Skill
|
||||
- Did the mission provide adequate context?
|
||||
- Were success criteria clear and measurable?
|
||||
|
||||
#### Flight Skill
|
||||
- Did the flight structure support execution?
|
||||
- Were design decisions adequately captured?
|
||||
- Was the leg breakdown appropriate?
|
||||
|
||||
#### Leg Skill
|
||||
- Did legs provide sufficient implementation guidance?
|
||||
- Were acceptance criteria verifiable?
|
||||
- Were edge cases adequately identified?
|
||||
|
||||
### Phase 5: Generate Debrief
|
||||
|
||||
Create the flight debrief artifact using the format defined in `.flightops/ARTIFACTS.md`.
|
||||
|
||||
### Phase 6: Flight Status Transition
|
||||
|
||||
Ask the user if the flight should be marked as `completed`. If confirmed, update the flight artifact's status from `landed` to `completed`.
|
||||
|
||||
## Guidelines
|
||||
|
||||
### Thoroughness Over Speed
|
||||
- Read files completely, not just skim
|
||||
- Consider root causes, not just symptoms
|
||||
- Think about systemic improvements
|
||||
|
||||
### Be Specific and Actionable
|
||||
Avoid vague recommendations. Instead of "improve documentation," say:
|
||||
- "Add a 'Devcontainer Commands' section to CLAUDE.md documenting the docker exec workflow"
|
||||
|
||||
### Distinguish Severity
|
||||
- **Critical**: Would have prevented significant rework or failure
|
||||
- **Important**: Would have meaningfully improved efficiency
|
||||
- **Minor**: Nice-to-have improvements
|
||||
|
||||
### Credit What Worked
|
||||
Identify effective patterns that should be reinforced or codified.
|
||||
|
||||
### Consider the Meta-Level
|
||||
- Did the mission/flight/leg hierarchy work?
|
||||
- Were the right artifacts being created?
|
||||
- Is there friction that could be eliminated?
|
||||
|
||||
## Output
|
||||
|
||||
Create the debrief artifact using the location and format defined in `.flightops/ARTIFACTS.md`.
|
||||
|
||||
After creating the debrief, summarize the top 3-5 most impactful recommendations.
|
||||
183
container/mission-control/.claude/skills/flight/SKILL.md
Normal file
183
container/mission-control/.claude/skills/flight/SKILL.md
Normal file
@@ -0,0 +1,183 @@
|
||||
---
|
||||
name: flight
|
||||
description: Create technical flight specifications from missions. Use when breaking down a mission into implementable work or planning technical approach.
|
||||
---
|
||||
|
||||
# Flight Specification
|
||||
|
||||
Create a technical flight spec from a mission.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Project must be initialized with `/init-project` (`.flightops/ARTIFACTS.md` must exist)
|
||||
- A mission must exist before creating a flight
|
||||
|
||||
## Workflow
|
||||
|
||||
### Phase 1: Context Gathering
|
||||
|
||||
1. **Identify the target project**
|
||||
- Read `projects.md` to find the project's path
|
||||
|
||||
2. **Verify project is initialized**
|
||||
- Check if `{target-project}/.flightops/ARTIFACTS.md` exists
|
||||
- **If missing**: STOP and tell the user to run `/init-project` first
|
||||
- Do not proceed without the artifact configuration
|
||||
|
||||
3. **Read the artifact configuration**
|
||||
- Read `{target-project}/.flightops/ARTIFACTS.md` for artifact locations and formats
|
||||
|
||||
4. **Read the parent mission**
|
||||
- Understand the outcome being pursued
|
||||
- Identify which success criteria this flight addresses
|
||||
- Note constraints that apply
|
||||
|
||||
5. **Check existing flights**
|
||||
- What flights already exist for this mission?
|
||||
- What's been completed vs. in progress?
|
||||
- Are there dependencies on other flights?
|
||||
|
||||
### Phase 2: Code Interrogation
|
||||
|
||||
Explore the target project's codebase to inform the technical approach:
|
||||
|
||||
1. **Identify relevant code areas**
|
||||
- What existing code relates to this flight?
|
||||
- What patterns are already established?
|
||||
- What dependencies exist?
|
||||
|
||||
2. **Find files likely to be affected**
|
||||
- Source files to modify
|
||||
- Test files to create/update
|
||||
- Configuration changes needed
|
||||
|
||||
3. **Understand existing patterns**
|
||||
- Code style and conventions
|
||||
- Error handling approaches
|
||||
- Testing patterns
|
||||
|
||||
4. **Check for schema/migration implications**
|
||||
- Does this flight add or modify database tables?
|
||||
- What migration tooling does the project use?
|
||||
- Are there existing migration patterns to follow?
|
||||
|
||||
### Phase 3: User Input
|
||||
|
||||
Before asking structured technical questions, share a brief summary of what you learned during context gathering and code interrogation, then prompt the user for open-ended input:
|
||||
|
||||
- "Here's what I've gathered about the mission context and codebase: [summary]. Before I ask specific technical questions, what are your thoughts on what this flight should cover? Feel free to share approach preferences, priorities, concerns — anything that should shape this flight."
|
||||
|
||||
Use the user's response to inform and focus the crew interview questions that follow.
|
||||
|
||||
### Phase 4: Crew Interview
|
||||
|
||||
Ask technical questions to resolve the approach:
|
||||
|
||||
1. **Technical approach**
|
||||
- "Should we extend existing code or create new modules?"
|
||||
- "What's the preferred pattern for [specific decision]?"
|
||||
- "Are there performance considerations?"
|
||||
|
||||
2. **Open questions**
|
||||
- Surface ambiguities in requirements
|
||||
- Clarify edge cases
|
||||
- Identify unknowns
|
||||
|
||||
3. **Design decisions**
|
||||
- Document choices and rationale
|
||||
- Get agreement on trade-offs
|
||||
- Note constraints discovered
|
||||
|
||||
4. **Prerequisites verification**
|
||||
- "Is [dependency] ready?"
|
||||
- "Do we have access to [resource]?"
|
||||
|
||||
5. **Validation approach**
|
||||
- "How will this flight be validated?"
|
||||
- "Is test automation needed, or is manual verification sufficient?"
|
||||
- "What tests should be created or updated?"
|
||||
|
||||
### Phase 5: Spec Creation
|
||||
|
||||
Create the flight artifact using the format defined in `.flightops/ARTIFACTS.md`.
|
||||
|
||||
Also create the flight log artifact (empty, ready for execution notes).
|
||||
|
||||
### Phase 5b: Design Review
|
||||
|
||||
Spawn an Architect agent to validate the flight spec against the real codebase before presenting it to the crew.
|
||||
|
||||
Read `{target-project}/.flightops/agent-crews/flight-design.md` for crew definitions and prompts (fall back to defaults at `.claude/skills/init-project/defaults/agent-crews/flight-design.md`).
|
||||
|
||||
**Validate structure**: The phase file MUST contain `## Crew`, `## Interaction Protocol`, and `## Prompts` sections with fenced code blocks. If the file exists but is malformed, STOP and tell the user: "Phase file `flight-design.md` is missing required sections. Either fix it manually or re-run `/init-project` to reset to defaults."
|
||||
|
||||
1. **Spawn an Architect agent** in the target project context (Task tool, `subagent_type: "general-purpose"`)
|
||||
- Provide the "Review Flight Design" prompt from the flight-design phase file's Prompts section
|
||||
- The Architect reads the flight spec and cross-references design decisions, prerequisites, technical approach, and leg breakdown against actual codebase state and architecture best practices
|
||||
- The Architect provides a structured assessment: approve, approve with changes, or needs rework
|
||||
2. **Incorporate feedback** — update the flight artifact to address issues raised
|
||||
- High-severity issues: must fix before proceeding
|
||||
- Medium-severity issues: fix unless there's a clear reason not to
|
||||
- Low-severity issues and suggestions: apply at discretion
|
||||
3. **Re-review if substantive changes were made** — spawn another Architect for a second pass
|
||||
- Skip if only minor/cosmetic fixes were applied
|
||||
- **Max 2 design review cycles** — if issues persist after 2 rounds, escalate to human
|
||||
4. **Proceed to Phase 6** with a codebase-validated spec
|
||||
|
||||
### Phase 6: Iterate
|
||||
|
||||
1. Walk through the spec with the crew
|
||||
2. Validate technical approach is sound
|
||||
3. Confirm leg breakdown is appropriate
|
||||
4. Refine until approved
|
||||
|
||||
## Guidelines
|
||||
|
||||
### Flight Sizing
|
||||
|
||||
A well-sized flight:
|
||||
- Takes 1-3 days of focused work
|
||||
- Breaks into 3-8 legs typically
|
||||
- Has a clear, verifiable objective
|
||||
- Addresses specific mission criteria
|
||||
|
||||
**Too small**: Single leg's worth of work
|
||||
**Too large**: More than a week of work, vague checkpoints
|
||||
|
||||
### Leg Identification
|
||||
|
||||
Break flights into legs based on technical boundaries:
|
||||
- Each leg should be atomic (independently completable)
|
||||
- Legs should have clear inputs and outputs
|
||||
- Consider dependencies between legs
|
||||
- Group related changes together
|
||||
|
||||
**For scaffolding flights**: Include a final `verify-integration` leg
|
||||
|
||||
**For interface changes**: Identify consumers that need updates
|
||||
|
||||
**For documentation**: Consider whether README, CLAUDE.md, or other docs need updates as part of this flight — especially for flights adding new CLI commands, API endpoints, or configuration options
|
||||
|
||||
**For schema changes**: Include explicit migration legs and verify against the live database, not just mocks
|
||||
|
||||
**For UAT and alignment**: During the crew interview, ask the user whether they'd like to include a UAT and alignment leg. Explain that this optional leg is a guided UAT session — the agent walks the user through a series of tests and verification steps, fixing issues along the way until the user is satisfied with the results. If the user opts in, include it in the breakdown, marked as optional.
|
||||
|
||||
### Pre-Flight Rigor
|
||||
|
||||
- Open questions MUST be resolved before execution
|
||||
- Design decisions MUST be documented with rationale
|
||||
- Prerequisites MUST be verified, not assumed
|
||||
- **Environment conflicts**: Flights introducing network services (ports, databases, containers) must check for conflicts with existing services on the developer's machine during planning. Ask: "What else is running that could conflict?"
|
||||
|
||||
### Adaptive Planning
|
||||
|
||||
- Flights can be modified during `planning` state
|
||||
- Once `in-flight`, flights may still be modified (e.g., changing planned legs) as long as the flight log captures the change and rationale
|
||||
- New legs can be added if scope grows
|
||||
|
||||
## Output
|
||||
|
||||
Create the following artifacts using locations and formats from `.flightops/ARTIFACTS.md`:
|
||||
|
||||
1. **Flight spec** — The flight plan
|
||||
2. **Flight log** — Empty, ready for execution notes
|
||||
@@ -0,0 +1,97 @@
|
||||
---
|
||||
name: init-mission-control
|
||||
description: Onboard to Mission Control by setting up the projects registry. Use when projects.md is missing or when adding new projects.
|
||||
---
|
||||
|
||||
# Mission Control Onboarding
|
||||
|
||||
Set up the projects registry (`projects.md`) and orient the user to the Flight Control workflow.
|
||||
|
||||
## When to Use
|
||||
|
||||
Run `/init-mission-control` when:
|
||||
- `projects.md` doesn't exist (first time using Mission Control)
|
||||
- Adding a new project to the registry
|
||||
- Updating an existing project's details
|
||||
|
||||
## Workflow
|
||||
|
||||
### 1. Check Registry Status
|
||||
|
||||
Check if `projects.md` exists in the mission-control repository root.
|
||||
|
||||
**If missing:**
|
||||
> "No projects registry found. Let's set one up — this tells Mission Control where your projects live."
|
||||
|
||||
**If exists:**
|
||||
> "Projects registry found. Want to add a new project or update an existing one?"
|
||||
|
||||
### 2. Create or Update Registry
|
||||
|
||||
**If creating new:**
|
||||
1. Copy `projects.md.template` to `projects.md`
|
||||
2. Remove the example entries
|
||||
3. Interview the user to register their first project (see step 3)
|
||||
|
||||
**If updating:**
|
||||
1. Read existing `projects.md`
|
||||
2. Ask: add new project, or update existing?
|
||||
3. Proceed accordingly
|
||||
|
||||
### 3. Discover Projects
|
||||
|
||||
Ask the user how they want to add projects:
|
||||
|
||||
> "Want to scan a directory for projects, or add a single project?"
|
||||
|
||||
**Option A: Scan a directory**
|
||||
1. Ask for the parent directory path (e.g., `~/projects`)
|
||||
2. Scan immediate subdirectories for git repos
|
||||
3. Present the list and ask which ones to register
|
||||
4. Auto-detect details for each selected project
|
||||
5. Ask for descriptions in batch (or let the user provide them later)
|
||||
|
||||
**Option B: Add a single project**
|
||||
1. Ask for the project path (e.g., `~/projects/my-app`)
|
||||
2. Verify it exists and is a git repo
|
||||
3. Auto-detect details
|
||||
4. Ask only for what can't be detected: description, and optionally stack/status
|
||||
|
||||
**Auto-detection:** For each project directory, run:
|
||||
- **Slug**: directory name
|
||||
- **Remote**: `git -C <path> remote get-url origin`
|
||||
|
||||
Only ask the user for fields that can't be auto-detected.
|
||||
|
||||
### 4. Orientation
|
||||
|
||||
After the registry is set up, briefly orient the user:
|
||||
|
||||
> "You're all set. Here's the workflow:"
|
||||
> 1. **`/init-project`** — Run this in each project to set up `.flightops/` with methodology references and configure the project crew
|
||||
> 2. **`/mission`** — Define what you want to achieve (outcomes, not tasks)
|
||||
> 3. **`/flight`** — Break a mission into technical specs
|
||||
> 4. **`/leg`** — Generate implementation steps for each flight leg
|
||||
> 5. **`/agentic-workflow`** — Execute legs with automated Developer + Reviewer agents
|
||||
|
||||
> "Start with `/init-project` on the project you just registered."
|
||||
|
||||
### 5. Offer Next Step
|
||||
|
||||
> "Want to run `/init-project` for {project-slug} now?"
|
||||
|
||||
If yes, invoke the `/init-project` skill for the registered project.
|
||||
|
||||
## Guidelines
|
||||
|
||||
### Keep It Quick
|
||||
|
||||
This is onboarding, not the main work. Get the registry set up and move on.
|
||||
|
||||
### Don't Over-Explain
|
||||
|
||||
The user will learn the methodology by using it. Give the orientation but don't lecture.
|
||||
|
||||
### Validate Paths
|
||||
|
||||
When the user provides a filesystem path, verify it exists before adding to the registry.
|
||||
@@ -0,0 +1,182 @@
|
||||
# Flight Operations Quick Reference
|
||||
|
||||
> For full methodology docs, see [mission-control](https://github.com/msieurthenardier/mission-control)
|
||||
|
||||
## Before You Start
|
||||
|
||||
**Read these files in order:**
|
||||
1. `.flightops/ARTIFACTS.md` — Where and how artifacts are stored (project-specific)
|
||||
2. The **flight log** for your active flight — Ground truth for what happened
|
||||
3. The **leg artifact** you're implementing — Your acceptance criteria
|
||||
|
||||
---
|
||||
|
||||
## Project Crew & Phases
|
||||
|
||||
Each phase of the Flight Control workflow has a crew definition in `.flightops/agent-crews/`:
|
||||
|
||||
| Crew | Purpose |
|
||||
|------|---------|
|
||||
| `mission-design.md` | Crew for `/mission` (e.g., Architect validates viability) |
|
||||
| `flight-design.md` | Crew for `/flight` (e.g., Architect reviews spec) |
|
||||
| `leg-execution.md` | Crew for `/agentic-workflow` (e.g., Developer + Reviewer) |
|
||||
| `flight-debrief.md` | Crew for `/flight-debrief` (e.g., Developer provides perspective) |
|
||||
| `mission-debrief.md` | Crew for `/mission-debrief` (e.g., Architect provides perspective) |
|
||||
|
||||
Crew files define: roles, models, interaction protocols, prompts, and signals. Customize these to change your project's agent configuration.
|
||||
|
||||
---
|
||||
|
||||
## Multi-Agent Workflow
|
||||
|
||||
Legs must be implemented by a **separate Developer instance** and reviewed by a **separate Reviewer instance** (or whatever crew is defined in `leg-execution.md`). Mission Control designs legs and orchestrates — it does NOT implement code directly.
|
||||
|
||||
The Reviewer has no knowledge of the Developer's reasoning — only the resulting changes. This separation provides objective code review. Use the `/agentic-workflow` skill in mission-control to drive this cycle.
|
||||
|
||||
---
|
||||
|
||||
## ⚠️ Leg Completion Checklist (MANDATORY)
|
||||
|
||||
**You MUST complete ALL of these before emitting `[COMPLETE:leg]`:**
|
||||
|
||||
| Step | Action |
|
||||
|------|--------|
|
||||
| 1 | All acceptance criteria verified |
|
||||
| 2 | Tests passing |
|
||||
| 3 | **Update flight log** — Add leg progress entry (see below) |
|
||||
| 4 | **Mark leg completed** — Update leg status to `completed` |
|
||||
| 5 | **Update flight** — Check off the leg in flight artifact |
|
||||
| 6 | **Commit/save with all artifact updates** |
|
||||
|
||||
**Flight log entry MUST include:**
|
||||
- Leg status, started date, completed date
|
||||
- Changes Made (what was implemented)
|
||||
- Verification (how acceptance criteria were confirmed)
|
||||
- Any decisions, deviations, or anomalies
|
||||
|
||||
Refer to `.flightops/ARTIFACTS.md` for exact locations and formats.
|
||||
|
||||
---
|
||||
|
||||
## Workflow Signals
|
||||
|
||||
Emit at the end of your response, on its own line:
|
||||
|
||||
| Signal | When |
|
||||
|--------|------|
|
||||
| `[HANDOFF:review-needed]` | Artifact changes ready for validation |
|
||||
| `[HANDOFF:confirmed]` | Review complete, no issues |
|
||||
| `[BLOCKED:reason]` | Cannot proceed |
|
||||
| `[COMPLETE:leg]` | Leg done AND checklist complete |
|
||||
|
||||
---
|
||||
|
||||
## Implementing a Leg
|
||||
|
||||
### Pre-Implementation
|
||||
1. Read mission, flight, and leg artifacts
|
||||
2. Read flight log for context from prior legs
|
||||
3. Verify leg accuracy against existing code
|
||||
4. **Update leg status** to `in-flight`
|
||||
5. Present summary and get approval before proceeding
|
||||
|
||||
### Implementation
|
||||
6. Implement to acceptance criteria
|
||||
7. Run tests with a timeout — use the test runner's timeout flag (e.g., `--timeout`,
|
||||
`--test-timeout`, `-timeout`) so hanging tests fail fast instead of stalling.
|
||||
If a test hangs, kill it, isolate the hanging test, and fix the root cause before
|
||||
continuing. Log hanging tests and their resolution in the flight log.
|
||||
8. Run code review, fix Critical/Major issues
|
||||
9. Re-review until clean
|
||||
|
||||
### Post-Implementation
|
||||
10. Propagate changes (project docs, flight artifacts if scope changed)
|
||||
11. **Complete the Leg Completion Checklist above**
|
||||
12. Signal `[COMPLETE:leg]`
|
||||
|
||||
---
|
||||
|
||||
## Just-in-Time Planning
|
||||
|
||||
Flights and legs are created one at a time, not upfront.
|
||||
|
||||
| Reviewing... | Should exist | Should NOT exist yet |
|
||||
|--------------|--------------|----------------------|
|
||||
| Mission | Mission artifact | Flight artifacts (only listed) |
|
||||
| Flight | Flight artifact | Leg artifacts (only listed) |
|
||||
| Leg | Leg artifact | Ready to implement |
|
||||
|
||||
Listed flights/legs are **tentative suggestions** that evolve based on discoveries.
|
||||
|
||||
---
|
||||
|
||||
## Reviewing Artifacts
|
||||
|
||||
When reviewing a mission, flight, or leg:
|
||||
|
||||
1. Read the artifact thoroughly
|
||||
2. Validate against project goals and existing code
|
||||
3. Check for ambiguities or missing details
|
||||
4. Make changes directly if needed
|
||||
5. Describe any changes made
|
||||
6. Signal `[HANDOFF:confirmed]` if no issues, or describe changes for validation
|
||||
|
||||
---
|
||||
|
||||
## Code Review Gate
|
||||
|
||||
```
|
||||
Implement → Test → Review → Fix → Re-review → Complete
|
||||
```
|
||||
|
||||
| Severity | Action |
|
||||
|----------|--------|
|
||||
| Critical | Must fix |
|
||||
| Major | Must fix |
|
||||
| Minor | Fix if safe, else defer |
|
||||
|
||||
Deferred issues go in the flight log.
|
||||
|
||||
---
|
||||
|
||||
## ⚠️ Flight Completion Checklist (MANDATORY)
|
||||
|
||||
**When you complete the FINAL leg of a flight, also complete these steps:**
|
||||
|
||||
| Step | Action |
|
||||
|------|--------|
|
||||
| 1 | Complete all items in the Leg Completion Checklist above |
|
||||
| 2 | **Update flight log** — Add flight completion entry with summary |
|
||||
| 3 | **Update flight status** — Set `**Status**: landed` in flight.md |
|
||||
| 4 | **Update mission** — Check off this flight in mission.md |
|
||||
| 5 | **Verify all legs** — Confirm all legs show `completed` status |
|
||||
| 6 | **Update project docs** — Ensure CLAUDE.md, README, and other docs reflect any new commands, endpoints, configuration, or APIs introduced during the flight |
|
||||
| 7 | Signal `[COMPLETE:leg]` (the orchestrator will trigger Phase 4) |
|
||||
|
||||
The orchestrator will then:
|
||||
- Mark the PR ready for human review
|
||||
|
||||
The flight debrief is a separate step run via `/flight-debrief`, which transitions the flight from `landed` to `completed`.
|
||||
|
||||
---
|
||||
|
||||
## Database Schema Changes
|
||||
|
||||
When a flight modifies database schemas:
|
||||
|
||||
1. **Include migration steps in the leg** — schema changes need explicit CREATE/ALTER statements or migration commands
|
||||
2. **Verify migrations run** — acceptance criteria must include confirming the migration executed successfully against the live database
|
||||
3. **Update SCHEMA docs** — if the project maintains a SCHEMA reference, update it in the same leg that creates the migration
|
||||
4. **Test against real DB** — unit tests with mocks are not sufficient for schema changes; verify against the actual database
|
||||
|
||||
A table defined in SCHEMA but never created via migration is a gap — treat schema documentation and migration execution as a single atomic operation.
|
||||
|
||||
---
|
||||
|
||||
## Key Principles
|
||||
|
||||
1. **Flight log is ground truth** — Read it first, update it always
|
||||
2. **Never modify in-flight legs** — Create new ones instead
|
||||
3. **Binary acceptance criteria** — Met or not met
|
||||
4. **Log everything** — Decisions, deviations, anomalies
|
||||
5. **Signal clearly** — End of response, own line
|
||||
@@ -0,0 +1,29 @@
|
||||
# Flight Operations
|
||||
|
||||
This directory contains reference materials for the [Flight Control](https://github.com/msieurthenardier/mission-control) development methodology.
|
||||
|
||||
## Contents
|
||||
|
||||
- **FLIGHT_OPERATIONS.md** — Quick reference for implementing missions, flights, and legs
|
||||
- **ARTIFACTS.md** — Project-specific configuration for how artifacts are stored
|
||||
- **agent-crews/** — Project crew definitions for each phase (who Mission Control works with)
|
||||
|
||||
## For AI Agents
|
||||
|
||||
When working on this project with Flight Control:
|
||||
|
||||
1. Read `ARTIFACTS.md` to understand how this project stores missions, flights, and legs
|
||||
2. Read `FLIGHT_OPERATIONS.md` for the implementation workflow
|
||||
3. Read the relevant `agent-crews/*.md` file for crew definitions and prompts
|
||||
4. Check the artifact locations defined in `ARTIFACTS.md` for active work
|
||||
5. Follow the code review gate before marking any leg complete
|
||||
6. Update flight-log after each leg (location depends on artifact system)
|
||||
|
||||
## Sync Behavior
|
||||
|
||||
| File | Synced? | Notes |
|
||||
|------|---------|-------|
|
||||
| README.md | Yes | Updated via `/init-project` |
|
||||
| FLIGHT_OPERATIONS.md | Yes | Updated via `/init-project` |
|
||||
| ARTIFACTS.md | No | Project-specific, customize freely |
|
||||
| agent-crews/*.md | No | Project-specific, customize freely |
|
||||
215
container/mission-control/.claude/skills/init-project/SKILL.md
Normal file
215
container/mission-control/.claude/skills/init-project/SKILL.md
Normal file
@@ -0,0 +1,215 @@
|
||||
---
|
||||
name: init-project
|
||||
description: Initialize a project for Flight Control. Creates .flightops directory with methodology reference and artifact configuration. Run before using other Flight Control skills on a new project.
|
||||
---
|
||||
|
||||
# Project Initialization
|
||||
|
||||
Prepare a project for Flight Control by creating the `.flightops/` directory with methodology references and artifact configuration.
|
||||
|
||||
## When to Use
|
||||
|
||||
Run `/init-project` when:
|
||||
- Starting to use Flight Control on a new project
|
||||
- You suspect the .flightops reference may be outdated
|
||||
- Another skill indicates the reference needs to be synced
|
||||
|
||||
## Workflow
|
||||
|
||||
### 1. Identify Target Project
|
||||
|
||||
1. **Read `projects.md`** to find the project's path, remote, and description
|
||||
2. If the project isn't listed, ask the user for:
|
||||
- Project name/slug
|
||||
- Filesystem path
|
||||
- Brief description
|
||||
3. Optionally offer to add the project to `projects.md`
|
||||
|
||||
### 2. Check and Apply Migrations
|
||||
|
||||
Check for legacy directory layouts and offer to migrate them.
|
||||
|
||||
1. **Read `migrations.md`** from the skill directory (`${SKILL_DIR}/migrations.md`)
|
||||
2. **Run detection checks** for each migration in order (001, 002, ...)
|
||||
3. **If no migrations are needed**, proceed silently to the next step
|
||||
4. **If any migrations are needed**, present a summary to the user:
|
||||
> "Detected legacy directory layout in {project}. The following migrations are available:"
|
||||
>
|
||||
> - _Each applicable migration's user message_
|
||||
>
|
||||
> "Apply these migrations?"
|
||||
5. **On confirmation**, apply the actions for each applicable migration in order
|
||||
6. **On decline**, warn the user that some skills may not work correctly with the old layout, but continue using whatever directory structure exists
|
||||
|
||||
### 3. Check Sync Status
|
||||
|
||||
Run the hash comparison script to determine sync status:
|
||||
|
||||
```bash
|
||||
bash "${SKILL_DIR}/check-sync.sh" \
|
||||
"${SKILL_DIR}" \
|
||||
"{target-project}/.flightops"
|
||||
```
|
||||
|
||||
The script outputs one of:
|
||||
- `missing` - Directory doesn't exist in target project
|
||||
- `outdated` - Directory exists but files differ from source
|
||||
- `current` - All files are up-to-date
|
||||
|
||||
### 4. Prompt and Sync Methodology Files
|
||||
|
||||
Based on the status:
|
||||
|
||||
**If `missing`**:
|
||||
> "Flight operations directory not found. Create `{project}/.flightops/` with methodology references?"
|
||||
|
||||
**If `outdated`**:
|
||||
> "Flight operations references in {project} are outdated. Update?"
|
||||
|
||||
**If `current`**:
|
||||
> "Flight operations references are up-to-date in {project}."
|
||||
|
||||
If the user confirms, create/update the directory:
|
||||
|
||||
```bash
|
||||
mkdir -p "{target-project}/.flightops"
|
||||
cp "${SKILL_DIR}/FLIGHT_OPERATIONS.md" "{target-project}/.flightops/"
|
||||
cp "${SKILL_DIR}/README.md" "{target-project}/.flightops/"
|
||||
```
|
||||
|
||||
### 5. Configure Artifact System (New Projects Only)
|
||||
|
||||
**Only if ARTIFACTS.md doesn't exist**, ask the user to select an artifact system:
|
||||
|
||||
> "How should mission, flight, and leg artifacts be stored?"
|
||||
|
||||
Available templates:
|
||||
- **files** — Markdown files in the repository (`templates/ARTIFACTS-files.md`)
|
||||
- **jira** — Jira issues: Epics, Stories, Sub-tasks (`templates/ARTIFACTS-jira.md`)
|
||||
|
||||
#### 5a. Check for Setup Questions
|
||||
|
||||
After the user selects a template, read the template file and check if it contains a `## Setup Questions` section with a table of questions.
|
||||
|
||||
If setup questions exist:
|
||||
1. Parse the questions from the table (first column contains the questions)
|
||||
2. Ask the user each question interactively
|
||||
3. Replace the placeholder answers in the table with the user's responses
|
||||
|
||||
#### 5b. Copy and Populate Template
|
||||
|
||||
Copy the selected template, with answers populated if setup questions were asked:
|
||||
|
||||
```bash
|
||||
cp "${SKILL_DIR}/templates/ARTIFACTS-{selection}.md" \
|
||||
"{target-project}/.flightops/ARTIFACTS.md"
|
||||
```
|
||||
|
||||
If setup questions were answered, update the ARTIFACTS.md file to replace the placeholder answers with the user's responses.
|
||||
|
||||
**If ARTIFACTS.md already exists**, do not modify it — it's project-specific and may have been customized.
|
||||
|
||||
### 6. Configure Project Crew
|
||||
|
||||
Set up phase-specific crew definitions that control how Mission Control interacts with project-side agents.
|
||||
|
||||
1. **Check if `.flightops/agent-crews/` exists**
|
||||
|
||||
**If missing** (first run):
|
||||
- Copy all defaults from `${SKILL_DIR}/defaults/agent-crews/` to `{target-project}/.flightops/agent-crews/`
|
||||
- Brief the user:
|
||||
> "Default crew has been set up for all phases. Your agent crews define who Mission Control (Flight Director) works with during each phase — which agents exist, their roles, models, and prompts."
|
||||
- Ask about customization:
|
||||
> "Want to customize any agent crew? (Most projects work fine with defaults)"
|
||||
- If yes: ask which crew → show current definitions → walk through changes (add/remove/modify roles, adjust prompts, change interaction protocol)
|
||||
- If no: proceed with defaults
|
||||
|
||||
**If exists** (re-run):
|
||||
- Copy any missing crew files from defaults (new crews added to the methodology)
|
||||
- Ask the user:
|
||||
> "Agent crew files already exist. Default crew definitions may have been updated since your project was initialized. Want to review and update any crew files to the latest defaults?"
|
||||
- If yes: for each file, show what changed between their version and the current default, ask whether to overwrite or keep their version
|
||||
- If no: leave all existing files untouched
|
||||
|
||||
### 7. Update CLAUDE.md
|
||||
|
||||
Check if the project's `CLAUDE.md` file references the flight operations directory:
|
||||
|
||||
1. **If CLAUDE.md doesn't exist**, create it with a Flight Operations section
|
||||
2. **If CLAUDE.md exists but lacks a Flight Operations section**, append one
|
||||
3. **If CLAUDE.md already has a Flight Operations section**, leave it unchanged
|
||||
|
||||
#### 7a. Fix Stale Path References
|
||||
|
||||
If migrations were applied in Step 2, scan the project's `CLAUDE.md` for stale path references within the Flight Operations section and fix them:
|
||||
|
||||
- Replace `.flight-ops/` → `.flightops/`
|
||||
- Replace `phases/` → `agent-crews/` (only within Flight Operations context)
|
||||
|
||||
This ensures the CLAUDE.md instructions point to the correct post-migration paths.
|
||||
|
||||
Add this section:
|
||||
|
||||
```markdown
|
||||
## Flight Operations
|
||||
|
||||
This project uses [Flight Control](https://github.com/msieurthenardier/mission-control).
|
||||
|
||||
**Before any mission/flight/leg work, read these files in order:**
|
||||
1. `.flightops/README.md` — What the flightops directory contains
|
||||
2. `.flightops/FLIGHT_OPERATIONS.md` — **The workflow you MUST follow**
|
||||
3. `.flightops/ARTIFACTS.md` — Where all artifacts are stored
|
||||
4. `.flightops/agent-crews/` — Project crew definitions for each phase (read the relevant crew file)
|
||||
```
|
||||
|
||||
### 8. Post-Sync Instructions
|
||||
|
||||
After creating or updating the directory, inform the user:
|
||||
|
||||
> "If you have Claude Code running in {project}, restart it to pick up the new flight operations references."
|
||||
|
||||
This ensures Claude Code loads the new files into its context when working in the target project.
|
||||
|
||||
## Output
|
||||
|
||||
This skill creates/updates the following at project root:
|
||||
|
||||
```
|
||||
{project}/
|
||||
├── CLAUDE.md # Updated with Flight Operations section
|
||||
└── .flightops/ # Hidden directory for Flight Control
|
||||
├── README.md # Explains the directory purpose
|
||||
├── FLIGHT_OPERATIONS.md # Quick reference for implementation (synced)
|
||||
├── ARTIFACTS.md # Artifact system configuration (project-specific)
|
||||
└── agent-crews/ # Project crew definitions (project-specific)
|
||||
├── mission-design.md
|
||||
├── flight-design.md
|
||||
├── leg-execution.md
|
||||
├── flight-debrief.md
|
||||
├── mission-debrief.md
|
||||
└── routine-maintenance.md
|
||||
```
|
||||
|
||||
## File Sync Behavior
|
||||
|
||||
| File | Synced on update? | Notes |
|
||||
|------|-------------------|-------|
|
||||
| CLAUDE.md | Append only | Adds Flight Operations section if missing |
|
||||
| README.md | Yes | Methodology reference |
|
||||
| FLIGHT_OPERATIONS.md | Yes | Methodology reference |
|
||||
| ARTIFACTS.md | No | Created once from template, then project-specific |
|
||||
| agent-crews/*.md | Ask on re-run | Created from defaults; on re-run, user can choose to update to latest defaults |
|
||||
|
||||
## Guidelines
|
||||
|
||||
### Don't Over-Prompt
|
||||
|
||||
If everything is `current`, just inform the user briefly and move on. No confirmation needed.
|
||||
|
||||
### Respect ARTIFACTS.md
|
||||
|
||||
Never overwrite ARTIFACTS.md — it may contain project-specific customizations. Only create it if missing.
|
||||
|
||||
### Keep It Quick
|
||||
|
||||
This is a setup step, not the main work. Complete it efficiently so the user can proceed to their actual task.
|
||||
119
container/mission-control/.claude/skills/init-project/check-sync.sh
Executable file
119
container/mission-control/.claude/skills/init-project/check-sync.sh
Executable file
@@ -0,0 +1,119 @@
|
||||
#!/bin/bash
# check-sync.sh - Compare .flightops directory between source and target
#
# Usage: check-sync.sh <source-dir> <target-dir>
#
# Outputs (first line is always exactly one of):
#   missing  - Target directory doesn't exist (neither .flightops/ nor .flight-ops/)
#   outdated - Target exists but one or more files differ from source
#   current  - All files match source
#
# Additional output lines:
#   agent-crews:{missing|empty|present} - Crew directory status
#   crew-missing:<file> - Default crew file not found in project (repeats per file)
#   legacy-layout:flight-ops - .flight-ops/ detected (old name)
#   legacy-layout:phases - phases/ detected (old name)
#
# Note: Only checks synced files (README.md, FLIGHT_OPERATIONS.md).
# ARTIFACTS.md and agent-crews/ are project-specific and not synced.

# Fail fast on errors, unset variables, and broken pipelines (the hash
# extraction below pipes sha256sum into cut; pipefail surfaces hash failures).
set -euo pipefail

SOURCE_DIR="${1:-}"
TARGET_DIR="${2:-}"

if [[ -z "$SOURCE_DIR" || -z "$TARGET_DIR" ]]; then
  echo "Usage: check-sync.sh <source-dir> <target-dir>" >&2
  exit 1
fi

if [[ ! -d "$SOURCE_DIR" ]]; then
  echo "Error: Source directory not found: $SOURCE_DIR" >&2
  exit 1
fi

# Resolve the actual target directory, falling back to legacy name
EFFECTIVE_TARGET="$TARGET_DIR"
LEGACY_FLIGHT_OPS=false

if [[ ! -d "$TARGET_DIR" ]]; then
  # Derive the legacy path: replace trailing .flightops with .flight-ops.
  # If the suffix substitution changed nothing, TARGET_DIR didn't end in
  # .flightops and there is no legacy candidate to try.
  LEGACY_DIR="${TARGET_DIR%/.flightops}/.flight-ops"
  if [[ "$LEGACY_DIR" != "$TARGET_DIR" && -d "$LEGACY_DIR" ]]; then
    EFFECTIVE_TARGET="$LEGACY_DIR"
    LEGACY_FLIGHT_OPS=true
  else
    echo "missing"
    exit 0
  fi
fi

# Compare each synced file between source and effective target.
# A file missing from the source is skipped (nothing to sync); a file
# missing from the target, or differing by content hash, marks the
# target as outdated.
FILES_TO_CHECK=("FLIGHT_OPERATIONS.md" "README.md")
ALL_CURRENT=true

for FILE in "${FILES_TO_CHECK[@]}"; do
  SOURCE_FILE="$SOURCE_DIR/$FILE"
  TARGET_FILE="$EFFECTIVE_TARGET/$FILE"

  if [[ ! -f "$SOURCE_FILE" ]]; then
    continue
  fi

  if [[ ! -f "$TARGET_FILE" ]]; then
    ALL_CURRENT=false
    break
  fi

  # Content comparison via sha256 (GNU coreutils sha256sum; output is
  # "<hash>  <filename>", so take the first space-delimited field).
  SOURCE_HASH=$(sha256sum "$SOURCE_FILE" | cut -d' ' -f1)
  TARGET_HASH=$(sha256sum "$TARGET_FILE" | cut -d' ' -f1)

  if [[ "$SOURCE_HASH" != "$TARGET_HASH" ]]; then
    ALL_CURRENT=false
    break
  fi
done

if $ALL_CURRENT; then
  echo "current"
else
  echo "outdated"
fi

# Report on agent-crews directory existence, checking both current and legacy names
CREW_DIR=""
LEGACY_PHASES=false

if [[ -d "$EFFECTIVE_TARGET/agent-crews" ]]; then
  CREW_DIR="$EFFECTIVE_TARGET/agent-crews"
elif [[ -d "$EFFECTIVE_TARGET/phases" ]]; then
  CREW_DIR="$EFFECTIVE_TARGET/phases"
  LEGACY_PHASES=true
fi

if [[ -z "$CREW_DIR" ]]; then
  echo "agent-crews:missing"
elif [[ -z "$(ls -A "$CREW_DIR" 2>/dev/null)" ]]; then
  echo "agent-crews:empty"
else
  echo "agent-crews:present"
  # Check for missing crew files (new skills added since init)
  DEFAULT_CREWS_DIR="$SOURCE_DIR/defaults/agent-crews"
  if [[ -d "$DEFAULT_CREWS_DIR" ]]; then
    for DEFAULT_FILE in "$DEFAULT_CREWS_DIR"/*.md; do
      # With no *.md files present, the unmatched glob is passed through
      # literally; skip it so we don't emit a bogus "crew-missing:*.md".
      [[ -e "$DEFAULT_FILE" ]] || continue
      BASENAME=$(basename "$DEFAULT_FILE")
      if [[ ! -f "$CREW_DIR/$BASENAME" ]]; then
        echo "crew-missing:$BASENAME"
      fi
    done
  fi
fi

# Report legacy layout detection
if $LEGACY_FLIGHT_OPS; then
  echo "legacy-layout:flight-ops"
fi

if $LEGACY_PHASES; then
  echo "legacy-layout:phases"
fi
|
||||
@@ -0,0 +1,126 @@
|
||||
# Flight Debrief — Project Crew
|
||||
|
||||
Crew definitions for post-flight analysis. The Flight Director interviews the
|
||||
human and project-side agents to capture both execution and design perspectives.
|
||||
|
||||
## Crew
|
||||
|
||||
### Developer
|
||||
- **Context**: {project}/
|
||||
- **Model**: Sonnet
|
||||
- **Role**: Provides developer perspective on flight execution. Reviews what was
|
||||
built, identifies technical debt introduced, evaluates implementation quality,
|
||||
and surfaces issues that flight logs may not capture.
|
||||
- **Actions**: debrief-interview
|
||||
|
||||
### Architect
|
||||
- **Context**: {project}/
|
||||
- **Model**: Sonnet
|
||||
- **Role**: Closes the design feedback loop. Evaluates whether the design decisions
|
||||
made during flight planning held up in practice. Reviews architectural impact of
|
||||
what was built and whether the approach should be adjusted for future flights.
|
||||
- **Actions**: debrief-design-review
|
||||
|
||||
## Interaction Protocol
|
||||
|
||||
### Developer Interview
|
||||
1. Flight Director loads flight context (mission, flight, legs, log, actual code)
|
||||
2. Flight Director spawns **Developer** to review implementation and provide feedback
|
||||
3. Developer examines code changes, test coverage, patterns used, debt introduced
|
||||
4. Developer provides structured debrief input
|
||||
|
||||
### Architect Interview
|
||||
1. Flight Director spawns **Architect** to review whether design decisions held up
|
||||
2. Architect compares flight-design spec against actual implementation
|
||||
3. Architect evaluates architectural impact and provides feedback for future flights
|
||||
|
||||
### Human Interview
|
||||
1. Flight Director interviews human with targeted questions based on flight log
|
||||
2. Keep lightweight — 2-3 questions max
|
||||
|
||||
### Synthesis
|
||||
1. Flight Director synthesizes Developer input + Architect input + human input + document analysis
|
||||
2. Generates debrief artifact
|
||||
|
||||
## Template Variables
|
||||
|
||||
The Flight Director substitutes these variables in prompts at runtime:
|
||||
|
||||
| Variable | Description |
|
||||
|----------|-------------|
|
||||
| `{project-slug}` | Project identifier from projects.md |
|
||||
| `{flight-number}` | Current flight number |
|
||||
| `{flight-artifact-path}` | Path to the flight artifact file |
|
||||
|
||||
## Prompts
|
||||
|
||||
### Developer: Debrief Interview
|
||||
|
||||
```
|
||||
role: developer
|
||||
phase: flight-debrief
|
||||
project: {project-slug}
|
||||
flight: {flight-number}
|
||||
action: debrief-interview
|
||||
|
||||
Review the implementation produced during this flight. Examine the code changes,
|
||||
test coverage, and architectural decisions made.
|
||||
|
||||
Provide structured input for the debrief:
|
||||
|
||||
**Implementation Quality**:
|
||||
- Does the code follow project conventions?
|
||||
- Are there patterns that should be documented?
|
||||
- What technical debt was introduced?
|
||||
|
||||
**Leg Spec Accuracy**:
|
||||
- Were leg specs clear and sufficient for implementation?
|
||||
- What was missing or misleading?
|
||||
- Were acceptance criteria verifiable?
|
||||
|
||||
**Testing Assessment**:
|
||||
- Is test coverage adequate?
|
||||
- Are there untested edge cases?
|
||||
- Do tests meaningfully validate behavior?
|
||||
|
||||
**Recommendations**:
|
||||
- What should future flights in this area account for?
|
||||
- Are there refactoring opportunities?
|
||||
- What documentation is missing?
|
||||
```
|
||||
|
||||
### Architect: Debrief Design Review
|
||||
|
||||
```
|
||||
role: architect
|
||||
phase: flight-debrief
|
||||
project: {project-slug}
|
||||
flight: {flight-number}
|
||||
action: debrief-design-review
|
||||
|
||||
Read the flight artifact at {flight-artifact-path}. Compare the design decisions,
|
||||
technical approach, and leg breakdown that were planned against what was actually
|
||||
implemented.
|
||||
|
||||
Provide structured input for the debrief:
|
||||
|
||||
**Design Decisions Assessment**:
|
||||
- Which design decisions held up well in practice?
|
||||
- Which decisions had to be revised during implementation? Why?
|
||||
- Were there decisions that should have been made differently?
|
||||
|
||||
**Architectural Impact**:
|
||||
- Did the implementation maintain or improve the system's architecture?
|
||||
- Were there unplanned structural changes? Are they sound?
|
||||
- Did the approach create any architectural debt?
|
||||
|
||||
**Flight Design Accuracy**:
|
||||
- Was the technical approach feasible as specified?
|
||||
- Were prerequisites correctly identified?
|
||||
- Was the leg breakdown appropriate for the actual work?
|
||||
|
||||
**Forward-Looking**:
|
||||
- What should future flight designs in this area account for?
|
||||
- Are there architectural patterns that emerged worth standardizing?
|
||||
- What design assumptions should be revisited?
|
||||
```
|
||||
@@ -0,0 +1,72 @@
|
||||
# Flight Design — Project Crew
|
||||
|
||||
Crew definitions for flight specification. The Flight Director designs the
|
||||
technical spec and uses project-side agents to validate against the real codebase.
|
||||
|
||||
## Crew
|
||||
|
||||
### Architect
|
||||
- **Context**: {project}/
|
||||
- **Model**: Sonnet
|
||||
- **Role**: Reviews flight specs for technical soundness. Validates design
|
||||
decisions, prerequisites, technical approach, and leg breakdown against
|
||||
architecture best practices and actual codebase state. Ensures the flight
|
||||
is buildable and well-structured.
|
||||
- **Actions**: review-flight-design
|
||||
|
||||
## Interaction Protocol
|
||||
|
||||
### Design Review
|
||||
1. Flight Director creates flight spec and interviews human
|
||||
2. Flight Director spawns **Architect** to review against codebase
|
||||
3. Architect evaluates design decisions, prerequisites, approach, leg breakdown
|
||||
4. Flight Director incorporates feedback
|
||||
5. Max 2 review cycles — escalate to human if unresolved
|
||||
|
||||
## Template Variables
|
||||
|
||||
The Flight Director substitutes these variables in prompts at runtime:
|
||||
|
||||
| Variable | Description |
|
||||
|----------|-------------|
|
||||
| `{project-slug}` | Project identifier from projects.md |
|
||||
| `{flight-number}` | Current flight number |
|
||||
| `{flight-artifact-path}` | Path to the flight artifact file |
|
||||
|
||||
## Prompts
|
||||
|
||||
### Architect: Review Flight Design
|
||||
|
||||
```
|
||||
role: architect
|
||||
phase: flight-design-review
|
||||
project: {project-slug}
|
||||
flight: {flight-number}
|
||||
action: review-flight-design
|
||||
|
||||
Read the flight artifact at {flight-artifact-path}. Cross-reference its design
|
||||
decisions, prerequisites, technical approach, and leg breakdown against the actual
|
||||
codebase state and architecture best practices.
|
||||
|
||||
Evaluate:
|
||||
1. Design decisions — are they sound given the real codebase and architecture?
|
||||
2. Prerequisites — are they accurate? Is anything missing or already done?
|
||||
3. Technical approach — is it feasible? Does it follow existing patterns?
|
||||
4. Leg breakdown — are legs well-scoped, properly ordered, with correct dependencies?
|
||||
5. Codebase state — does the spec account for current working tree, existing tooling,
|
||||
and conventions that might affect implementation?
|
||||
6. Architecture — does the approach maintain or improve system structure?
|
||||
|
||||
Provide structured output:
|
||||
|
||||
**Overall assessment**: approve | approve with changes | needs rework
|
||||
|
||||
**Issues** (ranked by severity):
|
||||
- [high/medium/low] Description — recommended fix
|
||||
|
||||
**Suggestions** (non-blocking improvements):
|
||||
- Description
|
||||
|
||||
**Questions** (for the designer to clarify):
|
||||
- Question
|
||||
```
|
||||
@@ -0,0 +1,216 @@
|
||||
# Leg Execution — Project Crew
|
||||
|
||||
Crew definitions and interaction protocol for implementing flight legs.
|
||||
The Flight Director (Mission Control) orchestrates this phase using the
|
||||
/agentic-workflow skill.
|
||||
|
||||
## Crew
|
||||
|
||||
### Developer
|
||||
- **Context**: {working-directory}/
|
||||
- **Model**: Sonnet
|
||||
- **Role**: Implements code changes. Also performs design reviews against real
|
||||
codebase to validate leg specs before implementation.
|
||||
- **Actions**: implement, fix-review-issues, commit, review-leg-design
|
||||
|
||||
### Reviewer
|
||||
- **Context**: {working-directory}/
|
||||
- **Model**: Sonnet (NEVER Opus)
|
||||
- **Role**: Reviews code changes for quality, correctness, and criteria compliance.
|
||||
Has NO knowledge of Developer's reasoning — only sees resulting changes.
|
||||
- **Actions**: review
|
||||
|
||||
### Accessibility Reviewer (optional)
|
||||
- **Context**: {working-directory}/
|
||||
- **Model**: Sonnet
|
||||
- **Enabled**: false
|
||||
- **Role**: Reviews UI changes for accessibility compliance. Evaluates against
|
||||
WCAG 2.1 AA standards, screen reader compatibility, keyboard navigation,
|
||||
color contrast, ARIA usage, and semantic HTML. Only spawn when the leg
|
||||
involves user-facing interface changes.
|
||||
- **Actions**: review-accessibility
|
||||
|
||||
## Separation Rules
|
||||
|
||||
- Developer and Reviewer load the target project's CLAUDE.md and conventions
|
||||
- Reviewer has NO knowledge of Developer's reasoning — only resulting changes
|
||||
- Each agent instance gets fresh context (no carryover between legs)
|
||||
|
||||
**Note:** Handoff signals (`[HANDOFF:review-needed]`, `[HANDOFF:confirmed]`, `[BLOCKED:reason]`, `[COMPLETE:leg]`) are defined by the Flight Control methodology in the agentic-workflow skill, not in this file. Do not modify signal names here — they must match what the Flight Director expects.
|
||||
|
||||
## Interaction Protocol
|
||||
|
||||
### Design Review
|
||||
1. Flight Director spawns **Developer** for design review
|
||||
2. Developer reviews leg against codebase, provides structured assessment
|
||||
3. Flight Director incorporates feedback
|
||||
4. Max 2 review cycles — escalate to human if unresolved
|
||||
|
||||
### Implementation
|
||||
1. Flight Director spawns **Developer** to implement
|
||||
2. Developer implements to acceptance criteria, updates flight log
|
||||
3. Developer signals [HANDOFF:review-needed] — does NOT commit
|
||||
|
||||
### Code Review
|
||||
1. Flight Director spawns **Reviewer** to evaluate all uncommitted changes
|
||||
2. If **Accessibility Reviewer** is enabled and leg involves UI changes,
|
||||
spawn in parallel with Reviewer
|
||||
3. If issues: Flight Director spawns new **Developer** to fix
|
||||
4. Loop until all reviewers signal [HANDOFF:confirmed] — max 3 cycles
|
||||
|
||||
### Commit
|
||||
1. Flight Director spawns **Developer** to commit
|
||||
2. Developer commits code + artifacts, signals [COMPLETE:leg]
|
||||
|
||||
## Template Variables
|
||||
|
||||
The Flight Director substitutes these variables in prompts at runtime:
|
||||
|
||||
| Variable | Description | Available In |
|
||||
|----------|-------------|-------------|
|
||||
| `{project-slug}` | Project identifier from projects.md | All prompts |
|
||||
| `{flight-number}` | Current flight number | All prompts |
|
||||
| `{leg-number}` | Current leg number | Leg-scoped prompts |
|
||||
| `{leg-artifact-path}` | Path to the leg artifact file | review-leg-design |
|
||||
| `{working-directory}` | Resolved working directory for the agent (project root for branch strategy, worktree path for worktree strategy) | All prompts |
|
||||
| `{reviewer-issues}` | Full text of reviewer feedback (dynamic) | fix-review-issues |
|
||||
|
||||
## Prompts
|
||||
|
||||
### Developer: Review Leg Design
|
||||
|
||||
```
|
||||
role: developer
|
||||
phase: leg-design-review
|
||||
project: {project-slug}
|
||||
flight: {flight-number}
|
||||
leg: {leg-number}
|
||||
action: review-leg-design
|
||||
|
||||
Read the leg artifact at {leg-artifact-path}. Cross-reference its acceptance
|
||||
criteria, implementation guidance, and file references against the actual codebase.
|
||||
|
||||
Evaluate:
|
||||
1. Acceptance criteria — specific, verifiable, complete?
|
||||
2. Implementation guidance — complete and correctly ordered?
|
||||
3. Edge cases — missing scenarios?
|
||||
4. Codebase state — account for working tree, existing tooling, uncommitted changes?
|
||||
5. File/line references — accurate against current codebase?
|
||||
6. Dependencies — prerequisite legs completed? Outputs available?
|
||||
|
||||
Provide structured output:
|
||||
|
||||
**Overall assessment**: approve | approve with changes | needs rework
|
||||
|
||||
**Issues** (ranked by severity):
|
||||
- [high/medium/low] Description — recommended fix
|
||||
|
||||
**Suggestions** (non-blocking):
|
||||
- Description
|
||||
|
||||
**Questions** (for the designer):
|
||||
- Question
|
||||
```
|
||||
|
||||
### Developer: Implement
|
||||
|
||||
```
|
||||
role: developer
|
||||
phase: leg-implementation
|
||||
project: {project-slug}
|
||||
flight: {flight-number}
|
||||
leg: {leg-number}
|
||||
action: implement
|
||||
|
||||
Read leg artifact. Update leg status to in-flight. Implement to acceptance criteria.
|
||||
Run tests with a timeout flag appropriate to this project's test runner — fail fast,
|
||||
do not wait indefinitely for hanging tests. If a test hangs, isolate and fix it.
|
||||
Update flight log with outcomes. Propagate changes to artifacts (flight, mission, leg),
|
||||
CLAUDE.md, README, and other project documentation as needed. Do NOT commit yet —
|
||||
signal [HANDOFF:review-needed] when implementation is complete.
|
||||
```
|
||||
|
||||
### Reviewer: Review
|
||||
|
||||
```
|
||||
role: reviewer
|
||||
phase: leg-review
|
||||
project: {project-slug}
|
||||
flight: {flight-number}
|
||||
leg: {leg-number}
|
||||
action: review
|
||||
|
||||
Review all changes since the last commit. Evaluate against:
|
||||
1. Leg acceptance criteria — are all criteria met?
|
||||
2. Code quality — style, clarity, maintainability
|
||||
3. Correctness — edge cases, error handling, security
|
||||
4. Tests — coverage, meaningful assertions, no regressions
|
||||
5. Artifacts — flight log updated, leg status correct
|
||||
|
||||
Signal [HANDOFF:confirmed] if all changes are satisfactory.
|
||||
If issues found, list them with severity (blocking/non-blocking) and specific
|
||||
file:line references.
|
||||
```
|
||||
|
||||
### Accessibility Reviewer: Review Accessibility
|
||||
|
||||
```
|
||||
role: accessibility-reviewer
|
||||
phase: leg-review
|
||||
project: {project-slug}
|
||||
flight: {flight-number}
|
||||
leg: {leg-number}
|
||||
action: review-accessibility
|
||||
|
||||
Review all UI changes since the last commit for accessibility compliance.
|
||||
|
||||
Evaluate against:
|
||||
1. WCAG 2.1 AA — do changes meet Level AA success criteria?
|
||||
2. Semantic HTML — proper heading hierarchy, landmark regions, form labels?
|
||||
3. Keyboard navigation — all interactive elements reachable and operable?
|
||||
4. Screen readers — ARIA attributes correct and meaningful? Live regions?
|
||||
5. Color and contrast — minimum 4.5:1 for text, 3:1 for large text/UI?
|
||||
6. Focus management — visible focus indicators, logical tab order?
|
||||
|
||||
Signal [HANDOFF:confirmed] if all changes are accessible.
|
||||
If issues found, list them with severity (blocking/non-blocking), WCAG criterion
|
||||
reference, and specific file:line references.
|
||||
```
|
||||
|
||||
### Developer: Fix Review Issues
|
||||
|
||||
```
|
||||
role: developer
|
||||
phase: leg-implementation
|
||||
project: {project-slug}
|
||||
flight: {flight-number}
|
||||
leg: {leg-number}
|
||||
action: fix-review-issues
|
||||
|
||||
Address the following review feedback:
|
||||
{reviewer-issues}
|
||||
|
||||
Fix all blocking issues. Non-blocking issues: fix if straightforward, otherwise
|
||||
note as accepted. Signal [HANDOFF:review-needed] when fixes are complete.
|
||||
```
|
||||
|
||||
### Developer: Commit
|
||||
|
||||
```
|
||||
role: developer
|
||||
phase: leg-implementation
|
||||
project: {project-slug}
|
||||
flight: {flight-number}
|
||||
leg: {leg-number}
|
||||
action: commit
|
||||
|
||||
Review has passed. Before committing, complete ALL post-completion checklist items
|
||||
in the leg artifact:
|
||||
1. Check off all acceptance criteria in the leg artifact
|
||||
2. Update leg status to completed
|
||||
3. Check off this leg in flight.md
|
||||
4. If final leg: update flight.md status to landed, check off flight in mission.md
|
||||
|
||||
Then commit all changes (code + artifacts) with appropriate message.
|
||||
Signal [COMPLETE:leg].
|
||||
```
|
||||
@@ -0,0 +1,74 @@
|
||||
# Mission Debrief — Project Crew
|
||||
|
||||
Crew definitions for post-mission retrospective. The Flight Director interviews
|
||||
both the human and a project-side Architect to capture strategic technical perspective.
|
||||
|
||||
## Crew
|
||||
|
||||
### Architect
|
||||
- **Context**: {project}/
|
||||
- **Model**: Sonnet
|
||||
- **Role**: Provides architectural perspective on mission outcomes. Evaluates
|
||||
whether the system evolved well across flights, identifies structural issues,
|
||||
and assesses long-term maintainability of what was built.
|
||||
- **Actions**: debrief-interview
|
||||
|
||||
## Interaction Protocol
|
||||
|
||||
### Architect Interview
|
||||
1. Flight Director loads full mission context (all flights, logs, debriefs, code)
|
||||
2. Flight Director spawns **Architect** to review overall system evolution
|
||||
3. Architect examines architectural changes across all flights
|
||||
4. Architect provides structured debrief input
|
||||
|
||||
### Human Interview
|
||||
1. Flight Director interviews human with mission-level questions
|
||||
2. Covers coordination experience, outcome satisfaction, process feedback
|
||||
|
||||
### Synthesis
|
||||
1. Flight Director synthesizes Architect input + human input + document analysis
|
||||
2. Generates mission debrief artifact
|
||||
|
||||
## Template Variables
|
||||
|
||||
The Flight Director substitutes these variables in prompts at runtime:
|
||||
|
||||
| Variable | Description |
|
||||
|----------|-------------|
|
||||
| `{project-slug}` | Project identifier from projects.md |
|
||||
|
||||
## Prompts
|
||||
|
||||
### Architect: Debrief Interview
|
||||
|
||||
```
|
||||
role: architect
|
||||
phase: mission-debrief
|
||||
project: {project-slug}
|
||||
action: debrief-interview
|
||||
|
||||
Review the system changes produced across all flights in this mission. Examine
|
||||
the architectural evolution, pattern consistency, and structural health.
|
||||
|
||||
Provide structured input for the debrief:
|
||||
|
||||
**Architectural Assessment**:
|
||||
- Did the system's architecture improve, maintain, or degrade?
|
||||
- Are there structural issues that emerged across flights?
|
||||
- Were design decisions consistent across the mission?
|
||||
|
||||
**Pattern Analysis**:
|
||||
- What patterns were established? Are they good ones?
|
||||
- Is there inconsistency that should be reconciled?
|
||||
- Are there reusable patterns worth documenting?
|
||||
|
||||
**Technical Debt**:
|
||||
- What debt was introduced across the mission?
|
||||
- What's the priority for addressing it?
|
||||
- Are there quick wins vs. long-term concerns?
|
||||
|
||||
**Forward-Looking**:
|
||||
- What architectural considerations should the next mission account for?
|
||||
- Are there scaling or performance concerns on the horizon?
|
||||
- What documentation or conventions should be established?
|
||||
```
|
||||
@@ -0,0 +1,72 @@
|
||||
# Mission Design — Project Crew
|
||||
|
||||
Crew definitions for mission planning. The Flight Director interviews the human
|
||||
and uses project-side agents to validate technical viability.
|
||||
|
||||
## Crew
|
||||
|
||||
### Architect
|
||||
- **Context**: {project}/
|
||||
- **Model**: Sonnet
|
||||
- **Role**: Validates technical viability of proposed outcomes. Ensures business
|
||||
goals align with what's actually possible given the codebase, stack, and
|
||||
constraints. Does NOT add implementation details — focuses on feasibility,
|
||||
risks, and architectural implications.
|
||||
- **Actions**: validate-mission
|
||||
|
||||
## Interaction Protocol
|
||||
|
||||
### Research & Interview
|
||||
1. Flight Director researches codebase and external context
|
||||
2. Flight Director interviews human about outcomes, stakeholders, constraints, criteria
|
||||
3. Human must explicitly sign off before proceeding — iterate until approved
|
||||
|
||||
### Technical Viability Check
|
||||
1. Flight Director spawns **Architect** to review draft mission against codebase
|
||||
2. Architect evaluates: Are proposed outcomes achievable? Are there technical risks
|
||||
the mission doesn't account for? Does the stack support what's being asked?
|
||||
3. Architect provides assessment — feasible / feasible with caveats / not feasible
|
||||
4. Flight Director incorporates feedback, re-interviews human if scope changes
|
||||
5. Human gives final sign-off
|
||||
|
||||
## Template Variables
|
||||
|
||||
The Flight Director substitutes these variables in prompts at runtime:
|
||||
|
||||
| Variable | Description |
|
||||
|----------|-------------|
|
||||
| `{project-slug}` | Project identifier from projects.md |
|
||||
|
||||
## Prompts
|
||||
|
||||
### Architect: Validate Mission
|
||||
|
||||
```
|
||||
role: architect
|
||||
phase: mission-design
|
||||
project: {project-slug}
|
||||
action: validate-mission
|
||||
|
||||
Read the draft mission artifact. Cross-reference proposed outcomes and success
|
||||
criteria against the actual codebase, stack, and project constraints.
|
||||
|
||||
Evaluate:
|
||||
1. Technical feasibility — can the proposed outcomes be achieved with this stack?
|
||||
2. Architectural implications — does this require significant structural changes?
|
||||
3. Risk factors — what technical risks could block success?
|
||||
4. Constraints accuracy — are stated constraints complete and correct?
|
||||
5. Sizing — is the scope realistic for a mission (days-to-weeks)?
|
||||
|
||||
Provide structured output:
|
||||
|
||||
**Feasibility**: feasible | feasible with caveats | not feasible
|
||||
|
||||
**Risks** (ranked by impact):
|
||||
- [high/medium/low] Description — mitigation
|
||||
|
||||
**Caveats** (if feasible with caveats):
|
||||
- Description
|
||||
|
||||
**Questions** (for the Flight Director):
|
||||
- Question
|
||||
```
|
||||
@@ -0,0 +1,592 @@
|
||||
# Routine Maintenance — Project Crew
|
||||
|
||||
Crew definitions for codebase health inspection. The Flight Director
|
||||
coordinates specialist reviewers for automated checks and an Architect
|
||||
for severity assessment and roundtable moderation.
|
||||
|
||||
## Crew
|
||||
|
||||
### Inspector
|
||||
- **Context**: {project}/
|
||||
- **Model**: Sonnet
|
||||
- **Role**: Performs broad read-only codebase inspection across all applicable
|
||||
categories. Runs test suites, linters, type checkers, audit commands, and
|
||||
manual code review. Returns structured findings without modifying any files.
|
||||
- **Actions**: inspect-codebase
|
||||
|
||||
### Security Reviewer
|
||||
- **Context**: {project}/
|
||||
- **Model**: Sonnet
|
||||
- **Role**: Performs focused manual security review of authentication flows,
|
||||
injection surfaces, secrets handling, CORS/CSP configuration, and data
|
||||
exposure risks. Goes deeper than the Inspector's Category 1 automated checks
|
||||
with targeted code path analysis.
|
||||
- **Actions**: review-security
|
||||
|
||||
### CI/CD Reviewer (optional)
|
||||
- **Context**: {project}/
|
||||
- **Model**: Sonnet
|
||||
- **Enabled**: false (enable when project has CI/CD pipelines)
|
||||
- **Role**: Reviews CI/CD pipeline configuration, build security, deployment
|
||||
practices, and environment consistency. Evaluates pipeline definitions,
|
||||
secret management in CI, and deployment safeguards.
|
||||
- **Actions**: review-cicd
|
||||
|
||||
### Accessibility Reviewer (optional)
|
||||
- **Context**: {project}/
|
||||
- **Model**: Sonnet
|
||||
- **Enabled**: false (enable when project has user-facing UI)
|
||||
- **Role**: Reviews codebase for accessibility compliance against WCAG 2.1 AA
|
||||
standards. Evaluates semantic HTML, keyboard navigation, screen reader
|
||||
compatibility, color contrast, ARIA usage, and focus management.
|
||||
- **Actions**: review-accessibility
|
||||
|
||||
### Architect
|
||||
- **Context**: {project}/
|
||||
- **Model**: Opus
|
||||
- **Role**: Reviews all reviewer findings alongside debrief context. Assigns
|
||||
severity per finding, challenges questionable assessments, moderates
|
||||
roundtable discussion with specialist reviewers, and produces final codebase
|
||||
assessment with maintenance scope recommendation.
|
||||
- **Actions**: assess-findings, moderate-roundtable
|
||||
|
||||
## Separation Rules
|
||||
|
||||
- All reviewers are strictly **read-only** — they may run commands but must NEVER modify files
|
||||
- Each reviewer operates independently during Phase 4 — no cross-reviewer communication
|
||||
- The Architect sees all reviewer findings but not their internal reasoning
|
||||
- Roundtable discussion is mediated by the Flight Director, not direct agent-to-agent
|
||||
|
||||
**Note:** Handoff signals are not used in this crew. The routine-maintenance workflow is
|
||||
sequential (review → assess → roundtable → report) and does not use the leg-based
|
||||
handoff protocol.
|
||||
|
||||
## Interaction Protocol
|
||||
|
||||
### Delegation Planning
|
||||
1. Flight Director loads context, conducts scoping interview with human
|
||||
2. Flight Director assesses project size and identifies module boundaries
|
||||
3. Flight Director builds delegation plan (agent count, scope assignments, partitioning)
|
||||
4. Human approves or adjusts the plan
|
||||
|
||||
### Specialist Review
|
||||
1. Flight Director spawns agents per the delegation plan — Inspector(s) + Security Reviewer always, CI/CD and Accessibility if enabled
|
||||
2. Each agent receives its scope assignment and output discipline rules
|
||||
3. All reviewers perform read-only checks and return structured findings
|
||||
4. For partitioned Inspectors: Flight Director merges and de-duplicates findings
|
||||
|
||||
### Initial Assessment
|
||||
1. Flight Director spawns **Architect** (Opus) with all reviewer findings + debrief context
|
||||
2. Architect assigns initial severity per finding
|
||||
3. Architect raises challenges or questions directed at specific reviewers
|
||||
|
||||
### Roundtable
|
||||
1. Flight Director routes Architect's challenges to the relevant reviewers
|
||||
2. Each challenged reviewer responds with evidence, rebuttals, or concurrence
|
||||
3. Flight Director collects responses and spawns Architect for final resolution
|
||||
4. Architect produces final assessment incorporating roundtable discussion
|
||||
5. Max 2 roundtable cycles — unresolved disagreements go to the human
|
||||
|
||||
### Human Review and Scoping
|
||||
1. Flight Director presents findings to human, grouped by severity
|
||||
2. Human confirms, overrides, or adjusts findings
|
||||
3. If Maintenance Required: Flight Director recommends a shortlist (~5-7 items); human selects scope for maintenance mission
|
||||
4. Deferred findings remain in the report for future cycles
|
||||
|
||||
### Synthesis
|
||||
1. Flight Director generates maintenance report artifact
|
||||
2. If confirmed: Flight Director creates maintenance mission scaffold
|
||||
|
||||
## Template Variables
|
||||
|
||||
The Flight Director substitutes these variables in prompts at runtime:
|
||||
|
||||
| Variable | Description |
|
||||
|----------|-------------|
|
||||
| `{project-slug}` | Project identifier from projects.md |
|
||||
| `{applicable-categories}` | Numbered list of categories to inspect (1-7 always, 8-10 conditional) |
|
||||
| `{project-stack}` | Language, framework, test runner, linter, formatter, type checker, audit tool |
|
||||
| `{known-debt}` | Debt items from mission debrief and flight debriefs (if available, otherwise "None — ad-hoc inspection") |
|
||||
| `{known-security-debt}` | Security-specific debt items extracted from debriefs (if available, otherwise "None") |
|
||||
| `{known-cicd-debt}` | CI/CD-specific debt items extracted from debriefs (if available, otherwise "None") |
|
||||
| `{areas-of-concern}` | User-specified areas of concern from scoping interview |
|
||||
| `{scope-assignment}` | Scope restriction from the delegation plan (files, directories, or "full project") |
|
||||
| `{all-reviewer-findings}` | Combined structured findings from all reviewers (used in Architect prompts) |
|
||||
| `{architect-challenges}` | Architect's challenges directed at a specific reviewer (used in roundtable) |
|
||||
| `{roundtable-responses}` | All reviewer rebuttals and responses from the roundtable (used in resolution) |
|
||||
|
||||
## Prompts
|
||||
|
||||
### Inspector: Inspect Codebase
|
||||
|
||||
```
|
||||
role: inspector
|
||||
phase: routine-maintenance
|
||||
project: {project-slug}
|
||||
action: inspect-codebase
|
||||
|
||||
Perform a read-only codebase inspection across the following categories:
|
||||
{applicable-categories}
|
||||
|
||||
Project stack: {project-stack}
|
||||
|
||||
Known debt from prior debriefs, if available (do not re-flag as new discoveries):
|
||||
{known-debt}
|
||||
|
||||
User areas of concern:
|
||||
{areas-of-concern}
|
||||
|
||||
IMPORTANT: You are strictly READ-ONLY. You may run test suites, linters, type
|
||||
checkers, audit commands, and read any file. You must NEVER modify source files,
|
||||
configuration, dependencies, or any other project file.
|
||||
|
||||
**Scope assignment**: If a scope restriction is provided, inspect only the
|
||||
specified files and directories. Run automated tools against the full project
|
||||
(tools are fast and comprehensive), but limit manual code review to the assigned
|
||||
scope. If no scope restriction is given, inspect the full project.
|
||||
|
||||
For each applicable category, perform the checks listed below and report findings.
|
||||
|
||||
**Category 1 — Security**:
|
||||
- Review auth paths (focus on recently changed code if mission context is available)
|
||||
- Check input sanitization on endpoints
|
||||
- Verify CORS/CSP configuration
|
||||
- Scan for hardcoded secrets (API keys, tokens, passwords)
|
||||
- Review third-party data flow for exposure risks
|
||||
|
||||
**Category 2 — Test Systems**:
|
||||
- Run the test suite and report results
|
||||
- Check coverage delta (if tooling available)
|
||||
- Find new code paths without test coverage
|
||||
- Detect flaky tests (tests that pass/fail inconsistently)
|
||||
- Check test performance (slow tests)
|
||||
- Find hardcoded test data that should be fixtures
|
||||
|
||||
**Category 3 — Dependency Health**:
|
||||
- Run the dependency audit command (npm audit, cargo audit, etc.)
|
||||
- Check for outdated dependencies
|
||||
- Find unused dependencies
|
||||
- Verify lockfile is consistent
|
||||
- Check license compliance
|
||||
- Check for Dependabot/Renovate PRs and security alerts
|
||||
- Assess auto-merge eligibility for patch updates
|
||||
|
||||
**Category 4 — Code Quality**:
|
||||
- Run linter and formatter check (report violations, do NOT fix)
|
||||
- Find dead code (unused exports, unreachable branches)
|
||||
- Grep for TODOs/FIXMEs/HACKs (focus on recently introduced ones if mission context is available)
|
||||
- Detect code duplication
|
||||
- Check pattern consistency with existing codebase
|
||||
|
||||
**Category 5 — Type & API Safety**:
|
||||
- Run the type checker and report errors
|
||||
- Find `any` casts (TypeScript), `unsafe` blocks (Rust), or equivalent
|
||||
- Check for unhandled errors or missing error types
|
||||
- Detect API contract drift (mismatched types between client/server)
|
||||
- Find deprecated API usage
|
||||
|
||||
**Category 6 — Documentation**:
|
||||
- Check README accuracy against current state
|
||||
- Verify new public interfaces have documentation
|
||||
- Find stale comments referencing old behavior
|
||||
- Check CHANGELOG for completeness
|
||||
- Verify CLAUDE.md accuracy
|
||||
|
||||
**Category 7 — Git & Branch Hygiene**:
|
||||
- List stale branches (merged but not deleted)
|
||||
- Find large committed files (>1MB)
|
||||
- Scan for secrets in recent git history
|
||||
- Check commit message quality
|
||||
- Check for GitHub/remote warnings (secret scanning, code scanning alerts)
|
||||
- Find merge conflicts against main
|
||||
- Check upstream divergence
|
||||
|
||||
**Category 8 — CI/CD Pipeline** (if applicable):
|
||||
- Check CI status on main/default branch
|
||||
- Detect build time regression
|
||||
- Find skipped or disabled checks
|
||||
- Check config drift between environments
|
||||
|
||||
**Category 9 — Infrastructure & Config** (if applicable):
|
||||
- Check env var documentation (.env.example vs actual usage)
|
||||
- Find pending database migrations
|
||||
- Find temporary feature flags that should be removed
|
||||
|
||||
**Category 10 — Performance & Observability** (if applicable):
|
||||
- Find new operations without logging/tracing
|
||||
- Detect potential N+1 queries
|
||||
- Check bundle size (if web project)
|
||||
- Find resource cleanup issues (unclosed connections, missing cleanup)
|
||||
|
||||
**Output discipline**: Keep findings concise. Do not paste full command output,
|
||||
full file contents, or long dependency lists. Summarize and reference.
|
||||
|
||||
**Output format**: Return findings as a structured list per category:
|
||||
|
||||
## Category {N}: {Name}
|
||||
|
||||
### Finding: {title}
|
||||
- **Evidence**: {one-line summary with file paths and line numbers}
|
||||
- **Impact**: {what could go wrong}
|
||||
- **Recommendation**: {what to do about it}
|
||||
|
||||
Include code excerpts only for Critical or High severity findings.
|
||||
|
||||
If a category has no issues, report:
|
||||
## Category {N}: {Name}
|
||||
No issues found.
|
||||
```
|
||||
|
||||
### Security Reviewer: Review Security
|
||||
|
||||
```
|
||||
role: security-reviewer
|
||||
phase: routine-maintenance
|
||||
project: {project-slug}
|
||||
action: review-security
|
||||
|
||||
Perform a focused, manual security review of the codebase. You go deeper than
|
||||
automated scanning — trace actual code paths and evaluate security posture.
|
||||
|
||||
Project stack: {project-stack}
|
||||
|
||||
Known security debt from prior debriefs (do not re-flag as new discoveries):
|
||||
{known-security-debt}
|
||||
|
||||
User areas of concern:
|
||||
{areas-of-concern}
|
||||
|
||||
IMPORTANT: You are strictly READ-ONLY. You may run commands and read any file.
|
||||
You must NEVER modify source files, configuration, dependencies, or any other
|
||||
project file.
|
||||
|
||||
**Scope assignment**: Review only the files and areas specified. If no scope
|
||||
restriction is given, review the full project.
|
||||
|
||||
**Output discipline**: Keep findings concise. Include code excerpts only for
|
||||
Critical or High severity findings. Do not paste full file contents or raw
|
||||
command output.
|
||||
|
||||
**Review areas**:
|
||||
|
||||
1. **Authentication & Authorization**
|
||||
- Trace auth flows end-to-end (login, token refresh, logout)
|
||||
- Check for missing auth checks on protected routes/endpoints
|
||||
- Verify role-based access control is enforced consistently
|
||||
- Look for privilege escalation paths
|
||||
|
||||
2. **Injection Surfaces**
|
||||
- SQL/NoSQL injection: check all database queries for parameterization
|
||||
- Command injection: check shell executions, subprocess calls
|
||||
- XSS: check output encoding in templates and API responses
|
||||
- Path traversal: check file system operations with user input
|
||||
|
||||
3. **Secrets & Configuration**
|
||||
- Scan for hardcoded credentials, API keys, tokens in source
|
||||
- Check .env files are gitignored
|
||||
- Verify secrets are not logged or included in error responses
|
||||
- Check for overly permissive CORS configuration
|
||||
|
||||
4. **Data Handling**
|
||||
- Review PII/sensitive data flows — where is it stored, logged, transmitted?
|
||||
- Check encryption at rest and in transit
|
||||
- Verify sensitive data is not cached inappropriately
|
||||
- Check for data leakage in error messages or debug output
|
||||
|
||||
5. **Dependency Risk**
|
||||
- Cross-reference critical dependencies against known CVE databases
|
||||
- Check for dependencies with known supply-chain risks
|
||||
- Verify integrity checks (lockfile hashes, checksums)
|
||||
|
||||
**Output format**: Return findings as a structured list:
|
||||
|
||||
### Finding: {title}
|
||||
- **Severity estimate**: critical | high | medium | low
|
||||
- **Attack vector**: {how this could be exploited}
|
||||
- **Evidence**: {specific code paths, file:line references}
|
||||
- **Recommendation**: {what to do about it}
|
||||
|
||||
If no security issues found, state: "No security issues identified."
|
||||
```
|
||||
|
||||
### CI/CD Reviewer: Review CI/CD
|
||||
|
||||
```
|
||||
role: cicd-reviewer
|
||||
phase: routine-maintenance
|
||||
project: {project-slug}
|
||||
action: review-cicd
|
||||
|
||||
Perform a focused review of the project's CI/CD pipeline configuration,
|
||||
build security, and deployment practices.
|
||||
|
||||
Project stack: {project-stack}
|
||||
|
||||
Known CI/CD debt from prior debriefs (do not re-flag as new discoveries):
|
||||
{known-cicd-debt}
|
||||
|
||||
User areas of concern:
|
||||
{areas-of-concern}
|
||||
|
||||
IMPORTANT: You are strictly READ-ONLY. You may run commands and read any file.
|
||||
You must NEVER modify source files, configuration, dependencies, or any other
|
||||
project file.
|
||||
|
||||
**Output discipline**: Keep findings concise. Include code excerpts only for
|
||||
Critical or High severity findings. Do not paste full file contents or raw
|
||||
command output.
|
||||
|
||||
**Review areas**:
|
||||
|
||||
1. **Pipeline Configuration**
|
||||
- Review pipeline definitions (GitHub Actions, GitLab CI, Concourse, etc.)
|
||||
- Check for outdated action/image versions
|
||||
- Verify branch protection rules are consistent with pipeline triggers
|
||||
- Detect redundant or overlapping pipeline steps
|
||||
|
||||
2. **Build Security**
|
||||
- Check for secrets exposed in build logs or artifacts
|
||||
- Verify pipeline secrets are scoped appropriately (not org-wide when repo-level suffices)
|
||||
- Check for unpinned dependencies in build steps (e.g., `uses: action@main` vs `@v4.1.0`)
|
||||
- Review build artifact permissions and retention policies
|
||||
|
||||
3. **Deployment Safeguards**
|
||||
- Verify deployment gates exist (approval steps, environment protection rules)
|
||||
- Check rollback capability — is there a documented or automated rollback path?
|
||||
- Verify environment promotion flow (dev → staging → prod) is enforced
|
||||
- Check for drift between environment configurations
|
||||
|
||||
4. **Pipeline Health**
|
||||
- Check recent build success rates and durations
|
||||
- Identify flaky pipeline steps
|
||||
- Find disabled or skipped checks that should be active
|
||||
- Check for resource waste (oversized runners, unnecessary matrix builds)
|
||||
|
||||
**Output format**: Return findings as a structured list:
|
||||
|
||||
### Finding: {title}
|
||||
- **Severity estimate**: critical | high | medium | low
|
||||
- **Evidence**: {specific config files, pipeline definitions, line references}
|
||||
- **Impact**: {what could go wrong}
|
||||
- **Recommendation**: {what to do about it}
|
||||
|
||||
If no CI/CD issues found, state: "No CI/CD issues identified."
|
||||
```
|
||||
|
||||
### Accessibility Reviewer: Review Accessibility
|
||||
|
||||
```
|
||||
role: accessibility-reviewer
|
||||
phase: routine-maintenance
|
||||
project: {project-slug}
|
||||
action: review-accessibility
|
||||
|
||||
Perform a focused accessibility review of the project's user-facing UI.
|
||||
Evaluate against WCAG 2.1 AA standards.
|
||||
|
||||
Project stack: {project-stack}
|
||||
|
||||
IMPORTANT: You are strictly READ-ONLY. You may run commands and read any file.
|
||||
You must NEVER modify source files, configuration, dependencies, or any other
|
||||
project file.
|
||||
|
||||
**Output discipline**: Keep findings concise. Include code excerpts only for
|
||||
Critical or High severity findings. Do not paste full file contents or raw
|
||||
command output.
|
||||
|
||||
**Review areas**:
|
||||
|
||||
1. **Semantic HTML & Structure**
|
||||
- Check heading hierarchy (h1-h6 in logical order)
|
||||
- Verify landmark regions (main, nav, aside, footer)
|
||||
- Check form labels and fieldset/legend usage
|
||||
- Verify list markup for list-like content
|
||||
|
||||
2. **Keyboard Navigation**
|
||||
- Check all interactive elements are reachable via Tab
|
||||
- Verify custom widgets have appropriate keyboard handlers
|
||||
- Check for keyboard traps (modals, dropdowns)
|
||||
- Verify skip-to-content links exist
|
||||
|
||||
3. **Screen Reader Compatibility**
|
||||
- Check ARIA attributes for correctness and necessity
|
||||
- Verify dynamic content updates use live regions
|
||||
- Check image alt text (present, meaningful, not redundant)
|
||||
- Verify form error messages are associated with inputs
|
||||
|
||||
4. **Visual & Color**
|
||||
- Check text contrast ratios (4.5:1 normal, 3:1 large text)
|
||||
- Verify UI component contrast (3:1 against background)
|
||||
- Check that color is not the sole indicator of meaning
|
||||
- Verify visible focus indicators on all interactive elements
|
||||
|
||||
5. **Motion & Timing**
|
||||
- Check for prefers-reduced-motion support on animations
|
||||
- Verify no auto-playing media without controls
|
||||
- Check for appropriate timeouts with user notification
|
||||
|
||||
**Output format**: Return findings as a structured list:
|
||||
|
||||
### Finding: {title}
|
||||
- **WCAG criterion**: {e.g., 1.1.1 Non-text Content, Level A}
|
||||
- **Severity estimate**: critical | high | medium | low
|
||||
- **Evidence**: {specific components, file:line references}
|
||||
- **Recommendation**: {what to do about it}
|
||||
|
||||
If no accessibility issues found, state: "No accessibility issues identified."
|
||||
```
|
||||
|
||||
### Architect: Assess Findings
|
||||
|
||||
```
|
||||
role: architect
|
||||
phase: routine-maintenance
|
||||
project: {project-slug}
|
||||
action: assess-findings
|
||||
|
||||
Review all specialist findings and assign severity ratings. You have access to:
|
||||
- All reviewer findings (provided below)
|
||||
- Known debt context from debriefs and prior maintenance reports (if available)
|
||||
|
||||
{all-reviewer-findings}
|
||||
|
||||
Known debt from debriefs, if available (already acknowledged — note as "previously identified" if re-found):
|
||||
{known-debt}
|
||||
|
||||
For each finding, assign one of:
|
||||
- **Pass** — No issue (reviewer flagged something that is actually fine)
|
||||
- **Advisory** — Minor issue, acceptable to defer
|
||||
- **Action Required** — Should be addressed before next major work cycle
|
||||
- **Critical** — Blocks further work, immediate attention needed
|
||||
|
||||
**Assessment criteria**:
|
||||
- Does this finding represent a real risk, or is it noise?
|
||||
- Is the severity proportional to the actual impact?
|
||||
- Would this compound if left for another cycle?
|
||||
- Is this a new discovery or previously acknowledged debt?
|
||||
- Do multiple reviewers corroborate the same issue?
|
||||
- Are any reviewer assessments questionable — too alarmist or too dismissive?
|
||||
|
||||
**Challenge reviewers** where you disagree or need clarification. For each
|
||||
challenge, name the reviewer and provide your specific question or objection.
|
||||
This initiates the roundtable discussion.
|
||||
|
||||
**Output format**:
|
||||
|
||||
## Overall Assessment
|
||||
{Flight Ready | Maintenance Required}
|
||||
|
||||
## Findings
|
||||
|
||||
| # | Source | Category | Finding | Initial Severity | New/Known | Notes |
|
||||
|---|--------|----------|---------|-----------------|-----------|-------|
|
||||
| 1 | {reviewer} | {cat} | {title} | {severity} | {new/known} | {brief note} |
|
||||
|
||||
## Challenges for Roundtable
|
||||
|
||||
### To {Reviewer Name}: {question or objection}
|
||||
{Context for why you're challenging this finding — what seems off, what
|
||||
additional evidence would change your assessment, or why you think the
|
||||
severity should be different.}
|
||||
|
||||
## Severity Summary
|
||||
- Critical: {N}
|
||||
- Action Required: {N}
|
||||
- Advisory: {N}
|
||||
- Pass: {N}
|
||||
|
||||
## Recommended Maintenance Scope
|
||||
(Only if Maintenance Required)
|
||||
|
||||
Group related Action Required and Critical findings into suggested flight scopes:
|
||||
|
||||
### Flight: {suggested title}
|
||||
- Finding #{N}: {title}
|
||||
- Finding #{N}: {title}
|
||||
- Rationale: {why these group together}
|
||||
```
|
||||
|
||||
### Reviewer: Roundtable Rebuttal
|
||||
|
||||
```
|
||||
role: {reviewer-role}
|
||||
phase: routine-maintenance
|
||||
project: {project-slug}
|
||||
action: roundtable-rebuttal
|
||||
|
||||
The Architect has challenged one or more of your findings during the
|
||||
severity assessment roundtable. Respond to each challenge with evidence.
|
||||
|
||||
Architect's challenges:
|
||||
{architect-challenges}
|
||||
|
||||
For each challenge:
|
||||
1. **Provide additional evidence** — code paths, specific examples, tool output
|
||||
that supports your finding
|
||||
2. **Concede if appropriate** — if the Architect raises a valid point, adjust
|
||||
your assessment rather than defending a weak position
|
||||
3. **Clarify misunderstandings** — if the Architect misread your finding,
|
||||
restate it with more precision
|
||||
|
||||
Be direct and evidence-based. The goal is consensus, not debate for its own sake.
|
||||
|
||||
**Output format**:
|
||||
|
||||
### Re: {Architect's challenge title}
|
||||
- **Response**: {concur | rebut | clarify}
|
||||
- **Evidence**: {additional code paths, line references, tool output}
|
||||
- **Revised assessment** (if changed): {updated severity or recommendation}
|
||||
```
|
||||
|
||||
### Architect: Roundtable Resolution
|
||||
|
||||
```
|
||||
role: architect
|
||||
phase: routine-maintenance
|
||||
project: {project-slug}
|
||||
action: roundtable-resolution
|
||||
|
||||
Review the roundtable responses from specialist reviewers and produce your
|
||||
final assessment.
|
||||
|
||||
Reviewer responses:
|
||||
{roundtable-responses}
|
||||
|
||||
For each challenged finding:
|
||||
1. **Weigh the evidence** — did the reviewer provide convincing support?
|
||||
2. **Assign final severity** — this is your call, but account for reviewer expertise
|
||||
3. **Note reasoning** — briefly explain why you maintained or changed severity
|
||||
|
||||
If any disagreements remain unresolved, flag them for human review rather than
|
||||
forcing consensus.
|
||||
|
||||
**Output format**:
|
||||
|
||||
## Roundtable Resolution
|
||||
|
||||
### Finding #{N}: {title}
|
||||
- **Original severity**: {severity}
|
||||
- **Reviewer response**: {concur | rebut | clarify} — {summary}
|
||||
- **Final severity**: {severity}
|
||||
- **Reasoning**: {why}
|
||||
|
||||
## Updated Overall Assessment
|
||||
{Flight Ready | Maintenance Required}
|
||||
|
||||
## Updated Severity Summary
|
||||
- Critical: {N}
|
||||
- Action Required: {N}
|
||||
- Advisory: {N}
|
||||
- Pass: {N}
|
||||
|
||||
## Unresolved Disagreements (if any)
|
||||
{Finding and both perspectives — for human to decide}
|
||||
|
||||
## Updated Recommended Maintenance Scope
|
||||
(Only if Maintenance Required — incorporate roundtable outcomes)
|
||||
|
||||
### Flight: {suggested title}
|
||||
- Finding #{N}: {title}
|
||||
- Finding #{N}: {title}
|
||||
- Rationale: {why these group together}
|
||||
```
|
||||
@@ -0,0 +1,104 @@
|
||||
# Init-Project Migrations
|
||||
|
||||
When `/init-project` runs, it checks for legacy directory layouts from earlier versions of Flight Control and offers to migrate them. Migrations are idempotent — they only apply when the old layout is detected and the new layout doesn't yet exist.
|
||||
|
||||
## Migration Registry
|
||||
|
||||
### 001 — Rename `.flight-ops/` to `.flightops/`
|
||||
|
||||
Early versions of Flight Control used `.flight-ops/` (with a hyphen). The current convention is `.flightops/` (no hyphen).
|
||||
|
||||
**Detection** (returns true if migration is needed):
|
||||
|
||||
```bash
|
||||
[[ -d "{target-project}/.flight-ops" && ! -d "{target-project}/.flightops" ]]
|
||||
```
|
||||
|
||||
**Actions:**
|
||||
|
||||
1. Rename the directory:
|
||||
```bash
|
||||
mv "{target-project}/.flight-ops" "{target-project}/.flightops"
|
||||
```
|
||||
2. Update `.gitignore` if it references the old name:
|
||||
```bash
|
||||
sed -i 's/\.flight-ops/\.flightops/g' "{target-project}/.gitignore"
|
||||
```
|
||||
|
||||
**User message:**
|
||||
> Renaming `.flight-ops/` → `.flightops/` (updated naming convention)
|
||||
|
||||
---
|
||||
|
||||
### 002 — Rename `phases/` to `agent-crews/`
|
||||
|
||||
Early versions stored crew definitions in `.flightops/phases/`. The current convention is `.flightops/agent-crews/`.
|
||||
|
||||
**Detection** (returns true if migration is needed):
|
||||
|
||||
```bash
|
||||
[[ -d "{target-project}/.flightops/phases" && ! -d "{target-project}/.flightops/agent-crews" ]]
|
||||
```
|
||||
|
||||
> **Note:** This runs after migration 001, so it checks the post-rename `.flightops/` path.
|
||||
|
||||
**Actions:**
|
||||
|
||||
1. Rename the subdirectory:
|
||||
```bash
|
||||
mv "{target-project}/.flightops/phases" "{target-project}/.flightops/agent-crews"
|
||||
```
|
||||
|
||||
**User message:**
|
||||
> Renaming `phases/` → `agent-crews/` (updated naming convention)
|
||||
|
||||
---
|
||||
|
||||
### 003 — Update lifecycle states to unified model
|
||||
|
||||
Flight Control now uses a unified lifecycle for both flights and legs: `planning → ready → in-flight → landed → completed` (or `aborted`). This replaces the old divergent states:
|
||||
|
||||
- **Flights**: `diverted` → `aborted`; added `completed` after `landed`
|
||||
- **Legs**: `queued` → `planning`; `review` → `landed`; `blocked` → `aborted`; added `ready` and `completed`
|
||||
|
||||
**Detection** (returns true if migration is needed):
|
||||
|
||||
```bash
|
||||
grep -rql 'queued\|diverted\|blocked\|review.*completed' "{target-project}/.flightops/ARTIFACTS.md" 2>/dev/null
|
||||
```
|
||||
|
||||
> **Note:** This runs after migrations 001 and 002, so it checks the post-rename `.flightops/` path.
|
||||
|
||||
**Actions:**
|
||||
|
||||
1. Update state definitions in ARTIFACTS.md:
|
||||
- Replace flight status line: `planning | ready | in-flight | landed | diverted` → `planning | ready | in-flight | landed | completed | aborted`
|
||||
- Replace leg status line: `queued | in-flight | review | completed | blocked` → `planning | ready | in-flight | landed | completed | aborted`
|
||||
- Replace flight state tracking: `planning → ready → in-flight → landed (or diverted)` with `planning → ready → in-flight → landed → completed (or aborted)`
|
||||
- Replace leg state tracking: `queued → in-flight → review → completed (or blocked)` with `planning → ready → in-flight → landed → completed (or aborted)`
|
||||
- Replace `landed | diverted` → `landed | aborted` in debrief templates
|
||||
- Replace `landed/diverted` → `landed/aborted` in debrief templates
|
||||
- Replace `completed | in-flight | blocked` → `completed | landed | in-flight | aborted` in flight log templates
|
||||
|
||||
2. Update existing artifact files in the project (if any):
|
||||
- In flight artifacts: replace `**Status**: diverted` → `**Status**: aborted`
|
||||
- In leg artifacts: replace `**Status**: queued` → `**Status**: planning`, `**Status**: review` → `**Status**: landed`, `**Status**: blocked` → `**Status**: aborted`
|
||||
- In flight log entries: replace `**Status**: blocked` → `**Status**: aborted`
|
||||
|
||||
Find artifacts using the locations defined in ARTIFACTS.md (typically `missions/` directory for file-based projects).
|
||||
|
||||
**User message:**
|
||||
> Updating lifecycle states to unified model: flights and legs now share `planning → ready → in-flight → landed → completed (or aborted)`
|
||||
|
||||
---
|
||||
|
||||
## Adding Future Migrations
|
||||
|
||||
To add a new migration:
|
||||
|
||||
1. Assign the next sequential ID (e.g., `004`)
|
||||
2. Write a **Detection** check that returns true only when the migration is needed and false if already applied (idempotent)
|
||||
3. List the **Actions** to perform — prefer `mv` over copy-and-delete to preserve file contents and git history
|
||||
4. Write a short **User message** shown during the migration summary
|
||||
5. Consider ordering — if the migration depends on a previous one having run, note that in the detection section
|
||||
6. Keep migrations non-destructive: rename and update references, never delete user content
|
||||
@@ -0,0 +1,575 @@
|
||||
# Artifact System: Filesystem
|
||||
|
||||
This project stores Flight Control artifacts as markdown files in the repository.
|
||||
|
||||
## Directory Structure
|
||||
|
||||
```
|
||||
{project}/
|
||||
├── missions/
|
||||
│ └── {NN}-{mission-slug}/
|
||||
│ ├── mission.md
|
||||
│ ├── mission-debrief.md
|
||||
│ └── flights/
|
||||
│ └── {NN}-{flight-slug}/
|
||||
│ ├── flight.md
|
||||
│ ├── flight-log.md
|
||||
│ ├── flight-briefing.md
|
||||
│ ├── flight-debrief.md
|
||||
│ └── legs/
|
||||
│ └── {NN}-{leg-slug}.md
|
||||
└── maintenance/
|
||||
└── {YYYY-MM-DD}.md
|
||||
```
|
||||
|
||||
## Naming Conventions
|
||||
|
||||
- **Slugs**: Lowercase, kebab-case, derived from title (e.g., "User Authentication" → `user-authentication`)
|
||||
- **Sequence numbers**: Missions, flights, and legs use two-digit prefixes (`01`, `02`, etc.) for ordering
|
||||
|
||||
---
|
||||
|
||||
## Core Artifacts
|
||||
|
||||
### Mission
|
||||
|
||||
| Property | Value |
|
||||
|----------|-------|
|
||||
| Location | `missions/{NN}-{slug}/mission.md` |
|
||||
| Created | During mission planning |
|
||||
| Updated | Until status changes to `active` |
|
||||
|
||||
**Format:**
|
||||
|
||||
```markdown
|
||||
# Mission: {Title}
|
||||
|
||||
**Status**: planning | active | completed | aborted
|
||||
|
||||
## Outcome
|
||||
What success looks like in human terms.
|
||||
|
||||
## Context
|
||||
Why this mission matters now. Background information.
|
||||
|
||||
## Success Criteria
|
||||
- [ ] Criterion 1 (observable, binary)
|
||||
- [ ] Criterion 2
|
||||
- [ ] Criterion 3
|
||||
|
||||
## Stakeholders
|
||||
Who cares about this outcome and why.
|
||||
|
||||
## Constraints
|
||||
Non-negotiable boundaries.
|
||||
|
||||
## Environment Requirements
|
||||
- Development environment (devcontainer, local toolchain, cloud IDE)
|
||||
- Runtime requirements (GUI, audio hardware, network access)
|
||||
- Special tooling (Docker, specific CLI versions)
|
||||
|
||||
## Open Questions
|
||||
Unknowns that need resolution during execution.
|
||||
|
||||
## Known Issues
|
||||
Emergent blockers and issues discovered during execution. Add items here as flights surface problems that affect the broader mission — things not anticipated during planning but visible at the mission level.
|
||||
|
||||
- [ ] {Issue description} — discovered in Flight {N}, affects {scope}
|
||||
|
||||
## Flights
|
||||
|
||||
> **Note:** These are tentative suggestions, not commitments. Flights are planned and created one at a time as work progresses. This list will evolve based on discoveries during implementation.
|
||||
|
||||
- [ ] Flight 1: {description}
|
||||
- [ ] Flight 2: {description}
|
||||
- [ ] Flight N *(optional)*: Alignment — vibe coding session for creative collaboration and hands-on adjustments
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Flight
|
||||
|
||||
| Property | Value |
|
||||
|----------|-------|
|
||||
| Location | `missions/{mission}/flights/{NN}-{slug}/flight.md` |
|
||||
| Created | During flight planning |
|
||||
| Updated | Until status changes to `in-flight` |
|
||||
|
||||
**Format:**
|
||||
|
||||
```markdown
|
||||
# Flight: {Title}
|
||||
|
||||
**Status**: planning | ready | in-flight | landed | completed | aborted
|
||||
**Mission**: [{Mission Title}](../../mission.md)
|
||||
|
||||
## Contributing to Criteria
|
||||
- [ ] {Relevant success criterion 1}
|
||||
- [ ] {Relevant success criterion 2}
|
||||
|
||||
---
|
||||
|
||||
## Pre-Flight
|
||||
|
||||
### Objective
|
||||
What this flight accomplishes (one paragraph).
|
||||
|
||||
### Open Questions
|
||||
- [ ] Question needing resolution
|
||||
- [x] Resolved question → see Design Decisions
|
||||
|
||||
### Design Decisions
|
||||
|
||||
**{Decision Title}**: {Choice made}
|
||||
- Rationale: Why this choice
|
||||
- Trade-off: What we're giving up
|
||||
|
||||
### Prerequisites
|
||||
- [ ] {What must be true before execution}
|
||||
|
||||
### Pre-Flight Checklist
|
||||
- [ ] All open questions resolved
|
||||
- [ ] Design decisions documented
|
||||
- [ ] Prerequisites verified
|
||||
- [ ] Validation approach defined
|
||||
- [ ] Legs defined
|
||||
|
||||
---
|
||||
|
||||
## In-Flight
|
||||
|
||||
### Technical Approach
|
||||
How the objective will be achieved.
|
||||
|
||||
### Checkpoints
|
||||
- [ ] {Milestone 1}
|
||||
- [ ] {Milestone 2}
|
||||
|
||||
### Adaptation Criteria
|
||||
|
||||
**Divert if**:
|
||||
- {Condition requiring re-planning}
|
||||
|
||||
**Acceptable variations**:
|
||||
- {Minor changes that don't require diversion}
|
||||
|
||||
### Legs
|
||||
|
||||
> **Note:** These are tentative suggestions, not commitments. Legs are planned and created one at a time as the flight progresses. This list will evolve based on discoveries during implementation.
|
||||
|
||||
- [ ] `{leg-slug}` - {Brief description}
|
||||
- [ ] `{leg-slug}` - {Brief description}
|
||||
- [ ] `uat-and-alignment` *(optional)* - Guided UAT session with iterative fixes
|
||||
|
||||
---
|
||||
|
||||
## Post-Flight
|
||||
|
||||
### Completion Checklist
|
||||
- [ ] All legs completed
|
||||
- [ ] Code merged
|
||||
- [ ] Tests passing
|
||||
- [ ] Documentation updated
|
||||
|
||||
### Verification
|
||||
How to confirm the flight achieved its objective.
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Leg
|
||||
|
||||
| Property | Value |
|
||||
|----------|-------|
|
||||
| Location | `missions/{mission}/flights/{flight}/legs/{NN}-{slug}.md` |
|
||||
| Created | Before leg execution |
|
||||
| Updated | Never after status changes to `in-flight` (immutable) |
|
||||
|
||||
**Format:**
|
||||
|
||||
```markdown
|
||||
# Leg: {slug}
|
||||
|
||||
**Status**: planning | ready | in-flight | landed | completed | aborted
|
||||
**Flight**: [{Flight Title}](../flight.md)
|
||||
|
||||
## Objective
|
||||
Single sentence: what this leg accomplishes.
|
||||
|
||||
## Context
|
||||
- Relevant design decisions from the flight
|
||||
- How this fits into the broader technical approach
|
||||
- Key learnings from prior legs (from flight log)
|
||||
|
||||
## Inputs
|
||||
What exists before this leg runs:
|
||||
- Files that must exist
|
||||
- State that must be true
|
||||
|
||||
## Outputs
|
||||
What exists after this leg completes:
|
||||
- Files created or modified
|
||||
- State changes
|
||||
|
||||
## Acceptance Criteria
|
||||
- [ ] Criterion 1 (specific, observable)
|
||||
- [ ] Criterion 2
|
||||
- [ ] Criterion 3
|
||||
|
||||
## Verification Steps
|
||||
How to confirm each criterion is met:
|
||||
- {Command or manual check for criterion 1}
|
||||
- {Command or manual check for criterion 2}
|
||||
|
||||
## Implementation Guidance
|
||||
|
||||
1. **{First step}**
|
||||
- Details about what to do
|
||||
|
||||
2. **{Second step}**
|
||||
- Details
|
||||
|
||||
## Edge Cases
|
||||
- **{Edge case 1}**: How to handle
|
||||
|
||||
## Files Affected
|
||||
- `path/to/file.ext` - {What changes}
|
||||
|
||||
---
|
||||
|
||||
## Post-Completion Checklist
|
||||
|
||||
**Complete ALL steps before signaling `[COMPLETE:leg]`:**
|
||||
|
||||
- [ ] All acceptance criteria verified
|
||||
- [ ] Tests passing
|
||||
- [ ] Update flight-log.md with leg progress entry
|
||||
- [ ] Set this leg's status to `completed` (in this file's header)
|
||||
- [ ] Check off this leg in flight.md
|
||||
- [ ] If final leg of flight:
|
||||
- [ ] Update flight.md status to `landed`
|
||||
- [ ] Check off flight in mission.md
|
||||
- [ ] Commit all changes together (code + artifacts)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Supporting Artifacts
|
||||
|
||||
### Flight Log
|
||||
|
||||
| Property | Value |
|
||||
|----------|-------|
|
||||
| Location | `missions/{mission}/flights/{flight}/flight-log.md` |
|
||||
| Created | When flight is created |
|
||||
| Updated | Continuously during execution (append-only) |
|
||||
|
||||
**Format:**
|
||||
|
||||
```markdown
|
||||
# Flight Log: {Flight Title}
|
||||
|
||||
**Flight**: [{Flight Title}](flight.md)
|
||||
|
||||
## Summary
|
||||
Brief overview of execution status and key outcomes.
|
||||
|
||||
---
|
||||
|
||||
## Leg Progress
|
||||
|
||||
### {Leg Name}
|
||||
**Status**: completed | landed | in-flight | aborted
|
||||
**Started**: {timestamp}
|
||||
**Completed**: {timestamp}
|
||||
|
||||
#### Changes Made
|
||||
- {Summary of what was implemented}
|
||||
|
||||
#### Notes
|
||||
{Observations during execution}
|
||||
|
||||
---
|
||||
|
||||
## Decisions
|
||||
Runtime decisions not in original plan.
|
||||
|
||||
### {Decision Title}
|
||||
**Context**: Why needed
|
||||
**Decision**: What was chosen
|
||||
**Impact**: Effect on flight or future legs
|
||||
|
||||
---
|
||||
|
||||
## Deviations
|
||||
Departures from planned approach.
|
||||
|
||||
### {Deviation Title}
|
||||
**Planned**: What the flight specified
|
||||
**Actual**: What was done instead
|
||||
**Reason**: Why the deviation was necessary
|
||||
|
||||
---
|
||||
|
||||
## Anomalies
|
||||
Unexpected issues encountered.
|
||||
|
||||
### {Anomaly Title}
|
||||
**Observed**: What happened
|
||||
**Severity**: blocking | degraded | cosmetic
|
||||
**Resolution**: How handled or "unresolved"
|
||||
|
||||
---
|
||||
|
||||
## Session Notes
|
||||
Chronological notes from work sessions.
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Flight Briefing
|
||||
|
||||
| Property | Value |
|
||||
|----------|-------|
|
||||
| Location | `missions/{mission}/flights/{flight}/flight-briefing.md` |
|
||||
| Created | Before flight execution begins |
|
||||
| Purpose | Pre-flight summary for crew alignment |
|
||||
|
||||
**Format:**
|
||||
|
||||
```markdown
|
||||
# Flight Briefing: {Flight Title}
|
||||
|
||||
**Date**: {briefing date}
|
||||
**Flight**: [{Flight Title}](flight.md)
|
||||
**Status**: Flight is ready for execution
|
||||
|
||||
## Mission Context
|
||||
{Brief reminder of mission outcome and how this flight contributes}
|
||||
|
||||
## Objective
|
||||
{What this flight will accomplish}
|
||||
|
||||
## Key Decisions
|
||||
{Summary of critical design decisions crew should know}
|
||||
|
||||
## Risks and Mitigations
|
||||
| Risk | Mitigation |
|
||||
|------|------------|
|
||||
| {risk} | {mitigation} |
|
||||
|
||||
## Legs Overview
|
||||
1. `{leg-slug}` - {description} - {estimated complexity}
|
||||
2. `{leg-slug}` - {description} - {estimated complexity}
|
||||
|
||||
## Environment Requirements
|
||||
{Any special setup needed before starting}
|
||||
|
||||
## Success Criteria
|
||||
{How we'll know the flight succeeded}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Flight Debrief
|
||||
|
||||
| Property | Value |
|
||||
|----------|-------|
|
||||
| Location | `missions/{mission}/flights/{flight}/flight-debrief.md` |
|
||||
| Created | After flight lands or aborts |
|
||||
| Purpose | Post-flight analysis and lessons learned |
|
||||
|
||||
**Format:**
|
||||
|
||||
```markdown
|
||||
# Flight Debrief: {Flight Title}
|
||||
|
||||
**Date**: {debrief date}
|
||||
**Flight**: [{Flight Title}](flight.md)
|
||||
**Status**: {landed | aborted}
|
||||
**Duration**: {start} - {end}
|
||||
**Legs Completed**: {X of Y}
|
||||
|
||||
## Outcome Assessment
|
||||
|
||||
### Objectives Achieved
|
||||
{What the flight accomplished}
|
||||
|
||||
### Mission Criteria Advanced
|
||||
{Which success criteria this flight contributed to}
|
||||
|
||||
## What Went Well
|
||||
{Specific things that worked effectively}
|
||||
|
||||
## What Could Be Improved
|
||||
|
||||
### Process
|
||||
- {Recommendations for flight execution}
|
||||
|
||||
### Technical
|
||||
- {Code quality, architecture, debt}
|
||||
|
||||
### Documentation
|
||||
- {Gaps identified}
|
||||
|
||||
## Deviations and Lessons Learned
|
||||
|
||||
| Deviation | Reason | Standardize? |
|
||||
|-----------|--------|--------------|
|
||||
| {what changed} | {why} | {yes/no} |
|
||||
|
||||
## Key Learnings
|
||||
{Insights for future flights}
|
||||
|
||||
## Recommendations
|
||||
1. {Most impactful recommendation}
|
||||
2. {Second recommendation}
|
||||
3. {Third recommendation}
|
||||
|
||||
## Action Items
|
||||
- [ ] {Immediate actions}
|
||||
- [ ] {Near-term improvements}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Mission Debrief
|
||||
|
||||
| Property | Value |
|
||||
|----------|-------|
|
||||
| Location | `missions/{NN}-{mission}/mission-debrief.md` |
|
||||
| Created | After mission completes or aborts |
|
||||
| Purpose | Post-mission retrospective and methodology improvements |
|
||||
|
||||
**Format:**
|
||||
|
||||
```markdown
|
||||
# Mission Debrief: {Mission Title}
|
||||
|
||||
**Date**: {debrief date}
|
||||
**Mission**: [{Mission Title}](mission.md)
|
||||
**Status**: {completed | aborted}
|
||||
**Duration**: {start} - {end}
|
||||
**Flights Completed**: {X of Y}
|
||||
|
||||
## Outcome Assessment
|
||||
|
||||
### Success Criteria Results
|
||||
| Criterion | Status | Notes |
|
||||
|-----------|--------|-------|
|
||||
| {criterion} | {met/not met} | {notes} |
|
||||
|
||||
### Overall Outcome
|
||||
{Did we achieve what we set out to do?}
|
||||
|
||||
## Flight Summary
|
||||
| Flight | Status | Key Outcome |
|
||||
|--------|--------|-------------|
|
||||
| {flight} | {landed/aborted} | {outcome} |
|
||||
|
||||
## What Went Well
|
||||
{Effective patterns and successes}
|
||||
|
||||
## What Could Be Improved
|
||||
{Process, planning, execution improvements}
|
||||
|
||||
## Lessons Learned
|
||||
{Insights to carry forward}
|
||||
|
||||
## Methodology Feedback
|
||||
{Improvements to Flight Control process itself}
|
||||
|
||||
## Action Items
|
||||
- [ ] {Follow-up work}
|
||||
- [ ] {Process improvements}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Maintenance Report
|
||||
|
||||
| Property | Value |
|
||||
|----------|-------|
|
||||
| Location | `maintenance/{YYYY-MM-DD}.md` |
|
||||
| Created | After a mission or ad-hoc, during routine maintenance |
|
||||
| Purpose | Codebase health assessment and maintenance recommendation |
|
||||
|
||||
**Format:**
|
||||
|
||||
```markdown
|
||||
# Maintenance Report: {YYYY-MM-DD}
|
||||
|
||||
**Date**: {report date}
|
||||
**Triggered by**: [{Mission Title}](missions/{NN}-{slug}/mission.md) *(optional — omit if ad-hoc)*
|
||||
**Assessment**: {Flight Ready | Maintenance Required}
|
||||
|
||||
## Categories Inspected
|
||||
{Numbered list of categories that were checked}
|
||||
|
||||
## Executive Summary
|
||||
{2-3 sentence overview of codebase health and key findings}
|
||||
|
||||
## Findings by Category
|
||||
|
||||
### Category {N}: {Name}
|
||||
|
||||
| # | Finding | Severity | New/Known | Recommendation |
|
||||
|---|---------|----------|-----------|----------------|
|
||||
| {n} | {title} | {severity} | {new/known} | {recommendation} |
|
||||
|
||||
**Details:**
|
||||
{Per-finding evidence with file paths and line numbers}
|
||||
|
||||
## Severity Summary
|
||||
|
||||
| Severity | Count |
|
||||
|----------|-------|
|
||||
| Critical | {N} |
|
||||
| Action Required | {N} |
|
||||
| Advisory | {N} |
|
||||
| Pass | {N} |
|
||||
|
||||
## Known Debt Carried Forward
|
||||
{Debt items from debriefs that were acknowledged but not addressed, or "None — no prior debt context"}
|
||||
|
||||
## Recommendations
|
||||
1. {Most impactful recommendation}
|
||||
2. {Second recommendation}
|
||||
3. {Third recommendation}
|
||||
|
||||
## Maintenance Mission
|
||||
{Link to scaffolded mission if created, or "Not required — codebase is flight ready"}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## State Tracking
|
||||
|
||||
States are tracked in the frontmatter or status field of each artifact:
|
||||
|
||||
| Artifact | States |
|
||||
|----------|--------|
|
||||
| Mission | `planning` → `active` → `completed` (or `aborted`) |
|
||||
| Flight | `planning` → `ready` → `in-flight` → `landed` → `completed` (or `aborted`) |
|
||||
| Leg | `planning` → `ready` → `in-flight` → `landed` → `completed` (or `aborted`) |
|
||||
|
||||
## Conventions
|
||||
|
||||
- **Immutability**: Never modify legs once `in-flight`; create new ones instead
|
||||
- **Append-only logs**: Flight logs are append-only during execution
|
||||
- **Flight briefings**: Created before execution, not modified after
|
||||
- **Debriefs**: Created after completion, may be updated with follow-up notes
|
||||
- **Mission as briefing**: The mission.md document serves as both definition and briefing (no separate mission-briefing.md)
|
||||
|
||||
## Git Workflow
|
||||
|
||||
| Property | Value |
|
||||
|----------|-------|
|
||||
| Strategy | `branch` |
|
||||
|
||||
**Options:**
|
||||
|
||||
- **`branch`** (default) — Single-checkout workflow. The orchestrator creates a feature branch and all agents work in the project root. One flight at a time per working copy.
|
||||
- **`worktree`** — Worktree isolation. The orchestrator creates a git worktree under `.worktrees/` for each flight. Agents work in the worktree path. Parallel flights are possible on a single repo clone.
|
||||
|
||||
When using the `worktree` strategy, add `.worktrees/` to `.gitignore`.
|
||||
@@ -0,0 +1,408 @@
|
||||
# Artifact System: Jira
|
||||
|
||||
This project stores Flight Control artifacts as Jira issues.
|
||||
|
||||
## Issue Type Mapping
|
||||
|
||||
| Flight Control | Jira Issue Type | Hierarchy |
|
||||
|----------------|-----------------|-----------|
|
||||
| Mission | Epic | Parent |
|
||||
| Flight | Story | Child of Epic |
|
||||
| Leg | Sub-task | Child of Story |
|
||||
|
||||
## Setup Questions
|
||||
|
||||
Answer these questions when configuring Jira artifacts for your project:
|
||||
|
||||
| Question | Answer |
|
||||
|----------|--------|
|
||||
| What is the Jira project key? | `PROJECT` |
|
||||
| JQL query for discovering flight documentation? | (e.g., `project = PROJECT AND labels = flight-control`) |
|
||||
|
||||
## Configuration
|
||||
|
||||
| Property | Value |
|
||||
|----------|-------|
|
||||
| Project Key | `PROJECT` |
|
||||
| Board | (specify board name or ID) |
|
||||
| Labels | `flight-control` |
|
||||
|
||||
---
|
||||
|
||||
## Custom Fields
|
||||
|
||||
<!-- Add your project's custom Jira fields here -->
|
||||
|
||||
| Custom Field | Jira Field ID | Required | Used For | Notes |
|
||||
|--------------|---------------|----------|----------|-------|
|
||||
| (example) Team | `customfield_10001` | Yes | All issues | Select from predefined teams |
|
||||
| (example) Sprint | `customfield_10002` | No | Stories, Sub-tasks | Assign to sprint |
|
||||
|
||||
## Project Rules
|
||||
|
||||
<!-- Document project-specific Jira rules and conventions here -->
|
||||
|
||||
### Required Fields by Issue Type
|
||||
|
||||
**Epic (Mission):**
|
||||
- (list required fields for your project)
|
||||
|
||||
**Story (Flight):**
|
||||
- (list required fields for your project)
|
||||
|
||||
**Sub-task (Leg):**
|
||||
- (list required fields for your project)
|
||||
|
||||
### Workflow Rules
|
||||
|
||||
- (document any workflow restrictions or automation rules)
|
||||
- (e.g., "Stories cannot move to In Progress without Epic Link")
|
||||
|
||||
### Naming Conventions
|
||||
|
||||
- (document any naming patterns required by your project)
|
||||
- (e.g., "Epic summaries must start with [MISSION]")
|
||||
|
||||
---
|
||||
|
||||
## Core Artifacts
|
||||
|
||||
### Mission → Epic
|
||||
|
||||
| Field | Mapping |
|
||||
|-------|---------|
|
||||
| Summary | Mission title |
|
||||
| Description | See format below |
|
||||
| Labels | `flight-control`, `mission` |
|
||||
|
||||
**Description Format:**
|
||||
|
||||
```
|
||||
## Outcome
|
||||
{What success looks like in human terms}
|
||||
|
||||
## Context
|
||||
{Why this mission matters now}
|
||||
|
||||
## Success Criteria
|
||||
- [ ] {Criterion 1}
|
||||
- [ ] {Criterion 2}
|
||||
|
||||
## Stakeholders
|
||||
{Who cares about this outcome}
|
||||
|
||||
## Constraints
|
||||
{Non-negotiable boundaries}
|
||||
|
||||
## Environment Requirements
|
||||
{Development and runtime requirements}
|
||||
|
||||
## Open Questions
|
||||
{Unknowns needing resolution}
|
||||
|
||||
## Known Issues
|
||||
Emergent blockers and issues discovered during execution. Add items here as flights surface problems that affect the broader mission — things not anticipated during planning but visible at the mission level.
|
||||
|
||||
- [ ] {Issue description} — discovered in Flight {N}, affects {scope}
|
||||
|
||||
## Flights
|
||||
> **Note:** These are tentative suggestions, not commitments. Flights are planned and created one at a time as work progresses. This list will evolve based on discoveries during implementation.
|
||||
|
||||
- [ ] Flight 1: {description}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Flight → Story
|
||||
|
||||
| Field | Mapping |
|
||||
|-------|---------|
|
||||
| Summary | Flight title |
|
||||
| Description | See format below |
|
||||
| Epic Link | Parent mission epic |
|
||||
| Labels | `flight-control`, `flight` |
|
||||
|
||||
**Description Format:**
|
||||
|
||||
```
|
||||
## Objective
|
||||
{What this flight accomplishes}
|
||||
|
||||
## Contributing to Criteria
|
||||
- {Relevant success criterion 1}
|
||||
- {Relevant success criterion 2}
|
||||
|
||||
## Design Decisions
|
||||
{Key technical decisions and rationale}
|
||||
|
||||
## Prerequisites
|
||||
- [ ] {What must be true before execution}
|
||||
|
||||
## Technical Approach
|
||||
{How the objective will be achieved}
|
||||
|
||||
## Legs
|
||||
> **Note:** These are tentative suggestions, not commitments. Legs are planned and created one at a time as the flight progresses. This list will evolve based on discoveries during implementation.
|
||||
|
||||
- [ ] {leg-slug} - {description}
|
||||
|
||||
## Validation Approach
|
||||
{How will this flight be validated? Manual testing, automated tests, or both?}
|
||||
|
||||
## Verification
|
||||
{How to confirm success}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Leg → Sub-task
|
||||
|
||||
| Field | Mapping |
|
||||
|-------|---------|
|
||||
| Summary | Leg title |
|
||||
| Description | See format below |
|
||||
| Parent | Flight story |
|
||||
| Labels | `flight-control`, `leg` |
|
||||
|
||||
**Description Format:**
|
||||
|
||||
```
|
||||
## Objective
|
||||
{Single sentence: what this leg accomplishes}
|
||||
|
||||
## Context
|
||||
{Design decisions and learnings from prior legs}
|
||||
|
||||
## Inputs
|
||||
{What must exist before this leg runs}
|
||||
|
||||
## Outputs
|
||||
{What exists after completion}
|
||||
|
||||
## Acceptance Criteria
|
||||
- [ ] {Criterion 1}
|
||||
- [ ] {Criterion 2}
|
||||
|
||||
## Verification Steps
|
||||
How to confirm each criterion is met:
|
||||
- {Command or manual check for criterion 1}
|
||||
- {Command or manual check for criterion 2}
|
||||
|
||||
## Implementation Guidance
|
||||
{Step-by-step guidance}
|
||||
|
||||
## Files Affected
|
||||
{List of files to modify}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Supporting Artifacts
|
||||
|
||||
### Flight Log → Story Comments
|
||||
|
||||
| Property | Value |
|
||||
|----------|-------|
|
||||
| Location | Comments on the Flight (Story) |
|
||||
| Format | Timestamped comments with prefix |
|
||||
| Update pattern | Append new comments during execution |
|
||||
|
||||
**Comment Format:**
|
||||
|
||||
```
|
||||
[Flight Log] {YYYY-MM-DD HH:MM}
|
||||
|
||||
## {Entry Type}: {Title}
|
||||
|
||||
{Content based on entry type - see below}
|
||||
```
|
||||
|
||||
**Entry Types:**
|
||||
|
||||
- `Leg Progress` - Status updates for leg completion
|
||||
- `Decision` - Runtime decisions not in original plan
|
||||
- `Deviation` - Departures from planned approach
|
||||
- `Anomaly` - Unexpected issues encountered
|
||||
- `Session Notes` - General progress notes
|
||||
|
||||
---
|
||||
|
||||
### Flight Briefing → Story Comment
|
||||
|
||||
| Property | Value |
|
||||
|----------|-------|
|
||||
| Location | Comment on the Flight (Story) |
|
||||
| Created | Before flight execution begins |
|
||||
| Label | `[Flight Briefing]` |
|
||||
|
||||
**Comment Format:**
|
||||
|
||||
```
|
||||
[Flight Briefing] {YYYY-MM-DD}
|
||||
|
||||
## Mission Context
|
||||
{How this flight contributes to mission}
|
||||
|
||||
## Objective
|
||||
{What this flight will accomplish}
|
||||
|
||||
## Key Decisions
|
||||
{Critical decisions crew should know}
|
||||
|
||||
## Risks
|
||||
| Risk | Mitigation |
|
||||
|------|------------|
|
||||
| {risk} | {mitigation} |
|
||||
|
||||
## Legs Overview
|
||||
1. {leg} - {description}
|
||||
2. {leg} - {description}
|
||||
|
||||
## Success Criteria
|
||||
{How we'll know the flight succeeded}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Flight Debrief → Story Comment
|
||||
|
||||
| Property | Value |
|
||||
|----------|-------|
|
||||
| Location | Comment on the Flight (Story) |
|
||||
| Created | After flight lands or aborts |
|
||||
| Label | `[Flight Debrief]` |
|
||||
|
||||
**Comment Format:**
|
||||
|
||||
```
|
||||
[Flight Debrief] {YYYY-MM-DD}
|
||||
|
||||
**Status**: {landed | aborted}
|
||||
**Duration**: {start} - {end}
|
||||
**Legs Completed**: {X of Y}
|
||||
|
||||
## Outcome Assessment
|
||||
{What the flight accomplished}
|
||||
|
||||
## What Went Well
|
||||
{Effective patterns}
|
||||
|
||||
## What Could Be Improved
|
||||
{Recommendations}
|
||||
|
||||
## Deviations
|
||||
| Deviation | Reason | Standardize? |
|
||||
|-----------|--------|--------------|
|
||||
| {what} | {why} | {yes/no} |
|
||||
|
||||
## Key Learnings
|
||||
{Insights for future flights}
|
||||
|
||||
## Recommendations
|
||||
1. {Most impactful recommendation}
|
||||
2. {Second recommendation}
|
||||
3. {Third recommendation}
|
||||
|
||||
## Action Items
|
||||
- [ ] {action}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Mission Debrief → Epic Comment
|
||||
|
||||
| Property | Value |
|
||||
|----------|-------|
|
||||
| Location | Comment on the Mission (Epic) |
|
||||
| Created | After mission completes or aborts |
|
||||
| Label | `[Mission Debrief]` |
|
||||
|
||||
**Comment Format:**
|
||||
|
||||
```
|
||||
[Mission Debrief] {YYYY-MM-DD}
|
||||
|
||||
**Status**: {completed | aborted}
|
||||
**Duration**: {start} - {end}
|
||||
**Flights Completed**: {X of Y}
|
||||
|
||||
## Success Criteria Results
|
||||
| Criterion | Status | Notes |
|
||||
|-----------|--------|-------|
|
||||
| {criterion} | {met/not met} | {notes} |
|
||||
|
||||
## Flight Summary
|
||||
| Flight | Status | Outcome |
|
||||
|--------|--------|---------|
|
||||
| {flight} | {status} | {outcome} |
|
||||
|
||||
## What Went Well
|
||||
{Successes}
|
||||
|
||||
## What Could Be Improved
|
||||
{Improvements}
|
||||
|
||||
## Lessons Learned
|
||||
{Insights}
|
||||
|
||||
## Action Items
|
||||
- [ ] {action}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## State Mapping
|
||||
|
||||
### Mission (Epic)
|
||||
|
||||
| Flight Control | Jira Status |
|
||||
|----------------|-------------|
|
||||
| planning | To Do |
|
||||
| active | In Progress |
|
||||
| completed | Done |
|
||||
| aborted | Cancelled |
|
||||
|
||||
### Flight (Story)
|
||||
|
||||
| Flight Control | Jira Status |
|
||||
|----------------|-------------|
|
||||
| planning | To Do |
|
||||
| ready | Ready |
|
||||
| in-flight | In Progress |
|
||||
| landed | In Review |
|
||||
| completed | Done |
|
||||
| aborted | Cancelled |
|
||||
|
||||
### Leg (Sub-task)
|
||||
|
||||
| Flight Control | Jira Status |
|
||||
|----------------|-------------|
|
||||
| planning | To Do |
|
||||
| ready | Ready |
|
||||
| in-flight | In Progress |
|
||||
| landed | In Review |
|
||||
| completed | Done |
|
||||
| aborted | Cancelled |
|
||||
|
||||
---
|
||||
|
||||
## Conventions
|
||||
|
||||
- **Naming**: Use clear, action-oriented summaries
|
||||
- **Linking**: Always link Stories to Epic, Sub-tasks to Story
|
||||
- **Labels**: Apply `flight-control` label to all artifacts
|
||||
- **Immutability**: Never modify Sub-tasks once In Progress; create new ones
|
||||
- **Comments**: Use prefixes (`[Flight Log]`, `[Flight Briefing]`, etc.) for filtering
|
||||
|
||||
## Git Workflow
|
||||
|
||||
| Property | Value |
|
||||
|----------|-------|
|
||||
| Strategy | `branch` |
|
||||
|
||||
**Options:**
|
||||
|
||||
- **`branch`** (default) — Single-checkout workflow. The orchestrator creates a feature branch and all agents work in the project root. One flight at a time per working copy.
|
||||
- **`worktree`** — Worktree isolation. The orchestrator creates a git worktree under `.worktrees/` for each flight. Agents work in the worktree path. Parallel flights are possible on a single repo clone.
|
||||
|
||||
When using the `worktree` strategy, add `.worktrees/` to `.gitignore`.
|
||||
177    container/mission-control/.claude/skills/leg/SKILL.md    Normal file
@@ -0,0 +1,177 @@
|
||||
---
|
||||
name: leg
|
||||
description: Generate detailed implementation guidance for LLM execution. Use when creating atomic implementation steps from a flight.
|
||||
---
|
||||
|
||||
# Leg Implementation Guidance
|
||||
|
||||
Generate detailed implementation guidance for LLM execution.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Project must be initialized with `/init-project` (`.flightops/ARTIFACTS.md` must exist)
|
||||
- A flight must exist before creating legs
|
||||
|
||||
## Workflow
|
||||
|
||||
### Phase 1: Context Loading
|
||||
|
||||
1. **Identify the target project**
|
||||
- Read `projects.md` to find the project's path
|
||||
|
||||
2. **Verify project is initialized**
|
||||
- Check if `{target-project}/.flightops/ARTIFACTS.md` exists
|
||||
- **If missing**: STOP and tell the user to run `/init-project` first
|
||||
- Do not proceed without the artifact configuration
|
||||
|
||||
3. **Read the artifact configuration**
|
||||
- Read `{target-project}/.flightops/ARTIFACTS.md` for artifact locations and formats
|
||||
|
||||
4. **Read the parent flight**
|
||||
- Understand the objective being achieved
|
||||
- Review design decisions and constraints
|
||||
- Note the technical approach defined
|
||||
|
||||
5. **Read the flight log in detail** (critical)
|
||||
|
||||
The flight log captures ground truth from actual implementation. Read it fully and extract:
|
||||
- Actual outcomes from completed legs
|
||||
- Deviations from the original plan
|
||||
- Anomalies discovered during execution
|
||||
- Environment details (versions, configurations)
|
||||
- Decisions made during implementation
|
||||
- Workarounds for issues encountered
|
||||
|
||||
6. **Identify this leg's scope**
|
||||
- Which leg from the flight's leg list?
|
||||
- What comes before and after?
|
||||
- Dependencies on other legs?
|
||||
- How do prior leg outcomes affect this leg?
|
||||
|
||||
7. **Identify environment constraints**
|
||||
- Execution environment (devcontainer, WSL, cloud)?
|
||||
- User context (root, specific user)?
|
||||
- Environment variables or shell setup needed?
|
||||
- Commands inside vs outside containers?
|
||||
|
||||
### Phase 2: Implementation Analysis
|
||||
|
||||
Deep dive into the specific implementation:
|
||||
|
||||
1. **Identify exact files to modify**
|
||||
- Read existing files that will be changed
|
||||
- Understand current code structure
|
||||
- Note imports, dependencies, patterns
|
||||
|
||||
2. **Understand existing patterns**
|
||||
- How is similar functionality implemented?
|
||||
- What conventions does the codebase follow?
|
||||
- What testing patterns are used?
|
||||
|
||||
3. **Determine inputs and outputs**
|
||||
- What state exists before this leg?
|
||||
- What state must exist after completion?
|
||||
- What can the implementing agent assume?
|
||||
|
||||
4. **Identify edge cases**
|
||||
- What could go wrong?
|
||||
- What validation is needed?
|
||||
- What error handling is required?
|
||||
- If this leg modifies database schemas: does it include migration creation AND execution? Both must happen in the same leg — a schema defined but never migrated is a gap.
|
||||
|
||||
5. **Identify dependent code** (for interface changes)
|
||||
- Does this leg modify shared interfaces?
|
||||
- What files consume these interfaces?
|
||||
- Should updating consumers be part of this leg?
|
||||
|
||||
6. **Identify platform considerations**
|
||||
- Does this leg touch OS-specific features?
|
||||
- What platform differences might affect implementation?
|
||||
|
||||
### Phase 3: Guidance Generation
|
||||
|
||||
Create the leg artifact using the format defined in `.flightops/ARTIFACTS.md`.
|
||||
|
||||
## Guidelines
|
||||
|
||||
### Writing Effective Objectives
|
||||
|
||||
State exactly what the leg accomplishes:
|
||||
|
||||
**Weak**: "Set up the database stuff"
|
||||
|
||||
**Strong**: "Create the User model with email, password_hash, and timestamp fields"
|
||||
|
||||
### Acceptance Criteria
|
||||
|
||||
Criteria must be:
|
||||
- **Binary**: Either met or not met
|
||||
- **Observable**: Can be verified by inspection or test
|
||||
- **Complete**: Nothing else needed for "done"
|
||||
|
||||
**Weak**: "Code is clean" (subjective)
|
||||
|
||||
**Strong**: "User model exists in `prisma/schema.prisma`"
|
||||
|
||||
### Verification Steps
|
||||
|
||||
Tell the agent exactly *how* to confirm each criterion:
|
||||
|
||||
**Weak**: "Make sure it works"
|
||||
|
||||
**Strong**:
|
||||
```markdown
|
||||
## Verification Steps
|
||||
- Run `npx prisma migrate status` — should show no pending migrations
|
||||
- Run `npm test` — all tests pass
|
||||
- Tab through form fields — focus order matches visual order
|
||||
```
|
||||
|
||||
For accessibility work, include specific checks:
|
||||
- Keyboard navigation sequences
|
||||
- Screen reader commands to test
|
||||
- Automated tool commands (Lighthouse, axe-core)
|
||||
|
||||
### Implementation Guidance
|
||||
|
||||
Be explicit, not implicit:
|
||||
|
||||
**Implicit**: "Add validation to the email field"
|
||||
|
||||
**Explicit**: "Add email validation using the `validator` library's `isEmail` function. Return HTTP 400 with `{ "error": "Invalid email format" }` on validation failure."
|
||||
|
||||
### Code Examples
|
||||
|
||||
Provide examples when:
|
||||
- The codebase has specific patterns to follow
|
||||
- There are multiple valid approaches
|
||||
- The implementation isn't obvious from context
|
||||
|
||||
### Leg Sizing
|
||||
|
||||
A well-sized leg:
|
||||
- Takes minutes to a few hours
|
||||
- Is atomic (can be completed independently)
|
||||
- Has clear, verifiable acceptance criteria
|
||||
- Produces a working increment
|
||||
|
||||
**Too small**: Single-line change with no meaningful criteria
|
||||
**Too large**: Would benefit from intermediate checkpoints
|
||||
|
||||
### Documenting Workarounds
|
||||
|
||||
When implementing a workaround, document:
|
||||
- **What**: The workaround clearly
|
||||
- **Why**: Why the ideal solution wasn't feasible
|
||||
- **When to remove**: Condition for replacement
|
||||
|
||||
### Immutability
|
||||
|
||||
Once a leg is `in-flight`:
|
||||
- Do NOT modify the leg document
|
||||
- If requirements change, mark it `aborted` (changes rolled back)
|
||||
- Create a new leg with updated requirements
|
||||
|
||||
## Output
|
||||
|
||||
Create the leg artifact using the location and format defined in `.flightops/ARTIFACTS.md`.
|
||||
@@ -0,0 +1,184 @@
|
||||
---
|
||||
name: mission-debrief
|
||||
description: Post-mission retrospective for outcomes assessment and methodology improvement. Use after a mission completes or aborts to capture overall lessons learned.
|
||||
---
|
||||
|
||||
# Mission Debrief
|
||||
|
||||
Perform comprehensive post-mission retrospective and methodology assessment.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Project must be initialized with `/init-project` (`.flightops/ARTIFACTS.md` must exist)
|
||||
- A mission must have at least one completed or aborted flight before debriefing
|
||||
|
||||
## Workflow
|
||||
|
||||
### Phase 1: Context Loading
|
||||
|
||||
1. **Identify the target project**
|
||||
- Read `projects.md` to find the project's path
|
||||
|
||||
2. **Verify project is initialized**
|
||||
- Check if `{target-project}/.flightops/ARTIFACTS.md` exists
|
||||
- **If missing**: STOP and tell the user to run `/init-project` first
|
||||
- Do not proceed without the artifact configuration
|
||||
|
||||
3. **Read the artifact configuration**
|
||||
- Read `{target-project}/.flightops/ARTIFACTS.md` for artifact locations and formats
|
||||
|
||||
4. **Load mission documentation**
|
||||
- Read the mission for original outcome, success criteria, and constraints
|
||||
- Read ALL flight documents for objectives and results
|
||||
- Read ALL flight debriefs for per-flight lessons learned
|
||||
- Read flight logs for execution details
|
||||
|
||||
5. **Load project context**
|
||||
- Read the target project's `README.md` and `CLAUDE.md`
|
||||
- Understand what was built during this mission
|
||||
|
||||
### Phase 2: Crew Debrief Interviews
|
||||
|
||||
Read `{target-project}/.flightops/agent-crews/mission-debrief.md` for crew definitions and prompts (fall back to defaults at `.claude/skills/init-project/defaults/agent-crews/mission-debrief.md`).
|
||||
|
||||
**Validate structure**: The phase file MUST contain `## Crew`, `## Interaction Protocol`, and `## Prompts` sections with fenced code blocks. If the file exists but is malformed, STOP and tell the user: "Phase file `mission-debrief.md` is missing required sections. Either fix it manually or re-run `/init-project` to reset to defaults."
|
||||
|
||||
#### Architect Interview
|
||||
1. **Spawn an Architect agent** in the target project context (Task tool, `subagent_type: "general-purpose"`)
|
||||
- Provide the "Debrief Interview" prompt from the mission-debrief phase file's Prompts section
|
||||
- The Architect reviews architectural evolution across all flights, pattern consistency, and structural health
|
||||
- The Architect provides structured debrief input
|
||||
|
||||
#### Human Interview
|
||||
Interview the human crew members to capture qualitative insights that documents alone cannot reveal.
|
||||
|
||||
##### Flight Log Clarifications
|
||||
Surface specific observations from flight logs and ask for context:
|
||||
- Anomalies or deviations noted in logs — what caused them?
|
||||
- Decisions made during execution — what drove those choices?
|
||||
- Blockers or delays — were these predictable in hindsight?
|
||||
- Workarounds implemented — should these become standard practice?
|
||||
|
||||
##### Mission Control Experience
|
||||
For the human(s) who served as mission control:
|
||||
- "What was your experience coordinating this mission?"
|
||||
- "Were there moments of confusion or uncertainty about status?"
|
||||
- "Did the flight/leg structure help or hinder your oversight?"
|
||||
- "What information was missing when you needed it?"
|
||||
|
||||
##### Project-Specific Feedback
|
||||
- "What surprised you most during this mission?"
|
||||
- "What would you do differently if starting over?"
|
||||
- "Are there project-specific conventions that should be documented?"
|
||||
- "Did any tools, libraries, or patterns prove particularly valuable or problematic?"
|
||||
|
||||
##### Agentic Orchestration Feedback (if applicable)
|
||||
If the mission used automated orchestration (LLM agents executing legs):
|
||||
- "How well did handoffs between agents work?"
|
||||
- "Were there failures in agent coordination or context transfer?"
|
||||
- "Did agents make decisions that required human correction?"
|
||||
- "What guardrails or checkpoints would have helped?"
|
||||
- "Was the level of autonomy appropriate for the tasks?"
|
||||
|
||||
**Note**: Adapt questions based on what the flight logs and artifacts reveal. Surface specific examples rather than asking in the abstract.
|
||||
|
||||
### Phase 3: Outcome Assessment (synthesize Architect + human input)
|
||||
|
||||
#### Success Criteria Evaluation
|
||||
For each success criterion:
|
||||
- Was it met? Partially met? Not met?
|
||||
- What evidence supports this assessment?
|
||||
- If not met, what blocked it?
|
||||
|
||||
#### Overall Outcome
|
||||
- Did the mission achieve its stated outcome?
|
||||
- Was the outcome still the right goal by the end?
|
||||
- What value was delivered to stakeholders?
|
||||
|
||||
### Phase 4: Flight Analysis
|
||||
|
||||
#### Flight Summary
|
||||
For each flight:
|
||||
- Status (landed/completed/aborted)
|
||||
- Key accomplishments
|
||||
- Major challenges
|
||||
|
||||
#### Flight Patterns
|
||||
- Which flights went smoothly? Why?
|
||||
- Which flights struggled? Why?
|
||||
- Were there common issues across flights?
|
||||
|
||||
### Phase 5: Process Analysis
|
||||
|
||||
#### Planning Effectiveness
|
||||
- Was the initial flight plan accurate?
|
||||
- How many flights were added/removed/changed?
|
||||
- Were estimates reasonable?
|
||||
|
||||
#### Execution Patterns
|
||||
- What worked well in execution?
|
||||
- What friction points emerged?
|
||||
- Were the right artifacts being created?
|
||||
|
||||
#### Methodology Assessment
|
||||
- Did the mission/flight/leg hierarchy work for this project?
|
||||
- Were briefings and debriefs valuable?
|
||||
- What would you change about the process?
|
||||
|
||||
### Phase 6: Knowledge Capture
|
||||
|
||||
#### Lessons Learned
|
||||
- Technical lessons (architecture, patterns, tools)
|
||||
- Process lessons (planning, execution, communication)
|
||||
- Domain lessons (business logic, requirements)
|
||||
|
||||
#### Reusable Patterns
|
||||
- What patterns emerged that could be templated?
|
||||
- What conventions should be documented?
|
||||
|
||||
#### Documentation Updates
|
||||
- Does CLAUDE.md need updates?
|
||||
- Does README need updates?
|
||||
- Are there new runbooks or guides needed?
|
||||
|
||||
### Phase 7: Generate Debrief
|
||||
|
||||
Create the mission debrief artifact using the format defined in `.flightops/ARTIFACTS.md`.
|
||||
|
||||
### Phase 8: Mission Status Transition
|
||||
|
||||
If the mission is not already marked as `completed` or `aborted`, update the mission artifact's status to `completed`.
|
||||
|
||||
## Guidelines
|
||||
|
||||
### Holistic View
|
||||
Look at the mission as a whole, not just individual flights. Identify patterns and systemic issues.
|
||||
|
||||
### Stakeholder Perspective
|
||||
Frame outcomes in terms stakeholders care about. Did we deliver what was promised?
|
||||
|
||||
### Honest Assessment
|
||||
Be candid about what didn't work. The debrief is for learning, not for blame.
|
||||
|
||||
### Actionable Insights
|
||||
Every lesson should have a "so what?" — how should future missions be different?
|
||||
|
||||
### Methodology Feedback
|
||||
This is the best time to identify improvements to Flight Control itself.
|
||||
|
||||
### Interview Integration
|
||||
Weave interview insights throughout the debrief, not as a separate section. Crew perspectives should inform:
|
||||
- Why certain outcomes were achieved or missed
|
||||
- Root causes behind process friction
|
||||
- Context that flight logs alone cannot capture
|
||||
- Recommendations that reflect lived experience, not just document analysis
|
||||
|
||||
## Output
|
||||
|
||||
Create the debrief artifact using the location and format defined in `.flightops/ARTIFACTS.md`.
|
||||
|
||||
After creating the debrief, summarize:
|
||||
1. Overall mission outcome assessment
|
||||
2. Top 3 things that went well
|
||||
3. Top 3 things to improve
|
||||
4. Recommended methodology changes
|
||||
157    container/mission-control/.claude/skills/mission/SKILL.md    Normal file
@@ -0,0 +1,157 @@
|
||||
---
|
||||
name: mission
|
||||
description: Create outcome-driven missions through research and user interview. Use when starting a new project, feature, or initiative that needs planning.
|
||||
---
|
||||
|
||||
# Mission Creation
|
||||
|
||||
Create a new mission through research and collaborative discovery.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Project must be initialized with `/init-project` (`.flightops/ARTIFACTS.md` must exist)
|
||||
|
||||
## Workflow
|
||||
|
||||
### Phase 1: Research
|
||||
|
||||
Before asking questions, gather context:
|
||||
|
||||
1. **Identify the target project**
|
||||
- Read `projects.md` to find the project's path
|
||||
- If not listed, ask the user for details
|
||||
|
||||
2. **Verify project is initialized**
|
||||
- Check if `{target-project}/.flightops/ARTIFACTS.md` exists
|
||||
- **If missing**: STOP and tell the user to run `/init-project` first
|
||||
- Do not proceed without the artifact configuration
|
||||
|
||||
3. **Read the artifact configuration**
|
||||
- Read `{target-project}/.flightops/ARTIFACTS.md` for artifact locations and formats
|
||||
|
||||
4. **Explore the target project's codebase**
|
||||
- Project structure and architecture
|
||||
- Existing patterns and conventions
|
||||
- Related functionality
|
||||
|
||||
5. **Read existing documentation**
|
||||
- README and project docs
|
||||
- Any existing missions
|
||||
- Technical specs or design documents
|
||||
|
||||
6. **Search external sources if needed**
|
||||
- API documentation
|
||||
- Library documentation
|
||||
- Relevant patterns or best practices
|
||||
|
||||
### Phase 2: User Input
|
||||
|
||||
Before asking structured questions, share a brief summary of what you learned during research and prompt the user for open-ended input:
|
||||
|
||||
- "Here's what I've gathered about the project so far: [summary]. Before I ask specific questions, what are your thoughts on what this mission should include? Feel free to share goals, ideas, concerns, scope preferences — anything that should shape this mission."
|
||||
|
||||
Use the user's response to inform and focus the interview questions that follow.
|
||||
|
||||
### Phase 3: Interview
|
||||
|
||||
Ask about outcomes, not tasks. Focus on:
|
||||
|
||||
1. **Desired outcomes**
|
||||
- "What does success look like when this is done?"
|
||||
- "What problem does this solve for users/stakeholders?"
|
||||
- "How will you know this mission succeeded?"
|
||||
|
||||
2. **Stakeholders and their needs**
|
||||
- "Who benefits from this outcome?"
|
||||
- "Are there competing interests to balance?"
|
||||
- "Who needs to approve or review?"
|
||||
|
||||
3. **Constraints**
|
||||
- "What technical constraints exist?"
|
||||
- "Are there timeline or resource boundaries?"
|
||||
- "What's out of scope?"
|
||||
|
||||
4. **Success criteria**
|
||||
- "What specific, observable criteria indicate completion?"
|
||||
- "How will each criterion be verified?"
|
||||
- "Do any criteria name specific tools or technologies? Reframe as capabilities."
|
||||
- "Could each criterion be satisfied by more than one implementation approach?"
|
||||
|
||||
5. **Environment requirements**
|
||||
- "What development environment will be used?"
|
||||
- "Are there runtime dependencies?"
|
||||
- "What tooling versions are required?"
|
||||
|
||||
### Phase 4: Draft
|
||||
|
||||
Create the mission artifact using the format defined in `.flightops/ARTIFACTS.md`.
|
||||
|
||||
### Phase 4b: Technical Viability Check
|
||||
|
||||
Read `{target-project}/.flightops/agent-crews/mission-design.md` for crew definitions and prompts (fall back to defaults at `.claude/skills/init-project/defaults/agent-crews/mission-design.md`).
|
||||
|
||||
**Validate structure**: The phase file MUST contain `## Crew`, `## Interaction Protocol`, and `## Prompts` sections with fenced code blocks. If the file exists but is malformed, STOP and tell the user: "Phase file `mission-design.md` is missing required sections. Either fix it manually or re-run `/init-project` to reset to defaults."
|
||||
|
||||
1. **Spawn an Architect agent** in the target project context (Task tool, `subagent_type: "general-purpose"`)
|
||||
- Provide the "Validate Mission" prompt from the mission-design phase file's Prompts section
|
||||
- The Architect reviews the draft mission against the codebase, stack, and constraints
|
||||
- The Architect provides a structured assessment: feasible / feasible with caveats / not feasible
|
||||
2. **Incorporate feedback** — update the mission artifact to address issues raised
|
||||
- If not feasible: discuss with user, adjust scope or approach
|
||||
- If feasible with caveats: present caveats to user, adjust if needed
|
||||
3. **Human gives final sign-off** before proceeding
|
||||
|
||||
### Phase 5: Iterate
|
||||
|
||||
Present the draft and iterate:
|
||||
|
||||
1. Walk through each section with the user
|
||||
2. Validate success criteria are measurable
|
||||
3. Screen each criterion for technology names, tool names, config file paths, or specific libraries — reframe any that describe implementation rather than capability
|
||||
4. Confirm flight breakdown makes sense
|
||||
5. Refine until the user explicitly approves
|
||||
|
||||
## Guidelines
|
||||
|
||||
### Mission Sizing
|
||||
|
||||
A well-sized mission:
|
||||
- Takes days to weeks to complete
|
||||
- Spawns 5-7 flights typically
|
||||
- Represents a meaningful outcome stakeholders recognize
|
||||
- Has clear success criteria
|
||||
|
||||
**Too small**: Can be completed in a single flight
|
||||
**Too large**: Success criteria are vague or numerous (>10)
|
||||
|
||||
### Outcome vs. Activity
|
||||
|
||||
Frame missions around results, not tasks:
|
||||
|
||||
**Activity-focused** (avoid):
|
||||
> Implement user authentication
|
||||
|
||||
**Outcome-focused** (prefer):
|
||||
> Users can securely access their personal data without sharing credentials
|
||||
|
||||
This applies equally to success criteria. Criteria that name specific tools or technologies constrain the solution space prematurely — if the prescribed tool doesn't work, the criteria become misaligned with reality.
|
||||
|
||||
**Implementation-specific criterion** (avoid):
|
||||
> OAuth tokens are stored in Redis with a 24-hour TTL
|
||||
|
||||
**Capability-focused criterion** (prefer):
|
||||
> Authenticated sessions persist across browser restarts for up to 24 hours
|
||||
|
||||
### Alignment Flight
|
||||
|
||||
During the interview phase, ask the user whether they'd like to include an alignment flight. Explain that this optional flight is a vibe coding session — the user and agent work together interactively, exploring the codebase, trying ideas, and making hands-on adjustments that benefit from human judgment and real-time feedback. It's a space for creative collaboration rather than structured execution. If the user opts in, include it in the suggested breakdown, marked as optional.
|
||||
|
||||
### Adaptive Planning
|
||||
|
||||
- Missions can be updated as understanding develops
|
||||
- New flights can be added during execution
|
||||
- Success criteria can be refined (with stakeholder agreement)
|
||||
|
||||
## Output
|
||||
|
||||
Create the mission artifact using the location and format defined in `.flightops/ARTIFACTS.md`.
|
||||
@@ -0,0 +1,117 @@
|
||||
---
|
||||
name: preflight-check
|
||||
description: Verify all registered projects have current methodology files and crew definitions. Report status and offer to re-initialize projects that need it.
|
||||
---
|
||||
|
||||
# Preflight Check
|
||||
|
||||
Verify all registered projects have current Flight Control methodology files and crew definitions. Report findings and offer to run `/init-project` on projects that need it.
|
||||
|
||||
## When to Use
|
||||
|
||||
- After updating methodology files or adding new skills to mission-control
|
||||
- After adding new default crew files
|
||||
- Periodically, to catch drift across managed projects
|
||||
- When onboarding a new team member who may have stale project setups
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- `projects.md` must exist (run `/init-mission-control` first)
|
||||
|
||||
## Workflow
|
||||
|
||||
### Phase 1: Load Projects Registry
|
||||
|
||||
1. **Read `projects.md`** to get the full list of managed projects
|
||||
2. Extract each project's slug, path, and description
|
||||
|
||||
### Phase 2: Check Each Project
|
||||
|
||||
For each project in the registry:
|
||||
|
||||
1. **Verify project path exists** — if the directory doesn't exist on disk, mark as `unreachable` and skip
|
||||
2. **Run the sync check**:
|
||||
```bash
|
||||
bash "${SKILL_DIR}/../init-project/check-sync.sh" \
|
||||
"${SKILL_DIR}/../init-project" \
|
||||
"{project-path}/.flightops"
|
||||
```
|
||||
3. **Parse the output** and classify the project:
|
||||
|
||||
| Status | Meaning | Action Needed |
|
||||
|--------|---------|---------------|
|
||||
| `missing` | No `.flightops/` directory | Needs `/init-project` |
|
||||
| `outdated` | Methodology files differ from source | Needs `/init-project` |
|
||||
| `current` | Methodology files match | None for methodology |
|
||||
| `agent-crews:missing` | No crew directory at all | Needs `/init-project` |
|
||||
| `agent-crews:empty` | Crew directory exists but empty | Needs `/init-project` |
|
||||
| `crew-missing:{file}` | Specific crew file missing (new skill) | Needs crew file added |
|
||||
| `legacy-layout:*` | Old directory naming detected | Needs migration via `/init-project` |
|
||||
|
||||
### Phase 3: Report
|
||||
|
||||
Present a summary table:
|
||||
|
||||
> **Project Sync Status**
|
||||
>
|
||||
> | Project | Path | Methodology | Crew | Issues |
|
||||
> |---------|------|-------------|------|--------|
|
||||
> | my-app | ~/projects/my-app | current | 1 missing | `routine-maintenance.md` not found |
|
||||
> | api-server | ~/projects/api-server | outdated | current | Methodology files stale |
|
||||
> | frontend | ~/projects/frontend | current | current | — |
|
||||
>
|
||||
> **{N} projects need attention, {M} are current.**
|
||||
|
||||
Group issues by type:
|
||||
- **Needs full init**: projects with `missing`, `agent-crews:missing`, or legacy layouts
|
||||
- **Needs methodology update**: projects with `outdated` status
|
||||
- **Needs new crew files**: projects with `crew-missing` entries (list which files)
|
||||
- **Unreachable**: projects whose paths don't exist on disk
|
||||
|
||||
### Phase 4: Remediate
|
||||
|
||||
If any projects need attention:
|
||||
|
||||
> "Want me to run `/init-project` on the projects that need it?"
|
||||
>
|
||||
> Options:
|
||||
> - **All** — re-init every project that needs attention
|
||||
> - **Select** — choose which projects to update
|
||||
> - **Skip** — just take the report, no changes
|
||||
|
||||
For each project the user selects:
|
||||
|
||||
1. **Run `/init-project`** using the skill workflow (read `.claude/skills/init-project/SKILL.md` and execute)
|
||||
- For projects that only need new crew files: skip straight to Step 6 (Configure Project Crew) — the "If exists (re-run)" path will copy missing crew files from defaults without touching existing ones
|
||||
- For projects that need methodology updates: run the full workflow
|
||||
- For projects with legacy layouts: run from Step 2 (migrations)
|
||||
2. **Report result** — confirm what was updated for each project
|
||||
|
||||
### Phase 5: Summary
|
||||
|
||||
After remediation (or if skipped), output a final status:
|
||||
|
||||
> **Sync complete.**
|
||||
> - {N} projects checked
|
||||
> - {M} projects updated
|
||||
> - {K} projects already current
|
||||
> - {J} projects unreachable (verify paths in `projects.md`)
|
||||
|
||||
## Guidelines
|
||||
|
||||
### Non-Destructive
|
||||
|
||||
This skill never overwrites customized files without user consent. The underlying `/init-project` workflow:
|
||||
- Copies missing crew files from defaults (safe — fills gaps)
|
||||
- Asks before updating existing crew files (respects customization)
|
||||
- Never overwrites `ARTIFACTS.md` (project-specific)
|
||||
|
||||
### Missing Crew Files vs. Drift
|
||||
|
||||
These are distinct situations:
|
||||
- **Missing crew file**: A new default crew was added to mission-control but the project doesn't have it yet. This is a gap — the file should be copied from defaults.
|
||||
- **Crew file drift**: A project's crew file differs from the current default. This is expected — projects customize their crews. Report it as informational but do not flag it as needing remediation.
|
||||
|
||||
### Quick and Quiet
|
||||
|
||||
If all projects are current, say so in one line and stop. No confirmation prompts needed when there's nothing to do.
|
||||
@@ -0,0 +1,387 @@
|
||||
---
|
||||
name: routine-maintenance
|
||||
description: Codebase health assessment and maintenance recommendation. Use after a mission or ad-hoc to verify codebase is flight-ready or scaffold a maintenance mission.
|
||||
---
|
||||
|
||||
# Routine Maintenance
|
||||
|
||||
Perform an exhaustive, aviation-style codebase inspection. It can be triggered after a mission completes or run ad-hoc at any time, and produces a findings report — optionally scaffolding a maintenance mission for significant issues.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Project must be initialized with `/init-project` (`.flightops/ARTIFACTS.md` must exist)
|
||||
|
||||
## Workflow
|
||||
|
||||
### Phase 1: Context Loading
|
||||
|
||||
1. **Identify the target project**
|
||||
- Read `projects.md` to find the project's path
|
||||
|
||||
2. **Verify project is initialized**
|
||||
- Check if `{target-project}/.flightops/ARTIFACTS.md` exists
|
||||
- **If missing**: STOP and tell the user to run `/init-project` first
|
||||
- Do not proceed without the artifact configuration
|
||||
|
||||
3. **Read the artifact configuration**
|
||||
- Read `{target-project}/.flightops/ARTIFACTS.md` for artifact locations and formats
|
||||
|
||||
4. **Load prior maintenance reports (if any exist)**
|
||||
- Read previous reports in `maintenance/` to identify deferred findings from earlier cycles
|
||||
- Deferred findings are those documented in prior reports but not addressed by a maintenance mission
|
||||
- This ensures recurring issues are tracked across cycles rather than re-discovered as "new"
|
||||
|
||||
5. **Load mission and debrief documentation (if available)**
|
||||
- If a recent mission exists, read it for outcome, success criteria, and known issues
|
||||
- If a mission debrief exists, read it for lessons learned and action items
|
||||
- If flight debriefs exist, read them for per-flight technical debt and recommendations
|
||||
- This provides known-debt context so the inspection can distinguish new issues from acknowledged ones
|
||||
- If no mission context is available (ad-hoc run), proceed without known-debt context
|
||||
|
||||
6. **Identify project stack**
|
||||
- Read `README.md`, `CLAUDE.md`, and package files (`package.json`, `Cargo.toml`, `go.mod`, etc.)
|
||||
- Determine language, framework, test runner, linter, formatter, type checker, and dependency audit tool
|
||||
|
||||
### Phase 2: Scoping Interview
|
||||
|
||||
Two parts: category enablement and inspection focus.
|
||||
|
||||
#### Category Enablement
|
||||
|
||||
Categories 1–7 always apply to every project. Ask the user yes/no for optional categories:
|
||||
|
||||
> "Before I begin the inspection, a few quick questions:"
|
||||
>
|
||||
> 1. "Does this project have CI/CD pipelines?" → enables Category 8 + CI/CD Reviewer
|
||||
> 2. "Does this project have deployments, databases, or environment-specific configs?" → enables Category 9
|
||||
> 3. "Does this project have monitoring, metrics, or observability tooling?" → enables Category 10
|
||||
> 4. "Does this project have user-facing UI (web pages, components, forms)?" → enables Accessibility Reviewer
|
||||
|
||||
#### Inspection Focus
|
||||
|
||||
> 5. "Where should the inspection focus?"
|
||||
> - **Recent changes** — "Since when? (last mission, last flight, specific date, N days)"
|
||||
> - **Specific areas** — "Which modules, directories, or concerns?"
|
||||
> - **Full codebase** — "Everything"
|
||||
>
|
||||
> "Any specific areas of concern for this project?"
|
||||
|
||||
Record all responses. The focus selection and areas of concern drive the delegation plan in Phase 3.
|
||||
|
||||
### Phase 3: Delegation Planning
|
||||
|
||||
Before spawning any agents, the Flight Director creates an explicit plan for how to partition work across sub-agents. Each sub-agent has its own context window — the delegation plan ensures no single agent is overwhelmed by project scale.
|
||||
|
||||
#### 3a: Assess Project Size
|
||||
|
||||
Run a quick structural assessment:
|
||||
|
||||
1. **Count source files** — `find {target-project} -name "*.{ext}" -not -path "*/node_modules/*" -not -path "*/.git/*" | wc -l` (use appropriate extensions for the stack)
|
||||
2. **Identify module boundaries** — check for workspace configs (`Cargo.toml` workspaces, `package.json` workspaces, `go.work`), top-level directories with independent configs, or monorepo structure
|
||||
3. **Classify project size**:
|
||||
- **Small**: < 100 source files
|
||||
- **Medium**: 100–500 source files
|
||||
- **Large**: > 500 source files
|
||||
|
||||
#### 3b: Determine Scope
|
||||
|
||||
Based on the interview focus selection:
|
||||
|
||||
- **Recent changes**: Run `git log` / `git diff` to identify changed files since the user's specified cutoff. Map changed files to their containing modules. The inspection scope is the changed files plus their immediate dependencies (imports, shared types, test files).
|
||||
- **Specific areas**: Scope is the user-specified modules/directories. Identify their boundaries and dependencies.
|
||||
- **Full codebase**: Scope is everything. For large projects, this triggers module partitioning (see 3c).
|
||||
|
||||
#### 3c: Build the Delegation Plan
|
||||
|
||||
The plan determines how many agents to spawn, what each one covers, and in what order. Present it to the user for approval before executing.
|
||||
|
||||
**Concurrency constraint**: Claude Code supports a default maximum of **4 concurrent sub-agents**. Additional agents are queued automatically and start as running agents complete. The delegation plan must account for this by organizing agents into **waves** of up to 4.
|
||||
|
||||
**Sub-agent context constraint**: Each sub-agent starts with a fresh context window containing only its prompt. All sub-agent output returns to the Flight Director's context. Verbose results from multiple agents accumulate and can exhaust the Flight Director's context — this makes output discipline a hard constraint, not a suggestion.
|
||||
|
||||
**Delegation rules by scope and size:**
|
||||
|
||||
| Scope | Small/Medium Project | Large Project |
|
||||
|-------|---------------------|---------------|
|
||||
| **Recent changes** | One agent per role, scoped to changed files | Same — changed files are naturally bounded |
|
||||
| **Specific areas** | One agent per role, scoped to specified areas | Same — user has already bounded the scope |
|
||||
| **Full codebase** | One agent per role, full project | **Partition**: see below |
|
||||
|
||||
**Full codebase on large projects — partitioning strategy:**
|
||||
|
||||
The Inspector is the most context-hungry role. Partition it:
|
||||
|
||||
1. **Tool-pass Inspector**: One agent that runs all automated tools (linters, type checkers, test suite, audit commands) across the full project. Tool output is structured and concise — this agent reads command output, not source code.
|
||||
2. **Module Inspectors**: One agent per identified module boundary. Each performs manual code review (dead code, patterns, TODOs, duplication) only within its module. If no clear module boundaries exist, partition by top-level source directories.
|
||||
|
||||
Specialist reviewers (Security, CI/CD, Accessibility) are naturally scoped by their domain and generally do not need partitioning. If a specialist reviewer's scope is too broad (e.g., security review of a 1000-file project), scope it to: files flagged by the tool-pass Inspector + files in security-critical paths (auth, API handlers, data access).
|
||||
|
||||
**Wave planning**: Assign agents to waves of up to 4 concurrent agents. Prioritize by dependency:
|
||||
- **Wave 1**: Tool-pass Inspector + specialist reviewers (Security, CI/CD, Accessibility) — up to 4 agents. The tool pass runs automated commands; specialists do independent manual review. These have no dependencies on each other.
|
||||
- **Wave 2** (if partitioned): Module Inspectors — these can optionally use tool-pass findings to focus their manual review, so they benefit from running after Wave 1. Up to 4 module agents per wave.
|
||||
|
||||
For small/medium projects or scoped inspections, everything fits in a single wave.
|
||||
|
||||
**The delegation plan structure:**
|
||||
|
||||
> **Delegation Plan**
|
||||
>
|
||||
> Scope: {recent changes since X | specific areas: X, Y | full codebase}
|
||||
> Project size: {small | medium | large} (~{N} source files)
|
||||
> Modules: {list of identified modules, or "flat project"}
|
||||
>
|
||||
> **Wave 1** (concurrent):
|
||||
> | # | Agent | Role | Scope | Strategy |
|
||||
> |---|-------|------|-------|----------|
|
||||
> | 1 | Inspector (tools) | Run automated tools | Full project | Commands only, no code reads |
|
||||
> | 2 | Security Reviewer | Auth & injection review | src/auth/, src/api/handlers/ | Manual code path analysis |
|
||||
> | 3 | CI/CD Reviewer | Pipeline review | .github/workflows/ | Config review |
|
||||
> | 4 | Accessibility Reviewer | WCAG 2.1 AA review | src/components/ | UI review |
|
||||
>
|
||||
> **Wave 2** (after Wave 1 completes):
|
||||
> | # | Agent | Role | Scope | Strategy |
|
||||
> |---|-------|------|-------|----------|
|
||||
> | 5 | Inspector (module A) | Manual code review | src/api/ | Categories 4-6 |
|
||||
> | 6 | Inspector (module B) | Manual code review | src/core/ | Categories 4-6 |
|
||||
>
|
||||
> "Here's how I plan to partition the inspection. Want to adjust anything?"
|
||||
|
||||
For small/medium projects or scoped inspections, the plan is straightforward (one wave, one agent per role) and can be presented briefly. Only expand the full table for large/full-codebase inspections.
|
||||
|
||||
#### 3d: Refinement
|
||||
|
||||
This delegation strategy is expected to evolve. After each maintenance run, note in the flight log or maintenance report whether the partitioning was effective:
|
||||
- Did any agent hit context limits or return shallow results?
|
||||
- Were module boundaries appropriate?
|
||||
- Should specialists have been scoped differently?
|
||||
|
||||
These observations inform future delegation plans for the same project.
|
||||
|
||||
### Phase 4: Specialist Review
|
||||
|
||||
Read `{target-project}/.flightops/agent-crews/routine-maintenance.md` for crew definitions and prompts (fall back to defaults at `.claude/skills/init-project/defaults/agent-crews/routine-maintenance.md`).
|
||||
|
||||
**Validate structure**: The crew file MUST contain `## Crew`, `## Interaction Protocol`, and `## Prompts` sections with fenced code blocks. If the file exists but is malformed, STOP and tell the user: "Crew file `routine-maintenance.md` is missing required sections. Either fix it manually or re-run `/init-project` to reset to defaults."
|
||||
|
||||
Execute the delegation plan from Phase 3, wave by wave. Spawn all agents within a wave in parallel (up to 4 concurrent). Wait for a wave to complete before starting the next. Every reviewer is strictly **read-only** — they may run test suites, linters, type checkers, and audit commands, but must NEVER modify source files, configuration, or dependencies.
|
||||
|
||||
Each agent receives:
|
||||
- Its assigned scope from the delegation plan
|
||||
- The relevant prompt from the crew file
|
||||
- The output discipline rules (see Guidelines)
|
||||
|
||||
**Context management**: As each agent completes, summarize its findings before storing them for downstream use. Do not accumulate full raw output from every agent — the Flight Director's context is finite.
|
||||
|
||||
#### Inspector
|
||||
|
||||
1. **Spawn Inspector agent(s)** per the delegation plan (Agent tool, `subagent_type: "general-purpose"`)
|
||||
- Provide the "Inspect Codebase" prompt from the crew file's Prompts section
|
||||
- Include: applicable category list, project stack info, known debt from debriefs, user's areas of concern, and **scope assignment from the delegation plan**
|
||||
- For partitioned inspections: each Inspector agent receives its module/area scope and only the categories relevant to its assignment
|
||||
- The Inspector performs broad automated checks and returns structured findings per category
|
||||
|
||||
#### Security Reviewer
|
||||
|
||||
1. **Spawn a Security Reviewer agent** (Agent tool, `subagent_type: "general-purpose"`)
|
||||
- Provide the "Review Security" prompt from the crew file's Prompts section
|
||||
- Include: project stack info, known security debt from debriefs, and **scope assignment from the delegation plan**
|
||||
- The Security Reviewer performs focused manual analysis of auth flows, injection surfaces, secrets handling, and data exposure — deeper than the Inspector's Category 1 automated checks
|
||||
|
||||
#### CI/CD Reviewer (if enabled)
|
||||
|
||||
1. **Spawn a CI/CD Reviewer agent** (Agent tool, `subagent_type: "general-purpose"`)
|
||||
- Provide the "Review CI/CD" prompt from the crew file's Prompts section
|
||||
- Include: project stack info, known CI/CD debt from debriefs
|
||||
- The CI/CD Reviewer evaluates pipeline definitions, build security, deployment safeguards, and environment consistency
|
||||
|
||||
#### Accessibility Reviewer (if enabled)
|
||||
|
||||
1. **Spawn an Accessibility Reviewer agent** (Agent tool, `subagent_type: "general-purpose"`)
|
||||
- Provide the "Review Accessibility" prompt from the crew file's Prompts section
|
||||
- The Accessibility Reviewer evaluates against WCAG 2.1 AA standards, semantic HTML, keyboard navigation, screen reader compatibility, and color contrast
|
||||
|
||||
#### Merge Partitioned Results
|
||||
|
||||
If the Inspector was partitioned into multiple agents, merge their findings into a single consolidated findings list before proceeding to Phase 5. De-duplicate findings that appear in multiple modules.
|
||||
|
||||
### Phase 5: Severity Assessment & Roundtable
|
||||
|
||||
#### 5a: Initial Assessment
|
||||
|
||||
1. **Spawn an Architect agent** (Opus) with all reviewer findings + debrief context (Agent tool, `subagent_type: "general-purpose"`)
|
||||
- Provide the "Assess Findings" prompt from the crew file's Prompts section
|
||||
- Include: all reviewer findings, known debt items (if available from debriefs)
|
||||
- The Architect assigns an initial severity to each finding and raises challenges or questions directed at specific reviewers where findings seem incorrect, overlapping, or missing context
|
||||
|
||||
#### 5b: Roundtable
|
||||
|
||||
The Architect and specialist reviewers hash out findings through structured discussion. The Flight Director mediates.
|
||||
|
||||
1. **Route Architect's challenges** to the relevant reviewers
|
||||
- For each reviewer with outstanding challenges, spawn them with the "Roundtable Rebuttal" prompt from the crew file
|
||||
- Include the Architect's specific challenges directed at that reviewer
|
||||
- Reviewers respond with evidence, rebuttals, or concurrence
|
||||
2. **Collect responses** from all challenged reviewers
|
||||
3. **Return to Architect** with roundtable responses
|
||||
- Spawn Architect with the "Roundtable Resolution" prompt from the crew file
|
||||
- Include all reviewer rebuttals and new evidence
|
||||
- Architect produces final assessment incorporating the discussion
|
||||
4. **Max 2 roundtable cycles** — if consensus isn't reached after 2 rounds, present both perspectives to the human in Phase 6 and let them decide
|
||||
|
||||
#### Severity Scale
|
||||
|
||||
| Severity | Meaning |
|
||||
|----------|---------|
|
||||
| **Pass** | No issue found |
|
||||
| **Advisory** | Minor issue, deferring is acceptable |
|
||||
| **Action Required** | Should be addressed before next major work cycle |
|
||||
| **Critical** | Blocks further work, immediate attention needed |
|
||||
|
||||
The Architect produces an overall assessment:
|
||||
- **Flight Ready** — All findings are Pass or Advisory
|
||||
- **Maintenance Required** — Any finding is Action Required or Critical
|
||||
|
||||
### Phase 6: Human Review and Scoping
|
||||
|
||||
Present findings grouped by severity (Critical first, then Action Required, Advisory, Pass):
|
||||
|
||||
> **Overall Assessment: {Flight Ready | Maintenance Required}**
|
||||
>
|
||||
> {Findings summary table}
|
||||
|
||||
Then ask:
|
||||
1. "Do these findings match your sense of the codebase health?"
|
||||
2. "Any findings to override or adjust severity?"
|
||||
|
||||
Apply any overrides the user requests.
|
||||
|
||||
#### Scope Selection
|
||||
|
||||
If the assessment is Maintenance Required, help the user choose a manageable scope rather than scaffolding everything. Present a recommended shortlist:
|
||||
|
||||
> **Recommended scope** (Critical items are always included):
|
||||
>
|
||||
> {Numbered list of Critical + top Action Required findings, capped at ~5-7 items}
|
||||
>
|
||||
> {Count} additional Action Required and {count} Advisory findings are documented in the report for a future cycle.
|
||||
>
|
||||
> "Want me to scaffold a maintenance mission for these items? You can add or remove findings from the list."
|
||||
|
||||
The goal is a mission that can land in a single focused session. All findings are captured in the report regardless — deferred items aren't lost, they'll surface again in the next maintenance cycle. This keeps maintenance approachable even when the backlog is large.
|
||||
|
||||
### Phase 7: Generate Maintenance Report
|
||||
|
||||
Create the maintenance report artifact at the location defined in `.flightops/ARTIFACTS.md` (typically `maintenance/YYYY-MM-DD.md`). If a report already exists for today's date, append a numeric suffix (e.g., `2026-03-26-2.md`).
|
||||
|
||||
**Report contents:**
|
||||
- Title (date-based) and date
|
||||
- Optional "Triggered by" link to the mission that prompted the inspection (if applicable)
|
||||
- Inspection scope and delegation plan summary
|
||||
- Overall assessment (Flight Ready / Maintenance Required)
|
||||
- Categories inspected
|
||||
- Executive summary
|
||||
- Findings by category (each with severity, description, evidence, recommendation)
|
||||
- Severity summary (counts per level)
|
||||
- Known debt carried forward (from debriefs, acknowledged but not addressed)
|
||||
- Delegation effectiveness notes (for refining future inspections)
|
||||
- Recommendations
|
||||
|
||||
### Phase 8: Scaffold Maintenance Mission (conditional)
|
||||
|
||||
**Only if**: Overall assessment is Maintenance Required AND the user confirmed they want a maintenance mission. Only the findings the user selected in Phase 6 are scaffolded — deferred findings remain in the report for future cycles.
|
||||
|
||||
This phase produces the full artifact tree — mission, flights, and legs — so the maintenance work is ready for `/agentic-workflow` execution without running `/mission`, `/flight`, or `/leg` separately.
|
||||
|
||||
#### 8a. Mission
|
||||
|
||||
Scan existing `missions/` directories to determine the next sequence number `{NN}`. Create `missions/{NN}-maintenance/mission.md` using the standard mission format from `.flightops/ARTIFACTS.md`:
|
||||
- **Status**: `planning`
|
||||
- **Outcome**: "Resolve codebase health issues identified in maintenance report {YYYY-MM-DD}"
|
||||
- **Context**: Link to the maintenance report in `maintenance/{YYYY-MM-DD}.md`
|
||||
- **Success Criteria**: One criterion per selected finding
|
||||
- **Flights**: List the flights from step 8b
|
||||
- Populate all standard mission sections. Mark sections with no relevant content as "N/A" (e.g., Open Questions, Stakeholders).
|
||||
|
||||
#### 8b. Flights
|
||||
|
||||
Re-group the user's selected findings into flights. Use the Architect's recommended groupings as a starting point, but adjust for any findings the user removed or added during Phase 6 scoping. Typical groupings: one flight per category with actionable findings, or by technical area when findings from different categories affect the same subsystem.
|
||||
|
||||
Each flight gets its own directory with `flight.md` and `flight-log.md`. Use the standard formats from `.flightops/ARTIFACTS.md` with these maintenance-specific notes:
|
||||
|
||||
- **Status**: `ready` — maintenance flights skip the Pre-Flight phase (no open questions or design decisions to resolve for concrete fixes). Mark Pre-Flight Checklist items as N/A.
|
||||
- **Mission**: Link back to the maintenance mission
|
||||
- **Objective**: What this group of fixes accomplishes
|
||||
- **Technical Approach**: Brief description of the fix strategy per finding
|
||||
- **Legs**: List the legs from step 8c
|
||||
- Populate all other standard flight sections. Mark sections with no relevant content as "N/A".
|
||||
|
||||
The `flight-log.md` is created empty (header only) — it will be populated during execution.
|
||||
|
||||
#### 8c. Legs
|
||||
|
||||
Each flight gets one leg per discrete fix. Create leg files using the standard format from `.flightops/ARTIFACTS.md` with these fields populated:
|
||||
|
||||
- **Status**: `ready`
|
||||
- **Flight**: Link back to the flight
|
||||
- **Objective**: Fix one specific finding (reference the finding number from the report)
|
||||
- **Context**: Link to the maintenance report finding and the Architect's recommendation
|
||||
- **Inputs/Outputs**: Files that exist before and after the fix
|
||||
- **Acceptance Criteria**: The specific condition that resolves the finding — derived from the Architect's recommendation
|
||||
- **Verification Steps**: How to confirm the fix (e.g., "run `npm audit` and confirm no high/critical vulnerabilities", "run `cargo clippy` with no warnings")
|
||||
- **Implementation Guidance**: Concrete steps to resolve the finding, based on the Inspector's evidence and the Architect's recommendation
|
||||
- **Files Affected**: List files identified in the Inspector's evidence
|
||||
- Mark sections with no relevant content as "N/A" (e.g., Edge Cases for straightforward dependency updates).
|
||||
|
||||
Keep legs atomic — one finding, one fix. If a finding requires touching many files but is conceptually one change (e.g., "replace all `any` casts"), that's still one leg.
|
||||
|
||||
#### 8d. Update Report Backlink
|
||||
|
||||
After scaffolding, update the "Maintenance Mission" section at the bottom of the maintenance report (from Phase 7) with a link to the newly created mission.
|
||||
|
||||
## Guidelines
|
||||
|
||||
### Read-Only Inspection
|
||||
|
||||
This skill NEVER modifies source files, configuration, or dependencies in the target project. The Inspector runs checks and reports findings. The only files created are the maintenance report and optionally a full maintenance mission scaffold (mission, flights, and legs) — all are Flight Control artifacts.
|
||||
|
||||
### Output Discipline
|
||||
|
||||
This is a **hard constraint**, not a suggestion. All sub-agent output returns to the Flight Director's context window. With 4+ reviewers plus roundtable agents, verbose output will exhaust the Flight Director's context and break the workflow.
|
||||
|
||||
All reviewer agents must follow these rules:
|
||||
|
||||
1. **Summary-first**: Each finding includes a title, severity estimate, file path, and a one-line evidence summary
|
||||
2. **Selective detail**: Include code excerpts or extended evidence only for Critical and Action Required severity findings
|
||||
3. **No raw dumps**: Never paste full command output, full file contents, or long dependency lists — summarize and reference
|
||||
4. **Drill-down on demand**: If the Architect or roundtable needs deeper evidence for a specific finding, the Flight Director spawns a follow-up agent scoped to that single finding
|
||||
|
||||
The Flight Director must also enforce this when collecting results — if an agent returns excessively verbose output, summarize it before passing to downstream agents.
|
||||
|
||||
### Known Debt Awareness
|
||||
|
||||
Cross-reference Inspector findings against known debt from debriefs. Findings that match acknowledged debt should note "previously identified in {debrief}" rather than presenting them as new discoveries. This prevents alarm fatigue.
|
||||
|
||||
### Proportional Response
|
||||
|
||||
Not every codebase needs a maintenance mission. If the inspection finds only Advisory items, the report should clearly state "Flight Ready" and not push for unnecessary work.
|
||||
|
||||
### Honest Assessment
|
||||
|
||||
Report what you find, even if the codebase is in great shape. A clean report is valuable — it confirms the team's work quality and builds confidence for the next mission.
|
||||
|
||||
### Severity Calibration
|
||||
|
||||
- **Critical** is reserved for issues that would cause failures, security vulnerabilities, or data loss
|
||||
- **Action Required** means the issue will compound or cause problems if left for another cycle
|
||||
- **Advisory** is for genuine improvements that have no urgency
|
||||
- **Pass** means the category was inspected and is healthy
|
||||
|
||||
## Output
|
||||
|
||||
Create the maintenance report artifact using the location and format defined in `.flightops/ARTIFACTS.md`.
|
||||
|
||||
After generating the report, summarize:
|
||||
1. Overall assessment (Flight Ready or Maintenance Required)
|
||||
2. Count of findings by severity
|
||||
3. Top recommendations
|
||||
4. Whether a maintenance mission was scaffolded (and if so, how many flights and legs)
|
||||
44
container/mission-control/.gitignore
vendored
Normal file
44
container/mission-control/.gitignore
vendored
Normal file
@@ -0,0 +1,44 @@
|
||||
# Local configuration
|
||||
projects.md
|
||||
daily-briefings/
|
||||
.claude/settings.local.json
|
||||
|
||||
# OS files
|
||||
.DS_Store
|
||||
Thumbs.db
|
||||
Desktop.ini
|
||||
|
||||
# Editors and IDEs
|
||||
*.swp
|
||||
*.swo
|
||||
*~
|
||||
.vscode/
|
||||
.idea/
|
||||
*.sublime-project
|
||||
*.sublime-workspace
|
||||
|
||||
# Python
|
||||
__pycache__/
|
||||
*.py[cod]
|
||||
*.egg-info/
|
||||
dist/
|
||||
build/
|
||||
.venv/
|
||||
venv/
|
||||
env/
|
||||
uv.lock
|
||||
*.egg
|
||||
|
||||
# Node
|
||||
node_modules/
|
||||
npm-debug.log*
|
||||
yarn-debug.log*
|
||||
yarn-error.log*
|
||||
.pnpm-debug.log*
|
||||
|
||||
# Environment and secrets
|
||||
.env
|
||||
.env.*
|
||||
!.env.example
|
||||
*.pem
|
||||
*.key
|
||||
100
container/mission-control/CLAUDE.md
Normal file
100
container/mission-control/CLAUDE.md
Normal file
@@ -0,0 +1,100 @@
|
||||
# CLAUDE.md
|
||||
|
||||
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
|
||||
|
||||
## Invocation Context
|
||||
|
||||
You may be invoked by:
|
||||
- **A human** — Interactive session, ask questions freely
|
||||
- **An LLM orchestrator** — Run `/agentic-workflow` to drive multi-agent flight execution
|
||||
|
||||
When orchestrated, you are the **Flight Director** — responsible for driving execution, coordinating agents, and making go/no-go decisions. Emit signals like `[HANDOFF:review-needed]` and `[COMPLETE:leg]` at appropriate points. The orchestrator monitors your output for these markers.
|
||||
|
||||
**When a human says a leg is ready to implement**, invoke `/agentic-workflow`. Do not read the leg spec, do not plan execution steps, do not execute commands directly. You become the orchestrator by loading the skill.
|
||||
|
||||
### Loading Skills in Non-Interactive Contexts
|
||||
|
||||
**The Skill tool is ONLY available in interactive human sessions.** If you are a spawned agent, running via `claude -p`, or inside a container/SDK — you do NOT have the Skill tool. Do not attempt to call it.
|
||||
|
||||
To execute a skill, read its SKILL.md file directly and follow the workflow:
|
||||
|
||||
```
|
||||
Read .claude/skills/{skill-name}/SKILL.md and execute the workflow described there.
|
||||
```
|
||||
|
||||
**All Flight Control skills** (listed in the table below) **live in this repository** (mission-control), under `.claude/skills/`. The Flight Director runs from the mission-control directory, so relative `.claude/skills/` paths in skill docs resolve here. Target projects may have their own unrelated skills in their own `.claude/skills/` directories — those are separate.
|
||||
|
||||
## First-Contact Check
|
||||
|
||||
If `projects.md` does not exist in this repository, suggest running `/init-mission-control` to set up the projects registry before proceeding with any other skills.
|
||||
|
||||
## Project Overview
|
||||
|
||||
Flight Control is an AI-first software development lifecycle methodology using aviation metaphors. It organizes work into three hierarchical levels:
|
||||
|
||||
- **Missions** (human-optimized) — Define outcomes in human terms, days-to-weeks scope
|
||||
- **Flights** (balanced) — Technical specifications with pre/in/post-flight checklists, hours-to-days scope
|
||||
- **Legs** (AI-optimized) — Structured implementation steps with explicit acceptance criteria, minutes-to-hours scope
|
||||
|
||||
This repository contains the methodology documentation and Claude Code skills for interactive planning.
|
||||
|
||||
## Claude Code Skills
|
||||
|
||||
Eleven skills automate the planning, execution, debrief, and oversight workflow:
|
||||
|
||||
| Skill | Purpose |
|
||||
|-------|---------|
|
||||
| `/init-mission-control` | Onboard to Mission Control (set up `projects.md` registry) |
|
||||
| `/init-project` | Initialize a project for Flight Control (creates `.flightops/` directory) |
|
||||
| `/mission` | Create outcome-driven missions through research and interview |
|
||||
| `/flight` | Create technical flight specs from missions |
|
||||
| `/leg` | Generate implementation guidance for LLM execution |
|
||||
| `/agentic-workflow` | Drive multi-agent flight execution (design, implement, review, commit) |
|
||||
| `/flight-debrief` | Post-flight analysis for continuous improvement |
|
||||
| `/mission-debrief` | Post-mission retrospective for outcomes assessment |
|
||||
| `/routine-maintenance` | Post-mission codebase health assessment and maintenance recommendation |
|
||||
| `/preflight-check` | Verify all projects have current methodology files and crew definitions |
|
||||
| `/daily-briefing` | Cross-project status report with health assessment and methodology insights |
|
||||
|
||||
Run `/init-project` before using the other skills on a new project to create the flight operations reference directory and configure the artifact system.
|
||||
|
||||
**Artifact Systems:** Each project defines how artifacts are stored in `.flightops/ARTIFACTS.md`. Skills read this configuration and adapt their output accordingly.
|
||||
|
||||
**IMPORTANT: Planning skills produce documentation only.** `/init-project`, `/mission`, `/flight`, `/leg`, `/flight-debrief`, `/mission-debrief`, and `/routine-maintenance` must:
|
||||
- **NEVER implement code changes** — only create/update artifacts
|
||||
- **NEVER modify source files** in the target project (no `.rs`, `.ts`, `.tsx`, `.json`, etc.)
|
||||
|
||||
`/agentic-workflow` orchestrates implementation by spawning separate agents that execute code changes in the target project. The orchestrator itself never modifies source files directly.
|
||||
|
||||
> **Phase gates require confirmation.** Missions must be fully agreed before designing
|
||||
> flights. Flights must be fully agreed before designing legs. Never skip ahead — get
|
||||
> explicit user confirmation at each transition.
|
||||
|
||||
## Projects Registry
|
||||
|
||||
The `projects.md` file in this repository catalogs all active projects on this device. When using skills:
|
||||
|
||||
1. **Read `projects.md` first** to find the target project's path, remote, and description
|
||||
2. **Read `.flightops/ARTIFACTS.md`** in the target project to determine artifact locations
|
||||
3. **Create all artifacts in the target project** — not in mission-control
|
||||
|
||||
The registry provides:
|
||||
- Project slug and description
|
||||
- Filesystem path (e.g., `~/projects/my-app`)
|
||||
- Git remote
|
||||
- Optional stack and status information
|
||||
|
||||
## Lifecycle States
|
||||
|
||||
- **Missions**: `planning` → `active` → `completed` (or `aborted`)
|
||||
- **Flights**: `planning` → `ready` → `in-flight` → `landed` → `completed` (or `aborted`)
|
||||
- **Legs**: `planning` → `ready` → `in-flight` → `landed` → `completed` (or `aborted`)
|
||||
|
||||
## Public Repository
|
||||
|
||||
This is a public repository. Keep all committed content anonymized:
|
||||
|
||||
- **No personal paths** — Use generic examples like `~/projects/my-app`, not actual home directories
|
||||
- **No usernames** — Use placeholders like `username` in examples
|
||||
- **No project-specific details** — Keep examples generic
|
||||
- `projects.md` is gitignored for this reason — it contains local paths and is not committed
|
||||
21
container/mission-control/LICENSE
Normal file
21
container/mission-control/LICENSE
Normal file
@@ -0,0 +1,21 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2025 Flight Control Contributors
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
201
container/mission-control/README.md
Normal file
201
container/mission-control/README.md
Normal file
@@ -0,0 +1,201 @@
|
||||
# Flight Control
|
||||
|
||||
An AI-first software development lifecycle methodology using aviation metaphors to bridge human intent and AI execution.
|
||||
|
||||
## What is Mission Control?
|
||||
|
||||
This repository is a **centralized command center** for managing multiple projects in parallel. Each project may have its own stack, systems, and constraints, but mission-control provides a consistent workflow and orchestration layer across all of them.
|
||||
|
||||
- **Project registry** — Track active projects with paths, remotes, and configurations
|
||||
- **Shared methodology** — Apply structured planning regardless of project differences
|
||||
- **Claude Code skills** — Interactive tools for mission, flight, and leg creation
|
||||
|
||||
Artifacts (missions, flights, legs) are created in target projects, not here. Mission-control holds the methodology, skills, and coordination—your projects hold the work.
|
||||
|
||||
## The Aviation Model
|
||||
|
||||
Flight Control organizes work into three hierarchical levels, each optimized for its primary audience:
|
||||
|
||||
```
|
||||
Mission (human-optimized)
|
||||
└── Flight (balanced)
|
||||
└── Leg (AI-optimized)
|
||||
```
|
||||
|
||||
- **Missions** define outcomes in human terms—what success looks like and why it matters
|
||||
- **Flights** translate outcomes into technical specifications with planning checklists
|
||||
- **Legs** provide structured, specific instructions optimized for AI consumption
|
||||
|
||||
## Why Aviation?
|
||||
|
||||
Aviation succeeds through layered planning and clear handoffs. Pilots follow flight plans but improvise when conditions demand it—weather, emergencies, ATC instructions. Structured planning enables effective improvisation by providing a baseline to deviate from and return to. Similarly, Flight Control separates strategic intent (missions) from tactical execution (legs), with flights serving as the translation layer.
|
||||
|
||||
## Agentic Workflow
|
||||
|
||||
**LLM orchestrators**: Run `/agentic-workflow` to drive multi-agent flight execution with Claude Code. The skill orchestrates the full leg cycle — design, implement, review, commit — using three separate Claude instances.
|
||||
|
||||
## Getting Started
|
||||
|
||||
### Prerequisites
|
||||
|
||||
- [Claude Code](https://docs.anthropic.com/en/docs/claude-code) installed
|
||||
- A project on disk with a git remote, initialized with Claude Code (`claude /init`)
|
||||
|
||||
### Walkthrough
|
||||
|
||||
1. **Clone mission-control** — Clone this repo and open it in Claude Code.
|
||||
|
||||
2. **Set up the projects registry** — Run `/init-mission-control` (or manually copy `projects.md.template` → `projects.md` and fill in your project details). This creates the central registry that all skills read from.
|
||||
|
||||
3. **Initialize your project** — Run `/init-project` and select your project. This creates `.flightops/` in your target project with artifact configuration, methodology reference, and crew definitions.
|
||||
|
||||
4. **Review agent crew files** — Check the files in `{project}/.flightops/agent-crews/`. These define the crew composition (roles, models, prompts) for each phase. Customize them to your needs.
|
||||
|
||||
5. **Create a mission** — Run `/mission`. This interviews you about desired outcomes and creates a mission artifact in your target project.
|
||||
|
||||
6. **Design a flight** — Run `/flight` to break the mission into a technical specification with pre/in/post-flight checklists.
|
||||
|
||||
7. **Execute** — Run `/agentic-workflow` to drive multi-agent implementation. This orchestrates design, implement, review, and commit cycles across legs.
|
||||
|
||||
8. **Debrief** — Run `/flight-debrief` and `/mission-debrief` after completion to capture lessons learned.
|
||||
|
||||
## Documentation
|
||||
|
||||
1. **[Overview](docs/overview.md)** — Philosophy and principles behind Flight Control
|
||||
2. **[Missions](docs/missions.md)** — Writing outcome-driven mission statements
|
||||
3. **[Flights](docs/flights.md)** — Creating technical specifications with pre/post checklists
|
||||
4. **[Flight Logs](docs/flight-logs.md)** — Recording execution progress and decisions
|
||||
5. **[Legs](docs/legs.md)** — Structuring AI-optimized implementation steps
|
||||
6. **[Workflow](docs/workflow.md)** — End-to-end flow from mission to completion
|
||||
|
||||
## Core Concepts
|
||||
|
||||
### The Audience Gradient
|
||||
|
||||
Documentation becomes progressively more structured as it moves down the hierarchy:
|
||||
|
||||
| Level | Audience | Style |
|
||||
|-------|----------|-------|
|
||||
| Mission | Humans, stakeholders | Narrative prose, outcome-focused |
|
||||
| Flight | Developers, AI | Technical spec with checklists |
|
||||
| Leg | AI agents | Structured format, explicit criteria |
|
||||
|
||||
### Lifecycle States
|
||||
|
||||
Each level tracks progress through defined states:
|
||||
|
||||
- **Missions**: `planning` → `active` → `completed` (or `aborted`)
|
||||
- **Flights**: `planning` → `ready` → `in-flight` → `landed` → `completed` (or `aborted`)
|
||||
- **Legs**: `planning` → `ready` → `in-flight` → `landed` → `completed` (or `aborted`)
|
||||
|
||||
### Scaling
|
||||
|
||||
Flight Control scales from solo developers to teams. The methodology provides structure and continuity across sessions regardless of team size.
|
||||
|
||||
## Artifact Organization
|
||||
|
||||
The hierarchy nests naturally:
|
||||
|
||||
```
|
||||
Mission
|
||||
├── Mission Debrief
|
||||
└── Flight
|
||||
├── Flight Log
|
||||
├── Flight Briefing
|
||||
├── Flight Debrief
|
||||
└── Leg
|
||||
```
|
||||
|
||||
How you store these artifacts depends on your project's needs. Flight Control supports multiple artifact systems:
|
||||
|
||||
- **Markdown files** — Version-controlled documentation in your repository
|
||||
- **Issue trackers** — Jira, Linear, GitHub Issues with linked relationships
|
||||
- **Hybrid** — Missions in markdown, flights/legs as tickets
|
||||
|
||||
Each project configures its artifact system during initialization. The methodology and Claude Code skills adapt to your choice.
|
||||
|
||||
## Claude Code Skills
|
||||
|
||||
Flight Control includes Claude Code skills for interactive planning:
|
||||
|
||||
| Skill | Purpose |
|
||||
|-------|---------|
|
||||
| `/init-mission-control` | Set up the projects registry |
|
||||
| `/init-project` | Initialize a project for Flight Control |
|
||||
| `/mission` | Create outcome-driven missions through research and interview |
|
||||
| `/flight` | Create technical flight specs from missions |
|
||||
| `/leg` | Generate implementation guidance for LLM execution |
|
||||
| `/flight-debrief` | Post-flight analysis for continuous improvement |
|
||||
| `/agentic-workflow` | Drive multi-agent flight execution |
|
||||
| `/mission-debrief` | Post-mission retrospective for outcomes assessment |
|
||||
| `/daily-briefing` | Cross-project status report with health assessment |
|
||||
|
||||
## Recommended Workflow
|
||||
|
||||
All work runs from a single **Mission Control** session. Mission Control handles planning directly and spawns agents into the target project's context for implementation, review, and commits. Each spawned agent gets a clean context with only the information it needs, while Mission Control maintains continuity across the entire flight.
|
||||
|
||||
### Context Strategy
|
||||
|
||||
- **Mission Control**: Long-running session spanning an entire flight — accumulates knowledge across legs, orchestrates all work
|
||||
- **Spawned agents**: Fresh context per task — designed with precise instructions and the relevant artifacts, execute in the target project directory
|
||||
|
||||
Claude Code's version control in mission-control acts as the orchestrator for development of the remote project. No second interactive session is needed.
|
||||
|
||||
### The Cycle
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant MC as Mission Control
|
||||
participant A as Spawned Agents
|
||||
|
||||
Note over MC,A: ─── Mission Planning ───
|
||||
MC->>MC: /mission — research, interview, define outcomes
|
||||
MC->>MC: Review and confirm mission
|
||||
|
||||
Note over MC,A: ─── Flight Planning ───
|
||||
MC->>MC: /flight — create technical spec, checklists
|
||||
MC->>MC: Review and confirm flight
|
||||
|
||||
Note over MC,A: ─── Execution ───
|
||||
MC->>MC: /agentic-workflow
|
||||
|
||||
loop For each leg
|
||||
Note over MC: Design phase
|
||||
MC->>A: Spawn designer agent
|
||||
A->>A: Design leg spec
|
||||
A-->>MC: Leg designed
|
||||
|
||||
MC->>MC: Review leg design
|
||||
|
||||
Note over MC: Implement phase
|
||||
MC->>A: Spawn implementer agent<br/>(target project context)
|
||||
A->>A: Implement leg, update logs
|
||||
A-->>MC: Implementation complete
|
||||
|
||||
Note over MC: Review phase
|
||||
MC->>A: Spawn reviewer agent<br/>(target project context)
|
||||
A->>A: Review changes, verify criteria
|
||||
A-->>MC: Review complete
|
||||
|
||||
Note over MC: Commit phase
|
||||
MC->>A: Spawn commit agent<br/>(target project context)
|
||||
A->>A: Stage and commit changes
|
||||
A-->>MC: Committed
|
||||
|
||||
MC->>MC: Update flight checklist
|
||||
end
|
||||
|
||||
Note over MC,A: Flight lands
|
||||
|
||||
Note over MC,A: ─── Debrief ───
|
||||
MC->>MC: /flight-debrief
|
||||
MC->>MC: /mission-debrief
|
||||
```
|
||||
|
||||
### Why This Matters
|
||||
|
||||
A single orchestrating session eliminates context drift between planning and execution. Mission Control sees every leg's outcome and carries that knowledge forward into the next design. Spawned agents get clean, focused contexts — they don't need flight-wide memory because Mission Control provides exactly the context they need. Artifacts stay synchronized because one session owns the full lifecycle.
|
||||
|
||||
## License
|
||||
|
||||
[MIT](LICENSE)
|
||||
252
container/mission-control/docs/flight-logs.md
Normal file
252
container/mission-control/docs/flight-logs.md
Normal file
@@ -0,0 +1,252 @@
|
||||
# Flight Logs
|
||||
|
||||
Flight logs are living records created during flight execution. They capture what actually happened—when legs started and ended, what changed, decisions made mid-flight, and any deviations or anomalies encountered.
|
||||
|
||||
## What is a Flight Log?
|
||||
|
||||
A flight log is the authoritative record of a flight's execution. While the flight document describes the *plan*, the flight log documents *reality*. There is exactly one flight log per flight, updated continuously during execution.
|
||||
|
||||
**Important**: Flight logs are **append-only** during execution. Never delete or modify existing entries—only add new ones. This preserves the historical record and ensures nothing is lost when revisiting decisions later.
|
||||
|
||||
### Flight Log vs. Other Artifacts
|
||||
|
||||
| Artifact | Purpose | When Written | Updates |
|
||||
|----------|---------|--------------|---------|
|
||||
| Flight | Plan what to do | Before execution | During planning only |
|
||||
| Flight Log | Record what happened | During execution | Continuously |
|
||||
| Leg | Instructions for one task | Before execution | Never (immutable) |
|
||||
|
||||
## Why Flight Logs Matter
|
||||
|
||||
### For Current Execution
|
||||
|
||||
Flight logs provide:
|
||||
- **Continuity**: When work resumes after interruption, the log shows where things stand
|
||||
- **Decision history**: Why choices were made when the plan didn't fit reality
|
||||
- **Anomaly tracking**: Issues that didn't block progress but need future attention
|
||||
|
||||
### For Future Legs
|
||||
|
||||
When creating new legs, the flight log reveals:
|
||||
- What actually worked vs. what was planned
|
||||
- Decisions that affect subsequent implementation
|
||||
- Discovered complexity that should inform estimates
|
||||
- Patterns or anti-patterns emerging from execution
|
||||
|
||||
### For Post-Flight Debrief
|
||||
|
||||
The flight log enables meaningful retrospectives by capturing:
|
||||
- Actual vs. planned progression
|
||||
- Root causes of deviations
|
||||
- Learnings that should inform future flights
|
||||
|
||||
## Flight Log Structure
|
||||
|
||||
```markdown
|
||||
# Flight Log: {Flight Title}
|
||||
|
||||
## Flight Reference
|
||||
[{Flight Title}](flight.md)
|
||||
|
||||
## Summary
|
||||
Brief overview of the flight's execution status and key outcomes.
|
||||
|
||||
---
|
||||
|
||||
## Leg Progress
|
||||
|
||||
### {Leg Name}
|
||||
**Status**: completed | landed | in-flight | aborted
|
||||
**Started**: {timestamp}
|
||||
**Completed**: {timestamp}
|
||||
|
||||
#### Changes Made
|
||||
- {Summary of what was implemented}
|
||||
- {Files modified and why}
|
||||
|
||||
#### Notes
|
||||
{Any relevant observations during execution}
|
||||
|
||||
---
|
||||
|
||||
## Decisions
|
||||
|
||||
Choices made during execution that weren't in the original plan.
|
||||
|
||||
### {Decision Title}
|
||||
**Context**: Why this decision was needed
|
||||
**Options Considered**: What alternatives existed
|
||||
**Decision**: What was chosen
|
||||
**Rationale**: Why this choice was made
|
||||
**Impact**: How this affects the flight or future legs
|
||||
|
||||
---
|
||||
|
||||
## Deviations
|
||||
|
||||
Departures from the planned approach.
|
||||
|
||||
### {Deviation Title}
|
||||
**Planned**: What the flight specified
|
||||
**Actual**: What was done instead
|
||||
**Reason**: Why the deviation was necessary
|
||||
**Outcome**: Result of the deviation
|
||||
|
||||
---
|
||||
|
||||
## Anomalies
|
||||
|
||||
Unexpected issues or behaviors encountered.
|
||||
|
||||
### {Anomaly Title}
|
||||
**Observed**: What happened
|
||||
**Expected**: What should have happened
|
||||
**Severity**: blocking | degraded | cosmetic
|
||||
**Resolution**: How it was handled (or "unresolved")
|
||||
**Follow-up**: Any actions needed later
|
||||
|
||||
---
|
||||
|
||||
## Session Notes
|
||||
|
||||
Chronological notes from each work session.
|
||||
|
||||
### {Date/Session Identifier}
|
||||
- {Note about progress or observation}
|
||||
- {Note about what was attempted}
|
||||
- {Note about what was learned}
|
||||
```
|
||||
|
||||
## When to Update the Flight Log
|
||||
|
||||
### Starting a Leg
|
||||
|
||||
Record:
|
||||
- Leg name and start timestamp
|
||||
- Any context carried from previous legs
|
||||
- Initial observations about the task
|
||||
|
||||
### During Execution
|
||||
|
||||
Record:
|
||||
- Decisions that diverge from or extend the plan
|
||||
- Unexpected complexity or simplicity
|
||||
- Anomalies encountered (even if resolved)
|
||||
|
||||
### Completing a Leg
|
||||
|
||||
Record:
|
||||
- Completion timestamp
|
||||
- Summary of changes made
|
||||
- Files modified
|
||||
- Any notes for future legs
|
||||
|
||||
### Encountering Issues
|
||||
|
||||
Record immediately:
|
||||
- What went wrong
|
||||
- What was expected
|
||||
- Severity and impact
|
||||
- Resolution or workaround
|
||||
|
||||
## Using Flight Logs When Creating Legs
|
||||
|
||||
When generating new legs with `/leg`, the flight log must be consulted to understand:
|
||||
|
||||
1. **What's actually complete**: The log shows real completion status, not just planned progress
|
||||
2. **Decisions made**: Mid-flight decisions may change how subsequent legs should be implemented
|
||||
3. **Discovered context**: Anomalies and deviations reveal system behavior that affects new work
|
||||
4. **Patterns established**: Implementation approaches that worked (or didn't) inform future guidance
|
||||
|
||||
### Example: Log Informing Leg Creation
|
||||
|
||||
**Flight log entry:**
|
||||
```markdown
|
||||
### API Authentication Leg
|
||||
**Status**: completed
|
||||
|
||||
#### Changes Made
|
||||
- Implemented JWT auth in `src/middleware/auth.ts`
|
||||
- Used `jose` library instead of planned `jsonwebtoken` (see Decisions)
|
||||
|
||||
#### Decisions
|
||||
**JWT Library Change**
|
||||
- Context: `jsonwebtoken` had CVE flagged in security scan
|
||||
- Decision: Switch to `jose` library
|
||||
- Impact: All subsequent auth-related code must use `jose` patterns
|
||||
```
|
||||
|
||||
**Impact on next leg:**
|
||||
When creating the "Protected Routes" leg, the guidance must specify using `jose` patterns, not the originally planned `jsonwebtoken` approach. Without the flight log, this context would be lost.
|
||||
|
||||
## Location
|
||||
|
||||
Flight log location is defined in `.flightops/ARTIFACTS.md`. The artifact system determines where and how logs are stored.
|
||||
|
||||
## Best Practices
|
||||
|
||||
### Keep Entries Atomic
|
||||
|
||||
Write entries as things happen, not in batches. Batched entries lose detail and context.
|
||||
|
||||
### Be Factual, Not Editorial
|
||||
|
||||
**Good**: "Validation logic required 3 additional edge cases not in spec"
|
||||
**Avoid**: "The spec was incomplete and caused problems"
|
||||
|
||||
### Link to Specifics
|
||||
|
||||
Reference file paths, line numbers, commit hashes when relevant. Vague entries lose value quickly.
|
||||
|
||||
### Capture the "Why"
|
||||
|
||||
The code shows *what* changed. The log should capture *why* decisions were made.
|
||||
|
||||
### Don't Duplicate the Leg
|
||||
|
||||
The leg document has acceptance criteria and implementation guidance. The log records completion status and what actually happened—not a copy of the plan.
|
||||
|
||||
## Common Patterns
|
||||
|
||||
### Discovery Pattern
|
||||
|
||||
When execution reveals something unexpected:
|
||||
|
||||
```markdown
|
||||
### Discovery: Rate Limiting Already Exists
|
||||
**Observed**: Found existing rate limiter in `src/middleware/rateLimit.ts`
|
||||
**Expected**: Needed to implement from scratch per flight spec
|
||||
**Impact**: Leg `implement-rate-limiting` can be simplified to configuration
|
||||
**Action**: Updated leg scope, reduced from 4 hours to 30 minutes
|
||||
```
|
||||
|
||||
### Blocker Pattern
|
||||
|
||||
When progress stops:
|
||||
|
||||
```markdown
|
||||
### Blocker: Missing API Credentials
|
||||
**Observed**: Stripe API key not in environment
|
||||
**Severity**: blocking
|
||||
**Impact**: Cannot test payment integration
|
||||
**Resolution**: Requested credentials from DevOps
|
||||
**Unblocked**: {timestamp when resolved}
|
||||
```
|
||||
|
||||
### Adaptation Pattern
|
||||
|
||||
When the plan needs adjustment:
|
||||
|
||||
```markdown
|
||||
### Deviation: Schema Change
|
||||
**Planned**: Add `verified` boolean to User model
|
||||
**Actual**: Added `verifiedAt` timestamp instead
|
||||
**Reason**: Team decided verification time is valuable data
|
||||
**Outcome**: Provides more information, minor query adjustments needed
|
||||
```
|
||||
|
||||
## Next Steps
|
||||
|
||||
- [Flights](flights.md) — Understanding flight structure and lifecycle
|
||||
- [Legs](legs.md) — How legs are created and executed
|
||||
- [Workflow](workflow.md) — The complete flow from mission to completion
|
||||
462
container/mission-control/docs/flights.md
Normal file
462
container/mission-control/docs/flights.md
Normal file
@@ -0,0 +1,462 @@
|
||||
# Flights
|
||||
|
||||
Flights are the balanced layer of Flight Control—technical enough for implementation, readable enough for humans. A flight and its flight plan are synonymous: the document *is* the plan.
|
||||
|
||||
## What is a Flight?
|
||||
|
||||
A flight translates mission outcomes into technical specifications. Flights are:
|
||||
|
||||
- **Technically scoped**: Bounded implementation work
|
||||
- **Checklist-driven**: Pre-flight, in-flight, and post-flight phases
|
||||
- **Adaptive**: Living documents that evolve with understanding
|
||||
|
||||
### Flight vs. Mission vs. Leg
|
||||
|
||||
| Aspect | Mission | Flight | Leg |
|
||||
|--------|---------|--------|-----|
|
||||
| Question | Why? | What & How? | Do this |
|
||||
| Audience | Stakeholders | Developers/AI | AI agents |
|
||||
| Flexibility | High | Medium | Low |
|
||||
| Updates | Rarely | As needed | Never (create new) |
|
||||
|
||||
## Flight Structure
|
||||
|
||||
Every flight has three sections corresponding to its lifecycle:
|
||||
|
||||
```markdown
|
||||
# Flight: {Title}
|
||||
|
||||
## Mission Link
|
||||
Parent mission and relevant success criteria.
|
||||
|
||||
---
|
||||
|
||||
## Pre-Flight
|
||||
|
||||
### Objective
|
||||
What this flight accomplishes.
|
||||
|
||||
### Open Questions
|
||||
- [ ] Question needing resolution
|
||||
- [ ] Another question
|
||||
|
||||
### Design Decisions
|
||||
Choices made and their rationale.
|
||||
|
||||
### Prerequisites
|
||||
What must be true before execution begins.
|
||||
|
||||
### Pre-Flight Checklist
|
||||
- [ ] Questions resolved
|
||||
- [ ] Design decisions documented
|
||||
- [ ] Prerequisites verified
|
||||
- [ ] Legs defined
|
||||
|
||||
---
|
||||
|
||||
## In-Flight
|
||||
|
||||
### Technical Approach
|
||||
How the objective will be achieved.
|
||||
|
||||
### Checkpoints
|
||||
Key milestones during execution.
|
||||
|
||||
### Adaptation Criteria
|
||||
When to deviate from plan.
|
||||
|
||||
### Legs
|
||||
Links to implementation legs.
|
||||
|
||||
---
|
||||
|
||||
## Post-Flight
|
||||
|
||||
### Completion Checklist
|
||||
- [ ] All legs completed
|
||||
- [ ] Integration verified
|
||||
- [ ] Tests passing
|
||||
- [ ] Documentation updated
|
||||
|
||||
### Verification
|
||||
How to confirm the flight achieved its objective.
|
||||
|
||||
### Retrospective Notes
|
||||
What was learned during this flight.
|
||||
```
|
||||
|
||||
## Pre-Flight Phase
|
||||
|
||||
Pre-flight is about readiness. No implementation happens here—only planning.
|
||||
|
||||
### Open Questions
|
||||
|
||||
Capture unknowns that need resolution before execution:
|
||||
|
||||
```markdown
|
||||
### Open Questions
|
||||
- [ ] Should we use JWT or session-based auth?
|
||||
- [ ] What OAuth providers do we need to support?
|
||||
- [ ] How long should sessions remain valid?
|
||||
```
|
||||
|
||||
Questions should be:
|
||||
- **Specific**: Clear what information is needed
|
||||
- **Blocking**: Execution can't proceed without answers
|
||||
- **Resolvable**: Someone can answer them
|
||||
|
||||
Check off questions as they're resolved and document answers in Design Decisions.
|
||||
|
||||
### Design Decisions
|
||||
|
||||
Record choices and rationale:
|
||||
|
||||
```markdown
|
||||
### Design Decisions
|
||||
|
||||
**Authentication Method**: JWT tokens
|
||||
- Rationale: Stateless, works across services
|
||||
- Trade-off: Token revocation more complex
|
||||
- Decided by: Security team review
|
||||
|
||||
**Session Duration**: 7 days with refresh
|
||||
- Rationale: Balance security and convenience
|
||||
- Trade-off: Longer exposure window
|
||||
- Decided by: Product requirements
|
||||
```
|
||||
|
||||
Design decisions prevent relitigating settled questions and help future maintainers understand *why*.
|
||||
|
||||
### Prerequisites
|
||||
|
||||
List conditions required for execution:
|
||||
|
||||
```markdown
|
||||
### Prerequisites
|
||||
- [ ] Database schema migrations ready
|
||||
- [ ] API authentication middleware exists
|
||||
- [ ] Test environment configured
|
||||
- [ ] Security review of approach completed
|
||||
```
|
||||
|
||||
Prerequisites are external dependencies. If they're not met, the flight can't begin safely.
|
||||
|
||||
### Pre-Flight Checklist
|
||||
|
||||
The gate check before execution:
|
||||
|
||||
```markdown
|
||||
### Pre-Flight Checklist
|
||||
- [ ] All open questions resolved
|
||||
- [ ] Design decisions documented with rationale
|
||||
- [ ] Prerequisites verified
|
||||
- [ ] Legs defined with acceptance criteria
|
||||
- [ ] Estimated scope is reasonable
|
||||
```
|
||||
|
||||
When all items are checked, the flight moves from `planning` to `ready`.
|
||||
|
||||
## Flight Briefing
|
||||
|
||||
Before execution begins, create a **flight briefing** to align the crew:
|
||||
|
||||
```markdown
|
||||
# Flight Briefing: {Title}
|
||||
|
||||
## Mission Context
|
||||
How this flight contributes to the mission.
|
||||
|
||||
## Objective
|
||||
What this flight will accomplish.
|
||||
|
||||
## Key Decisions
|
||||
Critical design decisions the crew should know.
|
||||
|
||||
## Risks
|
||||
Known risks and mitigation strategies.
|
||||
|
||||
## Legs Overview
|
||||
Summary of legs with complexity notes.
|
||||
|
||||
## Success Criteria
|
||||
How we'll know the flight succeeded.
|
||||
```
|
||||
|
||||
The briefing is created when the flight moves to `ready` status.
|
||||
|
||||
## In-Flight Phase
|
||||
|
||||
In-flight is execution. Legs are being worked, progress is tracked in the [flight log](flight-logs.md).
|
||||
|
||||
### Technical Approach
|
||||
|
||||
Document *how* the objective will be achieved:
|
||||
|
||||
```markdown
|
||||
### Technical Approach
|
||||
|
||||
1. Create user model with email/password fields
|
||||
2. Implement registration endpoint with validation
|
||||
3. Add login endpoint with JWT generation
|
||||
4. Create middleware for protected routes
|
||||
5. Add password reset flow
|
||||
```
|
||||
|
||||
This bridges design decisions to concrete implementation without being leg-level specific.
|
||||
|
||||
### Checkpoints
|
||||
|
||||
Define milestones for progress tracking:
|
||||
|
||||
```markdown
|
||||
### Checkpoints
|
||||
- [ ] User registration working end-to-end
|
||||
- [ ] Login returning valid tokens
|
||||
- [ ] Protected routes rejecting invalid tokens
|
||||
- [ ] Password reset emails sending
|
||||
```
|
||||
|
||||
Checkpoints help identify if a flight is on track or needs intervention.
|
||||
|
||||
### Adaptation Criteria
|
||||
|
||||
When should the plan change?
|
||||
|
||||
```markdown
|
||||
### Adaptation Criteria
|
||||
|
||||
**Divert if**:
|
||||
- Security vulnerability discovered in approach
|
||||
- External API changes break integration
|
||||
- Performance testing reveals blocking issues
|
||||
|
||||
**Acceptable variations**:
|
||||
- Minor API changes for developer experience
|
||||
- Additional validation rules
|
||||
- Extended error handling
|
||||
```
|
||||
|
||||
Explicit adaptation criteria prevent both rigid adherence to bad plans and unnecessary scope creep.
|
||||
|
||||
### Legs
|
||||
|
||||
Reference the implementation legs:
|
||||
|
||||
```markdown
|
||||
### Legs
|
||||
- [x] `create-user-model` - completed
|
||||
- [x] `registration-endpoint` - completed
|
||||
- [ ] `login-endpoint` - in-flight
|
||||
- [ ] `auth-middleware` - planning
|
||||
- [ ] `password-reset` - planning
|
||||
```
|
||||
|
||||
### Flight Log
|
||||
|
||||
During execution, maintain a [flight log](flight-logs.md) alongside this document. The flight log records:
|
||||
|
||||
- **Leg progress**: When legs start and complete, with summaries of changes
|
||||
- **Decisions**: Choices made during execution not in the original plan
|
||||
- **Deviations**: Departures from the planned approach and why
|
||||
- **Anomalies**: Unexpected issues or behaviors encountered
|
||||
|
||||
The flight log is essential for:
|
||||
- Providing continuity when work resumes after interruption
|
||||
- Informing the creation of subsequent legs
|
||||
- Enabling meaningful post-flight retrospectives
|
||||
|
||||
## Post-Flight Phase
|
||||
|
||||
Post-flight is verification and learning. Implementation is complete; now confirm success.
|
||||
|
||||
### Completion Checklist
|
||||
|
||||
Verify all work is finished:
|
||||
|
||||
```markdown
|
||||
### Completion Checklist
|
||||
- [ ] All legs marked completed
|
||||
- [ ] Code merged to main branch
|
||||
- [ ] Integration tests passing
|
||||
- [ ] Documentation updated
|
||||
- [ ] No blocking issues remaining
|
||||
```
|
||||
|
||||
### Verification
|
||||
|
||||
How to confirm the flight succeeded:
|
||||
|
||||
```markdown
|
||||
### Verification
|
||||
|
||||
**Manual verification**:
|
||||
1. Create new user account
|
||||
2. Log in with credentials
|
||||
3. Access protected endpoint
|
||||
4. Log out and verify token invalid
|
||||
|
||||
**Automated verification**:
|
||||
- `npm run test:auth` passes
|
||||
- `npm run test:e2e` auth flows pass
|
||||
```
|
||||
|
||||
### Retrospective Notes
|
||||
|
||||
Capture learnings for future flights:
|
||||
|
||||
```markdown
|
||||
### Retrospective Notes
|
||||
|
||||
**What went well**:
|
||||
- JWT library well-documented
|
||||
- Security review caught edge case early
|
||||
|
||||
**What could improve**:
|
||||
- Underestimated password reset complexity
|
||||
- Should have spiked OAuth earlier
|
||||
|
||||
**For next time**:
|
||||
- Include OAuth research in pre-flight
|
||||
- Add explicit leg for error handling
|
||||
```
|
||||
|
||||
## Flight Lifecycle
|
||||
|
||||
Flights progress through defined states:
|
||||
|
||||
### States
|
||||
|
||||
```
|
||||
planning ──► ready ──► in-flight ──► landed ──► completed
                           │
                           └──► aborted
|
||||
```
|
||||
|
||||
**planning**
|
||||
Pre-flight phase. Questions being resolved, design decisions being made.
|
||||
|
||||
**ready**
|
||||
Pre-flight checklist complete. All prerequisites met. Ready to execute.
|
||||
|
||||
**in-flight**
|
||||
Legs actively being executed. Checkpoints being reached. Flights may be modified during this phase (e.g., changing planned legs) as long as the flight log captures the change and rationale.
|
||||
|
||||
**landed**
|
||||
All legs complete. Verification passed. Flight achieved its objective. Ready for debrief.
|
||||
|
||||
**completed**
|
||||
Debrief done. Flight fully wrapped up and artifacts updated.
|
||||
|
||||
**aborted**
|
||||
Flight cancelled. Changes are rolled back. Document the reason for future reference.
|
||||
|
||||
### In-Flight Modifications
|
||||
|
||||
Flights can be modified while `in-flight` — for example, when planned legs need to change due to discoveries during execution. This is not a separate state; simply update the flight artifact and record the change and rationale in the flight log.
|
||||
|
||||
**Create a new flight when:**
|
||||
- A completely new objective emerges
|
||||
- The discovered work is independent of the current flight's goal
|
||||
- The new work serves different mission success criteria
|
||||
|
||||
### State Transitions
|
||||
|
||||
| From | To | Trigger |
|
||||
|------|----|---------|
|
||||
| planning | ready | Pre-flight checklist complete |
|
||||
| ready | in-flight | First leg begins |
|
||||
| in-flight | landed | All legs complete |
|
||||
| in-flight | aborted | Flight cancelled, changes rolled back |
|
||||
| landed | completed | Debrief complete |
|
||||
|
||||
## Connecting to Parent Mission
|
||||
|
||||
Flights serve missions. Each flight should clearly link to:
|
||||
|
||||
```markdown
|
||||
## Mission Link
|
||||
|
||||
**Mission**: [Secure User Authentication](../mission.md)
|
||||
|
||||
**Contributing to criteria**:
|
||||
- [ ] Users can create accounts with email/password
|
||||
- [ ] Session management handles concurrent logins
|
||||
```
|
||||
|
||||
This traceability ensures flights aren't orphaned work—they connect to meaningful outcomes.
|
||||
|
||||
## Connecting to Child Legs
|
||||
|
||||
Flights generate legs. The flight defines *what* needs to happen; legs define the *exact* implementation steps.
|
||||
|
||||
A well-structured flight enables AI agents to execute legs without needing clarification:
|
||||
|
||||
**Flight provides**:
|
||||
- Technical approach and patterns
|
||||
- Design decisions and constraints
|
||||
- Context about the broader system
|
||||
|
||||
**Legs consume**:
|
||||
- Specific task to complete
|
||||
- Acceptance criteria to meet
|
||||
- Context from flight document
|
||||
|
||||
## Common Pitfalls
|
||||
|
||||
### Skipping Pre-Flight
|
||||
|
||||
Rushing to implementation creates problems. Pre-flight exists to:
|
||||
- Surface hidden complexity
|
||||
- Align on approach before work begins
|
||||
- Identify blocking dependencies
|
||||
|
||||
A thorough pre-flight prevents mid-flight crises.
|
||||
|
||||
### Over-Specifying
|
||||
|
||||
Flights should guide, not constrain. Leave room for:
|
||||
- Implementation details discovered during work
|
||||
- Better approaches found during execution
|
||||
- Minor adjustments that don't affect outcomes
|
||||
|
||||
### Ignoring Adaptation Criteria
|
||||
|
||||
When circumstances change, adapt the plan. Flights that ignore reality become fiction. Use adaptation criteria to decide when to divert.
|
||||
|
||||
### Skipping Post-Flight
|
||||
|
||||
Post-flight isn't bureaucracy—it's learning. Retrospective notes prevent repeating mistakes. Verification confirms the flight actually succeeded.
|
||||
|
||||
## Flight Debrief
|
||||
|
||||
After a flight lands (or diverts), create a **flight debrief** for retrospective analysis:
|
||||
|
||||
```markdown
|
||||
# Flight Debrief: {Title}
|
||||
|
||||
## Outcome Assessment
|
||||
What the flight accomplished and which mission criteria it advanced.
|
||||
|
||||
## What Went Well
|
||||
Effective patterns during execution.
|
||||
|
||||
## What Could Be Improved
|
||||
Process and technical recommendations.
|
||||
|
||||
## Deviations and Lessons
|
||||
What changed from the plan and why.
|
||||
|
||||
## Recommendations
|
||||
Top 3-5 most impactful improvements.
|
||||
|
||||
## Action Items
|
||||
Follow-up work and improvements.
|
||||
```
|
||||
|
||||
The debrief enables continuous improvement and informs future flights.
|
||||
|
||||
## Next Steps
|
||||
|
||||
- [Flight Logs](flight-logs.md) — Recording execution progress and decisions
|
||||
- [Legs](legs.md) — Learn to write AI-optimized implementation steps
|
||||
- [Workflow](workflow.md) — See how flights flow into legs
|
||||
383
container/mission-control/docs/legs.md
Normal file
383
container/mission-control/docs/legs.md
Normal file
@@ -0,0 +1,383 @@
|
||||
# Legs
|
||||
|
||||
Legs are the AI-optimized layer of Flight Control. They provide structured, explicit instructions that AI agents can execute without ambiguity.
|
||||
|
||||
## What is a Leg?
|
||||
|
||||
A leg is a single, atomic unit of implementation work. Legs are:
|
||||
|
||||
- **Explicit**: No ambiguity about what "done" means
|
||||
- **Bounded**: Clear start and end points
|
||||
- **Context-complete**: All necessary information included
|
||||
- **AI-consumable**: Structured for machine parsing
|
||||
|
||||
### Leg vs. Flight vs. Mission
|
||||
|
||||
| Aspect | Mission | Flight | Leg |
|
||||
|--------|---------|--------|-----|
|
||||
| Scope | Outcome | Feature | Task |
|
||||
| Duration | Days-weeks | Hours-days | Minutes-hours |
|
||||
| Modifications | Allowed | Allowed | Create new instead |
|
||||
| Audience | Humans | Developers/AI | AI agents |
|
||||
|
||||
## Leg Structure
|
||||
|
||||
Legs follow a consistent structure optimized for AI consumption:
|
||||
|
||||
```markdown
|
||||
# Leg: {slug}
|
||||
|
||||
## Flight Link
|
||||
[Parent flight](../flight.md)
|
||||
|
||||
## Objective
|
||||
Single sentence describing what this leg accomplishes.
|
||||
|
||||
## Context
|
||||
Information the AI needs to understand this task.
|
||||
|
||||
## Inputs
|
||||
- What exists before this leg runs
|
||||
|
||||
## Outputs
|
||||
- What exists after this leg completes
|
||||
|
||||
## Acceptance Criteria
|
||||
- [ ] Criterion 1
|
||||
- [ ] Criterion 2
|
||||
- [ ] Criterion 3
|
||||
|
||||
## Verification Steps
|
||||
How to confirm each criterion is met (commands, manual checks, tools).
|
||||
|
||||
## Implementation Guidance
|
||||
Specific patterns, approaches, or constraints.
|
||||
|
||||
## Files Likely Affected
|
||||
- `path/to/file.ts`
|
||||
- `path/to/test.ts`
|
||||
```
|
||||
|
||||
## Writing Effective Legs
|
||||
|
||||
### Objectives
|
||||
|
||||
State exactly what the leg accomplishes in one sentence:
|
||||
|
||||
**Weak**:
|
||||
> Set up the user stuff
|
||||
|
||||
**Strong**:
|
||||
> Create the User model with email, password_hash, and timestamps fields
|
||||
|
||||
The objective should be:
|
||||
- **Specific**: Clear what will be created/modified
|
||||
- **Verifiable**: Can confirm completion by inspection
|
||||
- **Atomic**: One cohesive piece of work
|
||||
|
||||
### Context
|
||||
|
||||
Provide information the AI needs but might not have:
|
||||
|
||||
```markdown
|
||||
## Context
|
||||
|
||||
This project uses Prisma for database access. The existing `prisma/schema.prisma`
|
||||
file contains the Post and Comment models. User will be referenced by these
|
||||
models via foreign keys.
|
||||
|
||||
Authentication uses JWT tokens. The password_hash field will store bcrypt hashes
|
||||
(cost factor 12). The email field must be unique and will be used as the login
|
||||
identifier.
|
||||
```
|
||||
|
||||
Good context includes:
|
||||
- Relevant technology/framework information
|
||||
- Relationship to existing code
|
||||
- Design decisions from the parent flight
|
||||
- Constraints that affect implementation
|
||||
|
||||
### Inputs and Outputs
|
||||
|
||||
Be explicit about state before and after:
|
||||
|
||||
```markdown
|
||||
## Inputs
|
||||
- Prisma schema at `prisma/schema.prisma` with Post and Comment models
|
||||
- No existing User model or authentication
|
||||
|
||||
## Outputs
|
||||
- User model added to Prisma schema
|
||||
- Migration file generated
|
||||
- Migration applied to development database
|
||||
```
|
||||
|
||||
Inputs help the AI understand starting conditions. Outputs define the expected end state.
|
||||
|
||||
### Acceptance Criteria
|
||||
|
||||
Define exactly what "done" means:
|
||||
|
||||
```markdown
|
||||
## Acceptance Criteria
|
||||
- [ ] User model exists in `prisma/schema.prisma`
|
||||
- [ ] User model has fields: id, email, password_hash, created_at, updated_at
|
||||
- [ ] email field has `@unique` attribute
|
||||
- [ ] Migration file exists in `prisma/migrations/`
|
||||
- [ ] `npx prisma migrate status` shows no pending migrations
|
||||
- [ ] TypeScript types generated (`npx prisma generate` succeeds)
|
||||
```
|
||||
|
||||
Acceptance criteria should be:
|
||||
- **Binary**: Either met or not met
|
||||
- **Observable**: Can verify by inspection or test
|
||||
- **Complete**: Nothing else required for "done"
|
||||
|
||||
### Verification Steps
|
||||
|
||||
Tell the AI exactly *how* to confirm each criterion:
|
||||
|
||||
```markdown
|
||||
## Verification Steps
|
||||
- Run `npx prisma migrate status` — should show no pending migrations
|
||||
- Run `npm test` — all tests pass
|
||||
- Open browser to `/users` — page loads without errors
|
||||
- Tab through form fields — focus order matches visual order
|
||||
- Run `npx lighthouse --accessibility` — score ≥ 90
|
||||
```
|
||||
|
||||
Verification steps should be:
|
||||
- **Executable**: Commands or specific actions
|
||||
- **Deterministic**: Same result every time
|
||||
- **Mapped to criteria**: Clear which criterion each step validates
|
||||
|
||||
For accessibility legs, include specific checks:
|
||||
- Keyboard navigation sequences to test
|
||||
- Screen reader commands (e.g., "navigate to main content via skip link")
|
||||
- Automated tool commands (Lighthouse, axe-core)
|
||||
|
||||
### Implementation Guidance
|
||||
|
||||
Provide specific direction when needed:
|
||||
|
||||
````markdown
## Implementation Guidance

Use Prisma's native type for id:
```prisma
id String @id @default(cuid())
```

For timestamps, use Prisma's auto-managed fields:
```prisma
created_at DateTime @default(now())
updated_at DateTime @updatedAt
```

Do not add relations to Post/Comment yet—that's a separate leg.
````
|
||||
|
||||
Implementation guidance helps when:
|
||||
- Project has specific patterns to follow
|
||||
- There are multiple valid approaches (pick one)
|
||||
- Constraints aren't obvious from context
|
||||
|
||||
### Files Likely Affected
|
||||
|
||||
Help the AI know where to look:
|
||||
|
||||
```markdown
|
||||
## Files Likely Affected
|
||||
- `prisma/schema.prisma` - Add User model
|
||||
- `prisma/migrations/*` - New migration file (generated)
|
||||
```
|
||||
|
||||
This isn't prescriptive—the AI might touch other files. It's a starting point for orientation.
|
||||
|
||||
## Leg Lifecycle
|
||||
|
||||
Legs progress through defined states:
|
||||
|
||||
### States
|
||||
|
||||
```
|
||||
planning ──► ready ──► in-flight ──► landed ──► completed
                           │
                           └──► aborted
|
||||
```
|
||||
|
||||
**planning**
|
||||
Leg is being designed. Acceptance criteria and implementation guidance being defined.
|
||||
|
||||
**ready**
|
||||
Leg design approved. Ready for implementation.
|
||||
|
||||
**in-flight**
|
||||
AI agent actively working on implementation.
|
||||
|
||||
**landed**
|
||||
Implementation complete. Flight log updated. Ready for review.
|
||||
|
||||
**completed**
|
||||
Review passed. Acceptance criteria confirmed met.
|
||||
|
||||
**aborted**
|
||||
Leg cancelled. Changes are rolled back. Document the reason in the flight log.
|
||||
|
||||
### State Transitions
|
||||
|
||||
| From | To | Trigger |
|
||||
|------|----|---------|
|
||||
| planning | ready | Design review passes |
|
||||
| ready | in-flight | Developer begins work |
|
||||
| in-flight | landed | Developer reports completion |
|
||||
| in-flight | aborted | Cannot proceed, changes rolled back |
|
||||
| landed | completed | Review passes |
|
||||
| landed | in-flight | Issues found, needs fixes |
|
||||
|
||||
Note: Legs may only be modified while in `planning` state. Once `in-flight`, create new legs instead of modifying existing ones.
|
||||
|
||||
## Patterns for AI Consumption
|
||||
|
||||
### Be Explicit, Not Implicit
|
||||
|
||||
**Implicit** (requires inference):
|
||||
> Add validation to the email field
|
||||
|
||||
**Explicit** (no inference needed):
|
||||
> Add email validation: must be non-empty, valid email format (use validator library's isEmail), maximum 255 characters. Return 400 status with `{ error: "Invalid email format" }` on failure.
|
||||
|
||||
### Provide Examples
|
||||
|
||||
When patterns might be unclear, show don't tell:
|
||||
|
||||
````markdown
## Implementation Guidance

Follow the existing controller pattern:

```typescript
// Example from PostController
export async function createPost(req: Request, res: Response) {
  const { title, content } = req.body;

  if (!title) {
    return res.status(400).json({ error: "Title is required" });
  }

  const post = await prisma.post.create({
    data: { title, content, authorId: req.user.id }
  });

  return res.status(201).json(post);
}
```

Apply this pattern to the registration endpoint.
````
|
||||
|
||||
### State Constraints Clearly
|
||||
|
||||
Don't bury constraints in prose:
|
||||
|
||||
**Buried**:
|
||||
> Create the endpoint and make sure it handles errors properly and validates input and also we're using Express and the response should be JSON.
|
||||
|
||||
**Clear**:
|
||||
```markdown
|
||||
## Constraints
|
||||
- Framework: Express.js
|
||||
- Response format: JSON
|
||||
- Error handling: Return appropriate HTTP status codes
|
||||
- Validation: Validate all input before processing
|
||||
```
|
||||
|
||||
### Link to Flight for Context
|
||||
|
||||
When details would be redundant, reference the parent:
|
||||
|
||||
```markdown
|
||||
## Context
|
||||
|
||||
See [parent flight](../flight.md) for:
|
||||
- Authentication approach (JWT tokens)
|
||||
- Session duration decisions
|
||||
- Error response format standards
|
||||
```
|
||||
|
||||
## Common Pitfalls
|
||||
|
||||
### Too Large
|
||||
|
||||
If a leg takes more than a few hours, it's probably too big. Signs:
|
||||
- Multiple independent pieces of functionality
|
||||
- Would benefit from intermediate checkpoints
|
||||
- Hard to write clear acceptance criteria
|
||||
|
||||
Split into smaller legs.
|
||||
|
||||
### Too Small
|
||||
|
||||
If a leg is trivial, it adds overhead without value. Signs:
|
||||
- Single line change
|
||||
- No meaningful acceptance criteria
|
||||
- Part of a larger atomic operation
|
||||
|
||||
Combine with related work.
|
||||
|
||||
### Ambiguous Acceptance Criteria
|
||||
|
||||
If criteria require judgment, they're not criteria:
|
||||
|
||||
**Ambiguous**: "Code is clean and readable"
|
||||
**Specific**: "Functions are under 50 lines, no eslint warnings"
|
||||
|
||||
**Ambiguous**: "Error handling is good"
|
||||
**Specific**: "All async operations wrapped in try/catch, errors logged with context"
|
||||
|
||||
### Missing Context
|
||||
|
||||
AI agents don't have your mental model. Include:
|
||||
- Why this approach (from flight decisions)
|
||||
- How this fits with existing code
|
||||
- What patterns to follow
|
||||
- What to avoid
|
||||
|
||||
## Relationship to Flight
|
||||
|
||||
Legs are generated from flights. The flight provides:
|
||||
- Technical approach
|
||||
- Design decisions
|
||||
- Overall context
|
||||
|
||||
Legs provide:
|
||||
- Specific implementation steps
|
||||
- Explicit acceptance criteria
|
||||
- Focused scope
|
||||
|
||||
A flight might generate many legs:
|
||||
|
||||
```
|
||||
Flight: User Registration Flow
|
||||
├── Leg: create-user-model
|
||||
├── Leg: registration-endpoint
|
||||
├── Leg: email-validation
|
||||
├── Leg: password-hashing
|
||||
├── Leg: registration-tests
|
||||
└── Leg: registration-docs
|
||||
```
|
||||
|
||||
## Immutability Principle
|
||||
|
||||
Once a leg is `in-flight`, don't modify it. If requirements change:
|
||||
|
||||
1. Mark the current leg as aborted (changes rolled back)
|
||||
2. Create a new leg with updated requirements
|
||||
3. Reference the old leg for context
|
||||
|
||||
This preserves history and prevents confusion about what the AI was asked to do.
|
||||
|
||||
## Next Steps
|
||||
|
||||
- [Workflow](workflow.md) — See the complete mission → flight → leg flow
|
||||
- [Flights](flights.md) — Understand where legs come from
|
||||
245
container/mission-control/docs/missions.md
Normal file
245
container/mission-control/docs/missions.md
Normal file
@@ -0,0 +1,245 @@
|
||||
# Missions
|
||||
|
||||
Missions are the human-optimized layer of Flight Control. They define *what* success looks like without prescribing *how* to achieve it.
|
||||
|
||||
## What is a Mission?
|
||||
|
||||
A mission represents a meaningful outcome—something a stakeholder would recognize as valuable. Missions are:
|
||||
|
||||
- **Outcome-driven**: Focused on results, not activities
|
||||
- **Human-readable**: Written for people, not machines
|
||||
- **Strategically scoped**: Large enough to matter, bounded enough to complete
|
||||
|
||||
### Mission vs. Flight vs. Leg
|
||||
|
||||
| Aspect | Mission | Flight | Leg |
|
||||
|--------|---------|--------|-----|
|
||||
| Audience | Humans, stakeholders | Developers, AI | AI agents |
|
||||
| Scope | Outcome | Technical spec | Single task |
|
||||
| Style | Narrative | Structured | Explicit |
|
||||
| Duration | Days to weeks | Hours to days | Minutes to hours |
|
||||
|
||||
## Writing Effective Missions
|
||||
|
||||
### Start with Outcomes
|
||||
|
||||
Frame missions around what changes when they're complete:
|
||||
|
||||
**Weak** (activity-focused):
|
||||
> Implement user authentication
|
||||
|
||||
**Strong** (outcome-focused):
|
||||
> Users can securely access their personal data without sharing credentials across services
|
||||
|
||||
The outcome framing:
|
||||
- Clarifies *why* the work matters
|
||||
- Leaves implementation decisions to flights
|
||||
- Provides a clear test for completion
|
||||
|
||||
### Define Success Criteria
|
||||
|
||||
Every mission needs measurable success criteria. These answer: "How do we know we're done?"
|
||||
|
||||
```markdown
|
||||
## Success Criteria
|
||||
|
||||
- [ ] Users can create accounts with email/password
|
||||
- [ ] Users can authenticate via OAuth providers
|
||||
- [ ] Session management handles concurrent logins
|
||||
- [ ] Security audit passes with no critical findings
|
||||
```
|
||||
|
||||
Success criteria should be:
|
||||
- **Observable**: Can be verified by inspection
|
||||
- **Binary**: Either met or not met
|
||||
- **Independent**: Achievable without external dependencies
|
||||
- **Capability-focused**: Describes what users or the system can do, not which tool or technology achieves it
|
||||
|
||||
### Consider Stakeholders
|
||||
|
||||
Missions serve stakeholders. Identify them explicitly:
|
||||
|
||||
```markdown
|
||||
## Stakeholders
|
||||
|
||||
- **End users**: Need frictionless, secure access
|
||||
- **Security team**: Requires compliance with auth standards
|
||||
- **Support team**: Needs ability to assist locked-out users
|
||||
```
|
||||
|
||||
Stakeholder identification helps:
|
||||
- Prioritize competing concerns
|
||||
- Identify missing success criteria
|
||||
- Communicate progress meaningfully
|
||||
|
||||
## Mission Structure
|
||||
|
||||
A mission document typically contains:
|
||||
|
||||
```markdown
|
||||
# Mission: {Title}
|
||||
|
||||
## Outcome
|
||||
What success looks like in human terms.
|
||||
|
||||
## Context
|
||||
Why this mission matters now. Background information.
|
||||
|
||||
## Success Criteria
|
||||
- [ ] Criterion 1
|
||||
- [ ] Criterion 2
|
||||
- [ ] Criterion 3
|
||||
|
||||
## Stakeholders
|
||||
Who cares about this outcome and why.
|
||||
|
||||
## Constraints
|
||||
Non-negotiable boundaries (budget, timeline, technology).
|
||||
|
||||
## Environment Requirements
|
||||
Development environment, runtime dependencies, special tooling.
|
||||
|
||||
## Open Questions
|
||||
Unknowns that need resolution during execution.
|
||||
|
||||
## Known Issues
|
||||
Emergent blockers and issues discovered during execution.
|
||||
|
||||
## Flights
|
||||
Links to flights executing this mission.
|
||||
```
|
||||
|
||||
## Mission Lifecycle
|
||||
|
||||
Missions progress through defined states:
|
||||
|
||||
### States
|
||||
|
||||
```
|
||||
planning ──► active ──► completed
                │
                └──► aborted
|
||||
```
|
||||
|
||||
**planning**
|
||||
The mission is being defined. Outcome, success criteria, and constraints are still being refined. No flights have started.
|
||||
|
||||
**active**
|
||||
At least one flight is in progress. The mission outcome is being pursued. New flights may be created as understanding develops.
|
||||
|
||||
**completed**
|
||||
All success criteria are met. Stakeholders have accepted the outcome. The mission can be archived.
|
||||
|
||||
**aborted**
|
||||
The mission was cancelled before completion. This might happen due to:
|
||||
- Changed priorities
|
||||
- Discovered infeasibility
|
||||
- External factors
|
||||
|
||||
Aborted missions should document *why* they were cancelled for future reference.
|
||||
|
||||
### State Transitions
|
||||
|
||||
| From | To | Trigger |
|
||||
|------|----|---------|
|
||||
| planning | active | First flight begins |
|
||||
| active | completed | All success criteria met |
|
||||
| active | aborted | Cancellation decision |
|
||||
| planning | aborted | Cancellation decision |
|
||||
|
||||
## Communicating Mission Status
|
||||
|
||||
Missions are stakeholder-facing. Status updates should be meaningful to non-technical audiences:
|
||||
|
||||
**Weak update**:
|
||||
> Completed 3 of 5 flights
|
||||
|
||||
**Strong update**:
|
||||
> Users can now create accounts and log in. Next: adding OAuth support and security review.
|
||||
|
||||
Link progress to outcomes, not activities.
|
||||
|
||||
## Common Pitfalls
|
||||
|
||||
### Too Granular
|
||||
|
||||
If a mission can be completed in a single flight, it's probably too small. Consider:
|
||||
- Is this a meaningful outcome or just a task?
|
||||
- Would a stakeholder recognize this as valuable?
|
||||
- Does it warrant its own success criteria?
|
||||
|
||||
### Too Vague
|
||||
|
||||
Missions need boundaries. "Improve the product" isn't a mission—it's a direction. Missions should be:
|
||||
- Completable (has an end state)
|
||||
- Measurable (success criteria exist)
|
||||
- Bounded (scope is clear)
|
||||
|
||||
### Implementation Leaking In
|
||||
|
||||
Missions should not prescribe *how*:
|
||||
|
||||
**Leaking implementation**:
|
||||
> Build a React-based authentication flow using JWT tokens stored in HttpOnly cookies
|
||||
|
||||
**Proper abstraction**:
|
||||
> Users can securely authenticate across sessions without re-entering credentials
|
||||
|
||||
This applies to success criteria too. Criteria that name tools or technologies lock you into an approach before flights even begin:
|
||||
|
||||
**Implementation-specific criteria** (avoid):
|
||||
> - [ ] JWT tokens are validated via middleware on every request
|
||||
> - [ ] User records are stored in PostgreSQL with bcrypt-hashed passwords
|
||||
|
||||
**Capability-focused criteria** (prefer):
|
||||
> - [ ] Unauthorized requests are rejected before reaching protected resources
|
||||
> - [ ] Stored credentials cannot be recovered even if the database is compromised
|
||||
|
||||
Save implementation details for flights.
|
||||
|
||||
## Mission Debrief
|
||||
|
||||
After a mission completes (or aborts), create a **mission debrief** for retrospective learning:
|
||||
|
||||
```markdown
|
||||
# Mission Debrief: {Title}
|
||||
|
||||
## Success Criteria Results
|
||||
Which criteria were met, partially met, or not met.
|
||||
|
||||
## Flight Summary
|
||||
Overview of how each flight contributed.
|
||||
|
||||
## What Went Well
|
||||
Effective patterns and successes.
|
||||
|
||||
## What Could Be Improved
|
||||
Process and execution improvements.
|
||||
|
||||
## Lessons Learned
|
||||
Insights to carry forward.
|
||||
|
||||
## Methodology Feedback
|
||||
Improvements to Flight Control itself.
|
||||
```
|
||||
|
||||
The debrief captures organizational learning and informs future missions.
|
||||
|
||||
## Relationship to Flights
|
||||
|
||||
Missions spawn flights. A typical mission might have:
|
||||
|
||||
```
|
||||
Mission: Secure User Authentication
|
||||
├── Flight: Account creation flow
|
||||
├── Flight: Login and session management
|
||||
├── Flight: OAuth integration
|
||||
└── Flight: Security hardening
|
||||
```
|
||||
|
||||
Flights can be planned upfront or emerge as the mission progresses. The mission provides the "why"; flights provide the "what" and "how".
|
||||
|
||||
## Next Steps
|
||||
|
||||
- [Flights](flights.md) — Learn to create technical specifications
|
||||
- [Workflow](workflow.md) — See how missions flow into flights
|
||||
136
container/mission-control/docs/overview.md
Normal file
136
container/mission-control/docs/overview.md
Normal file
@@ -0,0 +1,136 @@
|
||||
# Flight Control Overview
|
||||
|
||||
Flight Control is a methodology for AI-first software development that maintains meaningful human oversight while maximizing AI effectiveness.
|
||||
|
||||
## Philosophy
|
||||
|
||||
### AI-First, Human-Guided
|
||||
|
||||
Traditional development methodologies were designed for human developers. They assume humans will interpret requirements, make design decisions, and adapt to changing circumstances. AI agents work differently—they excel with explicit structure but struggle with ambiguity.
|
||||
|
||||
Flight Control inverts the traditional approach:
|
||||
|
||||
- **Humans define outcomes**, not implementation details
|
||||
- **AI executes implementations**, not strategic decisions
|
||||
- **The methodology bridges the gap** through progressive specification
|
||||
|
||||
### Why Aviation Works
|
||||
|
||||
Aviation provides a proven model for high-stakes operations where planning and execution are separate concerns:
|
||||
|
||||
| Aviation | Flight Control |
|
||||
|----------|----------------|
|
||||
| Mission objectives | Mission outcomes |
|
||||
| Flight plan | Flight specification |
|
||||
| Flight legs | Implementation legs |
|
||||
| Pilot authority | Human oversight |
|
||||
| Autopilot execution | AI execution |
|
||||
|
||||
The key insight: pilots don't recompute routes in real-time. They follow pre-computed flight plans while retaining authority to adapt when circumstances demand. Similarly, AI agents shouldn't reinvent architecture with each task—they should execute well-specified legs while flagging issues for human review.
|
||||
|
||||
## Key Principles
|
||||
|
||||
### 1. Outcome-Driven Planning
|
||||
|
||||
Missions start with outcomes, not tasks:
|
||||
|
||||
**Traditional**: "Build a user authentication system"
|
||||
**Flight Control**: "Users can securely access their accounts with minimal friction"
|
||||
|
||||
The outcome framing keeps focus on what matters while leaving implementation flexible.
|
||||
|
||||
### 2. Adaptive Specifications
|
||||
|
||||
Flights are living documents. Unlike traditional specs that become stale, flight plans explicitly track:
|
||||
|
||||
- Open questions requiring resolution
|
||||
- Design decisions and their rationale
|
||||
- Prerequisites and dependencies
|
||||
- Adaptation criteria (when to deviate from plan)
|
||||
|
||||
### 3. Structured Execution
|
||||
|
||||
Legs are optimized for AI consumption:
|
||||
|
||||
- Explicit acceptance criteria
|
||||
- Required context clearly stated
|
||||
- Expected inputs and outputs defined
|
||||
- No ambiguity in scope
|
||||
|
||||
### 4. Layered Feedback
|
||||
|
||||
Information flows both directions:
|
||||
|
||||
- **Downward**: Missions inform flights, flights generate legs
|
||||
- **Upward**: Leg completion updates flights, flight outcomes inform mission status
|
||||
|
||||
## Comparison to Traditional Methodologies
|
||||
|
||||
### vs. Agile/Scrum
|
||||
|
||||
Agile emphasizes iterative human collaboration. Flight Control complements this by structuring how AI fits into iterations:
|
||||
|
||||
- Sprints can contain multiple flights
|
||||
- Stories map roughly to flights
|
||||
- Tasks map to legs
|
||||
|
||||
Flight Control adds the missing layer: how to specify work for AI execution.
|
||||
|
||||
### vs. Waterfall
|
||||
|
||||
Waterfall assumes complete upfront specification. Flight Control embraces uncertainty:
|
||||
|
||||
- Missions can spawn new flights as understanding evolves
|
||||
- Flights can be modified in-flight when circumstances change
|
||||
- Legs can be aborted and replaced
|
||||
|
||||
### vs. CRISP-DM / ML Workflows
|
||||
|
||||
Data science workflows focus on experimentation. Flight Control adds structure without eliminating iteration:
|
||||
|
||||
- Experimental flights can have "explore" legs
|
||||
- Failed experiments inform mission outcomes
|
||||
- Reproducibility is built into leg specifications
|
||||
|
||||
## The Audience Gradient
|
||||
|
||||
A core innovation is the **audience gradient**—documentation shifts style based on who consumes it:
|
||||
|
||||
```
|
||||
Human Readable ◄─────────────────────────────► AI Optimized
|
||||
│ │ │
|
||||
Mission Flight Leg
|
||||
│ │ │
|
||||
Narrative prose Technical spec Structured format
|
||||
Outcome-focused Checklist-driven Explicit criteria
|
||||
Flexible scope Bounded scope Fixed scope
|
||||
```
|
||||
|
||||
This gradient acknowledges that humans and AI have different strengths:
|
||||
|
||||
- Humans excel at ambiguity, context, and strategic thinking
|
||||
- AI excels at following explicit instructions consistently
|
||||
|
||||
Flight Control puts each audience where they're strongest.
|
||||
|
||||
## When to Use Flight Control
|
||||
|
||||
Flight Control works best when:
|
||||
|
||||
- AI agents are part of your development workflow
|
||||
- Work benefits from clear specification before execution
|
||||
- You need traceability from outcomes to implementation
|
||||
- Multiple people (or AI sessions) contribute to a single outcome
|
||||
|
||||
It may be overkill for:
|
||||
|
||||
- Quick one-off scripts
|
||||
- Solo exploratory coding
|
||||
- Highly uncertain R&D with no clear outcomes
|
||||
|
||||
## Next Steps
|
||||
|
||||
- [Missions](missions.md) — Learn to write outcome-driven mission statements
|
||||
- [Flights](flights.md) — Create technical specifications with pre/post checklists
|
||||
- [Legs](legs.md) — Structure AI-optimized implementation steps
|
||||
- [Workflow](workflow.md) — Understand the end-to-end flow
|
||||
473
container/mission-control/docs/workflow.md
Normal file
473
container/mission-control/docs/workflow.md
Normal file
@@ -0,0 +1,473 @@
|
||||
# Workflow
|
||||
|
||||
This document describes how work flows through Flight Control from mission inception to completion.
|
||||
|
||||
## The Complete Flow
|
||||
|
||||
```mermaid
|
||||
flowchart TB
|
||||
subgraph MISSION
|
||||
direction TB
|
||||
planning[planning] --> active[active] --> completed[completed]
|
||||
planning -.-> aborted[aborted]
|
||||
|
||||
subgraph " "
|
||||
direction LR
|
||||
flightA[Flight A] --> legsA[legs]
|
||||
flightB[Flight B] --> legsB[legs]
|
||||
flightC[Flight C] --> legsC[legs]
|
||||
end
|
||||
|
||||
active --> flightA
|
||||
active --> flightB
|
||||
active --> flightC
|
||||
legsA --> completed
|
||||
legsB --> completed
|
||||
legsC --> completed
|
||||
end
|
||||
```
|
||||
|
||||
## Phase 1: Mission Definition
|
||||
|
||||
### Starting Point
|
||||
|
||||
Work begins with an outcome someone wants to achieve:
|
||||
|
||||
> "We need users to be able to authenticate securely"
|
||||
|
||||
This human need becomes a mission.
|
||||
|
||||
### Mission Creation
|
||||
|
||||
1. **Write the outcome** — What does success look like?
|
||||
2. **Define success criteria** — How will we know we're done?
|
||||
3. **Identify stakeholders** — Who cares about this outcome?
|
||||
4. **Document constraints** — What boundaries exist?
|
||||
|
||||
```markdown
|
||||
# Mission: Secure User Authentication
|
||||
|
||||
## Outcome
|
||||
Users can securely access their accounts without credential friction.
|
||||
|
||||
## Success Criteria
|
||||
- [ ] Users can create accounts
|
||||
- [ ] Users can log in and maintain sessions
|
||||
- [ ] Password reset flow exists
|
||||
- [ ] Security audit passes
|
||||
|
||||
## Stakeholders
|
||||
- End users, Security team, Support team
|
||||
|
||||
## Constraints
|
||||
- Must complete before Q2 launch
|
||||
- Must meet SOC2 requirements
|
||||
```
|
||||
|
||||
### Mission State: `planning`
|
||||
|
||||
The mission exists but no work has started. This is the time for:
|
||||
- Refining the outcome statement
|
||||
- Adjusting success criteria
|
||||
- Identifying initial flights
|
||||
|
||||
## Phase 2: Flight Planning
|
||||
|
||||
### Decomposing the Mission
|
||||
|
||||
A mission typically requires multiple flights. Identify the major work areas:
|
||||
|
||||
```
|
||||
Mission: Secure User Authentication
|
||||
├── Flight: Account Creation
|
||||
├── Flight: Login and Sessions
|
||||
├── Flight: Password Reset
|
||||
└── Flight: Security Hardening
|
||||
```
|
||||
|
||||
### Creating the First Flight
|
||||
|
||||
Start with the flight that unblocks others or provides the most learning:
|
||||
|
||||
```markdown
|
||||
# Flight: Account Creation
|
||||
|
||||
## Mission Link
|
||||
[Secure User Authentication](../mission.md)
|
||||
- Contributing to: "Users can create accounts"
|
||||
|
||||
## Pre-Flight
|
||||
|
||||
### Objective
|
||||
Users can register new accounts with email and password.
|
||||
|
||||
### Open Questions
|
||||
- [ ] What password requirements?
|
||||
- [ ] Email verification required before use?
|
||||
|
||||
### Design Decisions
|
||||
(To be filled as questions are resolved)
|
||||
|
||||
### Prerequisites
|
||||
- [ ] Database provisioned
|
||||
- [ ] API framework set up
|
||||
```
|
||||
|
||||
### Flight State: `planning`
|
||||
|
||||
Pre-flight phase. Resolve open questions, document decisions, verify prerequisites.
|
||||
|
||||
## Phase 3: Pre-Flight Completion
|
||||
|
||||
### Resolving Questions
|
||||
|
||||
Each open question gets answered and documented:
|
||||
|
||||
```markdown
|
||||
### Open Questions
|
||||
- [x] What password requirements?
|
||||
- [x] Email verification required before use?
|
||||
|
||||
### Design Decisions
|
||||
|
||||
**Password Requirements**: Minimum 8 characters, 1 uppercase, 1 number
|
||||
- Rationale: Balance security and usability
|
||||
- Decided by: Security team
|
||||
|
||||
**Email Verification**: Required before accessing protected features
|
||||
- Rationale: Prevent spam accounts
|
||||
- Decided by: Product requirements
|
||||
```
|
||||
|
||||
### Defining Legs
|
||||
|
||||
With decisions made, break the flight into legs:
|
||||
|
||||
```markdown
|
||||
### Legs
|
||||
- [ ] `create-user-model` - Database model for users
|
||||
- [ ] `registration-endpoint` - API endpoint for registration
|
||||
- [ ] `email-verification` - Verification email and confirmation flow
|
||||
- [ ] `registration-tests` - Test coverage for registration
|
||||
```
|
||||
|
||||
### Pre-Flight Checklist
|
||||
|
||||
Complete the gate check:
|
||||
|
||||
```markdown
|
||||
### Pre-Flight Checklist
|
||||
- [x] All open questions resolved
|
||||
- [x] Design decisions documented
|
||||
- [x] Prerequisites verified
|
||||
- [x] Legs defined with acceptance criteria
|
||||
```
|
||||
|
||||
### Flight State: `ready`
|
||||
|
||||
Pre-flight complete. Ready for execution.
|
||||
|
||||
## Phase 4: Leg Execution
|
||||
|
||||
### Mission State: `active`
|
||||
|
||||
When the first leg begins, the mission becomes active.
|
||||
|
||||
### Flight State: `in-flight`
|
||||
|
||||
Legs are being executed. A [flight log](flight-logs.md) tracks progress, recording when legs start and complete, decisions made during execution, and any deviations or anomalies encountered.
|
||||
|
||||
### Leg Lifecycle
|
||||
|
||||
Each leg follows its own progression:
|
||||
|
||||
```
|
||||
planning ──► ready ──► in-flight ──► landed ──► completed
|
||||
```
|
||||
|
||||
**Example: `create-user-model` leg**
|
||||
|
||||
```markdown
|
||||
# Leg: create-user-model
|
||||
|
||||
## Objective
|
||||
Create the User model with authentication fields.
|
||||
|
||||
## Acceptance Criteria
|
||||
- [ ] User model in schema
|
||||
- [ ] Fields: id, email, password_hash, verified, timestamps
|
||||
- [ ] Migration applied
|
||||
- [ ] Types generated
|
||||
```
|
||||
|
||||
**Execution flow:**
|
||||
|
||||
1. Leg design approved, moves to `ready`
|
||||
2. Developer begins, leg moves to `in-flight`
|
||||
3. Developer completes implementation, leg moves to `landed`, flight log updated
|
||||
4. Reviewer verifies acceptance criteria met
|
||||
5. Leg moves to `completed`
|
||||
|
||||
### Parallel vs. Sequential Legs
|
||||
|
||||
Some legs can run in parallel:
|
||||
|
||||
```
|
||||
create-user-model ────► registration-endpoint ────► registration-tests
|
||||
└──► email-verification ────────┘
|
||||
```
|
||||
|
||||
The model must exist first, but the endpoint and email flows can be built simultaneously, then tests cover everything.
|
||||
|
||||
### Handling Aborted Legs
|
||||
|
||||
When a leg can't proceed:
|
||||
|
||||
1. Mark it `aborted` — changes are rolled back
|
||||
2. Determine if it needs:
|
||||
- Flight-level decision (update the flight)
|
||||
- External resolution (wait for dependency)
|
||||
- New leg with updated requirements
|
||||
|
||||
```markdown
|
||||
## Status: aborted
|
||||
|
||||
**Reason**: Email service credentials not available in dev environment
|
||||
**Changes**: Rolled back
|
||||
**Next**: New leg after DevOps provisions SendGrid API key
|
||||
```
|
||||
|
||||
## Phase 5: Flight Completion
|
||||
|
||||
### All Legs Complete
|
||||
|
||||
When every leg reaches `completed`:
|
||||
|
||||
```markdown
|
||||
### Legs
|
||||
- [x] `create-user-model` - completed
|
||||
- [x] `registration-endpoint` - completed
|
||||
- [x] `email-verification` - completed
|
||||
- [x] `registration-tests` - completed
|
||||
```
|
||||
|
||||
### Post-Flight Checklist
|
||||
|
||||
```markdown
|
||||
### Completion Checklist
|
||||
- [x] All legs completed
|
||||
- [x] Code merged to main
|
||||
- [x] Tests passing in CI
|
||||
- [x] Documentation updated
|
||||
```
|
||||
|
||||
### Verification
|
||||
|
||||
Confirm the flight achieved its objective:
|
||||
|
||||
```markdown
|
||||
### Verification
|
||||
|
||||
Manual test completed:
|
||||
1. ✓ Created account with test@example.com
|
||||
2. ✓ Received verification email
|
||||
3. ✓ Clicked link, account verified
|
||||
4. ✓ Can access protected features
|
||||
```
|
||||
|
||||
### Flight Debrief
|
||||
|
||||
**Run `/flight-debrief` to capture learnings.** This is a required step, not optional.
|
||||
|
||||
The debrief skill will:
|
||||
- Analyze what went well and what could improve
|
||||
- Identify process and technical lessons
|
||||
- Recommend methodology improvements
|
||||
- Update project documentation if needed
|
||||
|
||||
The debrief artifact becomes part of the flight record and informs future flights.
|
||||
|
||||
### Flight State: `landed`
|
||||
|
||||
Post-flight complete. The flight achieved its objective.
|
||||
|
||||
## Phase 6: Mission Progression
|
||||
|
||||
### Tracking Mission Progress
|
||||
|
||||
As flights land, mission success criteria get checked:
|
||||
|
||||
```markdown
|
||||
## Success Criteria
|
||||
- [x] Users can create accounts ← Flight: Account Creation landed
|
||||
- [ ] Users can log in and maintain sessions ← Flight: Login in-flight
|
||||
- [ ] Password reset flow exists ← Flight: Password Reset planning
|
||||
- [ ] Security audit passes ← Flight: Security Hardening planning
|
||||
```
|
||||
|
||||
### Spawning New Flights
|
||||
|
||||
Discoveries during execution may require new flights:
|
||||
|
||||
> "During the Login flight, we discovered we need rate limiting to prevent brute force attacks."
|
||||
|
||||
Create a new flight:
|
||||
|
||||
```
|
||||
Mission: Secure User Authentication
|
||||
├── Flight: Account Creation [landed]
|
||||
├── Flight: Login and Sessions [in-flight]
|
||||
├── Flight: Password Reset [planning]
|
||||
├── Flight: Security Hardening [planning]
|
||||
└── Flight: Rate Limiting [NEW - planning]
|
||||
```
|
||||
|
||||
### Mission Completion
|
||||
|
||||
When all success criteria are met:
|
||||
|
||||
```markdown
|
||||
## Success Criteria
|
||||
- [x] Users can create accounts
|
||||
- [x] Users can log in and maintain sessions
|
||||
- [x] Password reset flow exists
|
||||
- [x] Security audit passes
|
||||
```
|
||||
|
||||
### Mission Debrief
|
||||
|
||||
**Run `/mission-debrief` to perform a retrospective.** This is a required step, not optional.
|
||||
|
||||
The debrief skill will:
|
||||
- Assess whether the mission achieved its stated outcomes
|
||||
- Synthesize lessons from all flight debriefs
|
||||
- Capture process and methodology improvements
|
||||
- Interview participants for qualitative insights
|
||||
|
||||
The mission debrief is the primary source for improving Flight Control itself.
|
||||
|
||||
### Mission State: `completed`
|
||||
|
||||
All outcomes achieved. The mission can be archived with its debrief.
|
||||
|
||||
## Feedback Loops
|
||||
|
||||
### Upward Feedback
|
||||
|
||||
Information flows from legs to flights to missions:
|
||||
|
||||
- **Leg → Flight**: Completion status, discovered complexity, blockers (captured in flight log)
|
||||
- **Flight → Mission**: Progress on success criteria, new flight needs
|
||||
|
||||
### Downward Feedback
|
||||
|
||||
Guidance flows from missions to flights to legs:
|
||||
|
||||
- **Mission → Flight**: Priority changes, constraint updates
|
||||
- **Flight → Leg**: Design decisions, context updates, flight log history
|
||||
|
||||
### Adaptation
|
||||
|
||||
When circumstances change:
|
||||
|
||||
1. **Leg-level**: Block the leg, create new leg
|
||||
2. **Flight-level**: Divert the flight, re-plan
|
||||
3. **Mission-level**: Adjust success criteria or abort
|
||||
|
||||
## Handling Diversions
|
||||
|
||||
### When to Divert vs. Create New Flight
|
||||
|
||||
**Divert the current flight when:**
|
||||
- The objective remains the same but the approach must change
|
||||
- External factors (security issues, API changes) invalidate the current plan
|
||||
- Discovered complexity requires re-planning but the goal is unchanged
|
||||
|
||||
**Create a new flight when:**
|
||||
- A completely new objective emerges
|
||||
- The discovered work is independent of the current flight's goal
|
||||
- The new work serves different mission success criteria
|
||||
|
||||
### In-Flight Modifications
|
||||
|
||||
Flights can be modified while `in-flight` — for example, when planned legs need to change due to discoveries during execution. Update the flight artifact and record the change and rationale in the flight log.
|
||||
|
||||
### Aborting a Flight
|
||||
|
||||
When a flight must be cancelled, changes are rolled back:
|
||||
|
||||
```markdown
|
||||
## Status: aborted
|
||||
|
||||
**Reason**: Security audit revealed JWT vulnerability requires a fundamentally different approach
|
||||
**Changes**: Rolled back — new flight will be created for Auth0 integration
|
||||
```
|
||||
|
||||
### Aborting a Mission
|
||||
|
||||
Sometimes missions should be cancelled:
|
||||
|
||||
```markdown
|
||||
## Status: aborted
|
||||
|
||||
**Reason**: Business pivot - authentication now handled by parent company SSO
|
||||
**Learning**: Coordinate with enterprise architecture earlier
|
||||
**Artifacts**: Account creation flight code preserved in branch for reference
|
||||
```
|
||||
|
||||
Document the reason for future reference.
|
||||
|
||||
## State Summary
|
||||
|
||||
### All States in One View
|
||||
|
||||
```
|
||||
MISSION STATES
|
||||
planning ──► active ──► completed
|
||||
│
|
||||
└──► aborted
|
||||
|
||||
FLIGHT STATES
|
||||
planning ──► ready ──► in-flight ──► landed ──► completed
|
||||
│
|
||||
└──► aborted
|
||||
|
||||
LEG STATES
|
||||
planning ──► ready ──► in-flight ──► landed ──► completed
|
||||
│
|
||||
└──► aborted
|
||||
```
|
||||
|
||||
## When to Create vs. Modify
|
||||
|
||||
### Create New
|
||||
|
||||
- **Mission**: New outcome needed
|
||||
- **Flight**: New area of work identified
|
||||
- **Leg**: Requirements changed after `in-flight`
|
||||
|
||||
### Modify Existing
|
||||
|
||||
- **Mission**: Refining success criteria during `planning`
|
||||
- **Flight**: Updating during `planning` phase
|
||||
- **Leg**: Only while `planning` (before work begins)
|
||||
|
||||
### Rule of Thumb
|
||||
|
||||
Once work begins, create new rather than modify. This preserves history and prevents confusion about what was actually requested.
|
||||
|
||||
## Quick Reference
|
||||
|
||||
| Phase | Mission State | Flight State | Leg State | Action |
|
||||
|-------|---------------|--------------|-----------|--------|
|
||||
| Defining outcomes | planning | — | — | — |
|
||||
| Planning first flight | planning | planning | — | — |
|
||||
| Pre-flight complete | planning | ready | — | — |
|
||||
| Designing first leg | active | in-flight | planning | — |
|
||||
| Leg design approved | active | in-flight | ready | — |
|
||||
| Executing leg | active | in-flight | in-flight | — |
|
||||
| Leg implementation done | active | in-flight | landed | — |
|
||||
| Leg reviewed | active | in-flight | completed | — |
|
||||
| Flight done | active | landed | — | — |
|
||||
| Flight debriefed | active | completed | — | `/flight-debrief` |
|
||||
| All flights done | completed | — | — | `/mission-debrief` |
|
||||
29
container/mission-control/projects.md.template
Normal file
29
container/mission-control/projects.md.template
Normal file
@@ -0,0 +1,29 @@
|
||||
# Projects Registry
|
||||
|
||||
This file catalogs all active projects to provide context for mission, flight, and leg planning.
|
||||
|
||||
Copy this file to `projects.md` and fill in your project details.
|
||||
|
||||
---
|
||||
|
||||
## example-project
|
||||
|
||||
**Description**: Brief description of what this project does.
|
||||
|
||||
| Property | Value |
|
||||
|----------|-------|
|
||||
| Path | `/path/to/project` |
|
||||
| Remote | `git@github.com:username/repo.git` |
|
||||
| Stack | (optional) Key technologies used |
|
||||
| Status | (optional) Current state of the project |
|
||||
|
||||
---
|
||||
|
||||
## another-project
|
||||
|
||||
**Description**: Another project description.
|
||||
|
||||
| Property | Value |
|
||||
|----------|-------|
|
||||
| Path | `/path/to/another-project` |
|
||||
| Remote | `git@github.com:username/another-repo.git` |
|
||||
13
stt-container/Dockerfile
Normal file
13
stt-container/Dockerfile
Normal file
@@ -0,0 +1,13 @@
|
||||
FROM python:3.11-slim

# Quote uvicorn[standard] so the shell cannot glob-expand the brackets.
RUN pip install --no-cache-dir \
    faster-whisper \
    fastapi \
    "uvicorn[standard]" \
    python-multipart

# Run from /app so the module is importable as plain `server`.
# (The previous `app.server:app` CMD only resolved by accident via an
# implicit namespace package rooted at `/`.)
WORKDIR /app
COPY server.py /app/server.py

EXPOSE 9876

CMD ["uvicorn", "server:app", "--host", "0.0.0.0", "--port", "9876"]
|
||||
41
stt-container/server.py
Normal file
41
stt-container/server.py
Normal file
@@ -0,0 +1,41 @@
|
||||
import os
|
||||
import tempfile
|
||||
|
||||
from faster_whisper import WhisperModel
|
||||
from fastapi import FastAPI, File, Form, UploadFile
|
||||
from fastapi.responses import JSONResponse
|
||||
|
||||
# FastAPI application object served by uvicorn.
app = FastAPI()

# Global Whisper model handle; None until the startup hook populates it.
model: WhisperModel | None = None
|
||||
|
||||
|
||||
@app.on_event("startup")
def load_model():
    """Load the Whisper model once when the application starts.

    The model size is taken from the WHISPER_MODEL environment variable
    (defaulting to "tiny"); inference runs on CPU with int8 quantization.
    """
    global model
    size = os.environ.get("WHISPER_MODEL", "tiny")
    model = WhisperModel(size, device="cpu", compute_type="int8")
|
||||
|
||||
|
||||
@app.post("/transcribe")
async def transcribe(
    file: UploadFile = File(...),
    # `str | None` matches the actual default; the bare `str` annotation
    # previously contradicted Form(None).
    language: str | None = Form(None),
):
    """Transcribe an uploaded audio file.

    Args:
        file: The uploaded audio; written to a temporary .wav file
            because the model transcribes from a file path.
        language: Optional language hint passed through to the model;
            when omitted the model autodetects the language.

    Returns:
        A JSON object with the joined transcript ``text`` and the
        detected ``language``, or a 503 response if the model has not
        finished loading yet.
    """
    if model is None:
        return JSONResponse(status_code=503, content={"error": "Model not loaded"})

    with tempfile.NamedTemporaryFile(suffix=".wav", delete=True) as tmp:
        tmp.write(await file.read())
        tmp.flush()
        kwargs = {}
        if language:
            kwargs["language"] = language
        # transcribe() yields segments lazily; join inside the `with` so
        # decoding completes while the temp file still exists on disk.
        segments, info = model.transcribe(tmp.name, **kwargs)
        text = " ".join(s.text for s in segments).strip()

    return {"text": text, "language": info.language}
|
||||
|
||||
|
||||
@app.get("/health")
def health():
    """Liveness probe; always reports a static OK payload."""
    payload = {"status": "ok"}
    return payload
|
||||
Reference in New Issue
Block a user