Compare commits
9 Commits
v0.1.77-wi
...
v0.1.86-wi
| Author | SHA1 | Date | |
|---|---|---|---|
| c023d80c86 | |||
| 33f02e65c0 | |||
| c5e28f9caa | |||
| 86176d8830 | |||
| 58a10c65e9 | |||
| d56c6e3845 | |||
| 574fca633a | |||
| e07c0e6150 | |||
| 20a07c84f2 |
@@ -19,6 +19,8 @@ env:
|
|||||||
jobs:
|
jobs:
|
||||||
build-linux:
|
build-linux:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
|
outputs:
|
||||||
|
version: ${{ steps.version.outputs.VERSION }}
|
||||||
steps:
|
steps:
|
||||||
- name: Install Node.js 22
|
- name: Install Node.js 22
|
||||||
run: |
|
run: |
|
||||||
@@ -374,3 +376,96 @@ jobs:
|
|||||||
echo Uploading %%~nxf...
|
echo Uploading %%~nxf...
|
||||||
curl -s -X POST -H "Authorization: token %TOKEN%" -H "Content-Type: application/octet-stream" --data-binary "@%%f" "%GITEA_URL%/api/v1/repos/%REPO%/releases/%RELEASE_ID%/assets?name=%%~nxf"
|
curl -s -X POST -H "Authorization: token %TOKEN%" -H "Content-Type: application/octet-stream" --data-binary "@%%f" "%GITEA_URL%/api/v1/repos/%REPO%/releases/%RELEASE_ID%/assets?name=%%~nxf"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
sync-to-github:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
needs: [build-linux, build-macos, build-windows]
|
||||||
|
if: gitea.event_name == 'push'
|
||||||
|
env:
|
||||||
|
GH_PAT: ${{ secrets.GH_PAT }}
|
||||||
|
GITHUB_REPO: shadowdao/triple-c
|
||||||
|
steps:
|
||||||
|
- name: Download artifacts from Gitea releases
|
||||||
|
env:
|
||||||
|
TOKEN: ${{ secrets.REGISTRY_TOKEN }}
|
||||||
|
VERSION: ${{ needs.build-linux.outputs.version }}
|
||||||
|
run: |
|
||||||
|
set -e
|
||||||
|
mkdir -p artifacts
|
||||||
|
|
||||||
|
# Download assets from all 3 platform releases
|
||||||
|
for TAG_SUFFIX in "" "-mac" "-win"; do
|
||||||
|
TAG="v${VERSION}${TAG_SUFFIX}"
|
||||||
|
echo "==> Fetching assets for release ${TAG}..."
|
||||||
|
|
||||||
|
RELEASE_JSON=$(curl -sf \
|
||||||
|
-H "Authorization: token ${TOKEN}" \
|
||||||
|
"${GITEA_URL}/api/v1/repos/${REPO}/releases/tags/${TAG}" 2>/dev/null || echo "{}")
|
||||||
|
|
||||||
|
echo "$RELEASE_JSON" | jq -r '.assets[]? | "\(.name) \(.browser_download_url)"' | while read -r NAME URL; do
|
||||||
|
[ -z "$NAME" ] && continue
|
||||||
|
echo " Downloading ${NAME}..."
|
||||||
|
curl -sfL \
|
||||||
|
-H "Authorization: token ${TOKEN}" \
|
||||||
|
-o "artifacts/${NAME}" \
|
||||||
|
"$URL"
|
||||||
|
done
|
||||||
|
done
|
||||||
|
|
||||||
|
echo "==> All downloaded artifacts:"
|
||||||
|
ls -la artifacts/
|
||||||
|
|
||||||
|
- name: Create GitHub release and upload artifacts
|
||||||
|
env:
|
||||||
|
VERSION: ${{ needs.build-linux.outputs.version }}
|
||||||
|
COMMIT_SHA: ${{ gitea.sha }}
|
||||||
|
run: |
|
||||||
|
set -e
|
||||||
|
TAG="v${VERSION}"
|
||||||
|
|
||||||
|
echo "==> Creating unified release ${TAG} on GitHub..."
|
||||||
|
|
||||||
|
# Delete existing release if present (idempotent re-runs)
|
||||||
|
EXISTING=$(curl -sf \
|
||||||
|
-H "Authorization: Bearer ${GH_PAT}" \
|
||||||
|
-H "Accept: application/vnd.github+json" \
|
||||||
|
"https://api.github.com/repos/${GITHUB_REPO}/releases/tags/${TAG}" 2>/dev/null || echo "{}")
|
||||||
|
EXISTING_ID=$(echo "$EXISTING" | jq -r '.id // empty')
|
||||||
|
if [ -n "$EXISTING_ID" ]; then
|
||||||
|
echo " Deleting existing GitHub release ${TAG} (id: ${EXISTING_ID})..."
|
||||||
|
curl -sf -X DELETE \
|
||||||
|
-H "Authorization: Bearer ${GH_PAT}" \
|
||||||
|
-H "Accept: application/vnd.github+json" \
|
||||||
|
"https://api.github.com/repos/${GITHUB_REPO}/releases/${EXISTING_ID}"
|
||||||
|
fi
|
||||||
|
|
||||||
|
RESPONSE=$(curl -sf -X POST \
|
||||||
|
-H "Authorization: Bearer ${GH_PAT}" \
|
||||||
|
-H "Accept: application/vnd.github+json" \
|
||||||
|
-H "Content-Type: application/json" \
|
||||||
|
"https://api.github.com/repos/${GITHUB_REPO}/releases" \
|
||||||
|
-d "{
|
||||||
|
\"tag_name\": \"${TAG}\",
|
||||||
|
\"name\": \"Triple-C ${TAG}\",
|
||||||
|
\"body\": \"Automated build from commit ${COMMIT_SHA}\n\nIncludes Linux, macOS, and Windows artifacts.\",
|
||||||
|
\"draft\": false,
|
||||||
|
\"prerelease\": false
|
||||||
|
}")
|
||||||
|
|
||||||
|
UPLOAD_URL=$(echo "$RESPONSE" | jq -r '.upload_url' | sed 's/{?name,label}//')
|
||||||
|
echo "==> Upload URL: ${UPLOAD_URL}"
|
||||||
|
|
||||||
|
for file in artifacts/*; do
|
||||||
|
[ -f "$file" ] || continue
|
||||||
|
FILENAME=$(basename "$file")
|
||||||
|
MIME="application/octet-stream"
|
||||||
|
echo "==> Uploading ${FILENAME}..."
|
||||||
|
curl -sf -X POST \
|
||||||
|
-H "Authorization: Bearer ${GH_PAT}" \
|
||||||
|
-H "Accept: application/vnd.github+json" \
|
||||||
|
-H "Content-Type: ${MIME}" \
|
||||||
|
--data-binary "@${file}" \
|
||||||
|
"${UPLOAD_URL}?name=$(python3 -c "import urllib.parse, sys; print(urllib.parse.quote(sys.argv[1]))" "${FILENAME}")"
|
||||||
|
done
|
||||||
|
|
||||||
|
echo "==> GitHub release sync complete."
|
||||||
|
|||||||
@@ -1,8 +1,7 @@
|
|||||||
name: Sync Release to GitHub
|
name: Sync Release to GitHub
|
||||||
|
|
||||||
on:
|
on:
|
||||||
release:
|
workflow_dispatch:
|
||||||
types: [published]
|
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
sync-release:
|
sync-release:
|
||||||
|
|||||||
17
app/public/audio-capture-processor.js
Normal file
17
app/public/audio-capture-processor.js
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
class AudioCaptureProcessor extends AudioWorkletProcessor {
|
||||||
|
process(inputs, outputs, parameters) {
|
||||||
|
const input = inputs[0];
|
||||||
|
if (input && input.length > 0 && input[0].length > 0) {
|
||||||
|
const samples = input[0]; // Float32Array, mono channel
|
||||||
|
const int16 = new Int16Array(samples.length);
|
||||||
|
for (let i = 0; i < samples.length; i++) {
|
||||||
|
const s = Math.max(-1, Math.min(1, samples[i]));
|
||||||
|
int16[i] = s < 0 ? s * 0x8000 : s * 0x7FFF;
|
||||||
|
}
|
||||||
|
this.port.postMessage(int16.buffer, [int16.buffer]);
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
registerProcessor('audio-capture-processor', AudioCaptureProcessor);
|
||||||
1
app/src-tauri/Cargo.lock
generated
1
app/src-tauri/Cargo.lock
generated
@@ -4681,6 +4681,7 @@ dependencies = [
|
|||||||
"reqwest 0.12.28",
|
"reqwest 0.12.28",
|
||||||
"serde",
|
"serde",
|
||||||
"serde_json",
|
"serde_json",
|
||||||
|
"sha2",
|
||||||
"tar",
|
"tar",
|
||||||
"tauri",
|
"tauri",
|
||||||
"tauri-build",
|
"tauri-build",
|
||||||
|
|||||||
@@ -30,6 +30,7 @@ fern = { version = "0.7", features = ["date-based"] }
|
|||||||
tar = "0.4"
|
tar = "0.4"
|
||||||
reqwest = { version = "0.12", default-features = false, features = ["json", "rustls-tls"] }
|
reqwest = { version = "0.12", default-features = false, features = ["json", "rustls-tls"] }
|
||||||
iana-time-zone = "0.1"
|
iana-time-zone = "0.1"
|
||||||
|
sha2 = "0.10"
|
||||||
|
|
||||||
[build-dependencies]
|
[build-dependencies]
|
||||||
tauri-build = { version = "2", features = [] }
|
tauri-build = { version = "2", features = [] }
|
||||||
|
|||||||
@@ -53,6 +53,19 @@ fn load_secrets_for_project(project: &mut Project) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Resolve enabled MCP servers and filter to Docker-only ones.
|
||||||
|
fn resolve_mcp_servers(project: &Project, state: &AppState) -> (Vec<McpServer>, Vec<McpServer>) {
|
||||||
|
let all_mcp_servers = state.mcp_store.list();
|
||||||
|
let enabled_mcp: Vec<McpServer> = project.enabled_mcp_servers.iter()
|
||||||
|
.filter_map(|id| all_mcp_servers.iter().find(|s| &s.id == id).cloned())
|
||||||
|
.collect();
|
||||||
|
let docker_mcp: Vec<McpServer> = enabled_mcp.iter()
|
||||||
|
.filter(|s| s.is_docker())
|
||||||
|
.cloned()
|
||||||
|
.collect();
|
||||||
|
(enabled_mcp, docker_mcp)
|
||||||
|
}
|
||||||
|
|
||||||
#[tauri::command]
|
#[tauri::command]
|
||||||
pub async fn list_projects(state: State<'_, AppState>) -> Result<Vec<Project>, String> {
|
pub async fn list_projects(state: State<'_, AppState>) -> Result<Vec<Project>, String> {
|
||||||
Ok(state.projects_store.list())
|
Ok(state.projects_store.list())
|
||||||
@@ -97,6 +110,18 @@ pub async fn remove_project(
|
|||||||
let _ = docker::stop_container(container_id).await;
|
let _ = docker::stop_container(container_id).await;
|
||||||
let _ = docker::remove_container(container_id).await;
|
let _ = docker::remove_container(container_id).await;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Remove MCP containers and network
|
||||||
|
let (_enabled_mcp, docker_mcp) = resolve_mcp_servers(project, &state);
|
||||||
|
if !docker_mcp.is_empty() {
|
||||||
|
if let Err(e) = docker::remove_mcp_containers(&docker_mcp).await {
|
||||||
|
log::warn!("Failed to remove MCP containers for project {}: {}", project_id, e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if let Err(e) = docker::remove_project_network(&project.id).await {
|
||||||
|
log::warn!("Failed to remove project network for project {}: {}", project_id, e);
|
||||||
|
}
|
||||||
|
|
||||||
// Clean up the snapshot image + volumes
|
// Clean up the snapshot image + volumes
|
||||||
if let Err(e) = docker::remove_snapshot_image(project).await {
|
if let Err(e) = docker::remove_snapshot_image(project).await {
|
||||||
log::warn!("Failed to remove snapshot image for project {}: {}", project_id, e);
|
log::warn!("Failed to remove snapshot image for project {}: {}", project_id, e);
|
||||||
@@ -143,10 +168,7 @@ pub async fn start_project_container(
|
|||||||
let image_name = container_config::resolve_image_name(&settings.image_source, &settings.custom_image_name);
|
let image_name = container_config::resolve_image_name(&settings.image_source, &settings.custom_image_name);
|
||||||
|
|
||||||
// Resolve enabled MCP servers for this project
|
// Resolve enabled MCP servers for this project
|
||||||
let all_mcp_servers = state.mcp_store.list();
|
let (enabled_mcp, docker_mcp) = resolve_mcp_servers(&project, &state);
|
||||||
let enabled_mcp: Vec<McpServer> = project.enabled_mcp_servers.iter()
|
|
||||||
.filter_map(|id| all_mcp_servers.iter().find(|s| &s.id == id).cloned())
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
// Validate auth mode requirements
|
// Validate auth mode requirements
|
||||||
if project.auth_mode == AuthMode::Bedrock {
|
if project.auth_mode == AuthMode::Bedrock {
|
||||||
@@ -178,6 +200,17 @@ pub async fn start_project_container(
|
|||||||
// AWS config path from global settings
|
// AWS config path from global settings
|
||||||
let aws_config_path = settings.global_aws.aws_config_path.clone();
|
let aws_config_path = settings.global_aws.aws_config_path.clone();
|
||||||
|
|
||||||
|
// Set up Docker network and MCP containers if needed
|
||||||
|
let network_name = if !docker_mcp.is_empty() {
|
||||||
|
emit_progress(&app_handle, &project_id, "Setting up MCP network...");
|
||||||
|
let net = docker::ensure_project_network(&project.id).await?;
|
||||||
|
emit_progress(&app_handle, &project_id, "Starting MCP containers...");
|
||||||
|
docker::start_mcp_containers(&docker_mcp, &net).await?;
|
||||||
|
Some(net)
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
};
|
||||||
|
|
||||||
let container_id = if let Some(existing_id) = docker::find_existing_container(&project).await? {
|
let container_id = if let Some(existing_id) = docker::find_existing_container(&project).await? {
|
||||||
// Check if config changed — if so, snapshot + recreate
|
// Check if config changed — if so, snapshot + recreate
|
||||||
let needs_recreate = docker::container_needs_recreation(
|
let needs_recreate = docker::container_needs_recreation(
|
||||||
@@ -218,6 +251,7 @@ pub async fn start_project_container(
|
|||||||
&settings.global_custom_env_vars,
|
&settings.global_custom_env_vars,
|
||||||
settings.timezone.as_deref(),
|
settings.timezone.as_deref(),
|
||||||
&enabled_mcp,
|
&enabled_mcp,
|
||||||
|
network_name.as_deref(),
|
||||||
).await?;
|
).await?;
|
||||||
emit_progress(&app_handle, &project_id, "Starting container...");
|
emit_progress(&app_handle, &project_id, "Starting container...");
|
||||||
docker::start_container(&new_id).await?;
|
docker::start_container(&new_id).await?;
|
||||||
@@ -250,6 +284,7 @@ pub async fn start_project_container(
|
|||||||
&settings.global_custom_env_vars,
|
&settings.global_custom_env_vars,
|
||||||
settings.timezone.as_deref(),
|
settings.timezone.as_deref(),
|
||||||
&enabled_mcp,
|
&enabled_mcp,
|
||||||
|
network_name.as_deref(),
|
||||||
).await?;
|
).await?;
|
||||||
emit_progress(&app_handle, &project_id, "Starting container...");
|
emit_progress(&app_handle, &project_id, "Starting container...");
|
||||||
docker::start_container(&new_id).await?;
|
docker::start_container(&new_id).await?;
|
||||||
@@ -299,6 +334,15 @@ pub async fn stop_project_container(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Stop MCP containers (best-effort)
|
||||||
|
let (_enabled_mcp, docker_mcp) = resolve_mcp_servers(&project, &state);
|
||||||
|
if !docker_mcp.is_empty() {
|
||||||
|
emit_progress(&app_handle, &project_id, "Stopping MCP containers...");
|
||||||
|
if let Err(e) = docker::stop_mcp_containers(&docker_mcp).await {
|
||||||
|
log::warn!("Failed to stop MCP containers for project {}: {}", project_id, e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
state.projects_store.update_status(&project_id, ProjectStatus::Stopped)?;
|
state.projects_store.update_status(&project_id, ProjectStatus::Stopped)?;
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
@@ -322,6 +366,14 @@ pub async fn rebuild_project_container(
|
|||||||
state.projects_store.set_container_id(&project_id, None)?;
|
state.projects_store.set_container_id(&project_id, None)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Remove MCP containers before rebuild
|
||||||
|
let (_enabled_mcp, docker_mcp) = resolve_mcp_servers(&project, &state);
|
||||||
|
if !docker_mcp.is_empty() {
|
||||||
|
if let Err(e) = docker::remove_mcp_containers(&docker_mcp).await {
|
||||||
|
log::warn!("Failed to remove MCP containers for project {}: {}", project_id, e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Remove snapshot image + volumes so Reset creates from the clean base image
|
// Remove snapshot image + volumes so Reset creates from the clean base image
|
||||||
if let Err(e) = docker::remove_snapshot_image(&project).await {
|
if let Err(e) = docker::remove_snapshot_image(&project).await {
|
||||||
log::warn!("Failed to remove snapshot image for project {}: {}", project_id, e);
|
log::warn!("Failed to remove snapshot image for project {}: {}", project_id, e);
|
||||||
|
|||||||
@@ -1,7 +1,74 @@
|
|||||||
use tauri::{AppHandle, Emitter, State};
|
use tauri::{AppHandle, Emitter, State};
|
||||||
|
|
||||||
|
use crate::models::{AuthMode, BedrockAuthMethod, Project};
|
||||||
use crate::AppState;
|
use crate::AppState;
|
||||||
|
|
||||||
|
/// Build the command to run in the container terminal.
|
||||||
|
///
|
||||||
|
/// For Bedrock Profile projects, wraps `claude` in a bash script that validates
|
||||||
|
/// the AWS session first. If the SSO session is expired, runs `aws sso login`
|
||||||
|
/// so the user can re-authenticate (the URL is clickable via xterm.js WebLinksAddon).
|
||||||
|
fn build_terminal_cmd(project: &Project, state: &AppState) -> Vec<String> {
|
||||||
|
let is_bedrock_profile = project.auth_mode == AuthMode::Bedrock
|
||||||
|
&& project
|
||||||
|
.bedrock_config
|
||||||
|
.as_ref()
|
||||||
|
.map(|b| b.auth_method == BedrockAuthMethod::Profile)
|
||||||
|
.unwrap_or(false);
|
||||||
|
|
||||||
|
if !is_bedrock_profile {
|
||||||
|
return vec![
|
||||||
|
"claude".to_string(),
|
||||||
|
"--dangerously-skip-permissions".to_string(),
|
||||||
|
];
|
||||||
|
}
|
||||||
|
|
||||||
|
// Resolve AWS profile: project-level → global settings → "default"
|
||||||
|
let profile = project
|
||||||
|
.bedrock_config
|
||||||
|
.as_ref()
|
||||||
|
.and_then(|b| b.aws_profile.clone())
|
||||||
|
.or_else(|| state.settings_store.get().global_aws.aws_profile.clone())
|
||||||
|
.unwrap_or_else(|| "default".to_string());
|
||||||
|
|
||||||
|
// Build a bash wrapper that validates credentials, re-auths if needed,
|
||||||
|
// then exec's into claude.
|
||||||
|
let script = format!(
|
||||||
|
r#"
|
||||||
|
echo "Validating AWS session for profile '{profile}'..."
|
||||||
|
if aws sts get-caller-identity --profile '{profile}' >/dev/null 2>&1; then
|
||||||
|
echo "AWS session valid."
|
||||||
|
else
|
||||||
|
echo "AWS session expired or invalid."
|
||||||
|
# Check if this profile uses SSO (has sso_start_url configured)
|
||||||
|
if aws configure get sso_start_url --profile '{profile}' >/dev/null 2>&1; then
|
||||||
|
echo "Starting SSO login — click the URL below to authenticate:"
|
||||||
|
echo ""
|
||||||
|
aws sso login --profile '{profile}'
|
||||||
|
if [ $? -ne 0 ]; then
|
||||||
|
echo ""
|
||||||
|
echo "SSO login failed or was cancelled. Starting Claude anyway..."
|
||||||
|
echo "You may see authentication errors."
|
||||||
|
echo ""
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
echo "Profile '{profile}' does not use SSO. Check your AWS credentials."
|
||||||
|
echo "Starting Claude anyway..."
|
||||||
|
echo ""
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
exec claude --dangerously-skip-permissions
|
||||||
|
"#,
|
||||||
|
profile = profile
|
||||||
|
);
|
||||||
|
|
||||||
|
vec![
|
||||||
|
"bash".to_string(),
|
||||||
|
"-c".to_string(),
|
||||||
|
script,
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
#[tauri::command]
|
#[tauri::command]
|
||||||
pub async fn open_terminal_session(
|
pub async fn open_terminal_session(
|
||||||
project_id: String,
|
project_id: String,
|
||||||
@@ -19,10 +86,7 @@ pub async fn open_terminal_session(
|
|||||||
.as_ref()
|
.as_ref()
|
||||||
.ok_or_else(|| "Container not running".to_string())?;
|
.ok_or_else(|| "Container not running".to_string())?;
|
||||||
|
|
||||||
let cmd = vec![
|
let cmd = build_terminal_cmd(&project, &state);
|
||||||
"claude".to_string(),
|
|
||||||
"--dangerously-skip-permissions".to_string(),
|
|
||||||
];
|
|
||||||
|
|
||||||
let output_event = format!("terminal-output-{}", session_id);
|
let output_event = format!("terminal-output-{}", session_id);
|
||||||
let exit_event = format!("terminal-exit-{}", session_id);
|
let exit_event = format!("terminal-exit-{}", session_id);
|
||||||
@@ -69,6 +133,10 @@ pub async fn close_terminal_session(
|
|||||||
session_id: String,
|
session_id: String,
|
||||||
state: State<'_, AppState>,
|
state: State<'_, AppState>,
|
||||||
) -> Result<(), String> {
|
) -> Result<(), String> {
|
||||||
|
// Close audio bridge if it exists
|
||||||
|
let audio_session_id = format!("audio-{}", session_id);
|
||||||
|
state.exec_manager.close_session(&audio_session_id).await;
|
||||||
|
// Close terminal session
|
||||||
state.exec_manager.close_session(&session_id).await;
|
state.exec_manager.close_session(&session_id).await;
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
@@ -92,3 +160,53 @@ pub async fn paste_image_to_terminal(
|
|||||||
.write_file_to_container(&container_id, &file_name, &image_data)
|
.write_file_to_container(&container_id, &file_name, &image_data)
|
||||||
.await
|
.await
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[tauri::command]
|
||||||
|
pub async fn start_audio_bridge(
|
||||||
|
session_id: String,
|
||||||
|
state: State<'_, AppState>,
|
||||||
|
) -> Result<(), String> {
|
||||||
|
// Get container_id from the terminal session
|
||||||
|
let container_id = state.exec_manager.get_container_id(&session_id).await?;
|
||||||
|
|
||||||
|
// Create audio bridge exec session with ID "audio-{session_id}"
|
||||||
|
// The loop handles reconnection when the FIFO reader (fake rec) is killed and restarted
|
||||||
|
let audio_session_id = format!("audio-{}", session_id);
|
||||||
|
let cmd = vec![
|
||||||
|
"bash".to_string(),
|
||||||
|
"-c".to_string(),
|
||||||
|
"FIFO=/tmp/triple-c-audio-input; [ -p \"$FIFO\" ] || mkfifo \"$FIFO\"; trap '' PIPE; while true; do cat > \"$FIFO\" 2>/dev/null; sleep 0.1; done".to_string(),
|
||||||
|
];
|
||||||
|
|
||||||
|
state
|
||||||
|
.exec_manager
|
||||||
|
.create_session_with_tty(
|
||||||
|
&container_id,
|
||||||
|
&audio_session_id,
|
||||||
|
cmd,
|
||||||
|
false,
|
||||||
|
|_data| { /* ignore output from the audio bridge */ },
|
||||||
|
Box::new(|| { /* no exit handler needed */ }),
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tauri::command]
|
||||||
|
pub async fn send_audio_data(
|
||||||
|
session_id: String,
|
||||||
|
data: Vec<u8>,
|
||||||
|
state: State<'_, AppState>,
|
||||||
|
) -> Result<(), String> {
|
||||||
|
let audio_session_id = format!("audio-{}", session_id);
|
||||||
|
state.exec_manager.send_input(&audio_session_id, data).await
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tauri::command]
|
||||||
|
pub async fn stop_audio_bridge(
|
||||||
|
session_id: String,
|
||||||
|
state: State<'_, AppState>,
|
||||||
|
) -> Result<(), String> {
|
||||||
|
let audio_session_id = format!("audio-{}", session_id);
|
||||||
|
state.exec_manager.close_session(&audio_session_id).await;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|||||||
@@ -5,8 +5,7 @@ use bollard::container::{
|
|||||||
use bollard::image::{CommitContainerOptions, RemoveImageOptions};
|
use bollard::image::{CommitContainerOptions, RemoveImageOptions};
|
||||||
use bollard::models::{ContainerSummary, HostConfig, Mount, MountTypeEnum, PortBinding};
|
use bollard::models::{ContainerSummary, HostConfig, Mount, MountTypeEnum, PortBinding};
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
use std::collections::hash_map::DefaultHasher;
|
use sha2::{Sha256, Digest};
|
||||||
use std::hash::{Hash, Hasher};
|
|
||||||
|
|
||||||
use super::client::get_docker;
|
use super::client::get_docker;
|
||||||
use crate::models::{AuthMode, BedrockAuthMethod, ContainerInfo, EnvVar, GlobalAwsSettings, McpServer, McpTransportType, PortMapping, Project, ProjectPath};
|
use crate::models::{AuthMode, BedrockAuthMethod, ContainerInfo, EnvVar, GlobalAwsSettings, McpServer, McpTransportType, PortMapping, Project, ProjectPath};
|
||||||
@@ -129,20 +128,28 @@ fn merge_claude_instructions(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Hash a string with SHA-256 and return the hex digest.
|
||||||
|
fn sha256_hex(input: &str) -> String {
|
||||||
|
let mut hasher = Sha256::new();
|
||||||
|
hasher.update(input.as_bytes());
|
||||||
|
format!("{:x}", hasher.finalize())
|
||||||
|
}
|
||||||
|
|
||||||
/// Compute a fingerprint for the Bedrock configuration so we can detect changes.
|
/// Compute a fingerprint for the Bedrock configuration so we can detect changes.
|
||||||
fn compute_bedrock_fingerprint(project: &Project) -> String {
|
fn compute_bedrock_fingerprint(project: &Project) -> String {
|
||||||
if let Some(ref bedrock) = project.bedrock_config {
|
if let Some(ref bedrock) = project.bedrock_config {
|
||||||
let mut hasher = DefaultHasher::new();
|
let parts = vec![
|
||||||
format!("{:?}", bedrock.auth_method).hash(&mut hasher);
|
format!("{:?}", bedrock.auth_method),
|
||||||
bedrock.aws_region.hash(&mut hasher);
|
bedrock.aws_region.clone(),
|
||||||
bedrock.aws_access_key_id.hash(&mut hasher);
|
bedrock.aws_access_key_id.as_deref().unwrap_or("").to_string(),
|
||||||
bedrock.aws_secret_access_key.hash(&mut hasher);
|
bedrock.aws_secret_access_key.as_deref().unwrap_or("").to_string(),
|
||||||
bedrock.aws_session_token.hash(&mut hasher);
|
bedrock.aws_session_token.as_deref().unwrap_or("").to_string(),
|
||||||
bedrock.aws_profile.hash(&mut hasher);
|
bedrock.aws_profile.as_deref().unwrap_or("").to_string(),
|
||||||
bedrock.aws_bearer_token.hash(&mut hasher);
|
bedrock.aws_bearer_token.as_deref().unwrap_or("").to_string(),
|
||||||
bedrock.model_id.hash(&mut hasher);
|
bedrock.model_id.as_deref().unwrap_or("").to_string(),
|
||||||
bedrock.disable_prompt_caching.hash(&mut hasher);
|
format!("{}", bedrock.disable_prompt_caching),
|
||||||
format!("{:x}", hasher.finish())
|
];
|
||||||
|
sha256_hex(&parts.join("|"))
|
||||||
} else {
|
} else {
|
||||||
String::new()
|
String::new()
|
||||||
}
|
}
|
||||||
@@ -157,9 +164,7 @@ fn compute_paths_fingerprint(paths: &[ProjectPath]) -> String {
|
|||||||
.collect();
|
.collect();
|
||||||
parts.sort();
|
parts.sort();
|
||||||
let joined = parts.join(",");
|
let joined = parts.join(",");
|
||||||
let mut hasher = DefaultHasher::new();
|
sha256_hex(&joined)
|
||||||
joined.hash(&mut hasher);
|
|
||||||
format!("{:x}", hasher.finish())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Compute a fingerprint for port mappings so we can detect changes.
|
/// Compute a fingerprint for port mappings so we can detect changes.
|
||||||
@@ -171,13 +176,17 @@ fn compute_ports_fingerprint(port_mappings: &[PortMapping]) -> String {
|
|||||||
.collect();
|
.collect();
|
||||||
parts.sort();
|
parts.sort();
|
||||||
let joined = parts.join(",");
|
let joined = parts.join(",");
|
||||||
let mut hasher = DefaultHasher::new();
|
sha256_hex(&joined)
|
||||||
joined.hash(&mut hasher);
|
|
||||||
format!("{:x}", hasher.finish())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Build the JSON value for MCP servers config to be injected into ~/.claude.json.
|
/// Build the JSON value for MCP servers config to be injected into ~/.claude.json.
|
||||||
/// Produces `{"mcpServers": {"name": {"type": "stdio", ...}, ...}}`.
|
/// Produces `{"mcpServers": {"name": {"type": "stdio", ...}, ...}}`.
|
||||||
|
///
|
||||||
|
/// Handles 4 modes:
|
||||||
|
/// - Stdio+Docker: `docker exec -i <mcp-container-name> <command> ...args`
|
||||||
|
/// - Stdio+Manual: `<command> ...args` (existing behavior)
|
||||||
|
/// - HTTP+Docker: `streamableHttp` URL pointing to `http://<mcp-container-name>:<port>/mcp`
|
||||||
|
/// - HTTP+Manual: `streamableHttp` with user-provided URL + headers
|
||||||
fn build_mcp_servers_json(servers: &[McpServer]) -> String {
|
fn build_mcp_servers_json(servers: &[McpServer]) -> String {
|
||||||
let mut mcp_map = serde_json::Map::new();
|
let mut mcp_map = serde_json::Map::new();
|
||||||
for server in servers {
|
for server in servers {
|
||||||
@@ -185,18 +194,44 @@ fn build_mcp_servers_json(servers: &[McpServer]) -> String {
|
|||||||
match server.transport_type {
|
match server.transport_type {
|
||||||
McpTransportType::Stdio => {
|
McpTransportType::Stdio => {
|
||||||
entry.insert("type".to_string(), serde_json::json!("stdio"));
|
entry.insert("type".to_string(), serde_json::json!("stdio"));
|
||||||
|
if server.is_docker() {
|
||||||
|
// Stdio+Docker: use `docker exec` to communicate with MCP container
|
||||||
|
entry.insert("command".to_string(), serde_json::json!("docker"));
|
||||||
|
let mut args = vec![
|
||||||
|
"exec".to_string(),
|
||||||
|
"-i".to_string(),
|
||||||
|
server.mcp_container_name(),
|
||||||
|
];
|
||||||
|
if let Some(ref cmd) = server.command {
|
||||||
|
args.push(cmd.clone());
|
||||||
|
}
|
||||||
|
args.extend(server.args.iter().cloned());
|
||||||
|
entry.insert("args".to_string(), serde_json::json!(args));
|
||||||
|
} else {
|
||||||
|
// Stdio+Manual: existing behavior
|
||||||
if let Some(ref cmd) = server.command {
|
if let Some(ref cmd) = server.command {
|
||||||
entry.insert("command".to_string(), serde_json::json!(cmd));
|
entry.insert("command".to_string(), serde_json::json!(cmd));
|
||||||
}
|
}
|
||||||
if !server.args.is_empty() {
|
if !server.args.is_empty() {
|
||||||
entry.insert("args".to_string(), serde_json::json!(server.args));
|
entry.insert("args".to_string(), serde_json::json!(server.args));
|
||||||
}
|
}
|
||||||
|
}
|
||||||
if !server.env.is_empty() {
|
if !server.env.is_empty() {
|
||||||
entry.insert("env".to_string(), serde_json::json!(server.env));
|
entry.insert("env".to_string(), serde_json::json!(server.env));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
McpTransportType::Http => {
|
McpTransportType::Http => {
|
||||||
entry.insert("type".to_string(), serde_json::json!("http"));
|
entry.insert("type".to_string(), serde_json::json!("streamableHttp"));
|
||||||
|
if server.is_docker() {
|
||||||
|
// HTTP+Docker: point to MCP container by name on the shared network
|
||||||
|
let url = format!(
|
||||||
|
"http://{}:{}/mcp",
|
||||||
|
server.mcp_container_name(),
|
||||||
|
server.effective_container_port()
|
||||||
|
);
|
||||||
|
entry.insert("url".to_string(), serde_json::json!(url));
|
||||||
|
} else {
|
||||||
|
// HTTP+Manual: user-provided URL + headers
|
||||||
if let Some(ref url) = server.url {
|
if let Some(ref url) = server.url {
|
||||||
entry.insert("url".to_string(), serde_json::json!(url));
|
entry.insert("url".to_string(), serde_json::json!(url));
|
||||||
}
|
}
|
||||||
@@ -204,14 +239,6 @@ fn build_mcp_servers_json(servers: &[McpServer]) -> String {
|
|||||||
entry.insert("headers".to_string(), serde_json::json!(server.headers));
|
entry.insert("headers".to_string(), serde_json::json!(server.headers));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
McpTransportType::Sse => {
|
|
||||||
entry.insert("type".to_string(), serde_json::json!("sse"));
|
|
||||||
if let Some(ref url) = server.url {
|
|
||||||
entry.insert("url".to_string(), serde_json::json!(url));
|
|
||||||
}
|
|
||||||
if !server.headers.is_empty() {
|
|
||||||
entry.insert("headers".to_string(), serde_json::json!(server.headers));
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
mcp_map.insert(server.name.clone(), serde_json::Value::Object(entry));
|
mcp_map.insert(server.name.clone(), serde_json::Value::Object(entry));
|
||||||
@@ -226,9 +253,7 @@ fn compute_mcp_fingerprint(servers: &[McpServer]) -> String {
|
|||||||
return String::new();
|
return String::new();
|
||||||
}
|
}
|
||||||
let json = build_mcp_servers_json(servers);
|
let json = build_mcp_servers_json(servers);
|
||||||
let mut hasher = DefaultHasher::new();
|
sha256_hex(&json)
|
||||||
json.hash(&mut hasher);
|
|
||||||
format!("{:x}", hasher.finish())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn find_existing_container(project: &Project) -> Result<Option<String>, String> {
|
pub async fn find_existing_container(project: &Project) -> Result<Option<String>, String> {
|
||||||
@@ -271,6 +296,7 @@ pub async fn create_container(
|
|||||||
global_custom_env_vars: &[EnvVar],
|
global_custom_env_vars: &[EnvVar],
|
||||||
timezone: Option<&str>,
|
timezone: Option<&str>,
|
||||||
mcp_servers: &[McpServer],
|
mcp_servers: &[McpServer],
|
||||||
|
network_name: Option<&str>,
|
||||||
) -> Result<String, String> {
|
) -> Result<String, String> {
|
||||||
let docker = get_docker()?;
|
let docker = get_docker()?;
|
||||||
let container_name = project.container_name();
|
let container_name = project.container_name();
|
||||||
@@ -482,7 +508,7 @@ pub async fn create_container(
|
|||||||
if let Some(ref aws_path) = aws_dir {
|
if let Some(ref aws_path) = aws_dir {
|
||||||
if aws_path.exists() {
|
if aws_path.exists() {
|
||||||
mounts.push(Mount {
|
mounts.push(Mount {
|
||||||
target: Some("/home/claude/.aws".to_string()),
|
target: Some("/tmp/.host-aws".to_string()),
|
||||||
source: Some(aws_path.to_string_lossy().to_string()),
|
source: Some(aws_path.to_string_lossy().to_string()),
|
||||||
typ: Some(MountTypeEnum::BIND),
|
typ: Some(MountTypeEnum::BIND),
|
||||||
read_only: Some(true),
|
read_only: Some(true),
|
||||||
@@ -492,8 +518,12 @@ pub async fn create_container(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Docker socket (only if allowed)
|
// Docker socket (if allowed, or auto-enabled for stdio+Docker MCP servers)
|
||||||
if project.allow_docker_access {
|
let needs_docker_for_mcp = any_stdio_docker_mcp(mcp_servers);
|
||||||
|
if project.allow_docker_access || needs_docker_for_mcp {
|
||||||
|
if needs_docker_for_mcp && !project.allow_docker_access {
|
||||||
|
log::info!("Auto-enabling Docker socket access for stdio+Docker MCP servers");
|
||||||
|
}
|
||||||
// On Windows, the named pipe (//./pipe/docker_engine) cannot be
|
// On Windows, the named pipe (//./pipe/docker_engine) cannot be
|
||||||
// bind-mounted into a Linux container. Docker Desktop exposes the
|
// bind-mounted into a Linux container. Docker Desktop exposes the
|
||||||
// daemon socket as /var/run/docker.sock for container mounts.
|
// daemon socket as /var/run/docker.sock for container mounts.
|
||||||
@@ -542,6 +572,8 @@ pub async fn create_container(
|
|||||||
mounts: Some(mounts),
|
mounts: Some(mounts),
|
||||||
port_bindings: if port_bindings.is_empty() { None } else { Some(port_bindings) },
|
port_bindings: if port_bindings.is_empty() { None } else { Some(port_bindings) },
|
||||||
init: Some(true),
|
init: Some(true),
|
||||||
|
// Connect to project network if specified (for MCP container communication)
|
||||||
|
network_mode: network_name.map(|n| n.to_string()),
|
||||||
..Default::default()
|
..Default::default()
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -931,3 +963,178 @@ pub async fn list_sibling_containers() -> Result<Vec<ContainerSummary>, String>
|
|||||||
|
|
||||||
Ok(siblings)
|
Ok(siblings)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ── MCP Container Lifecycle ─────────────────────────────────────────────
|
||||||
|
|
||||||
|
/// Returns true if any MCP server uses stdio transport with Docker.
|
||||||
|
pub fn any_stdio_docker_mcp(servers: &[McpServer]) -> bool {
|
||||||
|
servers.iter().any(|s| s.is_docker() && s.transport_type == McpTransportType::Stdio)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns true if any MCP server uses Docker.
|
||||||
|
pub fn any_docker_mcp(servers: &[McpServer]) -> bool {
|
||||||
|
servers.iter().any(|s| s.is_docker())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Find an existing MCP container by its expected name.
|
||||||
|
pub async fn find_mcp_container(server: &McpServer) -> Result<Option<String>, String> {
|
||||||
|
let docker = get_docker()?;
|
||||||
|
let container_name = server.mcp_container_name();
|
||||||
|
|
||||||
|
let filters: HashMap<String, Vec<String>> = HashMap::from([
|
||||||
|
("name".to_string(), vec![container_name.clone()]),
|
||||||
|
]);
|
||||||
|
|
||||||
|
let containers: Vec<ContainerSummary> = docker
|
||||||
|
.list_containers(Some(ListContainersOptions {
|
||||||
|
all: true,
|
||||||
|
filters,
|
||||||
|
..Default::default()
|
||||||
|
}))
|
||||||
|
.await
|
||||||
|
.map_err(|e| format!("Failed to list MCP containers: {}", e))?;
|
||||||
|
|
||||||
|
let expected = format!("/{}", container_name);
|
||||||
|
for c in &containers {
|
||||||
|
if let Some(names) = &c.names {
|
||||||
|
if names.iter().any(|n| n == &expected) {
|
||||||
|
return Ok(c.id.clone());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(None)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Create a Docker container for an MCP server.
|
||||||
|
pub async fn create_mcp_container(
|
||||||
|
server: &McpServer,
|
||||||
|
network_name: &str,
|
||||||
|
) -> Result<String, String> {
|
||||||
|
let docker = get_docker()?;
|
||||||
|
let container_name = server.mcp_container_name();
|
||||||
|
|
||||||
|
let image = server
|
||||||
|
.docker_image
|
||||||
|
.as_ref()
|
||||||
|
.ok_or_else(|| format!("MCP server '{}' has no docker_image", server.name))?;
|
||||||
|
|
||||||
|
let mut env_vars: Vec<String> = Vec::new();
|
||||||
|
for (k, v) in &server.env {
|
||||||
|
env_vars.push(format!("{}={}", k, v));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build command + args as Cmd
|
||||||
|
let mut cmd: Vec<String> = Vec::new();
|
||||||
|
if let Some(ref command) = server.command {
|
||||||
|
cmd.push(command.clone());
|
||||||
|
}
|
||||||
|
cmd.extend(server.args.iter().cloned());
|
||||||
|
|
||||||
|
let mut labels = HashMap::new();
|
||||||
|
labels.insert("triple-c.managed".to_string(), "true".to_string());
|
||||||
|
labels.insert("triple-c.mcp-server".to_string(), server.id.clone());
|
||||||
|
|
||||||
|
let host_config = HostConfig {
|
||||||
|
network_mode: Some(network_name.to_string()),
|
||||||
|
..Default::default()
|
||||||
|
};
|
||||||
|
|
||||||
|
let config = Config {
|
||||||
|
image: Some(image.clone()),
|
||||||
|
env: if env_vars.is_empty() { None } else { Some(env_vars) },
|
||||||
|
cmd: if cmd.is_empty() { None } else { Some(cmd) },
|
||||||
|
labels: Some(labels),
|
||||||
|
host_config: Some(host_config),
|
||||||
|
..Default::default()
|
||||||
|
};
|
||||||
|
|
||||||
|
let options = CreateContainerOptions {
|
||||||
|
name: container_name.clone(),
|
||||||
|
..Default::default()
|
||||||
|
};
|
||||||
|
|
||||||
|
let response = docker
|
||||||
|
.create_container(Some(options), config)
|
||||||
|
.await
|
||||||
|
.map_err(|e| format!("Failed to create MCP container '{}': {}", container_name, e))?;
|
||||||
|
|
||||||
|
log::info!(
|
||||||
|
"Created MCP container {} (image: {}) on network {}",
|
||||||
|
container_name,
|
||||||
|
image,
|
||||||
|
network_name
|
||||||
|
);
|
||||||
|
Ok(response.id)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Start all Docker-based MCP server containers. Finds or creates each one.
|
||||||
|
pub async fn start_mcp_containers(
|
||||||
|
servers: &[McpServer],
|
||||||
|
network_name: &str,
|
||||||
|
) -> Result<(), String> {
|
||||||
|
for server in servers {
|
||||||
|
if !server.is_docker() {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
let container_id = if let Some(existing_id) = find_mcp_container(server).await? {
|
||||||
|
log::debug!("Found existing MCP container for '{}'", server.name);
|
||||||
|
existing_id
|
||||||
|
} else {
|
||||||
|
create_mcp_container(server, network_name).await?
|
||||||
|
};
|
||||||
|
|
||||||
|
// Start the container (ignore already-started errors)
|
||||||
|
if let Err(e) = start_container(&container_id).await {
|
||||||
|
let err_str = e.to_string();
|
||||||
|
if err_str.contains("already started") || err_str.contains("304") {
|
||||||
|
log::debug!("MCP container '{}' already running", server.name);
|
||||||
|
} else {
|
||||||
|
return Err(format!(
|
||||||
|
"Failed to start MCP container '{}': {}",
|
||||||
|
server.name, e
|
||||||
|
));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
log::info!("MCP container '{}' started", server.name);
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Stop all Docker-based MCP server containers (best-effort).
|
||||||
|
pub async fn stop_mcp_containers(servers: &[McpServer]) -> Result<(), String> {
|
||||||
|
for server in servers {
|
||||||
|
if !server.is_docker() {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
if let Ok(Some(container_id)) = find_mcp_container(server).await {
|
||||||
|
if let Err(e) = stop_container(&container_id).await {
|
||||||
|
log::warn!("Failed to stop MCP container '{}': {}", server.name, e);
|
||||||
|
} else {
|
||||||
|
log::info!("Stopped MCP container '{}'", server.name);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Stop and remove all Docker-based MCP server containers (best-effort).
|
||||||
|
pub async fn remove_mcp_containers(servers: &[McpServer]) -> Result<(), String> {
|
||||||
|
for server in servers {
|
||||||
|
if !server.is_docker() {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
if let Ok(Some(container_id)) = find_mcp_container(server).await {
|
||||||
|
let _ = stop_container(&container_id).await;
|
||||||
|
if let Err(e) = remove_container(&container_id).await {
|
||||||
|
log::warn!("Failed to remove MCP container '{}': {}", server.name, e);
|
||||||
|
} else {
|
||||||
|
log::info!("Removed MCP container '{}'", server.name);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|||||||
@@ -60,6 +60,22 @@ impl ExecSessionManager {
|
|||||||
on_output: F,
|
on_output: F,
|
||||||
on_exit: Box<dyn FnOnce() + Send>,
|
on_exit: Box<dyn FnOnce() + Send>,
|
||||||
) -> Result<(), String>
|
) -> Result<(), String>
|
||||||
|
where
|
||||||
|
F: Fn(Vec<u8>) + Send + 'static,
|
||||||
|
{
|
||||||
|
self.create_session_with_tty(container_id, session_id, cmd, true, on_output, on_exit)
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn create_session_with_tty<F>(
|
||||||
|
&self,
|
||||||
|
container_id: &str,
|
||||||
|
session_id: &str,
|
||||||
|
cmd: Vec<String>,
|
||||||
|
tty: bool,
|
||||||
|
on_output: F,
|
||||||
|
on_exit: Box<dyn FnOnce() + Send>,
|
||||||
|
) -> Result<(), String>
|
||||||
where
|
where
|
||||||
F: Fn(Vec<u8>) + Send + 'static,
|
F: Fn(Vec<u8>) + Send + 'static,
|
||||||
{
|
{
|
||||||
@@ -72,7 +88,7 @@ impl ExecSessionManager {
|
|||||||
attach_stdin: Some(true),
|
attach_stdin: Some(true),
|
||||||
attach_stdout: Some(true),
|
attach_stdout: Some(true),
|
||||||
attach_stderr: Some(true),
|
attach_stderr: Some(true),
|
||||||
tty: Some(true),
|
tty: Some(tty),
|
||||||
cmd: Some(cmd),
|
cmd: Some(cmd),
|
||||||
user: Some("claude".to_string()),
|
user: Some("claude".to_string()),
|
||||||
working_dir: Some("/workspace".to_string()),
|
working_dir: Some("/workspace".to_string()),
|
||||||
|
|||||||
@@ -2,8 +2,10 @@ pub mod client;
|
|||||||
pub mod container;
|
pub mod container;
|
||||||
pub mod image;
|
pub mod image;
|
||||||
pub mod exec;
|
pub mod exec;
|
||||||
|
pub mod network;
|
||||||
|
|
||||||
pub use client::*;
|
pub use client::*;
|
||||||
pub use container::*;
|
pub use container::*;
|
||||||
pub use image::*;
|
pub use image::*;
|
||||||
pub use exec::*;
|
pub use exec::*;
|
||||||
|
pub use network::*;
|
||||||
|
|||||||
128
app/src-tauri/src/docker/network.rs
Normal file
128
app/src-tauri/src/docker/network.rs
Normal file
@@ -0,0 +1,128 @@
|
|||||||
|
use bollard::network::{CreateNetworkOptions, InspectNetworkOptions};
|
||||||
|
use std::collections::HashMap;
|
||||||
|
|
||||||
|
use super::client::get_docker;
|
||||||
|
|
||||||
|
/// Network name for a project's MCP containers.
|
||||||
|
fn project_network_name(project_id: &str) -> String {
|
||||||
|
format!("triple-c-net-{}", project_id)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Ensure a Docker bridge network exists for the project.
|
||||||
|
/// Returns the network name.
|
||||||
|
pub async fn ensure_project_network(project_id: &str) -> Result<String, String> {
|
||||||
|
let docker = get_docker()?;
|
||||||
|
let network_name = project_network_name(project_id);
|
||||||
|
|
||||||
|
// Check if network already exists
|
||||||
|
match docker
|
||||||
|
.inspect_network(&network_name, None::<InspectNetworkOptions<String>>)
|
||||||
|
.await
|
||||||
|
{
|
||||||
|
Ok(_) => {
|
||||||
|
log::debug!("Network {} already exists", network_name);
|
||||||
|
return Ok(network_name);
|
||||||
|
}
|
||||||
|
Err(_) => {
|
||||||
|
// Network doesn't exist, create it
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let options = CreateNetworkOptions {
|
||||||
|
name: network_name.clone(),
|
||||||
|
driver: "bridge".to_string(),
|
||||||
|
labels: HashMap::from([
|
||||||
|
("triple-c.managed".to_string(), "true".to_string()),
|
||||||
|
("triple-c.project-id".to_string(), project_id.to_string()),
|
||||||
|
]),
|
||||||
|
..Default::default()
|
||||||
|
};
|
||||||
|
|
||||||
|
docker
|
||||||
|
.create_network(options)
|
||||||
|
.await
|
||||||
|
.map_err(|e| format!("Failed to create network {}: {}", network_name, e))?;
|
||||||
|
|
||||||
|
log::info!("Created Docker network {}", network_name);
|
||||||
|
Ok(network_name)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Connect a container to the project network.
|
||||||
|
pub async fn connect_container_to_network(
|
||||||
|
container_id: &str,
|
||||||
|
network_name: &str,
|
||||||
|
) -> Result<(), String> {
|
||||||
|
let docker = get_docker()?;
|
||||||
|
|
||||||
|
let config = bollard::network::ConnectNetworkOptions {
|
||||||
|
container: container_id.to_string(),
|
||||||
|
..Default::default()
|
||||||
|
};
|
||||||
|
|
||||||
|
docker
|
||||||
|
.connect_network(network_name, config)
|
||||||
|
.await
|
||||||
|
.map_err(|e| {
|
||||||
|
format!(
|
||||||
|
"Failed to connect container {} to network {}: {}",
|
||||||
|
container_id, network_name, e
|
||||||
|
)
|
||||||
|
})?;
|
||||||
|
|
||||||
|
log::debug!(
|
||||||
|
"Connected container {} to network {}",
|
||||||
|
container_id,
|
||||||
|
network_name
|
||||||
|
);
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Remove the project network (best-effort). Disconnects all containers first.
|
||||||
|
pub async fn remove_project_network(project_id: &str) -> Result<(), String> {
|
||||||
|
let docker = get_docker()?;
|
||||||
|
let network_name = project_network_name(project_id);
|
||||||
|
|
||||||
|
// Inspect to get connected containers
|
||||||
|
let info = match docker
|
||||||
|
.inspect_network(&network_name, None::<InspectNetworkOptions<String>>)
|
||||||
|
.await
|
||||||
|
{
|
||||||
|
Ok(info) => info,
|
||||||
|
Err(_) => {
|
||||||
|
log::debug!(
|
||||||
|
"Network {} not found, nothing to remove",
|
||||||
|
network_name
|
||||||
|
);
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// Disconnect all containers
|
||||||
|
if let Some(containers) = info.containers {
|
||||||
|
for (container_id, _) in containers {
|
||||||
|
let disconnect_opts = bollard::network::DisconnectNetworkOptions {
|
||||||
|
container: container_id.clone(),
|
||||||
|
force: true,
|
||||||
|
};
|
||||||
|
if let Err(e) = docker
|
||||||
|
.disconnect_network(&network_name, disconnect_opts)
|
||||||
|
.await
|
||||||
|
{
|
||||||
|
log::warn!(
|
||||||
|
"Failed to disconnect container {} from network {}: {}",
|
||||||
|
container_id,
|
||||||
|
network_name,
|
||||||
|
e
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove the network
|
||||||
|
match docker.remove_network(&network_name).await {
|
||||||
|
Ok(_) => log::info!("Removed Docker network {}", network_name),
|
||||||
|
Err(e) => log::warn!("Failed to remove network {}: {}", network_name, e),
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
@@ -101,6 +101,9 @@ pub fn run() {
|
|||||||
commands::terminal_commands::terminal_resize,
|
commands::terminal_commands::terminal_resize,
|
||||||
commands::terminal_commands::close_terminal_session,
|
commands::terminal_commands::close_terminal_session,
|
||||||
commands::terminal_commands::paste_image_to_terminal,
|
commands::terminal_commands::paste_image_to_terminal,
|
||||||
|
commands::terminal_commands::start_audio_bridge,
|
||||||
|
commands::terminal_commands::send_audio_data,
|
||||||
|
commands::terminal_commands::stop_audio_bridge,
|
||||||
// MCP
|
// MCP
|
||||||
commands::mcp_commands::list_mcp_servers,
|
commands::mcp_commands::list_mcp_servers,
|
||||||
commands::mcp_commands::add_mcp_server,
|
commands::mcp_commands::add_mcp_server,
|
||||||
|
|||||||
@@ -70,6 +70,8 @@ pub struct AppSettings {
|
|||||||
pub dismissed_update_version: Option<String>,
|
pub dismissed_update_version: Option<String>,
|
||||||
#[serde(default)]
|
#[serde(default)]
|
||||||
pub timezone: Option<String>,
|
pub timezone: Option<String>,
|
||||||
|
#[serde(default)]
|
||||||
|
pub default_microphone: Option<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Default for AppSettings {
|
impl Default for AppSettings {
|
||||||
@@ -87,6 +89,7 @@ impl Default for AppSettings {
|
|||||||
auto_check_updates: true,
|
auto_check_updates: true,
|
||||||
dismissed_update_version: None,
|
dismissed_update_version: None,
|
||||||
timezone: None,
|
timezone: None,
|
||||||
|
default_microphone: None,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -5,8 +5,8 @@ use std::collections::HashMap;
|
|||||||
#[serde(rename_all = "snake_case")]
|
#[serde(rename_all = "snake_case")]
|
||||||
pub enum McpTransportType {
|
pub enum McpTransportType {
|
||||||
Stdio,
|
Stdio,
|
||||||
|
#[serde(alias = "sse")]
|
||||||
Http,
|
Http,
|
||||||
Sse,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Default for McpTransportType {
|
impl Default for McpTransportType {
|
||||||
@@ -29,6 +29,10 @@ pub struct McpServer {
|
|||||||
pub url: Option<String>,
|
pub url: Option<String>,
|
||||||
#[serde(default)]
|
#[serde(default)]
|
||||||
pub headers: HashMap<String, String>,
|
pub headers: HashMap<String, String>,
|
||||||
|
#[serde(default)]
|
||||||
|
pub docker_image: Option<String>,
|
||||||
|
#[serde(default)]
|
||||||
|
pub container_port: Option<u16>,
|
||||||
pub created_at: String,
|
pub created_at: String,
|
||||||
pub updated_at: String,
|
pub updated_at: String,
|
||||||
}
|
}
|
||||||
@@ -45,8 +49,22 @@ impl McpServer {
|
|||||||
env: HashMap::new(),
|
env: HashMap::new(),
|
||||||
url: None,
|
url: None,
|
||||||
headers: HashMap::new(),
|
headers: HashMap::new(),
|
||||||
|
docker_image: None,
|
||||||
|
container_port: None,
|
||||||
created_at: now.clone(),
|
created_at: now.clone(),
|
||||||
updated_at: now,
|
updated_at: now,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn is_docker(&self) -> bool {
|
||||||
|
self.docker_image.is_some()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn mcp_container_name(&self) -> String {
|
||||||
|
format!("triple-c-mcp-{}", self.id)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn effective_container_port(&self) -> u16 {
|
||||||
|
self.container_port.unwrap_or(3000)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -24,7 +24,7 @@ export default function Sidebar() {
|
|||||||
Projects
|
Projects
|
||||||
</button>
|
</button>
|
||||||
<button onClick={() => setSidebarView("mcp")} className={tabCls("mcp")}>
|
<button onClick={() => setSidebarView("mcp")} className={tabCls("mcp")}>
|
||||||
MCP
|
MCP <span className="text-[0.6rem] px-1 py-0.5 rounded bg-yellow-500/20 text-yellow-400 ml-0.5">Beta</span>
|
||||||
</button>
|
</button>
|
||||||
<button onClick={() => setSidebarView("settings")} className={tabCls("settings")}>
|
<button onClick={() => setSidebarView("settings")} className={tabCls("settings")}>
|
||||||
Settings
|
Settings
|
||||||
|
|||||||
@@ -26,7 +26,10 @@ export default function McpPanel() {
|
|||||||
return (
|
return (
|
||||||
<div className="space-y-3 p-2">
|
<div className="space-y-3 p-2">
|
||||||
<div>
|
<div>
|
||||||
<h2 className="text-sm font-semibold text-[var(--text-primary)]">MCP Servers</h2>
|
<h2 className="text-sm font-semibold text-[var(--text-primary)]">
|
||||||
|
MCP Servers{" "}
|
||||||
|
<span className="text-xs px-1.5 py-0.5 rounded bg-yellow-500/20 text-yellow-400">Beta</span>
|
||||||
|
</h2>
|
||||||
<p className="text-xs text-[var(--text-secondary)] mt-0.5">
|
<p className="text-xs text-[var(--text-secondary)] mt-0.5">
|
||||||
Define MCP servers globally, then enable them per-project.
|
Define MCP servers globally, then enable them per-project.
|
||||||
</p>
|
</p>
|
||||||
|
|||||||
@@ -16,6 +16,8 @@ export default function McpServerCard({ server, onUpdate, onRemove }: Props) {
|
|||||||
const [envPairs, setEnvPairs] = useState<[string, string][]>(Object.entries(server.env));
|
const [envPairs, setEnvPairs] = useState<[string, string][]>(Object.entries(server.env));
|
||||||
const [url, setUrl] = useState(server.url ?? "");
|
const [url, setUrl] = useState(server.url ?? "");
|
||||||
const [headerPairs, setHeaderPairs] = useState<[string, string][]>(Object.entries(server.headers));
|
const [headerPairs, setHeaderPairs] = useState<[string, string][]>(Object.entries(server.headers));
|
||||||
|
const [dockerImage, setDockerImage] = useState(server.docker_image ?? "");
|
||||||
|
const [containerPort, setContainerPort] = useState(server.container_port?.toString() ?? "3000");
|
||||||
|
|
||||||
useEffect(() => {
|
useEffect(() => {
|
||||||
setName(server.name);
|
setName(server.name);
|
||||||
@@ -25,6 +27,8 @@ export default function McpServerCard({ server, onUpdate, onRemove }: Props) {
|
|||||||
setEnvPairs(Object.entries(server.env));
|
setEnvPairs(Object.entries(server.env));
|
||||||
setUrl(server.url ?? "");
|
setUrl(server.url ?? "");
|
||||||
setHeaderPairs(Object.entries(server.headers));
|
setHeaderPairs(Object.entries(server.headers));
|
||||||
|
setDockerImage(server.docker_image ?? "");
|
||||||
|
setContainerPort(server.container_port?.toString() ?? "3000");
|
||||||
}, [server]);
|
}, [server]);
|
||||||
|
|
||||||
const saveServer = async (patch: Partial<McpServer>) => {
|
const saveServer = async (patch: Partial<McpServer>) => {
|
||||||
@@ -57,6 +61,15 @@ export default function McpServerCard({ server, onUpdate, onRemove }: Props) {
|
|||||||
saveServer({ url: url || null });
|
saveServer({ url: url || null });
|
||||||
};
|
};
|
||||||
|
|
||||||
|
const handleDockerImageBlur = () => {
|
||||||
|
saveServer({ docker_image: dockerImage || null });
|
||||||
|
};
|
||||||
|
|
||||||
|
const handleContainerPortBlur = () => {
|
||||||
|
const port = parseInt(containerPort, 10);
|
||||||
|
saveServer({ container_port: isNaN(port) ? null : port });
|
||||||
|
};
|
||||||
|
|
||||||
const saveEnv = (pairs: [string, string][]) => {
|
const saveEnv = (pairs: [string, string][]) => {
|
||||||
const env: Record<string, string> = {};
|
const env: Record<string, string> = {};
|
||||||
for (const [k, v] of pairs) {
|
for (const [k, v] of pairs) {
|
||||||
@@ -75,12 +88,15 @@ export default function McpServerCard({ server, onUpdate, onRemove }: Props) {
|
|||||||
|
|
||||||
const inputCls = "w-full px-2 py-1 bg-[var(--bg-primary)] border border-[var(--border-color)] rounded text-xs text-[var(--text-primary)] focus:outline-none focus:border-[var(--accent)]";
|
const inputCls = "w-full px-2 py-1 bg-[var(--bg-primary)] border border-[var(--border-color)] rounded text-xs text-[var(--text-primary)] focus:outline-none focus:border-[var(--accent)]";
|
||||||
|
|
||||||
|
const isDocker = !!dockerImage;
|
||||||
|
|
||||||
const transportBadge = {
|
const transportBadge = {
|
||||||
stdio: "Stdio",
|
stdio: "Stdio",
|
||||||
http: "HTTP",
|
http: "HTTP",
|
||||||
sse: "SSE",
|
|
||||||
}[transportType];
|
}[transportType];
|
||||||
|
|
||||||
|
const modeBadge = isDocker ? "Docker" : "Manual";
|
||||||
|
|
||||||
return (
|
return (
|
||||||
<div className="border border-[var(--border-color)] rounded bg-[var(--bg-primary)]">
|
<div className="border border-[var(--border-color)] rounded bg-[var(--bg-primary)]">
|
||||||
{/* Header */}
|
{/* Header */}
|
||||||
@@ -94,6 +110,9 @@ export default function McpServerCard({ server, onUpdate, onRemove }: Props) {
|
|||||||
<span className="text-xs px-1.5 py-0.5 rounded bg-[var(--bg-secondary)] text-[var(--text-secondary)]">
|
<span className="text-xs px-1.5 py-0.5 rounded bg-[var(--bg-secondary)] text-[var(--text-secondary)]">
|
||||||
{transportBadge}
|
{transportBadge}
|
||||||
</span>
|
</span>
|
||||||
|
<span className={`text-xs px-1.5 py-0.5 rounded ${isDocker ? "bg-blue-500/20 text-blue-400" : "bg-[var(--bg-secondary)] text-[var(--text-secondary)]"}`}>
|
||||||
|
{modeBadge}
|
||||||
|
</span>
|
||||||
</button>
|
</button>
|
||||||
<button
|
<button
|
||||||
onClick={() => { if (confirm(`Remove MCP server "${server.name}"?`)) onRemove(server.id); }}
|
onClick={() => { if (confirm(`Remove MCP server "${server.name}"?`)) onRemove(server.id); }}
|
||||||
@@ -117,11 +136,26 @@ export default function McpServerCard({ server, onUpdate, onRemove }: Props) {
|
|||||||
/>
|
/>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
|
{/* Docker Image (primary field — determines Docker vs Manual mode) */}
|
||||||
|
<div>
|
||||||
|
<label className="block text-xs text-[var(--text-secondary)] mb-0.5">Docker Image</label>
|
||||||
|
<input
|
||||||
|
value={dockerImage}
|
||||||
|
onChange={(e) => setDockerImage(e.target.value)}
|
||||||
|
onBlur={handleDockerImageBlur}
|
||||||
|
placeholder="e.g. mcp/filesystem:latest (leave empty for manual mode)"
|
||||||
|
className={inputCls}
|
||||||
|
/>
|
||||||
|
<p className="text-xs text-[var(--text-secondary)] mt-0.5 opacity-60">
|
||||||
|
Set a Docker image to run this MCP server as a container. Leave empty for manual mode.
|
||||||
|
</p>
|
||||||
|
</div>
|
||||||
|
|
||||||
{/* Transport type */}
|
{/* Transport type */}
|
||||||
<div>
|
<div>
|
||||||
<label className="block text-xs text-[var(--text-secondary)] mb-0.5">Transport</label>
|
<label className="block text-xs text-[var(--text-secondary)] mb-0.5">Transport</label>
|
||||||
<div className="flex items-center gap-1">
|
<div className="flex items-center gap-1">
|
||||||
{(["stdio", "http", "sse"] as McpTransportType[]).map((t) => (
|
{(["stdio", "http"] as McpTransportType[]).map((t) => (
|
||||||
<button
|
<button
|
||||||
key={t}
|
key={t}
|
||||||
onClick={() => handleTransportChange(t)}
|
onClick={() => handleTransportChange(t)}
|
||||||
@@ -131,12 +165,29 @@ export default function McpServerCard({ server, onUpdate, onRemove }: Props) {
|
|||||||
: "text-[var(--text-secondary)] hover:text-[var(--text-primary)] hover:bg-[var(--bg-secondary)]"
|
: "text-[var(--text-secondary)] hover:text-[var(--text-primary)] hover:bg-[var(--bg-secondary)]"
|
||||||
}`}
|
}`}
|
||||||
>
|
>
|
||||||
{t === "stdio" ? "Stdio" : t === "http" ? "HTTP" : "SSE"}
|
{t === "stdio" ? "Stdio" : "HTTP"}
|
||||||
</button>
|
</button>
|
||||||
))}
|
))}
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
|
{/* Container Port (HTTP+Docker only) */}
|
||||||
|
{transportType === "http" && isDocker && (
|
||||||
|
<div>
|
||||||
|
<label className="block text-xs text-[var(--text-secondary)] mb-0.5">Container Port</label>
|
||||||
|
<input
|
||||||
|
value={containerPort}
|
||||||
|
onChange={(e) => setContainerPort(e.target.value)}
|
||||||
|
onBlur={handleContainerPortBlur}
|
||||||
|
placeholder="3000"
|
||||||
|
className={inputCls}
|
||||||
|
/>
|
||||||
|
<p className="text-xs text-[var(--text-secondary)] mt-0.5 opacity-60">
|
||||||
|
Port inside the MCP container (default: 3000)
|
||||||
|
</p>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
|
||||||
{/* Stdio fields */}
|
{/* Stdio fields */}
|
||||||
{transportType === "stdio" && (
|
{transportType === "stdio" && (
|
||||||
<>
|
<>
|
||||||
@@ -146,7 +197,7 @@ export default function McpServerCard({ server, onUpdate, onRemove }: Props) {
|
|||||||
value={command}
|
value={command}
|
||||||
onChange={(e) => setCommand(e.target.value)}
|
onChange={(e) => setCommand(e.target.value)}
|
||||||
onBlur={handleCommandBlur}
|
onBlur={handleCommandBlur}
|
||||||
placeholder="npx"
|
placeholder={isDocker ? "Command inside container" : "npx"}
|
||||||
className={inputCls}
|
className={inputCls}
|
||||||
/>
|
/>
|
||||||
</div>
|
</div>
|
||||||
@@ -169,8 +220,8 @@ export default function McpServerCard({ server, onUpdate, onRemove }: Props) {
|
|||||||
</>
|
</>
|
||||||
)}
|
)}
|
||||||
|
|
||||||
{/* HTTP/SSE fields */}
|
{/* HTTP fields (only for manual mode — Docker mode auto-generates URL) */}
|
||||||
{(transportType === "http" || transportType === "sse") && (
|
{transportType === "http" && !isDocker && (
|
||||||
<>
|
<>
|
||||||
<div>
|
<div>
|
||||||
<label className="block text-xs text-[var(--text-secondary)] mb-0.5">URL</label>
|
<label className="block text-xs text-[var(--text-secondary)] mb-0.5">URL</label>
|
||||||
@@ -190,6 +241,16 @@ export default function McpServerCard({ server, onUpdate, onRemove }: Props) {
|
|||||||
/>
|
/>
|
||||||
</>
|
</>
|
||||||
)}
|
)}
|
||||||
|
|
||||||
|
{/* Environment variables for HTTP+Docker */}
|
||||||
|
{transportType === "http" && isDocker && (
|
||||||
|
<KeyValueEditor
|
||||||
|
label="Environment Variables"
|
||||||
|
pairs={envPairs}
|
||||||
|
onChange={(pairs) => { setEnvPairs(pairs); }}
|
||||||
|
onSave={saveEnv}
|
||||||
|
/>
|
||||||
|
)}
|
||||||
</div>
|
</div>
|
||||||
)}
|
)}
|
||||||
</div>
|
</div>
|
||||||
|
|||||||
@@ -622,6 +622,7 @@ export default function ProjectCard({ project }: Props) {
|
|||||||
<div className="space-y-1">
|
<div className="space-y-1">
|
||||||
{mcpServers.map((server) => {
|
{mcpServers.map((server) => {
|
||||||
const enabled = project.enabled_mcp_servers.includes(server.id);
|
const enabled = project.enabled_mcp_servers.includes(server.id);
|
||||||
|
const isDocker = !!server.docker_image;
|
||||||
return (
|
return (
|
||||||
<label key={server.id} className="flex items-center gap-2 cursor-pointer">
|
<label key={server.id} className="flex items-center gap-2 cursor-pointer">
|
||||||
<input
|
<input
|
||||||
@@ -642,10 +643,18 @@ export default function ProjectCard({ project }: Props) {
|
|||||||
/>
|
/>
|
||||||
<span className="text-xs text-[var(--text-primary)]">{server.name}</span>
|
<span className="text-xs text-[var(--text-primary)]">{server.name}</span>
|
||||||
<span className="text-xs text-[var(--text-secondary)]">({server.transport_type})</span>
|
<span className="text-xs text-[var(--text-secondary)]">({server.transport_type})</span>
|
||||||
|
<span className={`text-xs px-1 py-0.5 rounded ${isDocker ? "bg-blue-500/20 text-blue-400" : "bg-[var(--bg-secondary)] text-[var(--text-secondary)]"}`}>
|
||||||
|
{isDocker ? "Docker" : "Manual"}
|
||||||
|
</span>
|
||||||
</label>
|
</label>
|
||||||
);
|
);
|
||||||
})}
|
})}
|
||||||
</div>
|
</div>
|
||||||
|
{mcpServers.some((s) => s.docker_image && s.transport_type === "stdio" && project.enabled_mcp_servers.includes(s.id)) && (
|
||||||
|
<p className="text-xs text-[var(--text-secondary)] mt-1 opacity-70">
|
||||||
|
Docker access will be auto-enabled for stdio+Docker MCP servers.
|
||||||
|
</p>
|
||||||
|
)}
|
||||||
</div>
|
</div>
|
||||||
)}
|
)}
|
||||||
|
|
||||||
@@ -860,3 +869,4 @@ function ActionButton({
|
|||||||
</button>
|
</button>
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
101
app/src/components/settings/MicrophoneSettings.tsx
Normal file
101
app/src/components/settings/MicrophoneSettings.tsx
Normal file
@@ -0,0 +1,101 @@
|
|||||||
|
import { useState, useEffect, useCallback } from "react";
import { useSettings } from "../../hooks/useSettings";

// Minimal projection of MediaDeviceInfo — just the fields the <select> needs.
interface AudioDevice {
  deviceId: string;
  label: string;
}

/**
 * Settings row for picking the default microphone used by Claude Code voice
 * mode (/voice). The chosen deviceId is persisted in app settings as
 * `default_microphone`; an empty selection means "System Default" and is
 * stored as null.
 */
export default function MicrophoneSettings() {
  const { appSettings, saveSettings } = useSettings();
  // Enumerated audio-input devices. Labels are only populated by the browser
  // after microphone permission has been granted.
  const [devices, setDevices] = useState<AudioDevice[]>([]);
  // Currently selected deviceId; "" represents the system default.
  const [selected, setSelected] = useState(appSettings?.default_microphone ?? "");
  const [loading, setLoading] = useState(false);
  // True when getUserMedia failed (permission denied or no device) — switches
  // the UI to the "Grant Access" prompt.
  const [permissionNeeded, setPermissionNeeded] = useState(false);

  // Sync local state when appSettings change
  useEffect(() => {
    setSelected(appSettings?.default_microphone ?? "");
  }, [appSettings?.default_microphone]);

  // Enumerate audio-input devices. Requests (and immediately releases) a mic
  // stream first because enumerateDevices() only exposes labels once
  // permission is granted.
  const enumerateDevices = useCallback(async () => {
    setLoading(true);
    setPermissionNeeded(false);
    try {
      // Request mic permission first so device labels are available
      const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
      // Stop the tracks right away — we only needed the permission grant.
      stream.getTracks().forEach((t) => t.stop());

      const allDevices = await navigator.mediaDevices.enumerateDevices();
      const mics = allDevices
        .filter((d) => d.kind === "audioinput")
        .map((d) => ({
          deviceId: d.deviceId,
          // Fall back to a truncated deviceId when the label is empty.
          label: d.label || `Microphone (${d.deviceId.slice(0, 8)}...)`,
        }));
      setDevices(mics);
    } catch {
      // getUserMedia rejection is treated uniformly as "permission needed";
      // NOTE(review): this also swallows hardware errors — acceptable here
      // since the remedy (retry via "Grant Access") is the same.
      setPermissionNeeded(true);
    } finally {
      setLoading(false);
    }
  }, []);

  // Enumerate devices on mount
  useEffect(() => {
    enumerateDevices();
  }, [enumerateDevices]);

  // Persist the selection; "" (System Default) is stored as null.
  const handleChange = async (deviceId: string) => {
    setSelected(deviceId);
    if (appSettings) {
      await saveSettings({ ...appSettings, default_microphone: deviceId || null });
    }
  };

  return (
    <div>
      <label className="block text-sm font-medium mb-1">Microphone</label>
      <p className="text-xs text-[var(--text-secondary)] mb-1.5">
        Audio input device for Claude Code voice mode (/voice)
      </p>
      {permissionNeeded ? (
        <div className="flex items-center gap-2">
          <span className="text-xs text-[var(--text-secondary)]">
            Microphone permission required
          </span>
          <button
            onClick={enumerateDevices}
            className="text-xs px-2 py-0.5 text-[var(--accent)] hover:text-[var(--accent-hover)] hover:bg-[var(--bg-primary)] rounded transition-colors"
          >
            Grant Access
          </button>
        </div>
      ) : (
        <div className="flex items-center gap-2">
          <select
            value={selected}
            onChange={(e) => handleChange(e.target.value)}
            disabled={loading}
            className="flex-1 px-2 py-1 text-sm bg-[var(--bg-primary)] border border-[var(--border-color)] rounded focus:outline-none focus:border-[var(--accent)]"
          >
            <option value="">System Default</option>
            {devices.map((d) => (
              <option key={d.deviceId} value={d.deviceId}>
                {d.label}
              </option>
            ))}
          </select>
          <button
            onClick={enumerateDevices}
            disabled={loading}
            title="Refresh microphone list"
            className="text-xs px-2 py-1 text-[var(--text-secondary)] hover:text-[var(--text-primary)] hover:bg-[var(--bg-primary)] rounded transition-colors disabled:opacity-50"
          >
            {loading ? "..." : "Refresh"}
          </button>
        </div>
      )}
    </div>
  );
}
|
||||||
@@ -82,6 +82,25 @@ export default function TerminalView({ sessionId, active }: Props) {
|
|||||||
// Send initial size
|
// Send initial size
|
||||||
resize(sessionId, term.cols, term.rows);
|
resize(sessionId, term.cols, term.rows);
|
||||||
|
|
||||||
|
// Handle OSC 52 clipboard write sequences from programs inside the container.
// When a program (e.g. Claude Code) copies text via xclip/xsel/pbcopy, the
// container's shim emits an OSC 52 escape sequence which xterm.js routes here.
// Payload format is "<selection>;<base64-data>", e.g. "c;aGVsbG8=".
const osc52Disposable = term.parser.registerOscHandler(52, (data) => {
  const idx = data.indexOf(";");
  if (idx === -1) return false;
  const payload = data.substring(idx + 1);
  if (payload === "?") return false; // clipboard read request, not supported
  try {
    // atob() produces a binary string (one char per byte). The shim base64-
    // encodes raw bytes, so decode those bytes as UTF-8 instead of writing
    // the byte string directly — otherwise any non-ASCII content (accents,
    // CJK, emoji) is mangled in the clipboard.
    const bytes = Uint8Array.from(atob(payload), (c) => c.charCodeAt(0));
    const decoded = new TextDecoder().decode(bytes);
    navigator.clipboard.writeText(decoded).catch((e) =>
      console.error("OSC 52 clipboard write failed:", e),
    );
  } catch (e) {
    // atob throws on invalid base64; log and still claim the sequence.
    console.error("OSC 52 decode failed:", e);
  }
  return true;
});
|
||||||
|
|
||||||
// Handle user input -> backend
|
// Handle user input -> backend
|
||||||
const inputDisposable = term.onData((data) => {
|
const inputDisposable = term.onData((data) => {
|
||||||
sendInput(sessionId, data);
|
sendInput(sessionId, data);
|
||||||
@@ -170,6 +189,7 @@ export default function TerminalView({ sessionId, active }: Props) {
|
|||||||
aborted = true;
|
aborted = true;
|
||||||
detector.dispose();
|
detector.dispose();
|
||||||
detectorRef.current = null;
|
detectorRef.current = null;
|
||||||
|
osc52Disposable.dispose();
|
||||||
inputDisposable.dispose();
|
inputDisposable.dispose();
|
||||||
scrollDisposable.dispose();
|
scrollDisposable.dispose();
|
||||||
containerRef.current?.removeEventListener("paste", handlePaste, { capture: true });
|
containerRef.current?.removeEventListener("paste", handlePaste, { capture: true });
|
||||||
|
|||||||
59
app/src/components/terminal/osc52.test.ts
Normal file
59
app/src/components/terminal/osc52.test.ts
Normal file
@@ -0,0 +1,59 @@
|
|||||||
|
import { describe, it, expect, vi, beforeEach } from "vitest";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Tests the OSC 52 clipboard parsing logic used in TerminalView.
|
||||||
|
* Extracted here to validate the decode/write path independently.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Mirrors the handler registered in TerminalView.tsx
|
||||||
|
function handleOsc52(data: string): string | null {
|
||||||
|
const idx = data.indexOf(";");
|
||||||
|
if (idx === -1) return null;
|
||||||
|
const payload = data.substring(idx + 1);
|
||||||
|
if (payload === "?") return null;
|
||||||
|
try {
|
||||||
|
return atob(payload);
|
||||||
|
} catch {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
describe("OSC 52 clipboard handler", () => {
  it("decodes a valid clipboard write sequence", () => {
    // "c;BASE64" where BASE64 encodes "https://example.com"
    const encoded = btoa("https://example.com");
    const result = handleOsc52(`c;${encoded}`);
    expect(result).toBe("https://example.com");
  });

  it("decodes multi-line content", () => {
    // Newlines survive the base64 round-trip unchanged.
    const text = "line1\nline2\nline3";
    const encoded = btoa(text);
    const result = handleOsc52(`c;${encoded}`);
    expect(result).toBe(text);
  });

  it("handles primary selection target (p)", () => {
    // The selection target before ";" is ignored — only the payload matters.
    const encoded = btoa("selected text");
    const result = handleOsc52(`p;${encoded}`);
    expect(result).toBe("selected text");
  });

  it("returns null for clipboard read request (?)", () => {
    expect(handleOsc52("c;?")).toBe(null);
  });

  it("returns null for missing semicolon", () => {
    expect(handleOsc52("invalid")).toBe(null);
  });

  it("returns null for invalid base64", () => {
    // atob throws on characters outside the base64 alphabet.
    expect(handleOsc52("c;!!!not-base64!!!")).toBe(null);
  });

  it("handles empty payload after selection target", () => {
    // btoa("") = ""
    const result = handleOsc52("c;");
    expect(result).toBe("");
  });
});
|
||||||
103
app/src/hooks/useVoice.ts
Normal file
103
app/src/hooks/useVoice.ts
Normal file
@@ -0,0 +1,103 @@
|
|||||||
|
import { useCallback, useRef, useState } from "react";
|
||||||
|
import * as commands from "../lib/tauri-commands";
|
||||||
|
|
||||||
|
type VoiceState = "inactive" | "starting" | "active" | "error";
|
||||||
|
|
||||||
|
export function useVoice(sessionId: string, deviceId?: string | null) {
|
||||||
|
const [state, setState] = useState<VoiceState>("inactive");
|
||||||
|
const [error, setError] = useState<string | null>(null);
|
||||||
|
|
||||||
|
const audioContextRef = useRef<AudioContext | null>(null);
|
||||||
|
const streamRef = useRef<MediaStream | null>(null);
|
||||||
|
const workletRef = useRef<AudioWorkletNode | null>(null);
|
||||||
|
|
||||||
|
const start = useCallback(async () => {
|
||||||
|
if (state === "active" || state === "starting") return;
|
||||||
|
setState("starting");
|
||||||
|
setError(null);
|
||||||
|
|
||||||
|
try {
|
||||||
|
// 1. Start the audio bridge in the container (creates FIFO writer)
|
||||||
|
await commands.startAudioBridge(sessionId);
|
||||||
|
|
||||||
|
// 2. Get microphone access (use specific device if configured)
|
||||||
|
const audioConstraints: MediaTrackConstraints = {
|
||||||
|
channelCount: 1,
|
||||||
|
echoCancellation: true,
|
||||||
|
noiseSuppression: true,
|
||||||
|
autoGainControl: true,
|
||||||
|
};
|
||||||
|
if (deviceId) {
|
||||||
|
audioConstraints.deviceId = { exact: deviceId };
|
||||||
|
}
|
||||||
|
|
||||||
|
const stream = await navigator.mediaDevices.getUserMedia({
|
||||||
|
audio: audioConstraints,
|
||||||
|
});
|
||||||
|
streamRef.current = stream;
|
||||||
|
|
||||||
|
// 3. Create AudioContext at 16kHz (browser handles resampling)
|
||||||
|
const audioContext = new AudioContext({ sampleRate: 16000 });
|
||||||
|
audioContextRef.current = audioContext;
|
||||||
|
|
||||||
|
// 4. Load AudioWorklet processor
|
||||||
|
await audioContext.audioWorklet.addModule("/audio-capture-processor.js");
|
||||||
|
|
||||||
|
// 5. Connect: mic → worklet → (silent) destination
|
||||||
|
const source = audioContext.createMediaStreamSource(stream);
|
||||||
|
const processor = new AudioWorkletNode(audioContext, "audio-capture-processor");
|
||||||
|
workletRef.current = processor;
|
||||||
|
|
||||||
|
// 6. Handle PCM chunks from the worklet
|
||||||
|
processor.port.onmessage = (event: MessageEvent<ArrayBuffer>) => {
|
||||||
|
const bytes = Array.from(new Uint8Array(event.data));
|
||||||
|
commands.sendAudioData(sessionId, bytes).catch(() => {
|
||||||
|
// Audio bridge may have been closed — ignore send errors
|
||||||
|
});
|
||||||
|
};
|
||||||
|
|
||||||
|
source.connect(processor);
|
||||||
|
processor.connect(audioContext.destination);
|
||||||
|
|
||||||
|
setState("active");
|
||||||
|
} catch (e) {
|
||||||
|
const msg = e instanceof Error ? e.message : String(e);
|
||||||
|
setError(msg);
|
||||||
|
setState("error");
|
||||||
|
// Clean up on failure
|
||||||
|
await commands.stopAudioBridge(sessionId).catch(() => {});
|
||||||
|
}
|
||||||
|
}, [sessionId, state, deviceId]);
|
||||||
|
|
||||||
|
const stop = useCallback(async () => {
|
||||||
|
// Tear down audio pipeline
|
||||||
|
workletRef.current?.disconnect();
|
||||||
|
workletRef.current = null;
|
||||||
|
|
||||||
|
if (audioContextRef.current) {
|
||||||
|
await audioContextRef.current.close().catch(() => {});
|
||||||
|
audioContextRef.current = null;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (streamRef.current) {
|
||||||
|
streamRef.current.getTracks().forEach((t) => t.stop());
|
||||||
|
streamRef.current = null;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stop the container-side audio bridge
|
||||||
|
await commands.stopAudioBridge(sessionId).catch(() => {});
|
||||||
|
|
||||||
|
setState("inactive");
|
||||||
|
setError(null);
|
||||||
|
}, [sessionId]);
|
||||||
|
|
||||||
|
const toggle = useCallback(async () => {
|
||||||
|
if (state === "active") {
|
||||||
|
await stop();
|
||||||
|
} else {
|
||||||
|
await start();
|
||||||
|
}
|
||||||
|
}, [state, start, stop]);
|
||||||
|
|
||||||
|
return { state, error, start, stop, toggle };
|
||||||
|
}
|
||||||
@@ -49,6 +49,12 @@ export const closeTerminalSession = (sessionId: string) =>
|
|||||||
invoke<void>("close_terminal_session", { sessionId });
|
invoke<void>("close_terminal_session", { sessionId });
|
||||||
export const pasteImageToTerminal = (sessionId: string, imageData: number[]) =>
|
export const pasteImageToTerminal = (sessionId: string, imageData: number[]) =>
|
||||||
invoke<string>("paste_image_to_terminal", { sessionId, imageData });
|
invoke<string>("paste_image_to_terminal", { sessionId, imageData });
|
||||||
|
// Voice mode: host-side audio bridge for a terminal session.
// startAudioBridge opens the bridge (presumably the FIFO writer the
// container's audio-shim reads from — confirm in the Rust handler),
// sendAudioData streams raw PCM byte chunks, stopAudioBridge closes it.
export const startAudioBridge = (sessionId: string) =>
  invoke<void>("start_audio_bridge", { sessionId });

export const sendAudioData = (sessionId: string, data: number[]) =>
  invoke<void>("send_audio_data", { sessionId, data });

export const stopAudioBridge = (sessionId: string) =>
  invoke<void>("stop_audio_bridge", { sessionId });
|
||||||
|
|
||||||
// MCP Servers
|
// MCP Servers
|
||||||
export const listMcpServers = () => invoke<McpServer[]>("list_mcp_servers");
|
export const listMcpServers = () => invoke<McpServer[]>("list_mcp_servers");
|
||||||
|
|||||||
@@ -100,6 +100,7 @@ export interface AppSettings {
|
|||||||
auto_check_updates: boolean;
|
auto_check_updates: boolean;
|
||||||
dismissed_update_version: string | null;
|
dismissed_update_version: string | null;
|
||||||
timezone: string | null;
|
timezone: string | null;
|
||||||
|
default_microphone: string | null;
|
||||||
}
|
}
|
||||||
|
|
||||||
export interface UpdateInfo {
|
export interface UpdateInfo {
|
||||||
@@ -117,7 +118,7 @@ export interface ReleaseAsset {
|
|||||||
size: number;
|
size: number;
|
||||||
}
|
}
|
||||||
|
|
||||||
export type McpTransportType = "stdio" | "http" | "sse";
|
export type McpTransportType = "stdio" | "http";
|
||||||
|
|
||||||
export interface McpServer {
|
export interface McpServer {
|
||||||
id: string;
|
id: string;
|
||||||
@@ -128,6 +129,8 @@ export interface McpServer {
|
|||||||
env: Record<string, string>;
|
env: Record<string, string>;
|
||||||
url: string | null;
|
url: string | null;
|
||||||
headers: Record<string, string>;
|
headers: Record<string, string>;
|
||||||
|
docker_image: string | null;
|
||||||
|
container_port: number | null;
|
||||||
created_at: string;
|
created_at: string;
|
||||||
updated_at: string;
|
updated_at: string;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -101,6 +101,24 @@ WORKDIR /workspace
|
|||||||
|
|
||||||
# ── Switch back to root for entrypoint (handles UID/GID remapping) ─────────
|
# ── Switch back to root for entrypoint (handles UID/GID remapping) ─────────
|
||||||
USER root
|
USER root
|
||||||
|
|
||||||
|
# ── OSC 52 clipboard support ─────────────────────────────────────────────
# Provides xclip/xsel/pbcopy shims that emit OSC 52 escape sequences,
# allowing programs inside the container to copy to the host clipboard.
# NOTE(review): the symlinks shadow any real xclip/xsel installed earlier
# in the image — intentional here, but confirm no base layer needs them.
COPY osc52-clipboard /usr/local/bin/osc52-clipboard
RUN chmod +x /usr/local/bin/osc52-clipboard \
    && ln -sf /usr/local/bin/osc52-clipboard /usr/local/bin/xclip \
    && ln -sf /usr/local/bin/osc52-clipboard /usr/local/bin/xsel \
    && ln -sf /usr/local/bin/osc52-clipboard /usr/local/bin/pbcopy

# ── Audio capture shim (voice mode) ────────────────────────────────────────
# Provides fake rec/arecord that read PCM from a FIFO instead of a real mic,
# allowing Claude Code voice mode to work inside the container.
COPY audio-shim /usr/local/bin/audio-shim
RUN chmod +x /usr/local/bin/audio-shim \
    && ln -sf /usr/local/bin/audio-shim /usr/local/bin/rec \
    && ln -sf /usr/local/bin/audio-shim /usr/local/bin/arecord
|
||||||
|
|
||||||
COPY entrypoint.sh /usr/local/bin/entrypoint.sh
|
COPY entrypoint.sh /usr/local/bin/entrypoint.sh
|
||||||
RUN chmod +x /usr/local/bin/entrypoint.sh
|
RUN chmod +x /usr/local/bin/entrypoint.sh
|
||||||
COPY triple-c-scheduler /usr/local/bin/triple-c-scheduler
|
COPY triple-c-scheduler /usr/local/bin/triple-c-scheduler
|
||||||
|
|||||||
16
container/audio-shim
Normal file
16
container/audio-shim
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
#!/bin/bash
# Audio capture shim for Triple-C voice mode.
# Claude Code spawns `rec` or `arecord` to capture mic audio.
# Inside Docker there is no mic, so this shim reads PCM data from a
# FIFO that the Tauri host app writes to, and outputs it on stdout.

FIFO=/tmp/triple-c-audio-input

# Create the FIFO if it doesn't already exist (mkfifo failure from a
# create race with another instance is harmless, hence 2>/dev/null).
[ -p "$FIFO" ] || mkfifo "$FIFO" 2>/dev/null

# Stream PCM from the FIFO in the background. Bash only runs signal traps
# after a foreground command completes, so `trap 'exit 0' TERM; cat "$FIFO"`
# would not actually exit on SIGTERM until the FIFO writer closed. `wait`,
# by contrast, is interruptible, so the trap fires promptly.
cat "$FIFO" &
cat_pid=$!

# Clean exit on SIGTERM (Claude Code sends this when recording stops);
# take the reader down with us so the FIFO isn't left with a stale consumer.
trap 'kill "$cat_pid" 2>/dev/null; exit 0' TERM INT

wait "$cat_pid"
|
||||||
@@ -73,6 +73,19 @@ su -s /bin/bash claude -c '
|
|||||||
sort -u -o /home/claude/.ssh/known_hosts /home/claude/.ssh/known_hosts
|
sort -u -o /home/claude/.ssh/known_hosts /home/claude/.ssh/known_hosts
|
||||||
'
|
'
|
||||||
|
|
||||||
|
# ── AWS config setup ──────────────────────────────────────────────────────────
# Host AWS dir is mounted read-only at /tmp/.host-aws.
# Copy to /home/claude/.aws so AWS CLI can write to sso/cache and cli/cache.
if [ -d /tmp/.host-aws ]; then
  # Replace any stale copy from a previous container start before copying.
  rm -rf /home/claude/.aws
  # -a preserves modes/symlinks/timestamps of the host config files.
  cp -a /tmp/.host-aws /home/claude/.aws
  chown -R claude:claude /home/claude/.aws
  # Credentials must not be readable by other users.
  chmod 700 /home/claude/.aws
  # Ensure writable cache directories exist
  mkdir -p /home/claude/.aws/sso/cache /home/claude/.aws/cli/cache
  # Re-chown the cache subtrees: mkdir -p above runs as root, so fresh
  # directories would otherwise be root-owned and unwritable by claude.
  chown -R claude:claude /home/claude/.aws/sso /home/claude/.aws/cli
fi
|
||||||
|
|
||||||
# ── Git credential helper (for HTTPS token) ─────────────────────────────────
|
# ── Git credential helper (for HTTPS token) ─────────────────────────────────
|
||||||
if [ -n "$GIT_TOKEN" ]; then
|
if [ -n "$GIT_TOKEN" ]; then
|
||||||
CRED_FILE="/home/claude/.git-credentials"
|
CRED_FILE="/home/claude/.git-credentials"
|
||||||
|
|||||||
26
container/osc52-clipboard
Normal file
26
container/osc52-clipboard
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
#!/bin/bash
# OSC 52 clipboard provider — sends clipboard data to the host system clipboard
# via OSC 52 terminal escape sequences. Installed as xclip/xsel/pbcopy so that
# programs inside the container (e.g. Claude Code) can copy to clipboard.
#
# Supports common invocations:
#   echo "text" | xclip -selection clipboard
#   echo "text" | xsel --clipboard --input
#   echo "text" | pbcopy
#
# Paste/output requests exit silently (not supported via OSC 52).

# Detect paste/output mode — exit silently since we can't read the host
# clipboard. Covers xclip's full spelling -out as well as -o/--output.
for arg in "$@"; do
  case "$arg" in
    -o|-out|--output) exit 0 ;;
  esac
done

# Read all input from stdin.
# NOTE: $(cat) strips trailing newlines — acceptable for clipboard text.
data=$(cat)
[ -z "$data" ] && exit 0

# Base64 encode the payload; tr strips the line wraps base64 inserts.
encoded=$(printf '%s' "$data" | base64 | tr -d '\n')
seq=$(printf '\033]52;c;%s\a' "$encoded")

# Write the OSC 52 sequence to the controlling terminal. Redirections are
# processed left to right, so 2>/dev/null must come BEFORE > /dev/tty:
# otherwise a failed open of /dev/tty (no controlling terminal) prints an
# error to the caller's stderr before stderr is silenced. When there is no
# tty, fall back to stdout so the sequence can still reach the emulator.
if ! printf '%s' "$seq" 2>/dev/null > /dev/tty; then
  printf '%s' "$seq"
fi
exit 0
||||||
Reference in New Issue
Block a user