#!/usr/bin/env bash
# Build script for Linux with CUDA support.
#
# Optionally installs CUDA-enabled PyTorch (cu121 wheels), then builds the
# application with PyInstaller and reports where the executable landed.
#
# Requires: uv on PATH; a local-transcription.spec file in the current dir.

set -euo pipefail

die() { printf '%s\n' "$*" >&2; exit 1; }

# Fail fast with a clear message instead of a cryptic "command not found"
# partway through the build.
command -v uv >/dev/null 2>&1 || die "Error: 'uv' is required but was not found on PATH."

echo "Building Local Transcription with CUDA support..."
echo "=================================================="
echo ""
echo "This will create a build that supports both CPU and CUDA GPUs."
echo "The executable will be larger (~2-3GB) but will work on any system."
echo ""

# Check if we should install CUDA-enabled PyTorch.
read -p "Install PyTorch with CUDA support? (y/n) " -n 1 -r
echo
if [[ "$REPLY" =~ ^[Yy]$ ]]; then
  echo "Installing PyTorch with CUDA 12.1 support..."

  # Uninstall CPU-only version if present.
  # Note: uv doesn't support -y flag, it uninstalls without confirmation.
  # "|| true" is intentional: failure here just means torch wasn't installed.
  uv pip uninstall torch 2>/dev/null || true

  # Install CUDA-enabled PyTorch.
  # This installs PyTorch with bundled CUDA runtime. Under "set -e" a
  # failed download/install now aborts instead of silently building
  # a CPU-only bundle.
  uv pip install torch torchvision torchaudio \
    --index-url https://download.pytorch.org/whl/cu121

  echo "✓ CUDA-enabled PyTorch installed"
  echo ""
fi

# Clean previous builds.
echo "Cleaning previous builds..."
rm -rf build dist

# Build with PyInstaller. Success is judged by the dist-directory check
# below, so guard with "|| true" to keep "set -e" from aborting before
# the failure message can be printed.
echo "Running PyInstaller..."
uv run pyinstaller local-transcription.spec || true

# Check if build succeeded.
if [ -d "dist/LocalTranscription" ]; then
  echo ""
  echo "✓ Build successful!"
  echo "Executable location: dist/LocalTranscription/LocalTranscription"
  echo ""
  echo "CUDA Support: YES (falls back to CPU if CUDA not available)"
  echo ""
  echo "To run the application:"
  echo "  cd dist/LocalTranscription"
  echo "  ./LocalTranscription"
  echo ""
  echo "To create a distributable package:"
  echo "  cd dist"
  echo "  tar -czf LocalTranscription-Linux-CUDA.tar.gz LocalTranscription/"
  echo ""
  echo "Note: This build will work on systems with or without NVIDIA GPUs."
else
  echo ""
  echo "✗ Build failed!"
  exit 1
fi