[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[project]
name = "local-transcription"
version = "0.1.0"
description = "A standalone desktop application for real-time speech-to-text transcription using Whisper models"
readme = "README.md"
requires-python = ">=3.9"
license = { text = "MIT" }
authors = [
    { name = "Your Name", email = "your.email@example.com" },
]
keywords = ["transcription", "speech-to-text", "whisper", "streaming", "obs"]
dependencies = [
    "numpy>=1.24.0",
    "pyyaml>=6.0",
    "sounddevice>=0.4.6",
    "scipy>=1.10.0",
    "noisereduce>=3.0.0",
    "webrtcvad>=2.0.10",
    "faster-whisper>=0.10.0",
    "torch>=2.0.0",
    "PySide6>=6.6.0",
    # Web server (always-running for OBS integration)
    "fastapi>=0.104.0",
    "uvicorn>=0.24.0",
    "websockets>=12.0",
    # Server sync client
    "requests>=2.31.0",
]

[project.optional-dependencies]
# Kept for backwards compatibility, but server deps are now in main dependencies
server = [
    "fastapi>=0.104.0",
    "uvicorn>=0.24.0",
    "websockets>=12.0",
    "requests>=2.31.0",
]
dev = [
    "pytest>=7.4.0",
    "black>=23.0.0",
    "ruff>=0.1.0",
]

[project.scripts]
local-transcription = "main:main"

[dependency-groups]
dev = [
    "pyinstaller>=6.17.0",
]

[tool.hatch.build.targets.wheel]
packages = ["client", "gui"]

# Use PyTorch CUDA index by default.
# CUDA builds work on both GPU and CPU systems (fallback to CPU if no GPU).
[[tool.uv.index]]
url = "https://download.pytorch.org/whl/cu121"
default = true

[tool.ruff]
line-length = 100
target-version = "py39"

[tool.black]
line-length = 100
target-version = ["py39"]