Files
local-transcription/pyproject.toml
Josh Knapp a5556c475d Fix uv index configuration: Use PyTorch CUDA as additional index
- Changed from 'default' to named additional index
- Added tool.uv.sources to specify torch comes from pytorch-cu121 index
- Other packages (fastapi, uvicorn, etc.) still come from PyPI
- Fixes: 'fastapi was not found in the package registry' error

How it works:
- PyPI remains the default index for most packages
- torch package explicitly uses pytorch-cu121 index
- Best of both worlds: CUDA PyTorch + all other packages from PyPI
2025-12-26 12:13:40 -08:00

77 lines
1.7 KiB
TOML

[project]
name = "local-transcription"
version = "0.1.0"
description = "A standalone desktop application for real-time speech-to-text transcription using Whisper models"
readme = "README.md"
requires-python = ">=3.9"
license = {text = "MIT"}
authors = [
    {name = "Your Name", email = "your.email@example.com"}
]
keywords = ["transcription", "speech-to-text", "whisper", "streaming", "obs"]
dependencies = [
    # Audio capture/processing, transcription, and GUI (sorted within groups)
    "faster-whisper>=0.10.0",
    "noisereduce>=3.0.0",
    "numpy>=1.24.0",
    "PySide6>=6.6.0",
    "pyyaml>=6.0",
    "scipy>=1.10.0",
    "sounddevice>=0.4.6",
    "torch>=2.0.0",
    "webrtcvad>=2.0.10",
    # Web server (always-running for OBS integration)
    "fastapi>=0.104.0",
    "uvicorn>=0.24.0",
    "websockets>=12.0",
    # Server sync client
    "requests>=2.31.0",
]
[project.optional-dependencies]
# Kept for backwards compatibility, but server deps are now in main dependencies,
# so `pip install local-transcription[server]` adds nothing new.
server = [
    "fastapi>=0.104.0",
    "requests>=2.31.0",
    "uvicorn>=0.24.0",
    "websockets>=12.0",
]
# Lint/format/test tooling; pins should stay compatible with [tool.black]
# and [tool.ruff] settings at the bottom of this file.
dev = [
    "black>=23.0.0",
    "pytest>=7.4.0",
    "ruff>=0.1.0",
]
[project.scripts]
# Console entry point: installs a `local-transcription` command that calls
# main:main (top-level module `main`, function `main`).
local-transcription = "main:main"
# PEP 517/518 build backend: hatchling builds the sdist and wheel.
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
# Limit the wheel to these packages. NOTE(review): the `main` module used by
# [project.scripts] is not listed here — confirm it is shipped some other way.
[tool.hatch.build.targets.wheel]
packages = ["client", "gui"]
# PEP 735 dependency groups (consumed by uv; unlike
# [project.optional-dependencies], not published in package metadata).
[dependency-groups]
dev = [
    "pyinstaller>=6.17.0",
]
# Add PyTorch CUDA index as additional source
# CUDA builds work on both GPU and CPU systems (fallback to CPU if no GPU)
[[tool.uv.index]]
name = "pytorch-cu121"
url = "https://download.pytorch.org/whl/cu121"
# Only packages explicitly pinned to this index via [tool.uv.sources] may
# resolve from it. Without this, uv can consult the PyTorch index for other
# packages too — the index only hosts torch-ecosystem wheels, which is how
# "fastapi was not found in the package registry" errors arise.
explicit = true

# Tell uv to get torch from the PyTorch CUDA index
[tool.uv.sources]
torch = { index = "pytorch-cu121" }
[tool.ruff]
# Keep in sync with [tool.black] line-length below.
line-length = 100
# Matches requires-python = ">=3.9" in [project].
target-version = "py39"
[tool.black]
# Keep in sync with [tool.ruff] line-length above.
line-length = 100
# Matches requires-python = ">=3.9" in [project].
target-version = ["py39"]