local-transcription/pyproject.toml
jknapp be53f2e962 Fix PyInstaller build failure caused by enum34 package
The enum34 package is an obsolete backport of Python's enum module
and is incompatible with PyInstaller on Python 3.4+. It was being
pulled in as a transitive dependency by pvporcupine (part of
RealtimeSTT's dependencies).

Changes:
- All build scripts now remove enum34 before running PyInstaller
  - build.bat, build-cuda.bat (Windows)
  - build.sh, build-cuda.sh (Linux)
- Added "uv pip uninstall -q enum34" step after cleaning builds
- Removed attempted pyproject.toml override (not needed with this fix)

This fix allows PyInstaller to bundle the application without errors
while keeping all RealtimeSTT functionality intact: enum has been part
of the Python standard library since 3.4, so dropping the backport is safe.

Resolves: PyInstaller error "enum34 package is incompatible"

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-12-28 19:06:33 -08:00
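
For reference, the uninstall step described in the commit sits in each build script just before PyInstaller runs. Below is a minimal sketch of the Linux variant (build.sh); the "uv pip uninstall -q enum34" command is quoted from the commit message, while the surrounding steps and file names are assumptions, since the build scripts themselves are not shown on this page.

#!/usr/bin/env bash
set -euo pipefail

# Clean artifacts from previous builds (assumed step)
rm -rf build/ dist/

# Remove the obsolete enum34 backport pulled in via pvporcupine; it shadows the
# stdlib enum module and breaks PyInstaller on Python 3.4+
uv pip uninstall -q enum34

# Bundle the application (assumed invocation; the real scripts may use a .spec file)
uv run pyinstaller --noconfirm main.py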


[project]
name = "local-transcription"
version = "0.1.0"
description = "A standalone desktop application for real-time speech-to-text transcription using Whisper models"
readme = "README.md"
requires-python = ">=3.9"
license = {text = "MIT"}
authors = [
    {name = "Your Name", email = "your.email@example.com"}
]
keywords = ["transcription", "speech-to-text", "whisper", "streaming", "obs"]
dependencies = [
    "numpy>=1.24.0",
    "pyyaml>=6.0",
    "sounddevice>=0.4.6",
    "scipy>=1.10.0",
    "torch>=2.0.0",
    "PySide6>=6.6.0",
    # RealtimeSTT for advanced VAD-based transcription
    "RealtimeSTT>=0.3.0",
    # Web server (always-running for OBS integration)
    "fastapi>=0.104.0",
    "uvicorn>=0.24.0",
    "websockets>=12.0",
    # Server sync client
    "requests>=2.31.0",
]

[project.optional-dependencies]
# Kept for backwards compatibility, but server deps are now in main dependencies
server = [
    "fastapi>=0.104.0",
    "uvicorn>=0.24.0",
    "websockets>=12.0",
    "requests>=2.31.0",
]
dev = [
    "pytest>=7.4.0",
    "black>=23.0.0",
    "ruff>=0.1.0",
]

[project.scripts]
local-transcription = "main:main"

[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[tool.hatch.build.targets.wheel]
packages = ["client", "gui"]

[dependency-groups]
dev = [
    "pyinstaller>=6.17.0",
]

# Add PyTorch CUDA index as additional source
# CUDA builds work on both GPU and CPU systems (fallback to CPU if no GPU)
# Using 'explicit = true' means only packages we explicitly specify use this index
[[tool.uv.index]]
name = "pytorch-cu121"
url = "https://download.pytorch.org/whl/cu121"
explicit = true

# Tell uv to get torch, torchvision, and torchaudio from the PyTorch CUDA index
# All other packages come from PyPI
[tool.uv.sources]
torch = { index = "pytorch-cu121" }
torchvision = { index = "pytorch-cu121" }
torchaudio = { index = "pytorch-cu121" }

[tool.ruff]
line-length = 100
target-version = "py39"

[tool.black]
line-length = 100
target-version = ["py39"]
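
To show how the uv-specific tables above fit together, here is a minimal command-line sketch. It assumes uv is installed and that the commands are run from the directory containing this pyproject.toml; the project's own helper scripts are not shown on this page.

# Install the project: torch, torchvision, and torchaudio resolve from the
# pytorch-cu121 index per [tool.uv.sources], everything else comes from PyPI,
# and the dev dependency group (PyInstaller) is included by default
uv sync

# Run the console script declared in [project.scripts]
uv run local-transcription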