Compare commits


4 Commits

Author SHA1 Message Date
Gitea Actions
1c586738f3 chore: bump version to 2.0.8 [skip ci] 2026-04-08 16:58:00 +00:00
Developer
fb02a24334 Remove CUDA sidecar builds, keep CPU + Cloud only
All checks were successful
Tests / Python Backend Tests (push) Successful in 6s
Tests / Frontend Tests (push) Successful in 8s
Tests / Rust Sidecar Tests (push) Successful in 2m3s
CUDA sidecars are ~2 GB and too slow to upload from the Windows runner.
Cloud (Deepgram) provides faster transcription anyway. Removed:

- CUDA build steps from Windows and Linux sidecar workflows
- CUDA option from the SidecarSetup download screen

Remaining sidecar variants:
- Cloud (Deepgram): ~50 MB - recommended for most users
- Local CPU: ~500 MB - for offline/privacy use

CUDA can be revisited once the managed Deepgram service is ready.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-08 09:49:36 -07:00
Developer
ce64cacc5e Use max compression for sidecar zips to reduce upload size
All checks were successful
Tests / Python Backend Tests (push) Successful in 5s
Tests / Frontend Tests (push) Successful in 7s
Tests / Rust Sidecar Tests (push) Successful in 2m1s
zip -9 on Linux, 7z -mx=9 on Windows. Compression takes longer but
produces smaller files, which upload faster over the network.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-08 09:42:26 -07:00
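
For reference, a minimal sketch of what the compression change above amounts to as workflow steps (commands and archive names copied from the file diffs below; zip's default level is -6 and 7-Zip's default is -mx=5, so both packagers now run at their maximum level 9):

    # Linux workflow step (bash): -9 selects zip's maximum deflate level
    - name: Package sidecar (CPU)
      run: |
        cd dist/local-transcription-backend && zip -9 -r ../../sidecar-linux-x86_64-cpu.zip .

    # Windows workflow step (PowerShell): -mx=9 selects 7-Zip's "ultra" preset
    - name: Package sidecar (CPU)
      shell: powershell
      run: |
        7z a -tzip -mx=9 sidecar-windows-x86_64-cpu.zip .\dist\local-transcription-backend\*

The trade-off is CPU time on the runner for network time on upload; for archives in the hundreds of megabytes, the slower compression generally pays for itself.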
Gitea Actions
14a7ca3b30 chore: bump sidecar version to 1.0.6 [skip ci] 2026-04-08 16:26:36 +00:00
8 changed files with 14 additions and 52 deletions

View File

@@ -40,26 +40,16 @@ jobs:
sudo apt-get update
sudo apt-get install -y portaudio19-dev
- name: Build sidecar (CUDA)
run: |
uv sync --frozen || uv sync
uv run pyinstaller local-transcription-headless.spec
- name: Package sidecar (CUDA)
run: |
cd dist/local-transcription-backend && zip -r ../../sidecar-linux-x86_64-cuda.zip .
- name: Build sidecar (CPU)
env:
UV_NO_SOURCES: "1"
run: |
rm -rf dist/local-transcription-backend build/
uv pip install torch torchaudio --index-url https://download.pytorch.org/whl/cpu --force-reinstall
# Run pyinstaller directly from venv to prevent uv run from
# re-resolving torch back to the CUDA version via pyproject.toml sources
uv sync
.venv/bin/pyinstaller local-transcription-headless.spec
- name: Package sidecar (CPU)
run: |
cd dist/local-transcription-backend && zip -r ../../sidecar-linux-x86_64-cpu.zip .
cd dist/local-transcription-backend && zip -9 -r ../../sidecar-linux-x86_64-cpu.zip .
- name: Upload to sidecar release
env:

View File

@@ -54,29 +54,18 @@ jobs:
choco install 7zip -y
}
- name: Build sidecar (CUDA)
shell: powershell
run: |
uv sync --frozen
if ($LASTEXITCODE -ne 0) { uv sync }
uv run pyinstaller local-transcription-headless.spec
- name: Package sidecar (CUDA)
shell: powershell
run: |
7z a -tzip -mx=5 sidecar-windows-x86_64-cuda.zip .\dist\local-transcription-backend\*
- name: Build sidecar (CPU)
shell: powershell
env:
UV_NO_SOURCES: "1"
run: |
Remove-Item -Recurse -Force dist\local-transcription-backend, build -ErrorAction SilentlyContinue
uv pip install torch torchaudio --index-url https://download.pytorch.org/whl/cpu --force-reinstall
uv sync
.venv\Scripts\pyinstaller.exe local-transcription-headless.spec
- name: Package sidecar (CPU)
shell: powershell
run: |
7z a -tzip -mx=5 sidecar-windows-x86_64-cpu.zip .\dist\local-transcription-backend\*
7z a -tzip -mx=9 sidecar-windows-x86_64-cpu.zip .\dist\local-transcription-backend\*
- name: Upload to sidecar release
shell: powershell

View File

@@ -1,7 +1,7 @@
{
"name": "local-transcription",
"private": true,
"version": "2.0.7",
"version": "2.0.8",
"type": "module",
"scripts": {
"dev": "vite dev",

View File

@@ -1,6 +1,6 @@
[project]
name = "local-transcription"
version = "1.0.5"
version = "1.0.6"
description = "A standalone desktop application for real-time speech-to-text transcription using Whisper models"
readme = "README.md"
requires-python = ">=3.9"

View File

@@ -1,6 +1,6 @@
[package]
name = "local-transcription"
version = "2.0.7"
version = "2.0.8"
description = "Real-time speech-to-text transcription for streamers"
authors = ["Local Transcription Contributors"]
edition = "2021"

View File

@@ -1,6 +1,6 @@
{
"productName": "Local Transcription",
"version": "2.0.7",
"version": "2.0.8",
"identifier": "net.anhonesthost.local-transcription",
"build": {
"frontendDist": "../dist",

View File

@@ -126,23 +126,6 @@
</div>
</label>
<label class="variant-option" class:selected={variant === "cuda"}>
<input
type="radio"
name="variant"
value="cuda"
bind:group={variant}
/>
<div class="variant-info">
<span class="variant-name">Local - GPU (NVIDIA CUDA)</span>
<span class="variant-desc">~2 GB download</span>
<span class="variant-detail">
Runs Whisper AI models locally using your NVIDIA GPU for fast
transcription. No internet needed after download. Requires an
NVIDIA GPU with CUDA support.
</span>
</div>
</label>
</div>
<button class="download-btn" onclick={startDownload}>

View File

@@ -1,7 +1,7 @@
"""Version information for Local Transcription."""
__version__ = "2.0.7"
__version_info__ = (2, 0, 7)
__version__ = "2.0.8"
__version_info__ = (2, 0, 8)
# Version history:
# 1.4.0 - Auto-update feature: