Fix Ollama connection: remove double /v1 in URL

base_url was being set to 'http://localhost:11434/v1' by the frontend,
then LocalProvider appended another '/v1', resulting in '/v1/v1'.
Now the provider uses base_url directly (the frontend already appends /v1).
Also fixed the health check to hit the Ollama root endpoint instead of /health.
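
For context, a minimal sketch of how the double suffix arose (a hypothetical reproduction, not code from this repo):

    # The frontend sends a base_url that already ends in /v1.
    base_url = "http://localhost:11434/v1"

    # Old behavior: LocalProvider appended /v1 again when building the client URL.
    old_url = f"{base_url.rstrip('/')}/v1"
    print(old_url)  # http://localhost:11434/v1/v1 -> a path Ollama does not serve

    # New behavior: use base_url as given.
    new_url = base_url.rstrip("/")
    print(new_url)  # http://localhost:11434/v1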

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
Claude
2026-03-22 17:41:44 -07:00
parent bddce2fbeb
commit 425e3c2b7c


@@ -1,4 +1,4 @@
-"""Local AI provider — bundled llama-server (OpenAI-compatible API)."""
+"""Local AI provider — Ollama or any OpenAI-compatible API."""
 from __future__ import annotations
@@ -9,9 +9,9 @@ from voice_to_notes.providers.base import AIProvider
 class LocalProvider(AIProvider):
-    """Connects to bundled llama-server via its OpenAI-compatible API."""
+    """Connects to Ollama or any OpenAI-compatible API server."""
-    def __init__(self, base_url: str = "http://localhost:8080", model: str = "local") -> None:
+    def __init__(self, base_url: str = "http://localhost:11434/v1", model: str = "llama3.2") -> None:
         self._base_url = base_url.rstrip("/")
         self._model = model
         self._client: Any = None
@@ -24,8 +24,8 @@ class LocalProvider(AIProvider):
             from openai import OpenAI
             self._client = OpenAI(
-                base_url=f"{self._base_url}/v1",
-                api_key="not-needed",  # llama-server doesn't require an API key
+                base_url=self._base_url,
+                api_key="ollama",  # Ollama doesn't require a real key
             )
         except ImportError:
             raise RuntimeError(
@@ -47,7 +47,9 @@ class LocalProvider(AIProvider):
         try:
             import urllib.request
-            req = urllib.request.Request(f"{self._base_url}/health", method="GET")
+            # Check base URL without /v1 suffix for Ollama root endpoint
+            root_url = self._base_url.replace("/v1", "")
+            req = urllib.request.Request(root_url, method="GET")
             with urllib.request.urlopen(req, timeout=2) as resp:
                 return resp.status == 200
         except Exception:
@@ -55,4 +57,4 @@
     @property
     def name(self) -> str:
-        return "Local (llama-server)"
+        return "Ollama"