- Implement AIProvider base interface with chat() and is_available() (sketched below)
- Add LocalProvider connecting to bundled llama-server via OpenAI SDK
- Add OpenAIProvider for direct OpenAI API access
- Add AnthropicProvider for Anthropic Claude API
- Add LiteLLMProvider for multi-provider gateway
- Build AIProviderService with provider routing, auto-selection, and transcript context injection (see the routing sketch below)
- Add ai.chat IPC handler supporting chat, list_providers, set_provider, and configure actions
- Add ai_chat, ai_list_providers, ai_configure Tauri commands
- Build interactive AIChatPanel with message history, quick actions (Summarize, Action Items), and transcript context awareness
- Tests: 30 Python, 6 Rust, 0 Svelte errors

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
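The base interface itself isn't included in this view, but the signatures LocalProvider implements below pin down its shape. A minimal sketch of what voice_to_notes/providers/base.py plausibly contains (the real module may carry more, such as configuration hooks):

```python
from __future__ import annotations

from abc import ABC, abstractmethod
from typing import Any


class AIProvider(ABC):
    """Common interface implemented by every AI provider."""

    @abstractmethod
    def chat(self, messages: list[dict[str, str]], **kwargs: Any) -> str:
        """Send OpenAI-style messages and return the assistant's reply text."""

    @abstractmethod
    def is_available(self) -> bool:
        """Report whether the provider can currently serve requests."""

    @property
    @abstractmethod
    def name(self) -> str:
        """Human-readable provider name."""
```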
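AIProviderService is not shown either, so the following is only a hypothetical sketch of the routing and auto-selection the commit message describes: an explicitly selected provider wins, otherwise the service falls back to the first provider whose is_available() check passes. Apart from set_provider (named in the IPC actions above), the method names and constructor shape here are invented for illustration, and transcript context injection is omitted:

```python
from __future__ import annotations

from voice_to_notes.providers.base import AIProvider


class AIProviderService:
    """Hypothetical routing sketch; the real service also injects transcript context."""

    def __init__(self, providers: dict[str, AIProvider]) -> None:
        self._providers = providers
        self._selected: str | None = None

    def set_provider(self, key: str) -> None:
        if key not in self._providers:
            raise KeyError(f"Unknown provider: {key}")
        self._selected = key

    def active(self) -> AIProvider:
        # Explicit selection wins; otherwise auto-select the first provider
        # that reports itself available.
        if self._selected is not None:
            return self._providers[self._selected]
        for provider in self._providers.values():
            if provider.is_available():
                return provider
        raise RuntimeError("No AI provider is currently available")
```

The local provider itself (presumably voice_to_notes/providers/local.py, given the import path):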
"""Local AI provider — bundled llama-server (OpenAI-compatible API)."""
|
|
|
|
from __future__ import annotations
|
|
|
|
import sys
|
|
from typing import Any
|
|
|
|
from voice_to_notes.providers.base import AIProvider
|
|
|
|
|
|
class LocalProvider(AIProvider):
|
|
"""Connects to bundled llama-server via its OpenAI-compatible API."""
|
|
|
|
def __init__(self, base_url: str = "http://localhost:8080", model: str = "local") -> None:
|
|
self._base_url = base_url.rstrip("/")
|
|
self._model = model
|
|
self._client: Any = None
|
|
|
|
def _ensure_client(self) -> Any:
|
|
if self._client is not None:
|
|
return self._client
|
|
|
|
try:
|
|
from openai import OpenAI
|
|
|
|
self._client = OpenAI(
|
|
base_url=f"{self._base_url}/v1",
|
|
api_key="not-needed", # llama-server doesn't require an API key
|
|
)
|
|
except ImportError:
|
|
raise RuntimeError(
|
|
"openai package is required for local AI. Install with: pip install openai"
|
|
)
|
|
return self._client
|
|
|
|
def chat(self, messages: list[dict[str, str]], **kwargs: Any) -> str:
|
|
client = self._ensure_client()
|
|
response = client.chat.completions.create(
|
|
model=self._model,
|
|
messages=messages,
|
|
temperature=kwargs.get("temperature", 0.7),
|
|
max_tokens=kwargs.get("max_tokens", 2048),
|
|
)
|
|
return response.choices[0].message.content or ""
|
|
|
|
def is_available(self) -> bool:
|
|
try:
|
|
import urllib.request
|
|
|
|
req = urllib.request.Request(f"{self._base_url}/health", method="GET")
|
|
with urllib.request.urlopen(req, timeout=2) as resp:
|
|
return resp.status == 200
|
|
except Exception:
|
|
return False
|
|
|
|
@property
|
|
def name(self) -> str:
|
|
return "Local (llama-server)"
|
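Typical usage, assuming a bundled llama-server is already listening on the default port:

```python
# Assumed module path, mirroring voice_to_notes.providers.base
from voice_to_notes.providers.local import LocalProvider

provider = LocalProvider()  # defaults to http://localhost:8080

if provider.is_available():  # GET /health with a 2-second timeout
    reply = provider.chat(
        [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Summarize: the meeting covered Q3 goals."},
        ],
        temperature=0.2,
    )
    print(reply)
else:
    print("llama-server is not running; fall back to another provider")
```

The lazy construction in _ensure_client keeps the openai dependency optional until the first chat() call, and the short /health probe lets auto-selection skip the local provider quickly when the bundled server isn't running.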