- Implement AIProvider base interface with chat() and is_available() (a sketch follows this list)
- Add LocalProvider connecting to bundled llama-server via OpenAI SDK
- Add OpenAIProvider for direct OpenAI API access
- Add AnthropicProvider for Anthropic Claude API
- Add LiteLLMProvider for multi-provider gateway
- Build AIProviderService with provider routing, auto-selection, and transcript context injection
- Add ai.chat IPC handler supporting chat, list_providers, set_provider, and configure actions
- Add ai_chat, ai_list_providers, ai_configure Tauri commands
- Build interactive AIChatPanel with message history, quick actions (Summarize, Action Items), and transcript context awareness
- Tests: 30 Python, 6 Rust, 0 Svelte errors

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
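The AIProvider base interface itself is not included in this excerpt. Below is a minimal sketch of what `voice_to_notes/providers/base.py` likely exposes, with the signatures inferred from the LiteLLMProvider subclass shown later; the abstract-base-class structure and docstrings are assumptions, not confirmed by the commit:

```python
"""Sketch of the assumed voice_to_notes/providers/base.py.

chat(), is_available(), and name are confirmed by the LiteLLMProvider
subclass; the ABC structure itself is an assumption."""
from __future__ import annotations

from abc import ABC, abstractmethod
from typing import Any


class AIProvider(ABC):
    """Contract shared by the Local, OpenAI, Anthropic, and LiteLLM providers."""

    @abstractmethod
    def chat(self, messages: list[dict[str, str]], **kwargs: Any) -> str:
        """Send an OpenAI-style message list and return the assistant's reply text."""

    @abstractmethod
    def is_available(self) -> bool:
        """Report whether this provider can currently serve requests."""

    @property
    @abstractmethod
    def name(self) -> str:
        """Human-readable name used for provider routing and the UI."""
```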
43 lines · 1.2 KiB · Python
"""LiteLLM provider — multi-provider gateway."""
|
|
|
|
from __future__ import annotations
|
|
|
|
from typing import Any
|
|
|
|
from voice_to_notes.providers.base import AIProvider
|
|
|
|
|
|
class LiteLLMProvider(AIProvider):
|
|
"""Routes through LiteLLM for access to 100+ LLM providers."""
|
|
|
|
def __init__(self, model: str = "gpt-4o-mini", **kwargs: Any) -> None:
|
|
self._model = model
|
|
self._extra_kwargs = kwargs
|
|
|
|
def chat(self, messages: list[dict[str, str]], **kwargs: Any) -> str:
|
|
try:
|
|
import litellm
|
|
except ImportError:
|
|
raise RuntimeError("litellm package is required. Install with: pip install litellm")
|
|
|
|
merged_kwargs = {**self._extra_kwargs, **kwargs}
|
|
response = litellm.completion(
|
|
model=merged_kwargs.get("model", self._model),
|
|
messages=messages,
|
|
temperature=merged_kwargs.get("temperature", 0.7),
|
|
max_tokens=merged_kwargs.get("max_tokens", 2048),
|
|
)
|
|
return response.choices[0].message.content or ""
|
|
|
|
def is_available(self) -> bool:
|
|
try:
|
|
import litellm # noqa: F401
|
|
|
|
return True
|
|
except ImportError:
|
|
return False
|
|
|
|
@property
|
|
def name(self) -> str:
|
|
return "LiteLLM"
|
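A hedged usage sketch for the class above. The import path is hypothetical; what the code does confirm is that per-call kwargs override constructor defaults via the merged_kwargs dict:

```python
# Usage sketch -- the module path below is an assumption, not confirmed by the commit.
from voice_to_notes.providers.litellm import LiteLLMProvider

# Constructor kwargs (temperature here) become defaults for every chat() call.
provider = LiteLLMProvider(model="gpt-4o-mini", temperature=0.2)

if provider.is_available():  # False until `pip install litellm` has been run
    reply = provider.chat(
        [{"role": "user", "content": "Summarize this transcript in two sentences."}],
        max_tokens=256,  # per-call kwargs override the constructor defaults
    )
    print(reply)
```

LiteLLM resolves credentials from the environment (e.g. OPENAI_API_KEY for OpenAI-backed models), which is why the provider carries no key-handling code of its own.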