AIChatPanel had its own hardcoded configMap with the old llama-server URL (localhost:8080) and field names (local_model_path). Every chat message reconfigured the provider with these wrong values, overriding the correct settings applied at startup. Fix: replace the duplicate with a call to the shared configureAIProvider(). Also strip trailing slashes from ollama_url before appending /v1 to prevent double-slash URLs (http://localhost:11434//v1). Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
79 lines · 2.3 KiB · TypeScript
import { writable } from 'svelte/store';
|
|
import { invoke } from '@tauri-apps/api/core';
|
|
|
|
/**
 * Application settings persisted by the Rust backend (`load_settings` /
 * `save_settings` commands). Field names are snake_case — presumably to match
 * the backend's serialized form; confirm against the Rust struct.
 */
export interface AppSettings {
  /** Selected AI provider key: 'openai' | 'anthropic' | 'litellm' | 'local' (Ollama) — see configureAIProvider. */
  ai_provider: string;
  /** API key for the OpenAI provider. */
  openai_api_key: string;
  /** API key for the Anthropic provider. */
  anthropic_api_key: string;
  /** Model id used when ai_provider === 'openai'. */
  openai_model: string;
  /** Model id used when ai_provider === 'anthropic'. */
  anthropic_model: string;
  /** Model id used when ai_provider === 'litellm'. */
  litellm_model: string;
  /** API key passed to the LiteLLM proxy. */
  litellm_api_key: string;
  /** Base URL of the LiteLLM proxy. */
  litellm_api_base: string;
  /** Ollama server URL; '/v1' is appended (after stripping trailing slashes) when configuring the 'local' provider. */
  ollama_url: string;
  /** Model id used when ai_provider === 'local'. */
  ollama_model: string;
  /** Transcription model name (e.g. 'base'). */
  transcription_model: string;
  /** Device for transcription (e.g. 'cpu'). */
  transcription_device: string;
  /** Transcription language code; empty string means auto-detect — TODO confirm with backend. */
  transcription_language: string;
  /** When true, speaker diarization is skipped. */
  skip_diarization: boolean;
  /** Hugging Face token (presumably for gated diarization models — verify). */
  hf_token: string;
  /** Fixed speaker count for diarization, or null to auto-detect. */
  num_speakers: number | null;
  /** Whether developer tools are enabled in the app. */
  devtools_enabled: boolean;
}
|
|
|
|
// Baseline settings used before (or instead of) anything loaded from disk.
const defaults: AppSettings = {
  ai_provider: 'local',          // default to the local Ollama provider
  openai_api_key: '',
  anthropic_api_key: '',
  openai_model: 'gpt-4o-mini',
  anthropic_model: 'claude-sonnet-4-6',
  litellm_model: 'gpt-4o-mini',
  litellm_api_key: '',
  litellm_api_base: '',
  ollama_url: 'http://localhost:11434', // Ollama's default port
  ollama_model: 'llama3.2',
  transcription_model: 'base',
  transcription_device: 'cpu',
  transcription_language: '',    // empty = auto-detect (presumably — verify against backend)
  skip_diarization: false,
  hf_token: '',
  num_speakers: null,            // null = let diarization infer speaker count
  devtools_enabled: false,
};
|
|
|
|
// Reactive store holding the current settings; seeded with a copy of the defaults.
export const settings = writable<AppSettings>({ ...defaults });
|
|
|
|
export async function loadSettings(): Promise<void> {
|
|
try {
|
|
const saved = await invoke<Record<string, unknown>>('load_settings');
|
|
settings.update(s => ({ ...s, ...saved } as AppSettings));
|
|
} catch {
|
|
// Use defaults if settings can't be loaded
|
|
}
|
|
}
|
|
|
|
export async function configureAIProvider(s: AppSettings): Promise<void> {
|
|
const configMap: Record<string, Record<string, string>> = {
|
|
openai: { api_key: s.openai_api_key, model: s.openai_model },
|
|
anthropic: { api_key: s.anthropic_api_key, model: s.anthropic_model },
|
|
litellm: { api_key: s.litellm_api_key, api_base: s.litellm_api_base, model: s.litellm_model },
|
|
local: { model: s.ollama_model, base_url: s.ollama_url.replace(/\/+$/, '') + '/v1' },
|
|
};
|
|
const config = configMap[s.ai_provider];
|
|
if (config) {
|
|
try {
|
|
await invoke('ai_configure', { provider: s.ai_provider, config });
|
|
} catch {
|
|
// Sidecar may not be running yet
|
|
}
|
|
}
|
|
}
|
|
|
|
/**
 * Persist settings and propagate them everywhere they matter, in order:
 * update the reactive store (UI sees new values immediately), write them
 * to disk via the Rust backend, then reconfigure the AI sidecar.
 */
export async function saveSettings(s: AppSettings): Promise<void> {
  settings.set(s);
  await invoke('save_settings', { settings: s });

  // Configure the AI provider in the Python sidecar
  await configureAIProvider(s);
}
|