diff --git a/src/lib/components/SettingsModal.svelte b/src/lib/components/SettingsModal.svelte index b7a23f8..ffdc557 100644 --- a/src/lib/components/SettingsModal.svelte +++ b/src/lib/components/SettingsModal.svelte @@ -11,7 +11,7 @@ let { visible, onClose }: Props = $props(); let localSettings = $state({ ...$settings }); - let activeTab = $state<'transcription' | 'speakers' | 'ai' | 'local' | 'developer'>('transcription'); + let activeTab = $state<'transcription' | 'speakers' | 'ai' | 'debug'>('transcription'); let modelStatus = $state<'idle' | 'downloading' | 'success' | 'error'>('idle'); let modelError = $state(''); let revealedFields = $state<Set<string>>(new Set()); @@ -81,11 +81,8 @@ - - @@ -184,14 +181,27 @@
- {#if localSettings.ai_provider === 'openai'} + {#if localSettings.ai_provider === 'local'} +
+ + +
+
+ + +
+

+ Install Ollama from ollama.com, then pull a model with ollama pull llama3.2. + The app connects via Ollama's OpenAI-compatible API. +

+ {:else if localSettings.ai_provider === 'openai'}
@@ -232,20 +242,7 @@
{/if} - {:else} -
- - -
-
- - -
-

- Place GGUF model files in ~/.voicetonotes/models/ for auto-detection. - The local AI server uses the OpenAI-compatible API from llama.cpp. -

- {:else if activeTab === 'developer'} + {:else if activeTab === 'debug'}