Rename LiteLLM backend to OpenAI Compatible

Reflects that this backend works with any OpenAI API-compatible endpoint
(LiteLLM, OpenRouter, vLLM, text-generation-inference, LocalAI, etc.),
not just LiteLLM. Includes serde aliases for backward compatibility with
existing projects.json files.
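
The aliases mean a pre-rename projects.json entry along these lines (values illustrative) still loads: `litellm` and `lite_llm` remain accepted spellings of the backend tag, and `litellm_config` maps onto `openai_compatible_config`:

{
  "backend": "litellm",
  "litellm_config": {
    "base_url": "http://host.docker.internal:4000",
    "model_id": "gpt-4o"
  }
}

(The api_key never appears in the file; it is marked skip_serializing and is re-read from per-project secret storage, as the load_secrets_for_project diff below shows.)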

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
commit d7d7a83aec (parent 879322bc9a), 2026-03-13 06:16:05 -07:00
8 changed files with 100 additions and 98 deletions


@@ -34,9 +34,9 @@ fn store_secrets_for_project(project: &Project) -> Result<(), String> {
             secure::store_project_secret(&project.id, "aws-bearer-token", v)?;
         }
     }
-    if let Some(ref litellm) = project.litellm_config {
-        if let Some(ref v) = litellm.api_key {
-            secure::store_project_secret(&project.id, "litellm-api-key", v)?;
+    if let Some(ref oai_config) = project.openai_compatible_config {
+        if let Some(ref v) = oai_config.api_key {
+            secure::store_project_secret(&project.id, "openai-compatible-api-key", v)?;
         }
     }
     Ok(())
@@ -56,8 +56,8 @@ fn load_secrets_for_project(project: &mut Project) {
         bedrock.aws_bearer_token = secure::get_project_secret(&project.id, "aws-bearer-token")
             .unwrap_or(None);
     }
-    if let Some(ref mut litellm) = project.litellm_config {
-        litellm.api_key = secure::get_project_secret(&project.id, "litellm-api-key")
+    if let Some(ref mut oai_config) = project.openai_compatible_config {
+        oai_config.api_key = secure::get_project_secret(&project.id, "openai-compatible-api-key")
             .unwrap_or(None);
     }
 }
@@ -197,11 +197,11 @@ pub async fn start_project_container(
         }
     }
 
-    if project.backend == Backend::LiteLlm {
-        let litellm = project.litellm_config.as_ref()
-            .ok_or_else(|| "LiteLLM backend selected but no LiteLLM configuration found.".to_string())?;
-        if litellm.base_url.is_empty() {
-            return Err("LiteLLM base URL is required.".to_string());
+    if project.backend == Backend::OpenAiCompatible {
+        let oai_config = project.openai_compatible_config.as_ref()
+            .ok_or_else(|| "OpenAI Compatible backend selected but no configuration found.".to_string())?;
+        if oai_config.base_url.is_empty() {
+            return Err("OpenAI Compatible base URL is required.".to_string());
         }
     }
 


@@ -244,13 +244,13 @@ fn compute_ollama_fingerprint(project: &Project) -> String {
     }
 }
 
-/// Compute a fingerprint for the LiteLLM configuration so we can detect changes.
-fn compute_litellm_fingerprint(project: &Project) -> String {
-    if let Some(ref litellm) = project.litellm_config {
+/// Compute a fingerprint for the OpenAI Compatible configuration so we can detect changes.
+fn compute_openai_compatible_fingerprint(project: &Project) -> String {
+    if let Some(ref config) = project.openai_compatible_config {
         let parts = vec![
-            litellm.base_url.clone(),
-            litellm.api_key.as_deref().unwrap_or("").to_string(),
-            litellm.model_id.as_deref().unwrap_or("").to_string(),
+            config.base_url.clone(),
+            config.api_key.as_deref().unwrap_or("").to_string(),
+            config.model_id.as_deref().unwrap_or("").to_string(),
         ];
         sha256_hex(&parts.join("|"))
     } else {
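
The fingerprint lands in a container label and is recomputed on every start, so config drift is caught with a cheap string compare. A minimal sketch of the scheme, assuming `sha256_hex` (whose body is not in this diff) is a plain hex-encoded SHA-256 digest:

// Hypothetical stand-in for the repo's sha256_hex helper, assumed to be
// a hex-encoded SHA-256 digest built on the `sha2` crate.
use sha2::{Digest, Sha256};

fn sha256_hex(input: &str) -> String {
    let mut hasher = Sha256::new();
    hasher.update(input.as_bytes());
    hasher.finalize().iter().map(|b| format!("{:02x}", b)).collect()
}

fn main() {
    // Fields are joined with "|" before hashing, so changing any one of
    // base_url, api_key, or model_id changes the fingerprint.
    let a = sha256_hex(&["http://host:4000", "key", "model-a"].join("|"));
    let b = sha256_hex(&["http://host:4000", "key", "model-b"].join("|"));
    assert_ne!(a, b);
}
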
@@ -516,14 +516,14 @@ pub async fn create_container(
         }
     }
 
-    // LiteLLM configuration
-    if project.backend == Backend::LiteLlm {
-        if let Some(ref litellm) = project.litellm_config {
-            env_vars.push(format!("ANTHROPIC_BASE_URL={}", litellm.base_url));
-            if let Some(ref key) = litellm.api_key {
+    // OpenAI Compatible configuration
+    if project.backend == Backend::OpenAiCompatible {
+        if let Some(ref config) = project.openai_compatible_config {
+            env_vars.push(format!("ANTHROPIC_BASE_URL={}", config.base_url));
+            if let Some(ref key) = config.api_key {
                 env_vars.push(format!("ANTHROPIC_AUTH_TOKEN={}", key));
             }
-            if let Some(ref model) = litellm.model_id {
+            if let Some(ref model) = config.model_id {
                 env_vars.push(format!("ANTHROPIC_MODEL={}", model));
             }
         }
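
Claude Code inside the container picks these up through its standard environment overrides. For a config with the base_url above, a stored API key, and a model_id of gpt-4o (illustrative values), the container would receive roughly:

ANTHROPIC_BASE_URL=http://host.docker.internal:4000
ANTHROPIC_AUTH_TOKEN=<the stored api_key>
ANTHROPIC_MODEL=gpt-4o
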
@@ -698,7 +698,7 @@ pub async fn create_container(
     labels.insert("triple-c.paths-fingerprint".to_string(), compute_paths_fingerprint(&project.paths));
     labels.insert("triple-c.bedrock-fingerprint".to_string(), compute_bedrock_fingerprint(project));
     labels.insert("triple-c.ollama-fingerprint".to_string(), compute_ollama_fingerprint(project));
-    labels.insert("triple-c.litellm-fingerprint".to_string(), compute_litellm_fingerprint(project));
+    labels.insert("triple-c.openai-compatible-fingerprint".to_string(), compute_openai_compatible_fingerprint(project));
     labels.insert("triple-c.ports-fingerprint".to_string(), compute_ports_fingerprint(&project.port_mappings));
     labels.insert("triple-c.image".to_string(), image_name.to_string());
     labels.insert("triple-c.timezone".to_string(), timezone.unwrap_or("").to_string());
@@ -948,11 +948,11 @@ pub async fn container_needs_recreation(
         return Ok(true);
     }
 
-    // ── LiteLLM config fingerprint ───────────────────────────────────────
-    let expected_litellm_fp = compute_litellm_fingerprint(project);
-    let container_litellm_fp = get_label("triple-c.litellm-fingerprint").unwrap_or_default();
-    if container_litellm_fp != expected_litellm_fp {
-        log::info!("LiteLLM config mismatch");
+    // ── OpenAI Compatible config fingerprint ────────────────────────────
+    let expected_oai_fp = compute_openai_compatible_fingerprint(project);
+    let container_oai_fp = get_label("triple-c.openai-compatible-fingerprint").unwrap_or_default();
+    if container_oai_fp != expected_oai_fp {
+        log::info!("OpenAI Compatible config mismatch");
         return Ok(true);
     }
 


@@ -35,7 +35,8 @@ pub struct Project {
     pub backend: Backend,
     pub bedrock_config: Option<BedrockConfig>,
     pub ollama_config: Option<OllamaConfig>,
-    pub litellm_config: Option<LiteLlmConfig>,
+    #[serde(alias = "litellm_config")]
+    pub openai_compatible_config: Option<OpenAiCompatibleConfig>,
     pub allow_docker_access: bool,
     #[serde(default)]
     pub mission_control_enabled: bool,
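
A minimal sketch of what the field alias buys (hypothetical struct, not the repo's real Project):

use serde::Deserialize;

#[derive(Deserialize)]
struct ProjectFile {
    // Accepts both the new key and the legacy "litellm_config" key.
    #[serde(alias = "litellm_config")]
    openai_compatible_config: Option<String>,
}

fn main() {
    let legacy = r#"{ "litellm_config": "cfg" }"#;
    let p: ProjectFile = serde_json::from_str(legacy).unwrap();
    assert_eq!(p.openai_compatible_config.as_deref(), Some("cfg"));
}

Serde serializes under the field's real name, so the alias only has to cover reads; files migrate to the new key whenever they are rewritten.
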
@@ -70,7 +71,7 @@ pub enum ProjectStatus {
 /// - `Anthropic`: Direct Anthropic API (user runs `claude login` inside the container)
 /// - `Bedrock`: AWS Bedrock with per-project AWS credentials
 /// - `Ollama`: Local or remote Ollama server
-/// - `LiteLlm`: LiteLLM proxy gateway for 100+ model providers
+/// - `OpenAiCompatible`: Any OpenAI API-compatible endpoint (e.g., LiteLLM, vLLM, etc.)
 #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
 #[serde(rename_all = "snake_case")]
 pub enum Backend {
@@ -79,8 +80,8 @@ pub enum Backend {
     Anthropic,
     Bedrock,
     Ollama,
-    #[serde(alias = "litellm")]
-    LiteLlm,
+    #[serde(alias = "lite_llm", alias = "litellm")]
+    OpenAiCompatible,
 }
 
 impl Default for Backend {
@@ -132,13 +133,14 @@ pub struct OllamaConfig {
     pub model_id: Option<String>,
 }
 
-/// LiteLLM gateway configuration for a project.
-/// LiteLLM translates Anthropic API calls to 100+ model providers.
+/// OpenAI Compatible endpoint configuration for a project.
+/// Routes Anthropic API calls through any OpenAI API-compatible endpoint
+/// (e.g., LiteLLM, vLLM, or other compatible gateways).
 #[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct LiteLlmConfig {
-    /// The base URL of the LiteLLM proxy (e.g., "http://host.docker.internal:4000" or "https://litellm.example.com")
+pub struct OpenAiCompatibleConfig {
+    /// The base URL of the OpenAI-compatible endpoint (e.g., "http://host.docker.internal:4000" or "https://api.example.com")
     pub base_url: String,
-    /// API key for the LiteLLM proxy
+    /// API key for the OpenAI-compatible endpoint
     #[serde(skip_serializing, default)]
     pub api_key: Option<String>,
     /// Optional model override
@@ -157,7 +159,7 @@ impl Project {
             backend: Backend::default(),
             bedrock_config: None,
             ollama_config: None,
-            litellm_config: None,
+            openai_compatible_config: None,
             allow_docker_access: false,
             mission_control_enabled: false,
             ssh_key_path: None,
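
And the enum side: both legacy wire names parse, while new writes use the snake_case rename. A trimmed-down sketch (derives and variants reduced from the real enum):

use serde::{Deserialize, Serialize};

#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
enum Backend {
    Anthropic,
    #[serde(alias = "lite_llm", alias = "litellm")]
    OpenAiCompatible,
}

fn main() {
    // Both historical spellings deserialize to the renamed variant...
    for legacy in ["\"litellm\"", "\"lite_llm\""] {
        let b: Backend = serde_json::from_str(legacy).unwrap();
        assert_eq!(b, Backend::OpenAiCompatible);
    }
    // ...and serialization now emits the new snake_case name.
    let s = serde_json::to_string(&Backend::OpenAiCompatible).unwrap();
    assert_eq!(s, "\"open_ai_compatible\"");
}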