Fix Discord bot to use LiteLLM Responses API v2 with MCP tools
All checks were successful
OpenWebUI Discord Bot / Build-and-Push (push) — Successful in 53s
Key changes:
- Upgrade OpenAI SDK from 1.x to 2.x (required for responses API)
- Update get_ai_response to use developer role for system prompts
- Improve response extraction with output_text shorthand
- Add enhanced debug logging for troubleshooting
- Add traceback logging for better error diagnostics

This fixes the "'OpenAI' object has no attribute 'responses'" error and enables proper MCP tool auto-execution through LiteLLM.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
@@ -277,10 +277,10 @@ async def get_ai_response(history_messages: List[Dict[str, Any]], user_message:
     # Build input array with system prompt, history, and current message
     input_messages = []

-    # Add system prompt as developer role (newer models) or user role
+    # Add system prompt as developer role for newer models
     input_messages.append({
-        "role": "user",  # System messages converted to user for Responses API
-        "content": f"[System Instructions]\n{SYSTEM_PROMPT}"
+        "role": "developer",
+        "content": SYSTEM_PROMPT
    })

    # Add conversation history
@@ -329,6 +329,8 @@ async def get_ai_response(history_messages: List[Dict[str, Any]], user_message:
        else:
            # Use Responses API with MCP tools
+           debug_log(f"Calling Responses API with {len(tools_config)} MCP servers")
+           debug_log(f"Input messages: {len(input_messages)} messages")

            response = client.responses.create(
                model=MODEL_NAME,
                input=input_messages,
@@ -336,18 +338,28 @@ async def get_ai_response(history_messages: List[Dict[str, Any]], user_message:
                stream=False
            )

-           debug_log(f"Response status: {response.status}")
+           debug_log(f"Response received, status: {getattr(response, 'status', 'unknown')}")

            # Extract text from Responses API format
+           # Try the shorthand first
+           response_text = getattr(response, 'output_text', None)
+
+           if response_text:
+               debug_log(f"Got response via output_text shorthand: {response_text[:100]}...")
+               return response_text
+
+           # Otherwise navigate the structure
            if hasattr(response, 'output') and len(response.output) > 0:
                for output in response.output:
                    if hasattr(output, 'type') and output.type == "message":
                        if hasattr(output, 'content') and len(output.content) > 0:
                            for content in output.content:
                                if hasattr(content, 'type') and content.type == "output_text":
+                                   debug_log(f"Got response via structure navigation: {content.text[:100]}...")
                                    return content.text

            debug_log(f"Unexpected response format: {response}")
+           debug_log(f"Response attributes: {dir(response)}")
            return "I received a response but couldn't extract the text. Please try again."

    # Standard chat completions (when tools disabled or MCP not available)
@@ -381,6 +393,8 @@ async def get_ai_response(history_messages: List[Dict[str, Any]], user_message:
        error_msg = f"Error calling LiteLLM API: {str(e)}"
        print(error_msg)
        debug_log(f"Exception details: {e}")
+       import traceback
+       debug_log(f"Traceback: {traceback.format_exc()}")
        return error_msg


@bot.event
Reference in New Issue
Block a user