Fix Discord bot to use LiteLLM Responses API v2 with MCP tools
All checks were successful
OpenWebUI Discord Bot / Build-and-Push (push) Successful in 53s

Key changes:
- Upgrade OpenAI SDK from 1.x to 2.x (required for responses API)
- Update get_ai_response to use developer role for system prompts
- Improve response extraction with output_text shorthand
- Add enhanced debug logging for troubleshooting
- Add traceback logging for better error diagnostics

This fixes the "'OpenAI' object has no attribute 'responses'" error
and enables proper MCP tool auto-execution through LiteLLM.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
2025-12-12 10:58:10 -08:00
parent 240330cf3b
commit 394e4ccf24
2 changed files with 19 additions and 5 deletions

View File

@@ -277,10 +277,10 @@ async def get_ai_response(history_messages: List[Dict[str, Any]], user_message:
     # Build input array with system prompt, history, and current message
    input_messages = []
-    # Add system prompt as developer role (newer models) or user role
+    # Add system prompt as developer role for newer models
    input_messages.append({
-        "role": "user",  # System messages converted to user for Responses API
+        "role": "developer",
-        "content": f"[System Instructions]\n{SYSTEM_PROMPT}"
+        "content": SYSTEM_PROMPT
    })
    # Add conversation history
# Add conversation history # Add conversation history
@@ -329,6 +329,8 @@ async def get_ai_response(history_messages: List[Dict[str, Any]], user_message:
    else:
        # Use Responses API with MCP tools
        debug_log(f"Calling Responses API with {len(tools_config)} MCP servers")
+        debug_log(f"Input messages: {len(input_messages)} messages")
        response = client.responses.create(
            model=MODEL_NAME,
            input=input_messages,
@@ -336,18 +338,28 @@ async def get_ai_response(history_messages: List[Dict[str, Any]], user_message:
            stream=False
        )
-        debug_log(f"Response status: {response.status}")
+        debug_log(f"Response received, status: {getattr(response, 'status', 'unknown')}")
        # Extract text from Responses API format
+        # Try the shorthand first
+        response_text = getattr(response, 'output_text', None)
+        if response_text:
+            debug_log(f"Got response via output_text shorthand: {response_text[:100]}...")
+            return response_text
+        # Otherwise navigate the structure
        if hasattr(response, 'output') and len(response.output) > 0:
            for output in response.output:
                if hasattr(output, 'type') and output.type == "message":
                    if hasattr(output, 'content') and len(output.content) > 0:
                        for content in output.content:
                            if hasattr(content, 'type') and content.type == "output_text":
+                                debug_log(f"Got response via structure navigation: {content.text[:100]}...")
                                return content.text
        debug_log(f"Unexpected response format: {response}")
+        debug_log(f"Response attributes: {dir(response)}")
        return "I received a response but couldn't extract the text. Please try again."
    # Standard chat completions (when tools disabled or MCP not available)
@@ -381,6 +393,8 @@ async def get_ai_response(history_messages: List[Dict[str, Any]], user_message:
        error_msg = f"Error calling LiteLLM API: {str(e)}"
        print(error_msg)
        debug_log(f"Exception details: {e}")
+        import traceback
+        debug_log(f"Traceback: {traceback.format_exc()}")
        return error_msg
 @bot.event

View File

@@ -1,5 +1,5 @@
 discord.py>=2.0.0
-openai>=1.0.0
+openai>=2.0.0
 python-dotenv>=1.0.0
 aiohttp>=3.8.0
 tiktoken>=0.5.0