Add intelligent tool_choice parameter for MCP tools
All checks were successful
OpenWebUI Discord Bot / Build-and-Push (push) Successful in 54s
Implements smart tool selection based on query content:

- Adds query_needs_tools() function to detect tool-requiring queries
- Sets tool_choice="required" for queries needing GitHub/time/weather/search
- Sets tool_choice="auto" for general conversation
- Adds debug logging for tool choice decisions

This fixes the issue where MCP tools were configured but not being used,
because tool_choice defaulted to "auto" and the model opted not to use them.

Query detection keywords include:

- Time/date operations (time, clock, date, now, current)
- Weather queries (weather, temperature, forecast)
- GitHub operations (repo, code, file, commit, PR, issue)
- Search/lookup operations (search, find, get, fetch, retrieve)
- File operations (read, open, check, list, contents)

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
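For a quick sense of how the detection behaves, here is a minimal sketch exercising the new function on sample queries. The `bot` import path is hypothetical; substitute whatever module `query_needs_tools()` actually lands in:

```python
# Hypothetical import path; query_needs_tools() is defined in the diff below.
from bot import query_needs_tools

print(query_needs_tools("what time is it in Berlin?"))       # True  (matches 'time')
print(query_needs_tools("show me open issues in the repo"))  # True  ('show me', 'issue', 'repo')
print(query_needs_tools("haha, nice"))                       # False -> tool_choice stays "auto"
```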
@@ -80,6 +80,41 @@ async def download_image(url: str) -> str | None:
         print(f"Error downloading image from {url}: {e}")
     return None
 
+def query_needs_tools(message: str) -> bool:
+    """
+    Detect if a query likely needs MCP tool access.
+
+    Returns True if the message contains keywords suggesting the need for:
+    - Real-time data (time, weather, current events)
+    - GitHub operations (repos, code, files, issues, PRs)
+    - Search/lookup operations
+    - File system access
+    """
+    tool_keywords = [
+        # Time-related
+        'time', 'clock', 'date', 'today', 'now', 'current', 'when',
+        # Weather
+        'weather', 'temperature', 'forecast',
+        # GitHub operations
+        'github', 'repo', 'repository', 'repositories', 'code', 'file', 'files',
+        'commit', 'commits', 'branch', 'branches', 'pr', 'pull request',
+        'issue', 'issues', 'merge', 'fork', 'clone',
+        # Search/lookup
+        'search', 'look up', 'find', 'locate', 'get', 'fetch', 'retrieve',
+        'show me', 'tell me about', 'what is', 'how many',
+        # File operations
+        'read', 'open', 'check', 'list', 'contents'
+    ]
+
+    message_lower = message.lower()
+    needs_tools = any(keyword in message_lower for keyword in tool_keywords)
+
+    if DEBUG_LOGGING and needs_tools:
+        matched = [kw for kw in tool_keywords if kw in message_lower]
+        debug_log(f"Query needs tools - matched keywords: {matched}")
+
+    return needs_tools
+
 async def get_available_mcp_servers():
     """Query LiteLLM for available MCP servers (used with Responses API)"""
     try:
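One caveat on the hunk above: `keyword in message_lower` is substring matching, so 'time' also fires on "sometimes" and 'pr' on "price". If that proves too eager in practice, a word-boundary variant is a small change. A sketch, not part of this commit, with an abbreviated keyword list for illustration:

```python
import re

TOOL_KEYWORDS = ['time', 'pr', 'issue']  # abbreviated list for illustration

def query_needs_tools_strict(message: str) -> bool:
    # \b word boundaries keep 'time' from matching inside 'sometimes'.
    message_lower = message.lower()
    return any(re.search(rf"\b{re.escape(kw)}\b", message_lower) for kw in TOOL_KEYWORDS)

print(query_needs_tools_strict("sometimes the price drops"))  # False
print(query_needs_tools_strict("what time is it?"))           # True
```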
@@ -327,6 +362,10 @@ async def get_ai_response(history_messages: List[Dict[str, Any]], user_message:
                 debug_log("No MCP servers found, falling back to standard chat completions")
                 # Fall through to standard chat completions below
             else:
+                # Determine if we should force tool usage based on query content
+                tool_choice_value = "required" if query_needs_tools(user_message) else "auto"
+                debug_log(f"Tool choice mode: {tool_choice_value}")
+
                 # Use Responses API with MCP tools
                 debug_log(f"Calling Responses API with {len(tools_config)} MCP servers")
                 debug_log(f"Input messages: {len(input_messages)} messages")
@@ -335,6 +374,7 @@ async def get_ai_response(history_messages: List[Dict[str, Any]], user_message:
                     model=MODEL_NAME,
                     input=input_messages,
                     tools=tools_config,
+                    tool_choice=tool_choice_value,
                     stream=False
                 )
 
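Taken together, the call site ends up roughly as below. A minimal sketch assuming an OpenAI-compatible async client pointed at a LiteLLM proxy; the base_url/api_key values are placeholders, the `bot` import path is hypothetical, and `query_needs_tools` is the function added above:

```python
from openai import AsyncOpenAI

from bot import query_needs_tools  # hypothetical import path

# Placeholder endpoint/credentials for a LiteLLM proxy.
client = AsyncOpenAI(base_url="http://localhost:4000/v1", api_key="sk-placeholder")

async def responses_with_mcp(model_name, input_messages, tools_config, user_message):
    # Force tool usage only when the query looks like it needs live data.
    tool_choice_value = "required" if query_needs_tools(user_message) else "auto"
    return await client.responses.create(
        model=model_name,
        input=input_messages,
        tools=tools_config,
        tool_choice=tool_choice_value,
        stream=False,
    )
```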