"""
title: AutoTool Filter
author: open-webui
author_url: https://github.com/open-webui
funding_url: https://github.com/open-webui
version: 2.0.0
required_open_webui_version: 0.5.0
"""

from pydantic import BaseModel, Field
from typing import Callable, Awaitable, Any, Optional, Literal
import json
import re

# Updated imports
from open_webui.models.users import Users
from open_webui.models.tools import Tools
from open_webui.models.models import Models
from open_webui.utils.chat import generate_chat_completion
from open_webui.utils.misc import get_last_user_message


class Filter:
    """Inlet filter that asks the model itself which installed tools match the
    user's query, then attaches the selected tool IDs/configuration to ``body``
    before the main completion runs."""

    class Valves(BaseModel):
        # Prompt template; ``{{TOOLS}}`` is replaced by the available-tool list.
        template: str = Field(
            default="""Tools: {{TOOLS}}
If a tool doesn't match the query, return an empty list []. Otherwise, return a list of matching tool IDs in the format ["tool_id"]. Select multiple tools if applicable. Only return the list. Do not return any other text only the list. Review the entire chat history to ensure the selected tool matches the context. If unsure, default to an empty list [].
Use tools conservatively."""
        )
        # When False, suppress all status events emitted by this filter.
        status: bool = Field(default=True)

    def __init__(self):
        self.valves = self.Valves()

    async def _emit_status(
        self,
        __event_emitter__: Callable[[Any], Awaitable[None]],
        description: str,
        done: bool,
    ) -> None:
        """Send a status event to the UI — honoring the ``status`` valve,
        which the previous revision declared but never checked."""
        if self.valves.status:
            await __event_emitter__(
                {
                    "type": "status",
                    "data": {"description": description, "done": done},
                }
            )

    @staticmethod
    def _fold_tool_outputs_into_messages(body: dict, messages: list) -> None:
        """Append any prior tool outputs in ``body`` to the chat history as
        system messages so the selector model sees them as context."""
        for tool_output in body.get("tool_outputs") or []:
            tool_id = tool_output.get("tool_id", "unknown_tool")
            output = tool_output.get("output", {})
            output_str = (
                json.dumps(output) if isinstance(output, dict) else str(output)
            )
            messages.append(
                {
                    # system role carries more authority than assistant here
                    "role": "system",
                    "content": f"Tool `{tool_id}` Output: {output_str}",
                }
            )

    @staticmethod
    def _parse_tool_ids(content: Optional[str]) -> list:
        """Extract a JSON list of tool IDs from the model's raw reply.

        Returns [] when the reply is missing, not JSON, or not a list —
        previously a parse failure bubbled up as an error status; an
        unparseable reply is better treated as "no tools matched".
        """
        if content is None:
            return []
        print(f"content: {content}")
        # NOTE(review): naive quote normalization — breaks if a tool ID
        # contains an apostrophe; kept for compatibility with v1 behavior.
        content = content.replace("'", '"')
        match = re.search(r"\[.*?\]", content)
        if match:
            content = match.group(0)
        try:
            result = json.loads(content)
        except json.JSONDecodeError:
            return []
        return result if isinstance(result, list) else []

    async def inlet(
        self,
        body: dict,
        __event_emitter__: Callable[[Any], Awaitable[None]],
        __request__: Any,
        __user__: Optional[dict] = None,
        __model__: Optional[dict] = None,
    ) -> dict:
        """Select matching tools for the latest user message and mutate
        ``body`` (tool_ids, tools, messages, execution flags) accordingly.

        Returns ``body`` unchanged on any failure, emitting an error status.
        """
        messages = body["messages"]
        user_message = get_last_user_message(messages)

        # Surface earlier tool results to the selector as context.
        self._fold_tool_outputs_into_messages(body, messages)

        await self._emit_status(
            __event_emitter__, "Finding the right tools...", done=False
        )

        all_tools = [
            {"id": tool.id, "description": tool.meta.description}
            for tool in Tools.get_tools()
        ]
        # __model__ is Optional — guard against None before chaining .get()
        available_tool_ids = (
            (__model__ or {}).get("info", {}).get("meta", {}).get("toolIds", [])
        )
        available_tools = [
            tool for tool in all_tools if tool["id"] in available_tool_ids
        ]

        # Build the selection prompt; stress that multiple tools may be needed.
        system_prompt = self.valves.template.replace(
            "{{TOOLS}}", str(available_tools)
        )
        system_prompt += '\nIMPORTANT: If multiple tools are needed to complete the task, return ALL required tools in the list. For example, if both a date tool and an email tool are needed, return ["date-tool-id", "email-tool-id"].'

        # Last 4 messages, newest first, as quoted history plus the query.
        prompt = (
            "History:\n"
            + "\n".join(
                f"{message['role'].upper()}: \"\"\"{message['content']}\"\"\""
                for message in messages[::-1][:4]
            )
            + f"\nQuery: {user_message}"
        )
        payload = {
            "model": body["model"],
            "messages": [
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": prompt},
            ],
            "stream": False,
        }

        try:
            user = Users.get_user_by_id(__user__["id"])
            # Direct successor of the legacy main.generate_chat_completions
            response = await generate_chat_completion(
                request=__request__, form_data=payload, user=user
            )
            content = response["choices"][0]["message"]["content"]
            result = self._parse_tool_ids(content)

            if result:
                body["tool_ids"] = result

                # Tell the downstream model explicitly which tools it has.
                tool_names = [
                    next(
                        (
                            t["description"]
                            for t in available_tools
                            if t["id"] == tool_id
                        ),
                        tool_id,
                    )
                    for tool_id in result
                ]
                body["messages"].append(
                    {
                        "role": "system",
                        "content": f"TOOL AVAILABILITY: You have access to the following tools for this request: {', '.join(tool_names)}. Use these tools to complete the user's request.",
                    }
                )

                # Flags to encourage sequential multi-tool execution.
                body["execute_tools_sequentially"] = True
                body["tools_are_sequential"] = True

                # Ensure a tools array exists, then add each detected tool
                # (skipping duplicates) with an enhanced configuration.
                if not body.get("tools"):
                    body["tools"] = []
                for tool_id in result:
                    tool_info = next(
                        (t for t in available_tools if t["id"] == tool_id), None
                    )
                    if tool_info and not any(
                        t.get("id") == tool_id for t in body.get("tools", [])
                    ):
                        body["tools"].append(
                            {
                                "id": tool_id,
                                "description": tool_info["description"],
                                "enabled": True,
                                "execute": True,
                                # Mark as required to ensure execution
                                "required": True,
                            }
                        )

                await self._emit_status(
                    __event_emitter__,
                    f"Found matching tools: {', '.join(result)}",
                    done=True,
                )
            else:
                await self._emit_status(
                    __event_emitter__, "No matching tools found.", done=True
                )
        except Exception as e:
            print(e)
            await self._emit_status(
                __event_emitter__, f"Error processing request: {e}", done=True
            )
        return body