Add meta tool samples (#75)
* Add meta tool samples * add meta tool samples * add meta tool samples
This commit is contained in:
47
Meta_tools/Meta_tool_config.json
Normal file
47
Meta_tools/Meta_tool_config.json
Normal file
@@ -0,0 +1,47 @@
|
||||
{
|
||||
"Information Retrieval": {
|
||||
"description": "The Information Retrieval category includes tools for retrieving web-based content or answering queries through external sources. These tools support tasks such as web search, open-domain fact lookup, and retrieval-augmented response generation. Such tools can also help to check the high-speed rail information for travel. You can basically use this category to search and retrieve whatever you want to know.",
|
||||
"tool_usage_notes": "When using online search tools, the `max_results` parameter MUST BE AT MOST 6 per query. For queries related to China-specific topics, consider translating the query into **Chinese** to better match the search engine styles.",
|
||||
"tools": [
|
||||
"bing_search",
|
||||
"fetch_webpage",
|
||||
"get-stations-code-in-city",
|
||||
"get-station-code-of-citys",
|
||||
"get-station-code-by-names",
|
||||
"get-station-by-telecode",
|
||||
"get-tickets",
|
||||
"get-interline-tickets",
|
||||
"get-train-route-stations",
|
||||
"maps_weather"
|
||||
]
|
||||
},
|
||||
"Programming & Tech Support": {
|
||||
"description": "The Programming & Tech Support category includes tools for executing code, executing shell commands, running scripts, and interacting with system environments to support tasks such as debugging, automation, and developer operations.",
|
||||
"tool_usage_notes": "",
|
||||
"tools": [
|
||||
"execute_python_code",
|
||||
"execute_shell_command"
|
||||
]
|
||||
},
|
||||
"Location & Navigation": {
|
||||
"description": "The Location & Navigation category includes tools for accessing and processing geospatial data, including walking, driving, cycling, and public transit navigation; distance estimation; geocoding and reverse geocoding; IP-based location detection; located weather retrieval; and various forms of location-based search such as keyword search, detailed POI lookup, and nearby place discovery. These tools support navigation and content generation tasks requiring spatial awareness, such as route descriptions, travel suggestions, and area-based comparisons.",
|
||||
"tool_usage_notes": "",
|
||||
"tools": [
|
||||
"maps_direction_bicycling",
|
||||
"maps_direction_driving",
|
||||
"maps_direction_transit_integrated",
|
||||
"maps_direction_walking",
|
||||
"maps_distance",
|
||||
"maps_geo",
|
||||
"maps_regeocode",
|
||||
"maps_ip_location",
|
||||
"maps_schema_personal_map",
|
||||
"maps_around_search",
|
||||
"maps_search_detail",
|
||||
"maps_text_search",
|
||||
"maps_schema_navi",
|
||||
"maps_schema_take_taxi",
|
||||
"maps_weather"
|
||||
]
|
||||
}
|
||||
}
|
||||
988
Meta_tools/Meta_toolkit.py
Normal file
988
Meta_tools/Meta_toolkit.py
Normal file
@@ -0,0 +1,988 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
This file is the core of the meta tool system.
|
||||
|
||||
It contains the CategoryManager class, which manages the tools within a
|
||||
specific category. It also contains the MetaManager class, which is the
|
||||
top-level class that manages the category managers.
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
from typing import (
|
||||
Any,
|
||||
Dict,
|
||||
Literal,
|
||||
Optional,
|
||||
Union,
|
||||
)
|
||||
from agentscope.model import ChatModelBase
|
||||
from agentscope.formatter import FormatterBase
|
||||
from agentscope.memory import InMemoryMemory, MemoryBase
|
||||
from agentscope.message import Msg, TextBlock
|
||||
from agentscope.tool import Toolkit, ToolResponse
|
||||
from agentscope.tool._types import RegisteredToolFunction
|
||||
from agentscope.tool._toolkit import ToolGroup
|
||||
from meta_config_models import MetaToolConfig
|
||||
|
||||
# Constants
|
||||
MAX_ITERATIONS = 5
|
||||
|
||||
|
||||
class CategoryManager:
|
||||
"""Level 2 Category Manager - Manages tools within a specific category."""
|
||||
|
||||
def __init__(
    self,
    category_name: str,
    category_description: str,
    model: ChatModelBase,
    formatter: FormatterBase,
    tool_usage_notes: str = "",
    memory: Optional[MemoryBase] = None,
):
    """Initialize the Category Manager

    Args:
        category_name (`str`):
            The unique name identifier for this category manager.
        category_description (`str`):
            A comprehensive description of what this category manages
            and its functional scope.
        model (`ChatModelBase`):
            The chat model used for internal tool selection, tool result
            evaluation, and summary generation within this category.
        formatter (`FormatterBase`):
            The formatter used to format the messages into the required
            format of the model API provider.
        tool_usage_notes (`str`, optional):
            Special usage notes and considerations for tools in this
            category that will be included in the system prompts.
        memory (`MemoryBase`, optional):
            The memory instance to be used by this category manager.
            A fresh InMemoryMemory() is created if not provided.
    """
    self.category_name = category_name
    self.category_description = category_description
    self.model = model
    self.tool_usage_notes = tool_usage_notes
    # Fix: the previous signature used `memory: MemoryBase =
    # InMemoryMemory()`, a mutable default evaluated once at import time
    # and therefore SHARED by every CategoryManager constructed without
    # an explicit memory. Create a fresh memory per instance instead.
    self.memory = memory if memory is not None else InMemoryMemory()
    self.formatter = formatter
    # internal level 1 tools (hidden from external agents)
    self.internal_toolkit = Toolkit()
|
||||
|
||||
def _generate_category_json_schema(self) -> dict:
    """Build the OpenAI function-calling schema for this category.

    Returns:
        `dict`:
            A JSON schema in OpenAI function calling format that
            presents this category manager as one callable tool with
            'objective' and 'exact_input' string parameters.
    """
    # Assemble the pieces separately, then combine them into the final
    # function-calling schema.
    category_description = (
        f"{self.category_description} This category automatically "
        "selects and operates the most appropriate tool based on "
        "your objective and input."
    )
    objective_schema = {
        "type": "string",
        "description": (
            "A clear and well-defined description of the "
            "goal you wish to accomplish using tools in "
            "this category. Be explicit about your "
            "intended outcome to ensure accurate tool "
            "selection and execution."
        ),
    }
    exact_input_schema = {
        "type": "string",
        "description": (
            "The precise, detailed, and complete input "
            "or query to be processed by the selected "
            "tool. Ensure all relevant data, context, "
            "and execution details are fully provided to "
            "enable accurate tool operation."
        ),
    }
    return {
        "type": "function",
        "function": {
            "name": self.category_name,
            "description": category_description,
            "parameters": {
                "type": "object",
                "properties": {
                    "objective": objective_schema,
                    "exact_input": exact_input_schema,
                },
                "required": ["objective", "exact_input"],
            },
        },
    }
|
||||
|
||||
@property
def json_schema(self) -> dict:
    """The JSON schema exposing this category manager as a tool.

    Returns:
        `dict`:
            The schema that defines this category manager as a callable
            tool function for external agents.
    """
    schema = self._generate_category_json_schema()
    return schema
|
||||
|
||||
def generate_internal_tool_json_schema(self) -> list:
    """Generate JSON schemas for the internal tools of this category
    manager.

    Returns:
        `list`:
            A list of JSON schemas, one per tool contained within this
            category's internal toolkit.
    """
    # Fix: Toolkit.get_json_schemas() returns a list of per-tool
    # schemas (the original docstring already said "a list"), so the
    # return annotation is `list`, not `dict` as previously declared.
    return self.internal_toolkit.get_json_schemas()
|
||||
|
||||
def _get_prompt(
    self,
    prompt_type: Literal[
        "tool_selection",
        "tool_result_evaluation",
        "max_iteration_summary",
    ],
) -> str:
    """Load and render the system prompt for one workflow phase.

    Args:
        prompt_type (`Literal["tool_selection", "tool_result_evaluation", \
"max_iteration_summary"]`):
            The type of prompt to generate. Each type corresponds to a
            different phase of the execution workflow:
            - "tool_selection": For initial tool selection
            - "tool_result_evaluation": For evaluating tool results
            - "max_iteration_summary": For generating final summaries

    Returns:
        `str`:
            The formatted system prompt, with category-specific tool
            usage notes appended when configured.
    """
    # Prompt templates live in meta_tool_prompts/ next to this module.
    template_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)),
        "meta_tool_prompts",
        f"prompt_{prompt_type}.md",
    )

    with open(template_path, "r", encoding="utf-8") as template_file:
        template = template_file.read()

    rendered = template.format_map(
        {
            "category_name": self.category_name,
        },
    )

    # Append category-specific usage notes only when non-blank.
    notes = self.tool_usage_notes.strip() if self.tool_usage_notes else ""
    if notes:
        rendered += (
            f"\n\n## Special Tool Usage Considerations for "
            f"{self.category_name}\n{notes}\n\n"
            "Please keep these considerations in mind when generating "
            "tool calls."
        )
    return rendered
|
||||
|
||||
def add_internal_func_obj(
    self,
    func_obj: Optional[RegisteredToolFunction] = None,
    tool_group: Optional[ToolGroup] = None,
) -> None:
    """Add an internal tool function object to the category manager.

    Args:
        func_obj (`RegisteredToolFunction`):
            The registered tool function object to be added to this
            category's internal toolkit. NOTE(review): although declared
            with a ``None`` default, a real object is effectively
            required — ``func_obj.name`` is accessed unconditionally
            below, so passing ``None`` raises ``AttributeError``.
        tool_group (`ToolGroup`, optional):
            The tool group information from the global toolkit. If
            provided and the function's group doesn't exist in the
            internal toolkit, the group will be created inside the
            category manager, maintaining consistency with the outside.

    Note:
        This method directly adds the tool function object to the
        internal toolkit and preserves the original group structure
        from the global toolkit.
    """
    # Register the function under its own name in the private toolkit.
    self.internal_toolkit.tools[func_obj.name] = func_obj

    # Classify internal_toolkit according to the original groups of the
    # toolkit if needed (only when the group is not yet mirrored and a
    # group object was supplied by the caller).
    if func_obj.group not in self.internal_toolkit.groups and tool_group:
        self.internal_toolkit.groups[func_obj.group] = tool_group
|
||||
|
||||
async def execute_category_task(
    self,
    objective: str,
    exact_input: str,
) -> ToolResponse:
    """Execute a task within this category using intelligent tool
    selection.

    This is the core method that implements the multi-round
    reasoning-acting loop for category-level task execution. It performs
    tool selection, execution, evaluation, and result synthesis
    automatically.

    Args:
        objective (`str`):
            The goal to accomplish with this category's tools.
        exact_input (`str`):
            The concrete input/query to be processed by the selected
            tools.

    Returns:
        `ToolResponse`:
            A response containing the execution results in JSON format
            with fields:
            - "all_execution_results": Detailed history of tool executions
            - "summary": Comprehensive summary of accomplishments
            - "category": Category name for tracking

    Note:
        The method implements a maximum of MAX_ITERATIONS iterations for
        the reasoning-acting loop. The category memory is cleared both
        before the loop starts and in the ``finally`` block, so no
        conversation state survives across calls.
    """
    try:
        # 1. Check tool availability — without tools there is nothing
        # to route to, so fail fast with success=False metadata.
        if not self.internal_toolkit.tools:
            return ToolResponse(
                content=[
                    TextBlock(
                        type="text",
                        text=(
                            f"'{self.category_name}' has no available "
                            f"tools"
                        ),
                    ),
                ],
                metadata={
                    "success": False,
                },
            )

        # 2. First round: tool selection (using tool_selection prompt)
        response = await self._llm_select_tools(objective, exact_input)

        reasoning = response.get("reasoning", "")
        tool_calls = response.get("tool_calls", [])

        # 3. Check if there are tool calls
        if not tool_calls:
            # No tool calls — the model deliberately selected no tool
            # after constraint analysis; this counts as a successful
            # (if empty) outcome, hence success=True.
            return ToolResponse(
                content=[
                    TextBlock(
                        type="text",
                        text=(
                            f"Based on the constraint analysis, the "
                            f"{self.category_name} category selects not "
                            f"to perform any tool calls. \n\n Reason: "
                            f"{reasoning}"
                        ),
                    ),
                ],
                metadata={
                    "success": True,
                },
            )

        # Start from a clean per-task memory.
        await self.memory.clear()

        # Add user request to memory
        await self.memory.add(
            Msg(
                name="user",
                content=f"Task: {objective}\nInput: {exact_input}",
                role="user",
            ),
        )

        # Add initial reasoning to memory
        await self.memory.add(
            Msg(
                name="assistant",
                content=reasoning,
                role="assistant",
            ),
        )

        all_execution_results = []
        max_iterations = MAX_ITERATIONS
        iteration = 0

        # 4. Execution loop: execute, evaluate, and either finish or
        # continue with the follow-up tool calls proposed by the model.
        while iteration < max_iterations:
            iteration += 1

            # Execute current round of tool calls
            current_results = await self._execute_tool_calls(tool_calls)
            all_execution_results.extend(current_results)

            # Evaluate whether to continue (using tool_result_evaluation
            # prompt); objective/exact_input are already in memory.
            evaluation_response = await self._evaluate_tool_results(
                objective,
                exact_input,
            )
            evaluation_text = evaluation_response.get("reasoning", "")
            new_tool_calls = evaluation_response.get("tool_calls", [])

            # Add evaluation result to memory
            if evaluation_text:
                await self.memory.add(
                    Msg(
                        name="assistant",
                        content=evaluation_text,
                        role="assistant",
                    ),
                )

            # Key judgment: no new tool calls = task completed
            if not new_tool_calls:
                final_output = {
                    "all_execution_results": all_execution_results,
                    "summary": evaluation_text,
                    "category": self.category_name,
                }
                final_output_str = json.dumps(
                    final_output,
                    indent=4,
                    ensure_ascii=False,
                )

                return ToolResponse(
                    content=[
                        TextBlock(
                            type="text",
                            text=final_output_str,
                        ),
                    ],
                    metadata={
                        "success": True,
                    },
                )

            # Continue to next round with the newly proposed calls.
            tool_calls = new_tool_calls

        # Reached maximum iterations — produce a best-effort summary of
        # what was accomplished so far instead of failing outright.
        max_iter_summary = await self._generate_max_iteration_summary()
        final_output = {
            "all_execution_results": all_execution_results,
            "summary": max_iter_summary,
            "category": self.category_name,
        }
        final_output_str = json.dumps(
            final_output,
            indent=4,
            ensure_ascii=False,
        )

        return ToolResponse(
            content=[
                TextBlock(
                    type="text",
                    text=final_output_str,
                ),
            ],
            metadata={
                "success": True,
            },
        )

    except Exception as e:
        # Any unexpected failure is reported to the caller as a
        # ToolResponse rather than propagating out of the meta tool.
        return ToolResponse(
            content=[
                TextBlock(
                    type="text",
                    text=f"{self.category_name} execution error: {str(e)}",
                ),
            ],
            metadata={
                "success": False,
            },
        )
    finally:
        # Always leave the category memory empty for the next task.
        await self.memory.clear()
|
||||
|
||||
async def _llm_select_tools(
    self,
    objective: str,
    exact_input: str,
) -> dict:
    """Perform initial tool selection using LLM reasoning.

    Uses the tool_selection prompt template to guide the LLM in
    selecting the most appropriate tools from the internal toolkit
    based on the objective and input parameters.

    Returns:
        `dict`:
            A dictionary containing:
            - "reasoning": The LLM's reasoning for tool selection
            - "tool_calls": List of selected tool calls in ToolUseBlock
              format

    Note:
        If no tools are selected due to constraint analysis or missing
        requirements, the tool_calls list will be empty and reasoning
        will contain the explanation.
    """
    try:
        # Build the system prompt and the user-facing task message.
        selection_prompt = self._get_prompt("tool_selection")
        task_message = (
            f"Task objective: {objective}\nInput data: {exact_input}"
        )

        # Format for the model provider, then call with the internal
        # tool schemas attached.
        formatted_msgs = await self.formatter.format(
            msgs=[
                Msg("system", selection_prompt, "system"),
                Msg("user", task_message, "user"),
            ],
        )
        raw_response = await self.model(
            formatted_msgs,
            tools=self.internal_toolkit.get_json_schemas(),
        )

        # Normalize the (possibly streaming) response and parse it.
        selection_msg = await self._handle_model_response(raw_response)
        return self._parse_tool_selection_response(selection_msg)

    except Exception as e:
        # Report failures as an empty selection with an explanation.
        return {
            "reasoning": f"Tool selection failed: {str(e)}",
            "tool_calls": [],
        }
|
||||
|
||||
async def _handle_model_response(self, res: Any) -> Msg:
    """Normalize a model response (streaming or not) into a single Msg."""
    try:
        if not self.model.stream:
            # Non-streaming: res is a single ChatResponse.
            return Msg(
                self.category_name,
                list(res.content),
                "assistant",
            )

        # Streaming: res is an async generator of ChatResponse chunks;
        # each chunk's content replaces the previous one, so the last
        # chunk wins.
        streamed_msg = Msg(self.category_name, [], "assistant")
        async for chunk in res:
            streamed_msg.content = chunk.content
        return streamed_msg

    except Exception as parse_error:
        raise ValueError(
            f"Response parsing failed: {str(parse_error)}",
        ) from parse_error
|
||||
|
||||
def _parse_tool_selection_response(self, msg: Msg) -> dict:
    """Extract reasoning text and tool-use blocks from a model
    message."""
    try:
        reasoning_parts = []
        tool_calls = []

        if isinstance(msg.content, list):
            for block in msg.content:
                if not isinstance(block, dict):
                    continue
                block_type = block.get("type")
                if block_type == "text":
                    reasoning_parts.append(block.get("text", ""))
                elif block_type == "tool_use":
                    tool_calls.append(block)

        # NOTE(review): when the Msg exposes get_content_blocks, its
        # result deliberately replaces the manually collected tool
        # calls above (matching the original behavior).
        if hasattr(msg, "get_content_blocks"):
            tool_calls = msg.get_content_blocks("tool_use")

        return {
            "reasoning": "".join(reasoning_parts),
            "tool_calls": tool_calls,
        }

    except Exception as parse_error:
        return {
            "reasoning": (f"Response parsing failed: {str(parse_error)}"),
            "tool_calls": [],
        }
|
||||
|
||||
async def _evaluate_tool_results(
    self,
    objective: str,  # pylint: disable=unused-argument
    exact_input: str,  # pylint: disable=unused-argument
) -> dict:
    """Evaluate tool execution results and determine next actions.

    Uses the tool_result_evaluation prompt template and complete memory
    history to assess whether the current tool execution results have
    successfully fulfilled the task or if additional tool calls are
    needed.

    Returns:
        `dict`:
            A dictionary containing:
            - "reasoning": The LLM's evaluation and reasoning
            - "tool_calls": List of additional tool calls if needed, or
              empty list if task is complete

    Note:
        The objective/exact_input parameters are unused here because
        the complete memory history (which already contains them and
        all previous tool results) drives the evaluation.
    """
    try:
        # Build the evaluation prompt and prepend it to the full
        # conversation history held in memory.
        evaluation_prompt = self._get_prompt("tool_result_evaluation")
        history = await self.memory.get_memory()
        formatted_msgs = await self.formatter.format(
            msgs=[
                Msg("system", evaluation_prompt, "system"),
                *history,
            ],
        )

        # Call the model with the internal tool schemas so it can
        # propose follow-up calls.
        raw_response = await self.model(
            formatted_msgs,
            tools=self.internal_toolkit.get_json_schemas(),
        )

        # Normalize and parse exactly like the selection phase.
        evaluation_msg = await self._handle_model_response(raw_response)
        return self._parse_tool_selection_response(evaluation_msg)

    except Exception as e:
        return {
            "reasoning": f"Evaluation failed: {str(e)}",
            "tool_calls": [],
        }
|
||||
|
||||
async def _generate_max_iteration_summary(self) -> str:
    """Generate intelligent summary when reaching maximum iterations.

    Uses the max_iteration_summary prompt template and complete
    memory history to generate a comprehensive summary of what was
    accomplished and what remains incomplete when the maximum iteration
    limit is reached.

    Returns:
        `str`:
            A comprehensive summary of the execution history, including
            successful tool executions, their outputs, and any
            incomplete aspects of the original objective.

    Note:
        This method is called when the category manager reaches the
        maximum number of iterations (MAX_ITERATIONS) without completing
        the task. It provides a failsafe to ensure users receive
        meaningful information even in complex or incomplete scenarios.
    """
    try:
        system_prompt = self._get_prompt("max_iteration_summary")

        messages = await self.formatter.format(
            msgs=[
                Msg("system", system_prompt, "system"),
                *await self.memory.get_memory(),
            ],
        )

        res = await self.model(messages)  # No need for tools parameter

        # Fix: reuse the shared response handler instead of duplicating
        # the streaming/non-streaming branching that previously lived
        # inline here (keeping the two code paths consistent).
        msg = await self._handle_model_response(res)

        # Extract text content from the message blocks.
        summary_text = ""
        if isinstance(msg.content, list):
            for block in msg.content:
                if (
                    isinstance(block, dict)
                    and block.get("type") == "text"
                ):
                    summary_text += block.get("text", "")

        # Prefer the message's own text accessor when available; it
        # supersedes the manual extraction above.
        if hasattr(msg, "get_text_content"):
            summary_text = msg.get_text_content()

        return summary_text or (
            f"Reached maximum iterations ({MAX_ITERATIONS} times). "
            "Summary generation succeeded but content is empty."
        )

    except Exception as e:
        # Covers prompt loading, model call, and response-normalization
        # failures (ValueError raised by _handle_model_response).
        return (
            f"Reached maximum iterations ({MAX_ITERATIONS} times). "
            f"Summary generation failed: {str(e)}"
        )
|
||||
|
||||
async def _execute_tool_calls(self, tool_calls: list) -> list:
    """Execute a list of tool calls and return structured results.

    Executes each tool call sequentially, captures results, and records
    them in memory for subsequent evaluation.

    Args:
        tool_calls (`list`):
            Tool calls in ToolUseBlock-like dict form; each must at
            minimum carry a "name" key, with arguments under "input".

    Returns:
        `list`:
            A list of execution result dictionaries, each containing:
            - 'tool_name': Name of the executed tool
            - 'tool_args': Arguments passed to the tool
            - 'result': Execution result or error message
            - 'status': 'SUCCESS' or 'ERROR'

    Note:
        All results are automatically added to the category's memory
        for use in subsequent evaluation steps. Invalid tool call
        formats are handled gracefully and reported as errors.
    """
    results = []

    for tool_call in tool_calls:
        try:
            # Ensure tool_call is in correct ToolUseBlock format;
            # malformed entries are recorded as errors, not raised.
            if not isinstance(tool_call, dict) or "name" not in tool_call:
                results.append(
                    {
                        "tool_name": "unknown",
                        "tool_args": {},
                        "result": f"Invalid tool call format: {tool_call}",
                        "status": "ERROR",
                    },
                )
                continue

            tool_res = await self.internal_toolkit.call_tool_function(
                tool_call,
            )

            # call_tool_function yields a stream of ToolResponse chunks;
            # drain it fully before inspecting the result.
            result_chunks = []
            async for chunk in tool_res:
                result_chunks.append(chunk)

            # Get final result (the last chunk is taken as the complete
            # response; None if the tool produced nothing).
            final_result = result_chunks[-1] if result_chunks else None

            # Extract content from ToolResponse: concatenate all text
            # blocks; non-text blocks are ignored.
            if final_result and hasattr(final_result, "content"):
                result_text = ""
                for content_block in final_result.content:
                    if (
                        isinstance(content_block, dict)
                        and content_block.get("type") == "text"
                    ):
                        result_text += content_block.get("text", "")

                result_content = (
                    result_text
                    or "Execution successful but no text output"
                )
            else:
                result_content = "No result"

            # Record result
            results.append(
                {
                    "tool_name": tool_call["name"],
                    "tool_args": tool_call.get("input", {}),
                    "result": result_content,
                    "status": "SUCCESS" if final_result else "ERROR",
                },
            )

            # Add to memory (simplified format) so the evaluation phase
            # can see what each tool produced.
            await self.memory.add(
                Msg(
                    name="tool_result",
                    content=(
                        f"Executed {tool_call['name']}: {result_content}"
                    ),
                    role="system",
                ),
            )

        except Exception as e:
            # A failing tool does not abort the batch: record the error
            # both in the results list and in memory, then continue.
            error_msg = f"Error: {str(e)}"
            results.append(
                {
                    "tool_name": tool_call.get("name", "unknown"),
                    "tool_args": tool_call.get("input", {}),
                    "result": error_msg,
                    "status": "ERROR",
                },
            )

            await self.memory.add(
                Msg(
                    name="tool_result",
                    content=(
                        f"Tool {tool_call.get('name', 'unknown')} failed: "
                        f"{error_msg}"
                    ),
                    role="system",
                ),
            )

    return results
|
||||
|
||||
|
||||
class MetaManager(Toolkit):
|
||||
"""Level 3 Meta Manager - Manages Level 2 Category Managers.
|
||||
|
||||
The MetaManager extends the Toolkit class to provide hierarchical tool
|
||||
management. It manages CategoryManager instances and exposes them as
|
||||
callable tools to external agents, while hiding the internal tool
|
||||
complexity.
|
||||
"""
|
||||
|
||||
def __init__(
    self,
    model: Optional[ChatModelBase] = None,
    meta_tool_config: Optional[Union[Dict, MetaToolConfig]] = None,
    global_toolkit: Optional[Toolkit] = None,
    formatter: Optional[FormatterBase] = None,
    memory: Optional[MemoryBase] = None,
) -> None:
    """Initialize the Meta Manager.

    Args:
        model (`ChatModelBase`, optional):
            The chat model to be used for all category managers.
            If provided along with meta_tool_config and global_toolkit,
            the meta manager will be automatically initialized.
        meta_tool_config (`Union[Dict, MetaToolConfig]`, optional):
            Dictionary or MetaToolConfig instance containing category
            configurations loaded from Meta_tool_config.json or
            similar structure. If a dictionary is provided, it will
            be validated using Pydantic.
        global_toolkit (`Toolkit`, optional):
            The global toolkit containing all available tools to be
            distributed among categories.
        formatter (`FormatterBase`, optional):
            The formatter to be used for all category managers.
            Required if auto-initialization is desired (when model,
            meta_tool_config, and global_toolkit are all provided).
        memory (`MemoryBase`, optional):
            The memory instance to be used by all category managers.
            A fresh InMemoryMemory() is created if not provided.

    Raises:
        ValueError: If auto-initialization is requested without a
            formatter, or if the configuration is invalid.
    """
    # self.toolkit manages the external interface of category manager.
    # The internal routing is by self.category_managers
    super().__init__()
    self.category_managers: Dict[str, CategoryManager] = {}

    # Fix: the previous signature used `memory: MemoryBase =
    # InMemoryMemory()`, a mutable default shared by every MetaManager
    # (and passed down to every CategoryManager). Create a fresh
    # instance per call instead.
    if memory is None:
        memory = InMemoryMemory()

    # Auto-initialize if all required parameters are provided
    if (
        model is not None
        and meta_tool_config is not None
        and global_toolkit is not None
    ):
        if formatter is None:
            raise ValueError(
                "formatter parameter is required when auto-initializing "
                "MetaManager. Please provide a formatter that matches "
                "your model type.",
            )

        # Validate and convert config
        validated_config = self._validate_config(meta_tool_config)
        self._initialize_from_config(
            model,
            validated_config,
            global_toolkit,
            formatter,
            memory,
        )
|
||||
|
||||
def _validate_config(
    self,
    config: Union[Dict, MetaToolConfig],
) -> Union[Dict, MetaToolConfig]:
    """Validate configuration using Pydantic models.

    Args:
        config (`Union[Dict, MetaToolConfig]`):
            Configuration to validate. If already a MetaToolConfig
            instance, it's considered validated and returned as-is.

    Returns:
        `Union[Dict, MetaToolConfig]`: Validated configuration.

    Raises:
        ValueError: If configuration is invalid.
    """

    # If already validated (MetaToolConfig instance), return as-is
    if isinstance(config, MetaToolConfig):
        return config
    if isinstance(config, dict):
        # NOTE(review): MetaToolConfig is imported unconditionally at
        # the top of this module, so it is presumably never None here
        # and the else-branch below looks unreachable — confirm whether
        # the import is meant to be optional (try/except ImportError).
        if MetaToolConfig is not None:
            # Pydantic available, validate the config
            try:
                return MetaToolConfig.from_dict(config)
            except Exception as e:
                raise ValueError(
                    f"Configuration validation failed: {e}",
                ) from e
        else:
            # Pydantic not available, return dict as-is with warning
            print(
                "Warning: Pydantic not available. Configuration "
                "validation skipped.",
            )
            return config

    # Invalid type
    raise ValueError(
        f"Invalid configuration type: {type(config)}. "
        "Expected dict or MetaToolConfig-like object with "
        "to_dict() method.",
    )
|
||||
|
||||
def _initialize_from_config(
    self,
    model: ChatModelBase,
    meta_tool_config: Union[Dict, MetaToolConfig],
    global_toolkit: Toolkit,
    formatter: FormatterBase,
    memory: MemoryBase,
) -> None:
    """Initialize category managers from configuration.

    Builds one CategoryManager per category entry, moves the configured
    tools from the global toolkit into it, and registers it on this
    meta manager. Categories without any resolvable tools are skipped.

    Args:
        model (`ChatModelBase`):
            The chat model to be used for all category managers.
        meta_tool_config (`Union[Dict, MetaToolConfig]`):
            Dictionary or MetaToolConfig instance containing category
            configurations.
        global_toolkit (`Toolkit`):
            The global toolkit containing all available tools.
        formatter (`FormatterBase`):
            The formatter to be used for all category managers.
        memory (`MemoryBase`):
            The memory instance to be used by all category managers.

    Raises:
        ValueError: If meta_tool_config is neither a dict nor a
            MetaToolConfig instance.
    """
    # Convert MetaToolConfig to dict if needed so iteration below
    # always works on plain {category_name: category_config} pairs.
    if isinstance(meta_tool_config, MetaToolConfig):
        config_dict = meta_tool_config.to_dict()
    elif isinstance(meta_tool_config, dict):
        config_dict = meta_tool_config
    else:
        raise ValueError(
            f"meta_tool_config must be a dict or MetaToolConfig instance, "
            f"got {type(meta_tool_config)}",
        )

    # Iterate through categories
    for category_name, category_config in config_dict.items():
        category_manager = CategoryManager(
            category_name=category_name,
            category_description=category_config["description"],
            model=model,
            formatter=formatter,
            tool_usage_notes=category_config.get("tool_usage_notes", ""),
            memory=memory,
        )

        # Add tools to the category manager; unknown tool names are
        # reported and skipped rather than aborting initialization.
        for tool_name in category_config["tools"]:
            if tool_name not in global_toolkit.tools:
                print(f"Tool {tool_name} not found in global toolkit")
                continue

            tool_func_obj = global_toolkit.tools[tool_name]
            # "basic" tools have no group object to mirror; others also
            # carry their group so the internal toolkit stays consistent
            # with the global one.
            if tool_func_obj.group == "basic":
                category_manager.add_internal_func_obj(
                    func_obj=tool_func_obj,
                )
            else:
                category_manager.add_internal_func_obj(
                    func_obj=tool_func_obj,
                    tool_group=global_toolkit.groups[tool_func_obj.group],
                )

        # Only add category manager if it has tools
        if not category_manager.internal_toolkit.tools:
            print(f"Category {category_name} has no tools, skip it")
            continue

        self.add_category_manager(category_manager=category_manager)
|
||||
|
||||
def add_category_manager(self, category_manager: CategoryManager) -> None:
    """Register a category manager as a callable tool of this meta manager.

    The manager is stored under its category name, and a thin async
    wrapper around its ``execute_category_task`` method is registered as
    a standard tool function, so external agents can drive the whole
    category through the usual ``(objective, exact_input)`` schema
    without seeing the tools hidden inside it.

    Args:
        category_manager (`CategoryManager`):
            The manager to register. Its category name must not collide
            with one that is already registered.

    Raises:
        ValueError: If a manager with the same category name is already
            registered.

    Note:
        A named wrapper function is created so the tool registration
        sees a proper function name, while the actual execution logic
        stays inside the category manager.
    """
    name = category_manager.category_name
    if name in self.category_managers:
        raise ValueError(f"Category {name} already exists")

    self.category_managers[name] = category_manager

    # External schema exposed to the agent; it deliberately carries no
    # information about the internal tools of the category.
    schema = category_manager.json_schema

    async def _forward(
        objective: str,
        exact_input: str,
    ) -> ToolResponse:
        """Forward the call to the owning category manager."""
        return await category_manager.execute_category_task(
            objective,
            exact_input,
        )

    # Expose the wrapper under the category's own name and description
    # so the registered tool reads naturally to the calling agent.
    _forward.__name__ = name
    _forward.__doc__ = f"{category_manager.category_description}"

    # Register the category manager's execution entry point as a tool.
    self.register_tool_function(
        _forward,
        json_schema=schema,
    )
|
||||
150
Meta_tools/example.py
Normal file
150
Meta_tools/example.py
Normal file
@@ -0,0 +1,150 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Example script demonstrating the use of Meta tools with AgentScope.
|
||||
|
||||
This module shows how to initialize and use a MetaManager to organize tools
|
||||
into categories and use them with ReActAgent for conversational interactions.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import contextlib
|
||||
import json
|
||||
import os
|
||||
|
||||
from Meta_toolkit import MetaManager
|
||||
|
||||
from agentscope.agent import ReActAgent, UserAgent
|
||||
from agentscope.formatter import DashScopeChatFormatter
|
||||
from agentscope.mcp import HttpStatefulClient
|
||||
from agentscope.memory import InMemoryMemory
|
||||
from agentscope.model import DashScopeChatModel
|
||||
from agentscope.tool import Toolkit, execute_python_code, execute_shell_command
|
||||
|
||||
# Required API keys; the script fails fast with a KeyError if any is unset.
gaode_api_key = os.environ["GAODE_API_KEY"]  # Gaode (Amap) MCP service
bing_api_key = os.environ["BING_API_KEY"]  # ModelScope Bing search MCP
train_api_key = os.environ["TRAIN_API_KEY"]  # ModelScope 12306 train MCP
dashscope_api_key = os.environ["DASHSCOPE_API_KEY"]  # DashScope LLM access
|
||||
|
||||
|
||||
async def main() -> None:
    """Main function to demonstrate Meta tools usage.

    Builds a global toolkit (built-in tools plus three MCP services),
    wraps it in a ``MetaManager`` configured from
    ``Meta_tool_config.json``, and runs an interactive user/agent loop
    until the user types ``exit`` or an error occurs. MCP clients are
    always closed in the ``finally`` block.
    """
    # Create global toolkit with your tools
    toolkit = Toolkit()
    toolkit.register_tool_function(execute_python_code)
    toolkit.register_tool_function(execute_shell_command)

    toolkit.create_tool_group(
        "map_tools",
        "Tools related to Gaode map",
        active=True,
    )
    # If a group's 'active' flag is set to False, its tools will be registered
    # to toolkit but remain hidden

    # Three remote MCP services over SSE transport; each URL embeds the
    # API key read from the environment at module import time.
    gaode_client = HttpStatefulClient(
        name="amap-sse",
        transport="sse",
        url=f"https://mcp.amap.com/sse?key={gaode_api_key}",
        # get your own API keys from Gaode MCP servers
    )
    bing_client = HttpStatefulClient(
        name="bing-cn-mcp-server",
        transport="sse",
        url=f"https://mcp.api-inference.modelscope.net/{bing_api_key}/sse",
        # get your own API keys from Modelscope's Bing MCP servers
    )
    train_client = HttpStatefulClient(
        name="12306-mcp",
        transport="sse",
        url=f"https://mcp.api-inference.modelscope.net/{train_api_key}/sse",
        # get your own API keys from Modelscope's train 12306 MCP servers
    )
    try:
        # Connect each MCP client and register its tools; only the Gaode
        # tools go into the dedicated "map_tools" group.
        await gaode_client.connect()
        await toolkit.register_mcp_client(
            gaode_client,
            group_name="map_tools",
        )
        print("Gaode MCP client connected successfully")

        await bing_client.connect()
        await toolkit.register_mcp_client(bing_client)
        print("Bing MCP client connected successfully")

        await train_client.connect()
        await toolkit.register_mcp_client(train_client)
        print("12306 MCP client connected successfully")

        # Meta tool initialization with Pydantic validation
        # Config file is resolved relative to this script's directory.
        current_dir = os.path.dirname(os.path.realpath(__file__))
        meta_tool_config_path = os.path.join(
            current_dir,
            "Meta_tool_config.json",
        )

        # Model used internally by the category managers.
        model = DashScopeChatModel(
            model_name="qwen-max",
            api_key=dashscope_api_key,
            stream=True,
        )

        # Load and validate configuration
        try:
            from meta_config_models import MetaToolConfig

            # Load and validate with Pydantic
            meta_tool_config = MetaToolConfig.from_json_file(
                meta_tool_config_path,
            )
        except Exception as e:
            print(f"Error: {e}")
            # Fallback: load as plain dict
            with open(meta_tool_config_path, "r", encoding="utf-8") as f:
                meta_tool_config = json.load(f)
        # MetaManager accepts either the validated model or the plain
        # dict produced by the fallback path above.
        meta_manager = MetaManager(
            model=model,
            meta_tool_config=meta_tool_config,
            global_toolkit=toolkit,
            formatter=DashScopeChatFormatter(),
            memory=InMemoryMemory(),
        )

        # Use meta_manager directly as toolkit for ReActAgent
        agent = ReActAgent(
            name="Friday",
            sys_prompt="You're a helpful assistant named Friday.",
            model=DashScopeChatModel(
                model_name="qwen-max",
                api_key=dashscope_api_key,
                stream=True,
            ),
            memory=InMemoryMemory(),
            formatter=DashScopeChatFormatter(),
            toolkit=meta_manager,  # Direct replacement for traditional toolkit
        )

        user = UserAgent(name="user")

        # Interactive loop: agent speaks first (msg=None), then the user
        # replies; typing "exit" ends the session.
        msg = None
        try:
            while True:
                msg = await agent(msg)
                msg = await user(msg)
                if msg.get_text_content() == "exit":
                    break
        except Exception as e:
            print(f"Error: {e}")

            return

    finally:
        # Best-effort cleanup: close every MCP client, ignoring errors
        # from clients that never connected.
        with contextlib.suppress(Exception):
            await train_client.close()
        with contextlib.suppress(Exception):
            await bing_client.close()
        with contextlib.suppress(Exception):
            await gaode_client.close()
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Script entry point: run the async demo loop to completion.
    asyncio.run(main())
|
||||
178
Meta_tools/meta_config_models.py
Normal file
178
Meta_tools/meta_config_models.py
Normal file
@@ -0,0 +1,178 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Pydantic models for Meta Tools configuration validation.
|
||||
|
||||
This module defines the data models used for validating Meta_tool_config.json
|
||||
and ensuring type safety during configuration loading.
|
||||
"""
|
||||
|
||||
from typing import Dict, List
|
||||
from pydantic import (
|
||||
BaseModel,
|
||||
Field,
|
||||
field_validator,
|
||||
model_validator,
|
||||
RootModel,
|
||||
)
|
||||
|
||||
|
||||
class CategoryConfig(BaseModel):
    """Configuration model for a single tool category.

    Attributes:
        description (`str`):
            A comprehensive description of what this category manages
            and its functional scope.
        tool_usage_notes (`str`):
            Special usage notes and considerations for tools in this
            category. Can be empty string if no special notes are needed.
        tools (`List[str]`):
            List of tool names that belong to this category.
            Each tool name must be a non-empty string.
    """

    description: str = Field(
        ...,
        min_length=10,
        description=(
            "Comprehensive description of the category's purpose and scope"
        ),
    )

    tool_usage_notes: str = Field(
        default="",
        description="Special usage notes for tools in this category",
    )

    tools: List[str] = Field(
        ...,
        # Pydantic v2 constrains list size with `min_length`; the old
        # `min_items` keyword is deprecated and only emits a warning.
        min_length=1,
        description="List of tool names belonging to this category",
    )

    @field_validator("tools")
    @classmethod
    def validate_tools(cls, v: List[str]) -> List[str]:
        """Validate that all tool names are non-empty strings."""
        if not v:
            raise ValueError("Category must have at least one tool")

        for tool_name in v:
            if not isinstance(tool_name, str) or not tool_name.strip():
                raise ValueError(
                    f"Tool name must be a non-empty string, got: {tool_name}",
                )

        return v

    @field_validator("description")
    @classmethod
    def validate_description(cls, v: str) -> str:
        """Validate that description is meaningful; returns it stripped."""
        if not v.strip():
            raise ValueError("Description cannot be empty")
        return v.strip()
|
||||
|
||||
|
||||
class MetaToolConfig(RootModel[Dict[str, CategoryConfig]]):
    """Root model validating the whole Meta_tool_config.json structure.

    The JSON root is a mapping from category name to its configuration.
    At least one category is required and category names must be
    non-blank strings. Dict-style helpers (``__getitem__``, ``items``,
    ``keys``, ...) let an instance be used like a plain dictionary
    without an explicit conversion, e.g. ``config["category_name"]``
    instead of ``config.root["category_name"]``.
    """

    root: Dict[str, CategoryConfig] = Field(
        ...,
        description="Dictionary of category configurations",
    )

    @model_validator(mode="after")
    def validate_not_empty(self) -> "MetaToolConfig":
        """Reject empty configurations and blank category names."""
        if not self.root:
            raise ValueError("Configuration must have at least one category")

        # Every key must contain at least one non-whitespace character.
        for name in self.root:
            if not name or not name.strip():
                raise ValueError(
                    "Category name cannot be empty or whitespace",
                )

        return self

    # ------------------------------------------------------------------
    # Dictionary-like convenience API
    # ------------------------------------------------------------------

    def __iter__(self):
        """Iterate over the category names: ``for name in config: ...``"""
        return iter(self.root)

    def __getitem__(self, key: str) -> CategoryConfig:
        """Return the category configuration stored under ``key``."""
        return self.root[key]

    def items(self):
        """Return the (name, config) pairs of the configuration."""
        return self.root.items()

    def keys(self):
        """Return a view over the category names."""
        return self.root.keys()

    def values(self):
        """Return a view over the category configurations."""
        return self.root.values()

    def __len__(self) -> int:
        """Return the number of configured categories."""
        return len(self.root)

    @classmethod
    def from_json_file(cls, file_path: str) -> "MetaToolConfig":
        """Load and validate a configuration from a JSON file.

        Args:
            file_path (`str`): Path to the Meta_tool_config.json file.

        Returns:
            `MetaToolConfig`: Validated configuration instance.

        Raises:
            ValidationError: If the configuration is invalid.
            FileNotFoundError: If the configuration file doesn't exist.
            JSONDecodeError: If the JSON is malformed.
        """
        import json

        with open(file_path, "r", encoding="utf-8") as fh:
            raw = json.load(fh)

        return cls(raw)

    @classmethod
    def from_dict(cls, config_dict: Dict) -> "MetaToolConfig":
        """Build a validated configuration from an in-memory dictionary.

        Args:
            config_dict (`Dict`): Configuration dictionary.

        Returns:
            `MetaToolConfig`: Validated configuration instance.
        """
        return cls(config_dict)

    def to_dict(self) -> Dict[str, Dict]:
        """Return the configuration as plain nested dictionaries.

        Returns:
            `Dict[str, Dict]`: Plain dictionary representation.
        """
        plain: Dict[str, Dict] = {}
        for name, cfg in self.root.items():
            plain[name] = cfg.model_dump()
        return plain
|
||||
BIN
Meta_tools/meta_tool_example_image.jpg
Normal file
BIN
Meta_tools/meta_tool_example_image.jpg
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 457 KiB |
11
Meta_tools/meta_tool_prompts/prompt_max_iteration_summary.md
Normal file
11
Meta_tools/meta_tool_prompts/prompt_max_iteration_summary.md
Normal file
@@ -0,0 +1,11 @@
|
||||
You are an intelligent summarizer for the {category_name} category. You need to analyze and summarize the complete tool execution history and provide a comprehensive summary of what was accomplished and what remains incomplete.
|
||||
|
||||
## Task
|
||||
Based on the conversation history, provide a concise but comprehensive summary that includes:
|
||||
1. **What was accomplished**: Summarize the successful tool executions and their key outputs
|
||||
2. **What remains incomplete**: Identify any unfinished aspects of the user's original objective
|
||||
|
||||
## Important Notes
|
||||
- Be specific about what outputs/results are available for the user
|
||||
- Avoid technical details about iterations - focus on valuable information
|
||||
- Keep the summary concise but informative
|
||||
@@ -0,0 +1,40 @@
|
||||
## Instruction
|
||||
You are an intelligent agent responsible for evaluating the execution results of tool_calls within the {category_name} category. Your job is to carefully analyze the user's objective and input data, assess whether the current tool execution results have successfully fulfilled the task, and determine whether further tool_calls are necessary. If necessary, directly call the most appropriate tool to further accomplish the task.
|
||||
|
||||
## Evaluation Guidelines
|
||||
|
||||
Carefully compare the execution results against the user's objective and input data.
|
||||
|
||||
**If the user's task is not complete**:
|
||||
|
||||
If the previous tool results are incomplete or inadequate for user's task, directly call the most appropriate tool to accomplish the remaining task.
|
||||
|
||||
**If the user's task is complete**:
|
||||
|
||||
- Synthesize a clear and informative summary that connects the core results of all executed tools with the user's original objective and input. **Do not generate any new tool_calls.**
|
||||
|
||||
## Important Notes
|
||||
|
||||
1. If there are unresolved parts of the task **and** there are tools in this category capable of addressing them, you **must** directly call the most appropriate tool. Only generate **one** new tool_call at a time.
|
||||
|
||||
- Always consider the results and context of previous tools when generating a new tool call — build on prior execution history to avoid redundancy and ensure progress.
|
||||
|
||||
2. If a previous tool call returned an Error:
|
||||
|
||||
- Carefully analyze the error message to determine its cause.
|
||||
|
||||
- If caused by incorrect or incomplete parameters, fix the parameters accordingly and retry with the appropriate tool.
|
||||
|
||||
- If caused by other unknown issues, consider **switching to another suitable tool** in this category and map its parameters correctly.
|
||||
|
||||
3. If you determine that the user's objective has been fully satisfied, **do not** generate any new tool calls. Only respond with a well-organized summary.
|
||||
|
||||
4. If multiple similar tasks were required by user and part of them were successfully completed by current tool, consider reusing the same tool with different args to finish the remaining parts when generating tool calls.
|
||||
|
||||
- If the result quality is poor or inadequate due to limitations of the current tool, consider switching to other more suitable tool when generating tool calls.
|
||||
|
||||
5. Thoughtfully adapt and assign the user's remaining **objective** and **input data** to the correct parameters for the selected tool. Consider both required and optional parameters when making the tool call.
|
||||
|
||||
6. Do not hallucinate or assume the output of any tool that has not yet been executed. Only include the tool call schema — do not provide analysis based on imaginary results!
|
||||
|
||||
7. **Do not attempt to engage in dialogue or ask the user for clarification** — your job is to independently plan and structure what tool should be called.
|
||||
20
Meta_tools/meta_tool_prompts/prompt_tool_selection.md
Normal file
20
Meta_tools/meta_tool_prompts/prompt_tool_selection.md
Normal file
@@ -0,0 +1,20 @@
|
||||
## Task
|
||||
You are an intelligent tool selector for the {category_name} category. Your job is to carefully analyze the user's objective and input data, then directly call the most appropriate tool(s) to accomplish the task.
|
||||
|
||||
## Selection Guidelines
|
||||
1. **Functional Relevance**: Select the tool(s) whose description best matches the user's stated objective
|
||||
2. **Input Compatibility**: Ensure the selected tool(s) can handle the provided input format
|
||||
3. **Parameter Mapping**: Map the user's objective and input to the corresponding parameters in the tool's schema. Interpret **both** required and optional parameters based on tool descriptions
|
||||
|
||||
## Important Notes
|
||||
- You have access to detailed descriptions and parameter schemas for all tools in this category
|
||||
- Carefully compare the functionality and parameter requirements of each tool before making a decision
|
||||
- Thoughtfully adapt and assign the user's **objective** and **input data** to the correct parameters for the selected tool
|
||||
- Consider both required and optional parameters when making the tool call
|
||||
|
||||
## Strict Constraints
|
||||
- Only use tools from the provided list. **Do not invent, assume, or call any tools not explicitly defined in this category.**
|
||||
|
||||
- If any required input content is missing or insufficient, do not generate tool calls. Instead, respond with a detailed description of all missing or ambiguous elements needed to proceed.
|
||||
|
||||
- If no tool in this category can accomplish the task due to functional limitations, do not generate tool calls. Instead, explain clearly why none of the tools in this category are suitable, and offer suggestions for other possible categories or actions to solve the objective.
|
||||
183
Meta_tools/readme.md
Normal file
183
Meta_tools/readme.md
Normal file
@@ -0,0 +1,183 @@
|
||||
# Meta Tools System for AgentScope
|
||||
|
||||
Meta Tools is an innovative extension for AgentScope designed to manage and invoke large, diverse toolsets efficiently.
|
||||
By using a layered architecture, it drastically reduces context length, lowers cognitive load, and improves tool selection accuracy.
|
||||
|
||||
## Background
|
||||
|
||||
In the native AgentScope framework, the `Toolkit` exposes all active tools directly to a `ReActAgent`. While this approach works well for smaller tool sets, it faces significant challenges when dealing with numerous and diverse tools:
|
||||
|
||||
1. **Excessive Context Length** – Long prompts reduce LLM reasoning efficiency
|
||||
2. **Cognitive Overload** – The agent must handle tasks *and* search through a large tool pool, which hurts performance
|
||||
|
||||
## Our Solution: Three-Layer Meta Tools Architecture
|
||||
|
||||
Meta Tools organizes tools into functional categories and applies intelligent selection within each category.
|
||||
From the main agent's perspective, only high-level categories are visible — the actual tools are selected internally.
|
||||
|
||||
```
|
||||
┌─────────────────────────┐
|
||||
│ Level 3: MetaManager │ ← Exposed to main agent, shows categories only
|
||||
└──────────┬──────────────┘
|
||||
│
|
||||
┌──────────▼──────────────┐
|
||||
│ Level 2: CategoryManager│ ← Manages category tools, multi-round reasoning
|
||||
└──────────┬──────────────┘
|
||||
│
|
||||
┌──────────▼──────────────┐
|
||||
│ Level 1: Normal Tool │ ← MCP tools, built-ins, custom functions
|
||||
└─────────────────────────┘
|
||||
```
|
||||
|
||||
**Benefits**:
|
||||
|
||||
- **Reduced Context Length**: Main agent only sees category-level abstractions, not individual tools
|
||||
- **Improved Selection Accuracy**: Specialized prompts and focused tool sets enhance selection precision
|
||||
- **Scalable Architecture**: System performance remains stable as tool count increases
|
||||
- **Agent-Level Decoupling**: Separates detailed tool selection from the agent, enabling focus on task decomposition and state monitoring
|
||||
|
||||

|
||||
|
||||
|
||||
## How to Run This Example
|
||||
|
||||
**Environment**
|
||||
|
||||
* **LLM API Key**: Configure your environment variable:
|
||||
```bash
|
||||
export DASHSCOPE_API_KEY="your_dashscope_api_key"
|
||||
export GAODE_API_KEY="your_gaode_api_key"
|
||||
export BING_API_KEY="your_bing_api_key_from_modelscope"
|
||||
export TRAIN_API_KEY="your_12306_api_key_from_modelscope"
|
||||
pip install agentscope
|
||||
```
|
||||
**MCP Service Setup**
|
||||
|
||||
* **Gaode Maps** (Navigation): [Gaode Open Platform](https://lbs.amap.com/)
|
||||
* **Bing Search** (Information Retrieval): [ModelScope MCP Service](https://www.modelscope.cn/mcp/servers/@yan5236/bing-cn-mcp-server)
|
||||
* **12306 Train Services** (Information Retrieval): [ModelScope MCP Service](https://www.modelscope.cn/mcp/servers/@Joooook/12306-mcp)
|
||||
|
||||
Set the corresponding SSE API keys in your environment variables.
|
||||
|
||||
**Run Example**
|
||||
|
||||
```bash
|
||||
cd Meta_tools
|
||||
python example.py
|
||||
```
|
||||
|
||||
The system automatically constructs the Meta Tool System based on the categories and tool descriptions defined in `Meta_tool_config.json`, and launches an interactive agent named Friday to assist you with various tasks across three main categories: Information Retrieval, Programming & Tech Support, and Location & Navigation.
|
||||
For example, you can try a prompt like:
|
||||
`I'm going to drive from West Lake in Hangzhou to Zhoushan. Please give me the driving route and a travel guide for Zhoushan.`
|
||||
|
||||
|
||||
## Key Features
|
||||
|
||||
### 1. Unified Interface Design
|
||||
From the external agent's perspective, each `CategoryManager` appears as a standard tool function with consistent schema:
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": "<category_name>",
|
||||
"description": "<category_description> This category automatically selects and operates the most appropriate tool based on your objective and input.",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"objective": {
|
||||
"type": "string",
|
||||
"description": "A clear and well-defined description of the goal you wish to accomplish using tools in this category."
|
||||
},
|
||||
"exact_input": {
|
||||
"type": "string",
|
||||
"description": "The precise, detailed, and complete input or query to be processed by the selected tool."
|
||||
}
|
||||
},
|
||||
"required": ["objective", "exact_input"]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
From the main agent's perspective, it only needs to call the corresponding high-level category tool and provide the `objective` and `exact_input` parameters to receive a complete and detailed execution flow along with a summary—without having to worry about which specific Level 1 tools were executed behind the scenes.
|
||||
|
||||
### 2. Intelligent Multi-Round Execution
|
||||
|
||||
* **Select → Execute → Evaluate → Continue/Summarize**: Each category starts by selecting the most suitable tool, executes it, evaluates the result, and decides whether to proceed with further actions or produce a final summary
|
||||
* **Isolated in-category memory context**: Maintains a dedicated memory space for each category, preserving execution history and reasoning steps without polluting the main agent's context
|
||||
* **Automatic recovery on failure with fallback tools**: If a selected tool fails, the system automatically retries with alternative tools and adjusted parameters to ensure task completion
|
||||
|
||||
### 3. Configuration-Driven Setup
|
||||
Once tools are added to the global toolkit, they are grouped into meta categories via `Meta_tool_config.json`:
|
||||
```json
|
||||
{
|
||||
"Information Retrieval": {
|
||||
"description": "...",
|
||||
"tool_usage_notes": "...",
|
||||
"tools": [...]
|
||||
}
|
||||
}
|
||||
```
|
||||
In the default configuration, `Meta_tool_config.json` defines a basic categorization for the MCP tools used in `example.py`. When building your own meta tool system, remember to update `Meta_tool_config.json` along with your agent file.
|
||||
|
||||
### 4. Execution Situations
|
||||
- **Normal Execution**: Complete execution history with results and comprehensive summaries
|
||||
|
||||
The Meta Tool system includes robust countermeasures for all potential issues it may encounter:
|
||||
|
||||
- **Insufficient Input**: Detailed explanation of missing required elements without tool execution
|
||||
- **No Suitable Tools**: Clear reasoning about functional limitations with alternative suggestions, without tool execution
|
||||
- **Execution Failures**: Automatic error recovery with alternative tool selection and parameter adjustment
|
||||
|
||||
## Advanced Customization
|
||||
|
||||
* **Custom Categories**: Edit `Meta_tool_config.json` to create new functional domains or reorganize existing tools into different categories
|
||||
* **Custom Prompts**: Modify the selection, evaluation, and summary templates in `meta_tool_prompts/` to adapt reasoning behavior for specific domains or workflows
|
||||
* **New Tools**: Integrate additional MCP services, built-in AgentScope tools, or custom local functions, and assign them to the most relevant categories for seamless integration
|
||||
|
||||
|
||||
Here is the execution flowchart inside a Meta tool category after it receives an instruction (objective and exact input) from the agent:
|
||||
```mermaid
|
||||
graph TD
|
||||
A["User request<br>objective + exact input"] --> B["Initial tool selection<br>LLM chooses tools"]
|
||||
B --> C{"Tool calls?"}
|
||||
|
||||
C --> D["Execute tool set<br>record results"]
|
||||
C --> E["Insufficient input<br>explain missing parts"]
|
||||
C --> F["No suitable tool<br>explain limitations"]
|
||||
|
||||
E --> G["Return to user<br>ask for more input"]
|
||||
F --> H["Return to user<br>suggest alternatives"]
|
||||
|
||||
D --> I["Result evaluation<br>check if goal is met"]
|
||||
I --> J{"Goal satisfied?"}
|
||||
|
||||
J --> K["Final summary<br>return SUCCESS"]
|
||||
J --> L["New tool calls<br>start next round"]
|
||||
|
||||
L --> M{"Check iterations"}
|
||||
M --> N["Below max limit<br>continue next iteration"]
|
||||
M --> O["Reached max limit<br>summarize all runs"]
|
||||
|
||||
N --> D
|
||||
|
||||
K --> P["Final result<br>task completed"]
|
||||
O --> Q["Final result<br>stopped by iteration limit"]
|
||||
|
||||
R["Temporary memory<br>store execution history"] -.-> I
|
||||
D -.-> R
|
||||
|
||||
style A fill:#e1f5fe
|
||||
style C fill:#fff3e0
|
||||
style E fill:#ffeb3b
|
||||
style F fill:#ff9800
|
||||
style G fill:#fff59d
|
||||
style H fill:#ffcc02
|
||||
style K fill:#c8e6c9
|
||||
style O fill:#ffcdd2
|
||||
style I fill:#fff3e0
|
||||
style J fill:#fce4ec
|
||||
```
|
||||
|
||||
You can freely integrate multiple custom tools into the system, organize them into your own Meta Tool categories, and tailor the workflow to your needs—give it a try and explore the possibilities!
|
||||
Reference in New Issue
Block a user