"""Tests for app.py functionality."""
from unittest.mock import patch
import pytest
from kg_services.ontology import MCPPrompt, MCPTool, PlannedStep
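# Note: the functions under test (_format_input_variables_info, _generate_enhanced_example,
# format_planned_step_for_display, handle_find_tools) are imported inside each test rather
# than at module level; per the note in TestHandleFindTools, this avoids circular imports
# between app.py and this test module during testing.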
class TestEnhancedInputVariableDisplay:
"""Test cases for enhanced input variable display functionality."""
def test_format_input_variables_no_inputs(self):
"""Test formatting when prompt has no input variables."""
from app import _format_input_variables_info
result = _format_input_variables_info(
[], "This is a simple template with no variables"
)
assert result["🟒 Status"] == "Ready to Use"
assert (
result["✨ Message"]
== "This prompt requires no input - you can use it immediately!"
)
assert result["🎯 Next Steps"] == "Simply click to execute this action plan"
assert len(result) == 3 # Status, message, and next steps
def test_format_input_variables_single_input(self):
"""Test formatting with single input variable."""
from app import _format_input_variables_info
input_vars = ["input_text"]
template = "Process this: {{input_text}}"
result = _format_input_variables_info(input_vars, template)
# Test Input Summary
input_summary = result["πŸ”§ Input Summary"]
assert input_summary["Total Variables Required"] == 1
assert input_summary["Complexity Level"] == "🟒 Simple (1 input)"
assert input_summary["Estimated Setup Time"] == "⚑ < 1 minute"
# Test Variable Details
variable_details = result["πŸ“‹ Variable Details"]
assert len(variable_details) == 1
var1 = variable_details[0]["πŸ“ Variable 1"]
assert var1["Name"] == "input_text"
assert var1["Placeholder"] == "{{input_text}}"
assert var1["Description"] == "The main text content you want to process"
assert var1["Required"] == "βœ… Yes"
# Test Ready-to-Use Example
example_section = result["🎯 Ready-to-Use Example"]
assert example_section["Original Template"] == "Process this: {{input_text}}"
assert "'your text here'" in example_section["With Sample Data"]
def test_format_input_variables_multiple_inputs(self):
"""Test formatting with multiple input variables."""
from app import _format_input_variables_info
input_vars = ["content", "method", "format"]
template = "Analyze {{content}} using {{method}} and output {{format}}"
result = _format_input_variables_info(input_vars, template)
# Test Input Summary
input_summary = result["πŸ”§ Input Summary"]
assert input_summary["Total Variables Required"] == 3
assert input_summary["Complexity Level"] == "🟑 Moderate (2-3 inputs)"
assert input_summary["Estimated Setup Time"] == "⏱️ 2-3 minutes"
# Test Variable Details
variable_details = result["πŸ“‹ Variable Details"]
assert len(variable_details) == 3
# Check each variable
var_names = [
    next(iter(var_details.values()))["Name"]
    for var_details in variable_details
]
assert "content" in var_names
assert "method" in var_names
assert "format" in var_names
# Test Usage Guide
usage_guide = result["πŸ’‘ Usage Guide"]
assert (
"πŸ”„ Replace all 3 placeholder(s) in the template" in usage_guide["Step 2"]
)
# Test Ready-to-Use Example
example_section = result["🎯 Ready-to-Use Example"]
assert (
example_section["Original Template"]
== "Analyze {{content}} using {{method}} and output {{format}}"
)
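# For reference, the complexity tiers asserted across these tests appear to map as follows
# (reconstructed from the expected values, not copied from app.py):
#   1 input    -> "🟒 Simple (1 input)",      "⚑ < 1 minute"
#   2-3 inputs -> "🟑 Moderate (2-3 inputs)", "⏱️ 2-3 minutes"
#   4+ inputs  -> "πŸ”΄ Complex (4+ inputs)",   "πŸ• 5+ minutes"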
@pytest.mark.parametrize(
("input_vars", "template", "expected_patterns"),
[
(
["input_text", "method", "feedback_text"],
"Process {{input_text}} using {{method}} for {{feedback_text}}",
[
"'your text here'",
"'systematic analysis'",
"'customer feedback about our service'",
],
),
(
["unknown_var", "custom_input"],
"Handle {{unknown_var}} and {{custom_input}}",
["'[your unknown var]'", "'[your custom input]'"],
),
(
["text", "unknown_param", "format"],
"Transform {{text}} with {{unknown_param}} to {{format}}",
[
"'sample text content'",
"'[your unknown param]'",
"'structured summary'",
],
),
(
["data", "analysis_type", "output"],
"Analyze {{data}} with {{analysis_type}} to generate {{output}}",
["'[your data]'", "'[your analysis type]'", "'[your output]'"],
),
],
)
def test_generate_enhanced_example_variables(
self, input_vars, template, expected_patterns
):
"""Test example generation with various variable patterns."""
from app import _generate_enhanced_example
result = _generate_enhanced_example(input_vars, template)
for pattern in expected_patterns:
assert pattern in result
assert "{{" not in result # All placeholders should be replaced
assert "}}" not in result
class TestFormatPlannedStepForDisplay:
"""Test cases for the format_planned_step_for_display function."""
def test_format_planned_step_basic(self):
"""Test basic formatting of PlannedStep for display."""
from app import format_planned_step_for_display
# Create sample tool
tool = MCPTool(
tool_id="test_tool_v1",
name="Test Tool",
description="A tool for testing",
tags=["test", "utility"],
invocation_command_stub="test_command --input {input}",
)
# Create sample prompt
prompt = MCPPrompt(
prompt_id="test_prompt_v1",
name="Test Prompt",
description="A prompt for testing",
target_tool_id="test_tool_v1",
template_string="Process this: {{input_text}} with {{method}}",
tags=["test", "example"],
input_variables=["input_text", "method"],
difficulty_level="intermediate",
)
# Create PlannedStep
planned_step = PlannedStep(tool=tool, prompt=prompt, relevance_score=0.85)
# Format for display
result = format_planned_step_for_display(planned_step)
# Assertions for top-level structure
assert "🎯 Action Plan" in result
assert "πŸ”§ Tool Information" in result
assert "πŸ“‹ Prompt Details" in result
assert "πŸ“ Input Requirements" in result
assert "🎯 Relevance Score" in result
# Test Action Plan
assert result["🎯 Action Plan"] == "Use 'Test Tool' with 'Test Prompt' prompt"
# Test Tool Information
tool_details = result["πŸ”§ Tool Information"]
assert tool_details["ID"] == "test_tool_v1"
assert tool_details["Name"] == "Test Tool"
assert tool_details["Description"] == "A tool for testing"
assert tool_details["Tags"] == "test, utility"
assert tool_details["Command Template"] == "test_command --input {input}"
# Test Prompt Details
prompt_details = result["πŸ“‹ Prompt Details"]
assert prompt_details["ID"] == "test_prompt_v1"
assert prompt_details["Name"] == "Test Prompt"
assert prompt_details["Description"] == "A prompt for testing"
assert (
prompt_details["Template"] == "Process this: {{input_text}} with {{method}}"
)
assert prompt_details["Difficulty"] == "Intermediate"
assert prompt_details["Tags"] == "test, example"
# Test Enhanced Input Requirements Structure
input_reqs = result["πŸ“ Input Requirements"]
# Test Input Summary
assert "πŸ”§ Input Summary" in input_reqs
input_summary = input_reqs["πŸ”§ Input Summary"]
assert input_summary["Total Variables Required"] == 2
assert input_summary["Complexity Level"] == "🟑 Moderate (2-3 inputs)"
assert input_summary["Estimated Setup Time"] == "⏱️ 2-3 minutes"
# Test Variable Details
assert "πŸ“‹ Variable Details" in input_reqs
variable_details = input_reqs["πŸ“‹ Variable Details"]
assert len(variable_details) == 2
# Check first variable
var1 = variable_details[0]["πŸ“ Variable 1"]
assert var1["Name"] == "input_text"
assert var1["Placeholder"] == "{{input_text}}"
assert var1["Description"] == "The main text content you want to process"
assert var1["Required"] == "βœ… Yes"
# Check second variable
var2 = variable_details[1]["πŸ“ Variable 2"]
assert var2["Name"] == "method"
assert var2["Placeholder"] == "{{method}}"
assert var2["Description"] == "Approach or method to use"
assert var2["Required"] == "βœ… Yes"
# Test Usage Guide
assert "πŸ’‘ Usage Guide" in input_reqs
usage_guide = input_reqs["πŸ’‘ Usage Guide"]
assert (
usage_guide["Step 1"]
== "πŸ“ Prepare your data for each variable listed above"
)
assert (
usage_guide["Step 2"] == "πŸ”„ Replace all 2 placeholder(s) in the template"
)
assert (
usage_guide["Step 3"]
== "πŸš€ Execute the action plan with your customized prompt"
)
# Test Ready-to-Use Example
assert "🎯 Ready-to-Use Example" in input_reqs
example_section = input_reqs["🎯 Ready-to-Use Example"]
assert (
example_section["Original Template"]
== "Process this: {{input_text}} with {{method}}"
)
assert "With Sample Data" in example_section
assert (
example_section["πŸ’‘ Tip"]
== "Replace the sample values with your actual data"
)
# Test Relevance Score
assert result["🎯 Relevance Score"] == "0.85"
def test_format_planned_step_empty_fields(self):
"""Test formatting with empty tags and variables."""
from app import format_planned_step_for_display
tool = MCPTool(
tool_id="minimal_tool",
name="Minimal Tool",
description="Minimal description",
tags=[], # Empty tags
invocation_command_stub="",
)
prompt = MCPPrompt(
prompt_id="minimal_prompt",
name="Minimal Prompt",
description="Minimal prompt",
target_tool_id="minimal_tool",
template_string="Simple template",
tags=[], # Empty tags
input_variables=[], # No variables
difficulty_level="beginner",
)
planned_step = PlannedStep(tool=tool, prompt=prompt) # No relevance score
result = format_planned_step_for_display(planned_step)
# Test empty fields handling
assert result["πŸ”§ Tool Information"]["Tags"] == "N/A"
assert result["πŸ“‹ Prompt Details"]["Tags"] == "N/A"
# Test enhanced no-input format
input_reqs = result["πŸ“ Input Requirements"]
assert input_reqs["🟒 Status"] == "Ready to Use"
assert (
input_reqs["✨ Message"]
== "This prompt requires no input - you can use it immediately!"
)
assert input_reqs["🎯 Next Steps"] == "Simply click to execute this action plan"
assert result["🎯 Relevance Score"] == "Not calculated"
# Test other fields still work
assert (
result["🎯 Action Plan"]
== "Use 'Minimal Tool' with 'Minimal Prompt' prompt"
)
assert result["πŸ”§ Tool Information"]["Name"] == "Minimal Tool"
assert result["πŸ“‹ Prompt Details"]["Template"] == "Simple template"
def test_format_planned_step_single_items(self):
"""Test formatting with single tag and variable."""
from app import format_planned_step_for_display
tool = MCPTool(
tool_id="single_tool",
name="Single Tool",
description="Tool with single tag",
tags=["nlp"], # Single tag
invocation_command_stub="process --text {input}",
)
prompt = MCPPrompt(
prompt_id="single_prompt",
name="Single Prompt",
description="Prompt with single variable",
target_tool_id="single_tool",
template_string="Analyze: {{text}}",
tags=["analysis"], # Single tag
input_variables=["text"], # Single variable
difficulty_level="advanced",
)
planned_step = PlannedStep(tool=tool, prompt=prompt, relevance_score=0.95)
result = format_planned_step_for_display(planned_step)
# Test single items (no comma separation)
assert result["πŸ”§ Tool Information"]["Tags"] == "nlp"
assert result["πŸ“‹ Prompt Details"]["Tags"] == "analysis"
# Test enhanced single input format
input_reqs = result["πŸ“ Input Requirements"]
input_summary = input_reqs["πŸ”§ Input Summary"]
assert input_summary["Total Variables Required"] == 1
assert input_summary["Complexity Level"] == "🟒 Simple (1 input)"
assert input_summary["Estimated Setup Time"] == "⚑ < 1 minute"
# Test single variable details
variable_details = input_reqs["πŸ“‹ Variable Details"]
assert len(variable_details) == 1
var1 = variable_details[0]["πŸ“ Variable 1"]
assert var1["Name"] == "text"
assert var1["Placeholder"] == "{{text}}"
assert var1["Description"] == "Text content for analysis or processing"
assert result["🎯 Relevance Score"] == "0.95"
def test_format_planned_step_complex_content(self):
"""Test formatting with complex content including special characters."""
from app import format_planned_step_for_display
tool = MCPTool(
tool_id="complex_tool_v2.1",
name="Complex Tool & Analyzer",
description="A tool with special chars: <>[]{}|\\",
tags=["complex", "special-chars", "v2"],
invocation_command_stub="complex-analyzer --input '{input}' --format json",
)
prompt = MCPPrompt(
prompt_id="complex_prompt_v1",
name="Complex Analysis Prompt",
description="Handle complex analysis with multiple parameters",
target_tool_id="complex_tool_v2.1",
template_string="Analyze {{input_data}} with parameters: {{param1}}, {{param2}}, {{param3}}",
tags=["analysis", "multi-param", "complex"],
input_variables=["input_data", "param1", "param2", "param3"],
difficulty_level="advanced",
)
planned_step = PlannedStep(tool=tool, prompt=prompt, relevance_score=0.42)
result = format_planned_step_for_display(planned_step)
# Test complex content handling
assert "Complex Tool & Analyzer" in result["🎯 Action Plan"]
assert (
result["πŸ”§ Tool Information"]["Description"]
== "A tool with special chars: <>[]{}|\\"
)
assert result["πŸ”§ Tool Information"]["Tags"] == "complex, special-chars, v2"
# Test enhanced complex input format
input_reqs = result["πŸ“ Input Requirements"]
input_summary = input_reqs["πŸ”§ Input Summary"]
assert input_summary["Total Variables Required"] == 4
assert input_summary["Complexity Level"] == "πŸ”΄ Complex (4+ inputs)"
assert input_summary["Estimated Setup Time"] == "πŸ• 5+ minutes"
# Test complex variable details
variable_details = input_reqs["πŸ“‹ Variable Details"]
assert len(variable_details) == 4
# Check that all variables are present
var_names = [next(iter(var.keys())) for var in variable_details]
expected_vars = [
"πŸ“ Variable 1",
"πŸ“ Variable 2",
"πŸ“ Variable 3",
"πŸ“ Variable 4",
]
assert var_names == expected_vars
assert result["πŸ“‹ Prompt Details"]["Difficulty"] == "Advanced"
assert result["🎯 Relevance Score"] == "0.42"
def test_format_planned_step_zero_relevance_score(self):
"""Test formatting with zero relevance score (valid but low)."""
from app import format_planned_step_for_display
tool = MCPTool(
tool_id="zero_tool",
name="Zero Tool",
description="Tool with zero relevance",
tags=["test"],
invocation_command_stub="zero --input {data}",
)
prompt = MCPPrompt(
prompt_id="zero_prompt",
name="Zero Prompt",
description="Prompt with zero relevance",
target_tool_id="zero_tool",
template_string="Process: {{data}}",
tags=["test"],
input_variables=["data"],
difficulty_level="beginner",
)
planned_step = PlannedStep(tool=tool, prompt=prompt, relevance_score=0.0)
result = format_planned_step_for_display(planned_step)
# Zero should be preserved, not treated as falsy
assert result["🎯 Relevance Score"] == "0.00"
def test_format_planned_step_type_consistency(self):
"""Test that the output structure is consistent and typed correctly."""
from app import format_planned_step_for_display
tool = MCPTool(
tool_id="type_tool",
name="Type Tool",
description="Tool for type testing",
tags=["typing"],
invocation_command_stub="type-test --input {data}",
)
prompt = MCPPrompt(
prompt_id="type_prompt",
name="Type Prompt",
description="Prompt for type testing",
target_tool_id="type_tool",
template_string="Type test: {{data}}",
tags=["typing"],
input_variables=["data"],
difficulty_level="intermediate",
)
planned_step = PlannedStep(tool=tool, prompt=prompt, relevance_score=0.75)
result = format_planned_step_for_display(planned_step)
# Test return type is Dict[str, Any]
assert isinstance(result, dict)
# Test required top-level keys exist
required_keys = [
"🎯 Action Plan",
"πŸ”§ Tool Information",
"πŸ“‹ Prompt Details",
"πŸ“ Input Requirements",
"🎯 Relevance Score",
]
for key in required_keys:
assert key in result
# Test nested dictionaries
assert isinstance(result["πŸ”§ Tool Information"], dict)
assert isinstance(result["πŸ“‹ Prompt Details"], dict)
assert isinstance(result["πŸ“ Input Requirements"], dict)
# Test string values
assert isinstance(result["🎯 Action Plan"], str)
assert isinstance(result["πŸ”§ Tool Information"]["Name"], str)
assert isinstance(result["πŸ“‹ Prompt Details"]["Template"], str)
assert isinstance(result["🎯 Relevance Score"], str)
# Test enhanced input requirements structure
input_reqs = result["πŸ“ Input Requirements"]
# Test Input Summary structure
assert "πŸ”§ Input Summary" in input_reqs
input_summary = input_reqs["πŸ”§ Input Summary"]
assert isinstance(input_summary["Total Variables Required"], int)
assert isinstance(input_summary["Complexity Level"], str)
assert isinstance(input_summary["Estimated Setup Time"], str)
# Test Variable Details structure
assert "πŸ“‹ Variable Details" in input_reqs
variable_details = input_reqs["πŸ“‹ Variable Details"]
assert isinstance(variable_details, list)
assert len(variable_details) == 1 # One variable
# Test individual variable structure
var1 = variable_details[0]["πŸ“ Variable 1"]
assert isinstance(var1["Name"], str)
assert isinstance(var1["Placeholder"], str)
assert isinstance(var1["Description"], str)
assert isinstance(var1["Required"], str)
# Test Usage Guide structure
assert "πŸ’‘ Usage Guide" in input_reqs
usage_guide = input_reqs["πŸ’‘ Usage Guide"]
assert isinstance(usage_guide["Step 1"], str)
assert isinstance(usage_guide["Step 2"], str)
assert isinstance(usage_guide["Step 3"], str)
# Test Ready-to-Use Example structure
assert "🎯 Ready-to-Use Example" in input_reqs
example_section = input_reqs["🎯 Ready-to-Use Example"]
assert isinstance(example_section["Original Template"], str)
assert isinstance(example_section["With Sample Data"], str)
assert isinstance(example_section["πŸ’‘ Tip"], str)
class TestHandleFindTools:
"""Test cases for the handle_find_tools function."""
def test_handle_find_tools_success(self):
"""Test successful enhanced planning with PlannedStep objects."""
# Import here to avoid circular imports during testing
from app import handle_find_tools
# Create mock MCPTool and MCPPrompt objects
mock_tool = MCPTool(
tool_id="sentiment-analyzer",
name="Sentiment Analyzer",
description="Analyzes sentiment of text",
tags=["text", "sentiment", "analysis"],
invocation_command_stub="sentiment --text {text}",
)
mock_prompt = MCPPrompt(
prompt_id="sentiment_customer_v1",
name="Customer Feedback Analysis",
description="Analyze customer feedback sentiment",
target_tool_id="sentiment-analyzer",
template_string="Analyze sentiment: {{feedback_text}}",
tags=["customer", "feedback"],
input_variables=["feedback_text"],
difficulty_level="beginner",
)
mock_planned_step = PlannedStep(
tool=mock_tool, prompt=mock_prompt, relevance_score=0.92
)
# Mock the global planner_agent
with patch("app.planner_agent") as mock_agent:
mock_agent.generate_plan.return_value = [mock_planned_step]
result = handle_find_tools("analyze text sentiment")
# Extract the first element (JSON result) from the tuple
json_result = result[0]
# Verify the result structure
assert json_result["status"] == "success"
assert "Generated 1 personalized action plan(s)" in json_result["message"]
assert json_result["query"] == "analyze text sentiment"
assert json_result["query_summary"] == "Your goal: 'analyze text sentiment'"
assert json_result["total_steps"] == 1
assert len(json_result["planned_steps"]) == 1
assert "next_steps" in json_result
# Check formatted step structure
formatted_step = json_result["planned_steps"][0]
assert "🎯 Action Plan" in formatted_step
assert "πŸ”§ Tool Information" in formatted_step
assert "πŸ“‹ Prompt Details" in formatted_step
assert "πŸ“ Input Requirements" in formatted_step
assert "🎯 Relevance Score" in formatted_step
# Verify specific content
assert (
formatted_step["🎯 Action Plan"]
== "Use 'Sentiment Analyzer' with 'Customer Feedback Analysis' prompt"
)
assert formatted_step["πŸ”§ Tool Information"]["Name"] == "Sentiment Analyzer"
assert (
formatted_step["πŸ“‹ Prompt Details"]["Template"]
== "Analyze sentiment: {{feedback_text}}"
)
assert formatted_step["🎯 Relevance Score"] == "0.92"
# Verify enhanced input requirements structure
input_reqs = formatted_step["πŸ“ Input Requirements"]
input_summary = input_reqs["πŸ”§ Input Summary"]
assert input_summary["Total Variables Required"] == 1
assert input_summary["Complexity Level"] == "🟒 Simple (1 input)"
# Verify planner was called correctly
mock_agent.generate_plan.assert_called_once_with(
"analyze text sentiment", top_k=3
)
def test_handle_find_tools_no_agent(self):
"""Test behavior when planner agent is not initialized."""
from app import handle_find_tools
# Mock planner_agent as None
with patch("app.planner_agent", None):
result = handle_find_tools("test query")
json_result = result[0]
assert json_result["status"] == "error"
assert "System Error" in json_result["message"]
assert "Agent system not initialized" in json_result["message"]
assert json_result["error_type"] == "system_error"
assert json_result["planned_steps"] == []
assert "troubleshooting" in json_result
def test_handle_find_tools_empty_query(self):
"""Test behavior with empty query."""
from app import handle_find_tools
with patch("app.planner_agent") as mock_agent:
# Test empty string
result = handle_find_tools("")
json_result = result[0]
assert json_result["status"] == "error"
assert "Input Required" in json_result["message"]
assert "describe what you'd like to accomplish" in json_result["message"]
assert json_result["error_type"] == "user_input"
assert json_result["planned_steps"] == []
assert "suggestion" in json_result
# Test whitespace only
result = handle_find_tools(" ")
json_result = result[0]
assert json_result["status"] == "error"
assert "Input Required" in json_result["message"]
assert json_result["error_type"] == "user_input"
assert json_result["planned_steps"] == []
# Ensure agent wasn't called
mock_agent.generate_plan.assert_not_called()
def test_handle_find_tools_no_results(self):
"""Test behavior when no planned steps are found."""
from app import handle_find_tools
with patch("app.planner_agent") as mock_agent:
mock_agent.generate_plan.return_value = []
result = handle_find_tools("nonexistent tool type")
json_result = result[0]
assert json_result["status"] == "no_results"
assert "No Matches Found" in json_result["message"]
assert json_result["query"] == "nonexistent tool type"
assert json_result["query_summary"] == "Your goal: 'nonexistent tool type'"
assert json_result["planned_steps"] == []
assert "suggestions" in json_result
assert "available_capabilities" in json_result
mock_agent.generate_plan.assert_called_once_with(
"nonexistent tool type", top_k=3
)
def test_handle_find_tools_exception(self):
"""Test error handling when agent raises exception."""
from app import handle_find_tools
with patch("app.planner_agent") as mock_agent:
mock_agent.generate_plan.side_effect = Exception("API failure")
result = handle_find_tools("test query")
json_result = result[0]
assert json_result["status"] == "error"
assert "Processing Error" in json_result["message"]
assert json_result["error_type"] == "processing_error"
assert json_result["technical_details"] == "API failure"
assert json_result["query"] == "test query"
assert json_result["planned_steps"] == []
assert "troubleshooting" in json_result
def test_handle_find_tools_logging(self):
"""Test that proper logging occurs."""
from app import handle_find_tools
mock_tool = MCPTool(
tool_id="test-tool",
name="Test Tool",
description="A test tool",
tags=["test"],
invocation_command_stub="test --input {text}",
)
mock_prompt = MCPPrompt(
prompt_id="test_prompt",
name="Test Prompt",
description="A test prompt",
target_tool_id="test-tool",
template_string="Test: {{input}}",
tags=["test"],
input_variables=["input"],
difficulty_level="beginner",
)
mock_planned_step = PlannedStep(tool=mock_tool, prompt=mock_prompt)
with (
patch("app.planner_agent") as mock_agent,
patch("app.logger") as mock_logger,
):
mock_agent.generate_plan.return_value = [mock_planned_step]
handle_find_tools("test query")
# Verify that the expected log messages were emitted, matching on substrings
# rather than exact wording
log_calls = [call.args[0] for call in mock_logger.info.call_args_list]
# Check that processing message was logged
processing_logged = any(
"Processing enhanced planning request" in msg for msg in log_calls
)
assert processing_logged, f"Processing log not found in: {log_calls}"
# Check that success message was logged
success_logged = any(
"Successfully generated" in msg and "planned steps for query" in msg
for msg in log_calls
)
assert success_logged, f"Success log not found in: {log_calls}"