"""Tests for app.py functionality.""" |
|
|
|
|
|
from unittest.mock import patch |
|
|
|
|
|
import pytest |
|
|
|
|
|
from kg_services.ontology import MCPPrompt, MCPTool, PlannedStep |
|
|
|
|
|
|
|
|
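# NOTE: helpers from app (e.g. _format_input_variables_info) are imported
# inside each test rather than at module scope, presumably so that importing
# this test module does not trigger app-level initialization side effects.
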
class TestEnhancedInputVariableDisplay:
    """Test cases for enhanced input variable display functionality."""
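    # _format_input_variables_info appears to return one of two dict shapes: a
    # "ready to use" shape (🟢 Status / ✨ Message / 🎯 Next Steps) when no
    # variables are required, and a detailed shape (🔧 Input Summary,
    # 📋 Variable Details, 💡 Usage Guide, 🎯 Ready-to-Use Example) otherwise.
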
    def test_format_input_variables_no_inputs(self):
        """Test formatting when prompt has no input variables."""
        from app import _format_input_variables_info

        result = _format_input_variables_info(
            [], "This is a simple template with no variables"
        )

        assert result["🟢 Status"] == "Ready to Use"
        assert (
            result["✨ Message"]
            == "This prompt requires no input - you can use it immediately!"
        )
        assert result["🎯 Next Steps"] == "Simply click to execute this action plan"
        assert len(result) == 3

    def test_format_input_variables_single_input(self):
        """Test formatting with single input variable."""
        from app import _format_input_variables_info

        input_vars = ["input_text"]
        template = "Process this: {{input_text}}"

        result = _format_input_variables_info(input_vars, template)

        input_summary = result["🔧 Input Summary"]
        assert input_summary["Total Variables Required"] == 1
        assert input_summary["Complexity Level"] == "🟢 Simple (1 input)"
        assert input_summary["Estimated Setup Time"] == "⚡ < 1 minute"

        variable_details = result["📋 Variable Details"]
        assert len(variable_details) == 1
        var1 = variable_details[0]["📌 Variable 1"]
        assert var1["Name"] == "input_text"
        assert var1["Placeholder"] == "{{input_text}}"
        assert var1["Description"] == "The main text content you want to process"
        assert var1["Required"] == "✅ Yes"

        example_section = result["🎯 Ready-to-Use Example"]
        assert example_section["Original Template"] == "Process this: {{input_text}}"
        assert "'your text here'" in example_section["With Sample Data"]

    def test_format_input_variables_multiple_inputs(self):
        """Test formatting with multiple input variables."""
        from app import _format_input_variables_info

        input_vars = ["content", "method", "format"]
        template = "Analyze {{content}} using {{method}} and output {{format}}"

        result = _format_input_variables_info(input_vars, template)

        input_summary = result["🔧 Input Summary"]
        assert input_summary["Total Variables Required"] == 3
        assert input_summary["Complexity Level"] == "🟡 Moderate (2-3 inputs)"
        assert input_summary["Estimated Setup Time"] == "⏱️ 2-3 minutes"

        variable_details = result["📋 Variable Details"]
        assert len(variable_details) == 3

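        # Each entry of variable_details is a single-key dict
        # ("📌 Variable N" -> details), so next(iter(keys)) recovers that key
        # and the lookup yields the per-variable details dict.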
        var_names = [
            var_details[next(iter(var_details.keys()))]["Name"]
            for var_details in variable_details
        ]
        assert "content" in var_names
        assert "method" in var_names
        assert "format" in var_names

        usage_guide = result["💡 Usage Guide"]
        assert (
            "🔄 Replace all 3 placeholder(s) in the template" in usage_guide["Step 2"]
        )

        example_section = result["🎯 Ready-to-Use Example"]
        assert (
            example_section["Original Template"]
            == "Analyze {{content}} using {{method}} and output {{format}}"
        )

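    # These cases appear to exercise the sample-value lookup in
    # _generate_enhanced_example: names with curated samples (input_text,
    # method, feedback_text, text, format) get realistic strings, while
    # anything else (including data, analysis_type, output) falls back to a
    # bracketed "[your variable name]" placeholder.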
    @pytest.mark.parametrize(
        ("input_vars", "template", "expected_patterns"),
        [
            (
                ["input_text", "method", "feedback_text"],
                "Process {{input_text}} using {{method}} for {{feedback_text}}",
                [
                    "'your text here'",
                    "'systematic analysis'",
                    "'customer feedback about our service'",
                ],
            ),
            (
                ["unknown_var", "custom_input"],
                "Handle {{unknown_var}} and {{custom_input}}",
                ["'[your unknown var]'", "'[your custom input]'"],
            ),
            (
                ["text", "unknown_param", "format"],
                "Transform {{text}} with {{unknown_param}} to {{format}}",
                [
                    "'sample text content'",
                    "'[your unknown param]'",
                    "'structured summary'",
                ],
            ),
            (
                ["data", "analysis_type", "output"],
                "Analyze {{data}} with {{analysis_type}} to generate {{output}}",
                ["'[your data]'", "'[your analysis type]'", "'[your output]'"],
            ),
        ],
    )
    def test_generate_enhanced_example_variables(
        self, input_vars, template, expected_patterns
    ):
        """Test example generation with various variable patterns."""
        from app import _generate_enhanced_example

        result = _generate_enhanced_example(input_vars, template)

        for pattern in expected_patterns:
            assert pattern in result
        assert "{{" not in result
        assert "}}" not in result


class TestFormatPlannedStepForDisplay:
    """Test cases for the format_planned_step_for_display function."""
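    # format_planned_step_for_display appears to render a PlannedStep as a
    # nested dict of display sections; the relevance score comes back as a
    # two-decimal string (e.g. "0.85"), or "Not calculated" when unset.
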
    def test_format_planned_step_basic(self):
        """Test basic formatting of PlannedStep for display."""
        from app import format_planned_step_for_display

        tool = MCPTool(
            tool_id="test_tool_v1",
            name="Test Tool",
            description="A tool for testing",
            tags=["test", "utility"],
            invocation_command_stub="test_command --input {input}",
        )

        prompt = MCPPrompt(
            prompt_id="test_prompt_v1",
            name="Test Prompt",
            description="A prompt for testing",
            target_tool_id="test_tool_v1",
            template_string="Process this: {{input_text}} with {{method}}",
            tags=["test", "example"],
            input_variables=["input_text", "method"],
            difficulty_level="intermediate",
        )

        planned_step = PlannedStep(tool=tool, prompt=prompt, relevance_score=0.85)

        result = format_planned_step_for_display(planned_step)

        assert "🎯 Action Plan" in result
        assert "🔧 Tool Information" in result
        assert "📝 Prompt Details" in result
        assert "📋 Input Requirements" in result
        assert "🎯 Relevance Score" in result

        assert result["🎯 Action Plan"] == "Use 'Test Tool' with 'Test Prompt' prompt"

        tool_details = result["🔧 Tool Information"]
        assert tool_details["ID"] == "test_tool_v1"
        assert tool_details["Name"] == "Test Tool"
        assert tool_details["Description"] == "A tool for testing"
        assert tool_details["Tags"] == "test, utility"
        assert tool_details["Command Template"] == "test_command --input {input}"

        prompt_details = result["📝 Prompt Details"]
        assert prompt_details["ID"] == "test_prompt_v1"
        assert prompt_details["Name"] == "Test Prompt"
        assert prompt_details["Description"] == "A prompt for testing"
        assert (
            prompt_details["Template"] == "Process this: {{input_text}} with {{method}}"
        )
        assert prompt_details["Difficulty"] == "Intermediate"
        assert prompt_details["Tags"] == "test, example"

        input_reqs = result["📋 Input Requirements"]

        assert "🔧 Input Summary" in input_reqs
        input_summary = input_reqs["🔧 Input Summary"]
        assert input_summary["Total Variables Required"] == 2
        assert input_summary["Complexity Level"] == "🟡 Moderate (2-3 inputs)"
        assert input_summary["Estimated Setup Time"] == "⏱️ 2-3 minutes"

        assert "📋 Variable Details" in input_reqs
        variable_details = input_reqs["📋 Variable Details"]
        assert len(variable_details) == 2

        var1 = variable_details[0]["📌 Variable 1"]
        assert var1["Name"] == "input_text"
        assert var1["Placeholder"] == "{{input_text}}"
        assert var1["Description"] == "The main text content you want to process"
        assert var1["Required"] == "✅ Yes"

        var2 = variable_details[1]["📌 Variable 2"]
        assert var2["Name"] == "method"
        assert var2["Placeholder"] == "{{method}}"
        assert var2["Description"] == "Approach or method to use"
        assert var2["Required"] == "✅ Yes"

        assert "💡 Usage Guide" in input_reqs
        usage_guide = input_reqs["💡 Usage Guide"]
        assert (
            usage_guide["Step 1"]
            == "📝 Prepare your data for each variable listed above"
        )
        assert (
            usage_guide["Step 2"] == "🔄 Replace all 2 placeholder(s) in the template"
        )
        assert (
            usage_guide["Step 3"]
            == "🚀 Execute the action plan with your customized prompt"
        )

        assert "🎯 Ready-to-Use Example" in input_reqs
        example_section = input_reqs["🎯 Ready-to-Use Example"]
        assert (
            example_section["Original Template"]
            == "Process this: {{input_text}} with {{method}}"
        )
        assert "With Sample Data" in example_section
        assert (
            example_section["💡 Tip"]
            == "Replace the sample values with your actual data"
        )

        assert result["🎯 Relevance Score"] == "0.85"

    def test_format_planned_step_empty_fields(self):
        """Test formatting with empty tags and variables."""
        from app import format_planned_step_for_display

        tool = MCPTool(
            tool_id="minimal_tool",
            name="Minimal Tool",
            description="Minimal description",
            tags=[],
            invocation_command_stub="",
        )

        prompt = MCPPrompt(
            prompt_id="minimal_prompt",
            name="Minimal Prompt",
            description="Minimal prompt",
            target_tool_id="minimal_tool",
            template_string="Simple template",
            tags=[],
            input_variables=[],
            difficulty_level="beginner",
        )

        planned_step = PlannedStep(tool=tool, prompt=prompt)

        result = format_planned_step_for_display(planned_step)

        assert result["🔧 Tool Information"]["Tags"] == "N/A"
        assert result["📝 Prompt Details"]["Tags"] == "N/A"

        input_reqs = result["📋 Input Requirements"]
        assert input_reqs["🟢 Status"] == "Ready to Use"
        assert (
            input_reqs["✨ Message"]
            == "This prompt requires no input - you can use it immediately!"
        )
        assert input_reqs["🎯 Next Steps"] == "Simply click to execute this action plan"

        assert result["🎯 Relevance Score"] == "Not calculated"

        assert (
            result["🎯 Action Plan"]
            == "Use 'Minimal Tool' with 'Minimal Prompt' prompt"
        )
        assert result["🔧 Tool Information"]["Name"] == "Minimal Tool"
        assert result["📝 Prompt Details"]["Template"] == "Simple template"

    def test_format_planned_step_single_items(self):
        """Test formatting with single tag and variable."""
        from app import format_planned_step_for_display

        tool = MCPTool(
            tool_id="single_tool",
            name="Single Tool",
            description="Tool with single tag",
            tags=["nlp"],
            invocation_command_stub="process --text {input}",
        )

        prompt = MCPPrompt(
            prompt_id="single_prompt",
            name="Single Prompt",
            description="Prompt with single variable",
            target_tool_id="single_tool",
            template_string="Analyze: {{text}}",
            tags=["analysis"],
            input_variables=["text"],
            difficulty_level="advanced",
        )

        planned_step = PlannedStep(tool=tool, prompt=prompt, relevance_score=0.95)

        result = format_planned_step_for_display(planned_step)

        assert result["🔧 Tool Information"]["Tags"] == "nlp"
        assert result["📝 Prompt Details"]["Tags"] == "analysis"

        input_reqs = result["📋 Input Requirements"]
        input_summary = input_reqs["🔧 Input Summary"]
        assert input_summary["Total Variables Required"] == 1
        assert input_summary["Complexity Level"] == "🟢 Simple (1 input)"
        assert input_summary["Estimated Setup Time"] == "⚡ < 1 minute"

        variable_details = input_reqs["📋 Variable Details"]
        assert len(variable_details) == 1
        var1 = variable_details[0]["📌 Variable 1"]
        assert var1["Name"] == "text"
        assert var1["Placeholder"] == "{{text}}"
        assert var1["Description"] == "Text content for analysis or processing"

        assert result["🎯 Relevance Score"] == "0.95"

    def test_format_planned_step_complex_content(self):
        """Test formatting with complex content including special characters."""
        from app import format_planned_step_for_display

        tool = MCPTool(
            tool_id="complex_tool_v2.1",
            name="Complex Tool & Analyzer",
            description="A tool with special chars: <>[]{}|\\",
            tags=["complex", "special-chars", "v2"],
            invocation_command_stub="complex-analyzer --input '{input}' --format json",
        )

        prompt = MCPPrompt(
            prompt_id="complex_prompt_v1",
            name="Complex Analysis Prompt",
            description="Handle complex analysis with multiple parameters",
            target_tool_id="complex_tool_v2.1",
            template_string="Analyze {{input_data}} with parameters: {{param1}}, {{param2}}, {{param3}}",
            tags=["analysis", "multi-param", "complex"],
            input_variables=["input_data", "param1", "param2", "param3"],
            difficulty_level="advanced",
        )

        planned_step = PlannedStep(tool=tool, prompt=prompt, relevance_score=0.42)

        result = format_planned_step_for_display(planned_step)

        assert "Complex Tool & Analyzer" in result["🎯 Action Plan"]
        assert (
            result["🔧 Tool Information"]["Description"]
            == "A tool with special chars: <>[]{}|\\"
        )
        assert result["🔧 Tool Information"]["Tags"] == "complex, special-chars, v2"

        input_reqs = result["📋 Input Requirements"]
        input_summary = input_reqs["🔧 Input Summary"]
        assert input_summary["Total Variables Required"] == 4
        assert input_summary["Complexity Level"] == "🔴 Complex (4+ inputs)"
        assert input_summary["Estimated Setup Time"] == "🕐 5+ minutes"

        variable_details = input_reqs["📋 Variable Details"]
        assert len(variable_details) == 4

        var_names = [next(iter(var.keys())) for var in variable_details]
        expected_vars = [
            "📌 Variable 1",
            "📌 Variable 2",
            "📌 Variable 3",
            "📌 Variable 4",
        ]
        assert var_names == expected_vars

        assert result["📝 Prompt Details"]["Difficulty"] == "Advanced"
        assert result["🎯 Relevance Score"] == "0.42"

    def test_format_planned_step_zero_relevance_score(self):
        """Test formatting with zero relevance score (valid but low)."""
        from app import format_planned_step_for_display

        tool = MCPTool(
            tool_id="zero_tool",
            name="Zero Tool",
            description="Tool with zero relevance",
            tags=["test"],
            invocation_command_stub="zero --input {data}",
        )

        prompt = MCPPrompt(
            prompt_id="zero_prompt",
            name="Zero Prompt",
            description="Prompt with zero relevance",
            target_tool_id="zero_tool",
            template_string="Process: {{data}}",
            tags=["test"],
            input_variables=["data"],
            difficulty_level="beginner",
        )

        planned_step = PlannedStep(tool=tool, prompt=prompt, relevance_score=0.0)

        result = format_planned_step_for_display(planned_step)

        assert result["🎯 Relevance Score"] == "0.00"

    def test_format_planned_step_type_consistency(self):
        """Test that the output structure is consistent and typed correctly."""
        from app import format_planned_step_for_display

        tool = MCPTool(
            tool_id="type_tool",
            name="Type Tool",
            description="Tool for type testing",
            tags=["typing"],
            invocation_command_stub="type-test --input {data}",
        )

        prompt = MCPPrompt(
            prompt_id="type_prompt",
            name="Type Prompt",
            description="Prompt for type testing",
            target_tool_id="type_tool",
            template_string="Type test: {{data}}",
            tags=["typing"],
            input_variables=["data"],
            difficulty_level="intermediate",
        )

        planned_step = PlannedStep(tool=tool, prompt=prompt, relevance_score=0.75)

        result = format_planned_step_for_display(planned_step)

        assert isinstance(result, dict)

        required_keys = [
            "🎯 Action Plan",
            "🔧 Tool Information",
            "📝 Prompt Details",
            "📋 Input Requirements",
            "🎯 Relevance Score",
        ]
        for key in required_keys:
            assert key in result

        assert isinstance(result["🔧 Tool Information"], dict)
        assert isinstance(result["📝 Prompt Details"], dict)
        assert isinstance(result["📋 Input Requirements"], dict)

        assert isinstance(result["🎯 Action Plan"], str)
        assert isinstance(result["🔧 Tool Information"]["Name"], str)
        assert isinstance(result["📝 Prompt Details"]["Template"], str)
        assert isinstance(result["🎯 Relevance Score"], str)

        input_reqs = result["📋 Input Requirements"]

        assert "🔧 Input Summary" in input_reqs
        input_summary = input_reqs["🔧 Input Summary"]
        assert isinstance(input_summary["Total Variables Required"], int)
        assert isinstance(input_summary["Complexity Level"], str)
        assert isinstance(input_summary["Estimated Setup Time"], str)

        assert "📋 Variable Details" in input_reqs
        variable_details = input_reqs["📋 Variable Details"]
        assert isinstance(variable_details, list)
        assert len(variable_details) == 1

        var1 = variable_details[0]["📌 Variable 1"]
        assert isinstance(var1["Name"], str)
        assert isinstance(var1["Placeholder"], str)
        assert isinstance(var1["Description"], str)
        assert isinstance(var1["Required"], str)

        assert "💡 Usage Guide" in input_reqs
        usage_guide = input_reqs["💡 Usage Guide"]
        assert isinstance(usage_guide["Step 1"], str)
        assert isinstance(usage_guide["Step 2"], str)
        assert isinstance(usage_guide["Step 3"], str)

        assert "🎯 Ready-to-Use Example" in input_reqs
        example_section = input_reqs["🎯 Ready-to-Use Example"]
        assert isinstance(example_section["Original Template"], str)
        assert isinstance(example_section["With Sample Data"], str)
        assert isinstance(example_section["💡 Tip"], str)


class TestHandleFindTools:
    """Test cases for the handle_find_tools function."""
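    # handle_find_tools appears to return a sequence whose first element is
    # the JSON-style payload dict, so the tests below unpack it as result[0].
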
    def test_handle_find_tools_success(self):
        """Test successful enhanced planning with PlannedStep objects."""
        from app import handle_find_tools

        mock_tool = MCPTool(
            tool_id="sentiment-analyzer",
            name="Sentiment Analyzer",
            description="Analyzes sentiment of text",
            tags=["text", "sentiment", "analysis"],
            invocation_command_stub="sentiment --text {text}",
        )

        mock_prompt = MCPPrompt(
            prompt_id="sentiment_customer_v1",
            name="Customer Feedback Analysis",
            description="Analyze customer feedback sentiment",
            target_tool_id="sentiment-analyzer",
            template_string="Analyze sentiment: {{feedback_text}}",
            tags=["customer", "feedback"],
            input_variables=["feedback_text"],
            difficulty_level="beginner",
        )

        mock_planned_step = PlannedStep(
            tool=mock_tool, prompt=mock_prompt, relevance_score=0.92
        )

        with patch("app.planner_agent") as mock_agent:
            mock_agent.generate_plan.return_value = [mock_planned_step]

            result = handle_find_tools("analyze text sentiment")

            json_result = result[0]

            assert json_result["status"] == "success"
            assert "Generated 1 personalized action plan(s)" in json_result["message"]
            assert json_result["query"] == "analyze text sentiment"
            assert json_result["query_summary"] == "Your goal: 'analyze text sentiment'"
            assert json_result["total_steps"] == 1
            assert len(json_result["planned_steps"]) == 1
            assert "next_steps" in json_result

            formatted_step = json_result["planned_steps"][0]
            assert "🎯 Action Plan" in formatted_step
            assert "🔧 Tool Information" in formatted_step
            assert "📝 Prompt Details" in formatted_step
            assert "📋 Input Requirements" in formatted_step
            assert "🎯 Relevance Score" in formatted_step

            assert (
                formatted_step["🎯 Action Plan"]
                == "Use 'Sentiment Analyzer' with 'Customer Feedback Analysis' prompt"
            )
            assert formatted_step["🔧 Tool Information"]["Name"] == "Sentiment Analyzer"
            assert (
                formatted_step["📝 Prompt Details"]["Template"]
                == "Analyze sentiment: {{feedback_text}}"
            )
            assert formatted_step["🎯 Relevance Score"] == "0.92"

            input_reqs = formatted_step["📋 Input Requirements"]
            input_summary = input_reqs["🔧 Input Summary"]
            assert input_summary["Total Variables Required"] == 1
            assert input_summary["Complexity Level"] == "🟢 Simple (1 input)"

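            # The handler appears to request the top 3 candidate plans from
            # the planner by default (top_k=3).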
            mock_agent.generate_plan.assert_called_once_with(
                "analyze text sentiment", top_k=3
            )

    def test_handle_find_tools_no_agent(self):
        """Test behavior when planner agent is not initialized."""
        from app import handle_find_tools

        with patch("app.planner_agent", None):
            result = handle_find_tools("test query")
            json_result = result[0]

            assert json_result["status"] == "error"
            assert "System Error" in json_result["message"]
            assert "Agent system not initialized" in json_result["message"]
            assert json_result["error_type"] == "system_error"
            assert json_result["planned_steps"] == []
            assert "troubleshooting" in json_result

    def test_handle_find_tools_empty_query(self):
        """Test behavior with empty query."""
        from app import handle_find_tools

        with patch("app.planner_agent") as mock_agent:
            result = handle_find_tools("")
            json_result = result[0]
            assert json_result["status"] == "error"
            assert "Input Required" in json_result["message"]
            assert "describe what you'd like to accomplish" in json_result["message"]
            assert json_result["error_type"] == "user_input"
            assert json_result["planned_steps"] == []
            assert "suggestion" in json_result

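            # A whitespace-only query should be rejected the same way, and the
            # planner should never be invoked for either rejected query.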
            result = handle_find_tools(" ")
            json_result = result[0]
            assert json_result["status"] == "error"
            assert "Input Required" in json_result["message"]
            assert json_result["error_type"] == "user_input"
            assert json_result["planned_steps"] == []

            mock_agent.generate_plan.assert_not_called()

    def test_handle_find_tools_no_results(self):
        """Test behavior when no planned steps are found."""
        from app import handle_find_tools

        with patch("app.planner_agent") as mock_agent:
            mock_agent.generate_plan.return_value = []

            result = handle_find_tools("nonexistent tool type")
            json_result = result[0]

            assert json_result["status"] == "no_results"
            assert "No Matches Found" in json_result["message"]
            assert json_result["query"] == "nonexistent tool type"
            assert json_result["query_summary"] == "Your goal: 'nonexistent tool type'"
            assert json_result["planned_steps"] == []
            assert "suggestions" in json_result
            assert "available_capabilities" in json_result

            mock_agent.generate_plan.assert_called_once_with(
                "nonexistent tool type", top_k=3
            )

    def test_handle_find_tools_exception(self):
        """Test error handling when agent raises exception."""
        from app import handle_find_tools

        with patch("app.planner_agent") as mock_agent:
            mock_agent.generate_plan.side_effect = Exception("API failure")

            result = handle_find_tools("test query")
            json_result = result[0]

            assert json_result["status"] == "error"
            assert "Processing Error" in json_result["message"]
            assert json_result["error_type"] == "processing_error"
            assert json_result["technical_details"] == "API failure"
            assert json_result["query"] == "test query"
            assert json_result["planned_steps"] == []
            assert "troubleshooting" in json_result

    def test_handle_find_tools_logging(self):
        """Test that proper logging occurs."""
        from app import handle_find_tools

        mock_tool = MCPTool(
            tool_id="test-tool",
            name="Test Tool",
            description="A test tool",
            tags=["test"],
            invocation_command_stub="test --input {text}",
        )

        mock_prompt = MCPPrompt(
            prompt_id="test_prompt",
            name="Test Prompt",
            description="A test prompt",
            target_tool_id="test-tool",
            template_string="Test: {{input}}",
            tags=["test"],
            input_variables=["input"],
            difficulty_level="beginner",
        )

        mock_planned_step = PlannedStep(tool=mock_tool, prompt=mock_prompt)

        with (
            patch("app.planner_agent") as mock_agent,
            patch("app.logger") as mock_logger,
        ):
            mock_agent.generate_plan.return_value = [mock_planned_step]

            handle_find_tools("test query")

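            # Collect the first positional argument of every logger.info call
            # so the assertions below can match on message substrings.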
            log_calls = [call.args[0] for call in mock_logger.info.call_args_list]

            processing_logged = any(
                "Processing enhanced planning request" in msg for msg in log_calls
            )
            assert processing_logged, f"Processing log not found in: {log_calls}"

            success_logged = any(
                "Successfully generated" in msg and "planned steps for query" in msg
                for msg in log_calls
            )
            assert success_logged, f"Success log not found in: {log_calls}"