"""Tests for Track 2 visualization components."""
from unittest.mock import Mock, patch
import pytest
from kg_services.ontology import MCPPrompt, MCPTool, PlannedStep
from kg_services.visualizer import (
KGVisualizer,
create_ecosystem_visualization,
create_plan_visualization,
)
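

# Hedged convenience helpers: the tests below build MCPTool / MCPPrompt inline,
# and these factories sketch how that boilerplate could be shared. They assume
# only the constructor fields already used elsewhere in this file; adapt or
# drop them if the models require additional fields.
def make_test_tool(suffix: str = "0", **overrides) -> MCPTool:
    """Build a minimal MCPTool for visualization tests."""
    fields = {
        "tool_id": f"tool_{suffix}",
        "name": f"Tool {suffix}",
        "description": f"Test tool {suffix}",
        "tags": ["test"],
    }
    fields.update(overrides)
    return MCPTool(**fields)


def make_test_prompt(suffix: str = "0", tool_id: str = "tool_0", **overrides) -> MCPPrompt:
    """Build a minimal MCPPrompt targeting the given tool."""
    fields = {
        "prompt_id": f"prompt_{suffix}",
        "name": f"Prompt {suffix}",
        "description": f"Test prompt {suffix}",
        "target_tool_id": tool_id,
        "template_string": "{{input}}",
    }
    fields.update(overrides)
    return MCPPrompt(**fields)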


class TestKGVisualizer:
    """Test cases for the KGVisualizer class."""

    def test_kg_visualizer_initialization(self):
        """Test that KGVisualizer initializes with professional colors."""
        visualizer = KGVisualizer()

        # Check that professional color palette is loaded
        assert visualizer.colors["primary"] == "#1e40af"
        assert visualizer.colors["tool"] == "#059669"
        assert visualizer.colors["prompt"] == "#7c3aed"
        assert visualizer.colors["background"] == "#f8fafc"

        # Check layout configuration
        assert visualizer.layout_config["showlegend"] is True
        assert visualizer.layout_config["hovermode"] == "closest"

    def test_create_plan_visualization_empty(self):
        """Test plan visualization with empty planned steps."""
        visualizer = KGVisualizer()
        fig = visualizer.create_plan_visualization([], "test query")

        # Should return a figure (not None)
        assert fig is not None
        # Should have layout configured
        assert hasattr(fig, "layout")

    def test_create_plan_visualization_with_steps(self):
        """Test plan visualization with actual planned steps."""
        # Create test data
        tool = MCPTool(
            tool_id="test_tool",
            name="Test Tool",
            description="A test tool for visualization",
            tags=["test", "visualization"]
        )
        prompt = MCPPrompt(
            prompt_id="test_prompt",
            name="Test Prompt",
            description="A test prompt",
            target_tool_id="test_tool",
            template_string="Process: {{input}}",
            input_variables=["input"]
        )
        step = PlannedStep(tool=tool, prompt=prompt, relevance_score=0.95)

        visualizer = KGVisualizer()
        fig = visualizer.create_plan_visualization([step], "test query")

        # Should return a figure
        assert fig is not None
        # Should have data (traces)
        assert hasattr(fig, "data")
        assert len(fig.data) > 0

    def test_create_ecosystem_visualization(self):
        """Test ecosystem visualization with tools and prompts."""
        # Create test data
        tool1 = MCPTool(
            tool_id="tool1",
            name="Tool 1",
            description="First test tool",
            tags=["category1"]
        )
        tool2 = MCPTool(
            tool_id="tool2",
            name="Tool 2",
            description="Second test tool",
            tags=["category2"]
        )
        prompt1 = MCPPrompt(
            prompt_id="prompt1",
            name="Prompt 1",
            description="First prompt",
            target_tool_id="tool1",
            template_string="{{input1}}"
        )
        prompt2 = MCPPrompt(
            prompt_id="prompt2",
            name="Prompt 2",
            description="Second prompt",
            target_tool_id="tool2",
            template_string="{{input2}}"
        )

        visualizer = KGVisualizer()
        fig = visualizer.create_tool_ecosystem_visualization(
            [tool1, tool2], [prompt1, prompt2]
        )

        assert fig is not None
        assert hasattr(fig, "data")
        assert len(fig.data) > 0

    def test_performance_metrics_chart(self):
        """Test performance metrics chart creation."""
        visualizer = KGVisualizer()
        fig = visualizer.create_performance_metrics_chart({})

        assert fig is not None
        assert hasattr(fig, "data")
        # Should have polar chart data
        assert len(fig.data) > 0

    def test_error_handling_in_visualization(self):
        """Test that visualization handles errors gracefully."""
        visualizer = KGVisualizer()

        # Test with invalid data - should not raise exception
        try:
            # This might cause an error internally but should return a figure
            fig = visualizer.create_plan_visualization(None, "")
            assert fig is not None
        except Exception as e:
            pytest.fail(f"Visualization should handle errors gracefully: {e}")

    def test_color_consistency(self):
        """Test that colors are consistent and professional."""
        visualizer = KGVisualizer()

        # Test hex color format
        for color_name, color_value in visualizer.colors.items():
            assert color_value.startswith("#"), f"Color {color_name} should be hex format"
            assert len(color_value) == 7, f"Color {color_name} should be 6-digit hex"

    def test_hex_to_rgb_conversion(self):
        """Test hex to RGB color conversion utility."""
        visualizer = KGVisualizer()

        rgb = visualizer._hex_to_rgb("#1e40af")
        assert rgb == "30, 64, 175"

        rgb = visualizer._hex_to_rgb("#ffffff")
        assert rgb == "255, 255, 255"


class TestVisualizationFunctions:
    """Test convenience functions for visualization."""

    def test_create_plan_visualization_function(self):
        """Test the convenience function for plan visualization."""
        tool = MCPTool(
            tool_id="func_test_tool",
            name="Function Test Tool",
            description="Test tool for function",
            tags=["test"]
        )
        prompt = MCPPrompt(
            prompt_id="func_test_prompt",
            name="Function Test Prompt",
            description="Test prompt for function",
            target_tool_id="func_test_tool",
            template_string="{{input}}"
        )
        step = PlannedStep(tool=tool, prompt=prompt, relevance_score=0.8)

        fig = create_plan_visualization([step], "function test")

        assert fig is not None
        assert hasattr(fig, "data")

    def test_create_ecosystem_visualization_function(self):
        """Test the convenience function for ecosystem visualization."""
        tool = MCPTool(
            tool_id="eco_tool",
            name="Ecosystem Tool",
            description="Tool for ecosystem test",
            tags=["ecosystem"]
        )
        prompt = MCPPrompt(
            prompt_id="eco_prompt",
            name="Ecosystem Prompt",
            description="Prompt for ecosystem test",
            target_tool_id="eco_tool",
            template_string="{{input}}"
        )

        fig = create_ecosystem_visualization([tool], [prompt])

        assert fig is not None
        assert hasattr(fig, "data")


class TestVisualizationIntegration:
    """Test integration with app components."""

    @patch("kg_services.visualizer.create_plan_visualization")
    def test_visualization_import_in_app(self, mock_create_plan):
        """Test that visualization can be imported in app context."""
        mock_create_plan.return_value = Mock()

        # This should not raise import errors
        from kg_services.visualizer import create_plan_visualization

        # Call the function to ensure it works
        result = create_plan_visualization([], "test")
        assert result is not None

    def test_visualization_with_realistic_data(self):
        """Test visualization with realistic planning data."""
        # Create realistic test data similar to what the app would generate
        tools = []
        prompts = []
        steps = []

        # Create multiple tools with different categories
        for i in range(3):
            tool = MCPTool(
                tool_id=f"tool_{i}",
                name=f"Tool {i}",
                description=f"Description for tool {i}",
                tags=[f"category_{i % 2}", "test"]
            )
            tools.append(tool)

            # Create 2 prompts per tool
            for j in range(2):
                prompt = MCPPrompt(
                    prompt_id=f"prompt_{i}_{j}",
                    name=f"Prompt {i}-{j}",
                    description=f"Prompt for tool {i}, variant {j}",
                    target_tool_id=f"tool_{i}",
                    # Quadruple braces keep literal {{...}} placeholders in the
                    # rendered f-string, matching the template style used above.
                    template_string=f"Process {{{{input_{j}}}}} with method {{{{method_{j}}}}}",
                    input_variables=[f"input_{j}", f"method_{j}"],
                    difficulty_level="intermediate" if j == 0 else "advanced"
                )
                prompts.append(prompt)

                # Create a planned step for the first prompt of each tool only
                if j == 0:
                    step = PlannedStep(
                        tool=tool,
                        prompt=prompt,
                        relevance_score=0.9 - (i * 0.1)
                    )
                    steps.append(step)

        # Test plan visualization
        plan_fig = create_plan_visualization(steps, "realistic test query")
        assert plan_fig is not None
        assert len(plan_fig.data) > 0

        # Test ecosystem visualization
        eco_fig = create_ecosystem_visualization(tools, prompts)
        assert eco_fig is not None
        assert len(eco_fig.data) > 0

    def test_visualization_performance(self):
        """Test that visualization can handle reasonable data sizes."""
        import time

        # Create a larger dataset to exercise performance: 10 tools with 5 prompts each
        tools = []
        prompts = []
        for i in range(10):
            tool = MCPTool(
                tool_id=f"perf_tool_{i}",
                name=f"Performance Tool {i}",
                description=f"Tool {i} for performance testing",
                tags=[f"perf_cat_{i % 3}"]
            )
            tools.append(tool)

            for j in range(5):
                prompt = MCPPrompt(
                    prompt_id=f"perf_prompt_{i}_{j}",
                    name=f"Performance Prompt {i}-{j}",
                    description=f"Performance prompt for tool {i}",
                    target_tool_id=f"perf_tool_{i}",
                    template_string="{{input}}"
                )
                prompts.append(prompt)

        # Time the visualization; perf_counter is the appropriate clock for
        # measuring elapsed wall time within a process.
        start_time = time.perf_counter()
        fig = create_ecosystem_visualization(tools, prompts)
        execution_time = time.perf_counter() - start_time

        # Should complete in reasonable time (less than 5 seconds)
        assert execution_time < 5.0, f"Visualization took too long: {execution_time:.2f} seconds"
        assert fig is not None
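
    # Smoke-test sketch: the assertions above treat the returned objects as
    # Plotly figures (fig.data, fig.layout), so this assumes they are
    # plotly.graph_objects.Figure instances with a to_json() method. Drop or
    # adapt this test if the visualizer returns a different figure type.
    def test_figure_is_serializable(self):
        """Sketch: a generated figure should serialize to JSON for embedding."""
        tool = MCPTool(
            tool_id="json_tool",
            name="JSON Tool",
            description="Tool for serialization test",
            tags=["serialization"]
        )
        prompt = MCPPrompt(
            prompt_id="json_prompt",
            name="JSON Prompt",
            description="Prompt for serialization test",
            target_tool_id="json_tool",
            template_string="{{input}}"
        )

        fig = create_ecosystem_visualization([tool], [prompt])
        serialized = fig.to_json()  # assumed Plotly Figure API

        assert isinstance(serialized, str)
        assert len(serialized) > 0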