"""Tests for Track 2 visualization components."""
import time
from unittest.mock import Mock, patch

import pytest
from kg_services.ontology import MCPPrompt, MCPTool, PlannedStep
from kg_services.visualizer import (
KGVisualizer,
create_ecosystem_visualization,
create_plan_visualization,
)
class TestKGVisualizer:
"""Test cases for the KGVisualizer class."""
def test_kg_visualizer_initialization(self):
"""Test that KGVisualizer initializes with professional colors."""
visualizer = KGVisualizer()
# Check that professional color palette is loaded
assert visualizer.colors["primary"] == "#1e40af"
assert visualizer.colors["tool"] == "#059669"
assert visualizer.colors["prompt"] == "#7c3aed"
assert visualizer.colors["background"] == "#f8fafc"
# Check layout configuration
assert visualizer.layout_config["showlegend"] is True
assert visualizer.layout_config["hovermode"] == "closest"
def test_create_plan_visualization_empty(self):
"""Test plan visualization with empty planned steps."""
visualizer = KGVisualizer()
fig = visualizer.create_plan_visualization([], "test query")
# Should return a figure (not None)
assert fig is not None
# Should have layout configured
assert hasattr(fig, "layout")
def test_create_plan_visualization_with_steps(self):
"""Test plan visualization with actual planned steps."""
# Create test data
tool = MCPTool(
tool_id="test_tool",
name="Test Tool",
description="A test tool for visualization",
tags=["test", "visualization"]
)
prompt = MCPPrompt(
prompt_id="test_prompt",
name="Test Prompt",
description="A test prompt",
target_tool_id="test_tool",
template_string="Process: {{input}}",
input_variables=["input"]
)
step = PlannedStep(tool=tool, prompt=prompt, relevance_score=0.95)
visualizer = KGVisualizer()
fig = visualizer.create_plan_visualization([step], "test query")
# Should return a figure
assert fig is not None
# Should have data (traces)
assert hasattr(fig, "data")
assert len(fig.data) > 0
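    def test_create_plan_visualization_score_bounds(self):
        """Test plan visualization with boundary relevance scores.

        Added sanity check (not in the original suite): assumes
        relevance_score accepts the boundary values 0.0 and 1.0.
        """
        tool = MCPTool(
            tool_id="bound_tool",
            name="Bound Tool",
            description="Tool for boundary score test",
            tags=["bounds"]
        )
        prompt = MCPPrompt(
            prompt_id="bound_prompt",
            name="Bound Prompt",
            description="Prompt for boundary score test",
            target_tool_id="bound_tool",
            template_string="{{input}}",
            input_variables=["input"]
        )
        steps = [
            PlannedStep(tool=tool, prompt=prompt, relevance_score=0.0),
            PlannedStep(tool=tool, prompt=prompt, relevance_score=1.0),
        ]
        fig = KGVisualizer().create_plan_visualization(steps, "boundary scores")
        assert fig is not None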
def test_create_ecosystem_visualization(self):
"""Test ecosystem visualization with tools and prompts."""
# Create test data
tool1 = MCPTool(
tool_id="tool1",
name="Tool 1",
description="First test tool",
tags=["category1"]
)
tool2 = MCPTool(
tool_id="tool2",
name="Tool 2",
description="Second test tool",
tags=["category2"]
)
prompt1 = MCPPrompt(
prompt_id="prompt1",
name="Prompt 1",
description="First prompt",
target_tool_id="tool1",
template_string="{{input1}}"
)
prompt2 = MCPPrompt(
prompt_id="prompt2",
name="Prompt 2",
description="Second prompt",
target_tool_id="tool2",
template_string="{{input2}}"
)
visualizer = KGVisualizer()
fig = visualizer.create_tool_ecosystem_visualization([tool1, tool2], [prompt1, prompt2])
assert fig is not None
assert hasattr(fig, "data")
assert len(fig.data) > 0
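    def test_create_ecosystem_visualization_empty(self):
        """Test ecosystem visualization with no tools or prompts.

        Added sanity check (not in the original suite): mirrors
        test_create_plan_visualization_empty and assumes the method
        degrades gracefully to an empty figure.
        """
        visualizer = KGVisualizer()
        fig = visualizer.create_tool_ecosystem_visualization([], [])
        assert fig is not None
        assert hasattr(fig, "layout")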
def test_performance_metrics_chart(self):
"""Test performance metrics chart creation."""
visualizer = KGVisualizer()
fig = visualizer.create_performance_metrics_chart({})
assert fig is not None
assert hasattr(fig, "data")
# Should have polar chart data
assert len(fig.data) > 0
def test_error_handling_in_visualization(self):
"""Test that visualization handles errors gracefully."""
visualizer = KGVisualizer()
# Test with invalid data - should not raise exception
try:
# This might cause an error internally but should return a figure
fig = visualizer.create_plan_visualization(None, "")
assert fig is not None
except Exception as e:
pytest.fail(f"Visualization should handle errors gracefully: {e}")
def test_color_consistency(self):
"""Test that colors are consistent and professional."""
visualizer = KGVisualizer()
# Test hex color format
for color_name, color_value in visualizer.colors.items():
assert color_value.startswith("#"), f"Color {color_name} should be hex format"
assert len(color_value) == 7, f"Color {color_name} should be 6-digit hex"
def test_hex_to_rgb_conversion(self):
"""Test hex to RGB color conversion utility."""
visualizer = KGVisualizer()
rgb = visualizer._hex_to_rgb("#1e40af")
assert rgb == "30, 64, 175"
rgb = visualizer._hex_to_rgb("#ffffff")
assert rgb == "255, 255, 255"
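    def test_hex_to_rgb_on_palette_colors(self):
        """Test hex-to-RGB conversion across the whole palette.

        Added sanity check (not in the original suite): assumes
        _hex_to_rgb keeps the "R, G, B" string format shown above for
        any 6-digit hex input.
        """
        visualizer = KGVisualizer()
        for color_name, color_value in visualizer.colors.items():
            rgb = visualizer._hex_to_rgb(color_value)
            channels = [int(part) for part in rgb.split(",")]
            assert len(channels) == 3, f"{color_name} should yield 3 channels"
            assert all(0 <= c <= 255 for c in channels), \
                f"{color_name} has an out-of-range channel value"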
class TestVisualizationFunctions:
"""Test convenience functions for visualization."""
def test_create_plan_visualization_function(self):
"""Test the convenience function for plan visualization."""
tool = MCPTool(
tool_id="func_test_tool",
name="Function Test Tool",
description="Test tool for function",
tags=["test"]
)
prompt = MCPPrompt(
prompt_id="func_test_prompt",
name="Function Test Prompt",
description="Test prompt for function",
target_tool_id="func_test_tool",
template_string="{{input}}"
)
step = PlannedStep(tool=tool, prompt=prompt, relevance_score=0.8)
fig = create_plan_visualization([step], "function test")
assert fig is not None
assert hasattr(fig, "data")
def test_create_ecosystem_visualization_function(self):
"""Test the convenience function for ecosystem visualization."""
tool = MCPTool(
tool_id="eco_tool",
name="Ecosystem Tool",
description="Tool for ecosystem test",
tags=["ecosystem"]
)
prompt = MCPPrompt(
prompt_id="eco_prompt",
name="Ecosystem Prompt",
description="Prompt for ecosystem test",
target_tool_id="eco_tool",
template_string="{{input}}"
)
fig = create_ecosystem_visualization([tool], [prompt])
assert fig is not None
assert hasattr(fig, "data")
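    def test_convenience_functions_with_empty_inputs(self):
        """Test that the convenience functions tolerate empty inputs.

        Added sanity check (not in the original suite): assumes the
        module-level wrappers degrade as gracefully as the class methods
        tested above.
        """
        assert create_plan_visualization([], "empty query") is not None
        assert create_ecosystem_visualization([], []) is not None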
class TestVisualizationIntegration:
"""Test integration with app components."""
@patch("kg_services.visualizer.create_plan_visualization")
def test_visualization_import_in_app(self, mock_create_plan):
"""Test that visualization can be imported in app context."""
mock_create_plan.return_value = Mock()
        # The import itself should not raise; note that because the module
        # attribute is patched above, the name resolves to the mock rather
        # than the real implementation.
        from kg_services.visualizer import create_plan_visualization

        # Call it to exercise the import path end to end
        result = create_plan_visualization([], "test")
        mock_create_plan.assert_called_once_with([], "test")
assert result is not None
def test_visualization_with_realistic_data(self):
"""Test visualization with realistic planning data."""
# Create realistic test data similar to what the app would generate
tools = []
prompts = []
steps = []
# Create multiple tools with different categories
for i in range(3):
tool = MCPTool(
tool_id=f"tool_{i}",
name=f"Tool {i}",
description=f"Description for tool {i}",
tags=[f"category_{i % 2}", "test"]
)
tools.append(tool)
# Create 2 prompts per tool
for j in range(2):
prompt = MCPPrompt(
prompt_id=f"prompt_{i}_{j}",
name=f"Prompt {i}-{j}",
description=f"Prompt for tool {i}, variant {j}",
target_tool_id=f"tool_{i}",
template_string=f"Process {{input_{j}}} with method {{method_{j}}}",
input_variables=[f"input_{j}", f"method_{j}"],
difficulty_level="intermediate" if j == 0 else "advanced"
)
prompts.append(prompt)
# Create planned step
if j == 0: # Only first prompt per tool
step = PlannedStep(
tool=tool,
prompt=prompt,
relevance_score=0.9 - (i * 0.1)
)
steps.append(step)
# Test plan visualization
plan_fig = create_plan_visualization(steps, "realistic test query")
assert plan_fig is not None
assert len(plan_fig.data) > 0
# Test ecosystem visualization
eco_fig = create_ecosystem_visualization(tools, prompts)
assert eco_fig is not None
assert len(eco_fig.data) > 0
def test_visualization_performance(self):
"""Test that visualization can handle reasonable data sizes."""
# Create larger dataset to test performance
tools = []
prompts = []
# Create 10 tools with 5 prompts each
for i in range(10):
tool = MCPTool(
tool_id=f"perf_tool_{i}",
name=f"Performance Tool {i}",
description=f"Tool {i} for performance testing",
tags=[f"perf_cat_{i % 3}"]
)
tools.append(tool)
for j in range(5):
prompt = MCPPrompt(
prompt_id=f"perf_prompt_{i}_{j}",
name=f"Performance Prompt {i}-{j}",
description=f"Performance prompt for tool {i}",
target_tool_id=f"perf_tool_{i}",
template_string="{{input}}"
)
prompts.append(prompt)
        # Time the construction; it should complete without timeout even
        # for this larger dataset (perf_counter is preferred over time.time
        # for measuring elapsed durations)
        start_time = time.perf_counter()
        fig = create_ecosystem_visualization(tools, prompts)
        end_time = time.perf_counter()
execution_time = end_time - start_time
# Should complete in reasonable time (less than 5 seconds)
assert execution_time < 5.0, f"Visualization took too long: {execution_time} seconds"
assert fig is not None
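    def test_ecosystem_visualization_tool_without_prompts(self):
        """Test that a tool with no associated prompts still renders.

        Added edge-case check (not in the original suite): assumes the
        visualizer tolerates tools that no prompt targets.
        """
        tool = MCPTool(
            tool_id="orphan_tool",
            name="Orphan Tool",
            description="Tool with no associated prompts",
            tags=["edge_case"]
        )
        fig = create_ecosystem_visualization([tool], [])
        assert fig is not None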