"""Tests for app.py functionality."""

from unittest.mock import patch

import pytest

from kg_services.ontology import MCPPrompt, MCPTool, PlannedStep
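

# The tests below pin down a complexity mapping that app.py is assumed to
# implement: 1 input is "Simple", 2-3 are "Moderate", 4 or more are "Complex",
# and zero-input prompts take a separate "Ready to Use" branch. This is a
# minimal illustrative sketch of that assumed labeling, not the real app code;
# the helper name is hypothetical and nothing here is imported from app.
def _sketch_complexity_level(num_vars: int) -> str:
    # Assumes num_vars >= 1; zero-variable prompts never reach this mapping.
    if num_vars == 1:
        return "🟢 Simple (1 input)"
    if num_vars <= 3:
        return "🟡 Moderate (2-3 inputs)"
    return "🔴 Complex (4+ inputs)"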


class TestEnhancedInputVariableDisplay:
    """Test cases for enhanced input variable display functionality."""

    def test_format_input_variables_no_inputs(self):
        """Test formatting when prompt has no input variables."""
        from app import _format_input_variables_info

        result = _format_input_variables_info(
            [], "This is a simple template with no variables"
        )

        assert result["🟒 Status"] == "Ready to Use"
        assert (
            result["✨ Message"]
            == "This prompt requires no input - you can use it immediately!"
        )
        assert result["🎯 Next Steps"] == "Simply click to execute this action plan"
        assert len(result) == 3  # Status, message, and next steps

    def test_format_input_variables_single_input(self):
        """Test formatting with single input variable."""
        from app import _format_input_variables_info

        input_vars = ["input_text"]
        template = "Process this: {{input_text}}"

        result = _format_input_variables_info(input_vars, template)

        # Test Input Summary
        input_summary = result["🔧 Input Summary"]
        assert input_summary["Total Variables Required"] == 1
        assert input_summary["Complexity Level"] == "🟢 Simple (1 input)"
        assert input_summary["Estimated Setup Time"] == "⚡ < 1 minute"

        # Test Variable Details
        variable_details = result["📋 Variable Details"]
        assert len(variable_details) == 1
        var1 = variable_details[0]["📝 Variable 1"]
        assert var1["Name"] == "input_text"
        assert var1["Placeholder"] == "{{input_text}}"
        assert var1["Description"] == "The main text content you want to process"
        assert var1["Required"] == "✅ Yes"

        # Test Ready-to-Use Example
        example_section = result["🎯 Ready-to-Use Example"]
        assert example_section["Original Template"] == "Process this: {{input_text}}"
        assert "'your text here'" in example_section["With Sample Data"]

    def test_format_input_variables_multiple_inputs(self):
        """Test formatting with multiple input variables."""
        from app import _format_input_variables_info

        input_vars = ["content", "method", "format"]
        template = "Analyze {{content}} using {{method}} and output {{format}}"

        result = _format_input_variables_info(input_vars, template)

        # Test Input Summary
        input_summary = result["🔧 Input Summary"]
        assert input_summary["Total Variables Required"] == 3
        assert input_summary["Complexity Level"] == "🟡 Moderate (2-3 inputs)"
        assert input_summary["Estimated Setup Time"] == "⏱️ 2-3 minutes"

        # Test Variable Details
        variable_details = result["📋 Variable Details"]
        assert len(variable_details) == 3

        # Check each variable
        var_names = [
            var_details[next(iter(var_details.keys()))]["Name"]
            for var_details in variable_details
        ]
        assert "content" in var_names
        assert "method" in var_names
        assert "format" in var_names

        # Test Usage Guide
        usage_guide = result["💡 Usage Guide"]
        assert (
            "🔄 Replace all 3 placeholder(s) in the template" in usage_guide["Step 2"]
        )

        # Test Ready-to-Use Example
        example_section = result["🎯 Ready-to-Use Example"]
        assert (
            example_section["Original Template"]
            == "Analyze {{content}} using {{method}} and output {{format}}"
        )

    @pytest.mark.parametrize(
        ("input_vars", "template", "expected_patterns"),
        [
            (
                ["input_text", "method", "feedback_text"],
                "Process {{input_text}} using {{method}} for {{feedback_text}}",
                [
                    "'your text here'",
                    "'systematic analysis'",
                    "'customer feedback about our service'",
                ],
            ),
            (
                ["unknown_var", "custom_input"],
                "Handle {{unknown_var}} and {{custom_input}}",
                ["'[your unknown var]'", "'[your custom input]'"],
            ),
            (
                ["text", "unknown_param", "format"],
                "Transform {{text}} with {{unknown_param}} to {{format}}",
                [
                    "'sample text content'",
                    "'[your unknown param]'",
                    "'structured summary'",
                ],
            ),
            (
                ["data", "analysis_type", "output"],
                "Analyze {{data}} with {{analysis_type}} to generate {{output}}",
                ["'[your data]'", "'[your analysis type]'", "'[your output]'"],
            ),
        ],
    )
    def test_generate_enhanced_example_variables(
        self, input_vars, template, expected_patterns
    ):
        """Test example generation with various variable patterns."""
        from app import _generate_enhanced_example

        result = _generate_enhanced_example(input_vars, template)

        for pattern in expected_patterns:
            assert pattern in result
        assert "{{" not in result  # All placeholders should be replaced
        assert "}}" not in result


class TestFormatPlannedStepForDisplay:
    """Test cases for the format_planned_step_for_display function."""

    def test_format_planned_step_basic(self):
        """Test basic formatting of PlannedStep for display."""
        from app import format_planned_step_for_display

        # Create sample tool
        tool = MCPTool(
            tool_id="test_tool_v1",
            name="Test Tool",
            description="A tool for testing",
            tags=["test", "utility"],
            invocation_command_stub="test_command --input {input}",
        )

        # Create sample prompt
        prompt = MCPPrompt(
            prompt_id="test_prompt_v1",
            name="Test Prompt",
            description="A prompt for testing",
            target_tool_id="test_tool_v1",
            template_string="Process this: {{input_text}} with {{method}}",
            tags=["test", "example"],
            input_variables=["input_text", "method"],
            difficulty_level="intermediate",
        )

        # Create PlannedStep
        planned_step = PlannedStep(tool=tool, prompt=prompt, relevance_score=0.85)

        # Format for display
        result = format_planned_step_for_display(planned_step)

        # Assertions for top-level structure
        assert "🎯 Action Plan" in result
        assert "πŸ”§ Tool Information" in result
        assert "πŸ“‹ Prompt Details" in result
        assert "πŸ“ Input Requirements" in result
        assert "🎯 Relevance Score" in result

        # Test Action Plan
        assert result["🎯 Action Plan"] == "Use 'Test Tool' with 'Test Prompt' prompt"

        # Test Tool Information
        tool_details = result["🔧 Tool Information"]
        assert tool_details["ID"] == "test_tool_v1"
        assert tool_details["Name"] == "Test Tool"
        assert tool_details["Description"] == "A tool for testing"
        assert tool_details["Tags"] == "test, utility"
        assert tool_details["Command Template"] == "test_command --input {input}"

        # Test Prompt Details
        prompt_details = result["📋 Prompt Details"]
        assert prompt_details["ID"] == "test_prompt_v1"
        assert prompt_details["Name"] == "Test Prompt"
        assert prompt_details["Description"] == "A prompt for testing"
        assert (
            prompt_details["Template"] == "Process this: {{input_text}} with {{method}}"
        )
        assert prompt_details["Difficulty"] == "Intermediate"
        assert prompt_details["Tags"] == "test, example"

        # Test Enhanced Input Requirements Structure
        input_reqs = result["📝 Input Requirements"]

        # Test Input Summary
        assert "🔧 Input Summary" in input_reqs
        input_summary = input_reqs["🔧 Input Summary"]
        assert input_summary["Total Variables Required"] == 2
        assert input_summary["Complexity Level"] == "🟡 Moderate (2-3 inputs)"
        assert input_summary["Estimated Setup Time"] == "⏱️ 2-3 minutes"

        # Test Variable Details
        assert "📋 Variable Details" in input_reqs
        variable_details = input_reqs["📋 Variable Details"]
        assert len(variable_details) == 2

        # Check first variable
        var1 = variable_details[0]["📝 Variable 1"]
        assert var1["Name"] == "input_text"
        assert var1["Placeholder"] == "{{input_text}}"
        assert var1["Description"] == "The main text content you want to process"
        assert var1["Required"] == "✅ Yes"

        # Check second variable
        var2 = variable_details[1]["📝 Variable 2"]
        assert var2["Name"] == "method"
        assert var2["Placeholder"] == "{{method}}"
        assert var2["Description"] == "Approach or method to use"
        assert var2["Required"] == "✅ Yes"

        # Test Usage Guide
        assert "💡 Usage Guide" in input_reqs
        usage_guide = input_reqs["💡 Usage Guide"]
        assert (
            usage_guide["Step 1"]
            == "📝 Prepare your data for each variable listed above"
        )
        assert (
            usage_guide["Step 2"] == "🔄 Replace all 2 placeholder(s) in the template"
        )
        assert (
            usage_guide["Step 3"]
            == "🚀 Execute the action plan with your customized prompt"
        )

        # Test Ready-to-Use Example
        assert "🎯 Ready-to-Use Example" in input_reqs
        example_section = input_reqs["🎯 Ready-to-Use Example"]
        assert (
            example_section["Original Template"]
            == "Process this: {{input_text}} with {{method}}"
        )
        assert "With Sample Data" in example_section
        assert (
            example_section["πŸ’‘ Tip"]
            == "Replace the sample values with your actual data"
        )

        # Test Relevance Score
        assert result["🎯 Relevance Score"] == "0.85"

    def test_format_planned_step_empty_fields(self):
        """Test formatting with empty tags and variables."""
        from app import format_planned_step_for_display

        tool = MCPTool(
            tool_id="minimal_tool",
            name="Minimal Tool",
            description="Minimal description",
            tags=[],  # Empty tags
            invocation_command_stub="",
        )

        prompt = MCPPrompt(
            prompt_id="minimal_prompt",
            name="Minimal Prompt",
            description="Minimal prompt",
            target_tool_id="minimal_tool",
            template_string="Simple template",
            tags=[],  # Empty tags
            input_variables=[],  # No variables
            difficulty_level="beginner",
        )

        planned_step = PlannedStep(tool=tool, prompt=prompt)  # No relevance score

        result = format_planned_step_for_display(planned_step)

        # Test empty fields handling
        assert result["πŸ”§ Tool Information"]["Tags"] == "N/A"
        assert result["πŸ“‹ Prompt Details"]["Tags"] == "N/A"

        # Test enhanced no-input format
        input_reqs = result["πŸ“ Input Requirements"]
        assert input_reqs["🟒 Status"] == "Ready to Use"
        assert (
            input_reqs["✨ Message"]
            == "This prompt requires no input - you can use it immediately!"
        )
        assert input_reqs["🎯 Next Steps"] == "Simply click to execute this action plan"

        assert result["🎯 Relevance Score"] == "Not calculated"

        # Test other fields still work
        assert (
            result["🎯 Action Plan"]
            == "Use 'Minimal Tool' with 'Minimal Prompt' prompt"
        )
        assert result["πŸ”§ Tool Information"]["Name"] == "Minimal Tool"
        assert result["πŸ“‹ Prompt Details"]["Template"] == "Simple template"

    def test_format_planned_step_single_items(self):
        """Test formatting with single tag and variable."""
        from app import format_planned_step_for_display

        tool = MCPTool(
            tool_id="single_tool",
            name="Single Tool",
            description="Tool with single tag",
            tags=["nlp"],  # Single tag
            invocation_command_stub="process --text {input}",
        )

        prompt = MCPPrompt(
            prompt_id="single_prompt",
            name="Single Prompt",
            description="Prompt with single variable",
            target_tool_id="single_tool",
            template_string="Analyze: {{text}}",
            tags=["analysis"],  # Single tag
            input_variables=["text"],  # Single variable
            difficulty_level="advanced",
        )

        planned_step = PlannedStep(tool=tool, prompt=prompt, relevance_score=0.95)

        result = format_planned_step_for_display(planned_step)

        # Test single items (no comma separation)
        assert result["πŸ”§ Tool Information"]["Tags"] == "nlp"
        assert result["πŸ“‹ Prompt Details"]["Tags"] == "analysis"

        # Test enhanced single input format
        input_reqs = result["πŸ“ Input Requirements"]
        input_summary = input_reqs["πŸ”§ Input Summary"]
        assert input_summary["Total Variables Required"] == 1
        assert input_summary["Complexity Level"] == "🟒 Simple (1 input)"
        assert input_summary["Estimated Setup Time"] == "⚑ < 1 minute"

        # Test single variable details
        variable_details = input_reqs["πŸ“‹ Variable Details"]
        assert len(variable_details) == 1
        var1 = variable_details[0]["πŸ“ Variable 1"]
        assert var1["Name"] == "text"
        assert var1["Placeholder"] == "{{text}}"
        assert var1["Description"] == "Text content for analysis or processing"

        assert result["🎯 Relevance Score"] == "0.95"

    def test_format_planned_step_complex_content(self):
        """Test formatting with complex content including special characters."""
        from app import format_planned_step_for_display

        tool = MCPTool(
            tool_id="complex_tool_v2.1",
            name="Complex Tool & Analyzer",
            description="A tool with special chars: <>[]{}|\\",
            tags=["complex", "special-chars", "v2"],
            invocation_command_stub="complex-analyzer --input '{input}' --format json",
        )

        prompt = MCPPrompt(
            prompt_id="complex_prompt_v1",
            name="Complex Analysis Prompt",
            description="Handle complex analysis with multiple parameters",
            target_tool_id="complex_tool_v2.1",
            template_string="Analyze {{input_data}} with parameters: {{param1}}, {{param2}}, {{param3}}",
            tags=["analysis", "multi-param", "complex"],
            input_variables=["input_data", "param1", "param2", "param3"],
            difficulty_level="advanced",
        )

        planned_step = PlannedStep(tool=tool, prompt=prompt, relevance_score=0.42)

        result = format_planned_step_for_display(planned_step)

        # Test complex content handling
        assert "Complex Tool & Analyzer" in result["🎯 Action Plan"]
        assert (
            result["πŸ”§ Tool Information"]["Description"]
            == "A tool with special chars: <>[]{}|\\"
        )
        assert result["πŸ”§ Tool Information"]["Tags"] == "complex, special-chars, v2"

        # Test enhanced complex input format
        input_reqs = result["📝 Input Requirements"]
        input_summary = input_reqs["🔧 Input Summary"]
        assert input_summary["Total Variables Required"] == 4
        assert input_summary["Complexity Level"] == "🔴 Complex (4+ inputs)"
        assert input_summary["Estimated Setup Time"] == "🕐 5+ minutes"

        # Test complex variable details
        variable_details = input_reqs["📋 Variable Details"]
        assert len(variable_details) == 4

        # Check that all variables are present
        var_names = [next(iter(var.keys())) for var in variable_details]
        expected_vars = [
            "πŸ“ Variable 1",
            "πŸ“ Variable 2",
            "πŸ“ Variable 3",
            "πŸ“ Variable 4",
        ]
        assert var_names == expected_vars

        assert result["πŸ“‹ Prompt Details"]["Difficulty"] == "Advanced"
        assert result["🎯 Relevance Score"] == "0.42"

    def test_format_planned_step_zero_relevance_score(self):
        """Test formatting with zero relevance score (valid but low)."""
        from app import format_planned_step_for_display

        tool = MCPTool(
            tool_id="zero_tool",
            name="Zero Tool",
            description="Tool with zero relevance",
            tags=["test"],
            invocation_command_stub="zero --input {data}",
        )

        prompt = MCPPrompt(
            prompt_id="zero_prompt",
            name="Zero Prompt",
            description="Prompt with zero relevance",
            target_tool_id="zero_tool",
            template_string="Process: {{data}}",
            tags=["test"],
            input_variables=["data"],
            difficulty_level="beginner",
        )

        planned_step = PlannedStep(tool=tool, prompt=prompt, relevance_score=0.0)

        result = format_planned_step_for_display(planned_step)

        # Zero should be preserved, not treated as falsy
        assert result["🎯 Relevance Score"] == "0.00"

    def test_format_planned_step_type_consistency(self):
        """Test that the output structure is consistent and typed correctly."""
        from app import format_planned_step_for_display

        tool = MCPTool(
            tool_id="type_tool",
            name="Type Tool",
            description="Tool for type testing",
            tags=["typing"],
            invocation_command_stub="type-test --input {data}",
        )

        prompt = MCPPrompt(
            prompt_id="type_prompt",
            name="Type Prompt",
            description="Prompt for type testing",
            target_tool_id="type_tool",
            template_string="Type test: {{data}}",
            tags=["typing"],
            input_variables=["data"],
            difficulty_level="intermediate",
        )

        planned_step = PlannedStep(tool=tool, prompt=prompt, relevance_score=0.75)

        result = format_planned_step_for_display(planned_step)

        # Test return type is Dict[str, Any]
        assert isinstance(result, dict)

        # Test required top-level keys exist
        required_keys = [
            "🎯 Action Plan",
            "πŸ”§ Tool Information",
            "πŸ“‹ Prompt Details",
            "πŸ“ Input Requirements",
            "🎯 Relevance Score",
        ]
        for key in required_keys:
            assert key in result

        # Test nested dictionaries
        assert isinstance(result["πŸ”§ Tool Information"], dict)
        assert isinstance(result["πŸ“‹ Prompt Details"], dict)
        assert isinstance(result["πŸ“ Input Requirements"], dict)

        # Test string values
        assert isinstance(result["🎯 Action Plan"], str)
        assert isinstance(result["πŸ”§ Tool Information"]["Name"], str)
        assert isinstance(result["πŸ“‹ Prompt Details"]["Template"], str)
        assert isinstance(result["🎯 Relevance Score"], str)

        # Test enhanced input requirements structure
        input_reqs = result["📝 Input Requirements"]

        # Test Input Summary structure
        assert "πŸ”§ Input Summary" in input_reqs
        input_summary = input_reqs["πŸ”§ Input Summary"]
        assert isinstance(input_summary["Total Variables Required"], int)
        assert isinstance(input_summary["Complexity Level"], str)
        assert isinstance(input_summary["Estimated Setup Time"], str)

        # Test Variable Details structure
        assert "πŸ“‹ Variable Details" in input_reqs
        variable_details = input_reqs["πŸ“‹ Variable Details"]
        assert isinstance(variable_details, list)
        assert len(variable_details) == 1  # One variable

        # Test individual variable structure
        var1 = variable_details[0]["📝 Variable 1"]
        assert isinstance(var1["Name"], str)
        assert isinstance(var1["Placeholder"], str)
        assert isinstance(var1["Description"], str)
        assert isinstance(var1["Required"], str)

        # Test Usage Guide structure
        assert "πŸ’‘ Usage Guide" in input_reqs
        usage_guide = input_reqs["πŸ’‘ Usage Guide"]
        assert isinstance(usage_guide["Step 1"], str)
        assert isinstance(usage_guide["Step 2"], str)
        assert isinstance(usage_guide["Step 3"], str)

        # Test Ready-to-Use Example structure
        assert "🎯 Ready-to-Use Example" in input_reqs
        example_section = input_reqs["🎯 Ready-to-Use Example"]
        assert isinstance(example_section["Original Template"], str)
        assert isinstance(example_section["With Sample Data"], str)
        assert isinstance(example_section["💡 Tip"], str)

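
# The relevance-score assertions in the class above (e.g. "0.85", "0.00",
# "Not calculated") imply that app formats the score as a two-decimal string
# and treats only None, never 0.0, as "not calculated". A minimal sketch of
# that assumed formatting; the helper name is hypothetical:
def _sketch_format_relevance(score):
    return "Not calculated" if score is None else f"{score:.2f}"
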

class TestHandleFindTools:
    """Test cases for the handle_find_tools function."""

    def test_handle_find_tools_success(self):
        """Test successful enhanced planning with PlannedStep objects."""
        # Import here to avoid circular imports during testing
        from app import handle_find_tools

        # Create mock MCPTool and MCPPrompt objects
        mock_tool = MCPTool(
            tool_id="sentiment-analyzer",
            name="Sentiment Analyzer",
            description="Analyzes sentiment of text",
            tags=["text", "sentiment", "analysis"],
            invocation_command_stub="sentiment --text {text}",
        )

        mock_prompt = MCPPrompt(
            prompt_id="sentiment_customer_v1",
            name="Customer Feedback Analysis",
            description="Analyze customer feedback sentiment",
            target_tool_id="sentiment-analyzer",
            template_string="Analyze sentiment: {{feedback_text}}",
            tags=["customer", "feedback"],
            input_variables=["feedback_text"],
            difficulty_level="beginner",
        )

        mock_planned_step = PlannedStep(
            tool=mock_tool, prompt=mock_prompt, relevance_score=0.92
        )

        # Mock the global planner_agent
        with patch("app.planner_agent") as mock_agent:
            mock_agent.generate_plan.return_value = [mock_planned_step]

            result = handle_find_tools("analyze text sentiment")

            # Extract the first element (JSON result) from the tuple
            json_result = result[0]

            # Verify the result structure
            assert json_result["status"] == "success"
            assert "Generated 1 personalized action plan(s)" in json_result["message"]
            assert json_result["query"] == "analyze text sentiment"
            assert json_result["query_summary"] == "Your goal: 'analyze text sentiment'"
            assert json_result["total_steps"] == 1
            assert len(json_result["planned_steps"]) == 1
            assert "next_steps" in json_result

            # Check formatted step structure
            formatted_step = json_result["planned_steps"][0]
            assert "🎯 Action Plan" in formatted_step
            assert "πŸ”§ Tool Information" in formatted_step
            assert "πŸ“‹ Prompt Details" in formatted_step
            assert "πŸ“ Input Requirements" in formatted_step
            assert "🎯 Relevance Score" in formatted_step

            # Verify specific content
            assert (
                formatted_step["🎯 Action Plan"]
                == "Use 'Sentiment Analyzer' with 'Customer Feedback Analysis' prompt"
            )
            assert formatted_step["πŸ”§ Tool Information"]["Name"] == "Sentiment Analyzer"
            assert (
                formatted_step["πŸ“‹ Prompt Details"]["Template"]
                == "Analyze sentiment: {{feedback_text}}"
            )
            assert formatted_step["🎯 Relevance Score"] == "0.92"

            # Verify enhanced input requirements structure
            input_reqs = formatted_step["📝 Input Requirements"]
            input_summary = input_reqs["🔧 Input Summary"]
            assert input_summary["Total Variables Required"] == 1
            assert input_summary["Complexity Level"] == "🟢 Simple (1 input)"

            # Verify planner was called correctly
            mock_agent.generate_plan.assert_called_once_with(
                "analyze text sentiment", top_k=3
            )

    def test_handle_find_tools_no_agent(self):
        """Test behavior when planner agent is not initialized."""
        from app import handle_find_tools

        # Mock planner_agent as None
        with patch("app.planner_agent", None):
            result = handle_find_tools("test query")
            json_result = result[0]

            assert json_result["status"] == "error"
            assert "System Error" in json_result["message"]
            assert "Agent system not initialized" in json_result["message"]
            assert json_result["error_type"] == "system_error"
            assert json_result["planned_steps"] == []
            assert "troubleshooting" in json_result

    def test_handle_find_tools_empty_query(self):
        """Test behavior with empty query."""
        from app import handle_find_tools

        with patch("app.planner_agent") as mock_agent:
            # Test empty string
            result = handle_find_tools("")
            json_result = result[0]
            assert json_result["status"] == "error"
            assert "Input Required" in json_result["message"]
            assert "describe what you'd like to accomplish" in json_result["message"]
            assert json_result["error_type"] == "user_input"
            assert json_result["planned_steps"] == []
            assert "suggestion" in json_result

            # Test whitespace only
            result = handle_find_tools("   ")
            json_result = result[0]
            assert json_result["status"] == "error"
            assert "Input Required" in json_result["message"]
            assert json_result["error_type"] == "user_input"
            assert json_result["planned_steps"] == []

            # Ensure agent wasn't called
            mock_agent.generate_plan.assert_not_called()

    def test_handle_find_tools_no_results(self):
        """Test behavior when no planned steps are found."""
        from app import handle_find_tools

        with patch("app.planner_agent") as mock_agent:
            mock_agent.generate_plan.return_value = []

            result = handle_find_tools("nonexistent tool type")
            json_result = result[0]

            assert json_result["status"] == "no_results"
            assert "No Matches Found" in json_result["message"]
            assert json_result["query"] == "nonexistent tool type"
            assert json_result["query_summary"] == "Your goal: 'nonexistent tool type'"
            assert json_result["planned_steps"] == []
            assert "suggestions" in json_result
            assert "available_capabilities" in json_result

            mock_agent.generate_plan.assert_called_once_with(
                "nonexistent tool type", top_k=3
            )

    def test_handle_find_tools_exception(self):
        """Test error handling when agent raises exception."""
        from app import handle_find_tools

        with patch("app.planner_agent") as mock_agent:
            mock_agent.generate_plan.side_effect = Exception("API failure")

            result = handle_find_tools("test query")
            json_result = result[0]

            assert json_result["status"] == "error"
            assert "Processing Error" in json_result["message"]
            assert json_result["error_type"] == "processing_error"
            assert json_result["technical_details"] == "API failure"
            assert json_result["query"] == "test query"
            assert json_result["planned_steps"] == []
            assert "troubleshooting" in json_result

    def test_handle_find_tools_logging(self):
        """Test that proper logging occurs."""
        from app import handle_find_tools

        mock_tool = MCPTool(
            tool_id="test-tool",
            name="Test Tool",
            description="A test tool",
            tags=["test"],
            invocation_command_stub="test --input {text}",
        )

        mock_prompt = MCPPrompt(
            prompt_id="test_prompt",
            name="Test Prompt",
            description="A test prompt",
            target_tool_id="test-tool",
            template_string="Test: {{input}}",
            tags=["test"],
            input_variables=["input"],
            difficulty_level="beginner",
        )

        mock_planned_step = PlannedStep(tool=mock_tool, prompt=mock_prompt)

        with (
            patch("app.planner_agent") as mock_agent,
            patch("app.logger") as mock_logger,
        ):
            mock_agent.generate_plan.return_value = [mock_planned_step]

            handle_find_tools("test query")

            # Verify logging calls - check that appropriate logging happened
            # We'll be more flexible with the exact message content
            log_calls = [call.args[0] for call in mock_logger.info.call_args_list]

            # Check that processing message was logged
            processing_logged = any(
                "Processing enhanced planning request" in msg for msg in log_calls
            )
            assert processing_logged, f"Processing log not found in: {log_calls}"

            # Check that success message was logged
            success_logged = any(
                "Successfully generated" in msg and "planned steps for query" in msg
                for msg in log_calls
            )
            assert success_logged, f"Success log not found in: {log_calls}"