Csaba Bolyos committed
Commit 1c9f2ea · Parent: b0b5357

simplified front end

README.md CHANGED
@@ -13,7 +13,6 @@ tags:
13
  - pose-estimation
14
  - movement-analysis
15
  - video-analysis
16
- - webrtc
17
  - youtube
18
  - vimeo
19
  - mcp
@@ -39,907 +38,81 @@ pip install gradio_labanmovementanalysis
39
  ## Usage
40
 
41
  ```python
 
42
  """
43
- Unified Laban Movement Analysis Demo
44
- Comprehensive interface combining all features:
45
- - Standard LMA analysis
46
- - Enhanced features (WebRTC, YouTube/Vimeo)
47
- - Agent API (batch processing, filtering)
48
- - Real-time analysis
49
- - Model comparison
50
-
51
- Created by: Csaba Bolyós (BladeSzaSza)
52
- Contact: [email protected]
53
- GitHub: https://github.com/bladeszasza
54
- LinkedIn: https://www.linkedin.com/in/csaba-bolyós-00a11767/
55
- Hugging Face: https://huggingface.co/BladeSzaSza
56
-
57
- Heavy Beta Version - Under Active Development
58
  """
59
 
60
  import gradio as gr
61
- import sys
62
- from pathlib import Path
63
- from typing import Dict, Any, List, Tuple, Union, Optional
64
-
65
- # Add parent directory to path
66
- sys.path.insert(0, str(Path(__file__).parent.parent / "backend"))
67
-
68
- from gradio_labanmovementanalysis import LabanMovementAnalysis
69
-
70
- # Import agent API if available
71
- try:
72
- from gradio_labanmovementanalysis.agent_api import (
73
- LabanAgentAPI,
74
- PoseModel,
75
- MovementDirection,
76
- MovementIntensity
77
- )
78
- HAS_AGENT_API = True
79
- except ImportError:
80
- HAS_AGENT_API = False
81
-
82
- # Import WebRTC components if available
83
- try:
84
- from gradio_webrtc import WebRTC
85
- from gradio_labanmovementanalysis.webrtc_handler import (
86
- webrtc_detection,
87
- get_rtc_configuration
88
- )
89
- HAS_WEBRTC = True
90
- except ImportError as e:
91
- print(f"WebRTC import failed: {e}")
92
- HAS_WEBRTC = False
93
-
94
- # Initialize components
95
- try:
96
- # Initialize with WebRTC support
97
- analyzer = LabanMovementAnalysis(
98
- enable_webrtc=True,
99
- enable_visualization=True
100
- )
101
- print("✅ Core features initialized successfully")
102
- except Exception as e:
103
- print(f"Warning: Some features may not be available: {e}")
104
- analyzer = LabanMovementAnalysis(enable_webrtc=False)
105
-
106
- # Initialize agent API if available
107
- agent_api = None
108
- if HAS_AGENT_API:
109
- try:
110
- agent_api = LabanAgentAPI()
111
- except Exception as e:
112
- print(f"Warning: Agent API not available: {e}")
113
- agent_api = None
114
-
115
-
116
- def process_video_standard(video: Optional[str], model: str, enable_viz: int, include_keypoints: int) -> Tuple[Optional[Dict[str, Any]], Optional[str]]:
117
- """Standard video processing function with proper type annotations."""
118
- if video is None:
119
- return None, None
120
-
121
- try:
122
- # Convert int to bool
123
- enable_viz_bool = bool(enable_viz)
124
- include_keypoints_bool = bool(include_keypoints)
125
-
126
- json_output, video_output = analyzer.process_video(
127
- video,
128
- model=model,
129
- enable_visualization=enable_viz_bool,
130
- include_keypoints=include_keypoints_bool
131
- )
132
- return json_output, video_output
133
- except Exception as e:
134
- return {"error": str(e)}, None
135
 
136
 
137
- def process_video_enhanced(video_input: Optional[str], model: str, enable_viz: int, include_keypoints: int) -> Tuple[Dict[str, Any], Optional[str]]:
138
- """Enhanced video processing with all new features."""
139
- if not video_input:
140
- return {"error": "No video provided"}, None
141
-
142
- try:
143
- # Convert int to bool
144
- enable_viz_bool = bool(enable_viz)
145
- include_keypoints_bool = bool(include_keypoints)
146
-
147
- # Handle both file upload and URL input
148
- video_path = video_input.name if hasattr(video_input, 'name') else video_input
149
-
150
- json_result, viz_result = analyzer.process_video(
151
- video_path,
152
- model=model,
153
- enable_visualization=enable_viz_bool,
154
- include_keypoints=include_keypoints_bool
155
- )
156
- return json_result, viz_result
157
- except Exception as e:
158
- error_result = {"error": str(e)}
159
- return error_result, None
160
 
 
 
161
 
162
- def process_video_for_agent(video: Optional[str], model: str, output_format: str = "summary") -> Dict[str, Any]:
163
- """Process video with agent-friendly output format."""
164
- if not HAS_AGENT_API or agent_api is None:
165
- return {"error": "Agent API not available"}
166
-
167
- if not video:
168
- return {"error": "No video provided"}
169
-
170
- try:
171
- model_enum = PoseModel(model)
172
- result = agent_api.analyze(video, model=model_enum, generate_visualization=False)
173
-
174
- if output_format == "summary":
175
- return {"summary": agent_api.get_movement_summary(result)}
176
- elif output_format == "structured":
177
- return {
178
- "success": result.success,
179
- "direction": result.dominant_direction.value,
180
- "intensity": result.dominant_intensity.value,
181
- "speed": result.dominant_speed,
182
- "fluidity": result.fluidity_score,
183
- "expansion": result.expansion_score,
184
- "segments": len(result.movement_segments)
185
- }
186
- else: # json
187
- return result.raw_data
188
- except Exception as e:
189
- return {"error": str(e)}
190
-
191
-
192
- def batch_process_videos(files: Optional[List], model: str) -> Dict[str, Any]:
193
- """Process multiple videos in batch."""
194
- if not HAS_AGENT_API or agent_api is None:
195
- return {"error": "Agent API not available"}
196
-
197
- if not files:
198
- return {"error": "No videos provided"}
199
-
200
- try:
201
- video_paths = [f.name for f in files]
202
- results = agent_api.batch_analyze(video_paths, model=PoseModel(model), parallel=True)
203
-
204
- output = {
205
- "total_videos": len(results),
206
- "successful": sum(1 for r in results if r.success),
207
- "failed": sum(1 for r in results if not r.success),
208
- "results": []
209
- }
210
-
211
- for result in results:
212
- output["results"].append({
213
- "video": Path(result.video_path).name,
214
- "success": result.success,
215
- "summary": agent_api.get_movement_summary(result) if result.success else result.error
216
- })
217
-
218
- return output
219
- except Exception as e:
220
- return {"error": str(e)}
221
 
222
 
223
- def filter_videos_by_movement(files: Optional[List], direction: str, intensity: str, min_fluidity: float, min_expansion: float) -> Dict[str, Any]:
224
- """Filter videos based on movement characteristics."""
225
- if not HAS_AGENT_API or agent_api is None:
226
- return {"error": "Agent API not available"}
227
-
228
- if not files:
229
- return {"error": "No videos provided"}
230
-
231
- try:
232
- video_paths = [f.name for f in files]
233
-
234
- dir_filter = MovementDirection(direction) if direction != "any" else None
235
- int_filter = MovementIntensity(intensity) if intensity != "any" else None
236
-
237
- filtered = agent_api.filter_by_movement(
238
- video_paths,
239
- direction=dir_filter,
240
- intensity=int_filter,
241
- min_fluidity=min_fluidity if min_fluidity > 0 else None,
242
- min_expansion=min_expansion if min_expansion > 0 else None
243
  )
244
-
245
- return {
246
- "total_analyzed": len(video_paths),
247
- "matching_videos": len(filtered),
248
- "matches": [
249
- {
250
- "video": Path(r.video_path).name,
251
- "direction": r.dominant_direction.value,
252
- "intensity": r.dominant_intensity.value,
253
- "fluidity": r.fluidity_score,
254
- "expansion": r.expansion_score
255
- }
256
- for r in filtered
257
- ]
258
- }
259
- except Exception as e:
260
- return {"error": str(e)}
261
-
262
-
263
- def compare_models(video: Optional[str], model1: str, model2: str) -> List[List[str]]:
264
- """Compare two different pose models on the same video."""
265
- if not video:
266
- return [["Error", "No video provided", "", ""]]
267
-
268
- try:
269
- # Analyze with both models
270
- result1, _ = analyzer.process_video(video, model=model1, enable_visualization=False)
271
- result2, _ = analyzer.process_video(video, model=model2, enable_visualization=False)
272
-
273
- # Extract key metrics for comparison
274
- def extract_metrics(result):
275
- summary = result.get("movement_analysis", {}).get("summary", {})
276
- return {
277
- "direction": summary.get("direction", {}).get("dominant", "unknown"),
278
- "intensity": summary.get("intensity", {}).get("dominant", "unknown"),
279
- "speed": summary.get("speed", {}).get("dominant", "unknown"),
280
- "frame_count": result.get("video_info", {}).get("frame_count", 0)
281
- }
282
-
283
- metrics1 = extract_metrics(result1)
284
- metrics2 = extract_metrics(result2)
285
-
286
- # Create comparison table data
287
- comparison_data = [
288
- ["Direction", metrics1["direction"], metrics2["direction"],
289
- "✓" if metrics1["direction"] == metrics2["direction"] else "✗"],
290
- ["Intensity", metrics1["intensity"], metrics2["intensity"],
291
- "✓" if metrics1["intensity"] == metrics2["intensity"] else "✗"],
292
- ["Speed", metrics1["speed"], metrics2["speed"],
293
- "✓" if metrics1["speed"] == metrics2["speed"] else "✗"],
294
- ["Frames Processed", str(metrics1["frame_count"]), str(metrics2["frame_count"]),
295
- "✓" if metrics1["frame_count"] == metrics2["frame_count"] else "✗"]
296
- ]
297
-
298
- return comparison_data
299
-
300
- except Exception as e:
301
- return [["Error", str(e), "", ""]]
302
-
303
-
304
- def start_webrtc_stream(model: str) -> Tuple[str, Dict[str, Any]]:
305
- """Start WebRTC real-time analysis."""
306
- try:
307
- success = analyzer.start_webrtc_stream(model)
308
- if success:
309
- return "🟢 Stream Active", {"status": "streaming", "model": model}
310
- else:
311
- return "🔴 Failed to start", {"status": "error"}
312
- except Exception as e:
313
- return f"🔴 Error: {str(e)}", {"status": "error"}
314
-
315
-
316
- def stop_webrtc_stream() -> Tuple[str, Dict[str, Any]]:
317
- """Stop WebRTC real-time analysis."""
318
- try:
319
- success = analyzer.stop_webrtc_stream()
320
- if success:
321
- return "🟡 Stream Stopped", {"status": "stopped"}
322
- else:
323
- return "🔴 Failed to stop", {"status": "error"}
324
- except Exception as e:
325
- return f"🔴 Error: {str(e)}", {"status": "error"}
326
-
327
-
328
- def create_unified_demo():
329
- """Create the unified comprehensive demo."""
330
-
331
- # Custom theme - simplified to avoid JSON schema issues
332
- custom_theme = gr.themes.Soft(
333
- primary_hue=gr.themes.colors.emerald, # Viridian green
334
- secondary_hue=gr.themes.colors.blue, # Cobalt blue
335
- neutral_hue=gr.themes.colors.gray,
336
- )
337
-
338
- with gr.Blocks(
339
- title="Laban Movement Analysis - Complete Suite by Csaba Bolyós",
340
- theme=custom_theme,
341
- css="""
342
- .main-header {
343
- background: linear-gradient(135deg, #40826D 0%, #2E5E4A 50%, #1B3A2F 100%);
344
- color: white;
345
- padding: 30px;
346
- border-radius: 10px;
347
- margin-bottom: 20px;
348
- text-align: center;
349
- }
350
- .feature-card {
351
- border: 1px solid #40826D;
352
- border-radius: 8px;
353
- padding: 16px;
354
- margin: 8px 0;
355
- background: linear-gradient(135deg, #F5F5DC 0%, #F0F8E8 100%);
356
- }
357
- .json-output {
358
- max-height: 600px;
359
- overflow-y: auto;
360
- font-family: monospace;
361
- font-size: 12px;
362
- background: #F8F9FA;
363
- border: 1px solid #40826D;
364
- }
365
- .author-info {
366
- background: linear-gradient(135deg, #40826D 0%, #2E5E4A 100%);
367
- color: white;
368
- padding: 15px;
369
- border-radius: 8px;
370
- margin: 10px 0;
371
- text-align: center;
372
- }
373
- /* Dark mode support */
374
- .dark .feature-card {
375
- background: linear-gradient(135deg, #1e3a32 0%, #2d4a3d 100%);
376
- border-color: #40826D;
377
- }
378
- .dark .json-output {
379
- background: #2d3748;
380
- color: #e2e8f0;
381
- }
382
- """
383
- ) as demo:
384
-
385
- # Main Header
386
- gr.HTML("""
387
- <div class="main-header">
388
- <h1>🎭 Laban Movement Analysis - Complete Suite</h1>
389
- <p style="font-size: 18px; margin: 10px 0;">
390
- Professional movement analysis with pose estimation, AI action recognition,
391
- real-time processing, and agent automation
392
- </p>
393
- <p style="font-size: 14px; opacity: 0.9;">
394
- Supports YouTube/Vimeo URLs • WebRTC Streaming • 20+ Pose Models • MCP Integration
395
- </p>
396
- <p style="font-size: 12px; margin-top: 15px; opacity: 0.8;">
397
- <strong>Version 0.01-beta</strong> - Heavy Beta Under Active Development
398
- </p>
399
- </div>
400
- """)
401
-
402
- with gr.Tabs():
403
- # Tab 1: Standard Analysis
404
- with gr.Tab("🎬 Standard Analysis"):
405
- gr.Markdown("""
406
- ### Classic Laban Movement Analysis
407
- Upload a video file to analyze movement using traditional LMA metrics with pose estimation.
408
- """)
409
-
410
- with gr.Row():
411
- with gr.Column(scale=1):
412
- video_input_std = gr.Video(
413
- label="Upload Video",
414
- sources=["upload"],
415
- format="mp4"
416
- )
417
-
418
- model_dropdown_std = gr.Dropdown(
419
- choices=["mediapipe", "movenet", "yolo"],
420
- value="mediapipe",
421
- label="Pose Estimation Model"
422
- )
423
-
424
- with gr.Row():
425
- enable_viz_std = gr.Radio(
426
- choices=[("Yes", 1), ("No", 0)],
427
- value=1,
428
- label="Generate Visualization"
429
- )
430
-
431
- include_keypoints_std = gr.Radio(
432
- choices=[("Yes", 1), ("No", 0)],
433
- value=0,
434
- label="Include Keypoints"
435
- )
436
-
437
- process_btn_std = gr.Button("Analyze Movement", variant="primary")
438
-
439
- gr.Examples(
440
- examples=[
441
- ["examples/balette.mp4"]
442
- ],
443
- inputs=video_input_std,
444
- label="Example Videos"
445
- )
446
-
447
- with gr.Column(scale=2):
448
- with gr.Tab("Analysis Results"):
449
- json_output_std = gr.JSON(
450
- label="Movement Analysis (JSON)",
451
- elem_classes=["json-output"]
452
- )
453
-
454
- with gr.Tab("Visualization"):
455
- video_output_std = gr.Video(
456
- label="Annotated Video",
457
- format="mp4"
458
- )
459
-
460
- gr.Markdown("""
461
- **Visualization Guide:**
462
- - 🦴 **Skeleton**: Pose keypoints and connections
463
- - 🌊 **Trails**: Motion history (fading lines)
464
- - ➡️ **Arrows**: Movement direction indicators
465
- - 🎨 **Colors**: Green (low) → Orange (medium) → Red (high) intensity
466
- """)
467
-
468
- process_btn_std.click(
469
- fn=process_video_standard,
470
- inputs=[video_input_std, model_dropdown_std, enable_viz_std, include_keypoints_std],
471
- outputs=[json_output_std, video_output_std],
472
- api_name="analyze_standard"
473
- )
474
-
475
- # Tab 2: Enhanced Analysis
476
- with gr.Tab("🚀 Enhanced Analysis"):
477
- gr.Markdown("""
478
- ### Advanced Analysis with AI and URL Support
479
- Analyze videos from URLs (YouTube/Vimeo), use advanced pose models, and get AI-powered insights.
480
- """)
481
-
482
- with gr.Row():
483
- with gr.Column(scale=1):
484
- with gr.Group(elem_classes=["feature-card"]):
485
- gr.Markdown("**Video Input**")
486
-
487
- # Changed from textbox to file upload as requested
488
- video_input_enh = gr.File(
489
- label="Upload Video or Drop File",
490
- file_types=["video"],
491
- type="filepath"
492
- )
493
-
494
- # URL input option
495
- url_input_enh = gr.Textbox(
496
- label="Or Enter Video URL",
497
- placeholder="YouTube URL, Vimeo URL, or direct video URL",
498
- info="Leave file upload empty to use URL"
499
- )
500
-
501
- gr.Examples(
502
- examples=[
503
- ["https://www.youtube.com/shorts/RX9kH2l3L8U"],
504
- ["https://vimeo.com/815392738"]
505
- ],
506
- inputs=url_input_enh,
507
- label="Example URLs"
508
- )
509
-
510
- gr.Markdown("**Model Selection**")
511
-
512
- model_select_enh = gr.Dropdown(
513
- choices=[
514
- # MediaPipe variants
515
- "mediapipe-lite", "mediapipe-full", "mediapipe-heavy",
516
- # MoveNet variants
517
- "movenet-lightning", "movenet-thunder",
518
- # YOLO variants (added X models)
519
- "yolo-v8-n", "yolo-v8-s", "yolo-v8-m", "yolo-v8-l", "yolo-v8-x",
520
- # YOLO v11 variants
521
- "yolo-v11-n", "yolo-v11-s", "yolo-v11-m", "yolo-v11-l", "yolo-v11-x"
522
- ],
523
- value="mediapipe-full",
524
- label="Advanced Pose Models",
525
- info="17+ model variants available"
526
- )
527
-
528
- gr.Markdown("**Analysis Options**")
529
-
530
- with gr.Row():
531
- enable_viz_enh = gr.Radio(
532
- choices=[("Yes", 1), ("No", 0)],
533
- value=1,
534
- label="Visualization"
535
- )
536
-
537
- with gr.Row():
538
- include_keypoints_enh = gr.Radio(
539
- choices=[("Yes", 1), ("No", 0)],
540
- value=0,
541
- label="Raw Keypoints"
542
- )
543
-
544
- analyze_btn_enh = gr.Button("🚀 Enhanced Analysis", variant="primary", size="lg")
545
-
546
- with gr.Column(scale=2):
547
- with gr.Tab("📊 Analysis"):
548
- analysis_output_enh = gr.JSON(label="Enhanced Analysis Results")
549
-
550
- with gr.Tab("🎥 Visualization"):
551
- viz_output_enh = gr.Video(label="Annotated Video")
552
-
553
- def process_enhanced_input(file_input: Optional[str], url_input: str, model: str, enable_viz: int, include_keypoints: int) -> Tuple[Dict[str, Any], Optional[str]]:
554
- """Process either file upload or URL input."""
555
- video_source = file_input if file_input else url_input
556
- return process_video_enhanced(video_source, model, enable_viz, include_keypoints)
557
-
558
- analyze_btn_enh.click(
559
- fn=process_enhanced_input,
560
- inputs=[video_input_enh, url_input_enh, model_select_enh, enable_viz_enh, include_keypoints_enh],
561
- outputs=[analysis_output_enh, viz_output_enh],
562
- api_name="analyze_enhanced"
563
- )
564
-
565
- # Tab 3: Agent API
566
- with gr.Tab("🤖 Agent API"):
567
- gr.Markdown("""
568
- ### AI Agent & Automation Features
569
- Batch processing, filtering, and structured outputs designed for AI agents and automation.
570
- """)
571
-
572
- with gr.Tabs():
573
- with gr.Tab("Single Analysis"):
574
- with gr.Row():
575
- with gr.Column():
576
- video_input_agent = gr.Video(label="Upload Video", sources=["upload"])
577
- model_select_agent = gr.Dropdown(
578
- choices=["mediapipe", "movenet", "yolo"],
579
- value="mediapipe",
580
- label="Model"
581
- )
582
- output_format_agent = gr.Radio(
583
- choices=["summary", "structured", "json"],
584
- value="summary",
585
- label="Output Format"
586
- )
587
- analyze_btn_agent = gr.Button("Analyze", variant="primary")
588
-
589
- with gr.Column():
590
- output_display_agent = gr.JSON(label="Agent Output")
591
-
592
- analyze_btn_agent.click(
593
- fn=process_video_for_agent,
594
- inputs=[video_input_agent, model_select_agent, output_format_agent],
595
- outputs=output_display_agent,
596
- api_name="analyze_agent"
597
- )
598
-
599
- with gr.Tab("Batch Processing"):
600
- with gr.Row():
601
- with gr.Column():
602
- batch_files = gr.File(
603
- label="Upload Multiple Videos",
604
- file_count="multiple",
605
- file_types=["video"]
606
- )
607
- batch_model = gr.Dropdown(
608
- choices=["mediapipe", "movenet", "yolo"],
609
- value="mediapipe",
610
- label="Model"
611
- )
612
- batch_btn = gr.Button("Process Batch", variant="primary")
613
-
614
- with gr.Column():
615
- batch_output = gr.JSON(label="Batch Results")
616
-
617
- batch_btn.click(
618
- fn=batch_process_videos,
619
- inputs=[batch_files, batch_model],
620
- outputs=batch_output,
621
- api_name="batch_analyze"
622
- )
623
-
624
- with gr.Tab("Movement Filter"):
625
- with gr.Row():
626
- with gr.Column():
627
- filter_files = gr.File(
628
- label="Videos to Filter",
629
- file_count="multiple",
630
- file_types=["video"]
631
- )
632
-
633
- with gr.Group():
634
- direction_filter = gr.Dropdown(
635
- choices=["any", "up", "down", "left", "right", "stationary"],
636
- value="any",
637
- label="Direction Filter"
638
- )
639
- intensity_filter = gr.Dropdown(
640
- choices=["any", "low", "medium", "high"],
641
- value="any",
642
- label="Intensity Filter"
643
- )
644
- fluidity_threshold = gr.Slider(0.0, 1.0, 0.0, label="Min Fluidity")
645
- expansion_threshold = gr.Slider(0.0, 1.0, 0.0, label="Min Expansion")
646
-
647
- filter_btn = gr.Button("Apply Filters", variant="primary")
648
-
649
- with gr.Column():
650
- filter_output = gr.JSON(label="Filtered Results")
651
-
652
- filter_btn.click(
653
- fn=filter_videos_by_movement,
654
- inputs=[filter_files, direction_filter, intensity_filter,
655
- fluidity_threshold, expansion_threshold],
656
- outputs=filter_output,
657
- api_name="filter_videos"
658
- )
659
-
660
- # Tab 4: Real-time WebRTC
661
- with gr.Tab("📹 Real-time Analysis"):
662
- gr.Markdown("""
663
- ### Live Camera Movement Analysis
664
- Real-time pose detection and movement analysis from your webcam using WebRTC.
665
- **Grant camera permissions when prompted for best experience.**
666
- """)
667
-
668
- # Official Gradio WebRTC approach (compatible with NumPy 1.x)
669
- if HAS_WEBRTC:
670
-
671
- # Get RTC configuration
672
- rtc_config = get_rtc_configuration()
673
-
674
- # Custom CSS following official guide
675
- css_webrtc = """
676
- .my-group {max-width: 480px !important; max-height: 480px !important;}
677
- .my-column {display: flex !important; justify-content: center !important; align-items: center !important;}
678
- """
679
-
680
- with gr.Column(elem_classes=["my-column"]):
681
- with gr.Group(elem_classes=["my-group"]):
682
- # Official WebRTC Component
683
- webrtc_stream = WebRTC(
684
- label="🎥 Live Camera Stream",
685
- rtc_configuration=rtc_config
686
- )
687
-
688
- webrtc_model = gr.Dropdown(
689
- choices=["mediapipe-lite", "movenet-lightning", "yolo-v11-n"],
690
- value="mediapipe-lite",
691
- label="Pose Model",
692
- info="Optimized for real-time processing"
693
- )
694
-
695
- confidence_slider = gr.Slider(
696
- label="Detection Confidence",
697
- minimum=0.0,
698
- maximum=1.0,
699
- step=0.05,
700
- value=0.5,
701
- info="Higher = fewer false positives"
702
- )
703
-
704
- # Official WebRTC streaming setup following Gradio guide
705
- webrtc_stream.stream(
706
- fn=webrtc_detection,
707
- inputs=[webrtc_stream, webrtc_model, confidence_slider],
708
- outputs=[webrtc_stream],
709
- time_limit=10 # Following official guide: 10 seconds per user
710
- )
711
-
712
- # Info display
713
- gr.HTML("""
714
- <div style="background: linear-gradient(135deg, #F0F8E8 0%, #E8F4FD 100%); padding: 15px; border-radius: 8px; margin-top: 10px; border: 1px solid #40826D;">
715
- <h4 style="color: #2E5E4A;">📹 WebRTC Pose Analysis</h4>
716
- <p style="margin: 5px 0; color: #1B3A2F;">Real-time movement analysis using your webcam</p>
717
-
718
- <h4 style="color: #2E5E4A;">🔒 Privacy</h4>
719
- <p style="margin: 5px 0; color: #1B3A2F;">Processing happens locally - no video data stored</p>
720
-
721
- <h4 style="color: #2E5E4A;">💡 Usage</h4>
722
- <ul style="margin: 5px 0; padding-left: 20px; color: #1B3A2F;">
723
- <li>Grant camera permission when prompted</li>
724
- <li>Move in front of camera to see pose detection</li>
725
- <li>Adjust confidence threshold as needed</li>
726
- </ul>
727
- </div>
728
- """)
729
-
730
- else:
731
- # Fallback if WebRTC component not available
732
- gr.HTML("""
733
- <div style="text-align: center; padding: 50px; border: 2px dashed #ff6b6b; border-radius: 8px; background: #ffe0e0;">
734
- <h3>📦 WebRTC Component Required</h3>
735
- <p><strong>To enable real-time camera analysis, install:</strong></p>
736
- <code style="background: #f0f0f0; padding: 10px; border-radius: 4px; display: block; margin: 10px 0;">
737
- pip install gradio-webrtc twilio
738
- </code>
739
- <p style="margin-top: 15px;"><em>Use Enhanced Analysis tab for video files meanwhile</em></p>
740
- </div>
741
- """)
742
-
743
- # Tab 5: Model Comparison
744
- with gr.Tab("⚖️ Model Comparison"):
745
- gr.Markdown("""
746
- ### Compare Pose Estimation Models
747
- Analyze the same video with different models to compare accuracy and results.
748
- """)
749
-
750
- with gr.Column():
751
- comparison_video = gr.Video(
752
- label="Video for Comparison",
753
- sources=["upload"]
754
- )
755
-
756
- with gr.Row():
757
- model1_comp = gr.Dropdown(
758
- choices=["mediapipe-full", "movenet-thunder", "yolo-v11-s"],
759
- value="mediapipe-full",
760
- label="Model 1"
761
- )
762
-
763
- model2_comp = gr.Dropdown(
764
- choices=["mediapipe-full", "movenet-thunder", "yolo-v11-s"],
765
- value="yolo-v11-s",
766
- label="Model 2"
767
- )
768
-
769
- compare_btn = gr.Button("🔄 Compare Models", variant="primary")
770
-
771
- comparison_results = gr.DataFrame(
772
- headers=["Metric", "Model 1", "Model 2", "Match"],
773
- label="Comparison Results"
774
- )
775
-
776
- compare_btn.click(
777
- fn=compare_models,
778
- inputs=[comparison_video, model1_comp, model2_comp],
779
- outputs=comparison_results,
780
- api_name="compare_models"
781
- )
782
-
783
- # Tab 6: Documentation
784
- with gr.Tab("📚 Documentation"):
785
- gr.Markdown("""
786
- # Complete Feature Documentation
787
-
788
- ## 🎥 Video Input Support
789
- - **Local Files**: MP4, AVI, MOV, WebM formats
790
- - **YouTube**: Automatic download from YouTube URLs
791
- - **Vimeo**: Automatic download from Vimeo URLs
792
- - **Direct URLs**: Any direct video file URL
793
-
794
- ## 🤖 Pose Estimation Models
795
-
796
- ### MediaPipe (Google) - 33 3D Landmarks
797
- - **Lite**: Fastest CPU performance
798
- - **Full**: Balanced accuracy/speed (recommended)
799
- - **Heavy**: Highest accuracy
800
-
801
- ### MoveNet (Google) - 17 COCO Keypoints
802
- - **Lightning**: Mobile-optimized, very fast
803
- - **Thunder**: Higher accuracy variant
804
-
805
- ### YOLO (Ultralytics) - 17 COCO Keypoints
806
- - **v8 variants**: n/s/m/l/x sizes (nano to extra-large)
807
- - **v11 variants**: Latest with improved accuracy (n/s/m/l/x)
808
- - **Multi-person**: Supports multiple people in frame
809
-
810
- ## 📹 Real-time WebRTC
811
-
812
- - **Live Camera**: Direct webcam access via WebRTC
813
- - **Low Latency**: Sub-100ms processing
814
- - **Adaptive Quality**: Automatic performance optimization
815
- - **Live Overlay**: Real-time pose and metrics display
816
-
817
- ## 🤖 Agent & MCP Integration
818
-
819
- ### API Endpoints
820
- - `/analyze_standard` - Basic LMA analysis
821
- - `/analyze_enhanced` - Advanced analysis with all features
822
- - `/analyze_agent` - Agent-optimized output
823
- - `/batch_analyze` - Multiple video processing
824
- - `/filter_videos` - Movement-based filtering
825
- - `/compare_models` - Model comparison
826
-
827
- ### MCP Server
828
- ```bash
829
- # Start MCP server for AI assistants
830
- python -m backend.mcp_server
831
- ```
832
-
833
- ### Python API
834
- ```python
835
- from gradio_labanmovementanalysis import LabanMovementAnalysis
836
-
837
- # Initialize with all features
838
- analyzer = LabanMovementAnalysis(
839
- enable_webrtc=True
840
- )
841
-
842
- # Analyze YouTube video
843
- result, viz = analyzer.process_video(
844
- "https://youtube.com/watch?v=...",
845
- model="yolo-v11-s"
846
- )
847
- ```
848
-
849
- ## 📊 Output Formats
850
-
851
- ### Summary Format
852
- Human-readable movement analysis summary.
853
-
854
- ### Structured Format
855
- ```json
856
- {
857
- "success": true,
858
- "direction": "up",
859
- "intensity": "medium",
860
- "fluidity": 0.85,
861
- "expansion": 0.72
862
- }
863
- ```
864
-
865
- ### Full JSON Format
866
- Complete frame-by-frame analysis with all metrics.
867
-
868
- ## 🎯 Applications
869
-
870
- - **Sports**: Technique analysis and performance tracking
871
- - **Dance**: Choreography analysis and movement quality
872
- - **Healthcare**: Physical therapy and rehabilitation
873
- - **Research**: Large-scale movement pattern studies
874
- - **Entertainment**: Interactive applications and games
875
- - **Education**: Movement teaching and body awareness
876
-
877
- ## 🔗 Integration Examples
878
-
879
- ### Gradio Client
880
- ```python
881
- from gradio_client import Client
882
-
883
- client = Client("http://localhost:7860")
884
- result = client.predict(
885
- video="path/to/video.mp4",
886
- model="mediapipe-full",
887
- api_name="/analyze_enhanced"
888
- )
889
- ```
890
-
891
- ### Batch Processing
892
- ```python
893
- results = client.predict(
894
- files=["video1.mp4", "video2.mp4"],
895
- model="yolo-v11-s",
896
- api_name="/batch_analyze"
897
- )
898
- ```
899
- """)
900
-
901
- # Author info with proper styling
902
- gr.HTML("""
903
- <div class="author-info">
904
- <p><strong>Created by:</strong> Csaba Bolyós (BladeSzaSza)</p>
905
- <p style="margin: 5px 0;">
906
- <a href="https://github.com/bladeszasza" style="color: #a8e6cf; text-decoration: none;">🔗 GitHub</a> •
907
- <a href="https://huggingface.co/BladeSzaSza" style="color: #a8e6cf; text-decoration: none;">🤗 Hugging Face</a> •
908
- <a href="https://www.linkedin.com/in/csaba-bolyós-00a11767/" style="color: #a8e6cf; text-decoration: none;">💼 LinkedIn</a>
909
- </p>
910
- <p style="font-size: 12px; opacity: 0.9;">Contact: [email protected]</p>
911
- </div>
912
- """)
913
-
914
- # Footer with proper attribution
915
- gr.HTML("""
916
- <div style="text-align: center; padding: 20px; margin-top: 30px; border-top: 1px solid #40826D;">
917
- <p style="color: #2E5E4A; margin-bottom: 10px;">
918
- 🎭 Laban Movement Analysis - Complete Suite | Heavy Beta Version
919
- </p>
920
- <p style="color: #2E5E4A; font-size: 12px;">
921
- Created by <strong>Csaba Bolyós</strong> | Powered by MediaPipe, MoveNet & YOLO
922
- </p>
923
- <p style="color: #2E5E4A; font-size: 10px; margin-top: 10px;">
924
- <a href="https://github.com/bladeszasza" style="color: #40826D;">GitHub</a> •
925
- <a href="https://huggingface.co/BladeSzaSza" style="color: #40826D;">Hugging Face</a> •
926
- <a href="https://www.linkedin.com/in/csaba-bolyós-00a11767/" style="color: #40826D;">LinkedIn</a>
927
- </p>
928
- </div>
929
- """)
930
-
931
  return demo
932
-
933
-
934
  if __name__ == "__main__":
935
- demo = create_unified_demo()
936
- demo.launch(
937
- server_name="0.0.0.0",
938
- server_port=7860,
939
- share=False,
940
- show_error=True,
941
- favicon_path=None
942
- )
943
 
944
  ```
945
 
@@ -996,18 +169,8 @@ bool
996
  <td align="left">Whether to include raw keypoints in JSON output</td>
997
  </tr>
998
 
999
- <tr>
1000
- <td align="left"><code>enable_webrtc</code></td>
1001
- <td align="left" style="width: 25%;">
1002
 
1003
- ```python
1004
- bool
1005
- ```
1006
 
1007
- </td>
1008
- <td align="left"><code>False</code></td>
1009
- <td align="left">Whether to enable WebRTC real-time analysis</td>
1010
- </tr>
1011
 
1012
  <tr>
1013
  <td align="left"><code>label</code></td>
 
13
  - pose-estimation
14
  - movement-analysis
15
  - video-analysis
 
16
  - youtube
17
  - vimeo
18
  - mcp
 
38
  ## Usage
39
 
40
  ```python
41
+ # app.py ─────────────────────────────────────────────────────────
42
  """
43
+ Laban Movement Analysis – modernised Gradio Space
44
+ Author: Csaba (BladeSzaSza)
 
 
45
  """
46
 
47
  import gradio as gr
 
 
48
 
49
 
50
+ # ── 3. Dummy backend for local dev (replace with real fn) ───────
51
+ def process_video_standard(video, model, viz, kp):
52
+ """Return empty JSON + passthrough video placeholder."""
53
+ return {}, video
 
 
54
 
55
+ # ── 4. Build UI ─────────────────────────────────────────────────
56
+ def create_demo() -> gr.Blocks:
57
+ with gr.Blocks(
58
+ title="Laban Movement Analysis – Complete Suite",
59
+ theme='gstaff/sketch',
60
+ fill_width=True,
61
+ ) as demo:
62
 
63
+ # ── Hero banner ──
64
+ gr.HTML(
65
+ """
66
+ <div class="main-header">
67
+ <h1>🎭 Laban Movement Analysis – Complete Suite</h1>
68
+ <p>Pose estimation • AI action recognition • Real-time agents</p>
69
+ <p style="font-size:.85rem;opacity:.85">v0.01-beta • 20+ pose models • MCP</p>
70
+ </div>
71
+ """
72
+ )
 
 
73
 
74
+ # ── Workspace ──
75
+ with gr.Row(equal_height=True):
76
+ # Input column
77
+ with gr.Column(scale=1, min_width=260):
78
+ video_in = gr.Video(label="Upload Video", sources=["upload"], format="mp4")
79
+ model_sel = gr.Dropdown(
80
+ ["mediapipe", "movenet", "yolo"], value="mediapipe", label="Pose Model"
81
+ )
82
+ with gr.Accordion("Options", open=False):
83
+ enable_viz = gr.Radio([("Yes", 1), ("No", 0)], value=1, label="Visualization")
84
+ include_kp = gr.Radio([("Yes", 1), ("No", 0)], value=0, label="Raw Keypoints")
85
+ analyze_btn = gr.Button("Analyze Movement", variant="primary")
86
+
87
+ # Output column
88
+ with gr.Column(scale=2, min_width=320):
89
+ viz_out = gr.Video(label="Annotated Video")
90
+ with gr.Accordion("Raw JSON", open=False):
91
+ json_out = gr.JSON(label="Movement Analysis", elem_classes=["json-output"])
92
+
93
+ # Wiring
94
+ analyze_btn.click(
95
+ fn=process_video_standard,
96
+ inputs=[video_in, model_sel, enable_viz, include_kp],
97
+ outputs=[json_out, viz_out],
98
+ )
99
 
100
+ # Footer
101
+ gr.HTML(
102
+ """
103
+ <div class="author-info">
104
+ Built by Csaba Bolyós •
105
+ <a href="https://github.com/bladeszasza" target="_blank">GitHub</a> •
106
+ <a href="https://huggingface.co/BladeSzaSza" target="_blank">HF</a>
107
+ </div>
108
+ """
 
 
109
  )
 
 
110
  return demo
111
+
 
112
  if __name__ == "__main__":
113
+ print("🚀 Starting Laban Movement Analysis...")
114
+ demo = create_demo()
115
+
 
 
116
 
117
  ```
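
The `process_video_standard` stub in the simplified front end above returns an empty JSON object and passes the input video through, which keeps the UI testable without the analysis backend. Below is a minimal sketch of wiring in the real component instead; it assumes `gradio_labanmovementanalysis` is installed and that `process_video` keeps the `(video, model=..., enable_visualization=..., include_keypoints=...)` signature returning `(analysis_json, visualization_path)` shown elsewhere in this commit.

```python
# Sketch only: swap the dummy stub for the real backend.
from gradio_labanmovementanalysis import LabanMovementAnalysis

analyzer = LabanMovementAnalysis(enable_visualization=True)

def process_video_standard(video, model, viz, kp):
    """Run the full LMA pipeline; surface errors in the JSON panel."""
    if video is None:
        return {}, None
    try:
        return analyzer.process_video(
            video,
            model=model,
            enable_visualization=bool(viz),
            include_keypoints=bool(kp),
        )
    except Exception as e:
        return {"error": str(e)}, None
```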
118
 
 
169
  <td align="left">Whether to include raw keypoints in JSON output</td>
170
  </tr>
171
 
 
 
 
172
 
 
 
 
173
 
 
 
 
 
174
 
175
  <tr>
176
  <td align="left"><code>label</code></td>
backend/gradio_labanmovementanalysis/__init__.py CHANGED
@@ -33,19 +33,3 @@ try:
33
  __all__.extend(['video_downloader', 'VideoDownloader', 'SmartVideoInput'])
34
  except ImportError:
35
  pass
36
-
37
- try:
38
- from . import webrtc_handler
39
- from .webrtc_handler import WebRTCMovementAnalyzer, WebRTCGradioInterface
40
- __all__.extend(['webrtc_handler', 'WebRTCMovementAnalyzer', 'WebRTCGradioInterface'])
41
- except ImportError:
42
- pass
43
-
44
- try:
45
- # SkateFormer integration reserved for Version 2
46
- # from . import skateformer_integration
47
- # from .skateformer_integration import SkateFormerAnalyzer, SkateFormerConfig
48
- # __all__.extend(['skateformer_integration', 'SkateFormerAnalyzer', 'SkateFormerConfig'])
49
- pass
50
- except ImportError:
51
- pass
 
33
  __all__.extend(['video_downloader', 'VideoDownloader', 'SmartVideoInput'])
34
  except ImportError:
35
  pass
 
 
backend/gradio_labanmovementanalysis/labanmovementanalysis.py CHANGED
@@ -19,11 +19,6 @@ from .video_downloader import SmartVideoInput
19
  # Advanced features reserved for Version 2
20
  # SkateFormer AI integration will be available in future release
21
 
22
- try:
23
- from .webrtc_handler import WebRTCMovementAnalyzer, WebRTCGradioInterface
24
- HAS_WEBRTC = True
25
- except ImportError:
26
- HAS_WEBRTC = False
27
 
28
 
29
  # SkateFormerCompatibility class removed for Version 1 stability
@@ -43,7 +38,7 @@ class LabanMovementAnalysis(Component):
43
  default_model: str = DEFAULT_MODEL,
44
  enable_visualization: bool = True,
45
  include_keypoints: bool = False,
46
- enable_webrtc: bool = False,
47
  label: Optional[str] = None,
48
  every: Optional[float] = None,
49
  show_label: Optional[bool] = None,
@@ -63,7 +58,7 @@ class LabanMovementAnalysis(Component):
63
  default_model: Default pose estimation model ("mediapipe", "movenet", "yolo")
64
  enable_visualization: Whether to generate visualization video by default
65
  include_keypoints: Whether to include raw keypoints in JSON output
66
- enable_webrtc: Whether to enable WebRTC real-time analysis
67
  label: Component label
68
  ... (other standard Gradio component args)
69
  """
@@ -85,8 +80,6 @@ class LabanMovementAnalysis(Component):
85
  self.default_model = default_model
86
  self.enable_visualization = enable_visualization
87
  self.include_keypoints = include_keypoints
88
- self.enable_webrtc = enable_webrtc and HAS_WEBRTC
89
-
90
  # Cache for pose estimators
91
  self._estimators = {}
92
 
@@ -94,14 +87,6 @@ class LabanMovementAnalysis(Component):
94
  self.video_input = SmartVideoInput()
95
 
96
  # SkateFormer features reserved for Version 2
97
-
98
- self.webrtc_analyzer = None
99
- if self.enable_webrtc:
100
- try:
101
- self.webrtc_analyzer = WebRTCMovementAnalyzer(model=default_model)
102
- except Exception as e:
103
- print(f"Warning: Failed to initialize WebRTC: {e}")
104
- self.enable_webrtc = False
105
 
106
  def preprocess(self, payload: Dict[str, Any]) -> Dict[str, Any]:
107
  """
@@ -293,61 +278,7 @@ class LabanMovementAnalysis(Component):
293
  """
294
  return self.process_video(video_path, **kwargs)
295
 
296
- def start_webrtc_stream(self, model: str = None) -> bool:
297
- """
298
- Start WebRTC real-time analysis stream.
299
-
300
- Args:
301
- model: Pose model to use for real-time analysis
302
-
303
- Returns:
304
- True if stream started successfully
305
- """
306
- if not self.enable_webrtc or not self.webrtc_analyzer:
307
- print("WebRTC not enabled or available")
308
- return False
309
-
310
- try:
311
- if model:
312
- self.webrtc_analyzer.model = model
313
- self.webrtc_analyzer.pose_estimator = get_pose_estimator(model)
314
-
315
- self.webrtc_analyzer.start_stream()
316
- print(f"WebRTC stream started with {self.webrtc_analyzer.model} model")
317
- return True
318
- except Exception as e:
319
- print(f"Failed to start WebRTC stream: {e}")
320
- return False
321
-
322
- def stop_webrtc_stream(self) -> bool:
323
- """
324
- Stop WebRTC real-time analysis stream.
325
-
326
- Returns:
327
- True if stream stopped successfully
328
- """
329
- if not self.webrtc_analyzer:
330
- return False
331
-
332
- try:
333
- self.webrtc_analyzer.stop_stream()
334
- print("WebRTC stream stopped")
335
- return True
336
- except Exception as e:
337
- print(f"Failed to stop WebRTC stream: {e}")
338
- return False
339
-
340
- def get_webrtc_interface(self):
341
- """
342
- Get WebRTC Gradio interface for real-time streaming.
343
-
344
- Returns:
345
- WebRTCGradioInterface instance or None
346
- """
347
- if not self.enable_webrtc or not self.webrtc_analyzer:
348
- return None
349
-
350
- return WebRTCGradioInterface(self.webrtc_analyzer)
351
 
352
  # SkateFormer methods moved to Version 2 development
353
  # get_skateformer_compatibility() and get_skateformer_status_report()
@@ -358,10 +289,6 @@ class LabanMovementAnalysis(Component):
358
  # Clean up video input handler
359
  if hasattr(self, 'video_input'):
360
  self.video_input.cleanup()
361
-
362
- # Stop WebRTC if running
363
- if self.webrtc_analyzer and self.webrtc_analyzer.is_running:
364
- self.stop_webrtc_stream()
365
 
366
  def example_payload(self) -> Dict[str, Any]:
367
  """Example input payload for documentation."""
 
19
  # Advanced features reserved for Version 2
20
  # SkateFormer AI integration will be available in future release
21
 
 
 
 
 
 
22
 
23
 
24
  # SkateFormerCompatibility class removed for Version 1 stability
 
38
  default_model: str = DEFAULT_MODEL,
39
  enable_visualization: bool = True,
40
  include_keypoints: bool = False,
41
+
42
  label: Optional[str] = None,
43
  every: Optional[float] = None,
44
  show_label: Optional[bool] = None,
 
58
  default_model: Default pose estimation model ("mediapipe", "movenet", "yolo")
59
  enable_visualization: Whether to generate visualization video by default
60
  include_keypoints: Whether to include raw keypoints in JSON output
61
+
62
  label: Component label
63
  ... (other standard Gradio component args)
64
  """
 
80
  self.default_model = default_model
81
  self.enable_visualization = enable_visualization
82
  self.include_keypoints = include_keypoints
 
 
83
  # Cache for pose estimators
84
  self._estimators = {}
85
 
 
87
  self.video_input = SmartVideoInput()
88
 
89
  # SkateFormer features reserved for Version 2
 
 
 
 
 
 
 
 
90
 
91
  def preprocess(self, payload: Dict[str, Any]) -> Dict[str, Any]:
92
  """
 
278
  """
279
  return self.process_video(video_path, **kwargs)
280
 
281
+
 
 
 
 
282
 
283
  # SkateFormer methods moved to Version 2 development
284
  # get_skateformer_compatibility() and get_skateformer_status_report()
 
289
  # Clean up video input handler
290
  if hasattr(self, 'video_input'):
291
  self.video_input.cleanup()
 
 
 
 
292
 
293
  def example_payload(self) -> Dict[str, Any]:
294
  """Example input payload for documentation."""
backend/gradio_labanmovementanalysis/labanmovementanalysis.pyi DELETED
@@ -1,448 +0,0 @@
1
- """
2
- Custom Gradio v5 component for video-based pose analysis with LMA-inspired metrics.
3
- """
4
-
5
- import gradio as gr
6
- from gradio.components.base import Component
7
- from typing import Dict, Any, Optional, Tuple, List, Union
8
- import tempfile
9
- import os
10
- import numpy as np
11
-
12
- from .video_utils import extract_frames, get_video_info
13
- from .pose_estimation import get_pose_estimator
14
- from .notation_engine import analyze_pose_sequence
15
- from .json_generator import generate_json, format_for_display
16
- from .visualizer import PoseVisualizer
17
- from .video_downloader import SmartVideoInput
18
-
19
- # Optional advanced features
20
- try:
21
- from .skateformer_integration import SkateFormerAnalyzer
22
- HAS_SKATEFORMER = True
23
- except ImportError:
24
- HAS_SKATEFORMER = False
25
-
26
- try:
27
- from .webrtc_handler import WebRTCMovementAnalyzer, WebRTCGradioInterface
28
- HAS_WEBRTC = True
29
- except ImportError:
30
- HAS_WEBRTC = False
31
-
32
- from gradio.events import Dependency
33
-
34
- class LabanMovementAnalysis(Component):
35
- """
36
- Gradio component for video-based pose analysis with Laban Movement Analysis metrics.
37
- """
38
-
39
- # Component metadata
40
- COMPONENT_TYPE = "composite"
41
- DEFAULT_MODEL = "mediapipe"
42
-
43
- def __init__(self,
44
- default_model: str = DEFAULT_MODEL,
45
- enable_visualization: bool = True,
46
- include_keypoints: bool = False,
47
- enable_webrtc: bool = False,
48
- label: Optional[str] = None,
49
- every: Optional[float] = None,
50
- show_label: Optional[bool] = None,
51
- container: bool = True,
52
- scale: Optional[int] = None,
53
- min_width: int = 160,
54
- interactive: Optional[bool] = None,
55
- visible: bool = True,
56
- elem_id: Optional[str] = None,
57
- elem_classes: Optional[List[str]] = None,
58
- render: bool = True,
59
- **kwargs):
60
- """
61
- Initialize the Laban Movement Analysis component.
62
-
63
- Args:
64
- default_model: Default pose estimation model ("mediapipe", "movenet", "yolo")
65
- enable_visualization: Whether to generate visualization video by default
66
- include_keypoints: Whether to include raw keypoints in JSON output
67
- enable_webrtc: Whether to enable WebRTC real-time analysis
68
- label: Component label
69
- ... (other standard Gradio component args)
70
- """
71
- super().__init__(
72
- label=label,
73
- every=every,
74
- show_label=show_label,
75
- container=container,
76
- scale=scale,
77
- min_width=min_width,
78
- interactive=interactive,
79
- visible=visible,
80
- elem_id=elem_id,
81
- elem_classes=elem_classes,
82
- render=render,
83
- **kwargs
84
- )
85
-
86
- self.default_model = default_model
87
- self.enable_visualization = enable_visualization
88
- self.include_keypoints = include_keypoints
89
- self.enable_webrtc = enable_webrtc and HAS_WEBRTC
90
-
91
- # Cache for pose estimators
92
- self._estimators = {}
93
-
94
- # Video input handler for URLs
95
- self.video_input = SmartVideoInput()
96
-
97
- # SkateFormer features reserved for Version 2
98
-
99
- self.webrtc_analyzer = None
100
- if self.enable_webrtc:
101
- try:
102
- self.webrtc_analyzer = WebRTCMovementAnalyzer(model=default_model)
103
- except Exception as e:
104
- print(f"Warning: Failed to initialize WebRTC: {e}")
105
- self.enable_webrtc = False
106
-
107
- def preprocess(self, payload: Dict[str, Any]) -> Dict[str, Any]:
108
- """
109
- Preprocess input from the frontend.
110
-
111
- Args:
112
- payload: Input data containing video file and options
113
-
114
- Returns:
115
- Processed data for analysis
116
- """
117
- if not payload:
118
- return None
119
-
120
- # Extract video file path
121
- video_data = payload.get("video")
122
- if not video_data:
123
- return None
124
-
125
- # Handle different input formats
126
- if isinstance(video_data, str):
127
- video_path = video_data
128
- elif isinstance(video_data, dict):
129
- video_path = video_data.get("path") or video_data.get("name")
130
- else:
131
- # Assume it's a file object
132
- video_path = video_data.name if hasattr(video_data, "name") else str(video_data)
133
-
134
- # Extract options
135
- options = {
136
- "video_path": video_path,
137
- "model": payload.get("model", self.default_model),
138
- "enable_visualization": payload.get("enable_visualization", self.enable_visualization),
139
- "include_keypoints": payload.get("include_keypoints", self.include_keypoints)
140
- }
141
-
142
- return options
143
-
144
- def postprocess(self, value: Any) -> Dict[str, Any]:
145
- """
146
- Postprocess analysis results for the frontend.
147
-
148
- Args:
149
- value: Analysis results
150
-
151
- Returns:
152
- Formatted output for display
153
- """
154
- if value is None:
155
- return {"json_output": {}, "video_output": None}
156
-
157
- # Ensure we have the expected format
158
- if isinstance(value, tuple) and len(value) == 2:
159
- json_data, video_path = value
160
- else:
161
- json_data = value
162
- video_path = None
163
-
164
- return {
165
- "json_output": json_data,
166
- "video_output": video_path
167
- }
168
-
169
- def process_video(self, video_input: Union[str, os.PathLike], model: str = DEFAULT_MODEL,
170
- enable_visualization: bool = True,
171
- include_keypoints: bool = False) -> Tuple[Dict[str, Any], Optional[str]]:
172
- """
173
- Main processing function that performs pose analysis on a video.
174
-
175
- Args:
176
- video_input: Path to input video, video URL (YouTube/Vimeo), or file object
177
- model: Pose estimation model to use (supports enhanced syntax like "yolo-v11-s")
178
- enable_visualization: Whether to generate visualization video
179
- include_keypoints: Whether to include keypoints in JSON
180
-
181
- Returns:
182
- Tuple of (analysis_json, visualization_video_path)
183
- """
184
- # Handle video input (local file, URL, etc.)
185
- try:
186
- video_path, video_metadata = self.video_input.process_input(str(video_input))
187
- print(f"Processing video: {video_metadata.get('title', 'Unknown')}")
188
- if video_metadata.get('platform') in ['youtube', 'vimeo']:
189
- print(f"Downloaded from {video_metadata['platform']}")
190
- except Exception as e:
191
- raise ValueError(f"Failed to process video input: {str(e)}")
192
- # Get video metadata
193
- frame_count, fps, (width, height) = get_video_info(video_path)
194
-
195
- # Create or get pose estimator
196
- if model not in self._estimators:
197
- self._estimators[model] = get_pose_estimator(model)
198
- estimator = self._estimators[model]
199
-
200
- # Process video frame by frame
201
- print(f"Processing {frame_count} frames with {model} model...")
202
-
203
- all_frames = []
204
- all_pose_results = []
205
-
206
- for i, frame in enumerate(extract_frames(video_path)):
207
- # Store frame if visualization is needed
208
- if enable_visualization:
209
- all_frames.append(frame)
210
-
211
- # Detect poses
212
- pose_results = estimator.detect(frame)
213
-
214
- # Update frame indices
215
- for result in pose_results:
216
- result.frame_index = i
217
-
218
- all_pose_results.append(pose_results)
219
-
220
- # Progress indicator
221
- if i % 30 == 0:
222
- print(f"Processed {i}/{frame_count} frames...")
223
-
224
- print("Analyzing movement patterns...")
225
-
226
- # Analyze movement
227
- movement_metrics = analyze_pose_sequence(all_pose_results, fps=fps)
228
-
229
- # Enhanced AI analysis reserved for Version 2
230
- print("LMA analysis complete - advanced AI features coming in Version 2!")
231
-
232
- # Generate JSON output
233
- video_metadata = {
234
- "fps": fps,
235
- "width": width,
236
- "height": height,
237
- "frame_count": frame_count,
238
- "model_info": {
239
- "name": model,
240
- "type": "pose_estimation"
241
- },
242
- "input_metadata": video_metadata # Include video source metadata
243
- }
244
-
245
- json_output = generate_json(
246
- movement_metrics,
247
- all_pose_results if include_keypoints else None,
248
- video_metadata,
249
- include_keypoints=include_keypoints
250
- )
251
-
252
- # Enhanced AI analysis will be added in Version 2
253
-
254
- # Generate visualization if requested
255
- visualization_path = None
256
- if enable_visualization:
257
- print("Generating visualization video...")
258
-
259
- # Create temporary output file
260
- with tempfile.NamedTemporaryFile(suffix='.mp4', delete=False) as tmp:
261
- visualization_path = tmp.name
262
-
263
- # Create visualizer
264
- visualizer = PoseVisualizer(
265
- show_trails=True,
266
- show_skeleton=True,
267
- show_direction_arrows=True,
268
- show_metrics=True
269
- )
270
-
271
- # Generate overlay video
272
- visualization_path = visualizer.generate_overlay_video(
273
- all_frames,
274
- all_pose_results,
275
- movement_metrics,
276
- visualization_path,
277
- fps
278
- )
279
-
280
- print(f"Visualization saved to: {visualization_path}")
281
-
282
- return json_output, visualization_path
283
-
284
- def __call__(self, video_path: str, **kwargs) -> Tuple[Dict[str, Any], Optional[str]]:
285
- """
286
- Make the component callable for easy use.
287
-
288
- Args:
289
- video_path: Path to video file
290
- **kwargs: Additional options
291
-
292
- Returns:
293
- Analysis results
294
- """
295
- return self.process_video(video_path, **kwargs)
296
-
297
- def start_webrtc_stream(self, model: str = None) -> bool:
298
- """
299
- Start WebRTC real-time analysis stream.
300
-
301
- Args:
302
- model: Pose model to use for real-time analysis
303
-
304
- Returns:
305
- True if stream started successfully
306
- """
307
- if not self.enable_webrtc or not self.webrtc_analyzer:
308
- print("WebRTC not enabled or available")
309
- return False
310
-
311
- try:
312
- if model:
313
- self.webrtc_analyzer.model = model
314
- self.webrtc_analyzer.pose_estimator = get_pose_estimator(model)
315
-
316
- self.webrtc_analyzer.start_stream()
317
- print(f"WebRTC stream started with {self.webrtc_analyzer.model} model")
318
- return True
319
- except Exception as e:
320
- print(f"Failed to start WebRTC stream: {e}")
321
- return False
322
-
323
- def stop_webrtc_stream(self) -> bool:
324
- """
325
- Stop WebRTC real-time analysis stream.
326
-
327
- Returns:
328
- True if stream stopped successfully
329
- """
330
- if not self.webrtc_analyzer:
331
- return False
332
-
333
- try:
334
- self.webrtc_analyzer.stop_stream()
335
- print("WebRTC stream stopped")
336
- return True
337
- except Exception as e:
338
- print(f"Failed to stop WebRTC stream: {e}")
339
- return False
340
-
341
- def get_webrtc_interface(self):
342
- """
343
- Get WebRTC Gradio interface for real-time streaming.
344
-
345
- Returns:
346
- WebRTCGradioInterface instance or None
347
- """
348
- if not self.enable_webrtc or not self.webrtc_analyzer:
349
- return None
350
-
351
- return WebRTCGradioInterface(self.webrtc_analyzer)
352
-
353
- # SkateFormer methods moved to Version 2 development
354
- # get_skateformer_compatibility() and get_skateformer_status_report()
355
- # will be available in the next major release
356
-
357
- def cleanup(self):
358
- """Clean up temporary files and resources."""
359
- # Clean up video input handler
360
- if hasattr(self, 'video_input'):
361
- self.video_input.cleanup()
362
-
363
- # Stop WebRTC if running
364
- if self.webrtc_analyzer and self.webrtc_analyzer.is_running:
365
- self.stop_webrtc_stream()
366
-
367
- def example_payload(self) -> Dict[str, Any]:
368
- """Example input payload for documentation."""
369
- return {
370
- "video": {"path": "/path/to/video.mp4"},
371
- "model": "mediapipe",
372
- "enable_visualization": True,
373
- "include_keypoints": False
374
- }
375
-
376
- def example_value(self) -> Dict[str, Any]:
377
- """Example output value for documentation."""
378
- return {
379
- "json_output": {
380
- "analysis_metadata": {
381
- "timestamp": "2024-01-01T00:00:00",
382
- "version": "1.0.0",
383
- "model_info": {"name": "mediapipe", "type": "pose_estimation"}
384
- },
385
- "video_info": {
386
- "fps": 30.0,
387
- "duration_seconds": 5.0,
388
- "width": 1920,
389
- "height": 1080,
390
- "frame_count": 150
391
- },
392
- "movement_analysis": {
393
- "frame_count": 150,
394
- "frames": [
395
- {
396
- "frame_index": 0,
397
- "timestamp": 0.0,
398
- "metrics": {
399
- "direction": "stationary",
400
- "intensity": "low",
401
- "speed": "slow",
402
- "velocity": 0.0,
403
- "acceleration": 0.0,
404
- "fluidity": 1.0,
405
- "expansion": 0.5
406
- }
407
- }
408
- ],
409
- "summary": {
410
- "direction": {
411
- "distribution": {"stationary": 50, "up": 30, "down": 20},
412
- "dominant": "stationary"
413
- },
414
- "intensity": {
415
- "distribution": {"low": 80, "medium": 15, "high": 5},
416
- "dominant": "low"
417
- }
418
- }
419
- }
420
- },
421
- "video_output": "/tmp/visualization.mp4"
422
- }
423
-
424
- def api_info(self) -> Dict[str, Any]:
425
- """API information for the component."""
426
- return {
427
- "type": "composite",
428
- "description": "Video-based pose analysis with Laban Movement Analysis metrics",
429
- "parameters": {
430
- "video": {"type": "file", "description": "Input video file or URL (YouTube/Vimeo)"},
431
- "model": {"type": "string", "description": "Pose model: mediapipe, movenet, or yolo variants"},
432
- "enable_visualization": {"type": "integer", "description": "Generate visualization video (1=yes, 0=no)"},
433
- "include_keypoints": {"type": "integer", "description": "Include keypoints in JSON (1=yes, 0=no)"}
434
- },
435
- "returns": {
436
- "json_output": {"type": "object", "description": "LMA analysis results"},
437
- "video_output": {"type": "file", "description": "Visualization video (optional)"}
438
- },
439
- "version_2_preview": {
440
- "planned_features": ["SkateFormer AI integration", "Enhanced movement recognition", "Real-time analysis"],
441
- "note": "Advanced AI features coming in Version 2!"
442
- }
443
- }
444
- from typing import Callable, Literal, Sequence, Any, TYPE_CHECKING
445
- from gradio.blocks import Block
446
- if TYPE_CHECKING:
447
- from gradio.components import Timer
448
- from gradio.components.base import Component
 
 
 
 
backend/gradio_labanmovementanalysis/webrtc_handler.py DELETED
@@ -1,293 +0,0 @@
1
- """
2
- Professional WebRTC handler for real-time video streaming and movement analysis
3
- Built on the gradio-webrtc streaming approach (FastRTC, its successor, is avoided here for NumPy 1.x compatibility)
4
- Based on: https://fastrtc.org and https://www.gradio.app/guides/object-detection-from-webcam-with-webrtc
5
- """
6
-
7
- import cv2
8
- import numpy as np
9
- from typing import Optional, Dict, Any, Tuple
10
- from collections import deque
11
- import time
12
- import logging
13
- import os
14
-
15
- from .pose_estimation import get_pose_estimator
16
- from .notation_engine import MovementAnalyzer
17
- from .visualizer import PoseVisualizer
18
-
19
- logger = logging.getLogger(__name__)
20
-
21
- # Official Gradio WebRTC approach (compatible with NumPy 1.x)
22
- try:
23
- from gradio_webrtc import WebRTC
24
- HAS_WEBRTC_COMPONENT = True
25
- except ImportError:
26
- HAS_WEBRTC_COMPONENT = False
27
-
28
-
29
- class RealtimeMovementAnalyzer:
30
- """Real-time movement analyzer for WebRTC streams following Gradio 5 best practices"""
31
-
32
- # Gradio component compatibility
33
- events = {}
34
-
35
- def __init__(self, model: str = "mediapipe-lite", buffer_size: int = 30):
36
- """
37
- Initialize real-time movement analyzer.
38
-
39
- Args:
40
- model: Pose estimation model optimized for real-time processing
41
- buffer_size: Number of frames to buffer for analysis
42
- """
43
- self.model = model
44
- self.pose_estimator = get_pose_estimator(model)
45
- self.movement_analyzer = MovementAnalyzer(fps=30.0)
46
- self.visualizer = PoseVisualizer(
47
- trail_length=10,
48
- show_skeleton=True,
49
- show_trails=True,
50
- show_direction_arrows=True,
51
- show_metrics=True
52
- )
53
-
54
- # Real-time buffers
55
- self.pose_buffer = deque(maxlen=buffer_size)
56
- self.metrics_buffer = deque(maxlen=buffer_size)
57
-
58
- # Performance tracking
59
- self.frame_count = 0
60
- self.last_fps_update = time.time()
61
- self.current_fps = 0.0
62
-
63
- # Current metrics for display
64
- self.current_metrics = {
65
- "direction": "stationary",
66
- "intensity": "low",
67
- "fluidity": 0.0,
68
- "expansion": 0.5,
69
- "fps": 0.0
70
- }
71
-
72
- def process_frame(self, image: np.ndarray, conf_threshold: float = 0.5) -> np.ndarray:
73
- """
74
- Process a single frame from WebRTC stream for real-time movement analysis.
75
-
76
- Args:
77
- image: Input frame from webcam as numpy array (RGB format from WebRTC)
78
- conf_threshold: Confidence threshold for pose detection
79
-
80
- Returns:
81
- Processed frame with pose overlay and movement metrics
82
- """
83
- if image is None:
84
- return None
85
-
86
- # Convert RGB to BGR for OpenCV processing
87
- frame_bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
88
-
89
- # Update frame count and FPS
90
- self.frame_count += 1
91
- current_time = time.time()
92
- if current_time - self.last_fps_update >= 1.0:
93
- self.current_fps = self.frame_count / (current_time - self.last_fps_update)
94
- self.frame_count = 0
95
- self.last_fps_update = current_time
96
- self.current_metrics["fps"] = self.current_fps
97
-
98
- # Pose detection
99
- pose_results = self.pose_estimator.detect(frame_bgr)
100
-
101
- # Store pose data
102
- self.pose_buffer.append(pose_results)
103
-
104
- # Calculate movement metrics if we have enough frames
105
- if len(self.pose_buffer) >= 2:
106
- recent_poses = list(self.pose_buffer)[-10:] # Last 10 frames for analysis
107
-
108
- try:
109
- # Analyze movement from recent poses
110
- movement_metrics = self.movement_analyzer.analyze_movement(recent_poses)
111
-
112
- if movement_metrics:
113
- latest_metrics = movement_metrics[-1]
114
- self.current_metrics.update({
115
- "direction": latest_metrics.direction.value if latest_metrics.direction else "stationary",
116
- "intensity": latest_metrics.intensity.value if latest_metrics.intensity else "low",
117
- "fluidity": latest_metrics.fluidity if latest_metrics.fluidity is not None else 0.0,
118
- "expansion": latest_metrics.expansion if latest_metrics.expansion is not None else 0.5
119
- })
120
-
121
- self.metrics_buffer.append(self.current_metrics.copy())
122
-
123
- except Exception as e:
124
- logger.warning(f"Movement analysis error: {e}")
125
-
126
- # Apply visualization overlays
127
- output_frame = self._apply_visualization(frame_bgr, pose_results, self.current_metrics)
128
-
129
- # Convert back to RGB for WebRTC output
130
- output_rgb = cv2.cvtColor(output_frame, cv2.COLOR_BGR2RGB)
131
-
132
- return output_rgb
133
-
134
- def _apply_visualization(self, frame: np.ndarray, pose_results: list, metrics: dict) -> np.ndarray:
135
- """Apply pose and movement visualization overlays"""
136
- output_frame = frame.copy()
137
-
138
- # Draw pose skeleton if detected
139
- if pose_results:
140
- for pose_result in pose_results:
141
- # Draw skeleton
142
- if hasattr(self.visualizer, 'draw_skeleton'):
143
- output_frame = self.visualizer.draw_skeleton(output_frame, pose_result.keypoints)
144
-
145
- # Draw keypoints
146
- for keypoint in pose_result.keypoints:
147
- if keypoint.confidence > 0.5:
148
- x = int(keypoint.x * frame.shape[1])
149
- y = int(keypoint.y * frame.shape[0])
150
- cv2.circle(output_frame, (x, y), 5, (0, 255, 0), -1)
151
-
152
- # Draw real-time metrics overlay
153
- self._draw_metrics_overlay(output_frame, metrics)
154
-
155
- return output_frame
156
-
157
- def _draw_metrics_overlay(self, frame: np.ndarray, metrics: dict):
158
- """Draw real-time metrics overlay following professional UI standards"""
159
- h, w = frame.shape[:2]
160
-
161
- # Semi-transparent background
162
- overlay = frame.copy()
163
- cv2.rectangle(overlay, (10, 10), (320, 160), (0, 0, 0), -1)
164
- cv2.addWeighted(overlay, 0.3, frame, 0.7, 0, frame)
165
-
166
- # Header
167
- cv2.putText(frame, "Real-time Movement Analysis", (20, 35),
168
- cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
169
-
170
- # Metrics
171
- y_offset = 60
172
- spacing = 22
173
-
174
- cv2.putText(frame, f"Direction: {metrics['direction']}",
175
- (20, y_offset), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
176
- y_offset += spacing
177
-
178
- cv2.putText(frame, f"Intensity: {metrics['intensity']}",
179
- (20, y_offset), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
180
- y_offset += spacing
181
-
182
- cv2.putText(frame, f"Fluidity: {metrics['fluidity']:.2f}",
183
- (20, y_offset), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
184
- y_offset += spacing
185
-
186
- cv2.putText(frame, f"FPS: {metrics['fps']:.1f}",
187
- (20, y_offset), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 0), 1)
188
-
189
- def get_current_metrics(self) -> dict:
190
- """Get current movement metrics for external display"""
191
- return self.current_metrics.copy()
192
-
193
-
194
- def get_rtc_configuration():
195
- """
196
- Get RTC configuration for WebRTC.
197
- Uses Twilio TURN servers if credentials are available, otherwise uses default.
198
- """
199
- # For local development, no TURN servers needed
200
- # For cloud deployment, set TWILIO_ACCOUNT_SID and TWILIO_AUTH_TOKEN
201
-
202
- twilio_account_sid = os.getenv("TWILIO_ACCOUNT_SID")
203
- twilio_auth_token = os.getenv("TWILIO_AUTH_TOKEN")
204
-
205
- if twilio_account_sid and twilio_auth_token:
206
- # Use Twilio TURN servers for cloud deployment
207
- return {
208
- "iceServers": [
209
- {"urls": ["stun:global.stun.twilio.com:3478"]},
210
- {
211
- "urls": ["turn:global.turn.twilio.com:3478?transport=udp"],
212
- "username": twilio_account_sid,
213
- "credential": twilio_auth_token,
214
- },
215
- {
216
- "urls": ["turn:global.turn.twilio.com:3478?transport=tcp"],
217
- "username": twilio_account_sid,
218
- "credential": twilio_auth_token,
219
- },
220
- ]
221
- }
222
- else:
223
- # Default configuration for local development
224
- return {
225
- "iceServers": [
226
- {"urls": ["stun:stun.l.google.com:19302"]}
227
- ]
228
- }
229
-
230
-
231
- # Global analyzer instance for demo
232
- _analyzer = None
233
-
234
- def get_analyzer(model: str = "mediapipe-lite") -> RealtimeMovementAnalyzer:
235
- """Get or create analyzer instance"""
236
- global _analyzer
237
- if _analyzer is None or _analyzer.model != model:
238
- _analyzer = RealtimeMovementAnalyzer(model)
239
- return _analyzer
240
-
241
-
242
- def webrtc_detection(image: np.ndarray, model: str, conf_threshold: float = 0.5) -> np.ndarray:
243
- """
244
- Main detection function for WebRTC streaming.
245
- Compatible with Gradio 5 WebRTC streaming API.
246
-
247
- Args:
248
- image: Input frame from webcam (RGB format)
249
- model: Pose estimation model name
250
- conf_threshold: Confidence threshold for pose detection
251
-
252
- Returns:
253
- Processed frame with pose overlay and metrics
254
- """
255
- analyzer = get_analyzer(model)
256
- return analyzer.process_frame(image, conf_threshold)
257
-
258
-
259
- def get_webrtc_interface():
260
- """
261
- Create streaming interface using built-in Gradio components.
262
- Avoids NumPy 2.x dependency conflicts with FastRTC.
263
-
264
- Returns:
265
- Tuple of (streaming_config, rtc_configuration)
266
- """
267
- rtc_config = get_rtc_configuration()
268
-
269
- # Use built-in Gradio streaming capabilities
270
- streaming_config = {
271
- "sources": ["webcam"],
272
- "streaming": True,
273
- "mirror_webcam": False
274
- }
275
-
276
- return streaming_config, rtc_config
277
-
278
-
279
- # Compatibility exports with Gradio component attributes
280
- class WebRTCMovementAnalyzer(RealtimeMovementAnalyzer):
281
- """Real-time movement analyzer for WebRTC streams following Gradio 5 best practices"""
282
- events = {} # Gradio component compatibility
283
-
284
-
285
- class WebRTCGradioInterface:
286
- """Create streaming interface using built-in Gradio components.
287
- Avoids NumPy 2.x dependency conflicts with FastRTC."""
288
-
289
- events = {} # Gradio component compatibility
290
-
291
- @staticmethod
292
- def get_config():
293
- return get_webrtc_interface()
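For context, a minimal sketch of how the handler above is wired into a Gradio app, following the webcam-streaming guide cited in its docstring; the exact `WebRTC` component arguments are assumptions taken from that guide and may differ between `gradio_webrtc` versions:

```python
import gradio as gr
from gradio_webrtc import WebRTC
from gradio_labanmovementanalysis.webrtc_handler import (
    webrtc_detection,
    get_rtc_configuration,
)

# STUN-only config locally; Twilio TURN servers when credentials are set
rtc_config = get_rtc_configuration()

with gr.Blocks() as demo:
    stream = WebRTC(label="Webcam", rtc_configuration=rtc_config)
    model = gr.Dropdown(["mediapipe-lite", "mediapipe"], value="mediapipe-lite", label="Pose Model")
    conf = gr.Slider(0.0, 1.0, value=0.5, label="Confidence Threshold")
    # Each webcam frame is annotated by webrtc_detection() and streamed back
    stream.stream(fn=webrtc_detection, inputs=[stream, model, conf], outputs=[stream], time_limit=60)

if __name__ == "__main__":
    demo.launch()
```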
 
demo/app.py CHANGED
@@ -21,13 +21,12 @@ def create_demo() -> gr.Blocks:
21
  ) as demo:
22
 
23
  # ── Hero banner ──
24
- gr.HTML(
25
  """
26
- <div class="main-header">
27
- <h1>🎭 Laban Movement Analysis – Complete Suite</h1>
28
- <p>Pose estimation • AI action recognition • Real-time agents</p>
29
- <p style="font-size:.85rem;opacity:.85">v0.01-beta • 20+ pose models • MCP</p>
30
- </div>
31
  """
32
  )
33
 
@@ -58,14 +57,16 @@ def create_demo() -> gr.Blocks:
58
  )
59
 
60
  # Footer
61
- gr.HTML(
62
- """
63
- <div class="author-info">
64
- Built by Csaba Bolyós
65
- <a href="https://github.com/bladeszasza" target="_blank">GitHub</a>
66
- <a href="https://huggingface.co/BladeSzaSza" target="_blank">HF</a>
67
- </div>
68
- """
69
- )
70
  return demo
71
-
 
 
 
 
 
21
  ) as demo:
22
 
23
  # ── Hero banner ──
24
+ gr.Markdown(
25
  """
26
+ # 🎭 Laban Movement Analysis – Complete Suite
27
+
28
+ Pose estimation • AI action recognition • Real-time agents
29
+ **v0.01-beta • 20+ pose models • MCP**
 
30
  """
31
  )
32
 
 
57
  )
58
 
59
  # Footer
60
+ with gr.Row():
61
+ gr.Markdown(
62
+ """
63
+ **Built by Csaba Bolyós**
64
+ [GitHub](https://github.com/bladeszasza) [HF](https://huggingface.co/BladeSzaSza)
65
+ """
66
+ )
 
 
67
  return demo
68
+
69
+ if __name__ == "__main__":
70
+ print("🚀 Starting Laban Movement Analysis...")
71
+ demo = create_demo()
72
+
demo/css.css ADDED
@@ -0,0 +1,157 @@
1
+ html {
2
+ font-family: Inter;
3
+ font-size: 16px;
4
+ font-weight: 400;
5
+ line-height: 1.5;
6
+ -webkit-text-size-adjust: 100%;
7
+ background: #fff;
8
+ color: #323232;
9
+ -webkit-font-smoothing: antialiased;
10
+ -moz-osx-font-smoothing: grayscale;
11
+ text-rendering: optimizeLegibility;
12
+ }
13
+
14
+ :root {
15
+ --space: 1;
16
+ --vspace: calc(var(--space) * 1rem);
17
+ --vspace-0: calc(3 * var(--space) * 1rem);
18
+ --vspace-1: calc(2 * var(--space) * 1rem);
19
+ --vspace-2: calc(1.5 * var(--space) * 1rem);
20
+ --vspace-3: calc(0.5 * var(--space) * 1rem);
21
+ }
22
+
23
+ .app {
24
+ max-width: 748px !important;
25
+ }
26
+
27
+ .prose p {
28
+ margin: var(--vspace) 0;
29
+ line-height: calc(var(--vspace) * 2);
30
+ font-size: 1rem;
31
+ }
32
+
33
+ code {
34
+ font-family: "Inconsolata", sans-serif;
35
+ font-size: 16px;
36
+ }
37
+
38
+ h1,
39
+ h1 code {
40
+ font-weight: 400;
41
+ line-height: calc(2.5 / var(--space) * var(--vspace));
42
+ }
43
+
44
+ h1 code {
45
+ background: none;
46
+ border: none;
47
+ letter-spacing: 0.05em;
48
+ padding-bottom: 5px;
49
+ position: relative;
50
+ padding: 0;
51
+ }
52
+
53
+ h2 {
54
+ margin: var(--vspace-1) 0 var(--vspace-2) 0;
55
+ line-height: 1em;
56
+ }
57
+
58
+ h3,
59
+ h3 code {
60
+ margin: var(--vspace-1) 0 var(--vspace-2) 0;
61
+ line-height: 1em;
62
+ }
63
+
64
+ h4,
65
+ h5,
66
+ h6 {
67
+ margin: var(--vspace-3) 0 var(--vspace-3) 0;
68
+ line-height: var(--vspace);
69
+ }
70
+
71
+ .bigtitle,
72
+ h1,
73
+ h1 code {
74
+ font-size: calc(8px * 4.5);
75
+ word-break: break-word;
76
+ }
77
+
78
+ .title,
79
+ h2,
80
+ h2 code {
81
+ font-size: calc(8px * 3.375);
82
+ font-weight: lighter;
83
+ word-break: break-word;
84
+ border: none;
85
+ background: none;
86
+ }
87
+
88
+ .subheading1,
89
+ h3,
90
+ h3 code {
91
+ font-size: calc(8px * 1.8);
92
+ font-weight: 600;
93
+ border: none;
94
+ background: none;
95
+ letter-spacing: 0.1em;
96
+ text-transform: uppercase;
97
+ }
98
+
99
+ h2 code {
100
+ padding: 0;
101
+ position: relative;
102
+ letter-spacing: 0.05em;
103
+ }
104
+
105
+ blockquote {
106
+ font-size: calc(8px * 1.1667);
107
+ font-style: italic;
108
+ line-height: calc(1.1667 * var(--vspace));
109
+ margin: var(--vspace-2) var(--vspace-2);
110
+ }
111
+
112
+ .subheading2,
113
+ h4 {
114
+ font-size: calc(8px * 1.4292);
115
+ text-transform: uppercase;
116
+ font-weight: 600;
117
+ }
118
+
119
+ .subheading3,
120
+ h5 {
121
+ font-size: calc(8px * 1.2917);
122
+ line-height: calc(1.2917 * var(--vspace));
123
+
124
+ font-weight: lighter;
125
+ text-transform: uppercase;
126
+ letter-spacing: 0.15em;
127
+ }
128
+
129
+ h6 {
130
+ font-size: calc(8px * 1.1667);
131
+ font-size: 1.1667em;
132
+ font-weight: normal;
133
+ font-style: italic;
134
+ font-family: "le-monde-livre-classic-byol", serif !important;
135
+ letter-spacing: 0px !important;
136
+ }
137
+
138
+ #start .md > *:first-child {
139
+ margin-top: 0;
140
+ }
141
+
142
+ h2 + h3 {
143
+ margin-top: 0;
144
+ }
145
+
146
+ .md hr {
147
+ border: none;
148
+ border-top: 1px solid var(--block-border-color);
149
+ margin: var(--vspace-2) 0 var(--vspace-2) 0;
150
+ }
151
+ .prose ul {
152
+ margin: var(--vspace-2) 0 var(--vspace-1) 0;
153
+ }
154
+
155
+ .gap {
156
+ gap: 0;
157
+ }
demo/space.py ADDED
@@ -0,0 +1,183 @@
1
+ import gradio as gr
2
+ from app import demo as app
3
+ import os
4
+
5
+ _docs = {'LabanMovementAnalysis': {'description': 'Gradio component for video-based pose analysis with Laban Movement Analysis metrics.', 'members': {'__init__': {'default_model': {'type': 'str', 'default': '"mediapipe"', 'description': 'Default pose estimation model ("mediapipe", "movenet", "yolo")'}, 'enable_visualization': {'type': 'bool', 'default': 'True', 'description': 'Whether to generate visualization video by default'}, 'include_keypoints': {'type': 'bool', 'default': 'False', 'description': 'Whether to include raw keypoints in JSON output'}, 'enable_webrtc': {'type': 'bool', 'default': 'False', 'description': 'Whether to enable WebRTC real-time analysis'}, 'label': {'type': 'typing.Optional[str][str, None]', 'default': 'None', 'description': 'Component label'}, 'every': {'type': 'typing.Optional[float][float, None]', 'default': 'None', 'description': None}, 'show_label': {'type': 'typing.Optional[bool][bool, None]', 'default': 'None', 'description': None}, 'container': {'type': 'bool', 'default': 'True', 'description': None}, 'scale': {'type': 'typing.Optional[int][int, None]', 'default': 'None', 'description': None}, 'min_width': {'type': 'int', 'default': '160', 'description': None}, 'interactive': {'type': 'typing.Optional[bool][bool, None]', 'default': 'None', 'description': None}, 'visible': {'type': 'bool', 'default': 'True', 'description': None}, 'elem_id': {'type': 'typing.Optional[str][str, None]', 'default': 'None', 'description': None}, 'elem_classes': {'type': 'typing.Optional[typing.List[str]][\n typing.List[str][str], None\n]', 'default': 'None', 'description': None}, 'render': {'type': 'bool', 'default': 'True', 'description': None}}, 'postprocess': {'value': {'type': 'typing.Any', 'description': 'Analysis results'}}, 'preprocess': {'return': {'type': 'typing.Dict[str, typing.Any][str, typing.Any]', 'description': 'Processed data for analysis'}, 'value': None}}, 'events': {}}, '__meta__': {'additional_interfaces': {}, 'user_fn_refs': {'LabanMovementAnalysis': []}}}
6
+
7
+ abs_path = os.path.join(os.path.dirname(__file__), "css.css")
8
+
9
+ with gr.Blocks(
10
+ theme='gstaff/sketch',
11
+ ) as demo:
12
+ gr.Markdown(
13
+ """
14
+ # `gradio_labanmovementanalysis`
15
+
16
+ <div style="display: flex; gap: 7px;">
17
+ <a href="https://pypi.org/project/gradio_labanmovementanalysis/" target="_blank"><img alt="PyPI - Version" src="https://img.shields.io/pypi/v/gradio_labanmovementanalysis"></a>
18
+ </div>
19
+
20
+ A Gradio 5 component for video movement analysis using Laban Movement Analysis (LMA) with MCP support for AI agents
21
+ """, elem_classes=["md-custom"], header_links=True)
22
+ app.render()
23
+ gr.Markdown(
24
+ """
25
+ ## Installation
26
+
27
+ ```bash
28
+ pip install gradio_labanmovementanalysis
29
+ ```
30
+
31
+ ## Usage
32
+
33
+ ```python
34
+ # app.py ─────────────────────────────────────────────────────────
35
+ \"\"\"
36
+ Laban Movement Analysis – modernised Gradio Space
37
+ Author: Csaba (BladeSzaSza)
38
+ \"\"\"
39
+
40
+ import gradio as gr
41
+
42
+
43
+ # ── 3. Dummy backend for local dev (replace with real fn) ───────
44
+ def process_video_standard(video, model, viz, kp):
45
+ \"\"\"Return empty JSON + passthrough video placeholder.\"\"\"
46
+ return {}, video
47
+
48
+ # ── 4. Build UI ─────────────────────────────────────────────────
49
+ def create_demo() -> gr.Blocks:
50
+ with gr.Blocks(
51
+ title="Laban Movement Analysis – Complete Suite",
52
+ theme='gstaff/sketch',
53
+ fill_width=True,
54
+ ) as demo:
55
+
56
+ # ── Hero banner ──
57
+ gr.Markdown(
58
+ """
59
+ # 🎭 Laban Movement Analysis – Complete Suite
60
+
61
+ Pose estimation • AI action recognition • Real-time agents
62
+ **v0.01-beta • 20+ pose models • MCP**
63
+ """
64
+ )
65
+
66
+ # ── Workspace ──
67
+ with gr.Row(equal_height=True):
68
+ # Input column
69
+ with gr.Column(scale=1, min_width=260):
70
+ video_in = gr.Video(label="Upload Video", sources=["upload"], format="mp4")
71
+ model_sel = gr.Dropdown(
72
+ ["mediapipe", "movenet", "yolo"], value="mediapipe", label="Pose Model"
73
+ )
74
+ with gr.Accordion("Options", open=False):
75
+ enable_viz = gr.Radio([("Yes", 1), ("No", 0)], value=1, label="Visualization")
76
+ include_kp = gr.Radio([("Yes", 1), ("No", 0)], value=0, label="Raw Keypoints")
77
+ analyze_btn = gr.Button("Analyze Movement", variant="primary")
78
+
79
+ # Output column
80
+ with gr.Column(scale=2, min_width=320):
81
+ viz_out = gr.Video(label="Annotated Video")
82
+ with gr.Accordion("Raw JSON", open=False):
83
+ json_out = gr.JSON(label="Movement Analysis", elem_classes=["json-output"])
84
+
85
+ # Wiring
86
+ analyze_btn.click(
87
+ fn=process_video_standard,
88
+ inputs=[video_in, model_sel, enable_viz, include_kp],
89
+ outputs=[json_out, viz_out],
90
+ )
91
+
92
+ # Footer
93
+ with gr.Row():
94
+ gr.Markdown(
95
+ """
96
+ **Built by Csaba Bolyós**
97
+ [GitHub](https://github.com/bladeszasza) • [HF](https://huggingface.co/BladeSzaSza)
98
+ """
99
+ )
100
+ return demo
101
+
102
+ if __name__ == "__main__":
103
+ print("🚀 Starting Laban Movement Analysis...")
104
+ demo = create_demo()
105
+
106
+
107
+ ```
108
+ """, elem_classes=["md-custom"], header_links=True)
109
+
110
+
111
+ gr.Markdown("""
112
+ ## `LabanMovementAnalysis`
113
+
114
+ ### Initialization
115
+ """, elem_classes=["md-custom"], header_links=True)
116
+
117
+ gr.ParamViewer(value=_docs["LabanMovementAnalysis"]["members"]["__init__"], linkify=[])
118
+
119
+
120
+
121
+
122
+ gr.Markdown("""
123
+
124
+ ### User function
125
+
126
+ The impact on the user's predict function varies depending on whether the component is used as an input or output for an event (or both).
127
+
128
+ - When used as an Input, the component only impacts the input signature of the user function.
129
+ - When used as an output, the component only impacts the return signature of the user function.
130
+
131
+ The code snippet below is accurate in cases where the component is used as both an input and an output.
132
+
133
+ - **As input:** is passed the processed data for analysis.
134
+ - **As output:** should return the analysis results.
135
+
136
+ ```python
137
+ def predict(
138
+ value: typing.Dict[str, typing.Any][str, typing.Any]
139
+ ) -> typing.Any:
140
+ return value
141
+ ```
142
+ """, elem_classes=["md-custom", "LabanMovementAnalysis-user-fn"], header_links=True)
143
+
144
+
145
+
146
+
147
+ demo.load(None, js=r"""function() {
148
+ const refs = {};
149
+ const user_fn_refs = {
150
+ LabanMovementAnalysis: [], };
151
+ requestAnimationFrame(() => {
152
+
153
+ Object.entries(user_fn_refs).forEach(([key, refs]) => {
154
+ if (refs.length > 0) {
155
+ const el = document.querySelector(`.${key}-user-fn`);
156
+ if (!el) return;
157
+ refs.forEach(ref => {
158
+ el.innerHTML = el.innerHTML.replace(
159
+ new RegExp("\\b"+ref+"\\b", "g"),
160
+ `<a href="#h-${ref.toLowerCase()}">${ref}</a>`
161
+ );
162
+ })
163
+ }
164
+ })
165
+
166
+ Object.entries(refs).forEach(([key, refs]) => {
167
+ if (refs.length > 0) {
168
+ const el = document.querySelector(`.${key}`);
169
+ if (!el) return;
170
+ refs.forEach(ref => {
171
+ el.innerHTML = el.innerHTML.replace(
172
+ new RegExp("\\b"+ref+"\\b", "g"),
173
+ `<a href="#h-${ref.toLowerCase()}">${ref}</a>`
174
+ );
175
+ })
176
+ }
177
+ })
178
+ })
179
+ }
180
+
181
+ """)
182
+
183
+ demo.launch()
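A minimal sketch of instantiating the component with the `__init__` parameters listed in `_docs` above; the values and the surrounding `Blocks` layout are illustrative assumptions:

```python
import gradio as gr
from gradio_labanmovementanalysis import LabanMovementAnalysis

with gr.Blocks() as blocks:
    # Parameter names follow the __init__ table in _docs; values are examples
    lma = LabanMovementAnalysis(
        default_model="movenet",    # "mediapipe", "movenet" or "yolo"
        enable_visualization=True,  # generate the annotated video
        include_keypoints=False,    # keep raw keypoints out of the JSON
        enable_webrtc=False,        # real-time analysis is opt-in
        label="Movement Analysis",
    )

blocks.launch()
```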