Csaba Bolyos committed
Commit 3aee170 · 1 parent: c6c85af

applied new css

README.md CHANGED
@@ -60,7 +60,7 @@ Heavy Beta Version - Under Active Development
 import gradio as gr
 import sys
 from pathlib import Path
-from typing import Dict, Any, List, Tuple
+from typing import Dict, Any, List, Tuple, Union, Optional
 
 # Add parent directory to path
 sys.path.insert(0, str(Path(__file__).parent.parent / "backend"))
@@ -113,37 +113,45 @@ if HAS_AGENT_API:
     agent_api = None
 
 
-def process_video_standard(video, model, enable_viz, include_keypoints):
-    """Standard video processing function."""
+def process_video_standard(video: Optional[str], model: str, enable_viz: int, include_keypoints: int) -> Tuple[Optional[Dict[str, Any]], Optional[str]]:
+    """Standard video processing function with proper type annotations."""
     if video is None:
         return None, None
 
     try:
+        # Convert int to bool
+        enable_viz_bool = bool(enable_viz)
+        include_keypoints_bool = bool(include_keypoints)
+
         json_output, video_output = analyzer.process_video(
             video,
             model=model,
-            enable_visualization=enable_viz,
-            include_keypoints=include_keypoints
+            enable_visualization=enable_viz_bool,
+            include_keypoints=include_keypoints_bool
         )
         return json_output, video_output
     except Exception as e:
         return {"error": str(e)}, None
 
 
-def process_video_enhanced(video_input, model, enable_viz, include_keypoints):
+def process_video_enhanced(video_input: Optional[str], model: str, enable_viz: int, include_keypoints: int) -> Tuple[Dict[str, Any], Optional[str]]:
     """Enhanced video processing with all new features."""
     if not video_input:
         return {"error": "No video provided"}, None
 
     try:
+        # Convert int to bool
+        enable_viz_bool = bool(enable_viz)
+        include_keypoints_bool = bool(include_keypoints)
+
         # Handle both file upload and URL input
         video_path = video_input.name if hasattr(video_input, 'name') else video_input
 
         json_result, viz_result = analyzer.process_video(
             video_path,
             model=model,
-            enable_visualization=enable_viz,
-            include_keypoints=include_keypoints
+            enable_visualization=enable_viz_bool,
+            include_keypoints=include_keypoints_bool
        )
         return json_result, viz_result
     except Exception as e:
@@ -151,7 +159,7 @@ def process_video_enhanced(video_input, model, enable_viz, include_keypoints):
         return error_result, None
 
 
-def process_video_for_agent(video, model, output_format="summary"):
+def process_video_for_agent(video: Optional[str], model: str, output_format: str = "summary") -> Dict[str, Any]:
     """Process video with agent-friendly output format."""
     if not HAS_AGENT_API or agent_api is None:
         return {"error": "Agent API not available"}
@@ -181,7 +189,7 @@ def process_video_for_agent(video, model, output_format="summary"):
         return {"error": str(e)}
 
 
-def batch_process_videos(files, model):
+def batch_process_videos(files: Optional[List], model: str) -> Dict[str, Any]:
     """Process multiple videos in batch."""
     if not HAS_AGENT_API or agent_api is None:
         return {"error": "Agent API not available"}
@@ -212,7 +220,7 @@ def batch_process_videos(files, model):
         return {"error": str(e)}
 
 
-def filter_videos_by_movement(files, direction, intensity, min_fluidity, min_expansion):
+def filter_videos_by_movement(files: Optional[List], direction: str, intensity: str, min_fluidity: float, min_expansion: float) -> Dict[str, Any]:
     """Filter videos based on movement characteristics."""
     if not HAS_AGENT_API or agent_api is None:
         return {"error": "Agent API not available"}
@@ -252,10 +260,10 @@ def filter_videos_by_movement(files, direction, intensity, min_fluidity, min_exp
         return {"error": str(e)}
 
 
-def compare_models(video, model1, model2):
+def compare_models(video: Optional[str], model1: str, model2: str) -> List[List[str]]:
     """Compare two different pose models on the same video."""
     if not video:
-        return "No video provided"
+        return [["Error", "No video provided", "", ""]]
 
     try:
         # Analyze with both models
@@ -293,7 +301,7 @@ def compare_models(video, model1, model2):
         return [["Error", str(e), "", ""]]
 
 
-def start_webrtc_stream(model):
+def start_webrtc_stream(model: str) -> Tuple[str, Dict[str, Any]]:
     """Start WebRTC real-time analysis."""
     try:
         success = analyzer.start_webrtc_stream(model)
@@ -305,7 +313,7 @@ def start_webrtc_stream(model):
         return f"🔴 Error: {str(e)}", {"status": "error"}
 
 
-def stop_webrtc_stream():
+def stop_webrtc_stream() -> Tuple[str, Dict[str, Any]]:
     """Stop WebRTC real-time analysis."""
     try:
         success = analyzer.stop_webrtc_stream()
@@ -320,9 +328,16 @@ def stop_webrtc_stream():
 def create_unified_demo():
     """Create the unified comprehensive demo."""
 
+    # Custom theme - simplified to avoid JSON schema issues
+    custom_theme = gr.themes.Soft(
+        primary_hue=gr.themes.colors.emerald,  # Viridian green
+        secondary_hue=gr.themes.colors.blue,   # Cobalt blue
+        neutral_hue=gr.themes.colors.gray,
+    )
+
     with gr.Blocks(
         title="Laban Movement Analysis - Complete Suite by Csaba Bolyós",
-        theme=gr.themes.Soft(),
+        theme=custom_theme,
         css="""
         .main-header {
             background: linear-gradient(135deg, #40826D 0%, #2E5E4A 50%, #1B3A2F 100%);
@@ -333,17 +348,19 @@ def create_unified_demo():
             text-align: center;
         }
         .feature-card {
-            border: 1px solid #e1e5e9;
+            border: 1px solid #40826D;
             border-radius: 8px;
             padding: 16px;
             margin: 8px 0;
-            background: #f8f9fa;
+            background: linear-gradient(135deg, #F5F5DC 0%, #F0F8E8 100%);
         }
         .json-output {
             max-height: 600px;
             overflow-y: auto;
             font-family: monospace;
             font-size: 12px;
+            background: #F8F9FA;
+            border: 1px solid #40826D;
         }
         .author-info {
             background: linear-gradient(135deg, #40826D 0%, #2E5E4A 100%);
@@ -353,6 +370,15 @@ def create_unified_demo():
             margin: 10px 0;
             text-align: center;
         }
+        /* Dark mode support */
+        .dark .feature-card {
+            background: linear-gradient(135deg, #1e3a32 0%, #2d4a3d 100%);
+            border-color: #40826D;
+        }
+        .dark .json-output {
+            background: #2d3748;
+            color: #e2e8f0;
+        }
         """
     ) as demo:
 
@@ -396,13 +422,15 @@ def create_unified_demo():
             )
 
            with gr.Row():
-                enable_viz_std = gr.Checkbox(
-                    value=True,
+                enable_viz_std = gr.Radio(
+                    choices=[("Yes", 1), ("No", 0)],
+                    value=1,
                    label="Generate Visualization"
                )
 
-                include_keypoints_std = gr.Checkbox(
-                    value=False,
+                include_keypoints_std = gr.Radio(
+                    choices=[("Yes", 1), ("No", 0)],
+                    value=0,
                    label="Include Keypoints"
                )
 
@@ -410,8 +438,7 @@ def create_unified_demo():
 
            gr.Examples(
                examples=[
-                    ["examples/balette.mov"],
-                    ["examples/balette.mp4"],
+                    ["examples/balette.mp4"]
                ],
                inputs=video_input_std,
                label="Example Videos"
@@ -454,61 +481,67 @@ def create_unified_demo():
 
        with gr.Row():
            with gr.Column(scale=1):
-                gr.HTML('<div class="feature-card">')
-                gr.Markdown("**Video Input**")
-
-                # Changed from textbox to file upload as requested
-                video_input_enh = gr.File(
-                    label="Upload Video or Drop File",
-                    file_types=["video"],
-                    type="filepath"
-                )
-
-                # URL input option
-                url_input_enh = gr.Textbox(
-                    label="Or Enter Video URL",
-                    placeholder="YouTube URL, Vimeo URL, or direct video URL",
-                    info="Leave file upload empty to use URL"
-                )
-
-                gr.Examples(
-                    examples=[
-                        ["examples/balette.mov"],
-                        ["https://www.youtube.com/shorts/RX9kH2l3L8U"],
-                        ["https://vimeo.com/815392738"]
-                    ],
-                    inputs=url_input_enh,
-                    label="Example URLs"
-                )
-
-                gr.Markdown("**Model Selection**")
-
-                model_select_enh = gr.Dropdown(
-                    choices=[
-                        # MediaPipe variants
-                        "mediapipe-lite", "mediapipe-full", "mediapipe-heavy",
-                        # MoveNet variants
-                        "movenet-lightning", "movenet-thunder",
-                        # YOLO variants (added X models)
-                        "yolo-v8-n", "yolo-v8-s", "yolo-v8-m", "yolo-v8-l", "yolo-v8-x",
-                        # YOLO v11 variants
-                        "yolo-v11-n", "yolo-v11-s", "yolo-v11-m", "yolo-v11-l", "yolo-v11-x"
-                    ],
-                    value="mediapipe-full",
-                    label="Advanced Pose Models",
-                    info="17+ model variants available"
-                )
-
-                gr.Markdown("**Analysis Options**")
-
-                with gr.Row():
-                    enable_viz_enh = gr.Checkbox(value=True, label="Visualization")
-
-                with gr.Row():
-                    include_keypoints_enh = gr.Checkbox(value=False, label="Raw Keypoints")
-
-                analyze_btn_enh = gr.Button("🚀 Enhanced Analysis", variant="primary", size="lg")
-                gr.HTML('</div>')
+                with gr.Group(elem_classes=["feature-card"]):
+                    gr.Markdown("**Video Input**")
+
+                    # Changed from textbox to file upload as requested
+                    video_input_enh = gr.File(
+                        label="Upload Video or Drop File",
+                        file_types=["video"],
+                        type="filepath"
+                    )
+
+                    # URL input option
+                    url_input_enh = gr.Textbox(
+                        label="Or Enter Video URL",
+                        placeholder="YouTube URL, Vimeo URL, or direct video URL",
+                        info="Leave file upload empty to use URL"
+                    )
+
+                    gr.Examples(
+                        examples=[
+                            ["https://www.youtube.com/shorts/RX9kH2l3L8U"],
+                            ["https://vimeo.com/815392738"]
+                        ],
+                        inputs=url_input_enh,
+                        label="Example URLs"
+                    )
+
+                    gr.Markdown("**Model Selection**")
+
+                    model_select_enh = gr.Dropdown(
+                        choices=[
+                            # MediaPipe variants
+                            "mediapipe-lite", "mediapipe-full", "mediapipe-heavy",
+                            # MoveNet variants
+                            "movenet-lightning", "movenet-thunder",
+                            # YOLO variants (added X models)
+                            "yolo-v8-n", "yolo-v8-s", "yolo-v8-m", "yolo-v8-l", "yolo-v8-x",
+                            # YOLO v11 variants
+                            "yolo-v11-n", "yolo-v11-s", "yolo-v11-m", "yolo-v11-l", "yolo-v11-x"
+                        ],
+                        value="mediapipe-full",
+                        label="Advanced Pose Models",
+                        info="17+ model variants available"
+                    )
+
+                    gr.Markdown("**Analysis Options**")
+
+                    with gr.Row():
+                        enable_viz_enh = gr.Radio(
+                            choices=[("Yes", 1), ("No", 0)],
+                            value=1,
+                            label="Visualization"
+                        )
+
+                    with gr.Row():
+                        include_keypoints_enh = gr.Radio(
+                            choices=[("Yes", 1), ("No", 0)],
+                            value=0,
+                            label="Raw Keypoints"
+                        )
+
+                    analyze_btn_enh = gr.Button("🚀 Enhanced Analysis", variant="primary", size="lg")
 
        with gr.Column(scale=2):
            with gr.Tab("📊 Analysis"):
@@ -517,7 +550,7 @@ def create_unified_demo():
            with gr.Tab("🎥 Visualization"):
                viz_output_enh = gr.Video(label="Annotated Video")
 
-        def process_enhanced_input(file_input, url_input, model, enable_viz, include_keypoints):
+        def process_enhanced_input(file_input: Optional[str], url_input: str, model: str, enable_viz: int, include_keypoints: int) -> Tuple[Dict[str, Any], Optional[str]]:
            """Process either file upload or URL input."""
            video_source = file_input if file_input else url_input
            return process_video_enhanced(video_source, model, enable_viz, include_keypoints)
@@ -678,21 +711,21 @@ def create_unified_demo():
 
            # Info display
            gr.HTML("""
-            <div style="background: #e8f4fd; padding: 15px; border-radius: 8px; margin-top: 10px;">
-                <h4>📹 WebRTC Pose Analysis</h4>
-                <p style="margin: 5px 0;">Real-time movement analysis using your webcam</p>
+            <div style="background: linear-gradient(135deg, #F0F8E8 0%, #E8F4FD 100%); padding: 15px; border-radius: 8px; margin-top: 10px; border: 1px solid #40826D;">
+                <h4 style="color: #2E5E4A;">📹 WebRTC Pose Analysis</h4>
+                <p style="margin: 5px 0; color: #1B3A2F;">Real-time movement analysis using your webcam</p>
 
-                <h4>🔒 Privacy</h4>
-                <p style="margin: 5px 0;">Processing happens locally - no video data stored</p>
+                <h4 style="color: #2E5E4A;">🔒 Privacy</h4>
+                <p style="margin: 5px 0; color: #1B3A2F;">Processing happens locally - no video data stored</p>
 
-                <h4>💡 Usage</h4>
-                <ul style="margin: 5px 0; padding-left: 20px;">
+                <h4 style="color: #2E5E4A;">💡 Usage</h4>
+                <ul style="margin: 5px 0; padding-left: 20px; color: #1B3A2F;">
                    <li>Grant camera permission when prompted</li>
                    <li>Move in front of camera to see pose detection</li>
                    <li>Adjust confidence threshold as needed</li>
                </ul>
            </div>
-            """)
+            """)
 
        else:
            # Fallback if WebRTC component not available
@@ -864,28 +897,30 @@ def create_unified_demo():
            )
            ```
            """)
+
+        # Author info with proper styling
        gr.HTML("""
-        <div class="author-info">
-            <p><strong>Created by:</strong> Csaba Bolyós (BladeSzaSza)</p>
-            <p style="margin: 5px 0;">
-                <a href="https://github.com/bladeszasza" style="color: #a8e6cf; text-decoration: none;">🔗 GitHub</a> •
-                <a href="https://huggingface.co/BladeSzaSza" style="color: #a8e6cf; text-decoration: none;">🤗 Hugging Face</a> •
-                <a href="https://www.linkedin.com/in/csaba-bolyós-00a11767/" style="color: #a8e6cf; text-decoration: none;">💼 LinkedIn</a>
-            </p>
-            <p style="font-size: 12px; opacity: 0.9;">Contact: [email protected]</p>
-        </div>
-        """)
+        <div class="author-info">
+            <p><strong>Created by:</strong> Csaba Bolyós (BladeSzaSza)</p>
+            <p style="margin: 5px 0;">
+                <a href="https://github.com/bladeszasza" style="color: #a8e6cf; text-decoration: none;">🔗 GitHub</a> •
+                <a href="https://huggingface.co/BladeSzaSza" style="color: #a8e6cf; text-decoration: none;">🤗 Hugging Face</a> •
+                <a href="https://www.linkedin.com/in/csaba-bolyós-00a11767/" style="color: #a8e6cf; text-decoration: none;">💼 LinkedIn</a>
+            </p>
+            <p style="font-size: 12px; opacity: 0.9;">Contact: [email protected]</p>
+        </div>
+        """)
 
-        # Footer with proper attribution
+        # Footer with proper attribution
        gr.HTML("""
-        <div style="text-align: center; padding: 20px; margin-top: 30px; border-top: 1px solid #eee;">
-            <p style="color: #666; margin-bottom: 10px;">
+        <div style="text-align: center; padding: 20px; margin-top: 30px; border-top: 1px solid #40826D;">
+            <p style="color: #2E5E4A; margin-bottom: 10px;">
                🎭 Laban Movement Analysis - Complete Suite | Heavy Beta Version
            </p>
-            <p style="color: #666; font-size: 12px;">
+            <p style="color: #2E5E4A; font-size: 12px;">
                Created by <strong>Csaba Bolyós</strong> | Powered by MediaPipe, MoveNet & YOLO
            </p>
-            <p style="color: #666; font-size: 10px; margin-top: 10px;">
+            <p style="color: #2E5E4A; font-size: 10px; margin-top: 10px;">
                <a href="https://github.com/bladeszasza" style="color: #40826D;">GitHub</a> •
                <a href="https://huggingface.co/BladeSzaSza" style="color: #40826D;">Hugging Face</a> •
                <a href="https://www.linkedin.com/in/csaba-bolyós-00a11767/" style="color: #40826D;">LinkedIn</a>
demo/app.py CHANGED
@@ -71,6 +71,15 @@ if HAS_AGENT_API:
     print(f"Warning: Agent API not available: {e}")
     agent_api = None
 
+ROOT = Path(__file__).parent
+CSS_PATH = ROOT / "css.css"
+
+THEME = gr.themes.Soft(  # same palette everywhere
+    primary_hue=gr.themes.colors.emerald,
+    secondary_hue=gr.themes.colors.blue,
+    neutral_hue=gr.themes.colors.gray,
+)
+
 
 def process_video_standard(video: Optional[str], model: str, enable_viz: int, include_keypoints: int) -> Tuple[Optional[Dict[str, Any]], Optional[str]]:
     """Standard video processing function with proper type annotations."""
@@ -296,49 +305,8 @@ def create_unified_demo():
 
     with gr.Blocks(
         title="Laban Movement Analysis - Complete Suite by Csaba Bolyós",
-        theme=custom_theme,
-        css="""
-        .main-header {
-            background: linear-gradient(135deg, #40826D 0%, #2E5E4A 50%, #1B3A2F 100%);
-            color: white;
-            padding: 30px;
-            border-radius: 10px;
-            margin-bottom: 20px;
-            text-align: center;
-        }
-        .feature-card {
-            border: 1px solid #40826D;
-            border-radius: 8px;
-            padding: 16px;
-            margin: 8px 0;
-            background: linear-gradient(135deg, #F5F5DC 0%, #F0F8E8 100%);
-        }
-        .json-output {
-            max-height: 600px;
-            overflow-y: auto;
-            font-family: monospace;
-            font-size: 12px;
-            background: #F8F9FA;
-            border: 1px solid #40826D;
-        }
-        .author-info {
-            background: linear-gradient(135deg, #40826D 0%, #2E5E4A 100%);
-            color: white;
-            padding: 15px;
-            border-radius: 8px;
-            margin: 10px 0;
-            text-align: center;
-        }
-        /* Dark mode support */
-        .dark .feature-card {
-            background: linear-gradient(135deg, #1e3a32 0%, #2d4a3d 100%);
-            border-color: #40826D;
-        }
-        .dark .json-output {
-            background: #2d3748;
-            color: #e2e8f0;
-        }
-        """
+        css=str(CSS_PATH),  # <— external stylesheet
+        theme=THEME,
    ) as demo:
 
        # Main Header
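demo/app.py now keeps the palette in one module-level `THEME` and moves the inline CSS into an external `css.css`, passed as a path to `gr.Blocks`. A reduced sketch of the same wiring follows; it relies on Gradio accepting a path to a `.css` file for the `css` argument (as `demo/space.py` already does with `abs_path`), and assumes the `.feature-card` and `.dark` rules live in that file:

```python
from pathlib import Path
import gradio as gr

ROOT = Path(__file__).parent
CSS_PATH = ROOT / "css.css"  # external stylesheet: .feature-card, .dark rules, etc.

THEME = gr.themes.Soft(      # same palette everywhere
    primary_hue=gr.themes.colors.emerald,
    secondary_hue=gr.themes.colors.blue,
    neutral_hue=gr.themes.colors.gray,
)

with gr.Blocks(css=str(CSS_PATH), theme=THEME) as demo:
    # elem_classes attaches the component to a selector from css.css;
    # Gradio toggles a top-level .dark class, so ".dark .feature-card"
    # rules in the stylesheet cover dark mode without extra Python.
    with gr.Group(elem_classes=["feature-card"]):
        gr.Markdown("**Video Input**")

if __name__ == "__main__":
    demo.launch()
```

Defining the theme once and importing the stylesheet by path keeps app.py, space.py, and the README listing on the same palette without three diverging CSS strings.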
demo/space.py CHANGED
@@ -6,15 +6,16 @@ import os
 _docs = {'LabanMovementAnalysis': {'description': 'Gradio component for video-based pose analysis with Laban Movement Analysis metrics.', 'members': {'__init__': {'default_model': {'type': 'str', 'default': '"mediapipe"', 'description': 'Default pose estimation model ("mediapipe", "movenet", "yolo")'}, 'enable_visualization': {'type': 'bool', 'default': 'True', 'description': 'Whether to generate visualization video by default'}, 'include_keypoints': {'type': 'bool', 'default': 'False', 'description': 'Whether to include raw keypoints in JSON output'}, 'enable_webrtc': {'type': 'bool', 'default': 'False', 'description': 'Whether to enable WebRTC real-time analysis'}, 'label': {'type': 'typing.Optional[str][str, None]', 'default': 'None', 'description': 'Component label'}, 'every': {'type': 'typing.Optional[float][float, None]', 'default': 'None', 'description': None}, 'show_label': {'type': 'typing.Optional[bool][bool, None]', 'default': 'None', 'description': None}, 'container': {'type': 'bool', 'default': 'True', 'description': None}, 'scale': {'type': 'typing.Optional[int][int, None]', 'default': 'None', 'description': None}, 'min_width': {'type': 'int', 'default': '160', 'description': None}, 'interactive': {'type': 'typing.Optional[bool][bool, None]', 'default': 'None', 'description': None}, 'visible': {'type': 'bool', 'default': 'True', 'description': None}, 'elem_id': {'type': 'typing.Optional[str][str, None]', 'default': 'None', 'description': None}, 'elem_classes': {'type': 'typing.Optional[typing.List[str]][\n typing.List[str][str], None\n]', 'default': 'None', 'description': None}, 'render': {'type': 'bool', 'default': 'True', 'description': None}}, 'postprocess': {'value': {'type': 'typing.Any', 'description': 'Analysis results'}}, 'preprocess': {'return': {'type': 'typing.Dict[str, typing.Any][str, typing.Any]', 'description': 'Processed data for analysis'}, 'value': None}}, 'events': {}}, '__meta__': {'additional_interfaces': {}, 'user_fn_refs': {'LabanMovementAnalysis': []}}}
 
 abs_path = os.path.join(os.path.dirname(__file__), "css.css")
+THEME = gr.themes.Soft(  # same palette everywhere
+    primary_hue=gr.themes.colors.emerald,
+    secondary_hue=gr.themes.colors.blue,
+    neutral_hue=gr.themes.colors.gray,
+)
+
 
 with gr.Blocks(
     css=abs_path,
-    theme=gr.themes.Default(
-        font_mono=[
-            gr.themes.GoogleFont("Inconsolata"),
-            "monospace",
-        ],
-    ),
+    theme=THEME,
 ) as demo:
     gr.Markdown(
         """
@@ -59,7 +60,7 @@ Heavy Beta Version - Under Active Development
 import gradio as gr
 import sys
 from pathlib import Path
-from typing import Dict, Any, List, Tuple
+from typing import Dict, Any, List, Tuple, Union, Optional
 
 # Add parent directory to path
 sys.path.insert(0, str(Path(__file__).parent.parent / "backend"))
@@ -112,8 +113,8 @@ if HAS_AGENT_API:
     agent_api = None
 
 
-def process_video_standard(video, model, enable_viz, include_keypoints):
-    \"\"\"Standard video processing function.\"\"\"
+def process_video_standard(video: Optional[str], model: str, enable_viz: int, include_keypoints: int) -> Tuple[Optional[Dict[str, Any]], Optional[str]]:
+    \"\"\"Standard video processing function with proper type annotations.\"\"\"
     if video is None:
         return None, None
 
@@ -133,7 +134,7 @@ def process_video_standard(video, model, enable_viz, include_keypoints):
         return {"error": str(e)}, None
 
 
-def process_video_enhanced(video_input, model, enable_viz, include_keypoints):
+def process_video_enhanced(video_input: Optional[str], model: str, enable_viz: int, include_keypoints: int) -> Tuple[Dict[str, Any], Optional[str]]:
     \"\"\"Enhanced video processing with all new features.\"\"\"
     if not video_input:
         return {"error": "No video provided"}, None
@@ -158,7 +159,7 @@ def process_video_enhanced(video_input, model, enable_viz, include_keypoints):
         return error_result, None
 
 
-def process_video_for_agent(video, model, output_format="summary"):
+def process_video_for_agent(video: Optional[str], model: str, output_format: str = "summary") -> Dict[str, Any]:
     \"\"\"Process video with agent-friendly output format.\"\"\"
     if not HAS_AGENT_API or agent_api is None:
         return {"error": "Agent API not available"}
@@ -188,7 +189,7 @@ def process_video_for_agent(video, model, output_format="summary"):
         return {"error": str(e)}
 
 
-def batch_process_videos(files, model):
+def batch_process_videos(files: Optional[List], model: str) -> Dict[str, Any]:
     \"\"\"Process multiple videos in batch.\"\"\"
     if not HAS_AGENT_API or agent_api is None:
         return {"error": "Agent API not available"}
@@ -219,7 +220,7 @@ def batch_process_videos(files, model):
         return {"error": str(e)}
 
 
-def filter_videos_by_movement(files, direction, intensity, min_fluidity, min_expansion):
+def filter_videos_by_movement(files: Optional[List], direction: str, intensity: str, min_fluidity: float, min_expansion: float) -> Dict[str, Any]:
     \"\"\"Filter videos based on movement characteristics.\"\"\"
     if not HAS_AGENT_API or agent_api is None:
         return {"error": "Agent API not available"}
@@ -259,10 +260,10 @@ def filter_videos_by_movement(files, direction, intensity, min_fluidity, min_exp
         return {"error": str(e)}
 
 
-def compare_models(video, model1, model2):
+def compare_models(video: Optional[str], model1: str, model2: str) -> List[List[str]]:
     \"\"\"Compare two different pose models on the same video.\"\"\"
     if not video:
-        return "No video provided"
+        return [["Error", "No video provided", "", ""]]
 
     try:
         # Analyze with both models
@@ -300,7 +301,7 @@ def compare_models(video, model1, model2):
         return [["Error", str(e), "", ""]]
 
 
-def start_webrtc_stream(model):
+def start_webrtc_stream(model: str) -> Tuple[str, Dict[str, Any]]:
     \"\"\"Start WebRTC real-time analysis.\"\"\"
     try:
         success = analyzer.start_webrtc_stream(model)
@@ -312,7 +313,7 @@ def start_webrtc_stream(model):
         return f"🔴 Error: {str(e)}", {"status": "error"}
 
 
-def stop_webrtc_stream():
+def stop_webrtc_stream() -> Tuple[str, Dict[str, Any]]:
     \"\"\"Stop WebRTC real-time analysis.\"\"\"
     try:
         success = analyzer.stop_webrtc_stream()
@@ -327,40 +328,17 @@ def stop_webrtc_stream():
 def create_unified_demo():
     \"\"\"Create the unified comprehensive demo.\"\"\"
 
+    # Custom theme - simplified to avoid JSON schema issues
+    custom_theme = gr.themes.Soft(
+        primary_hue=gr.themes.colors.emerald,  # Viridian green
+        secondary_hue=gr.themes.colors.blue,   # Cobalt blue
+        neutral_hue=gr.themes.colors.gray,
+    )
+
     with gr.Blocks(
         title="Laban Movement Analysis - Complete Suite by Csaba Bolyós",
-        theme=gr.themes.Soft(),
-        css=\"\"\"
-        .main-header {
-            background: linear-gradient(135deg, #40826D 0%, #2E5E4A 50%, #1B3A2F 100%);
-            color: white;
-            padding: 30px;
-            border-radius: 10px;
-            margin-bottom: 20px;
-            text-align: center;
-        }
-        .feature-card {
-            border: 1px solid #e1e5e9;
-            border-radius: 8px;
-            padding: 16px;
-            margin: 8px 0;
-            background: #f8f9fa;
-        }
-        .json-output {
-            max-height: 600px;
-            overflow-y: auto;
-            font-family: monospace;
-            font-size: 12px;
-        }
-        .author-info {
-            background: linear-gradient(135deg, #40826D 0%, #2E5E4A 100%);
-            color: white;
-            padding: 15px;
-            border-radius: 8px;
-            margin: 10px 0;
-            text-align: center;
-        }
-        \"\"\"
+        css=str(CSS_PATH),
+        theme=THEME,
     ) as demo:
 
        # Main Header
@@ -419,7 +397,7 @@ def create_unified_demo():
 
            gr.Examples(
                examples=[
-                    ["https://www.youtube.com/shorts/RX9kH2l3L8U"]
+                    ["examples/balette.mp4"]
                ],
                inputs=video_input_std,
                label="Example Videos"
@@ -462,68 +440,67 @@ def create_unified_demo():
 
        with gr.Row():
            with gr.Column(scale=1):
-                gr.HTML('<div class="feature-card">')
-                gr.Markdown("**Video Input**")
-
-                # Changed from textbox to file upload as requested
-                video_input_enh = gr.File(
-                    label="Upload Video or Drop File",
-                    file_types=["video"],
-                    type="filepath"
-                )
-
-                # URL input option
-                url_input_enh = gr.Textbox(
-                    label="Or Enter Video URL",
-                    placeholder="YouTube URL, Vimeo URL, or direct video URL",
-                    info="Leave file upload empty to use URL"
-                )
-
-                gr.Examples(
-                    examples=[
-                        ["https://www.youtube.com/shorts/RX9kH2l3L8U"],
-                        ["https://vimeo.com/815392738"]
-                    ],
-                    inputs=url_input_enh,
-                    label="Example URLs"
-                )
-
-                gr.Markdown("**Model Selection**")
-
-                model_select_enh = gr.Dropdown(
-                    choices=[
-                        # MediaPipe variants
-                        "mediapipe-lite", "mediapipe-full", "mediapipe-heavy",
-                        # MoveNet variants
-                        "movenet-lightning", "movenet-thunder",
-                        # YOLO variants (added X models)
-                        "yolo-v8-n", "yolo-v8-s", "yolo-v8-m", "yolo-v8-l", "yolo-v8-x",
-                        # YOLO v11 variants
-                        "yolo-v11-n", "yolo-v11-s", "yolo-v11-m", "yolo-v11-l", "yolo-v11-x"
-                    ],
-                    value="mediapipe-full",
-                    label="Advanced Pose Models",
-                    info="17+ model variants available"
-                )
-
-                gr.Markdown("**Analysis Options**")
-
-                with gr.Row():
-                    enable_viz_enh = gr.Radio(
-                        choices=[("Yes", 1), ("No", 0)],
-                        value=1,
-                        label="Visualization"
+                with gr.Group(elem_classes=["feature-card"]):
+                    gr.Markdown("**Video Input**")
+
+                    # Changed from textbox to file upload as requested
+                    video_input_enh = gr.File(
+                        label="Upload Video or Drop File",
+                        file_types=["video"],
+                        type="filepath"
                    )
-
-                with gr.Row():
-                    include_keypoints_enh = gr.Radio(
-                        choices=[("Yes", 1), ("No", 0)],
-                        value=0,
-                        label="Raw Keypoints"
+
+                    # URL input option
+                    url_input_enh = gr.Textbox(
+                        label="Or Enter Video URL",
+                        placeholder="YouTube URL, Vimeo URL, or direct video URL",
+                        info="Leave file upload empty to use URL"
                    )
-
-                analyze_btn_enh = gr.Button("🚀 Enhanced Analysis", variant="primary", size="lg")
-                gr.HTML('</div>')
+
+                    gr.Examples(
+                        examples=[
+                            ["https://www.youtube.com/shorts/RX9kH2l3L8U"],
+                            ["https://vimeo.com/815392738"]
+                        ],
+                        inputs=url_input_enh,
+                        label="Example URLs"
+                    )
+
+                    gr.Markdown("**Model Selection**")
+
+                    model_select_enh = gr.Dropdown(
+                        choices=[
+                            # MediaPipe variants
+                            "mediapipe-lite", "mediapipe-full", "mediapipe-heavy",
+                            # MoveNet variants
+                            "movenet-lightning", "movenet-thunder",
+                            # YOLO variants (added X models)
+                            "yolo-v8-n", "yolo-v8-s", "yolo-v8-m", "yolo-v8-l", "yolo-v8-x",
+                            # YOLO v11 variants
+                            "yolo-v11-n", "yolo-v11-s", "yolo-v11-m", "yolo-v11-l", "yolo-v11-x"
+                        ],
+                        value="mediapipe-full",
+                        label="Advanced Pose Models",
+                        info="17+ model variants available"
+                    )
+
+                    gr.Markdown("**Analysis Options**")
+
+                    with gr.Row():
+                        enable_viz_enh = gr.Radio(
+                            choices=[("Yes", 1), ("No", 0)],
+                            value=1,
+                            label="Visualization"
+                        )
+
+                    with gr.Row():
+                        include_keypoints_enh = gr.Radio(
+                            choices=[("Yes", 1), ("No", 0)],
+                            value=0,
+                            label="Raw Keypoints"
+                        )
+
+                    analyze_btn_enh = gr.Button("🚀 Enhanced Analysis", variant="primary", size="lg")
 
        with gr.Column(scale=2):
            with gr.Tab("📊 Analysis"):
@@ -532,7 +509,7 @@ def create_unified_demo():
            with gr.Tab("🎥 Visualization"):
                viz_output_enh = gr.Video(label="Annotated Video")
 
-        def process_enhanced_input(file_input, url_input, model, enable_viz, include_keypoints):
+        def process_enhanced_input(file_input: Optional[str], url_input: str, model: str, enable_viz: int, include_keypoints: int) -> Tuple[Dict[str, Any], Optional[str]]:
            \"\"\"Process either file upload or URL input.\"\"\"
            video_source = file_input if file_input else url_input
            return process_video_enhanced(video_source, model, enable_viz, include_keypoints)
@@ -693,21 +670,21 @@ def create_unified_demo():
 
            # Info display
            gr.HTML(\"\"\"
-            <div style="background: #e8f4fd; padding: 15px; border-radius: 8px; margin-top: 10px;">
-                <h4>📹 WebRTC Pose Analysis</h4>
-                <p style="margin: 5px 0;">Real-time movement analysis using your webcam</p>
+            <div style="background: linear-gradient(135deg, #F0F8E8 0%, #E8F4FD 100%); padding: 15px; border-radius: 8px; margin-top: 10px; border: 1px solid #40826D;">
+                <h4 style="color: #2E5E4A;">📹 WebRTC Pose Analysis</h4>
+                <p style="margin: 5px 0; color: #1B3A2F;">Real-time movement analysis using your webcam</p>
 
-                <h4>🔒 Privacy</h4>
-                <p style="margin: 5px 0;">Processing happens locally - no video data stored</p>
+                <h4 style="color: #2E5E4A;">🔒 Privacy</h4>
+                <p style="margin: 5px 0; color: #1B3A2F;">Processing happens locally - no video data stored</p>
 
-                <h4>💡 Usage</h4>
-                <ul style="margin: 5px 0; padding-left: 20px;">
+                <h4 style="color: #2E5E4A;">💡 Usage</h4>
+                <ul style="margin: 5px 0; padding-left: 20px; color: #1B3A2F;">
                    <li>Grant camera permission when prompted</li>
                    <li>Move in front of camera to see pose detection</li>
                    <li>Adjust confidence threshold as needed</li>
                </ul>
            </div>
-            \"\"\")
+            \"\"\")
 
        else:
            # Fallback if WebRTC component not available
@@ -879,28 +856,30 @@ def create_unified_demo():
            )
            ```
            \"\"\")
+
+        # Author info with proper styling
        gr.HTML(\"\"\"
-        <div class="author-info">
-            <p><strong>Created by:</strong> Csaba Bolyós (BladeSzaSza)</p>
-            <p style="margin: 5px 0;">
-                <a href="https://github.com/bladeszasza" style="color: #a8e6cf; text-decoration: none;">🔗 GitHub</a> •
-                <a href="https://huggingface.co/BladeSzaSza" style="color: #a8e6cf; text-decoration: none;">🤗 Hugging Face</a> •
-                <a href="https://www.linkedin.com/in/csaba-bolyós-00a11767/" style="color: #a8e6cf; text-decoration: none;">💼 LinkedIn</a>
-            </p>
-            <p style="font-size: 12px; opacity: 0.9;">Contact: [email protected]</p>
-        </div>
-        \"\"\")
+        <div class="author-info">
+            <p><strong>Created by:</strong> Csaba Bolyós (BladeSzaSza)</p>
+            <p style="margin: 5px 0;">
+                <a href="https://github.com/bladeszasza" style="color: #a8e6cf; text-decoration: none;">🔗 GitHub</a> •
+                <a href="https://huggingface.co/BladeSzaSza" style="color: #a8e6cf; text-decoration: none;">🤗 Hugging Face</a> •
+                <a href="https://www.linkedin.com/in/csaba-bolyós-00a11767/" style="color: #a8e6cf; text-decoration: none;">💼 LinkedIn</a>
+            </p>
+            <p style="font-size: 12px; opacity: 0.9;">Contact: [email protected]</p>
+        </div>
+        \"\"\")
 
-        # Footer with proper attribution
+        # Footer with proper attribution
        gr.HTML(\"\"\"
-        <div style="text-align: center; padding: 20px; margin-top: 30px; border-top: 1px solid #eee;">
-            <p style="color: #666; margin-bottom: 10px;">
+        <div style="text-align: center; padding: 20px; margin-top: 30px; border-top: 1px solid #40826D;">
+            <p style="color: #2E5E4A; margin-bottom: 10px;">
                🎭 Laban Movement Analysis - Complete Suite | Heavy Beta Version
            </p>
-            <p style="color: #666; font-size: 12px;">
+            <p style="color: #2E5E4A; font-size: 12px;">
                Created by <strong>Csaba Bolyós</strong> | Powered by MediaPipe, MoveNet & YOLO
            </p>
-            <p style="color: #666; font-size: 10px; margin-top: 10px;">
+            <p style="color: #2E5E4A; font-size: 10px; margin-top: 10px;">
                <a href="https://github.com/bladeszasza" style="color: #40826D;">GitHub</a> •
                <a href="https://huggingface.co/BladeSzaSza" style="color: #40826D;">Hugging Face</a> •
                <a href="https://www.linkedin.com/in/csaba-bolyós-00a11767/" style="color: #40826D;">LinkedIn</a>
dist/gradio_labanmovementanalysis-0.0.2-py3-none-any.whl CHANGED
Binary files a/dist/gradio_labanmovementanalysis-0.0.2-py3-none-any.whl and b/dist/gradio_labanmovementanalysis-0.0.2-py3-none-any.whl differ
 
dist/gradio_labanmovementanalysis-0.0.2.tar.gz CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e161cca0ea5443885301b3362b9b8a7553fa06622dcdef7a62a03a30d15271ef
-size 84525440
+oid sha256:e389bf63ed20ab7ea50043fc3c6f8958d5ac7842b7ef41b37e7f9a4e4b83718a
+size 136361