[AUTOMATED] Migration to Gradio 6.0

#1
by multimodalart (HF Staff) - opened
Files changed (2)
  1. README.md +1 -1
  2. app.py +77 -77
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 💻
  colorFrom: indigo
  colorTo: red
  sdk: gradio
- sdk_version: 5.44.1
+ sdk_version: 6.0.0
  app_file: app.py
  pinned: false
  license: apache-2.0
app.py CHANGED
@@ -303,8 +303,81 @@ def predict(image, model_selection, top_k, threshold):


  # --- Gradio Interface ---
- with gr.Blocks(
- css="""
+ with gr.Blocks() as demo:
+
+ gr.Markdown("# Kaloscope Artist Style Classification")
+
+ with gr.Row():
+ with gr.Column():
+ image_input = gr.Image(type="pil", label="Upload Image", elem_id="image-upload")
+
+ with gr.Column():
+ submit_btn = gr.Button("Predict")
+
+ tags_output = gr.Textbox(label="Predicted Tags", buttons=["copy"])
+ prettier_output = gr.DataFrame(
+ elem_id="results-table-wrapper",
+ # value=[
+ # [
+ # 1,
+ # "[Samplaaaae Artist](https://example.com)",
+ # "<span class='copy-btn' data-copy='Samplaaaae Artist'>📋</span>",
+ # "95.00%",
+ # ],
+ # [
+ # 2,
+ # "[Another Artist](https://example.com)",
+ # "<span class='copy-btn' data-copy='Another Artist'>📋</span>",
+ # "90.00%",
+ # ],
+ # [
+ # 3,
+ # "[Third Artist](https://example.com)",
+ # "<span class='copy-btn' data-copy='Third Artist'>📋</span>",
+ # "85.00%",
+ # ],
+ # ],
+ interactive=False,
+ datatype=["number", "markdown", "html", "str"],
+ headers=["Rank", "Artist", "", "Score"],
+ )
+ json_accordion = gr.Accordion("JSON Output", open=False)
+ with json_accordion:
+ json_output = gr.Code(language="json", show_label=False, lines=7)
+
+ with gr.Group():
+ model_selection = gr.Dropdown(
+ choices=[
+ (
+ f"{name}",
+ # f"{name} | Repo: {MODELS[name].get('repo_id') or 'local'}",
+ name,
+ )
+ for name in MODELS
+ ],
+ value=list(MODELS.keys())[0],
+ label="Select Model",
+ )
+ with gr.Row(elem_id="slider-row-container"):
+ top_k_slider = gr.Slider(minimum=1, maximum=25, value=5, step=1, label="Top K")
+ threshold_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.0, step=0.01, label="Threshold")
+ time_display = gr.Markdown() # populated after prediction
+
+ gr.Markdown(
+ "Models sourced from [heathcliff01/Kaloscope](https://huggingface.co/heathcliff01/Kaloscope) & [heathcliff01/Kaloscope2.0](https://huggingface.co/heathcliff01/Kaloscope2.0) (Original PyTorch releases) "
+ + "and [DraconicDragon/Kaloscope-onnx](https://huggingface.co/DraconicDragon/Kaloscope-onnx) (ONNX converted and EMA weights). \n"
+ + "OpenVINO™ will be used to accelerate ONNX CPU inference with ONNX CPUExecutionProvider as fallback."
+ )
+
+ submit_btn.click(
+ fn=predict,
+ inputs=[image_input, model_selection, top_k_slider, threshold_slider],
+ outputs=[tags_output, prettier_output, json_output, time_display],
+ )
+
+ if __name__ == "__main__":
+ demo.launch(
+ css="""
  * { box-sizing: border-box; }
  @media (max-width: 1022px) {
  #slider-row-container {
@@ -393,7 +466,7 @@ with gr.Blocks(
  background: rgba(128, 128, 128, 0.1);
  }
  """,
- js="""
+ js="""
  function() {
  document.addEventListener('click', function(e) {
  if (e.target.classList.contains('copy-btn')) {
@@ -428,77 +501,4 @@ with gr.Blocks(
  }, true);
  }
  """,
- ) as demo:
-
- gr.Markdown("# Kaloscope Artist Style Classification")
-
- with gr.Row():
- with gr.Column():
- image_input = gr.Image(type="pil", label="Upload Image", elem_id="image-upload")
-
- with gr.Column():
- submit_btn = gr.Button("Predict")
-
- tags_output = gr.Textbox(label="Predicted Tags", show_copy_button=True)
- prettier_output = gr.DataFrame(
- elem_id="results-table-wrapper",
- # value=[
- # [
- # 1,
- # "[Samplaaaae Artist](https://example.com)",
- # "<span class='copy-btn' data-copy='Samplaaaae Artist'>📋</span>",
- # "95.00%",
- # ],
- # [
- # 2,
- # "[Another Artist](https://example.com)",
- # "<span class='copy-btn' data-copy='Another Artist'>📋</span>",
- # "90.00%",
- # ],
- # [
- # 3,
- # "[Third Artist](https://example.com)",
- # "<span class='copy-btn' data-copy='Third Artist'>📋</span>",
- # "85.00%",
- # ],
- # ],
- interactive=False,
- datatype=["number", "markdown", "html", "str"],
- headers=["Rank", "Artist", "", "Score"],
- )
- json_accordion = gr.Accordion("JSON Output", open=False)
- with json_accordion:
- json_output = gr.Code(language="json", show_label=False, lines=7)
-
- with gr.Group():
- model_selection = gr.Dropdown(
- choices=[
- (
- f"{name}",
- # f"{name} | Repo: {MODELS[name].get('repo_id') or 'local'}",
- name,
- )
- for name in MODELS
- ],
- value=list(MODELS.keys())[0],
- label="Select Model",
- )
- with gr.Row(elem_id="slider-row-container"):
- top_k_slider = gr.Slider(minimum=1, maximum=25, value=5, step=1, label="Top K")
- threshold_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.0, step=0.01, label="Threshold")
- time_display = gr.Markdown() # populated after prediction
-
- gr.Markdown(
- "Models sourced from [heathcliff01/Kaloscope](https://huggingface.co/heathcliff01/Kaloscope) & [heathcliff01/Kaloscope2.0](https://huggingface.co/heathcliff01/Kaloscope2.0) (Original PyTorch releases) "
- + "and [DraconicDragon/Kaloscope-onnx](https://huggingface.co/DraconicDragon/Kaloscope-onnx) (ONNX converted and EMA weights). \n"
- + "OpenVINO™ will be used to accelerate ONNX CPU inference with ONNX CPUExecutionProvider as fallback."
- )
-
- submit_btn.click(
- fn=predict,
- inputs=[image_input, model_selection, top_k_slider, threshold_slider],
- outputs=[tags_output, prettier_output, json_output, time_display],
- )
-
- if __name__ == "__main__":
- demo.launch()
+ )
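
For reference, the substance of the automated change to app.py is twofold: the `css=` / `js=` arguments move from the `gr.Blocks(...)` constructor to `demo.launch(...)`, and `show_copy_button=True` on `gr.Textbox` becomes `buttons=["copy"]`. Below is a minimal, self-contained sketch of that pattern as it appears in this diff; the CSS/JS strings and the single output component are placeholders, not the actual Kaloscope UI.

```python
import gradio as gr

# Placeholder style/script strings standing in for the app's real CSS/JS blocks.
CUSTOM_CSS = "#out-box { font-weight: bold; }"
CUSTOM_JS = "function() { console.log('page loaded'); }"

# Gradio 5.x style (pre-migration):
#     with gr.Blocks(css=CUSTOM_CSS, js=CUSTOM_JS) as demo:
#         out = gr.Textbox(show_copy_button=True)
#     demo.launch()

# Gradio 6.0 style (post-migration, per this diff): css/js are passed to
# launch(), and the copy button is requested via the `buttons` argument.
with gr.Blocks() as demo:
    out = gr.Textbox(label="Output", elem_id="out-box", buttons=["copy"])

if __name__ == "__main__":
    demo.launch(css=CUSTOM_CSS, js=CUSTOM_JS)
```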