HAL1993 committed on
Commit
257570a
·
verified ·
1 Parent(s): 77d0c2c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -7
app.py CHANGED
@@ -1,11 +1,20 @@
 
 
 
 
 
 
1
  import os
2
- # PyTorch 2.8 (temporary hack)
3
  os.system(
4
- 'pip install --upgrade --pre --extra-index-url https://download.pytorch.org/whl/nightly/cu126 "torch<2.9" spaces'
 
 
 
 
5
  )
6
 
7
  # ----------------------------------------------------------------------
8
- # 1️⃣ Imports
9
  # ----------------------------------------------------------------------
10
  import spaces
11
  import gradio as gr
@@ -57,7 +66,7 @@ def translate_albanian_to_english(text: str, language: str = "en"):
57
  raise gr.Error("Translation failed. Please try again.")
58
 
59
  # ----------------------------------------------------------------------
60
- # 4️⃣ Model & Scheduler Loading (run once at startup)
61
  # ----------------------------------------------------------------------
62
  scheduler_config = {
63
  "base_image_seq_len": 256,
@@ -85,7 +94,7 @@ pipeline = QwenImageEditPlusPipeline.from_pretrained(
85
  pipeline.to("cuda")
86
  pipeline.set_progress_bar_config(disable=None)
87
 
88
- # Load Lightning LoRA and fuse
89
  pipeline.load_lora_weights(
90
  "lightx2v/Qwen-Image-Lightning",
91
  weight_name="Qwen-Image-Lightning-8steps-V2.0-bf16.safetensors",
@@ -110,7 +119,7 @@ def edit_images(image1, image2, prompt):
110
  prompt_en = translate_albanian_to_english(prompt.strip(), language="en")
111
  prompt_final = prompt_en + QUALITY_PROMPT
112
 
113
- # Ensure PIL Images
114
  if not isinstance(image1, Image.Image):
115
  image1 = Image.fromarray(image1)
116
  if not isinstance(image2, Image.Image):
@@ -430,7 +439,7 @@ def create_demo():
430
 
431
  with gr.Row(elem_id="general_items"):
432
  gr.Markdown("# Image Fusion")
433
- gr.Markdown("Blend images together guided by prompt description.", elem_id="subtitle")
434
  with gr.Column(elem_id="input_column"):
435
  image1_input = gr.Image(
436
  label="First Image",
 
1
+ # ----------------------------------------------------------------------
2
+ # 0️⃣ Install the exact versions we need (run once at start‑up)
3
+ # ----------------------------------------------------------------------
4
+ # 1️⃣ Install a nightly torch < 2.9 (as you already did)
5
+ # 2️⃣ Upgrade huggingface‑hub to the version required by the Qwen pipeline
6
+ # 3️⃣ Install a recent transformers build (needed for Qwen2.5‑VL)
7
  import os
 
8
  os.system(
9
+ 'pip install --upgrade --pre --extra-index-url https://download.pytorch.org/whl/nightly/cu126 '
10
+ '"torch<2.9" '
11
+ '"huggingface-hub>=1.0.0rc6" '
12
+ '"transformers>=4.40.0" '
13
+ 'spaces -q'
14
  )
15
 
16
  # ----------------------------------------------------------------------
17
+ # 1️⃣ Imports (now safe because the correct packages are installed)
18
  # ----------------------------------------------------------------------
19
  import spaces
20
  import gradio as gr
 
66
  raise gr.Error("Translation failed. Please try again.")
67
 
68
  # ----------------------------------------------------------------------
69
+ # 4️⃣ Scheduler & Model Loading (run once at startup)
70
  # ----------------------------------------------------------------------
71
  scheduler_config = {
72
  "base_image_seq_len": 256,
 
94
  pipeline.to("cuda")
95
  pipeline.set_progress_bar_config(disable=None)
96
 
97
+ # Load Lightning LoRA and fuse it (speed‑up)
98
  pipeline.load_lora_weights(
99
  "lightx2v/Qwen-Image-Lightning",
100
  weight_name="Qwen-Image-Lightning-8steps-V2.0-bf16.safetensors",
 
119
  prompt_en = translate_albanian_to_english(prompt.strip(), language="en")
120
  prompt_final = prompt_en + QUALITY_PROMPT
121
 
122
+ # Ensure we have PIL Images
123
  if not isinstance(image1, Image.Image):
124
  image1 = Image.fromarray(image1)
125
  if not isinstance(image2, Image.Image):
 
439
 
440
  with gr.Row(elem_id="general_items"):
441
  gr.Markdown("# Image Fusion")
442
+ gr.Markdown("Blend images together guided by a prompt description.", elem_id="subtitle")
443
  with gr.Column(elem_id="input_column"):
444
  image1_input = gr.Image(
445
  label="First Image",