AiCoderv2 committed
Commit 2bcb219 · verified
1 Parent(s): 087faf7

Deploy Gradio app with multiple files

Files changed (3)
  1. aoti.py +35 -0
  2. app.py +251 -0
  3. requirements.txt +10 -0
aoti.py ADDED
@@ -0,0 +1,35 @@
+ """Helpers for loading ahead-of-time (AoT) compiled transformer blocks on ZeroGPU.
+ """
+
+ from typing import cast
+
+ import torch
+ from huggingface_hub import hf_hub_download
+ from spaces.zero.torch.aoti import ZeroGPUCompiledModel
+ from spaces.zero.torch.aoti import ZeroGPUWeights
+ from torch._functorch._aot_autograd.subclass_parametrization import unwrap_tensor_subclass_parameters
+
+
+ def _shallow_clone_module(module: torch.nn.Module) -> torch.nn.Module:
+     clone = object.__new__(module.__class__)  # new instance without running __init__
+     clone.__dict__ = module.__dict__.copy()
+     clone._parameters = module._parameters.copy()
+     clone._buffers = module._buffers.copy()
+     clone._modules = {k: _shallow_clone_module(v) for k, v in module._modules.items() if v is not None}
+     return clone
+
+
+ def aoti_blocks_load(module: torch.nn.Module, repo_id: str, variant: str | None = None):
+     repeated_blocks = cast(list[str], module._repeated_blocks)  # class names of the blocks to patch
+     aoti_files = {name: hf_hub_download(
+         repo_id=repo_id,
+         filename='package.pt2',
+         subfolder=name if variant is None else f'{name}.{variant}',
+     ) for name in repeated_blocks}
+     for block_name, aoti_file in aoti_files.items():
+         for block in module.modules():
+             if block.__class__.__name__ == block_name:
+                 block_ = _shallow_clone_module(block)
+                 unwrap_tensor_subclass_parameters(block_)
+                 weights = ZeroGPUWeights(block_.state_dict())
+                 block.forward = ZeroGPUCompiledModel(aoti_file, weights)  # swap in the compiled forward
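
Note: aoti_blocks_load patches a Diffusers transformer in place. For every class name listed in the module's _repeated_blocks, it downloads a pre-compiled package.pt2 artifact from the given Hub repo and replaces that block's forward with a ZeroGPUCompiledModel built from the block's current (already quantized) weights. A minimal usage sketch, mirroring the two calls made in app.py below (pipe refers to the pipeline constructed there):

    import aoti
    # patch both Wan 2.2 transformers with the FP8 dynamic-activation variant of the compiled blocks
    aoti.aoti_blocks_load(pipe.transformer, 'zerogpu-aoti/Wan2', variant='fp8da')
    aoti.aoti_blocks_load(pipe.transformer_2, 'zerogpu-aoti/Wan2', variant='fp8da')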
app.py ADDED
@@ -0,0 +1,251 @@
+ import os
+ import spaces
+ import torch
+ from diffusers.pipelines.wan.pipeline_wan_i2v import WanImageToVideoPipeline
+ from diffusers.models.transformers.transformer_wan import WanTransformer3DModel
+ from diffusers.utils.export_utils import export_to_video
+ import gradio as gr
+ import tempfile
+ import numpy as np
+ from PIL import Image
+ import random
+ import gc
+
+ from torchao.quantization import quantize_
+ from torchao.quantization import Float8DynamicActivationFloat8WeightConfig, Int8WeightOnlyConfig
+ import aoti
+
+ # =========================================================
+ # MODEL CONFIGURATION
+ # =========================================================
+ MODEL_ID = "Wan-AI/Wan2.2-I2V-A14B-Diffusers"  # model repository on the Hub
+ HF_TOKEN = os.environ.get("HF_TOKEN")  # set a Hugging Face token here if the model is private
+
+ MAX_DIM = 832
+ MIN_DIM = 480
+ SQUARE_DIM = 640
+ MULTIPLE_OF = 16
+
+ MAX_SEED = np.iinfo(np.int32).max
+
+ FIXED_FPS = 16
+ MIN_FRAMES_MODEL = 8
+ MAX_FRAMES_MODEL = 7720
+
+ MIN_DURATION = round(MIN_FRAMES_MODEL / FIXED_FPS, 1)
+ MAX_DURATION = round(MAX_FRAMES_MODEL / FIXED_FPS, 1)
+
+ # =========================================================
+ # LOAD PIPELINE
+ # =========================================================
+ pipe = WanImageToVideoPipeline.from_pretrained(
+     MODEL_ID,
+     transformer=WanTransformer3DModel.from_pretrained(
+         MODEL_ID,
+         subfolder="transformer",
+         torch_dtype=torch.bfloat16,
+         device_map="cuda",
+         token=HF_TOKEN
+     ),
+     transformer_2=WanTransformer3DModel.from_pretrained(
+         MODEL_ID,
+         subfolder="transformer_2",
+         torch_dtype=torch.bfloat16,
+         device_map="cuda",
+         token=HF_TOKEN
+     ),
+     torch_dtype=torch.bfloat16,
+ ).to("cuda")
+
+ # =========================================================
+ # LOAD LORA ADAPTERS
+ # =========================================================
+ pipe.load_lora_weights(
+     "Kijai/WanVideo_comfy",
+     weight_name="Lightx2v/lightx2v_I2V_14B_480p_cfg_step_distill_rank128_bf16.safetensors",
+     adapter_name="lightx2v"
+ )
+ pipe.load_lora_weights(
+     "Kijai/WanVideo_comfy",
+     weight_name="Lightx2v/lightx2v_I2V_14B_480p_cfg_step_distill_rank128_bf16.safetensors",
+     adapter_name="lightx2v_2",
+     load_into_transformer_2=True
+ )
+
+ pipe.set_adapters(["lightx2v", "lightx2v_2"], adapter_weights=[1., 1.])
+ pipe.fuse_lora(adapter_names=["lightx2v"], lora_scale=3., components=["transformer"])
+ pipe.fuse_lora(adapter_names=["lightx2v_2"], lora_scale=1., components=["transformer_2"])
+ pipe.unload_lora_weights()  # adapters are fused above; drop the standalone LoRA weights
+
+ # =========================================================
+ # QUANTIZATION & AOT OPTIMIZATION
+ # =========================================================
+ quantize_(pipe.text_encoder, Int8WeightOnlyConfig())  # int8 weight-only for the text encoder
+ quantize_(pipe.transformer, Float8DynamicActivationFloat8WeightConfig())
+ quantize_(pipe.transformer_2, Float8DynamicActivationFloat8WeightConfig())
+
+ aoti.aoti_blocks_load(pipe.transformer, 'zerogpu-aoti/Wan2', variant='fp8da')
+ aoti.aoti_blocks_load(pipe.transformer_2, 'zerogpu-aoti/Wan2', variant='fp8da')
+
+ # =========================================================
+ # DEFAULT PROMPTS
+ # =========================================================
+ default_prompt_i2v = "make this image come alive, cinematic motion, smooth animation"
+ default_negative_prompt = (
+     "色调艳丽, 过曝, 静态, 细节模糊不清, 字幕, 风格, 作品, 画作, 画面, 静止, 整体发灰, "
+     "最差质量, 低质量, JPEG压缩残留, 丑陋的, 残缺的, 多余的手指, 画得不好的手部, 画得不好的脸部, "
+     "畸形的, 毁容的, 形态畸形的肢体, 手指融合, 静止不动的画面, 杂乱的背景, 三条腿, 背景人很多, 倒着走"
+ )
+
+ # =========================================================
+ # IMAGE RESIZING LOGIC
+ # =========================================================
+ def resize_image(image: Image.Image) -> Image.Image:
+     width, height = image.size
+     if width == height:
+         return image.resize((SQUARE_DIM, SQUARE_DIM), Image.LANCZOS)
+
+     aspect_ratio = width / height
+     MAX_ASPECT_RATIO = MAX_DIM / MIN_DIM
+     MIN_ASPECT_RATIO = MIN_DIM / MAX_DIM
+
+     image_to_resize = image
+
+     if aspect_ratio > MAX_ASPECT_RATIO:
+         crop_width = int(round(height * MAX_ASPECT_RATIO))
+         left = (width - crop_width) // 2
+         image_to_resize = image.crop((left, 0, left + crop_width, height))
+     elif aspect_ratio < MIN_ASPECT_RATIO:
+         crop_height = int(round(width / MIN_ASPECT_RATIO))
+         top = (height - crop_height) // 2
+         image_to_resize = image.crop((0, top, width, top + crop_height))
+
+     if width > height:
+         target_w = MAX_DIM
+         target_h = int(round(target_w / aspect_ratio))
+     else:
+         target_h = MAX_DIM
+         target_w = int(round(target_h * aspect_ratio))
+
+     final_w = round(target_w / MULTIPLE_OF) * MULTIPLE_OF  # snap to a multiple of 16
+     final_h = round(target_h / MULTIPLE_OF) * MULTIPLE_OF
+
+     final_w = max(MIN_DIM, min(MAX_DIM, final_w))
+     final_h = max(MIN_DIM, min(MAX_DIM, final_h))
+
+     return image_to_resize.resize((final_w, final_h), Image.LANCZOS)
+
+ # =========================================================
+ # UTILITY FUNCTIONS
+ # =========================================================
+ def get_num_frames(duration_seconds: float):
+     return 1 + int(np.clip(int(round(duration_seconds * FIXED_FPS)), MIN_FRAMES_MODEL, MAX_FRAMES_MODEL))  # clamp to the model's supported frame range
+
+ def get_duration(
+     input_image, prompt, steps, negative_prompt,
+     duration_seconds, guidance_scale, guidance_scale_2,
+     seed, randomize_seed, progress,
+ ):
+     BASE_FRAMES_HEIGHT_WIDTH = 81 * 832 * 624
+     BASE_STEP_DURATION = 15
+     width, height = resize_image(input_image).size
+     frames = get_num_frames(duration_seconds)
+     factor = frames * width * height / BASE_FRAMES_HEIGHT_WIDTH
+     step_duration = BASE_STEP_DURATION * factor ** 1.5
+     return 10 + int(steps) * step_duration  # estimated GPU seconds: fixed overhead + per-step cost
+
+ # =========================================================
+ # MAIN GENERATION FUNCTION
+ # =========================================================
+ @spaces.GPU(duration=get_duration)
+ def generate_video(
+     input_image,
+     prompt,
+     steps=4,
+     negative_prompt=default_negative_prompt,
+     duration_seconds=MAX_DURATION,
+     guidance_scale=1,
+     guidance_scale_2=1,
+     seed=42,
+     randomize_seed=False,
+     progress=gr.Progress(track_tqdm=True),
+ ):
+     if input_image is None:
+         raise gr.Error("Please upload an input image.")
+
+     num_frames = get_num_frames(duration_seconds)
+     current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
+     resized_image = resize_image(input_image)
+
+     output_frames_list = pipe(
+         image=resized_image,
+         prompt=prompt,
+         negative_prompt=negative_prompt,
+         height=resized_image.height,
+         width=resized_image.width,
+         num_frames=num_frames,
+         guidance_scale=float(guidance_scale),
+         guidance_scale_2=float(guidance_scale_2),
+         num_inference_steps=int(steps),
+         generator=torch.Generator(device="cuda").manual_seed(current_seed),
+     ).frames[0]
+
+     with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmpfile:
+         video_path = tmpfile.name  # temporary .mp4 path; the video is written below
+     export_to_video(output_frames_list, video_path, fps=FIXED_FPS)
+     return video_path, current_seed
+
+ # =========================================================
+ # GRADIO UI
+ # =========================================================
+ with gr.Blocks() as demo:
+     gr.Markdown("# 🚀 Dream Wan 2.2 Faster Pro (14B) — Ultra Fast I2V with Lightning LoRA")
+     gr.Markdown("Optimized FP8 quantized pipeline with AoT blocks & 4-step fast inference ⚡")
+
+     with gr.Row():
+         with gr.Column():
+             input_image_component = gr.Image(type="pil", label="Input Image")
+             prompt_input = gr.Textbox(label="Prompt", value=default_prompt_i2v)
+             duration_seconds_input = gr.Slider(
+                 minimum=MIN_DURATION, maximum=MAX_DURATION, step=0.1, value=3.5,
+                 label="Duration (seconds)",
+                 info=f"Model range: {MIN_FRAMES_MODEL}-{MAX_FRAMES_MODEL} frames at {FIXED_FPS}fps."
+             )
+
+             with gr.Accordion("Advanced Settings", open=False):
+                 negative_prompt_input = gr.Textbox(label="Negative Prompt", value=default_negative_prompt, lines=3)
+                 seed_input = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=42)
+                 randomize_seed_checkbox = gr.Checkbox(label="Randomize seed", value=True)
+                 steps_slider = gr.Slider(minimum=1, maximum=30, step=1, value=6, label="Inference Steps")
+                 guidance_scale_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=1, label="Guidance Scale (high noise)")
+                 guidance_scale_2_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=1, label="Guidance Scale 2 (low noise)")
+
+             generate_button = gr.Button("🎬 Generate Video", variant="primary")
+
+         with gr.Column():
+             video_output = gr.Video(label="Generated Video", autoplay=True)
+
+     ui_inputs = [
+         input_image_component, prompt_input, steps_slider,
+         negative_prompt_input, duration_seconds_input,
+         guidance_scale_input, guidance_scale_2_input,
+         seed_input, randomize_seed_checkbox
+     ]
+     generate_button.click(fn=generate_video, inputs=ui_inputs, outputs=[video_output, seed_input])
+
+     gr.Examples(
+         examples=[
+             [
+                 "wan_i2v_input.JPG",
+                 "POV selfie video, white cat with sunglasses standing on surfboard, relaxed smile, tropical beach behind (clear water, green hills, blue sky with clouds). Surfboard tips, cat falls into ocean, camera plunges underwater with bubbles and sunlight beams. Brief underwater view of cat’s face, then cat resurfaces, still filming selfie, playful summer vacation mood.",
+                 4,
+             ],
+         ],
+         inputs=[input_image_component, prompt_input, steps_slider],
+         outputs=[video_output, seed_input],
+         fn=generate_video,
+         cache_examples="lazy"
+     )
+
+ if __name__ == "__main__":
+     demo.queue().launch(mcp_server=True)
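
A note on the dynamic GPU budget: @spaces.GPU is given duration=get_duration, a callable that receives the same arguments as generate_video and returns an estimated runtime in seconds, so ZeroGPU reserves only as much time as the request needs. Here the estimate takes a 15-second-per-step baseline, scales it by (frames * width * height) relative to the 81 * 832 * 624 reference raised to the power 1.5, and adds 10 seconds of fixed overhead. A stripped-down sketch of the same pattern (function and constant names below are illustrative, not part of the app):

    import spaces

    def estimate_seconds(prompt, steps):
        # fixed startup overhead plus a rough per-step cost; numbers are placeholders
        return 10 + int(steps) * 15

    @spaces.GPU(duration=estimate_seconds)
    def generate(prompt, steps):
        ...  # GPU-bound work runs inside the reserved window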
requirements.txt ADDED
@@ -0,0 +1,10 @@
+ git+https://github.com/linoytsaban/diffusers.git@wan22-loras
+ transformers
+ accelerate
+ safetensors
+ sentencepiece
+ peft
+ ftfy
+ imageio-ffmpeg
+ opencv-python
+ torchao==0.11.0