import spaces
import torch
from diffusers.pipelines.wan.pipeline_wan_i2v import WanImageToVideoPipeline
from diffusers.models.transformers.transformer_wan import WanTransformer3DModel
from diffusers.utils.export_utils import export_to_video
import gradio as gr
import tempfile
import numpy as np
from PIL import Image
import random
import gc
from torchao.quantization import quantize_
from torchao.quantization import Float8DynamicActivationFloat8WeightConfig
from torchao.quantization import Int8WeightOnlyConfig
import aoti
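# Resolution and frame-count limits for Wan 2.2 I2V; output dimensions are snapped to multiples of 16.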
MODEL_ID = "Wan-AI/Wan2.2-I2V-A14B-Diffusers"
MAX_DIM = 832
MIN_DIM = 480
SQUARE_DIM = 640
MULTIPLE_OF = 16
MAX_SEED = np.iinfo(np.int32).max
FIXED_FPS = 16
MIN_FRAMES_MODEL = 8
MAX_FRAMES_MODEL = 80
MIN_DURATION = round(MIN_FRAMES_MODEL / FIXED_FPS, 1)
MAX_DURATION = round(MAX_FRAMES_MODEL / FIXED_FPS, 1)
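# Load the Wan 2.2 I2V pipeline, swapping in bf16 checkpoints of both transformer stages
# (the high- and low-noise experts), each mapped directly onto the GPU.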
pipe = WanImageToVideoPipeline.from_pretrained(
    MODEL_ID,
    transformer=WanTransformer3DModel.from_pretrained(
        'cbensimon/Wan2.2-I2V-A14B-bf16-Diffusers',
        subfolder='transformer',
        torch_dtype=torch.bfloat16,
        device_map='cuda',
    ),
    transformer_2=WanTransformer3DModel.from_pretrained(
        'cbensimon/Wan2.2-I2V-A14B-bf16-Diffusers',
        subfolder='transformer_2',
        torch_dtype=torch.bfloat16,
        device_map='cuda',
    ),
    torch_dtype=torch.bfloat16,
).to('cuda')
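# Load the Lightx2v CFG-step-distillation LoRA into both transformers, fuse it (scale 3.0 on the
# first stage, 1.0 on the second), then unload the raw LoRA weights so only the fused result remains.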
pipe.load_lora_weights(
    "Kijai/WanVideo_comfy",
    weight_name="Lightx2v/lightx2v_I2V_14B_480p_cfg_step_distill_rank128_bf16.safetensors",
    adapter_name="lightx2v",
)
pipe.load_lora_weights(
    "Kijai/WanVideo_comfy",
    weight_name="Lightx2v/lightx2v_I2V_14B_480p_cfg_step_distill_rank128_bf16.safetensors",
    adapter_name="lightx2v_2",
    load_into_transformer_2=True,
)
pipe.set_adapters(["lightx2v", "lightx2v_2"], adapter_weights=[1., 1.])
pipe.fuse_lora(adapter_names=["lightx2v"], lora_scale=3., components=["transformer"])
pipe.fuse_lora(adapter_names=["lightx2v_2"], lora_scale=1., components=["transformer_2"])
pipe.unload_lora_weights()
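# Quantize for speed and VRAM: int8 weight-only on the text encoder, fp8 dynamic activation+weight
# on both transformers, then load matching ahead-of-time compiled blocks via the Space's aoti helper.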
quantize_(pipe.text_encoder, Int8WeightOnlyConfig())
quantize_(pipe.transformer, Float8DynamicActivationFloat8WeightConfig())
quantize_(pipe.transformer_2, Float8DynamicActivationFloat8WeightConfig())
aoti.aoti_blocks_load(pipe.transformer, 'zerogpu-aoti/Wan2', variant='fp8da')
aoti.aoti_blocks_load(pipe.transformer_2, 'zerogpu-aoti/Wan2', variant='fp8da')
default_prompt_i2v = "Bring this image to life: apply cinematic motion and smooth animation."
default_negative_prompt = "vivid color tone, overexposed, static, blurry details, subtitles, style, artwork, painting, frame, still, overall gray, worst quality, low quality, JPEG artifacts, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn face, deformed, disfigured, malformed limbs, fused fingers, motionless frame, cluttered background, three legs, many people in the background, walking backwards"
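# Fit an arbitrary image into the supported resolution box: square inputs go to SQUARE_DIM, extreme
# aspect ratios are center-cropped, and final dims snap to multiples of 16 within [MIN_DIM, MAX_DIM].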
def resize_image(image: Image.Image) -> Image.Image:
    width, height = image.size
    if width == height:
        return image.resize((SQUARE_DIM, SQUARE_DIM), Image.LANCZOS)
    aspect_ratio = width / height
    MAX_ASPECT_RATIO = MAX_DIM / MIN_DIM
    MIN_ASPECT_RATIO = MIN_DIM / MAX_DIM
    image_to_resize = image
    if aspect_ratio > MAX_ASPECT_RATIO:
        # Too wide: center-crop horizontally down to the widest supported ratio.
        target_w, target_h = MAX_DIM, MIN_DIM
        crop_width = int(round(height * MAX_ASPECT_RATIO))
        left = (width - crop_width) // 2
        image_to_resize = image.crop((left, 0, left + crop_width, height))
    elif aspect_ratio < MIN_ASPECT_RATIO:
        # Too tall: center-crop vertically down to the tallest supported ratio.
        target_w, target_h = MIN_DIM, MAX_DIM
        crop_height = int(round(width / MIN_ASPECT_RATIO))
        top = (height - crop_height) // 2
        image_to_resize = image.crop((0, top, width, top + crop_height))
    else:
        # Within range: scale the long side to MAX_DIM, preserving aspect ratio.
        if width > height:
            target_w = MAX_DIM
            target_h = int(round(target_w / aspect_ratio))
        else:
            target_h = MAX_DIM
            target_w = int(round(target_h * aspect_ratio))
    final_w = round(target_w / MULTIPLE_OF) * MULTIPLE_OF
    final_h = round(target_h / MULTIPLE_OF) * MULTIPLE_OF
    final_w = max(MIN_DIM, min(MAX_DIM, final_w))
    final_h = max(MIN_DIM, min(MAX_DIM, final_h))
    return image_to_resize.resize((final_w, final_h), Image.LANCZOS)
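# Convert the requested duration in seconds into a frame count within the model's limits.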
def get_num_frames(duration_seconds: float):
    return 1 + int(np.clip(
        int(round(duration_seconds * FIXED_FPS)),
        MIN_FRAMES_MODEL,
        MAX_FRAMES_MODEL,
    ))
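# Estimate the GPU-time budget (in seconds) for a request: per-step cost scales as
# (frames * height * width / baseline)^1.5, plus fixed overhead.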
def get_duration(
    input_image,
    prompt,
    steps,
    negative_prompt,
    duration_seconds,
    guidance_scale,
    guidance_scale_2,
    seed,
    randomize_seed,
    progress,
):
    BASE_FRAMES_HEIGHT_WIDTH = 81 * 832 * 624
    BASE_STEP_DURATION = 15
    width, height = resize_image(input_image).size
    frames = get_num_frames(duration_seconds)
    factor = frames * width * height / BASE_FRAMES_HEIGHT_WIDTH
    step_duration = BASE_STEP_DURATION * factor ** 1.5
    return 10 + int(steps) * step_duration
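# NOTE: the @spaces.GPU decorator below is assumed from the standard ZeroGPU pattern;
# `import spaces` and get_duration's matching signature imply it was present in the original.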
@spaces.GPU(duration=get_duration)  # assumed ZeroGPU time-budget hook (see note above)
def generate_video(
    input_image,
    prompt,
    steps=4,
    negative_prompt=default_negative_prompt,
    duration_seconds=MAX_DURATION,
    guidance_scale=1,
    guidance_scale_2=1,
    seed=42,
    randomize_seed=False,
    progress=gr.Progress(track_tqdm=True),
):
    if input_image is None:
        raise gr.Error("Please upload an image.")
    num_frames = get_num_frames(duration_seconds)
    current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
    resized_image = resize_image(input_image)
    output_frames_list = pipe(
        image=resized_image,
        prompt=prompt,
        negative_prompt=negative_prompt,
        height=resized_image.height,
        width=resized_image.width,
        num_frames=num_frames,
        guidance_scale=float(guidance_scale),
        guidance_scale_2=float(guidance_scale_2),
        num_inference_steps=int(steps),
        generator=torch.Generator(device="cuda").manual_seed(current_seed),
    ).frames[0]
    with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmpfile:
        video_path = tmpfile.name
    export_to_video(output_frames_list, video_path, fps=FIXED_FPS)
    return video_path, current_seed
| # μΈλ ¨λ νκΈ UI | |
| with gr.Blocks(theme=gr.themes.Soft()) as demo: | |
| gr.Markdown("# π¬ WAN κΈ°λ° μ΄κ³ μ μ΄λ―Έμ§ to λΉλμ€ λ¬΄λ£ μμ± μ€νμμ€") | |
| gr.Markdown("** WAN 2.2 14B + FAST + νκΈν + νλ ** - 4~8λ¨κ³λ‘ λΉ λ₯Έ μμ μμ±") | |
| gr.Markdown("** νΈλν½ μ νμ λ€μ 4κ°μ λ―Έλ¬λ§ μλ²λ€μ μ΄μ©νμ¬ λΆμ° μ¬μ© κΆκ³ ") | |
| gr.HTML(""" | |
| <div style="display: flex; gap: 10px; flex-wrap: wrap; justify-content: center; margin: 20px 0;"> | |
| <a href="https://huggingface.co/spaces/Heartsync/wan2_2-I2V-14B-FAST" target="_blank"> | |
| <img src="https://img.shields.io/static/v1?label=WAN%202.2%2014B%20FAST%2B&message=Image%20to%20Video&color=%230000ff&labelColor=%23800080&logo=huggingface&logoColor=white&style=for-the-badge" alt="badge"> | |
| </a> | |
| <a href="https://huggingface.co/spaces/ginipick/wan2_2-I2V-14B-FAST" target="_blank"> | |
| <img src="https://img.shields.io/static/v1?label=WAN%202.2%2014B%20FAST%2B&message=Image%20to%20Video&color=%230000ff&labelColor=%23800080&logo=huggingface&logoColor=white&style=for-the-badge" alt="badge"> | |
| </a> | |
| <a href="https://huggingface.co/spaces/ginigen/wan2_2-I2V-14B-FAST" target="_blank"> | |
| <img src="https://img.shields.io/static/v1?label=WAN%202.2%2014B%20FAST%2B&message=Image%20to%20Video&color=%230000ff&labelColor=%23800080&logo=huggingface&logoColor=white&style=for-the-badge" alt="badge"> | |
| </a> | |
| <a href="https://huggingface.co/spaces/VIDraft/wan2_2-I2V-14B-FAST" target="_blank"> | |
| <img src="https://img.shields.io/static/v1?label=WAN%202.2%2014B%20FAST%2B&message=Image%20to%20Video&color=%230000ff&labelColor=%23800080&logo=huggingface&logoColor=white&style=for-the-badge" alt="badge"> | |
| </a> | |
| <a href="https://discord.gg/openfreeai" target="_blank"> | |
| <img src="https://img.shields.io/static/v1?label=Discord&message=Openfree%20AI&color=%230000ff&labelColor=%23800080&logo=discord&logoColor=white&style=for-the-badge" alt="badge"></a> | |
| </div> | |
| """) | |
    with gr.Row():
        with gr.Column(scale=1):
            input_image_component = gr.Image(type="pil", label="Input Image")
            prompt_input = gr.Textbox(label="Prompt", value=default_prompt_i2v, lines=2)
            duration_seconds_input = gr.Slider(
                minimum=MIN_DURATION,
                maximum=MAX_DURATION,
                step=0.1,
                value=3.5,
                label="Video Length (seconds)"
            )
            with gr.Accordion("Advanced Settings", open=False):
                negative_prompt_input = gr.Textbox(label="Negative Prompt", value=default_negative_prompt, lines=2)
                steps_slider = gr.Slider(minimum=1, maximum=30, step=1, value=6, label="Inference Steps")
                guidance_scale_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=1, label="Guidance Scale 1")
                guidance_scale_2_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=1, label="Guidance Scale 2")
                seed_input = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=42)
                randomize_seed_checkbox = gr.Checkbox(label="Randomize Seed", value=True)
            generate_button = gr.Button("🎥 Generate Video", variant="primary", size="lg")
        with gr.Column(scale=1):
            video_output = gr.Video(label="Generated Video", autoplay=True, interactive=False)
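    # Input order must match generate_video's positional signature.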
    ui_inputs = [
        input_image_component, prompt_input, steps_slider,
        negative_prompt_input, duration_seconds_input,
        guidance_scale_input, guidance_scale_2_input, seed_input, randomize_seed_checkbox
    ]
    generate_button.click(fn=generate_video, inputs=ui_inputs, outputs=[video_output, seed_input])
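    # Example gallery; "lazy" caching renders each example the first time it is requested.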
    gr.Examples(
        examples=[
            [
                "wan_i2v_input.JPG",
                "POV selfie video: a white cat in sunglasses stands on a surfboard with a relaxed smile. Tropical beach backdrop (clear water, green hills, blue sky with clouds). The surfboard tips, the cat falls into the sea, and the camera plunges underwater amid bubbles and sunbeams. The cat's face is briefly visible underwater, then it resurfaces and keeps filming the selfie; a cheerful summer-vacation mood.",
                4,
            ],
            [
                "wan22_input_2.jpg",
                "A sleek lunar rover glides from left to right, kicking up moon dust. Young astronauts in white spacesuits ride aboard, bouncing with the Moon's characteristic low-gravity motion. In the far background a VTOL craft descends vertically and touches down quietly on the surface. Throughout the scene a surreal aurora dances across the star-filled sky, curtains of green, blue, and violet light bathing the lunar landscape in a mysterious, magical glow.",
                4,
            ],
            [
                "kill_bill.jpeg",
                "Uma Thurman's character Beatrix Kiddo holds her razor-sharp katana firmly in both hands under cinematic lighting. Suddenly the polished steel begins to soften and distort, losing structural integrity like heated metal. The blade's perfect edge slowly bends and sags, molten steel running down in silvery streams. The transformation starts subtly, then accelerates as the metal grows ever more fluid. The camera stays fixed on her face as her sharp gaze gradually narrows, not in deadly focus but in confusion and alarm as she watches her weapon melt before her eyes. Her breathing quickens slightly as she witnesses the impossible change. The melting intensifies, the katana's perfect form turning increasingly abstract as liquid metal drips away like quicksilver. Molten drops hit the floor with soft metallic impacts. Her expression shifts from calm readiness to bewilderment and concern as the legendary instrument of vengeance literally liquefies in her hands, leaving her defenseless.",
                6,
            ],
        ],
        inputs=[input_image_component, prompt_input, steps_slider],
        outputs=[video_output, seed_input],
        fn=generate_video,
        cache_examples="lazy"
    )
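# queue() enables request queuing; mcp_server=True also exposes the app as an MCP server.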
if __name__ == "__main__":
    demo.queue().launch(mcp_server=True)