SkyReels-V2-Clean/motion_utils/video_generator_animatediff.py
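"""Generate a short video clip from a single input image using Stable Video
Diffusion (stabilityai/stable-video-diffusion-img2vid)."""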
import imageio
import numpy as np
import torch
from diffusers import StableVideoDiffusionPipeline
from diffusers.utils import load_image
def generate_video_from_image(image_path, prompt, duration, fps, output_path):
    # Load the input image and convert it to RGB
    image = load_image(image_path).convert("RGB")
    # SVD was trained at 1024x576; resize so the conditioning image matches
    image = image.resize((1024, 576))

    # stable-video-diffusion-img2vid is an image-to-video checkpoint; it must be
    # loaded with StableVideoDiffusionPipeline (it is not an AnimateDiff /
    # UNetMotionModel checkpoint). Note: SVD is conditioned on the image only,
    # so the text prompt is accepted for API compatibility but not used.
    model_id = "stabilityai/stable-video-diffusion-img2vid"
    pipe = StableVideoDiffusionPipeline.from_pretrained(
        model_id, torch_dtype=torch.float16, variant="fp16"
    )
    pipe.enable_model_cpu_offload()
    # Compute the number of frames to generate. This checkpoint was trained to
    # produce 14-frame clips, so quality may degrade for longer durations.
    num_frames = int(duration * fps)

    # Generate the frames from the conditioning image. decode_chunk_size limits
    # how many frames the VAE decodes at once, reducing peak VRAM usage.
    output = pipe(
        image,
        num_inference_steps=25,
        num_frames=num_frames,
        decode_chunk_size=8,
    )
    frames = output.frames[0]  # output.frames is a list of videos; take the first
    # Convert the PIL frames to numpy arrays and write them out as a video
    frames = [np.array(frame) for frame in frames]
    imageio.mimwrite(output_path, frames, fps=fps)
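
# Minimal usage sketch (the paths and values below are assumed examples, not
# part of the original module; adjust them to your own assets):
if __name__ == "__main__":
    generate_video_from_image(
        image_path="input.jpg",   # hypothetical input image
        prompt="",                # ignored: SVD is image-conditioned only
        duration=2,               # clip length in seconds
        fps=7,                    # SVD's native frame rate
        output_path="output.mp4",
    )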