File size: 1,042 Bytes
2bfb106
 
 
 
 
 
 
 
 
d8df835
2bfb106
 
18566c3
1742f2f
d8df835
2bfb106
 
 
 
 
d8df835
2bfb106
 
 
 
 
18566c3
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
import os

import imageio
import numpy as np
import torch
from diffusers import AnimateDiffPipeline, StableVideoDiffusionPipeline
from diffusers.utils import load_image
from PIL import Image

def generate_video_from_image(image_path, prompt, duration, fps, output_path):
    """Animate a still image into a short video clip and write it to disk.

    Args:
        image_path: Path or URL of the source image (anything ``load_image`` accepts).
        prompt: Text prompt. NOTE(review): Stable Video Diffusion is purely
            image-conditioned and ignores text; the parameter is kept for
            interface compatibility with existing callers.
        duration: Target clip length in seconds.
        fps: Frames per second of the output video.
        output_path: Destination video file (e.g. ``"out.mp4"``).
    """
    # Load the image and convert it to RGB.
    image = load_image(image_path).convert("RGB")
    # SVD was trained at 1024x576; other resolutions degrade output quality.
    image = image.resize((1024, 576))

    # BUG FIX: "stabilityai/stable-video-diffusion-img2vid" is an SVD
    # checkpoint, not an AnimateDiff (UNetMotionModel) one — loading it with
    # AnimateDiffPipeline fails, and AnimateDiffPipeline takes no `image`
    # argument anyway. Use the matching image-to-video pipeline.
    model_id = "stabilityai/stable-video-diffusion-img2vid"
    pipe = StableVideoDiffusionPipeline.from_pretrained(
        model_id, torch_dtype=torch.float16, variant="fp16"
    )
    pipe.enable_model_cpu_offload()

    # Number of frames to generate; must be an int even if duration is a float.
    num_frames = int(duration * fps)

    # Generate the frames (SVD conditions on the image only, not on text).
    output = pipe(image, num_frames=num_frames, fps=fps, decode_chunk_size=8)
    # BUG FIX: output.frames is a list of clips (one per input image);
    # take the first clip to get the list of PIL frames.
    frames = output.frames[0]

    # Convert the PIL frames to numpy arrays and save them as a video.
    frames = [np.array(frame) for frame in frames]
    imageio.mimwrite(output_path, frames, fps=fps)