!pip install diffusers transformers accelerate torch moviepy pillow
import torch
from diffusers import StableDiffusionImg2ImgPipeline
from PIL import Image
from moviepy.editor import ImageSequenceClip  # moviepy >= 2.0 removed the `editor` module; pin moviepy<2.0 if this import fails
import os

# Step 1: Set up Stable Diffusion img2img pipeline
def setup_pipeline(model_name="CompVis/stable-diffusion-v1-4"):
    # Use the GPU with half precision when available; fall back to CPU/float32 otherwise
    device = "cuda" if torch.cuda.is_available() else "cpu"
    dtype = torch.float16 if device == "cuda" else torch.float32
    pipe = StableDiffusionImg2ImgPipeline.from_pretrained(model_name, torch_dtype=dtype)
    pipe.to(device)
    return pipe

# Step 2: Generate frames from the single image
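# `strength` sets how much noise is applied to the input image (0 keeps the input, 1 ignores it entirely);
# `guidance_scale` sets how strongly generation follows the text prompt.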
def generate_frames(pipe, input_image_path, prompt, num_frames=30, guidance_scale=7.5, strength=0.5, output_folder="frames"):
    os.makedirs(output_folder, exist_ok=True)
    frames = []
    
    # Load the input image and resize it to the model's native 512x512 resolution
    # (Stable Diffusion v1.x is trained at 512x512 and expects dimensions that are multiples of 8)
    input_image = Image.open(input_image_path).convert("RGB").resize((512, 512))
    
    for i in range(num_frames):
        # Vary the prompt and strength slightly so each frame differs from the previous one
        current_prompt = f"{prompt}, frame {i+1} of {num_frames}"
        current_strength = min(strength + 0.01 * i, 1.0)  # Gradually increase strength, clamped to the valid [0, 1] range
        
        # Generate a new image
        generated_image = pipe(
            prompt=current_prompt,
            image=input_image,
            strength=current_strength,
            guidance_scale=guidance_scale
        ).images[0]
        
        # Save the frame
        frame_path = os.path.join(output_folder, f"frame_{i:03d}.png")
        generated_image.save(frame_path)
        frames.append(frame_path)
        print(f"Generated frame {i+1}/{num_frames}")
    
    return frames

# Step 3: Create video from frames
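# ImageSequenceClip accepts a list of image file paths and plays them back at the given frame rate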
def create_video(frames, output_file="output_video.mp4", fps=24):
    clip = ImageSequenceClip(frames, fps=fps)
    clip.write_videofile(output_file, codec="libx264")
    print(f"Video saved as {output_file}")

# Step 4: Main script
if __name__ == "__main__":
    # Model and prompt configuration
    input_image_path = "/mnt/data/Screenshot 2025-01-03 171727.png"  # Use the uploaded image
    prompt = "A child riding a bicycle through a magical forest, dynamic and cinematic lighting"
    num_frames = 30
    fps = 24
    
    # Initialize Stable Diffusion img2img pipeline
    pipe = setup_pipeline()

    # Generate frames from the single image
    print("Generating frames...")
    frames = generate_frames(pipe, input_image_path, prompt, num_frames=num_frames)

    # Create video
    print("Creating video...")
    create_video(frames, output_file="image_to_video_diffusion.mp4", fps=fps)