SHAILJA1 committed
Commit 9165be4 · verified · 1 Parent(s): bd96388

Create app.py

Files changed (1)
  1. app.py +67 -0
app.py ADDED
@@ -0,0 +1,67 @@
# NOTE: the original first line, "!pip install diffusers torch moviepy pillow",
# is notebook-only syntax and a SyntaxError in a plain app.py; install the
# dependencies from the shell (or a requirements.txt) instead. diffusers also
# needs transformers for the Stable Diffusion text encoder:
#   pip install diffusers transformers torch moviepy pillow
import torch
from diffusers import StableDiffusionImg2ImgPipeline
from PIL import Image
from moviepy.editor import ImageSequenceClip
import os

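# Environment note (an assumption about the install, not part of this commit):
# moviepy 2.x removed the "moviepy.editor" module. If the import above fails,
# pin "moviepy<2.0" or switch to the 2.x form:
#   from moviepy import ImageSequenceClip
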
# Step 1: Set up Stable Diffusion img2img pipeline
def setup_pipeline(model_name="CompVis/stable-diffusion-v1-4"):
    pipe = StableDiffusionImg2ImgPipeline.from_pretrained(model_name, torch_dtype=torch.float16)
    pipe.to("cuda")  # Use GPU for faster generation
    return pipe

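# A device-agnostic variant (a sketch, assuming the script may also run on a
# CPU-only machine, where pipe.to("cuda") raises and float16 inference is
# poorly supported):
#   device = "cuda" if torch.cuda.is_available() else "cpu"
#   dtype = torch.float16 if device == "cuda" else torch.float32
#   pipe = StableDiffusionImg2ImgPipeline.from_pretrained(model_name, torch_dtype=dtype).to(device)
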
# Step 2: Generate frames from the single image
def generate_frames(pipe, input_image_path, prompt, num_frames=30, guidance_scale=7.5, strength=0.5, output_folder="frames"):
    os.makedirs(output_folder, exist_ok=True)
    frames = []

    # Load the input image
    input_image = Image.open(input_image_path).convert("RGB")

    for i in range(num_frames):
        # Slightly modify the prompt and strength for variation
        current_prompt = f"{prompt}, frame {i+1} of {num_frames}"
        # Gradual change in strength, clamped so it never leaves the
        # pipeline's valid [0, 1] range when num_frames is large
        current_strength = min(strength + (0.01 * i), 1.0)

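        # (In img2img, strength sets how much of the input is replaced by
        # noise: 0.0 reproduces the input, 1.0 ignores it entirely, so the
        # ramp above makes later frames drift further from the source image.)
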
        # Generate a new image
        generated_image = pipe(
            prompt=current_prompt,
            image=input_image,
            strength=current_strength,
            guidance_scale=guidance_scale
        ).images[0]

        # Save the frame
        frame_path = os.path.join(output_folder, f"frame_{i:03d}.png")
        generated_image.save(frame_path)
        frames.append(frame_path)
        print(f"Generated frame {i+1}/{num_frames}")

    return frames

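# Note: every frame above is diffused from the same still input, so the output
# tends to flicker rather than animate. A common variation (a sketch, not what
# this commit does) feeds each result back in as the next frame's input:
#   input_image = generated_image  # at the end of the loop body
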
# Step 3: Create video from frames
def create_video(frames, output_file="output_video.mp4", fps=24):
    clip = ImageSequenceClip(frames, fps=fps)
    clip.write_videofile(output_file, codec="libx264")
    print(f"Video saved as {output_file}")

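# With the defaults below, 30 frames at 24 fps yields roughly a 1.25-second
# clip; raise num_frames (at proportionally longer generation time) for a
# longer video.
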
# Step 4: Main script
if __name__ == "__main__":
    # Model and prompt configuration
    input_image_path = "/mnt/data/Screenshot 2025-01-03 171727.png"  # Use the uploaded image
    prompt = "A child riding a bicycle through a magical forest, dynamic and cinematic lighting"
    num_frames = 30
    fps = 24

    # Initialize Stable Diffusion img2img pipeline
    pipe = setup_pipeline()

    # Generate frames from the single image
    print("Generating frames...")
    frames = generate_frames(pipe, input_image_path, prompt, num_frames=num_frames)

    # Create video
    print("Creating video...")
    create_video(frames, output_file="image_to_video_diffusion.mp4", fps=fps)