asahi417 committed on
Commit
656dc9e
·
verified ·
1 Parent(s): 3cd9fa8

Upload folder using huggingface_hub

Browse files
Files changed (3) hide show
  1. README.md +7 -7
  2. app.py +60 -0
  3. requirements.txt +8 -0
README.md CHANGED
@@ -1,12 +1,12 @@
1
  ---
2
- title: Stable Video Diffusion Upscale
3
- emoji: 🦀
4
- colorFrom: indigo
5
  colorTo: green
6
  sdk: gradio
7
- sdk_version: 4.40.0
8
  app_file: app.py
9
  pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: Stable Video Diffusion with Upscaler
3
+ emoji: 📺
4
+ colorFrom: gray
5
  colorTo: green
6
  sdk: gradio
7
+ sdk_version: 4.37.2
8
  app_file: app.py
9
  pinned: false
10
+ license: other
11
+ disable_embedding: true
12
+ ---
app.py ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import os
3
+ from glob import glob
4
+ from diffusers.utils import load_image
5
+ import spaces
6
+ from panna.pipeline import PipelineSVDUpscale
7
+
8
+
9
# Pipeline combining Stable Video Diffusion (img2vid) with an InstructIR-based
# upscaler; instantiated once at startup so the weights load before requests.
model = PipelineSVDUpscale(upscaler="instruct_ir")
root_url = "https://huggingface.co/spaces/multimodalart/stable-video-diffusion/resolve/main/images"
examples = ["disaster_meme.png", "distracted_meme.png", "hide_meme.png", "success_meme.png", "willy_meme.png", "wink_meme.png"]
# Download the example images once so gr.Examples can reference them by local
# filename. (The unused `example_files` list from the original was removed.)
for example in examples:
    load_image(f"{root_url}/{example}").save(example)
# Generated videos are written under ./outputs with sequential names.
tmp_output_dir = "outputs"
os.makedirs(tmp_output_dir, exist_ok=True)
# Fix: the original link read "ttps://..." (missing the leading "h"),
# rendering a broken hyperlink in the demo header.
title = ("# [Stable Video Diffusion](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt)\n"
         "The demo is part of [panna](https://github.com/abacws-abacus/panna) project.")
19
+
20
+
21
@spaces.GPU(duration=120)
def infer(init_image, upscaler_prompt, num_frames, motion_bucket_id, noise_aug_strength, decode_chunk_size, fps, seed):
    """Run image-to-video generation with upscaling on the GPU worker.

    The output file is named after the number of MP4s already present in
    `tmp_output_dir`, and the resulting path is returned for the Video widget.
    """
    # Sequential output name: 000000.mp4, 000001.mp4, ...
    existing_videos = glob(os.path.join(tmp_output_dir, "*.mp4"))
    video_path = os.path.join(tmp_output_dir, "%06d.mp4" % len(existing_videos))
    # Delegate generation + upscaling to the panna pipeline; it writes the
    # video to `output_path` as a side effect.
    model(
        init_image,
        output_path=video_path,
        prompt=upscaler_prompt,
        num_frames=num_frames,
        motion_bucket_id=motion_bucket_id,
        noise_aug_strength=noise_aug_strength,
        decode_chunk_size=decode_chunk_size,
        fps=fps,
        seed=seed
    )
    return video_path
37
+
38
+
39
# Gradio UI: image input and Generate button on the left, the produced video
# beside them, and all generation knobs inside a collapsible panel.
# Component creation order is layout-significant and is kept unchanged.
with gr.Blocks() as demo:
    gr.Markdown(title)
    with gr.Row():
        with gr.Column():
            image = gr.Image(label="Upload your image", type="pil")
            run_button = gr.Button("Generate")
        video = gr.Video()
    with gr.Accordion("Advanced options", open=False):
        upscaler_prompt = gr.Text(label="Prompt for upscaler", show_label=False, max_lines=1, placeholder="Enter your prompt", container=False)
        seed = gr.Slider(label="Seed", minimum=0, maximum=1_000_000, step=1, value=0)
        num_frames = gr.Slider(label="Number of frames", minimum=1, maximum=100, step=1, value=25)
        motion_bucket_id = gr.Slider(label="Motion bucket id", minimum=1, maximum=255, step=1, value=127)
        noise_aug_strength = gr.Slider(label="Noise strength", minimum=0, maximum=1, step=0.01, value=0.02)
        fps = gr.Slider(label="Frames per second", minimum=5, maximum=30, step=1, value=6)
        decode_chunk_size = gr.Slider(label="Decode chunk size", minimum=1, maximum=10, step=1, value=2)
    # Wire the button to the GPU-backed inference function.
    generation_inputs = [image, upscaler_prompt, num_frames, motion_bucket_id, noise_aug_strength, decode_chunk_size, fps, seed]
    run_button.click(
        fn=infer,
        inputs=generation_inputs,
        outputs=[video]
    )
    gr.Examples(examples=examples, inputs=image)
demo.launch()
requirements.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ https://gradio-builds.s3.amazonaws.com/756e3431d65172df986a7e335dce8136206a293a/gradio-4.7.1-py3-none-any.whl
2
+ git+https://github.com/huggingface/diffusers.git
3
+ transformers
4
+ accelerate
5
+ safetensors
6
+ opencv-python
7
+ # uuid  -- NOTE(review): commented out; `uuid` ships with the Python standard
+ #          library, and the PyPI "uuid" package is an obsolete Python 2 shim.
8
+ panna>=0.0.2