kadirnar committed
Commit a28b508 · verified · Parent: 6da9572

Update app.py

Files changed (1):
  app.py +7 -7
app.py CHANGED
@@ -25,7 +25,7 @@ from transformers import CLIPTextModel, CLIPTokenizer
 from diffusers_vdm.pipeline import LatentVideoDiffusionPipeline
 from diffusers_vdm.utils import resize_and_center_crop, save_bcthw_as_mp4
 
-
+@spaces.GPU()
 class ModifiedUNet(UNet2DConditionModel):
     @classmethod
     def from_config(cls, *args, **kwargs):
@@ -73,7 +73,7 @@ def find_best_bucket(h, w, options):
     best_bucket = (bucket_h, bucket_w)
     return best_bucket
 
-
+@spaces.GPU()
 @torch.inference_mode()
 def encode_cropped_prompt_77tokens(txt: str):
     memory_management.load_models_to_gpu(text_encoder)
@@ -85,7 +85,7 @@ def encode_cropped_prompt_77tokens(txt: str):
     text_cond = text_encoder(cond_ids, attention_mask=None).last_hidden_state
     return text_cond
 
-
+@spaces.GPU()
 @torch.inference_mode()
 def pytorch2numpy(imgs):
     results = []
@@ -96,7 +96,7 @@ def pytorch2numpy(imgs):
         results.append(y)
     return results
 
-
+@spaces.GPU()
 @torch.inference_mode()
 def numpy2pytorch(imgs):
     h = torch.from_numpy(np.stack(imgs, axis=0)).float() / 127.5 - 1.0
@@ -109,12 +109,12 @@ def resize_without_crop(image, target_width, target_height):
     resized_image = pil_image.resize((target_width, target_height), Image.LANCZOS)
     return np.array(resized_image)
 
-
+@spaces.GPU()
 @torch.inference_mode()
 def interrogator_process(x):
     return wd14tagger.default_interrogator(x)
 
-
+@spaces.GPU()
 @torch.inference_mode()
 def process(input_fg, prompt, input_undo_steps, image_width, image_height, seed, steps, n_prompt, cfg,
             progress=gr.Progress()):
@@ -154,7 +154,7 @@ def process(input_fg, prompt, input_undo_steps, image_width, image_height, seed,
 
     return pixels
 
-
+@spaces.GPU()
 @torch.inference_mode()
 def process_video_inner(image_1, image_2, prompt, seed=123, steps=25, cfg_scale=7.5, fs=3, progress_tqdm=None):
     random.seed(seed)
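For context: @spaces.GPU() is the decorator from Hugging Face's spaces package, used on ZeroGPU Spaces to request a GPU for the duration of the decorated call and release it afterwards. Below is a minimal sketch of the pattern this commit applies; the function run_model and its body are hypothetical placeholders, not part of this repository, and outside a ZeroGPU Space the decorator is effectively a no-op.

import spaces
import torch

@spaces.GPU()  # requests a GPU for the duration of this call on ZeroGPU
def run_model(prompt: str) -> str:
    # On ZeroGPU hardware, CUDA becomes available inside the decorated function
    device = "cuda" if torch.cuda.is_available() else "cpu"
    return f"ran on {device}: {prompt}"

In this commit the decorator is stacked above @torch.inference_mode() on each GPU-touching function, so the GPU is allocated only while that function runs rather than for the lifetime of the app.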