yucornetto committed on
Commit
8a89e5f
1 Parent(s): 2307701

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +0 -12
app.py CHANGED
@@ -6,11 +6,9 @@ import torch
6
  torch.backends.cuda.matmul.allow_tf32 = True
7
  torch.backends.cudnn.allow_tf32 = True
8
  import time
9
- import argparse
10
  import demo_util
11
  import os
12
  import spaces
13
- from functools import partial
14
 
15
  model2ckpt = {
16
  "TiTok-L-32": ("tokenizer_titok_l32.bin", "generator_titok_l32.bin"),
@@ -21,16 +19,6 @@ if not os.path.exists("tokenizer_titok_l32.bin"):
21
  if not os.path.exists("generator_titok_l32.bin"):
22
  os.system("gdown 1IgqZ_vwGIj2ZWOPuCzilxeQ2UrMVY93l")
23
 
24
- parser = argparse.ArgumentParser()
25
- parser.add_argument("--precision", type=str, default='bf16', choices=["none", "fp16", "bf16"])
26
- parser.add_argument("--guidance_scale", type=float, default=3.5)
27
- parser.add_argument("--randomize_temperature", type=float, default=1.0)
28
- parser.add_argument("--num_sample_steps", type=int, default=8)
29
- parser.add_argument("--seed", type=int, default=42)
30
- parser.add_argument("--temperature", type=float, default=1.0, help="temperature value to sample with")
31
- args = parser.parse_args()
32
-
33
-
34
  # @spaces.GPU
35
  def load_model():
36
  device = "cuda" #if torch.cuda.is_available() else "cpu"
 
6
  torch.backends.cuda.matmul.allow_tf32 = True
7
  torch.backends.cudnn.allow_tf32 = True
8
  import time
 
9
  import demo_util
10
  import os
11
  import spaces
 
12
 
13
  model2ckpt = {
14
  "TiTok-L-32": ("tokenizer_titok_l32.bin", "generator_titok_l32.bin"),
 
19
  if not os.path.exists("generator_titok_l32.bin"):
20
  os.system("gdown 1IgqZ_vwGIj2ZWOPuCzilxeQ2UrMVY93l")
21
 
 
 
 
 
 
 
 
 
 
 
22
  # @spaces.GPU
23
  def load_model():
24
  device = "cuda" #if torch.cuda.is_available() else "cpu"