amildravid4292 committed
Commit c2136ba · verified · 1 Parent(s): 12b3d57

Update app.py

Files changed (1)
  1. app.py +22 -29
app.py CHANGED
@@ -35,44 +35,37 @@ import spaces
 models_path = snapshot_download(repo_id="Snapchat/w2w")
 
 
-@spaces.GPU
-def load_models(device):
-    pretrained_model_name_or_path = "stablediffusionapi/realistic-vision-v51"
-
-    revision = None
-    weight_dtype = torch.bfloat16
-
-    # Load scheduler, tokenizer and models.
-    pipe = StableDiffusionPipeline.from_pretrained("stablediffusionapi/realistic-vision-v51",
+
+pretrained_model_name_or_path = "stablediffusionapi/realistic-vision-v51"
+revision = None
+weight_dtype = torch.bfloat16
+# Load scheduler, tokenizer and models.
+pipe = StableDiffusionPipeline.from_pretrained("stablediffusionapi/realistic-vision-v51",
                                                 torch_dtype=torch.float16,safety_checker = None,
                                                 requires_safety_checker = False).to(device)
-    noise_scheduler = pipe.scheduler
-    del pipe
-    tokenizer = AutoTokenizer.from_pretrained(
+noise_scheduler = pipe.scheduler
+del pipe
+tokenizer = AutoTokenizer.from_pretrained(
     pretrained_model_name_or_path, subfolder="tokenizer", revision=revision
 )
-    text_encoder = CLIPTextModel.from_pretrained(
+text_encoder = CLIPTextModel.from_pretrained(
     pretrained_model_name_or_path, subfolder="text_encoder", revision=revision
 )
-    vae = AutoencoderKL.from_pretrained(pretrained_model_name_or_path, subfolder="vae", revision=revision)
-    unet = UNet2DConditionModel.from_pretrained(
+vae = AutoencoderKL.from_pretrained(pretrained_model_name_or_path, subfolder="vae", revision=revision)
+unet = UNet2DConditionModel.from_pretrained(
     pretrained_model_name_or_path, subfolder="unet", revision=revision
 )
-    unet.requires_grad_(False)
-    unet.to(device, dtype=weight_dtype)
-    vae.requires_grad_(False)
+unet.requires_grad_(False)
+unet.to(device, dtype=weight_dtype)
+vae.requires_grad_(False)
 
-    text_encoder.requires_grad_(False)
-    vae.requires_grad_(False)
-    vae.to(device, dtype=weight_dtype)
-    text_encoder.to(device, dtype=weight_dtype)
-    print("")
-
-    return unet, vae, text_encoder, tokenizer, noise_scheduler
-
-
-
+text_encoder.requires_grad_(False)
+vae.requires_grad_(False)
+vae.to(device, dtype=weight_dtype)
+text_encoder.to(device, dtype=weight_dtype)
+print("")
 
 
+
 
 device="cuda"
 mean = torch.load(f"{models_path}/files/mean.pt", map_location=torch.device('cpu')).bfloat16().to(device)
@@ -83,7 +76,7 @@ df = torch.load(f"{models_path}/files/identity_df.pt")
 weight_dimensions = torch.load(f"{models_path}/files/weight_dimensions.pt")
 pinverse = torch.load(f"{models_path}/files/pinverse_1000pc.pt", map_location=torch.device('cpu')).bfloat16().to(device)
 
-unet, vae, text_encoder, tokenizer, noise_scheduler = load_models(device)
+
 
 
 @spaces.GPU
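
Net effect of the commit: the `@spaces.GPU`-decorated `load_models(device)` helper and its call are removed, and the scheduler, tokenizer, text encoder, VAE, and UNet are now built once at module import time, while `@spaces.GPU` remains only on the request-time code (visible at the end of the second hunk). A minimal sketch of that general ZeroGPU Spaces pattern is below; the `generate` function and its body are hypothetical stand-ins for the app's real entry point, not code from this commit:

    import torch
    import spaces
    from diffusers import StableDiffusionPipeline

    device = "cuda"

    # One-time, module-level load at Space startup (as in the new app.py).
    pipe = StableDiffusionPipeline.from_pretrained(
        "stablediffusionapi/realistic-vision-v51",
        torch_dtype=torch.float16,
        safety_checker=None,
        requires_safety_checker=False,
    ).to(device)

    # Only the per-request work is wrapped in @spaces.GPU; it reuses the
    # globals created above rather than reloading models on every call.
    @spaces.GPU
    def generate(prompt: str):
        return pipe(prompt).images[0]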