medmac01 committed
Commit: af59832
Parent: 33eeceb

Update model.py

Files changed (1): model.py (+3, -2)
model.py CHANGED
@@ -1,5 +1,6 @@
 from PIL import Image
 from base64 import b64encode
+import os
 
 import torch
 from torch import autocast
@@ -95,7 +96,7 @@ text_tokenizer = transformers.AutoTokenizer.from_pretrained(model_name)
 embeddings= text_model.forward(texts, text_tokenizer, device )
 # 1. Load the autoencoder model which will be used to decode the latents into image space.
 vae = AutoencoderKL.from_pretrained(
-    'CompVis/stable-diffusion-v1-4', subfolder='vae', use_auth_token=True)
+    'CompVis/stable-diffusion-v1-4', subfolder='vae', use_auth_token=True, token=os.environ['HF_API_TOKEN'])
 vae = vae.to(device)
 
 # 2. Load the tokenizer and text encoder to tokenize and encode the text.
@@ -104,7 +105,7 @@ text_encoder = text_model
 
 # 3. The UNet model for generating the latents.
 unet = UNet2DConditionModel.from_pretrained(
-    'CompVis/stable-diffusion-v1-4', subfolder='unet', use_auth_token=True)
+    'CompVis/stable-diffusion-v1-4', subfolder='unet', use_auth_token=True, token=os.environ['HF_API_TOKEN'])
 unet = unet.to(device)
 
 # 4. Create a scheduler for inference
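
For reference, a minimal, self-contained sketch of the loading pattern this commit sets up, assuming HF_API_TOKEN is exported in the environment with a valid Hugging Face access token and a recent diffusers release is installed (recent releases accept token= directly and deprecate use_auth_token=, so the sketch passes only token=):

import os

import torch
from diffusers import AutoencoderKL, UNet2DConditionModel

device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Hugging Face access token, assumed to be exported beforehand
# (e.g. export HF_API_TOKEN=hf_xxx, a placeholder value).
hf_token = os.environ['HF_API_TOKEN']

# 1. Autoencoder used to decode the latents into image space.
vae = AutoencoderKL.from_pretrained(
    'CompVis/stable-diffusion-v1-4', subfolder='vae', token=hf_token)
vae = vae.to(device)

# 3. The UNet model for generating the latents.
unet = UNet2DConditionModel.from_pretrained(
    'CompVis/stable-diffusion-v1-4', subfolder='unet', token=hf_token)
unet = unet.to(device)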