twodgirl committed on
Commit
fb5dd14
1 Parent(s): 8d10e61

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +3 -5
README.md CHANGED
@@ -31,16 +31,14 @@ import torch
31
  from torchvision import transforms
32
 
33
  def preview_image(latents, pipe):
34
- tea = TeaDecoder(ch_in=16)
35
- load_model(tea, './vae_decoder.safetensors')
36
- tea.eval()
37
  latents = FluxPipeline._unpack_latents(latents,
38
  pipe.default_sample_size * pipe.vae_scale_factor,
39
  pipe.default_sample_size * pipe.vae_scale_factor,
40
  pipe.vae_scale_factor)
 
 
41
  tea = tea.to(device='cuda')
42
  output = tea(latents.to(torch.float32)) / 2.0 + 0.5
43
- tea = tea.to(device='cpu')
44
  preview = transforms.ToPILImage()(output[0].clamp(0, 1))
45
 
46
  return preview
@@ -65,7 +63,7 @@ if __name__ == '__main__':
65
  # Return the upscaled and preview image.
66
  upscaled = full_size_image(latents, pipe)
67
  preview = preview_image(latents, pipe)
68
- preview.save('cat.preview.png')
69
  ```
70
 
71
  ## Disclaimer
 
31
  from torchvision import transforms
32
 
33
  def preview_image(latents, pipe):
 
 
 
34
  latents = FluxPipeline._unpack_latents(latents,
35
  pipe.default_sample_size * pipe.vae_scale_factor,
36
  pipe.default_sample_size * pipe.vae_scale_factor,
37
  pipe.vae_scale_factor)
38
+ tea = TeaDecoder(ch_in=16)
39
+ load_model(tea, './vae_decoder.safetensors')
40
  tea = tea.to(device='cuda')
41
  output = tea(latents.to(torch.float32)) / 2.0 + 0.5
 
42
  preview = transforms.ToPILImage()(output[0].clamp(0, 1))
43
 
44
  return preview
 
63
  # Return the upscaled and preview image.
64
  upscaled = full_size_image(latents, pipe)
65
  preview = preview_image(latents, pipe)
66
+ preview.save('cat.png')
67
  ```
68
 
69
  ## Disclaimer