multimodalart HF staff committed on
Commit
18cb1f5
·
verified ·
1 Parent(s): 0b0222a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -65,7 +65,7 @@ def inference( prompt, negative_prompt, guidance_scale, ddim_steps, seed):
65
  global text_encoder
66
  global tokenizer
67
  global noise_scheduler
68
- torch.Generator(device=device).manual_seed(seed)
69
  latents = torch.randn(
70
  (1, unet.in_channels, 512 // 8, 512 // 8),
71
  generator = generator,
@@ -111,7 +111,7 @@ def inference( prompt, negative_prompt, guidance_scale, ddim_steps, seed):
111
  def edit_inference(prompt, negative_prompt, guidance_scale, ddim_steps, seed, start_noise, a1, a2, a3, a4):
112
 
113
  global device
114
- global generator
115
  global unet
116
  global vae
117
  global text_encoder
@@ -136,7 +136,7 @@ def edit_inference(prompt, negative_prompt, guidance_scale, ddim_steps, seed, st
136
 
137
  edited_weights = original_weights+a1*1e6*young_pad+a2*1e6*pointy_pad+a3*1e6*wavy_pad+a4*2e6*thick_pad
138
 
139
- generator = generator.manual_seed(seed)
140
  latents = torch.randn(
141
  (1, unet.in_channels, 512 // 8, 512 // 8),
142
  generator = generator,
 
65
  global text_encoder
66
  global tokenizer
67
  global noise_scheduler
68
+ generator = torch.Generator(device=device).manual_seed(seed)
69
  latents = torch.randn(
70
  (1, unet.in_channels, 512 // 8, 512 // 8),
71
  generator = generator,
 
111
  def edit_inference(prompt, negative_prompt, guidance_scale, ddim_steps, seed, start_noise, a1, a2, a3, a4):
112
 
113
  global device
114
+ #global generator
115
  global unet
116
  global vae
117
  global text_encoder
 
136
 
137
  edited_weights = original_weights+a1*1e6*young_pad+a2*1e6*pointy_pad+a3*1e6*wavy_pad+a4*2e6*thick_pad
138
 
139
+ generator = torch.Generator(device=device).manual_seed(seed)
140
  latents = torch.randn(
141
  (1, unet.in_channels, 512 // 8, 512 // 8),
142
  generator = generator,