alfredplpl committed on
Commit
fa169a4
β€’
1 Parent(s): 48344fb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +18 -1
app.py CHANGED
@@ -21,10 +21,27 @@ pipe_normal.load_textual_inversion(state_dict["clip_l"], token="unaestheticXLv31
21
 
22
  pipe_normal.to("cuda")
23
 
 
 
 
 
24
 
25
  @spaces.GPU
26
  def run_normal(prompt, negative_prompt="", guidance_scale=7.5, progress=gr.Progress(track_tqdm=True)):
27
- return pipe_normal(prompt, negative_prompt="unaestheticXLv31"+negative_prompt, guidance_scale=guidance_scale, num_inference_steps=20).images[0]
 
 
 
 
 
 
 
 
 
 
 
 
 
28
 
29
  css = '''
30
  .gradio-container{
 
21
 
22
# Move the SDXL pipeline onto the GPU before serving requests.
pipe_normal.to("cuda")

# Prompt-weighting helper spanning both SDXL text encoders.
# SDXL conditioning takes the penultimate (non-normalized) hidden states,
# and only the second encoder contributes a pooled embedding — hence
# requires_pooled=[False, True].
compel = Compel(
    tokenizer=[pipe_normal.tokenizer, pipe_normal.tokenizer_2],
    text_encoder=[pipe_normal.text_encoder, pipe_normal.text_encoder_2],
    returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
    requires_pooled=[False, True],
)
28
 
29
  @spaces.GPU
30
  def run_normal(prompt, negative_prompt="", guidance_scale=7.5, progress=gr.Progress(track_tqdm=True)):
31
+ conditioning, pooled = compel([prompt, "unaestheticXLv31--, "+neg_prompt])
32
+
33
+ result = pipe(
34
+ prompt_embeds=conditioning[0:1],
35
+ pooled_prompt_embeds=pooled[0:1],
36
+ negative_prompt_embeds=conditioning[1:2],
37
+ negative_pooled_prompt_embeds=pooled[1:2],
38
+ num_inference_steps = int(steps),
39
+ guidance_scale = guidance,
40
+ width = width,
41
+ height = height,
42
+ generator = generator)
43
+
44
+ return result.images[0]
45
 
46
  css = '''
47
  .gradio-container{