Niansuh committed on
Commit 2cb0586 · verified · 1 Parent(s): c8b999a

Update app.py

Files changed (1):
app.py  +38 -18
app.py CHANGED
@@ -6,17 +6,32 @@ import gradio as gr
 import numpy as np
 import torch
 from PIL import Image
-from diffusers import StableDiffusionXLPipeline, EDMEulerScheduler, StableDiffusionXLInstructPix2PixPipeline, AutoencoderKL
+from diffusers import StableDiffusionXLPipeline, EDMEulerScheduler, StableDiffusionXLInstructPix2PixPipeline, AutoencoderKL, EulerAncestralDiscreteScheduler
 from custom_pipeline import CosStableDiffusionXLInstructPix2PixPipeline
 from huggingface_hub import hf_hub_download
 from huggingface_hub import InferenceClient
-from diffusers import StableDiffusion3Pipeline, SD3Transformer2DModel, FlowMatchEulerDiscreteScheduler
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
 dtype = torch.float16
+vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
+
+repo = "fluently/Fluently-XL-Final"
+
+pipe_best = StableDiffusionXLPipeline.from_pretrained(repo, torch_dtype=torch.float16, vae=vae)
+pipe_best.load_lora_weights("ehristoforu/dalle-3-xl-v2", weight_name="dalle-3-xl-lora-v2.safetensors", adapter_name="dalle")
+pipe_best.load_lora_weights("KingNish/Better-Image-XL-Lora", weight_name="example-03.safetensors", adapter_name="lora")
+pipe_best.set_adapters(["lora","dalle"], adapter_weights=[1.5, 0.5])
+pipe_best.to("cuda")
 
-repo = "stabilityai/stable-diffusion-3-medium-diffusers"
-pipe = StableDiffusion3Pipeline.from_pretrained(repo, torch_dtype=torch.float16).to(device)
+pipe_3D = StableDiffusionXLPipeline.from_pretrained(repo, torch_dtype=torch.float16, vae=vae)
+pipe_3D.load_lora_weights("artificialguybr/3DRedmond-V1", weight_name="3DRedmond-3DRenderStyle-3DRenderAF.safetensors", adapter_name="3D")
+pipe_3D.set_adapters(["3D"])
+pipe_3D.to("cuda")
+
+pipe_logo = StableDiffusionXLPipeline.from_pretrained(repo, torch_dtype=torch.float16, vae=vae)
+pipe_logo.load_lora_weights("artificialguybr/LogoRedmond-LogoLoraForSDXL", weight_name="LogoRedmond_LogoRedAF.safetensors", adapter_name="logo")
+pipe_logo.set_adapters(["logo"])
+pipe_logo.to("cuda")
 
 help_text = """
 To optimize image results:
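Note: the three `StableDiffusionXLPipeline` instances all load the same `fluently/Fluently-XL-Final` base, so the base weights end up on the GPU three times. A possible consolidation (a sketch, not what this commit does) is one pipeline with every LoRA registered, switching adapters per request:

```python
# Sketch: one shared SDXL pipeline, switching LoRA adapters per request.
# Assumes diffusers' PEFT-backed multi-adapter support; not part of this commit.
import torch
from diffusers import StableDiffusionXLPipeline, AutoencoderKL

vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
pipe = StableDiffusionXLPipeline.from_pretrained(
    "fluently/Fluently-XL-Final", torch_dtype=torch.float16, vae=vae
).to("cuda")

# Register each adapter once under a distinct name.
pipe.load_lora_weights("ehristoforu/dalle-3-xl-v2",
                       weight_name="dalle-3-xl-lora-v2.safetensors", adapter_name="dalle")
pipe.load_lora_weights("KingNish/Better-Image-XL-Lora",
                       weight_name="example-03.safetensors", adapter_name="lora")
pipe.load_lora_weights("artificialguybr/3DRedmond-V1",
                       weight_name="3DRedmond-3DRenderStyle-3DRenderAF.safetensors", adapter_name="3D")

# Activate the combination a style needs just before inference.
pipe.set_adapters(["lora", "dalle"], adapter_weights=[1.5, 0.5])  # "BEST"
# pipe.set_adapters(["3D"])                                       # "3D"
```

The trade-off is per-request `set_adapters` overhead versus holding three full copies of the base model in VRAM.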
@@ -44,7 +59,6 @@ def set_timesteps_patched(self, num_inference_steps: int, device = None):
 # Image Editor
 edit_file = hf_hub_download(repo_id="stabilityai/cosxl", filename="cosxl_edit.safetensors")
 EDMEulerScheduler.set_timesteps = set_timesteps_patched
-vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
 pipe_edit = StableDiffusionXLInstructPix2PixPipeline.from_single_file(
     edit_file, num_in_channels=8, is_cosxl_edit=True, vae=vae, torch_dtype=torch.float16,
 )
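Note: this hunk only deletes the local `vae` line because the same fp16-fix VAE is now constructed once at the top of the file and shared by all four pipelines. The stock SDXL VAE can overflow to NaN in float16; `madebyollin/sdxl-vae-fp16-fix` is a finetune that stays in range. Retrofitting it onto an already-built pipeline is a one-liner (a sketch, assuming `pipe_edit` is constructed as above):

```python
# Sketch: swap the fp16-safe VAE into an existing SDXL pipeline.
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
pipe_edit.vae = vae.to(pipe_edit.device)  # replace the checkpoint's own VAE module
```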
@@ -52,7 +66,7 @@ pipe_edit.scheduler = EDMEulerScheduler(sigma_min=0.002, sigma_max=120.0, sigma_
 pipe_edit.to("cuda")
 
 # Generator
-@spaces.GPU(duration=30, queue=False)
+@spaces.GPU(duration=45, queue=False)
 def king(type ,
     input_image ,
     instruction: str ,
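Note: on ZeroGPU Spaces, `@spaces.GPU(duration=...)` requests a GPU slice of up to `duration` seconds per call; the bump from 30 to 45 gives headroom now that `king` may route through any of three LoRA pipelines. Minimal shape of the decorator (a sketch; `queue=False` mirrors this file, and CUDA is only guaranteed inside the decorated call):

```python
import spaces  # Hugging Face ZeroGPU helper

@spaces.GPU(duration=45, queue=False)
def generate(prompt: str):
    # Work that needs CUDA belongs inside the decorated function.
    return pipe_best(prompt=prompt).images[0]
```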
@@ -63,7 +77,7 @@ def king(type ,
     image_cfg_scale: float = 1.7,
     width: int = 1024,
     height: int = 1024,
-    guidance_scale: float = 6,
+    style="BEST",
     use_resolution_binning: bool = True,
     progress=gr.Progress(track_tqdm=True),
 ):
@@ -85,14 +99,14 @@ def king(type ,
     if randomize_seed:
         seed = random.randint(0, 99999)
     generator = torch.Generator().manual_seed(seed)
-    image = pipe(
-        prompt = instruction,
-        guidance_scale = 7,
-        num_inference_steps = steps,
-        width = width,
-        height = height,
-        generator = generator
-    ).images[0]
+    if style=="3D":
+        instruction = f"3DRenderAF, 3D Render, {instruction}"
+        image = pipe_3D( prompt = instruction, guidance_scale = 5, num_inference_steps = steps, width = width, height = height, generator = generator).images[0]
+    elif style=="Logo":
+        instruction = f"LogoRedAF, {instruction}"
+        image = pipe_logo( prompt = instruction, guidance_scale = 5, num_inference_steps = steps, width = width, height = height, generator = generator).images[0]
+    else:
+        image = pipe_best( prompt = instruction, guidance_scale = 5, num_inference_steps = steps, width = width, height = height, generator = generator).images[0]
     return seed, image
 
 client = InferenceClient()
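Note: the three branches differ only in pipeline and trigger-word prefix, so a lookup table would keep future styles from drifting out of sync (a sketch of an equivalent alternative, not what the commit ships):

```python
# Sketch: table-driven equivalent of the if/elif style dispatch above.
STYLES = {
    "3D":   (pipe_3D,   "3DRenderAF, 3D Render, "),
    "Logo": (pipe_logo, "LogoRedAF, "),
    "BEST": (pipe_best, ""),
}

def run_style(style, instruction, steps, width, height, generator):
    pipe, prefix = STYLES.get(style, STYLES["BEST"])  # unknown styles fall back to BEST
    return pipe(
        prompt=prefix + instruction,
        guidance_scale=5,
        num_inference_steps=steps,
        width=width,
        height=height,
        generator=generator,
    ).images[0]
```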
@@ -162,10 +176,15 @@ with gr.Blocks(css=css) as demo:
     type = gr.Dropdown(["Image Generation","Image Editing"], label="Task", value="Image Generation",interactive=True, info="AI will select option based on your query, but if it selects wrong, please choose correct one.")
     with gr.Column(scale=1):
         generate_button = gr.Button("Generate")
-
+    with gr.Row():
+        style = gr.Radio(choices=["BEST","3D","Logo"],label="Style", value="BEST", interactive=True)
     with gr.Row():
         input_image = gr.Image(label="Image", type="pil", interactive=True)
 
+    with gr.Row():
+        width = gr.Number(value=1024, step=16,label="Width", interactive=True)
+        height = gr.Number(value=1024, step=16,label="Height", interactive=True)
+
     with gr.Row():
         text_cfg_scale = gr.Number(value=7.3, step=0.1, label="Text CFG", interactive=True)
         image_cfg_scale = gr.Number(value=1.7, step=0.1,label="Image CFG", interactive=True)
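Note: `step=16` only affects the spinner arrows on `gr.Number`; a typed value is passed through unchanged, and the SDXL pipeline rejects dimensions not divisible by 8 (its VAE downsampling factor). A defensive rounding step inside `king` would cover that (hypothetical `snap` helper, not in this commit):

```python
def snap(dim: int, multiple: int = 8) -> int:
    """Round a user-typed dimension to the nearest valid multiple."""
    return max(multiple, round(dim / multiple) * multiple)

width, height = snap(width), snap(height)  # e.g. snap(1030) -> 1032
```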
@@ -188,9 +207,7 @@ with gr.Blocks(css=css) as demo:
     )
 
     gr.Markdown(help_text)
-
     instruction.change(fn=response, inputs=[instruction,input_image], outputs=type, queue=False)
-
     input_image.upload(fn=response, inputs=[instruction,input_image], outputs=type, queue=False)
 
     gr.on(triggers=[
@@ -206,6 +223,9 @@ with gr.Blocks(css=css) as demo:
         seed,
         text_cfg_scale,
         image_cfg_scale,
+        width,
+        height,
+        style
         ],
         outputs=[seed, input_image],
     )
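Note: the `triggers=[` list and the head of `inputs=[` are truncated in this view, so the exact events are not shown. The call is Gradio's `gr.on`, which binds one handler to several events; the sketch below uses placeholder triggers and fills the input list from `king`'s visible signature (hypothetical where not shown in the diff):

```python
# Sketch: gr.on binds one handler to several events.
# Trigger names are placeholders; the real list is cut off in this diff.
gr.on(
    triggers=[generate_button.click, instruction.submit],
    fn=king,
    inputs=[type, input_image, instruction, steps, randomize_seed,
            seed, text_cfg_scale, image_cfg_scale, width, height, style],
    outputs=[seed, input_image],
)
```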
 