Chan-Y committed on
Commit
8fb94d3
1 Parent(s): 57901df

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +138 -1
app.py CHANGED
@@ -1,3 +1,140 @@
 
1
  import gradio as gr
 
 
 
2
 
3
- gr.load("models/Chan-Y/Cyber-Stable-Realistic").launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from diffusers import DiffusionPipeline
2
  import gradio as gr
3
+ import numpy as np
4
+ import random
5
+ import torch
6
 
7
+ device = "cuda" if torch.cuda.is_available() else "cpu"
8
+ dtype = torch.float16
9
+
10
+ pipe = DiffusionPipeline.from_pretrained("Chan-Y/Chan-Y-Cyber-Stable-Realistic",
11
+ torch_dtype=torch.float16).to(device)
12
+
13
+ MAX_SEED = 999999999999999
14
+ MAX_IMAGE_SIZE = 1344
15
+
16
+ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, progress=gr.Progress(track_tqdm=True)):
17
+
18
+ if randomize_seed:
19
+ seed = random.randint(0, MAX_SEED)
20
+
21
+ generator = torch.Generator().manual_seed(seed)
22
+
23
+ image = pipe(
24
+ prompt = prompt,
25
+ negative_prompt = negative_prompt,
26
+ guidance_scale = guidance_scale,
27
+ num_inference_steps = num_inference_steps,
28
+ width = width,
29
+ height = height,
30
+ generator = generator
31
+ ).images[0]
32
+
33
+ return image, seed
34
+
35
+
36
+ examples = [
37
+ ["Batman, cute modern Disney style, Pixar 3d portrait, ultra detailed, gorgeous, 3d zbrush, trending on dribbble, 8k render.",
38
+ "",
39
+ ,
40
+ 50]
41
+ ]
42
+
43
+
44
+ css="""
45
+ #col-container {
46
+ margin: 0 auto;
47
+ max-width: 580px;
48
+ }
49
+ """
50
+
51
+
52
+ with gr.Blocks(css=css) as demo:
53
+
54
+ with gr.Column(elem_id="col-container"):
55
+ gr.Markdown(f"""
56
+ # Demo [Chan-Y/Stable-Flash-Lightning](https://huggingface.co/Chan-Y/Stable-Flash-Lightning)
57
+ by Cihan Yalçın | My [LinkedIn](https://www.linkedin.com/in/chanyalcin/) My [GitHub](https://github.com/g-hano)
58
+ """)
59
+
60
+ with gr.Row():
61
+
62
+ prompt = gr.Text(
63
+ label="Prompt",
64
+ show_label=False,
65
+ max_lines=1,
66
+ placeholder="Enter your prompt",
67
+ container=False,
68
+ )
69
+
70
+ run_button = gr.Button("Run", scale=0)
71
+
72
+ result = gr.Image(label="Result", show_label=False)
73
+
74
+ with gr.Accordion("Advanced Settings", open=False):
75
+
76
+ negative_prompt = gr.Text(
77
+ label="Negative prompt",
78
+ max_lines=1,
79
+ placeholder="Enter a negative prompt",
80
+ )
81
+
82
+ seed = gr.Slider(
83
+ label="Seed",
84
+ minimum=0,
85
+ maximum=MAX_SEED,
86
+ step=1,
87
+ value=0,
88
+ )
89
+
90
+ randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
91
+
92
+ with gr.Row():
93
+
94
+ width = gr.Slider(
95
+ label="Width",
96
+ minimum=256,
97
+ maximum=MAX_IMAGE_SIZE,
98
+ step=64,
99
+ value=1024,
100
+ )
101
+
102
+ height = gr.Slider(
103
+ label="Height",
104
+ minimum=256,
105
+ maximum=MAX_IMAGE_SIZE,
106
+ step=64,
107
+ value=1024,
108
+ )
109
+
110
+ with gr.Row():
111
+
112
+ guidance_scale = gr.Slider(
113
+ label="Guidance scale",
114
+ minimum=0.0,
115
+ maximum=10.0,
116
+ step=0.1,
117
+ value=5.0,
118
+ )
119
+
120
+ num_inference_steps = gr.Slider(
121
+ label="Number of inference steps",
122
+ minimum=1,
123
+ maximum=50,
124
+ step=1,
125
+ value=28,
126
+ )
127
+
128
+ with gr.Accordion("Examples", open=False):
129
+ gr.Examples(
130
+ examples=examples,
131
+ inputs=[prompt, negative_prompt, seed, num_inference_steps]
132
+ )
133
+ gr.on(
134
+ triggers=[run_button.click, prompt.submit, negative_prompt.submit],
135
+ fn = infer,
136
+ inputs = [prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
137
+ outputs = [result, seed]
138
+ )
139
+
140
+ demo.launch()