add examples
Browse files
- app.py +22 -13
- images/Anya Taylor-Joy 003.jpg +0 -0
- images/Lizzo 001.jpeg +0 -0
- images/Mirai.jpg +0 -0
- images/OnChainMonkey #2278.jpeg +0 -0
- images/OnChainMonkey-2278.jpg +0 -0
- images/Wassie 4498.jpeg +0 -0
- images/billie eilish 004.jpeg +0 -0
app.py
CHANGED
@@ -228,15 +228,21 @@ tile_size = 100
|
|
228 |
# image_folder = os.path.join("file", "images")
|
229 |
image_folder ="images"
|
230 |
|
231 |
-
image_examples =
|
232 |
-
"SohoJoeEth.jpeg",
|
233 |
-
"Ray-Liotta-Goodfellas.jpg",
|
234 |
-
"
|
235 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
236 |
image_examples_tile_size = 50
|
237 |
|
238 |
-
|
239 |
-
|
240 |
with gr.Blocks() as demo:
|
241 |
with gr.Row():
|
242 |
with gr.Column(scale=5):
|
@@ -275,18 +281,21 @@ Try uploading a few images and/or add some text prompts and click generate image
|
|
275 |
with gr.Column(scale=3, min_width=600):
|
276 |
embedding_plots[i] = gr.LinePlot(show_label=False).style(container=False)
|
277 |
# input_image.change(on_image_load, inputs= [input_image, plot])
|
278 |
-
# with gr.Row():
|
279 |
-
# examples_with_path = [os.path.join(image_folder, image) for image in image_examples]
|
280 |
-
# gr.Examples(
|
281 |
-
# examples=examples_with_path,
|
282 |
-
# inputs=input_images[i],
|
283 |
-
# )
|
284 |
with gr.Row():
|
285 |
with gr.Column(scale=2, min_width=240):
|
286 |
input_prompts[i] = gr.Textbox()
|
287 |
with gr.Column(scale=3, min_width=600):
|
288 |
with gr.Accordion(f"Embeddings (base64)", open=False):
|
289 |
embedding_base64s[i] = gr.Textbox(show_label=False)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
290 |
|
291 |
with gr.Row():
|
292 |
average_embedding_plot = gr.LinePlot(show_label=True, label="Average Embeddings (base64)").style(container=False)
|
|
|
228 |
# image_folder = os.path.join("file", "images")
|
229 |
image_folder ="images"
|
230 |
|
231 |
+
image_examples = {
|
232 |
+
"SohoJoe": "SohoJoeEth.jpeg",
|
233 |
+
"Ray": "Ray-Liotta-Goodfellas.jpg",
|
234 |
+
"Donkey": "Donkey.jpg",
|
235 |
+
"Snoop": "Snoop Dogg.jpg",
|
236 |
+
"Pup in TeaCup": "pup1.jpg",
|
237 |
+
"Anya": "Anya Taylor-Joy 003.jpg",
|
238 |
+
"Billie": "billie eilish 004.jpeg",
|
239 |
+
"Lizzo": "Lizzo 001.jpeg",
|
240 |
+
"Mirai": "Mirai.jpg",
|
241 |
+
"OnChainMonkey": "OnChainMonkey-2278.jpg",
|
242 |
+
"Wassie": "Wassie 4498.jpeg",
|
243 |
+
}
|
244 |
image_examples_tile_size = 50
|
245 |
|
|
|
|
|
246 |
with gr.Blocks() as demo:
|
247 |
with gr.Row():
|
248 |
with gr.Column(scale=5):
|
|
|
281 |
with gr.Column(scale=3, min_width=600):
|
282 |
embedding_plots[i] = gr.LinePlot(show_label=False).style(container=False)
|
283 |
# input_image.change(on_image_load, inputs= [input_image, plot])
|
|
|
|
|
|
|
|
|
|
|
|
|
284 |
with gr.Row():
|
285 |
with gr.Column(scale=2, min_width=240):
|
286 |
input_prompts[i] = gr.Textbox()
|
287 |
with gr.Column(scale=3, min_width=600):
|
288 |
with gr.Accordion(f"Embeddings (base64)", open=False):
|
289 |
embedding_base64s[i] = gr.Textbox(show_label=False)
|
290 |
+
with gr.Row():
|
291 |
+
for idx, (title, image) in enumerate(image_examples.items()):
|
292 |
+
local_path = os.path.join(image_folder, image)
|
293 |
+
with gr.Column(scale=1, min_width=image_examples_tile_size):
|
294 |
+
gr.Examples(
|
295 |
+
examples=[local_path],
|
296 |
+
inputs=input_images[i],
|
297 |
+
label=title,
|
298 |
+
)
|
299 |
|
300 |
with gr.Row():
|
301 |
average_embedding_plot = gr.LinePlot(show_label=True, label="Average Embeddings (base64)").style(container=False)
|
images/Anya Taylor-Joy 003.jpg
ADDED
![]() |
images/Lizzo 001.jpeg
ADDED
![]() |
images/Mirai.jpg
ADDED
![]() |
images/OnChainMonkey #2278.jpeg
ADDED
![]() |
images/OnChainMonkey-2278.jpg
ADDED
![]() |
images/Wassie 4498.jpeg
ADDED
![]() |
images/billie eilish 004.jpeg
ADDED
![]() |