throaway2854 committed on
Commit
addc6c7
·
verified ·
1 Parent(s): 4473560

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +322 -163
app.py CHANGED
@@ -1,175 +1,334 @@
 
 
 
 
1
  import gradio as gr
 
 
 
 
 
 
2
  import random
3
- import json
4
- import os
5
 
6
  DATA_DIR = "/data"
7
- IMAGES_DIR = "/images"
8
- DATA_FILE = os.path.join(DATA_DIR, "saved_data.json")
9
-
10
- def load_data():
11
- if os.path.exists(DATA_FILE):
12
- with open(DATA_FILE, 'r') as f:
13
- return json.load(f)
14
- return {
15
- "scene_tags": [], "position_tags": [], "outfit_tags": [],
16
- "camera_tags": [], "concept_tags": [], "lora_tags": [],
17
- "characters": {}
18
- }
19
 
20
- def save_data(data):
21
- os.makedirs(DATA_DIR, exist_ok=True)
22
- with open(DATA_FILE, 'w') as f:
23
- json.dump(data, f)
24
-
25
- def save_character_image(name, image):
26
- os.makedirs(IMAGES_DIR, exist_ok=True)
27
- image_path = os.path.join(IMAGES_DIR, f"{name}.png")
28
- image.save(image_path)
29
- return image_path
30
-
31
- def generate_prompt(scene_tags, num_people, position_tags, selected_characters, outfit_tags, camera_tags, concept_tags, lora_tags, tag_counts, data):
32
- all_tags = {
33
- "scene": scene_tags.split(','),
34
- "position": position_tags.split(','),
35
- "outfit": outfit_tags.split(','),
36
- "camera": camera_tags.split(','),
37
- "concept": concept_tags.split(','),
38
  }
 
 
 
 
 
 
 
 
 
 
39
 
40
- all_tags = {k: [tag.strip() for tag in v if tag.strip()] for k, v in all_tags.items()}
41
-
42
- character_prompts = [
43
- f"{char_name}, " + ", ".join(random.sample(data["characters"][char_name]["traits"],
44
- min(tag_counts["character"], len(data["characters"][char_name]["traits"]))))
45
- for char_name in selected_characters if char_name in data["characters"]
46
- ]
47
-
48
- selected_tags = [
49
- f"{tag}:{random.uniform(0.8, 1.2):.2f}"
50
- for category, tags in all_tags.items()
51
- for tag in random.sample(tags, min(tag_counts[category], len(tags)))
52
- ]
53
-
54
- if num_people.strip():
55
- selected_tags.append(f"{num_people} people:1.1")
56
-
57
- prompt_parts = character_prompts + selected_tags
58
- random.shuffle(prompt_parts)
59
- main_prompt = ", ".join(prompt_parts)
60
-
61
- lora_list = [lora.strip() for lora in lora_tags.split(',') if lora.strip()]
62
- lora_prompt = " ".join(f"<lora:{lora}:1>" for lora in lora_list)
63
 
64
- fixed_tags = "source_anime, score_9, score_8_up, score_7_up, masterpiece, best quality, very aesthetic, absurdres, anime artwork, anime style, vibrant, studio anime, highly detailed"
 
 
65
 
66
- return f"{main_prompt}, {fixed_tags} {lora_prompt}".strip()
67
-
68
- def update_data(data, key, value):
69
- data[key] = list(set(data[key] + [v.strip() for v in value.split(',') if v.strip()]))
70
- save_data(data)
71
- return data
72
-
73
- def create_character(name, traits, image, data):
74
- if name:
75
- data["characters"][name] = {
76
- "traits": [trait.strip() for trait in traits.split(',') if trait.strip()],
77
- "image": save_character_image(name, image) if image else None
78
- }
79
- save_data(data)
80
- return data, gr.update(choices=list(data["characters"].keys()))
81
-
82
- def create_ui():
83
- data = load_data()
84
-
85
- with gr.Blocks() as demo:
86
- gr.Markdown("# Advanced Pony SDXL Prompt Generator with Character Creation")
87
-
88
- with gr.Tabs():
89
- with gr.TabItem("Prompt Generator"):
90
- with gr.Row():
91
- with gr.Column():
92
- scene_input = gr.Textbox(label="Scene Tags (comma-separated)", value=", ".join(data["scene_tags"]))
93
- scene_count = gr.Slider(minimum=1, maximum=10, step=1, value=3, label="Number of scene tags")
94
-
95
- num_people_input = gr.Textbox(label="Number of People")
96
-
97
- position_input = gr.Textbox(label="Position Tags (comma-separated)", value=", ".join(data["position_tags"]))
98
- position_count = gr.Slider(minimum=1, maximum=10, step=1, value=3, label="Number of position tags")
99
-
100
- character_select = gr.CheckboxGroup(label="Select Characters", choices=list(data["characters"].keys()))
101
- character_count = gr.Slider(minimum=1, maximum=10, step=1, value=3, label="Number of character traits")
102
-
103
- outfit_input = gr.Textbox(label="Outfit Tags (comma-separated)", value=", ".join(data["outfit_tags"]))
104
- outfit_count = gr.Slider(minimum=1, maximum=10, step=1, value=3, label="Number of outfit tags")
105
-
106
- camera_input = gr.Textbox(label="Camera View/Angle Tags (comma-separated)", value=", ".join(data["camera_tags"]))
107
- camera_count = gr.Slider(minimum=1, maximum=10, step=1, value=3, label="Number of camera tags")
108
-
109
- concept_input = gr.Textbox(label="Concept Tags (comma-separated)", value=", ".join(data["concept_tags"]))
110
- concept_count = gr.Slider(minimum=1, maximum=10, step=1, value=3, label="Number of concept tags")
111
-
112
- lora_input = gr.Textbox(label="LORA Tags (comma-separated)", value=", ".join(data["lora_tags"]))
113
- lora_count = gr.Slider(minimum=1, maximum=10, step=1, value=3, label="Number of LORA tags")
114
-
115
- generate_button = gr.Button("Generate Prompt")
116
-
117
- with gr.Column():
118
- output = gr.Textbox(label="Generated Prompt", lines=5)
119
-
120
- char_images = [char_data["image"] for char_data in data["characters"].values() if char_data["image"]]
121
- gr.Gallery(value=char_images, label="Character Images", show_label=True, elem_id="char_gallery", columns=2, rows=2, height="auto")
122
 
123
- with gr.TabItem("Character Creation"):
124
- with gr.Row():
125
- with gr.Column():
126
- char_name_input = gr.Textbox(label="Character Name")
127
- char_traits_input = gr.Textbox(label="Character Traits (comma-separated)")
128
- char_image_input = gr.Image(label="Character Image", type="pil")
129
- create_char_button = gr.Button("Create/Update Character")
130
-
131
- with gr.Column():
132
- char_gallery = gr.Gallery(label="Existing Characters", show_label=True, elem_id="char_gallery", columns=2, rows=2, height="auto")
133
-
134
- def update_and_generate(*args):
135
- nonlocal data
136
- scene_tags, num_people, position_tags, selected_characters, outfit_tags, camera_tags, concept_tags, lora_tags, *tag_counts = args
137
- data = update_data(data, "scene_tags", scene_tags)
138
- data = update_data(data, "position_tags", position_tags)
139
- data = update_data(data, "outfit_tags", outfit_tags)
140
- data = update_data(data, "camera_tags", camera_tags)
141
- data = update_data(data, "concept_tags", concept_tags)
142
- data = update_data(data, "lora_tags", lora_tags)
143
 
144
- tag_count_dict = {
145
- "scene": tag_counts[0], "position": tag_counts[1], "character": tag_counts[2],
146
- "outfit": tag_counts[3], "camera": tag_counts[4], "concept": tag_counts[5], "lora": tag_counts[6]
147
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
148
 
149
- return generate_prompt(scene_tags, num_people, position_tags, selected_characters, outfit_tags, camera_tags, concept_tags, lora_tags, tag_count_dict, data)
150
-
151
- generate_button.click(
152
- update_and_generate,
153
- inputs=[scene_input, num_people_input, position_input, character_select, outfit_input, camera_input, concept_input, lora_input,
154
- scene_count, position_count, character_count, outfit_count, camera_count, concept_count, lora_count],
155
- outputs=[output]
156
- )
157
-
158
- def update_char_gallery():
159
- char_images = [char_data["image"] for char_data in data["characters"].values() if char_data["image"]]
160
- return gr.Gallery(value=char_images)
161
-
162
- create_char_button.click(
163
- create_character,
164
- inputs=[char_name_input, char_traits_input, char_image_input],
165
- outputs=[gr.State(data), character_select]
166
- ).then(
167
- update_char_gallery,
168
- outputs=[char_gallery]
169
- )
170
-
171
- return demo
172
-
173
- if __name__ == "__main__":
174
- demo = create_ui()
175
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# --- standard library -------------------------------------------------------
import csv
import io
import json
import os
import random
import shutil
import time
import uuid
import zipfile

# --- third-party ------------------------------------------------------------
import gradio as gr
import requests
from bs4 import BeautifulSoup
from datasets import Dataset
from huggingface_hub import HfApi, HfFolder, Repository, create_repo
from PIL import Image
15
 
16
# Filesystem layout: metadata JSON files live in DATA_DIR, images below it.
DATA_DIR = "/data"
IMAGES_DIR = os.path.join(DATA_DIR, "images")

# A small pool of real browser User-Agent strings; one is picked per request
# so outgoing scraping traffic looks less uniform.
USER_AGENTS = [
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0.3 Safari/605.1.15",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0"
]

def get_headers(cookies=None):
    """Build browser-like HTTP request headers.

    A random User-Agent from USER_AGENTS is used each call.  When *cookies*
    is a non-empty string it is attached verbatim as the Cookie header.
    """
    header_fields = {
        "User-Agent": random.choice(USER_AGENTS),
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
        "Accept-Language": "en-US,en;q=0.5",
        "Referer": "https://www.google.com/",
        "DNT": "1",
        "Connection": "keep-alive",
        "Upgrade-Insecure-Requests": "1"
    }
    # Only attach the Cookie header for a truthy (non-empty) cookie string.
    if cookies:
        header_fields["Cookie"] = cookies
    return header_fields
37
+
38
def make_request(url, cookies=None):
    """GET *url* with randomized browser headers and a 10 s timeout.

    Sleeps a random 1-3 seconds first to keep the request cadence
    irregular and polite toward the scraped site.
    """
    pause = random.uniform(1, 3)
    time.sleep(pause)
    return requests.get(url, headers=get_headers(cookies), timeout=10)
41
+
42
def extract_image_url(html_content):
    """Extract the full-resolution image URL from a booru-style HTML page.

    First looks for an inline ``image = {...}`` javascript object and
    assembles the URL from its fields; otherwise falls back to the first
    ``<img>`` tag carrying an ``alt`` attribute.  Returns None when
    neither strategy yields a URL.
    """
    soup = BeautifulSoup(html_content, 'html.parser')

    def _mentions_image(text):
        # `string=` predicate: match scripts whose text contains "image =".
        return 'image =' in text if text else False

    script = soup.find('script', type='text/javascript', string=_mentions_image)
    if script:
        try:
            # Keep everything after the first '=', drop the trailing ';'.
            payload = script.string.split('=', 1)[1].strip().rstrip(';')
            # NOTE(review): naive quote swap to make the JS object valid
            # JSON — breaks if any value contains an apostrophe. Confirm
            # the scraped site never emits such values.
            payload = payload.replace("'", '"')
            image_data = json.loads(payload)
            return f"{image_data['domain']}{image_data['base_dir']}/{image_data['dir']}/{image_data['img']}"
        except json.JSONDecodeError as e:
            raise Exception(f"Failed to decode JSON: {str(e)}")

    img_tag = soup.find('img', alt=True)
    if img_tag and 'src' in img_tag.attrs:
        return img_tag['src']

    return None
60
+
61
def extract_tags(html_content):
    """Collect general-category tag names from the page as one comma-joined string."""
    soup = BeautifulSoup(html_content, 'html.parser')
    names = []
    for element in soup.find_all('li', class_='tag-type-general'):
        anchors = element.find_all('a')
        # The tag name lives in the second anchor; skip malformed entries.
        if len(anchors) > 1:
            names.append(anchors[1].text)
    return ','.join(names)
66
+
67
def download_image(url, cookies=None):
    """Fetch *url* and decode the response body as a PIL image.

    Network failures (requests.RequestException) are re-raised as a
    generic Exception with a descriptive message.
    """
    try:
        response = make_request(url, cookies)
        response.raise_for_status()
    except requests.RequestException as e:
        raise Exception(f"Failed to download image: {str(e)}")
    return Image.open(io.BytesIO(response.content))
74
+
75
class DatasetBuilder:
    """Manage a named image/tag dataset persisted under DATA_DIR.

    Image files live in IMAGES_DIR; the metadata — a list of
    ``{'image': filename, 'tags': comma_separated_tags}`` dicts — is
    stored as JSON at ``<DATA_DIR>/<dataset_name>.json``.
    """

    def __init__(self, dataset_name):
        self.dataset_name = dataset_name
        self.dataset = self.load_dataset()
        os.makedirs(IMAGES_DIR, exist_ok=True)
        # Hugging Face access token, read from the HF_Token env variable.
        self.hf_token = os.getenv("HF_Token")

    @staticmethod
    def _item_tags(item):
        """Return an item's tag string, accepting both 'tags' and the legacy 'text' key.

        Older datasets were saved with the tags under 'text' (see add_image
        bug fix below); tolerate both so previously saved data still loads.
        """
        return item.get('tags', item.get('text', ''))

    def get_dataset_file(self):
        """Path of this dataset's JSON metadata file."""
        return os.path.join(DATA_DIR, f"{self.dataset_name}.json")

    def load_dataset(self):
        """Load the persisted metadata list, or an empty list if none exists."""
        dataset_file = self.get_dataset_file()
        if os.path.exists(dataset_file):
            with open(dataset_file, 'r') as f:
                return json.load(f)
        return []

    def save_dataset(self):
        """Write the metadata list back to disk as JSON."""
        with open(self.get_dataset_file(), 'w') as f:
            json.dump(self.dataset, f)

    def resize_images(self, min_size=512, max_size=768):
        """Downscale every image in place so its longest side is <= max_size.

        NOTE(review): *min_size* is currently unused — images are never
        upscaled; kept in the signature for backward compatibility.
        """
        for item in self.dataset:
            image_path = os.path.join(IMAGES_DIR, item['image'])
            image = Image.open(image_path)
            # thumbnail() preserves aspect ratio and only ever shrinks.
            image.thumbnail((max_size, max_size), resample=Image.BICUBIC)
            image.save(image_path)

    def resize_dataset(self):
        """Resize images and save the metadata under a "(resized)" dataset name.

        NOTE(review): the resized dataset shares the same image files, so
        the originals in IMAGES_DIR are overwritten in place — confirm this
        is the intended behavior before relying on the un-resized dataset.
        """
        resized_dataset_name = f"{self.dataset_name} (resized)"
        resized_builder = DatasetBuilder(resized_dataset_name)
        resized_builder.dataset = self.dataset
        resized_builder.resize_images()
        resized_builder.save_dataset()
        return f"Resized dataset '{self.dataset_name}' to '{resized_dataset_name}'."

    def create_downloadable_dataset(self):
        """Bundle a CSV of (image, tags) plus all image files into a ZIP.

        Returns (zip_path, message); zip_path is None on failure or when
        the dataset is empty.
        """
        if not self.dataset:
            return None, "Dataset is empty. Add some images first."

        try:
            zip_path = os.path.join(DATA_DIR, f"{self.dataset_name}.zip")
            with zipfile.ZipFile(zip_path, 'w') as zipf:
                # Write the metadata CSV and add it to the archive root.
                dataset_file_path = os.path.join(DATA_DIR, f"{self.dataset_name}.csv")
                with open(dataset_file_path, 'w', newline='') as csvfile:
                    writer = csv.writer(csvfile)
                    writer.writerow(['image', 'tags'])
                    for item in self.dataset:
                        writer.writerow([item['image'], self._item_tags(item)])
                zipf.write(dataset_file_path, os.path.basename(dataset_file_path))

                # Store every image under an images/ folder inside the archive.
                for item in self.dataset:
                    image_path = os.path.join(IMAGES_DIR, item['image'])
                    zipf.write(image_path, os.path.join("images", item['image']))

            return zip_path, f"Dataset '{self.dataset_name}' ready for download."
        except Exception as e:
            return None, f"Error creating downloadable dataset: {str(e)}"

    def add_image(self, url, cookies=None):
        """Scrape one page: download its image, extract tags, record the pair.

        Returns a human-readable status string; never raises.
        """
        try:
            response = make_request(url, cookies)
            response.raise_for_status()
            html_content = response.text

            image_url = extract_image_url(html_content)
            if not image_url:
                raise Exception("Failed to extract image URL")

            tags = extract_tags(html_content)
            image = download_image(image_url, cookies)

            # Random filename avoids collisions between scraped images.
            filename = f"{uuid.uuid4()}.jpg"
            image.save(os.path.join(IMAGES_DIR, filename))

            # BUG FIX: the rest of the class reads item['tags']; the original
            # stored the string under 'text', which broke the CSV export,
            # the preview gallery, and the HF dataset build with a KeyError.
            self.dataset.append({
                'image': filename,
                'tags': tags
            })

            self.save_dataset()
            return f"Added image with tags: {tags}"
        except Exception as e:
            return f"Error: {str(e)}"

    def build_huggingface_dataset(self):
        """Materialize the metadata as a datasets.Dataset to validate it.

        The constructed object is discarded; only a status string is returned.
        """
        if not self.dataset:
            return "Dataset is empty. Add some images first."

        try:
            # Built purely as a validation step; the object is not retained.
            Dataset.from_dict({
                'image': [os.path.join(IMAGES_DIR, item['image']) for item in self.dataset],
                'text': [self._item_tags(item) for item in self.dataset]
            })
            return "HuggingFace Dataset created successfully!"
        except Exception as e:
            return f"Error creating HuggingFace Dataset: {str(e)}"

    def get_dataset_info(self):
        """Human-readable summary of the dataset size."""
        return f"Current dataset size ({self.dataset_name}): {len(self.dataset)} images"

    def get_dataset_preview(self, num_images=5):
        """Return (image_path, tags) pairs for the most recent *num_images* items."""
        return [
            (os.path.join(IMAGES_DIR, item['image']), self._item_tags(item))
            for item in self.dataset[-num_images:]
        ]

    def upload_to_huggingface(self, private=True):
        """Push the dataset JSON and images to a Hugging Face Hub repository.

        Returns a status string; requires the HF_Token env variable.
        """
        if not self.dataset:
            return "Dataset is empty. Add some images first."

        if not self.hf_token:
            return "Error: Hugging Face Token not found. Please make sure the token is correctly set as an environment variable."

        try:
            hf_api = HfApi(token=self.hf_token)
            hf_user = hf_api.whoami()["name"]
            repo_id = f"{hf_user}/{self.dataset_name}"

            # Create the target repository (no-op if it already exists).
            create_repo(repo_id, token=self.hf_token, private=private, exist_ok=True)

            # Make sure the on-disk metadata JSON is current before pushing.
            self.save_dataset()

            # Clone/attach the repo working tree on top of DATA_DIR.
            repo = Repository(local_dir=DATA_DIR, clone_from=repo_id, use_auth_token=self.hf_token)
            repo.git_pull(lfs=True)
            os.makedirs(os.path.join(DATA_DIR, "images"), exist_ok=True)

            for item in self.dataset:
                src_image_path = os.path.join(IMAGES_DIR, item['image'])
                dst_image_path = os.path.join(repo.local_dir, "images", item['image'])
                if not os.path.exists(dst_image_path):
                    os.makedirs(os.path.dirname(dst_image_path), exist_ok=True)
                    # shutil.copy2 instead of os.system("cp ..."): portable,
                    # checks errors, and immune to shell injection via filenames.
                    shutil.copy2(src_image_path, dst_image_path)

            repo.git_add(pattern=".")
            repo.git_commit("Add dataset and images")
            repo.git_push()

            return f"Dataset '{self.dataset_name}' successfully uploaded to Hugging Face Hub as a {'private' if private else 'public'} repository."

        except Exception as e:
            return f"Error uploading dataset to Hugging Face: {str(e)}"
238
+
239
def add_image_to_dataset(url, cookies, dataset_name):
    """Gradio callback: scrape one image into *dataset_name*.

    Returns (status message, dataset info string, recent-items preview).
    """
    builder = DatasetBuilder(dataset_name)
    status = builder.add_image(url, cookies)
    return status, builder.get_dataset_info(), builder.get_dataset_preview()
243
+
244
def create_huggingface_dataset(dataset_name):
    """Gradio callback: validate *dataset_name* as a HF Dataset; return status text."""
    return DatasetBuilder(dataset_name).build_huggingface_dataset()
247
+
248
def view_dataset(dataset_name):
    """Gradio callback: return up to 60 recent (image, tags) pairs for the gallery."""
    return DatasetBuilder(dataset_name).get_dataset_preview(num_images=60)
251
+
252
def upload_huggingface_dataset(dataset_name, privacy):
    """Gradio callback: upload *dataset_name* to the HF Hub.

    *privacy* is the Radio component's string value, "private" or "public".
    """
    builder = DatasetBuilder(dataset_name)
    # BUG FIX: the Radio passes the STRING "private"/"public". Any non-empty
    # string is truthy, so `private=privacy` made every repo private — the
    # "public" option never worked. Compare explicitly instead.
    return builder.upload_to_huggingface(private=(privacy == "private"))
255
+
256
def download_dataset(dataset_name):
    """Gradio callback: build a ZIP of the dataset; return (zip path, status)."""
    builder = DatasetBuilder(dataset_name)
    archive_path, status = builder.create_downloadable_dataset()
    return archive_path, status
260
+
261
def resize_dataset(dataset_name):
    """Gradio callback: shrink all images and save under a '(resized)' dataset name."""
    return DatasetBuilder(dataset_name).resize_dataset()
264
+
265
def download_resized_dataset(dataset_name):
    """Gradio callback: build a ZIP of the '(resized)' variant; return (path, status)."""
    builder = DatasetBuilder(f"{dataset_name} (resized)")
    archive_path, status = builder.create_downloadable_dataset()
    return archive_path, status
269
+
270
# ---------------------------------------------------------------------------
# Gradio UI: a single page wiring the DatasetBuilder callbacks to controls.
# ---------------------------------------------------------------------------
with gr.Blocks(theme="huggingface") as app:
    gr.Markdown("# Image Dataset Builder")
    gr.Markdown("Enter a URL to add an image and its tags to the dataset. Progress is saved automatically.")

    # --- image ingestion ---------------------------------------------------
    with gr.Row():
        dataset_name_box = gr.Textbox(lines=1, label="Dataset Name", placeholder="Enter dataset name...", value="default_dataset")
        url_box = gr.Textbox(lines=2, label="URL", placeholder="Enter image URL here...")
        cookies_box = gr.Textbox(lines=2, label="Cookies (optional)", placeholder="Enter cookies")
        add_btn = gr.Button("Add Image")

    add_result_box = gr.Textbox(label="Result")
    info_box = gr.Textbox(label="Dataset Info")

    gr.Markdown("## Dataset Preview")
    recent_gallery = gr.Gallery(label="Recent Additions", show_label=False, elem_id="preview_gallery", columns=5, rows=1, height="auto")

    add_btn.click(add_image_to_dataset, inputs=[url_box, cookies_box, dataset_name_box], outputs=[add_result_box, info_box, recent_gallery])

    # --- HF dataset validation --------------------------------------------
    create_hf_btn = gr.Button("Create HuggingFace Dataset")
    create_hf_result_box = gr.Textbox(label="Dataset Creation Result")
    create_hf_btn.click(create_huggingface_dataset, inputs=[dataset_name_box], outputs=create_hf_result_box)

    # --- browsing ----------------------------------------------------------
    view_btn = gr.Button("View Dataset")
    contents_gallery = gr.Gallery(label="Dataset Contents", show_label=False, elem_id="dataset_gallery", columns=5, rows=4, height="auto")
    view_btn.click(view_dataset, inputs=[dataset_name_box], outputs=contents_gallery)

    # --- upload to the Hub --------------------------------------------------
    gr.Markdown("## Upload Dataset to Hugging Face")
    privacy_choice = gr.Radio(choices=["private", "public"], value="private", label="Repository Privacy")
    upload_btn = gr.Button("Upload to Hugging Face")
    upload_result_box = gr.Textbox(label="Upload Result")
    upload_btn.click(upload_huggingface_dataset, inputs=[dataset_name_box, privacy_choice], outputs=upload_result_box)

    # --- ZIP download -------------------------------------------------------
    gr.Markdown("## Download Dataset")
    download_btn = gr.Button("Download Dataset")
    download_file = gr.File(label="Download")
    download_status_box = gr.Textbox(label="Download Status")

    download_btn.click(
        download_dataset,
        inputs=[dataset_name_box],
        outputs=[download_file, download_status_box]
    )

    # --- resizing -----------------------------------------------------------
    gr.Markdown("## Resize Dataset")
    resize_btn = gr.Button("Resize Dataset")
    resize_result_box = gr.Textbox(label="Resize Result")
    resize_btn.click(
        resize_dataset,
        inputs=[dataset_name_box],
        outputs=resize_result_box
    )

    gr.Markdown("## Download Resized Dataset")
    dl_resized_btn = gr.Button("Download Resized Dataset")
    dl_resized_file = gr.File(label="Download Resized")
    dl_resized_status = gr.Textbox(label="Resized Download Status")
    dl_resized_btn.click(
        download_resized_dataset,
        inputs=[dataset_name_box],
        outputs=[dl_resized_file, dl_resized_status]
    )

# Start the web server.
app.launch()