Spaces: gokaygokay — Running on Zero

gokaygokay committed • Commit b5b4980 • Parent: e1a2485

refactor

Changed files:
- app.py +2 -632
- app1.py +639 -0
- caption_models.py +91 -0
- huggingface_inference_node.py +93 -0
- prompt_generator.py +277 -0
- ui_components.py +163 -0
app.py CHANGED
@@ -1,639 +1,9 @@
-import spaces
-import gradio as gr
-import random
-import json
-import os
-import re
-from datetime import datetime
-from huggingface_hub import InferenceClient
+from ui_components import create_interface
+
 import subprocess
-import torch
-from PIL import Image
-from transformers import AutoProcessor, AutoModelForCausalLM, Qwen2VLForConditionalGeneration
-from qwen_vl_utils import process_vision_info
-import numpy as np
 
 subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
 
-huggingface_token = os.getenv("HUGGINGFACE_TOKEN")
-
-# Initialize Florence model
-device = "cuda" if torch.cuda.is_available() else "cpu"
-florence_model = AutoModelForCausalLM.from_pretrained('microsoft/Florence-2-large', trust_remote_code=True).to(device).eval()
-florence_processor = AutoProcessor.from_pretrained('microsoft/Florence-2-large', trust_remote_code=True)
-
-# Initialize Qwen2-VL-2B model
-qwen_model = Qwen2VLForConditionalGeneration.from_pretrained("Qwen/Qwen2-VL-2B-Instruct", trust_remote_code=True, torch_dtype="auto").to(device).eval()
-qwen_processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-2B-Instruct", trust_remote_code=True)
-
-# Florence caption function
-@spaces.GPU
-def florence_caption(image):
-    if not isinstance(image, Image.Image):
-        image = Image.fromarray(image)
-
-    inputs = florence_processor(text="<MORE_DETAILED_CAPTION>", images=image, return_tensors="pt").to(device)
-    generated_ids = florence_model.generate(
-        input_ids=inputs["input_ids"],
-        pixel_values=inputs["pixel_values"],
-        max_new_tokens=1024,
-        early_stopping=False,
-        do_sample=False,
-        num_beams=3,
-    )
-    generated_text = florence_processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
-    parsed_answer = florence_processor.post_process_generation(
-        generated_text,
-        task="<MORE_DETAILED_CAPTION>",
-        image_size=(image.width, image.height)
-    )
-    return parsed_answer["<MORE_DETAILED_CAPTION>"]
-
-# Add this function to your code
-def array_to_image_path(image_array):
-    # Convert numpy array to PIL Image
-    img = Image.fromarray(np.uint8(image_array))
-
-    # Generate a unique filename using timestamp
-    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
-    filename = f"image_{timestamp}.png"
-
-    # Save the image
-    img.save(filename)
-
-    # Get the full path of the saved image
-    full_path = os.path.abspath(filename)
-
-    return full_path
-
-# Qwen2-VL-2B caption function
-@spaces.GPU
-def qwen_caption(image):
-    if not isinstance(image, Image.Image):
-        image = Image.fromarray(np.uint8(image))
-
-    image_path = array_to_image_path(np.array(image))
-
-    messages = [
-        {
-            "role": "user",
-            "content": [
-                {
-                    "type": "image",
-                    "image": image_path,
-                },
-                {"type": "text", "text": "Describe this image in great detail."},
-            ],
-        }
-    ]
-
-    text = qwen_processor.apply_chat_template(
-        messages, tokenize=False, add_generation_prompt=True
-    )
-    image_inputs, video_inputs = process_vision_info(messages)
-    inputs = qwen_processor(
-        text=[text],
-        images=image_inputs,
-        videos=video_inputs,
-        padding=True,
-        return_tensors="pt",
-    )
-    inputs = inputs.to(device)
-
-    generated_ids = qwen_model.generate(**inputs, max_new_tokens=256)
-    generated_ids_trimmed = [
-        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
-    ]
-    output_text = qwen_processor.batch_decode(
-        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
-    )
-
-    return output_text[0]
-
-# Load JSON files
-def load_json_file(file_name):
-    file_path = os.path.join("data", file_name)
-    with open(file_path, "r") as file:
-        return json.load(file)
-
-# Load gender-specific JSON files
-FEMALE_DEFAULT_TAGS = load_json_file("female_default_tags.json")
-MALE_DEFAULT_TAGS = load_json_file("male_default_tags.json")
-FEMALE_BODY_TYPES = load_json_file("female_body_types.json")
-MALE_BODY_TYPES = load_json_file("male_body_types.json")
-FEMALE_CLOTHING = load_json_file("female_clothing.json")
-MALE_CLOTHING = load_json_file("male_clothing.json")
-FEMALE_ADDITIONAL_DETAILS = load_json_file("female_additional_details.json")
-MALE_ADDITIONAL_DETAILS = load_json_file("male_additional_details.json")
-
-# Load non-gender-specific JSON files
-ARTFORM = load_json_file("artform.json")
-PHOTO_TYPE = load_json_file("photo_type.json")
-ROLES = load_json_file("roles.json")
-HAIRSTYLES = load_json_file("hairstyles.json")
-PLACE = load_json_file("place.json")
-LIGHTING = load_json_file("lighting.json")
-COMPOSITION = load_json_file("composition.json")
-POSE = load_json_file("pose.json")
-BACKGROUND = load_json_file("background.json")
-PHOTOGRAPHY_STYLES = load_json_file("photography_styles.json")
-DEVICE = load_json_file("device.json")
-PHOTOGRAPHER = load_json_file("photographer.json")
-ARTIST = load_json_file("artist.json")
-DIGITAL_ARTFORM = load_json_file("digital_artform.json")
-
-class PromptGenerator:
-    def __init__(self, seed=None):
-        self.rng = random.Random(seed)
-
-    def split_and_choose(self, input_str):
-        choices = [choice.strip() for choice in input_str.split(",")]
-        return self.rng.choices(choices, k=1)[0]
-
-    def get_choice(self, input_str, default_choices):
-        if input_str.lower() == "disabled":
-            return ""
-        elif "," in input_str:
-            return self.split_and_choose(input_str)
-        elif input_str.lower() == "random":
-            return self.rng.choices(default_choices, k=1)[0]
-        else:
-            return input_str
-
-    def clean_consecutive_commas(self, input_string):
-        cleaned_string = re.sub(r',\s*,', ', ', input_string)
-        return cleaned_string
-
-    def process_string(self, replaced, seed):
-        replaced = re.sub(r'\s*,\s*', ', ', replaced)
-        replaced = re.sub(r',+', ', ', replaced)
-        original = replaced
-
-        first_break_clipl_index = replaced.find("BREAK_CLIPL")
-        second_break_clipl_index = replaced.find("BREAK_CLIPL", first_break_clipl_index + len("BREAK_CLIPL"))
-
-        if first_break_clipl_index != -1 and second_break_clipl_index != -1:
-            clip_content_l = replaced[first_break_clipl_index + len("BREAK_CLIPL"):second_break_clipl_index]
-            replaced = replaced[:first_break_clipl_index].strip(", ") + replaced[second_break_clipl_index + len("BREAK_CLIPL"):].strip(", ")
-            clip_l = clip_content_l
-        else:
-            clip_l = ""
-
-        first_break_clipg_index = replaced.find("BREAK_CLIPG")
-        second_break_clipg_index = replaced.find("BREAK_CLIPG", first_break_clipg_index + len("BREAK_CLIPG"))
-
-        if first_break_clipg_index != -1 and second_break_clipg_index != -1:
-            clip_content_g = replaced[first_break_clipg_index + len("BREAK_CLIPG"):second_break_clipg_index]
-            replaced = replaced[:first_break_clipg_index].strip(", ") + replaced[second_break_clipg_index + len("BREAK_CLIPG"):].strip(", ")
-            clip_g = clip_content_g
-        else:
-            clip_g = ""
-
-        t5xxl = replaced
-
-        original = original.replace("BREAK_CLIPL", "").replace("BREAK_CLIPG", "")
-        original = re.sub(r'\s*,\s*', ', ', original)
-        original = re.sub(r',+', ', ', original)
-        clip_l = re.sub(r'\s*,\s*', ', ', clip_l)
-        clip_l = re.sub(r',+', ', ', clip_l)
-        clip_g = re.sub(r'\s*,\s*', ', ', clip_g)
-        clip_g = re.sub(r',+', ', ', clip_g)
-        if clip_l.startswith(", "):
-            clip_l = clip_l[2:]
-        if clip_g.startswith(", "):
-            clip_g = clip_g[2:]
-        if original.startswith(", "):
-            original = original[2:]
-        if t5xxl.startswith(", "):
-            t5xxl = t5xxl[2:]
-
-        # Add spaces after commas
-        replaced = re.sub(r',(?!\s)', ', ', replaced)
-        original = re.sub(r',(?!\s)', ', ', original)
-        clip_l = re.sub(r',(?!\s)', ', ', clip_l)
-        clip_g = re.sub(r',(?!\s)', ', ', clip_g)
-        t5xxl = re.sub(r',(?!\s)', ', ', t5xxl)
-
-        return original, seed, t5xxl, clip_l, clip_g
-
-    def generate_prompt(self, seed, custom, subject, gender, artform, photo_type, body_types, default_tags, roles, hairstyles,
-                        additional_details, photography_styles, device, photographer, artist, digital_artform,
-                        place, lighting, clothing, composition, pose, background, input_image):
-        kwargs = locals()
-        del kwargs['self']
-
-        seed = kwargs.get("seed", 0)
-        if seed is not None:
-            self.rng = random.Random(seed)
-        components = []
-        custom = kwargs.get("custom", "")
-        if custom:
-            components.append(custom)
-        is_photographer = kwargs.get("artform", "").lower() == "photography" or (
-            kwargs.get("artform", "").lower() == "random"
-            and self.rng.choice([True, False])
-        )
-
-        subject = kwargs.get("subject", "")
-        gender = kwargs.get("gender", "female")
-
-        if is_photographer:
-            selected_photo_style = self.get_choice(kwargs.get("photography_styles", ""), PHOTOGRAPHY_STYLES)
-            if not selected_photo_style:
-                selected_photo_style = "photography"
-            components.append(selected_photo_style)
-            if kwargs.get("photography_style", "") != "disabled" and kwargs.get("default_tags", "") != "disabled" or subject != "":
-                components.append(" of")
-
-        default_tags = kwargs.get("default_tags", "random")
-        body_type = kwargs.get("body_types", "")
-        if not subject:
-            if default_tags == "random":
-                if body_type != "disabled" and body_type != "random":
-                    selected_subject = self.get_choice(kwargs.get("default_tags", ""), FEMALE_DEFAULT_TAGS if gender == "female" else MALE_DEFAULT_TAGS).replace("a ", "").replace("an ", "")
-                    components.append("a ")
-                    components.append(body_type)
-                    components.append(selected_subject)
-                elif body_type == "disabled":
-                    selected_subject = self.get_choice(kwargs.get("default_tags", ""), FEMALE_DEFAULT_TAGS if gender == "female" else MALE_DEFAULT_TAGS)
-                    components.append(selected_subject)
-                else:
-                    body_type = self.get_choice(body_type, FEMALE_BODY_TYPES if gender == "female" else MALE_BODY_TYPES)
-                    components.append("a ")
-                    components.append(body_type)
-                    selected_subject = self.get_choice(kwargs.get("default_tags", ""), FEMALE_DEFAULT_TAGS if gender == "female" else MALE_DEFAULT_TAGS).replace("a ", "").replace("an ", "")
-                    components.append(selected_subject)
-            elif default_tags == "disabled":
-                pass
-            else:
-                components.append(default_tags)
-        else:
-            if body_type != "disabled" and body_type != "random":
-                components.append("a ")
-                components.append(body_type)
-            elif body_type == "disabled":
-                pass
-            else:
-                body_type = self.get_choice(body_type, FEMALE_BODY_TYPES if gender == "female" else MALE_BODY_TYPES)
-                components.append("a ")
-                components.append(body_type)
-            components.append(subject)
-
-        params = [
-            ("roles", ROLES),
-            ("hairstyles", HAIRSTYLES),
-            ("additional_details", FEMALE_ADDITIONAL_DETAILS if gender == "female" else MALE_ADDITIONAL_DETAILS),
-        ]
-        for param in params:
-            components.append(self.get_choice(kwargs.get(param[0], ""), param[1]))
-        for i in reversed(range(len(components))):
-            if components[i] in PLACE:
-                components[i] += ", "
-                break
-        if kwargs.get("clothing", "") != "disabled" and kwargs.get("clothing", "") != "random":
-            components.append(", dressed in ")
-            clothing = kwargs.get("clothing", "")
-            components.append(clothing)
-        elif kwargs.get("clothing", "") == "random":
-            components.append(", dressed in ")
-            clothing = self.get_choice(kwargs.get("clothing", ""), FEMALE_CLOTHING if gender == "female" else MALE_CLOTHING)
-            components.append(clothing)
-
-        if kwargs.get("composition", "") != "disabled" and kwargs.get("composition", "") != "random":
-            components.append(", ")
-            composition = kwargs.get("composition", "")
-            components.append(composition)
-        elif kwargs.get("composition", "") == "random":
-            components.append(", ")
-            composition = self.get_choice(kwargs.get("composition", ""), COMPOSITION)
-            components.append(composition)
-
-        if kwargs.get("pose", "") != "disabled" and kwargs.get("pose", "") != "random":
-            components.append(", ")
-            pose = kwargs.get("pose", "")
-            components.append(pose)
-        elif kwargs.get("pose", "") == "random":
-            components.append(", ")
-            pose = self.get_choice(kwargs.get("pose", ""), POSE)
-            components.append(pose)
-        components.append("BREAK_CLIPG")
-        if kwargs.get("background", "") != "disabled" and kwargs.get("background", "") != "random":
-            components.append(", ")
-            background = kwargs.get("background", "")
-            components.append(background)
-        elif kwargs.get("background", "") == "random":
-            components.append(", ")
-            background = self.get_choice(kwargs.get("background", ""), BACKGROUND)
-            components.append(background)
-
-        if kwargs.get("place", "") != "disabled" and kwargs.get("place", "") != "random":
-            components.append(", ")
-            place = kwargs.get("place", "")
-            components.append(place)
-        elif kwargs.get("place", "") == "random":
-            components.append(", ")
-            place = self.get_choice(kwargs.get("place", ""), PLACE)
-            components.append(place + ", ")
-
-        lighting = kwargs.get("lighting", "").lower()
-        if lighting == "random":
-            selected_lighting = ", ".join(self.rng.sample(LIGHTING, self.rng.randint(2, 5)))
-            components.append(", ")
-            components.append(selected_lighting)
-        elif lighting == "disabled":
-            pass
-        else:
-            components.append(", ")
-            components.append(lighting)
-        components.append("BREAK_CLIPG")
-        components.append("BREAK_CLIPL")
-        if is_photographer:
-            if kwargs.get("photo_type", "") != "disabled":
-                photo_type_choice = self.get_choice(kwargs.get("photo_type", ""), PHOTO_TYPE)
-                if photo_type_choice and photo_type_choice != "random" and photo_type_choice != "disabled":
-                    random_value = round(self.rng.uniform(1.1, 1.5), 1)
-                    components.append(f", ({photo_type_choice}:{random_value}), ")
-
-            params = [
-                ("device", DEVICE),
-                ("photographer", PHOTOGRAPHER),
-            ]
-            components.extend([self.get_choice(kwargs.get(param[0], ""), param[1]) for param in params])
-            if kwargs.get("device", "") != "disabled":
-                components[-2] = f", shot on {components[-2]}"
-            if kwargs.get("photographer", "") != "disabled":
-                components[-1] = f", photo by {components[-1]}"
-        else:
-            digital_artform_choice = self.get_choice(kwargs.get("digital_artform", ""), DIGITAL_ARTFORM)
-            if digital_artform_choice:
-                components.append(f"{digital_artform_choice}")
-            if kwargs.get("artist", "") != "disabled":
-                components.append(f"by {self.get_choice(kwargs.get('artist', ''), ARTIST)}")
-        components.append("BREAK_CLIPL")
-
-        prompt = " ".join(components)
-        prompt = re.sub(" +", " ", prompt)
-        replaced = prompt.replace("of as", "of")
-        replaced = self.clean_consecutive_commas(replaced)
-
-        return self.process_string(replaced, seed)
-
-    def add_caption_to_prompt(self, prompt, caption):
-        if caption:
-            return f"{prompt}, {caption}"
-        return prompt
-
-import os
-from openai import OpenAI
-
-class HuggingFaceInferenceNode:
-    def __init__(self):
-        self.client = OpenAI(
-            base_url="https://api-inference.huggingface.co/v1/",
-            api_key=huggingface_token,
-        )
-        self.prompts_dir = "./prompts"
-        os.makedirs(self.prompts_dir, exist_ok=True)
-
-    def save_prompt(self, prompt):
-        filename_text = "hf_" + prompt.split(',')[0].strip()
-        filename_text = re.sub(r'[^\w\-_\. ]', '_', filename_text)
-        filename_text = filename_text[:30]
-        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
-        base_filename = f"{filename_text}_{timestamp}.txt"
-        filename = os.path.join(self.prompts_dir, base_filename)
-
-        with open(filename, "w") as file:
-            file.write(prompt)
-
-        print(f"Prompt saved to {filename}")
-
-    def generate(self, input_text, happy_talk, compress, compression_level, poster, custom_base_prompt=""):
-        try:
-            default_happy_prompt = """Create a detailed visually descriptive caption of this description, which will be used as a prompt for a text to image AI system (caption only, no instructions like "create an image").Remove any mention of digital artwork or artwork style. Give detailed visual descriptions of the character(s), including ethnicity, skin tone, expression etc. Imagine using keywords for a still for someone who has aphantasia. Describe the image style, e.g. any photographic or art styles / techniques utilized. Make sure to fully describe all aspects of the cinematography, with abundant technical details and visual descriptions. If there is more than one image, combine the elements and characters from all of the images creatively into a single cohesive composition with a single background, inventing an interaction between the characters. Be creative in combining the characters into a single cohesive scene. Focus on two primary characters (or one) and describe an interesting interaction between them, such as a hug, a kiss, a fight, giving an object, an emotional reaction / interaction. If there is more than one background in the images, pick the most appropriate one. Your output is only the caption itself, no comments or extra formatting. The caption is in a single long paragraph. If you feel the images are inappropriate, invent a new scene / characters inspired by these. Additionally, incorporate a specific movie director's visual style and describe the lighting setup in detail, including the type, color, and placement of light sources to create the desired mood and atmosphere. Always frame the scene, including details about the film grain, color grading, and any artifacts or characteristics specific."""
-
-            default_simple_prompt = """Create a brief, straightforward caption for this description, suitable for a text-to-image AI system. Focus on the main elements, key characters, and overall scene without elaborate details. Provide a clear and concise description in one or two sentences."""
-
-            poster_prompt = """Analyze the provided description and extract key information to create a movie poster style description. Format the output as follows:
-Title: A catchy, intriguing title that captures the essence of the scene, place the title in "".
-Main character: Give a description of the main character.
-Background: Describe the background in detail.
-Supporting characters: Describe the supporting characters
-Branding type: Describe the branding type
-Tagline: Include a tagline that captures the essence of the movie.
-Visual style: Ensure that the visual style fits the branding type and tagline.
-You are allowed to make up film and branding names, and do them like 80's, 90's or modern movie posters."""
-
-            if poster:
-                base_prompt = poster_prompt
-            elif custom_base_prompt.strip():
-                base_prompt = custom_base_prompt
-            else:
-                base_prompt = default_happy_prompt if happy_talk else default_simple_prompt
-
-            if compress and not poster:
-                compression_chars = {
-                    "soft": 600 if happy_talk else 300,
-                    "medium": 400 if happy_talk else 200,
-                    "hard": 200 if happy_talk else 100
-                }
-                char_limit = compression_chars[compression_level]
-                base_prompt += f" Compress the output to be concise while retaining key visual details. MAX OUTPUT SIZE no more than {char_limit} characters."
-
-            system_message = "You are a helpful assistant. Try your best to give the best response possible to the user."
-            user_message = f"{base_prompt}\nDescription: {input_text}"
-
-            messages = [
-                {"role": "system", "content": system_message},
-                {"role": "user", "content": user_message}
-            ]
-
-            response = self.client.chat.completions.create(
-                model="meta-llama/Meta-Llama-3.1-70B-Instruct",
-                max_tokens=1024,
-                temperature=0.7,
-                top_p=0.95,
-                messages=messages,
-            )
-
-            output = response.choices[0].message.content.strip()
-
-            # Clean up the output
-            if ": " in output:
-                output = output.split(": ", 1)[1].strip()
-            elif output.lower().startswith("here"):
-                sentences = output.split(". ")
-                if len(sentences) > 1:
-                    output = ". ".join(sentences[1:]).strip()
-
-            return output
-
-        except Exception as e:
-            print(f"An error occurred: {e}")
-            return f"Error occurred while processing the request: {str(e)}"
-
-title = """<h1 align="center">FLUX Prompt Generator</h1>
-<p><center>
-<a href="https://x.com/gokayfem" target="_blank">[X gokaygokay]</a>
-<a href="https://github.com/gokayfem" target="_blank">[Github gokayfem]</a>
-<a href="https://github.com/dagthomas/comfyui_dagthomas" target="_blank">[comfyui_dagthomas]</a>
-<p align="center">Create long prompts from images or simple words. Enhance your short prompts with prompt enhancer.</p>
-</center></p>
-"""
-
-def create_interface():
-    prompt_generator = PromptGenerator()
-    huggingface_node = HuggingFaceInferenceNode()
-
-    with gr.Blocks(theme='bethecloud/storj_theme') as demo:
-
-        gr.HTML(title)
-
-        with gr.Row():
-            with gr.Column(scale=2):
-                with gr.Accordion("Basic Settings"):
-                    custom = gr.Textbox(label="Custom Input Prompt (optional)")
-                    subject = gr.Textbox(label="Subject (optional)")
-                    gender = gr.Radio(["female", "male"], label="Gender", value="female")
-
-                    # Add the radio button for global option selection
-                    global_option = gr.Radio(
-                        ["Disabled", "Random", "No Figure Rand"],
-                        label="Set all options to:",
-                        value="Disabled"
-                    )
-
-                with gr.Accordion("Artform and Photo Type", open=False):
-                    artform = gr.Dropdown(["disabled", "random"] + ARTFORM, label="Artform", value="disabled")
-                    photo_type = gr.Dropdown(["disabled", "random"] + PHOTO_TYPE, label="Photo Type", value="disabled")
-
-                with gr.Accordion("Character Details", open=False):
-                    body_types = gr.Dropdown(["disabled", "random"] + FEMALE_BODY_TYPES + MALE_BODY_TYPES, label="Body Types", value="disabled")
-                    default_tags = gr.Dropdown(["disabled", "random"] + FEMALE_DEFAULT_TAGS + MALE_DEFAULT_TAGS, label="Default Tags", value="disabled")
-                    roles = gr.Dropdown(["disabled", "random"] + ROLES, label="Roles", value="disabled")
-                    hairstyles = gr.Dropdown(["disabled", "random"] + HAIRSTYLES, label="Hairstyles", value="disabled")
-                    clothing = gr.Dropdown(["disabled", "random"] + FEMALE_CLOTHING + MALE_CLOTHING, label="Clothing", value="disabled")
-
-                with gr.Accordion("Scene Details", open=False):
-                    place = gr.Dropdown(["disabled", "random"] + PLACE, label="Place", value="disabled")
-                    lighting = gr.Dropdown(["disabled", "random"] + LIGHTING, label="Lighting", value="disabled")
-                    composition = gr.Dropdown(["disabled", "random"] + COMPOSITION, label="Composition", value="disabled")
-                    pose = gr.Dropdown(["disabled", "random"] + POSE, label="Pose", value="disabled")
-                    background = gr.Dropdown(["disabled", "random"] + BACKGROUND, label="Background", value="disabled")
-
-                with gr.Accordion("Style and Artist", open=False):
-                    additional_details = gr.Dropdown(["disabled", "random"] + FEMALE_ADDITIONAL_DETAILS + MALE_ADDITIONAL_DETAILS, label="Additional Details", value="disabled")
-                    photography_styles = gr.Dropdown(["disabled", "random"] + PHOTOGRAPHY_STYLES, label="Photography Styles", value="disabled")
-                    device = gr.Dropdown(["disabled", "random"] + DEVICE, label="Device", value="disabled")
-                    photographer = gr.Dropdown(["disabled", "random"] + PHOTOGRAPHER, label="Photographer", value="disabled")
-                    artist = gr.Dropdown(["disabled", "random"] + ARTIST, label="Artist", value="disabled")
-                    digital_artform = gr.Dropdown(["disabled", "random"] + DIGITAL_ARTFORM, label="Digital Artform", value="disabled")
-
-                generate_button = gr.Button("Generate Prompt")
-
-            with gr.Column(scale=2):
-                with gr.Accordion("Image and Caption", open=False):
-                    input_image = gr.Image(label="Input Image (optional)")
-                    caption_output = gr.Textbox(label="Generated Caption", lines=3)
-                    caption_model = gr.Radio(["Florence-2", "Qwen2-VL"], label="Caption Model", value="Florence-2")
-                    create_caption_button = gr.Button("Create Caption")
-                    add_caption_button = gr.Button("Add Caption to Prompt")
-
-                with gr.Accordion("Prompt Generation", open=True):
-                    output = gr.Textbox(label="Generated Prompt / Input Text", lines=4)
-                    t5xxl_output = gr.Textbox(label="T5XXL Output", visible=True)
-                    clip_l_output = gr.Textbox(label="CLIP L Output", visible=True)
-                    clip_g_output = gr.Textbox(label="CLIP G Output", visible=True)
-
-            with gr.Column(scale=2):
-                with gr.Accordion("Prompt Generation with LLM", open=False):
-                    happy_talk = gr.Checkbox(label="Happy Talk", value=True)
-                    compress = gr.Checkbox(label="Compress", value=True)
-                    compression_level = gr.Radio(["soft", "medium", "hard"], label="Compression Level", value="hard")
-                    poster = gr.Checkbox(label="Poster", value=False)
-                    custom_base_prompt = gr.Textbox(label="Custom Base Prompt", lines=5)
-                generate_text_button = gr.Button("Generate Prompt with LLM (Llama 3.1 70B)")
-                text_output = gr.Textbox(label="Generated Text", lines=10)
-
-        def create_caption(image, model):
-            if image is not None:
-                if model == "Florence-2":
-                    return florence_caption(image)
-                elif model == "Qwen2-VL":
-                    return qwen_caption(image)
-            return ""
-
-        create_caption_button.click(
-            create_caption,
-            inputs=[input_image, caption_model],
-            outputs=[caption_output]
-        )
-
-        def generate_prompt_with_dynamic_seed(*args):
-            # Generate a new random seed
-            dynamic_seed = random.randint(0, 1000000)
-
-            # Call the generate_prompt function with the dynamic seed
-            result = prompt_generator.generate_prompt(dynamic_seed, *args)
-
-            # Return the result along with the used seed
-            return [dynamic_seed] + list(result)
-
-        generate_button.click(
-            generate_prompt_with_dynamic_seed,
-            inputs=[custom, subject, gender, artform, photo_type, body_types, default_tags, roles, hairstyles,
-                    additional_details, photography_styles, device, photographer, artist, digital_artform,
-                    place, lighting, clothing, composition, pose, background, input_image],
-            outputs=[gr.Number(label="Used Seed", visible=True), output, gr.Number(visible=False), t5xxl_output, clip_l_output, clip_g_output]
-        )
-
-        add_caption_button.click(
-            prompt_generator.add_caption_to_prompt,
-            inputs=[output, caption_output],
-            outputs=[output]
-        )
-
-        generate_text_button.click(
-            huggingface_node.generate,
-            inputs=[output, happy_talk, compress, compression_level, poster, custom_base_prompt],
-            outputs=text_output
-        )
-
-        def update_all_options(choice):
-            updates = {}
-            if choice == "Disabled":
-                for dropdown in [
-                    artform, photo_type, body_types, default_tags, roles, hairstyles, clothing,
-                    place, lighting, composition, pose, background, additional_details,
-                    photography_styles, device, photographer, artist, digital_artform
-                ]:
-                    updates[dropdown] = gr.update(value="disabled")
-            elif choice == "Random":
-                for dropdown in [
-                    artform, photo_type, body_types, default_tags, roles, hairstyles, clothing,
-                    place, lighting, composition, pose, background, additional_details,
-                    photography_styles, device, photographer, artist, digital_artform
-                ]:
-                    updates[dropdown] = gr.update(value="random")
-            else: # No Figure Random
-                for dropdown in [photo_type, body_types, default_tags, roles, hairstyles, clothing, pose, additional_details]:
-                    updates[dropdown] = gr.update(value="disabled")
-                for dropdown in [artform, place, lighting, composition, background, photography_styles, device, photographer, artist, digital_artform]:
-                    updates[dropdown] = gr.update(value="random")
-            return updates
-
-        global_option.change(
-            update_all_options,
-            inputs=[global_option],
-            outputs=[
-                artform, photo_type, body_types, default_tags, roles, hairstyles, clothing,
-                place, lighting, composition, pose, background, additional_details,
-                photography_styles, device, photographer, artist, digital_artform
-            ]
-        )
-
-    return demo
-
 if __name__ == "__main__":
     demo = create_interface()
     demo.launch()
app1.py ADDED
@@ -0,0 +1,639 @@
+[lines 1-484 shown: identical, line for line, to the pre-refactor app.py above, from "import spaces" through "def create_interface():"; the remainder of the 639-line file is not rendered in this view]
|
485 |
+
prompt_generator = PromptGenerator()
|
486 |
+
huggingface_node = HuggingFaceInferenceNode()
|
487 |
+
|
488 |
+
with gr.Blocks(theme='bethecloud/storj_theme') as demo:
|
489 |
+
|
490 |
+
gr.HTML(title)
|
491 |
+
|
492 |
+
with gr.Row():
|
493 |
+
with gr.Column(scale=2):
|
494 |
+
with gr.Accordion("Basic Settings"):
|
495 |
+
custom = gr.Textbox(label="Custom Input Prompt (optional)")
|
496 |
+
subject = gr.Textbox(label="Subject (optional)")
|
497 |
+
gender = gr.Radio(["female", "male"], label="Gender", value="female")
|
498 |
+
|
499 |
+
# Add the radio button for global option selection
|
500 |
+
global_option = gr.Radio(
|
501 |
+
["Disabled", "Random", "No Figure Rand"],
|
502 |
+
label="Set all options to:",
|
503 |
+
value="Disabled"
|
504 |
+
)
|
505 |
+
|
506 |
+
with gr.Accordion("Artform and Photo Type", open=False):
|
507 |
+
artform = gr.Dropdown(["disabled", "random"] + ARTFORM, label="Artform", value="disabled")
|
508 |
+
photo_type = gr.Dropdown(["disabled", "random"] + PHOTO_TYPE, label="Photo Type", value="disabled")
|
509 |
+
|
510 |
+
with gr.Accordion("Character Details", open=False):
|
511 |
+
body_types = gr.Dropdown(["disabled", "random"] + FEMALE_BODY_TYPES + MALE_BODY_TYPES, label="Body Types", value="disabled")
|
512 |
+
default_tags = gr.Dropdown(["disabled", "random"] + FEMALE_DEFAULT_TAGS + MALE_DEFAULT_TAGS, label="Default Tags", value="disabled")
|
513 |
+
roles = gr.Dropdown(["disabled", "random"] + ROLES, label="Roles", value="disabled")
|
514 |
+
hairstyles = gr.Dropdown(["disabled", "random"] + HAIRSTYLES, label="Hairstyles", value="disabled")
|
515 |
+
clothing = gr.Dropdown(["disabled", "random"] + FEMALE_CLOTHING + MALE_CLOTHING, label="Clothing", value="disabled")
|
516 |
+
|
517 |
+
with gr.Accordion("Scene Details", open=False):
|
518 |
+
place = gr.Dropdown(["disabled", "random"] + PLACE, label="Place", value="disabled")
|
519 |
+
lighting = gr.Dropdown(["disabled", "random"] + LIGHTING, label="Lighting", value="disabled")
|
520 |
+
composition = gr.Dropdown(["disabled", "random"] + COMPOSITION, label="Composition", value="disabled")
|
521 |
+
pose = gr.Dropdown(["disabled", "random"] + POSE, label="Pose", value="disabled")
|
522 |
+
background = gr.Dropdown(["disabled", "random"] + BACKGROUND, label="Background", value="disabled")
|
523 |
+
|
524 |
+
with gr.Accordion("Style and Artist", open=False):
|
525 |
+
additional_details = gr.Dropdown(["disabled", "random"] + FEMALE_ADDITIONAL_DETAILS + MALE_ADDITIONAL_DETAILS, label="Additional Details", value="disabled")
|
526 |
+
photography_styles = gr.Dropdown(["disabled", "random"] + PHOTOGRAPHY_STYLES, label="Photography Styles", value="disabled")
|
527 |
+
device = gr.Dropdown(["disabled", "random"] + DEVICE, label="Device", value="disabled")
|
528 |
+
photographer = gr.Dropdown(["disabled", "random"] + PHOTOGRAPHER, label="Photographer", value="disabled")
|
529 |
+
artist = gr.Dropdown(["disabled", "random"] + ARTIST, label="Artist", value="disabled")
|
530 |
+
digital_artform = gr.Dropdown(["disabled", "random"] + DIGITAL_ARTFORM, label="Digital Artform", value="disabled")
|
531 |
+
|
532 |
+
generate_button = gr.Button("Generate Prompt")
|
533 |
+
|
534 |
+
with gr.Column(scale=2):
|
535 |
+
with gr.Accordion("Image and Caption", open=False):
|
536 |
+
input_image = gr.Image(label="Input Image (optional)")
|
537 |
+
caption_output = gr.Textbox(label="Generated Caption", lines=3)
|
538 |
+
caption_model = gr.Radio(["Florence-2", "Qwen2-VL"], label="Caption Model", value="Florence-2")
|
539 |
+
create_caption_button = gr.Button("Create Caption")
|
540 |
+
add_caption_button = gr.Button("Add Caption to Prompt")
|
541 |
+
|
542 |
+
with gr.Accordion("Prompt Generation", open=True):
|
543 |
+
output = gr.Textbox(label="Generated Prompt / Input Text", lines=4)
|
544 |
+
t5xxl_output = gr.Textbox(label="T5XXL Output", visible=True)
|
545 |
+
clip_l_output = gr.Textbox(label="CLIP L Output", visible=True)
|
546 |
+
clip_g_output = gr.Textbox(label="CLIP G Output", visible=True)
|
547 |
+
|
548 |
+
with gr.Column(scale=2):
|
549 |
+
with gr.Accordion("Prompt Generation with LLM", open=False):
|
550 |
+
happy_talk = gr.Checkbox(label="Happy Talk", value=True)
|
551 |
+
compress = gr.Checkbox(label="Compress", value=True)
|
552 |
+
compression_level = gr.Radio(["soft", "medium", "hard"], label="Compression Level", value="hard")
|
553 |
+
poster = gr.Checkbox(label="Poster", value=False)
|
554 |
+
custom_base_prompt = gr.Textbox(label="Custom Base Prompt", lines=5)
|
555 |
+
generate_text_button = gr.Button("Generate Prompt with LLM (Llama 3.1 70B)")
|
556 |
+
text_output = gr.Textbox(label="Generated Text", lines=10)
|
557 |
+
|
558 |
+
def create_caption(image, model):
|
559 |
+
if image is not None:
|
560 |
+
if model == "Florence-2":
|
561 |
+
return florence_caption(image)
|
562 |
+
elif model == "Qwen2-VL":
|
563 |
+
return qwen_caption(image)
|
564 |
+
return ""
|
565 |
+
|
566 |
+
create_caption_button.click(
|
567 |
+
create_caption,
|
568 |
+
inputs=[input_image, caption_model],
|
569 |
+
outputs=[caption_output]
|
570 |
+
)
|
571 |
+
|
572 |
+
def generate_prompt_with_dynamic_seed(*args):
|
573 |
+
# Generate a new random seed
|
574 |
+
dynamic_seed = random.randint(0, 1000000)
|
575 |
+
|
576 |
+
# Call the generate_prompt function with the dynamic seed
|
577 |
+
result = prompt_generator.generate_prompt(dynamic_seed, *args)
|
578 |
+
|
579 |
+
# Return the result along with the used seed
|
580 |
+
return [dynamic_seed] + list(result)
|
581 |
+
|
582 |
+
generate_button.click(
|
583 |
+
generate_prompt_with_dynamic_seed,
|
584 |
+
inputs=[custom, subject, gender, artform, photo_type, body_types, default_tags, roles, hairstyles,
|
585 |
+
additional_details, photography_styles, device, photographer, artist, digital_artform,
|
586 |
+
place, lighting, clothing, composition, pose, background, input_image],
|
587 |
+
outputs=[gr.Number(label="Used Seed", visible=True), output, gr.Number(visible=False), t5xxl_output, clip_l_output, clip_g_output]
|
588 |
+
)
|
589 |
+
|
590 |
+
add_caption_button.click(
|
591 |
+
prompt_generator.add_caption_to_prompt,
|
592 |
+
inputs=[output, caption_output],
|
593 |
+
outputs=[output]
|
594 |
+
)
|
595 |
+
|
596 |
+
generate_text_button.click(
|
597 |
+
huggingface_node.generate,
|
598 |
+
inputs=[output, happy_talk, compress, compression_level, poster, custom_base_prompt],
|
599 |
+
outputs=text_output
|
600 |
+
)
|
601 |
+
|
602 |
+
def update_all_options(choice):
|
603 |
+
updates = {}
|
604 |
+
if choice == "Disabled":
|
605 |
+
for dropdown in [
|
606 |
+
artform, photo_type, body_types, default_tags, roles, hairstyles, clothing,
|
607 |
+
place, lighting, composition, pose, background, additional_details,
|
608 |
+
photography_styles, device, photographer, artist, digital_artform
|
609 |
+
]:
|
610 |
+
updates[dropdown] = gr.update(value="disabled")
|
611 |
+
elif choice == "Random":
|
612 |
+
for dropdown in [
|
613 |
+
artform, photo_type, body_types, default_tags, roles, hairstyles, clothing,
|
614 |
+
place, lighting, composition, pose, background, additional_details,
|
615 |
+
photography_styles, device, photographer, artist, digital_artform
|
616 |
+
]:
|
617 |
+
updates[dropdown] = gr.update(value="random")
|
618 |
+
else: # No Figure Random
|
619 |
+
for dropdown in [photo_type, body_types, default_tags, roles, hairstyles, clothing, pose, additional_details]:
|
620 |
+
updates[dropdown] = gr.update(value="disabled")
|
621 |
+
for dropdown in [artform, place, lighting, composition, background, photography_styles, device, photographer, artist, digital_artform]:
|
622 |
+
updates[dropdown] = gr.update(value="random")
|
623 |
+
return updates
|
624 |
+
|
625 |
+
global_option.change(
|
626 |
+
update_all_options,
|
627 |
+
inputs=[global_option],
|
628 |
+
outputs=[
|
629 |
+
artform, photo_type, body_types, default_tags, roles, hairstyles, clothing,
|
630 |
+
place, lighting, composition, pose, background, additional_details,
|
631 |
+
photography_styles, device, photographer, artist, digital_artform
|
632 |
+
]
|
633 |
+
)
|
634 |
+
|
635 |
+
return demo
|
636 |
+
|
637 |
+
if __name__ == "__main__":
|
638 |
+
demo = create_interface()
|
639 |
+
demo.launch()
|
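
The output cleanup at the end of generate() above is worth calling out: it strips either a leading "Label:"-style prefix or a leading "Here is..." preamble sentence that chat models often prepend. A standalone sketch of the same heuristic (pure Python, no model call):

def clean_llm_output(output):
    output = output.strip()
    # Drop a leading "Label: " prefix if one is present.
    if ": " in output:
        return output.split(": ", 1)[1].strip()
    # Otherwise drop a leading "Here is..." style sentence.
    if output.lower().startswith("here"):
        sentences = output.split(". ")
        if len(sentences) > 1:
            return ". ".join(sentences[1:]).strip()
    return output

print(clean_llm_output("Here is your caption. A lone astronaut drifts past a ringed planet."))
# -> "A lone astronaut drifts past a ringed planet."

Note the first branch also fires on any colon followed by a space anywhere in the text, so a caption that legitimately contains ": " loses its first part; a stricter check would anchor the match to the first sentence.
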
caption_models.py
ADDED
@@ -0,0 +1,91 @@
import spaces
import torch
from PIL import Image
from transformers import AutoProcessor, AutoModelForCausalLM, Qwen2VLForConditionalGeneration
from qwen_vl_utils import process_vision_info
import numpy as np
import os
from datetime import datetime

device = "cuda" if torch.cuda.is_available() else "cpu"

# Initialize Florence model
florence_model = AutoModelForCausalLM.from_pretrained('microsoft/Florence-2-large', trust_remote_code=True).to(device).eval()
florence_processor = AutoProcessor.from_pretrained('microsoft/Florence-2-large', trust_remote_code=True)

# Initialize Qwen2-VL-2B model
qwen_model = Qwen2VLForConditionalGeneration.from_pretrained("Qwen/Qwen2-VL-2B-Instruct", trust_remote_code=True, torch_dtype="auto").to(device).eval()
qwen_processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-2B-Instruct", trust_remote_code=True)

@spaces.GPU
def florence_caption(image):
    if not isinstance(image, Image.Image):
        image = Image.fromarray(image)

    inputs = florence_processor(text="<MORE_DETAILED_CAPTION>", images=image, return_tensors="pt").to(device)
    generated_ids = florence_model.generate(
        input_ids=inputs["input_ids"],
        pixel_values=inputs["pixel_values"],
        max_new_tokens=1024,
        early_stopping=False,
        do_sample=False,
        num_beams=3,
    )
    generated_text = florence_processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
    parsed_answer = florence_processor.post_process_generation(
        generated_text,
        task="<MORE_DETAILED_CAPTION>",
        image_size=(image.width, image.height)
    )
    return parsed_answer["<MORE_DETAILED_CAPTION>"]

def array_to_image_path(image_array):
    img = Image.fromarray(np.uint8(image_array))
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    filename = f"image_{timestamp}.png"
    img.save(filename)
    full_path = os.path.abspath(filename)
    return full_path

@spaces.GPU
def qwen_caption(image):
    if not isinstance(image, Image.Image):
        image = Image.fromarray(np.uint8(image))

    image_path = array_to_image_path(np.array(image))

    messages = [
        {
            "role": "user",
            "content": [
                {
                    "type": "image",
                    "image": image_path,
                },
                {"type": "text", "text": "Describe this image in great detail."},
            ],
        }
    ]

    text = qwen_processor.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    image_inputs, video_inputs = process_vision_info(messages)
    inputs = qwen_processor(
        text=[text],
        images=image_inputs,
        videos=video_inputs,
        padding=True,
        return_tensors="pt",
    )
    inputs = inputs.to(device)

    generated_ids = qwen_model.generate(**inputs, max_new_tokens=256)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = qwen_processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )

    return output_text[0]
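
A minimal usage sketch for the two captioners above. Both models are downloaded and moved to the device at import time, and the functions are decorated with @spaces.GPU, so this assumes a ZeroGPU Space (or an environment where the spaces package and a GPU are available); the input filename is hypothetical:

from PIL import Image
from caption_models import florence_caption, qwen_caption

image = Image.open("sample.jpg").convert("RGB")  # hypothetical input file
print(florence_caption(image))  # Florence-2 <MORE_DETAILED_CAPTION> result
print(qwen_caption(image))      # Qwen2-VL free-form detailed description
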
huggingface_inference_node.py
ADDED
@@ -0,0 +1,93 @@
import os
from openai import OpenAI
import re
from datetime import datetime


huggingface_token = os.getenv("HUGGINGFACE_TOKEN")

class HuggingFaceInferenceNode:
    def __init__(self):
        self.client = OpenAI(
            base_url="https://api-inference.huggingface.co/v1/",
            api_key=huggingface_token,
        )
        self.prompts_dir = "./prompts"
        os.makedirs(self.prompts_dir, exist_ok=True)

    def save_prompt(self, prompt):
        filename_text = "hf_" + prompt.split(',')[0].strip()
        filename_text = re.sub(r'[^\w\-_\. ]', '_', filename_text)
        filename_text = filename_text[:30]
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        base_filename = f"{filename_text}_{timestamp}.txt"
        filename = os.path.join(self.prompts_dir, base_filename)

        with open(filename, "w") as file:
            file.write(prompt)

        print(f"Prompt saved to {filename}")

    def generate(self, input_text, happy_talk, compress, compression_level, poster, custom_base_prompt=""):
        try:
            default_happy_prompt = """Create a detailed visually descriptive caption of this description, which will be used as a prompt for a text to image AI system (caption only, no instructions like "create an image"). Remove any mention of digital artwork or artwork style. Give detailed visual descriptions of the character(s), including ethnicity, skin tone, expression etc. Imagine using keywords for a still for someone who has aphantasia. Describe the image style, e.g. any photographic or art styles / techniques utilized. Make sure to fully describe all aspects of the cinematography, with abundant technical details and visual descriptions. If there is more than one image, combine the elements and characters from all of the images creatively into a single cohesive composition with a single background, inventing an interaction between the characters. Be creative in combining the characters into a single cohesive scene. Focus on two primary characters (or one) and describe an interesting interaction between them, such as a hug, a kiss, a fight, giving an object, an emotional reaction / interaction. If there is more than one background in the images, pick the most appropriate one. Your output is only the caption itself, no comments or extra formatting. The caption is in a single long paragraph. If you feel the images are inappropriate, invent a new scene / characters inspired by these. Additionally, incorporate a specific movie director's visual style and describe the lighting setup in detail, including the type, color, and placement of light sources to create the desired mood and atmosphere. Always frame the scene, including details about the film grain, color grading, and any artifacts or characteristics specific to the style."""

            default_simple_prompt = """Create a brief, straightforward caption for this description, suitable for a text-to-image AI system. Focus on the main elements, key characters, and overall scene without elaborate details. Provide a clear and concise description in one or two sentences."""

            poster_prompt = """Analyze the provided description and extract key information to create a movie poster style description. Format the output as follows:
Title: A catchy, intriguing title that captures the essence of the scene, place the title in "".
Main character: Give a description of the main character.
Background: Describe the background in detail.
Supporting characters: Describe the supporting characters.
Branding type: Describe the branding type.
Tagline: Include a tagline that captures the essence of the movie.
Visual style: Ensure that the visual style fits the branding type and tagline.
You are allowed to make up film and branding names, and do them like 80's, 90's or modern movie posters."""

            if poster:
                base_prompt = poster_prompt
            elif custom_base_prompt.strip():
                base_prompt = custom_base_prompt
            else:
                base_prompt = default_happy_prompt if happy_talk else default_simple_prompt

            if compress and not poster:
                compression_chars = {
                    "soft": 600 if happy_talk else 300,
                    "medium": 400 if happy_talk else 200,
                    "hard": 200 if happy_talk else 100
                }
                char_limit = compression_chars[compression_level]
                base_prompt += f" Compress the output to be concise while retaining key visual details. MAX OUTPUT SIZE no more than {char_limit} characters."

            system_message = "You are a helpful assistant. Try your best to give the best response possible to the user."
            user_message = f"{base_prompt}\nDescription: {input_text}"

            messages = [
                {"role": "system", "content": system_message},
                {"role": "user", "content": user_message}
            ]

            response = self.client.chat.completions.create(
                model="meta-llama/Meta-Llama-3.1-70B-Instruct",
                max_tokens=1024,
                temperature=0.7,
                top_p=0.95,
                messages=messages,
            )

            output = response.choices[0].message.content.strip()

            # Clean up the output
            if ": " in output:
                output = output.split(": ", 1)[1].strip()
            elif output.lower().startswith("here"):
                sentences = output.split(". ")
                if len(sentences) > 1:
                    output = ". ".join(sentences[1:]).strip()

            return output

        except Exception as e:
            print(f"An error occurred: {e}")
            return f"Error occurred while processing the request: {str(e)}"
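
A minimal usage sketch, assuming HUGGINGFACE_TOKEN is set in the environment before this module is imported (the OpenAI-compatible client reads it at construction time) and that the serverless Inference API is serving the Llama model. On any failure, generate returns an error string rather than raising:

from huggingface_inference_node import HuggingFaceInferenceNode

node = HuggingFaceInferenceNode()
result = node.generate(
    input_text="a lighthouse on a stormy coast",
    happy_talk=True,           # use the detailed base prompt
    compress=True,
    compression_level="hard",  # asks the model for at most ~200 characters
    poster=False,
)
print(result)
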
prompt_generator.py
ADDED
@@ -0,0 +1,277 @@
import os
import json
import random
import re

# Load JSON files
def load_json_file(file_name):
    file_path = os.path.join("data", file_name)
    with open(file_path, "r") as file:
        return json.load(file)

# Load gender-specific JSON files
FEMALE_DEFAULT_TAGS = load_json_file("female_default_tags.json")
MALE_DEFAULT_TAGS = load_json_file("male_default_tags.json")
FEMALE_BODY_TYPES = load_json_file("female_body_types.json")
MALE_BODY_TYPES = load_json_file("male_body_types.json")
FEMALE_CLOTHING = load_json_file("female_clothing.json")
MALE_CLOTHING = load_json_file("male_clothing.json")
FEMALE_ADDITIONAL_DETAILS = load_json_file("female_additional_details.json")
MALE_ADDITIONAL_DETAILS = load_json_file("male_additional_details.json")

# Load non-gender-specific JSON files
ARTFORM = load_json_file("artform.json")
PHOTO_TYPE = load_json_file("photo_type.json")
ROLES = load_json_file("roles.json")
HAIRSTYLES = load_json_file("hairstyles.json")
PLACE = load_json_file("place.json")
LIGHTING = load_json_file("lighting.json")
COMPOSITION = load_json_file("composition.json")
POSE = load_json_file("pose.json")
BACKGROUND = load_json_file("background.json")
PHOTOGRAPHY_STYLES = load_json_file("photography_styles.json")
DEVICE = load_json_file("device.json")
PHOTOGRAPHER = load_json_file("photographer.json")
ARTIST = load_json_file("artist.json")
DIGITAL_ARTFORM = load_json_file("digital_artform.json")

class PromptGenerator:
    def __init__(self, seed=None):
        self.rng = random.Random(seed)

    def split_and_choose(self, input_str):
        choices = [choice.strip() for choice in input_str.split(",")]
        return self.rng.choices(choices, k=1)[0]

    def get_choice(self, input_str, default_choices):
        if input_str.lower() == "disabled":
            return ""
        elif "," in input_str:
            return self.split_and_choose(input_str)
        elif input_str.lower() == "random":
            return self.rng.choices(default_choices, k=1)[0]
        else:
            return input_str

    def clean_consecutive_commas(self, input_string):
        cleaned_string = re.sub(r',\s*,', ', ', input_string)
        return cleaned_string

    def process_string(self, replaced, seed):
        replaced = re.sub(r'\s*,\s*', ', ', replaced)
        replaced = re.sub(r',+', ', ', replaced)
        original = replaced

        # Extract the CLIP-L segment between the paired BREAK_CLIPL markers
        first_break_clipl_index = replaced.find("BREAK_CLIPL")
        second_break_clipl_index = replaced.find("BREAK_CLIPL", first_break_clipl_index + len("BREAK_CLIPL"))

        if first_break_clipl_index != -1 and second_break_clipl_index != -1:
            clip_content_l = replaced[first_break_clipl_index + len("BREAK_CLIPL"):second_break_clipl_index]
            replaced = replaced[:first_break_clipl_index].strip(", ") + replaced[second_break_clipl_index + len("BREAK_CLIPL"):].strip(", ")
            clip_l = clip_content_l
        else:
            clip_l = ""

        # Extract the CLIP-G segment between the paired BREAK_CLIPG markers
        first_break_clipg_index = replaced.find("BREAK_CLIPG")
        second_break_clipg_index = replaced.find("BREAK_CLIPG", first_break_clipg_index + len("BREAK_CLIPG"))

        if first_break_clipg_index != -1 and second_break_clipg_index != -1:
            clip_content_g = replaced[first_break_clipg_index + len("BREAK_CLIPG"):second_break_clipg_index]
            replaced = replaced[:first_break_clipg_index].strip(", ") + replaced[second_break_clipg_index + len("BREAK_CLIPG"):].strip(", ")
            clip_g = clip_content_g
        else:
            clip_g = ""

        t5xxl = replaced

        original = original.replace("BREAK_CLIPL", "").replace("BREAK_CLIPG", "")
        original = re.sub(r'\s*,\s*', ', ', original)
        original = re.sub(r',+', ', ', original)
        clip_l = re.sub(r'\s*,\s*', ', ', clip_l)
        clip_l = re.sub(r',+', ', ', clip_l)
        clip_g = re.sub(r'\s*,\s*', ', ', clip_g)
        clip_g = re.sub(r',+', ', ', clip_g)
        if clip_l.startswith(", "):
            clip_l = clip_l[2:]
        if clip_g.startswith(", "):
            clip_g = clip_g[2:]
        if original.startswith(", "):
            original = original[2:]
        if t5xxl.startswith(", "):
            t5xxl = t5xxl[2:]

        # Add spaces after commas
        replaced = re.sub(r',(?!\s)', ', ', replaced)
        original = re.sub(r',(?!\s)', ', ', original)
        clip_l = re.sub(r',(?!\s)', ', ', clip_l)
        clip_g = re.sub(r',(?!\s)', ', ', clip_g)
        t5xxl = re.sub(r',(?!\s)', ', ', t5xxl)

        return original, seed, t5xxl, clip_l, clip_g

    def generate_prompt(self, seed, custom, subject, gender, artform, photo_type, body_types, default_tags, roles, hairstyles,
                        additional_details, photography_styles, device, photographer, artist, digital_artform,
                        place, lighting, clothing, composition, pose, background, input_image):
        kwargs = locals()
        del kwargs['self']

        seed = kwargs.get("seed", 0)
        if seed is not None:
            self.rng = random.Random(seed)
        components = []
        custom = kwargs.get("custom", "")
        if custom:
            components.append(custom)
        is_photographer = kwargs.get("artform", "").lower() == "photography" or (
            kwargs.get("artform", "").lower() == "random"
            and self.rng.choice([True, False])
        )

        subject = kwargs.get("subject", "")
        gender = kwargs.get("gender", "female")

        if is_photographer:
            selected_photo_style = self.get_choice(kwargs.get("photography_styles", ""), PHOTOGRAPHY_STYLES)
            if not selected_photo_style:
                selected_photo_style = "photography"
            components.append(selected_photo_style)
            if kwargs.get("photography_styles", "") != "disabled" and kwargs.get("default_tags", "") != "disabled" or subject != "":
                components.append(" of")

        default_tags = kwargs.get("default_tags", "random")
        body_type = kwargs.get("body_types", "")
        if not subject:
            if default_tags == "random":
                if body_type != "disabled" and body_type != "random":
                    selected_subject = self.get_choice(kwargs.get("default_tags", ""), FEMALE_DEFAULT_TAGS if gender == "female" else MALE_DEFAULT_TAGS).replace("a ", "").replace("an ", "")
                    components.append("a ")
                    components.append(body_type)
                    components.append(selected_subject)
                elif body_type == "disabled":
                    selected_subject = self.get_choice(kwargs.get("default_tags", ""), FEMALE_DEFAULT_TAGS if gender == "female" else MALE_DEFAULT_TAGS)
                    components.append(selected_subject)
                else:
                    body_type = self.get_choice(body_type, FEMALE_BODY_TYPES if gender == "female" else MALE_BODY_TYPES)
                    components.append("a ")
                    components.append(body_type)
                    selected_subject = self.get_choice(kwargs.get("default_tags", ""), FEMALE_DEFAULT_TAGS if gender == "female" else MALE_DEFAULT_TAGS).replace("a ", "").replace("an ", "")
                    components.append(selected_subject)
            elif default_tags == "disabled":
                pass
            else:
                components.append(default_tags)
        else:
            if body_type != "disabled" and body_type != "random":
                components.append("a ")
                components.append(body_type)
            elif body_type == "disabled":
                pass
            else:
                body_type = self.get_choice(body_type, FEMALE_BODY_TYPES if gender == "female" else MALE_BODY_TYPES)
                components.append("a ")
                components.append(body_type)
            components.append(subject)

        params = [
            ("roles", ROLES),
            ("hairstyles", HAIRSTYLES),
            ("additional_details", FEMALE_ADDITIONAL_DETAILS if gender == "female" else MALE_ADDITIONAL_DETAILS),
        ]
        for param in params:
            components.append(self.get_choice(kwargs.get(param[0], ""), param[1]))
        for i in reversed(range(len(components))):
            if components[i] in PLACE:
                components[i] += ", "
                break
        if kwargs.get("clothing", "") != "disabled" and kwargs.get("clothing", "") != "random":
            components.append(", dressed in ")
            clothing = kwargs.get("clothing", "")
            components.append(clothing)
        elif kwargs.get("clothing", "") == "random":
            components.append(", dressed in ")
            clothing = self.get_choice(kwargs.get("clothing", ""), FEMALE_CLOTHING if gender == "female" else MALE_CLOTHING)
            components.append(clothing)

        if kwargs.get("composition", "") != "disabled" and kwargs.get("composition", "") != "random":
            components.append(", ")
            composition = kwargs.get("composition", "")
            components.append(composition)
        elif kwargs.get("composition", "") == "random":
            components.append(", ")
            composition = self.get_choice(kwargs.get("composition", ""), COMPOSITION)
            components.append(composition)

        if kwargs.get("pose", "") != "disabled" and kwargs.get("pose", "") != "random":
            components.append(", ")
            pose = kwargs.get("pose", "")
            components.append(pose)
        elif kwargs.get("pose", "") == "random":
            components.append(", ")
            pose = self.get_choice(kwargs.get("pose", ""), POSE)
            components.append(pose)
        components.append("BREAK_CLIPG")
        if kwargs.get("background", "") != "disabled" and kwargs.get("background", "") != "random":
            components.append(", ")
            background = kwargs.get("background", "")
            components.append(background)
        elif kwargs.get("background", "") == "random":
            components.append(", ")
            background = self.get_choice(kwargs.get("background", ""), BACKGROUND)
            components.append(background)

        if kwargs.get("place", "") != "disabled" and kwargs.get("place", "") != "random":
            components.append(", ")
            place = kwargs.get("place", "")
            components.append(place)
        elif kwargs.get("place", "") == "random":
            components.append(", ")
            place = self.get_choice(kwargs.get("place", ""), PLACE)
            components.append(place + ", ")

        lighting = kwargs.get("lighting", "").lower()
        if lighting == "random":
            selected_lighting = ", ".join(self.rng.sample(LIGHTING, self.rng.randint(2, 5)))
            components.append(", ")
            components.append(selected_lighting)
        elif lighting == "disabled":
            pass
        else:
            components.append(", ")
            components.append(lighting)
        components.append("BREAK_CLIPG")
        components.append("BREAK_CLIPL")
        if is_photographer:
            if kwargs.get("photo_type", "") != "disabled":
                photo_type_choice = self.get_choice(kwargs.get("photo_type", ""), PHOTO_TYPE)
                if photo_type_choice and photo_type_choice != "random" and photo_type_choice != "disabled":
                    random_value = round(self.rng.uniform(1.1, 1.5), 1)
                    components.append(f", ({photo_type_choice}:{random_value}), ")

            params = [
                ("device", DEVICE),
                ("photographer", PHOTOGRAPHER),
            ]
            components.extend([self.get_choice(kwargs.get(param[0], ""), param[1]) for param in params])
            if kwargs.get("device", "") != "disabled":
                components[-2] = f", shot on {components[-2]}"
            if kwargs.get("photographer", "") != "disabled":
                components[-1] = f", photo by {components[-1]}"
        else:
            digital_artform_choice = self.get_choice(kwargs.get("digital_artform", ""), DIGITAL_ARTFORM)
            if digital_artform_choice:
                components.append(f"{digital_artform_choice}")
            if kwargs.get("artist", "") != "disabled":
                components.append(f"by {self.get_choice(kwargs.get('artist', ''), ARTIST)}")
        components.append("BREAK_CLIPL")

        prompt = " ".join(components)
        prompt = re.sub(" +", " ", prompt)
        replaced = prompt.replace("of as", "of")
        replaced = self.clean_consecutive_commas(replaced)

        return self.process_string(replaced, seed)

    def add_caption_to_prompt(self, prompt, caption):
        if caption:
            return f"{prompt}, {caption}"
        return prompt
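
A minimal usage sketch of the generator, assuming the data/*.json option lists are present. Every dropdown argument accepts "disabled", "random", a comma-separated list to pick from, or a literal value; the example values below are arbitrary. process_string returns a 5-tuple, with the CLIP-L and CLIP-G segments cut out of the text between the paired BREAK_CLIPL / BREAK_CLIPG markers:

from prompt_generator import PromptGenerator

gen = PromptGenerator()
original, seed, t5xxl, clip_l, clip_g = gen.generate_prompt(
    seed=42, custom="", subject="astronaut", gender="female",
    artform="photography", photo_type="random", body_types="disabled",
    default_tags="random", roles="random", hairstyles="random",
    additional_details="disabled", photography_styles="random",
    device="random", photographer="disabled", artist="disabled",
    digital_artform="disabled", place="random", lighting="random",
    clothing="random", composition="random", pose="disabled",
    background="disabled", input_image=None,
)
print(original)  # full prompt; t5xxl / clip_l / clip_g feed the separate text encoders

Because the RNG is reseeded from the seed argument on every call, repeating the call with the same seed and options reproduces the same prompt.
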
ui_components.py
ADDED
@@ -0,0 +1,163 @@
import gradio as gr
from prompt_generator import PromptGenerator
from huggingface_inference_node import HuggingFaceInferenceNode
from caption_models import florence_caption, qwen_caption
import random
from prompt_generator import ARTFORM, PHOTO_TYPE, FEMALE_BODY_TYPES, MALE_BODY_TYPES, FEMALE_DEFAULT_TAGS, MALE_DEFAULT_TAGS, ROLES, HAIRSTYLES, FEMALE_CLOTHING, MALE_CLOTHING, PLACE, LIGHTING, COMPOSITION, POSE, BACKGROUND, FEMALE_ADDITIONAL_DETAILS, MALE_ADDITIONAL_DETAILS, PHOTOGRAPHY_STYLES, DEVICE, PHOTOGRAPHER, ARTIST, DIGITAL_ARTFORM


title = """<h1 align="center">FLUX Prompt Generator</h1>
<p><center>
<a href="https://x.com/gokayfem" target="_blank">[X gokaygokay]</a>
<a href="https://github.com/gokayfem" target="_blank">[Github gokayfem]</a>
<a href="https://github.com/dagthomas/comfyui_dagthomas" target="_blank">[comfyui_dagthomas]</a>
<p align="center">Create long prompts from images or simple words. Enhance your short prompts with prompt enhancer.</p>
</center></p>
"""

def create_interface():
    prompt_generator = PromptGenerator()
    huggingface_node = HuggingFaceInferenceNode()

    with gr.Blocks(theme='bethecloud/storj_theme') as demo:

        gr.HTML(title)

        with gr.Row():
            with gr.Column(scale=2):
                with gr.Accordion("Basic Settings"):
                    custom = gr.Textbox(label="Custom Input Prompt (optional)")
                    subject = gr.Textbox(label="Subject (optional)")
                    gender = gr.Radio(["female", "male"], label="Gender", value="female")

                    global_option = gr.Radio(
                        ["Disabled", "Random", "No Figure Rand"],
                        label="Set all options to:",
                        value="Disabled"
                    )

                with gr.Accordion("Artform and Photo Type", open=False):
                    artform = gr.Dropdown(["disabled", "random"] + ARTFORM, label="Artform", value="disabled")
                    photo_type = gr.Dropdown(["disabled", "random"] + PHOTO_TYPE, label="Photo Type", value="disabled")

                with gr.Accordion("Character Details", open=False):
                    body_types = gr.Dropdown(["disabled", "random"] + FEMALE_BODY_TYPES + MALE_BODY_TYPES, label="Body Types", value="disabled")
                    default_tags = gr.Dropdown(["disabled", "random"] + FEMALE_DEFAULT_TAGS + MALE_DEFAULT_TAGS, label="Default Tags", value="disabled")
                    roles = gr.Dropdown(["disabled", "random"] + ROLES, label="Roles", value="disabled")
                    hairstyles = gr.Dropdown(["disabled", "random"] + HAIRSTYLES, label="Hairstyles", value="disabled")
                    clothing = gr.Dropdown(["disabled", "random"] + FEMALE_CLOTHING + MALE_CLOTHING, label="Clothing", value="disabled")

                with gr.Accordion("Scene Details", open=False):
                    place = gr.Dropdown(["disabled", "random"] + PLACE, label="Place", value="disabled")
                    lighting = gr.Dropdown(["disabled", "random"] + LIGHTING, label="Lighting", value="disabled")
                    composition = gr.Dropdown(["disabled", "random"] + COMPOSITION, label="Composition", value="disabled")
                    pose = gr.Dropdown(["disabled", "random"] + POSE, label="Pose", value="disabled")
                    background = gr.Dropdown(["disabled", "random"] + BACKGROUND, label="Background", value="disabled")

                with gr.Accordion("Style and Artist", open=False):
                    additional_details = gr.Dropdown(["disabled", "random"] + FEMALE_ADDITIONAL_DETAILS + MALE_ADDITIONAL_DETAILS, label="Additional Details", value="disabled")
                    photography_styles = gr.Dropdown(["disabled", "random"] + PHOTOGRAPHY_STYLES, label="Photography Styles", value="disabled")
                    device = gr.Dropdown(["disabled", "random"] + DEVICE, label="Device", value="disabled")
                    photographer = gr.Dropdown(["disabled", "random"] + PHOTOGRAPHER, label="Photographer", value="disabled")
                    artist = gr.Dropdown(["disabled", "random"] + ARTIST, label="Artist", value="disabled")
                    digital_artform = gr.Dropdown(["disabled", "random"] + DIGITAL_ARTFORM, label="Digital Artform", value="disabled")

                generate_button = gr.Button("Generate Prompt")

            with gr.Column(scale=2):
                with gr.Accordion("Image and Caption", open=False):
                    input_image = gr.Image(label="Input Image (optional)")
                    caption_output = gr.Textbox(label="Generated Caption", lines=3)
                    caption_model = gr.Radio(["Florence-2", "Qwen2-VL"], label="Caption Model", value="Florence-2")
                    create_caption_button = gr.Button("Create Caption")
                    add_caption_button = gr.Button("Add Caption to Prompt")

                with gr.Accordion("Prompt Generation", open=True):
                    output = gr.Textbox(label="Generated Prompt / Input Text", lines=4)
                    t5xxl_output = gr.Textbox(label="T5XXL Output", visible=True)
                    clip_l_output = gr.Textbox(label="CLIP L Output", visible=True)
                    clip_g_output = gr.Textbox(label="CLIP G Output", visible=True)

            with gr.Column(scale=2):
                with gr.Accordion("Prompt Generation with LLM", open=False):
                    happy_talk = gr.Checkbox(label="Happy Talk", value=True)
                    compress = gr.Checkbox(label="Compress", value=True)
                    compression_level = gr.Radio(["soft", "medium", "hard"], label="Compression Level", value="hard")
                    poster = gr.Checkbox(label="Poster", value=False)
                    custom_base_prompt = gr.Textbox(label="Custom Base Prompt", lines=5)
                    generate_text_button = gr.Button("Generate Prompt with LLM (Llama 3.1 70B)")
                    text_output = gr.Textbox(label="Generated Text", lines=10)

        def create_caption(image, model):
            if image is not None:
                if model == "Florence-2":
                    return florence_caption(image)
                elif model == "Qwen2-VL":
                    return qwen_caption(image)
            return ""

        create_caption_button.click(
            create_caption,
            inputs=[input_image, caption_model],
            outputs=[caption_output]
        )

        def generate_prompt_with_dynamic_seed(*args):
            dynamic_seed = random.randint(0, 1000000)
            result = prompt_generator.generate_prompt(dynamic_seed, *args)
            return [dynamic_seed] + list(result)

        generate_button.click(
            generate_prompt_with_dynamic_seed,
            inputs=[custom, subject, gender, artform, photo_type, body_types, default_tags, roles, hairstyles,
                    additional_details, photography_styles, device, photographer, artist, digital_artform,
                    place, lighting, clothing, composition, pose, background, input_image],
            outputs=[gr.Number(label="Used Seed", visible=True), output, gr.Number(visible=False), t5xxl_output, clip_l_output, clip_g_output]
        )

        add_caption_button.click(
            prompt_generator.add_caption_to_prompt,
            inputs=[output, caption_output],
            outputs=[output]
        )

        generate_text_button.click(
            huggingface_node.generate,
            inputs=[output, happy_talk, compress, compression_level, poster, custom_base_prompt],
            outputs=text_output
        )

        def update_all_options(choice):
            updates = {}
            if choice == "Disabled":
                for dropdown in [
                    artform, photo_type, body_types, default_tags, roles, hairstyles, clothing,
                    place, lighting, composition, pose, background, additional_details,
                    photography_styles, device, photographer, artist, digital_artform
                ]:
                    updates[dropdown] = gr.update(value="disabled")
            elif choice == "Random":
                for dropdown in [
                    artform, photo_type, body_types, default_tags, roles, hairstyles, clothing,
                    place, lighting, composition, pose, background, additional_details,
                    photography_styles, device, photographer, artist, digital_artform
                ]:
                    updates[dropdown] = gr.update(value="random")
            else:  # No Figure Rand
                for dropdown in [photo_type, body_types, default_tags, roles, hairstyles, clothing, pose, additional_details]:
                    updates[dropdown] = gr.update(value="disabled")
                for dropdown in [artform, place, lighting, composition, background, photography_styles, device, photographer, artist, digital_artform]:
                    updates[dropdown] = gr.update(value="random")
            return updates

        global_option.change(
            update_all_options,
            inputs=[global_option],
            outputs=[
                artform, photo_type, body_types, default_tags, roles, hairstyles, clothing,
                place, lighting, composition, pose, background, additional_details,
                photography_styles, device, photographer, artist, digital_artform
            ]
        )

    return demo
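
With the UI factored out into ui_components.create_interface, the Space entry point only needs to build and launch the interface. The slimmed-down app.py is not reproduced in this view, so the exact wiring is an assumption, but a minimal sketch would be:

# Hypothetical slimmed-down entry point after this refactor.
from ui_components import create_interface

demo = create_interface()
demo.launch()
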