# NOTE: the original paste began with "Spaces: Running Running" — HuggingFace
# Spaces page-header residue, not Python source. Removed.
import os | |
import gradio as gr | |
from random import randint | |
from operator import itemgetter | |
import bisect | |
from all_models import tags_plus_models,models,models_plus_tags,find_warm_model_list | |
from datetime import datetime | |
from externalmod import gr_Interface_load | |
import asyncio | |
import os | |
from threading import RLock | |
lock = RLock() | |
HF_TOKEN = os.environ.get("HF_TOKEN") if os.environ.get("HF_TOKEN") else None # If private or gated models aren't used, ENV setting is unnecessary.
nb_req_simult=80 ######## number of parallel generation "modules" the UI chains
max_pending=3 # max in-flight tasks per warm model before it is skipped
nb_gallery_model=5 # size of the per-model gallery ring
tempo_update_actu=3.0 # refresh period (seconds) of the "in progress" textbox
#incr_update_actu={}
now2 = 0
inference_timeout = 300 # seconds allowed for a normal (cold) inference
inference_timeout_w = 70 # seconds allowed in warm-models mode
inference_timeout_wp = 120 # seconds allowed for a warm model in normal mode
MAX_SEED = 2**32-1
nb_rep=2 # repetitions of each model in a generation group
nb_mod_dif=20 # number of distinct models in a generation group
nb_models=nb_mod_dif*nb_rep
# Per-session state, keyed by the stringified random session id:
cache_image={} # generated images (tuples or task dicts) per session
cache_id_image={} # image metadata per session
cache_list_task={} # pending tasks per model, normal mode
cache_text_actu={} # progress counters shown to the user
from_reload={} # True right after a session restore (skips queue rebuild once)
cache_list_task_w={} # pending tasks, warm-models mode
def load_fn(models):
    """Populate the global model registry.

    Sets three module-level globals:
      * models_load: dict mapping model name -> loaded gr.Interface
      * num_models: number of requested models
      * default_models: the list of models actually used

    A model that fails to load is replaced by a dummy interface that
    always returns None, so later lookups never raise KeyError.
    """
    global models_load
    global num_models
    global default_models
    models_load = {}
    num_models = len(models)
    if num_models != 0:
        default_models = models[:num_models]
    else:
        # FIX: was `{}` (a dict) — keep the type consistent with the
        # non-empty branch, which produces a list.
        default_models = []
    for i, model in enumerate(models, start=1):
        # Progress marker while loading long model lists.
        if i % 50 == 0:
            print("\n\n\n-------"+str(i)+'/'+str(len(models))+"-------\n\n\n")
        if model not in models_load.keys():
            try:
                m = gr_Interface_load(f'models/{model}', hf_token=HF_TOKEN)
            except Exception as error:
                # Fall back to a no-op interface so the app keeps running.
                m = gr.Interface(lambda txt: None, ['text'], ['image'])
                print(error)
            models_load.update({model: m})
# Load every model once at import time.
load_fn(models)
tags_plus_models_to_list=[]  # NOTE(review): never used afterwards in this file
list_tags=[]
# Build "<tag> (<count>)" labels for the tag dropdown.
for tag_plus_m in tags_plus_models:
    list_tags.append(tag_plus_m[0]+f" ({tag_plus_m[1]})")
models_publ=[]
# The public tab exposes at most the first 10 models.
if len(models)>10:
    nb_publ=10
else:
    nb_publ=len(models)
for i in range(nb_publ):
    models_publ.append(models[i])
def test_pass_aff(test):
    """Show the protected tab only when the entered password matches env var 'p'."""
    is_valid = (test == os.getenv('p'))
    return gr.Tab(visible=is_valid)
# https://huggingface.co./docs/api-inference/detailed_parameters | |
# https://huggingface.co./docs/huggingface_hub/package_reference/inference_client | |
async def infer(model_str, prompt, nprompt="", height=None, width=None, steps=None, cfg=None, seed=-1, timeout=inference_timeout):
    """Run one inference on `models_load[model_str]` in a worker thread.

    Size/steps/cfg kwargs are forwarded only when they hold a usable value;
    a negative seed is replaced by a random one. Returns the absolute path
    of the saved PNG, or None on timeout/failure.
    """
    from pathlib import Path
    kwargs = {}
    # Forward only meaningful values; otherwise the model's defaults apply.
    if height is not None and height >= 256: kwargs["height"] = height
    if width is not None and width >= 256: kwargs["width"] = width
    if steps is not None and steps >= 1: kwargs["num_inference_steps"] = steps
    # NOTE(review): the chained assignment re-binds cfg to itself — harmless.
    if cfg is not None and cfg > 0: cfg = kwargs["guidance_scale"] = cfg
    if seed >= 0: kwargs["seed"] = seed
    else: kwargs["seed"] = randint(1, MAX_SEED-1)
    # The blocking HF call runs in a thread so the event loop stays free.
    task = asyncio.create_task(asyncio.to_thread(models_load[model_str].fn,
                               prompt=prompt, negative_prompt=nprompt, **kwargs, token=HF_TOKEN))
    await asyncio.sleep(3)
    try:
        result = await asyncio.wait_for(task, timeout=timeout)
    except (Exception, asyncio.TimeoutError) as e:
        print(e)
        print(f"Task timed out: {model_str}")
        if not task.done(): task.cancel()
        result = None
    if task.done() and result is not None:
        # Save under a random, collision-resistant name; lock serializes
        # concurrent saves from parallel modules.
        with lock:
            nb_rand1=randint(1, MAX_SEED)
            nb_rand2=randint(1, MAX_SEED)
            nb_rand3=randint(1, MAX_SEED)
            png_path = f"image_{nb_rand1}_{nb_rand2}_{nb_rand3}.png"
            result.save(png_path)
            image = str(Path(png_path).resolve())
        return image
    return None
def gen_fn(model_str, prompt, nprompt="", height=None, width=None, steps=None, cfg=None, seed=-1,timeout=inference_timeout): | |
if model_str == 'NA': | |
return None | |
try: | |
loop = asyncio.new_event_loop() | |
result = loop.run_until_complete(infer(model_str, prompt, nprompt, | |
height, width, steps, cfg, seed, timeout)) | |
except (Exception, asyncio.CancelledError) as e: | |
print(e) | |
print(f"Task aborted: {model_str}") | |
result = None | |
finally: | |
loop.close() | |
return result | |
def add_gallery(image, model_str, gallery):
    """Append (image, model_str) to gallery; a None gallery starts a new list."""
    items = [] if gallery is None else gallery
    if image is not None:
        items.append((image, model_str))
    return items
def reset_gallery(gallery):
    """Return a fresh empty gallery; the previous content is discarded."""
    fresh = add_gallery(None, "", [])
    return fresh
def load_gallery(gallery, id):
    """Rebuild the gallery from the session's cached (image, model) pairs."""
    rebuilt = reset_gallery(gallery)
    for entry in cache_image[f"{id}"]:
        rebuilt = add_gallery(entry[0], entry[1], rebuilt)
    return rebuilt
def load_gallery_sorted(gallery, id):
    """Rebuild the gallery from the session cache, sorted by model name."""
    rebuilt = reset_gallery(gallery)
    ordered = sorted(cache_image[f"{id}"], key=itemgetter(1))
    for entry in ordered:
        rebuilt = add_gallery(entry[0], entry[1], rebuilt)
    return rebuilt
def add_cache_image(image, model_str,id,cache_image=cache_image):
    # Store one (image, model) pair in the session's image cache.
    # NOTE(review): the default argument deliberately aliases the module-level
    # cache_image dict, bound once at definition time — do not "fix" it to None.
    if image is not None:
        cache_image[f"{id}"].append((image,model_str))
    #cache_image=sorted(cache_image, key=itemgetter(1))
    return
def reset_cache_image(id, cache_image=cache_image):
    """Empty one session's image cache in place (the list object survives)."""
    session_images = cache_image[f"{id}"]
    session_images.clear()
    return
def reset_cache_image_all_sessions(cache_image=cache_image):
    """Empty every session's image list; the session entries themselves remain."""
    # Only the values are needed, so iterate them directly.
    for images in cache_image.values():
        images.clear()
    return
def set_session(id,warm=False):
    # Create a session on first use (id==0): allocate every per-session cache
    # under a fresh random key and return it as a hidden gr.Number.
    # An already-initialised id is passed through unchanged.
    if id==0:
        randTemp=randint(1,MAX_SEED)
        cache_image[f"{randTemp}"]=[]
        cache_id_image[f"{randTemp}"]=[]
        cache_list_task[f"{randTemp}"]=[]
        cache_text_actu[f"{randTemp}"]={}
        # "warm" selects the warm-models workflow for this session.
        if warm:
            cache_text_actu[f"{randTemp}"]["warm"]=True
        else:
            cache_text_actu[f"{randTemp}"]["warm"]=False
        from_reload[f"{randTemp}"]=False
        cache_list_task_w[f"{randTemp}"]=[]
        #incr_update_actu[f"{randTemp}"]=0
        return gr.Number(visible=False,value=randTemp)
    else :
        return id
def fonc_restore_session(id):
    # Rebuild the prompt list and the model list of a previous session from
    # its pending tasks and already-generated images, then re-arm the counters.
    from_reload[f"{id}"]=True
    list_param=[]
    list_models=[]
    # Pending tasks still carry their generation parameters.
    for m in cache_list_task[f"{id}"]:
        if m["model"] not in list_models:
            list_models.append(m["model"])
        for t in m["task"]:
            if [t["prompt"],t["nprompt"],t["width"],t["height"],t["steps"],t["cfg"],t["seed"]] not in list_param:
                list_param.append([t["prompt"],t["nprompt"],t["width"],t["height"],t["steps"],t["cfg"],t["seed"]])
    # Completed images (task dicts) carry the same parameter keys.
    for t in cache_image[f"{id}"]:
        if t["model"] not in list_models :
            list_models.append(t["model"])
        if [t["prompt"],t["nprompt"],t["width"],t["height"],t["steps"],t["cfg"],t["seed"]] not in list_param:
            list_param.append([t["prompt"],t["nprompt"],t["width"],t["height"],t["steps"],t["cfg"],t["seed"]])
    # Reset the module budget and clear the stop flag so generation resumes.
    cache_text_actu[f"{id}"]["nb_modules_use"]=nb_req_simult
    cache_text_actu[f"{id}"]["stop"]=False
    return gr.Dropdown(choices=[["a",list_param]], value=list_param) ,gr.Dataset(samples=list_param), list_models , len(list_models)
def print_info_sessions():
    """Return a human-readable summary of every session's image count."""
    total = 0
    lines = ["number of sessions : " + str(len(cache_image)) + "\n"]
    for key, images in cache_image.items():
        lines.append("session " + key + " : " + str(len(images)) + "\n")
        total += len(images)
    lines.append("images total = " + str(total) + "\n")
    return "".join(lines)
def disp_models(group_model_choice, nb_rep=nb_rep):
    """Format the chosen models as a quoted list, one blank line every 8/nb_rep entries."""
    # Deduplicate while preserving order.
    unique = []
    for m in group_model_choice:
        if m not in unique:
            unique.append(m)
    out = '\n'
    for idx, m in enumerate(unique, start=1):
        out += "\"" + m + "\",\n"
        # NOTE(review): 8/nb_rep is a float; the == 0 test only fires on
        # exact multiples, exactly as before.
        if idx % (8 / nb_rep) == 0:
            out += "\n"
    return gr.Textbox(label="models", value=out)
def search_models(str_search, tags_plus_models=tags_plus_models):
    """Search model names containing str_search; additionally list every model
    of the tag whose name equals str_search (case-insensitive).

    Returns a gr.Textbox whose value holds the quoted matching names.
    """
    output1 = "\n"
    output2 = ""
    # tags_plus_models[0][2] is the full model list.
    for m in tags_plus_models[0][2]:
        if m.find(str_search) != -1:
            output1 += "\"" + m + "\",\n"
    outputPlus = "\n From tags : \n\n"
    # FIX: the loop variable used to shadow the `tags_plus_models` parameter,
    # making the body confusing and fragile; use a distinct name.
    for tag_entry in tags_plus_models:
        if str_search.lower() == tag_entry[0].lower() and str_search != "":
            for m in tag_entry[2]:
                output2 += "\"" + m + "\",\n"
    if output2 != "":
        output = output1 + outputPlus + output2
    else:
        output = output1
    return gr.Textbox(label="out", value=output)
def search_info(txt_search_info,models_plus_tags=models_plus_tags):
    # Show the tags of one model. The name may be pasted bare or wrapped in
    # double quotes (as produced by the other search boxes).
    outputList=[]
    if txt_search_info.find("\"")!=-1:
        # Extract the name between the first pair of quotes.
        start=txt_search_info.find("\"")+1
        end=txt_search_info.find("\"",start)
        m_name=cutStrg(txt_search_info,start,end)
    else :
        m_name = txt_search_info
    # models_plus_tags entries look like [model_name, tag_list].
    for m in models_plus_tags:
        if m_name == m[0]:
            outputList=m[1]
    if len(outputList)==0:
        outputList.append("Model Not Find")
    return gr.Textbox(label="out",value=outputList)
def ratio_chosen(choice_ratio, width, height):
    """Overwrite the width/height sliders with a preset ratio; [None, None] keeps them."""
    if choice_ratio == [None, None]:
        return width, height
    new_width = gr.Slider(label="Width", info="If 0, the default value is used.", maximum=2024, step=32, value=choice_ratio[0])
    new_height = gr.Slider(label="Height", info="If 0, the default value is used.", maximum=2024, step=32, value=choice_ratio[1])
    return new_width, new_height
# Preset aspect ratios: [label, [width, height]]; [None, None] keeps the sliders.
list_ratios=[["None",[None,None]],
    ["4:1 (2048 x 512)",[2048,512]],
    ["12:5 (1536 x 640)",[1536,640]],
    ["~16:9 (1344 x 768)",[1344,768]],
    ["~3:2 (1216 x 832)",[1216,832]],
    ["~4:3 (1152 x 896)",[1152,896]],
    ["1:1 (1024 x 1024)",[1024,1024]],
    ["~3:4 (896 x 1152)",[896,1152]],
    ["~2:3 (832 x 1216)",[832,1216]],
    ["~9:16 (768 x 1344)",[768,1344]],
    ["5:12 (640 x 1536)",[640,1536]],
    ["1:4 (512 x 2048)",[512,2048]]]
def fonc_add_param(lp, txt_input, neg_input, width, height, steps, cfg, seed):
    """Append one parameter row per '/'-separated prompt, dropping the placeholder row."""
    placeholder = ["", "", 0, 0, 0, 0, -1]
    if lp == [placeholder]:
        lp.remove(placeholder)
    #lp.append([txt_input,neg_input,width,height,steps,cfg,seed])
    for prompt in txt_input.split("/"):
        lp.append([prompt, neg_input, width, height, steps, cfg, seed])
    return gr.Dataset(samples=lp), gr.Dropdown(choices=[["a", lp]], value=lp)
def fonc_del_param(lp, txt_input, neg_input, width, height, steps, cfg, seed):
    """Remove one parameter row; restore the placeholder when the list becomes empty."""
    row = [txt_input, neg_input, width, height, steps, cfg, seed]
    if row in lp:
        lp.remove(row)
    if not lp:
        lp.append(["", "", 0, 0, 0, 0, -1])
    return gr.Dataset(samples=lp), gr.Dropdown(choices=[["a", lp]], value=lp)
def fonc_load_info(nb_of_models_to_gen,index_tag,index_first_model):
    # Select nb_of_models_to_gen consecutive models of a tag starting at
    # index_first_model; clamp the count when it overruns the tag's list.
    # Returns (clamped count, info textbox, hidden dropdown with the list).
    str_temp=""
    list_models_temp=[]
    if index_first_model+nb_of_models_to_gen>len(tags_plus_models[index_tag][2]):
        if nb_of_models_to_gen>len(tags_plus_models[index_tag][2]):
            str_temp+="warning : to many model chosen"
        else:
            str_temp+="warning : first model to close to the last model"
        # Clamp so the selection ends exactly at the last model of the tag.
        nb_of_models_to_gen= len(tags_plus_models[index_tag][2])-index_first_model
        str_temp+=f" - only {nb_of_models_to_gen} will be use\n\n"
    str_temp+="list of models use (from "
    str_temp+=f"{index_first_model+1}/{len(tags_plus_models[index_tag][2])} to {index_first_model+nb_of_models_to_gen}/{len(tags_plus_models[index_tag][2])}) :\n\n"
    for i in range(nb_of_models_to_gen):
        list_models_temp.append(tags_plus_models[index_tag][2][i+index_first_model])
        str_temp+=f"\"{tags_plus_models[index_tag][2][i+index_first_model]}\",\n"
    return nb_of_models_to_gen,gr.Textbox(str_temp),gr.Dropdown(choices=[["",list_models_temp]], value=list_models_temp )
def load_random_models(nb_of_models_to_gen,index_tag):
    # Pick nb_of_models_to_gen distinct random models from the chosen tag;
    # when the request covers the whole tag, use the full list instead.
    str_temp=""
    list_models_temp=[]
    list_random=[]
    if nb_of_models_to_gen>=len(tags_plus_models[index_tag][2]):
        str_temp+="warning : to many model chosen"
        nb_of_models_to_gen= len(tags_plus_models[index_tag][2])
        str_temp+=f" - only {nb_of_models_to_gen} will be use\n\n"
        list_models_temp=tags_plus_models[index_tag][2]
        for m in list_models_temp:
            str_temp+=f"\"{m}\",\n"
    else :
        # Sample without replacement from a copy of the tag's model list.
        list_random=tags_plus_models[index_tag][2].copy()
        for i in range(nb_of_models_to_gen):
            i_rand=randint(0,len(list_random)-1)
            m=list_random.pop(i_rand)
            list_models_temp.append(m)
            str_temp+=f"\"{m}\",\n"
    return nb_of_models_to_gen,gr.Textbox(str_temp),gr.Dropdown(choices=[["",list_models_temp]], value=list_models_temp )
def fonc_load_info_custom(nb_of_models_to_gen,list_model_custom,index_first_model):
    # Same selection logic as fonc_load_info, but over a user-pasted custom
    # model list instead of a tag's model list.
    str_temp=""
    list_models_temp=[]
    if index_first_model+nb_of_models_to_gen>len(list_model_custom):
        if nb_of_models_to_gen>len(list_model_custom):
            str_temp+="warning : to many model chosen"
        else:
            str_temp+="warning : first model to close to the last model"
        # Clamp so the selection ends at the last custom model.
        nb_of_models_to_gen= len(list_model_custom)-index_first_model
        str_temp+=f" - only {nb_of_models_to_gen} will be use\n\n"
    str_temp+="list of models CUSTOM use (from "
    str_temp+=f"{index_first_model+1}/{len(list_model_custom)} to {index_first_model+nb_of_models_to_gen}/{len(list_model_custom)}) :\n\n"
    for i in range(nb_of_models_to_gen):
        list_models_temp.append(list_model_custom[i+index_first_model])
        str_temp+=f"\"{list_model_custom[i+index_first_model]}\",\n"
    return nb_of_models_to_gen,gr.Textbox(str_temp),gr.Dropdown(choices=[["",list_models_temp]], value=list_models_temp )
def crea_list_task(id_session,list_param,list_models_to_gen,nb_images_by_prompt):
    # Build the per-model task queue for a normal (non-warm) run.
    # Skipped exactly once right after a session restore so the restored
    # queue survives the dropdown-change event that triggers this.
    if from_reload[f"{id_session}"]==True:
        from_reload[f"{id_session}"]=False
        return
    cache_list_task[f"{id_session}"]=[]
    dict_temp={}
    list_progress=[]
    for m in list_models_to_gen:
        # One entry per model: its task list plus scheduling counters.
        dict_temp={}
        dict_temp["model"]=m
        dict_temp["id_module"]=-1
        dict_temp["pending_task"]=0
        dict_temp["task"]=[]
        list_progress.append(0)
        index_prompt=0
        for p in list_param:
            for i in range(nb_images_by_prompt):
                # p = [prompt, nprompt, width, height, steps, cfg, seed]
                dict_temp["task"].append({"prompt":p[0],"nprompt":p[1],"width":p[2],"height":p[3],"steps":p[4],"cfg":p[5],"seed":p[6],"index_prompt":index_prompt})
            index_prompt+=1
        cache_list_task[f"{id_session}"].append(dict_temp)
    # Fresh progress counters; rendered by fonc_update_actu_2.
    cache_text_actu[f"{id_session}"]={"nb_modules_use":nb_req_simult,"stop":False,"nb_fail":0,"warm":False,
                "nb_models_to_do":len(list_models_to_gen) ,"nb_models_tot":len(list_models_to_gen) ,
                "nb_tasks_to_do":len(list_models_to_gen)*len(list_param)*nb_images_by_prompt ,
                "nb_tasks_tot":len(list_models_to_gen)*len(list_param)*nb_images_by_prompt,
                "progress":list_progress,'nb_tasks_by_model': nb_images_by_prompt*len(list_param),
                "nb_warm_in_use":0}
def fonc_update_actu(text_actu, id):
    """Render the progress counters of session `id` as a textbox value."""
    state = cache_text_actu[str(id)]
    parts = [
        f"modules: {state['nb_modules_use']}/{nb_req_simult}\n",
        f"models remaining: {state['nb_models_to_do']}/{state['nb_models_tot']}\n",
    ]
    # One digit per model; space every 10, newline every 50 for readability.
    for pos, digit in enumerate(state['progress'], start=1):
        parts.append(str(digit))
        if pos % 10 == 0:
            parts.append(" ")
        if pos % 50 == 0:
            parts.append("\n")
    parts.append("\n")
    parts.append(f"images remaining: {state['nb_tasks_to_do']}/{state['nb_tasks_tot']}\n")
    parts.append(f"fail attempt: {state['nb_fail']}")
    return gr.Textbox("".join(parts))
def fonc_update_actu_2(id):
    # Periodic status text for normal mode (polled every tempo_update_actu s).
    if id == 0 :
        return gr.Textbox("waiting...")
    if cache_text_actu[str(id)]['warm']==True:
        # Warm-mode sessions are handled by fonc_update_actu_w instead.
        return gr.Textbox("waiting...")
    s=""
    i=0
    nb_ones=0
    # Progress digits: 0 = not started, 1 = started, 2-9 = completion steps.
    for d in cache_text_actu[str(id)]['progress']:
        i+=1
        if d==1:
            nb_ones+=1
        s+=str(d)
        if i%10==0:
            s+=" "
        if i%50==0:
            s+="\n"
    s+="\n"
    s+=f"modules: {cache_text_actu[str(id)]['nb_modules_use']}/{nb_req_simult} ({nb_ones}/{cache_text_actu[str(id)]['nb_modules_use']})\n"
    s+=f"models remaining: {cache_text_actu[str(id)]['nb_models_to_do']}/{cache_text_actu[str(id)]['nb_models_tot']}\n"
    s+=f"images remaining(done): {cache_text_actu[str(id)]['nb_tasks_to_do']}({cache_text_actu[str(id)]['nb_tasks_tot']-cache_text_actu[str(id)]['nb_tasks_to_do']})/{cache_text_actu[str(id)]['nb_tasks_tot']}\n"
    s+=f"fail attempt: {cache_text_actu[str(id)]['nb_fail']}\n"
    s+=f"warm task pending = {cache_text_actu[str(id)]['nb_warm_in_use']}\n"
    #s+=f"{tempo_update_actu*incr_update_actu[str(id)]} s"
    #incr_update_actu[str(id)]+=1
    # Random suffix so Gradio always sees the value as changed and refreshes.
    s+=f"{randint(1,MAX_SEED)}"
    return gr.Textbox(s)
def cutStrg(longStrg, start, end):
    """Return the substring of longStrg between indices start (inclusive)
    and end (exclusive).

    FIX: replaces a manual character-by-character loop with native slicing —
    same result for valid indices, and no IndexError on out-of-range ends.
    """
    return longStrg[start:end]
def aff_models_perso(txt_list_perso,models=models):
    # Parse a pasted text of double-quoted model names, keeping only names
    # present in `models`. Scanning stops at the end of the text (no closing
    # quote found). Returns a hidden dropdown holding the resulting list.
    list_perso=[]
    t1=True
    start=txt_list_perso.find('\"')
    if start!=-1:
        while t1:
            start+=1
            end=txt_list_perso.find('\"',start)
            if end != -1:
                txtTemp=cutStrg(txt_list_perso,start,end)
                if txtTemp in models:
                    list_perso.append(cutStrg(txt_list_perso,start,end))
            else :
                # No closing quote: stop parsing.
                t1=False
            start=txt_list_perso.find('\"',end+1)
            if start==-1:
                t1=False
    return gr.Dropdown(choices=[["",list_perso]], value=list_perso )
def add_gallery(image, model_str, gallery):
    # NOTE(review): exact duplicate of the add_gallery defined earlier in this
    # file; this second definition shadows the first and could be removed.
    if gallery is None: gallery = []
    #with lock:
    if image is not None: gallery.append((image, model_str))
    return gallery
def reset_gallery(gallery):
    # NOTE(review): duplicate of the reset_gallery defined earlier; shadows it.
    return add_gallery(None,"",[])
def fonc_load_gallery(id_session, gallery):
    """Fill the gallery with every cached image of the session and make it visible."""
    gallery = reset_gallery(gallery)
    for entry in cache_image[f"{id_session}"]:
        gallery = add_gallery(entry["image"], entry["model"], gallery)
    return gr.Gallery(gallery, visible=True)
def fonc_move_gallery_by_model(id_session,gallery,index_g,models,index_m,direction):
    # Rotate the ring of nb_gallery_model pre-loaded galleries one step in
    # `direction`. Every gallery slot receives this call; only the slot whose
    # position (index_g) matches a role implied by the new center reacts.
    delta=int((nb_gallery_model-1)/2)
    list_image_temp=[]
    if index_g==(index_m+(delta*direction))%nb_gallery_model :
        # This slot becomes the farthest pre-loaded gallery: refill it with
        # the images of the model that just entered the ring.
        gallery = reset_gallery(gallery)
        for i in range(len(cache_image[f"{id_session}"])):
            if cache_image[f"{id_session}"][i]["model"]==models[(index_m+(delta*direction))%len(models)]:
                list_image_temp.append([cache_image[f"{id_session}"][i]["image"],cache_image[f"{id_session}"][i]["model"],cache_image[f"{id_session}"][i]["index_prompt"]])
        # Keep images in prompt order inside the gallery.
        for temp in sorted(list_image_temp,key=itemgetter(2)):
            gallery=add_gallery(temp[0],temp[1],gallery)
    if index_g==(index_m-direction)%nb_gallery_model:
        # The slot that just moved off-center: hide it.
        return gr.Gallery(gallery,visible=False)
        #return gr.Gallery(gallery,visible=True)
    elif index_g==index_m%nb_gallery_model:
        # The newly focused slot: show it.
        return gr.Gallery(gallery,visible=True)
    else:
        return gallery
def fonc_start(id_session,id_module,s,cont,list_models_to_gen):
    # One generation step of module `id_module` in normal mode: pick a
    # model+task, generate one image, update the session bookkeeping.
    # Returns (image, chaining textbox, refresh trigger); each output change
    # retriggers the next step in the Gradio event chain.
    if cont==False:
        # User pressed STOP: release this module.
        cache_text_actu[f"{id_session}"]["nb_modules_use"]-=1
        print("manual stop")
        return None,gr.Textbox(s),gr.Number(randint(1,MAX_SEED))
    task_actu={}
    model_actu=""
    use_warm_model=False
    print(f"in fonc : id module={id_module}\n")
    # Prefer models currently warm on the Hub (shorter timeout path).
    warm_models , models_plus_tags_temp = find_warm_model_list("John6666", ["stable-diffusion-xl"], "", "last_modified", 10000)
    for model_plus_tasks in cache_list_task[f"{id_session}"]:
        if model_actu == "":
            if model_plus_tasks["model"] in warm_models:
                if model_plus_tasks["pending_task"]<max_pending:
                    try:
                        task_actu=model_plus_tasks["task"].pop()
                    except:
                        # Task list empty: try the next model.
                        continue
                    model_actu=model_plus_tasks["model"]
                    model_plus_tasks["pending_task"]+=1
                    use_warm_model=True
                    cache_text_actu[f"{id_session}"]["nb_warm_in_use"]+=1
                    print(f"warm model : {model_actu}\n")
                    break
    if model_actu == "":
        # No warm model available: take the first model with nothing in flight.
        for model_plus_tasks in cache_list_task[f"{id_session}"]:
            if model_plus_tasks["pending_task"]==0:
                try:
                    task_actu=model_plus_tasks["task"].pop()
                except:
                    continue
                model_actu=model_plus_tasks["model"]
                model_plus_tasks["pending_task"]+=1
                print(f"find model : {model_actu}\n")
                if len(model_plus_tasks["task"])==cache_text_actu[f"{id_session}"]["nb_tasks_by_model"]-1:
                    # First task popped for this model: mark it started (digit 1).
                    i=0
                    for model in list_models_to_gen:
                        if model_actu==model:
                            cache_text_actu[f"{id_session}"]['progress'][i]=1
                        i+=1
                break
    if model_actu=="":
        # Nothing left to schedule: release this module.
        cache_text_actu[f"{id_session}"]["nb_modules_use"]-=1
        print("Stop with :"+s+"\n")
        return None,gr.Textbox(s),gr.Number(randint(1,MAX_SEED))
    print("begin gen image:")
    print(model_actu)
    print(task_actu)
    # Warm models use the shorter inference_timeout_wp deadline.
    if use_warm_model:
        result=gen_fn(model_actu, task_actu["prompt"], task_actu["nprompt"], task_actu["height"], task_actu["width"], task_actu["steps"], task_actu["cfg"], task_actu["seed"],inference_timeout_wp)
    else:
        result=gen_fn(model_actu, task_actu["prompt"], task_actu["nprompt"], task_actu["height"], task_actu["width"], task_actu["steps"], task_actu["cfg"], task_actu["seed"])
    print("reception")
    if result!=None:
        #result=gr.Image(result)
        id_image=len(cache_image[f"{id_session}"])
        i=0
        for model_plus_tasks in cache_list_task[f"{id_session}"]:
            if model_plus_tasks["model"]==model_actu:
                model_plus_tasks["pending_task"]-=1
                cache_text_actu[f"{id_session}"]["nb_tasks_to_do"]-=1
                # Progress digit 2..9 reflects this model's completion ratio.
                i=0
                for model in list_models_to_gen:
                    if model_actu==model:
                        cache_text_actu[f"{id_session}"]['progress'][i]=int(((1-((len(model_plus_tasks["task"])+model_plus_tasks["pending_task"])/cache_text_actu[f"{id_session}"]["nb_tasks_by_model"]))*7)//1)+2
                    i+=1
                if len(model_plus_tasks["task"])+model_plus_tasks["pending_task"]==0:
                    # Model fully done: drop it from the queue.
                    # NOTE(review): removes from the list currently being
                    # iterated — tolerated here, but fragile.
                    cache_list_task[f"{id_session}"].remove(model_plus_tasks)
                    cache_text_actu[f"{id_session}"]["nb_models_to_do"]-=1
                task_actu["id_image"]=id_image
                task_actu["model"]=model_actu
                task_actu["image"]=result
                #cache_image[f"{id_session}"].append(result)
                #cache_id_image[f"{id_session}"].append(task_actu)
                cache_image[f"{id_session}"].append(task_actu)
                print("image saved\n")
    else:
        # NOTE(review): this branch relies on `model_plus_tasks` leaking from
        # the selection loop above (both selection loops break on the chosen
        # model), so the failed task is re-queued on the right model.
        model_plus_tasks["task"].append(task_actu)
        model_plus_tasks["pending_task"]-=1
        cache_text_actu[f"{id_session}"]["nb_fail"]+=1
        print("fail to generate\n")
    num_task_to_do=0
    for model_plus_tasks in cache_list_task[f"{id_session}"]:
        for task in model_plus_tasks["task"]:
            num_task_to_do+=1
    if use_warm_model:
        cache_text_actu[f"{id_session}"]["nb_warm_in_use"]-=1
    print(f"\n {num_task_to_do} tasks to do\n")
    return result , gr.Textbox(s+"1"),gr.Number(randint(1,MAX_SEED))
def fonc_init(s):
    """Append a '1' marker to the chaining string, triggering the next module."""
    chained = s + "1"
    return gr.Textbox(chained)
def fonc_load_gallery_by_model(id_session,gallery,models,index_g,index_m,gallery_all):
    # Fill gallery slot index_g with the images of the model it represents
    # (slots are spread delta positions around the focused index_m), and hide
    # the "all images" gallery.
    delta=int((nb_gallery_model-1)/2)
    gallery = reset_gallery(gallery)
    list_image_temp=[]
    for i in range(len(cache_image[f"{id_session}"])):
        if cache_image[f"{id_session}"][i]["model"]==models[((index_m+index_g+delta)%nb_gallery_model)-delta]:
            list_image_temp.append([cache_image[f"{id_session}"][i]["image"],cache_image[f"{id_session}"][i]["model"],cache_image[f"{id_session}"][i]["index_prompt"]])
    # Sort by prompt index so images appear in generation order.
    for temp in sorted(list_image_temp,key=itemgetter(2)):
        gallery=add_gallery(temp[0],temp[1],gallery)
    # Only the slot matching the focused model is shown.
    return gr.Gallery(gallery,visible=(index_g==(index_m%nb_gallery_model))), gr.Gallery(gallery_all,visible=False)
def load_gallery_by_prompt(id_session, gallery, index_p, list_p):
    """Show every cached image of one prompt (list_p[index_p]), sorted by model."""
    #"prompt":p[0],"nprompt":p[1],"width":p[2],"height":p[3],"steps":p[4],"cfg":p[5],"seed":p[6]
    gallery = reset_gallery(gallery)
    wanted_prompt = list_p[index_p][0]
    for entry in cache_image[f"{id_session}"]:
        if entry["prompt"] == wanted_prompt:
            gallery = add_gallery(entry["image"], entry["model"], gallery)
    if gallery:
        gallery = sorted(gallery, key=itemgetter(1))
    return gr.Gallery(gallery, visible=True)
def index_gallery_next(i, list_models):
    """Advance the model index by one (wrapping) and report direction +1."""
    next_index = (i + 1) % len(list_models)
    return gr.Number(next_index), gr.Number(1)
def index_gallery_prev(i, list_models):
    """Step the model index back by one (wrapping) and report direction -1."""
    prev_index = (i - 1) % len(list_models)
    return gr.Number(prev_index), gr.Number(-1)
def change_text_model_actu_gal(list_models, index):
    """Label '(k/n) model-name' for the currently displayed model."""
    count = len(list_models)
    position = (index % count) + 1
    return gr.Textbox(f"({position}/{count}) {list_models[index]}")
def fonc_add_to_text(text, list_models, index):
    """Append the selected model name (quoted, comma-terminated) to the text."""
    quoted = f"\"{list_models[index]}\",\n"
    return gr.Textbox(text + quoted)
def load_model_publ(choice_model_publ):
    """Reset the public image and remember the chosen model in a hidden textbox."""
    cleared_image = gr.Image(None, label=choice_model_publ, interactive=False)
    hidden_name = gr.Textbox(choice_model_publ, visible=False, show_label=False)
    return cleared_image, hidden_name
def set_tasks_w(id_session, list_p, nb_i):
    """Queue nb_i generations of every prompt for the warm-models mode.

    Each queue entry is [remaining, task-params-dict, in-flight]; the
    session's progress counters are (re)initialised for warm mode.
    FIX: removed the unused local `list_t`.
    """
    for p in list_p:
        # p = [prompt, nprompt, width, height, steps, cfg, seed]
        cache_list_task_w[f"{id_session}"].append(
            [nb_i,
             {"prompt": p[0], "nprompt": p[1], "width": p[2], "height": p[3],
              "steps": p[4], "cfg": p[5], "seed": p[6]},
             0])
    cache_text_actu[f"{id_session}"] = {
        "nb_modules_use": nb_req_simult, "stop": False, "nb_fail": 0, "warm": True,
        "nb_prompt_to_do": len(list_p), "nb_prompt_tot": len(list_p),
        "nb_tasks_to_do": len(list_p)*nb_i, "nb_tasks_tot": len(list_p)*nb_i}
    return
def fonc_start_w(id_session,id_module,s,cont,nb_modules,tag):
    """One warm-mode generation step executed by module `id_module`.

    Reserves one unit of work from the prompt queue, picks a random
    currently-warm model, generates one image and updates the session
    counters. Returns (image, chaining textbox, refresh trigger).
    """
    # Stop when the user pressed STOP or this module exceeds the requested count.
    if cont==False or id_module>=nb_modules:
        cache_text_actu[f"{id_session}"]["nb_modules_use"]-=1
        print("manual stop")
        return None,gr.Textbox(s),gr.Number(randint(1,MAX_SEED))
    find_task=False
    task={}
    # Reserve one unit of work: each entry is [remaining, params, in-flight].
    for t in cache_list_task_w[f"{id_session}"]:
        if not find_task:
            if t[0]>0:
                t[0]-=1
                t[2]+=1
                task=t[1].copy()
                find_task=True
    if not find_task:
        cache_text_actu[f"{id_session}"]["nb_modules_use"]-=1
        return None,gr.Textbox(s),gr.Number(randint(1,MAX_SEED))
    if tag == "":
        tagT=["stable-diffusion-xl"]
    else:
        tagT=["stable-diffusion-xl",tag]
    # Warm (recently used) models of the John6666 account matching the tag.
    models_temp , models_plus_tags_temp = find_warm_model_list("John6666", tagT, "", "last_modified", 10000)
    models_rand=[]
    for m in models_temp:
        if m in models:
            models_rand.append(m)
    if len(models_rand)!=0:
        # FIX: pick from the FILTERED list models_rand — the old code indexed
        # models_temp with a models_rand-sized index, so it could select a
        # warm model that is not in this app's model list.
        model_actu=models_rand[randint(0,len(models_rand)-1)]
        print(f"find model : {model_actu}")
    else:
        print("no warm model")
        # NOTE(review): the reserved unit of work (t[0]-=1 / t[2]+=1 above) is
        # not returned to the queue here — pre-existing behavior, kept as-is.
        cache_text_actu[f"{id_session}"]["nb_modules_use"]-=1
        return None,gr.Textbox(s),gr.Number(randint(1,MAX_SEED))
    print("begin gen image:")
    print(model_actu)
    print(task)
    result=gen_fn(model_actu, task["prompt"], task["nprompt"], task["height"], task["width"], task["steps"], task["cfg"], task["seed"],inference_timeout_w)
    print("reception")
    if result!=None:
        id_image=len(cache_image[f"{id_session}"])
        for t in cache_list_task_w[f"{id_session}"]:
            if t[1]==task:
                t[2]-=1
                if t[0]+t[2]<=0:
                    # Prompt fully generated: drop it and count it done.
                    cache_list_task_w[f"{id_session}"].remove(t)
                    cache_text_actu[f"{id_session}"]["nb_prompt_to_do"]-=1
        cache_text_actu[f"{id_session}"]["nb_tasks_to_do"]-=1
        # Record the result (keys added AFTER the t[1]==task comparison above).
        task["id_image"]=id_image
        task["model"]=model_actu
        task["image"]=result
        cache_image[f"{id_session}"].append(task)
        print("image saved\n")
        print(task)
    else:
        # Failure: put the reserved unit of work back in the queue.
        for t in cache_list_task_w[f"{id_session}"]:
            if t[1]==task:
                t[0]+=1
                t[2]-=1
        cache_text_actu[f"{id_session}"]["nb_fail"]+=1
        print("fail to generate\n")
    nb_task_to_do=cache_text_actu[str(id_session)]["nb_tasks_to_do"]
    print(f"\n {nb_task_to_do} tasks to do\n")
    return result , gr.Textbox(s+"1"),gr.Number(randint(1,MAX_SEED))
def fonc_update_actu_w(id):
    """Periodic status text for warm mode; 'waiting...' until the session is warm."""
    if id == 0 :
        return gr.Textbox("waiting...")
    state = cache_text_actu[str(id)]
    if state['warm'] == False:
        # Normal-mode sessions are handled by fonc_update_actu_2 instead.
        return gr.Textbox("waiting...")
    s = (f"modules: {state['nb_modules_use']}/{nb_req_simult} \n"
         f"prompts remaining: {state['nb_prompt_to_do']}/{state['nb_prompt_tot']}\n"
         f"images remaining(done): {state['nb_tasks_to_do']}({state['nb_tasks_tot']-state['nb_tasks_to_do']})/{state['nb_tasks_tot']}\n"
         f"fail attempt: {state['nb_fail']}\n")
    #s+=f"{tempo_update_actu*incr_update_actu[str(id)]} s"
    #incr_update_actu[str(id)]+=1
    # Random suffix so Gradio always sees the value as changed and refreshes.
    s += f"{randint(1,MAX_SEED)}"
    return gr.Textbox(s)
def make_me(): | |
with gr.Tab(" "): | |
with gr.Column(): | |
with gr.Row(): | |
with gr.Column(scale=4): | |
prompt_publ=gr.Textbox(label='Your prompt:', lines=4, interactive = True) | |
choice_model_publ=gr.Dropdown(label="List of Models", choices=list(models_publ),value=models_publ[0]) | |
gen_button_publ = gr.Button('Generate images',scale=2) | |
image_publ=gr.Image(None,label=models_publ[0],interactive=False) | |
current_models_publ=gr.Textbox(models_publ[0],visible=False,show_label=False) | |
choice_model_publ.change(load_model_publ,[choice_model_publ],[image_publ,current_models_publ]) | |
gen_event_publ = gr.on(triggers=[gen_button_publ.click, prompt_publ.submit], fn=gen_fn, | |
inputs=[choice_model_publ, prompt_publ], outputs=[image_publ]) | |
with gr.Row(): | |
with gr.Column(scale=4): | |
test_pass=gr.Textbox(show_label=False,lines=1, interactive = True) | |
button_test_pass=gr.Button(" ",scale=1) | |
with gr.Tab(" Sort ",visible=False) as tab_p: | |
button_test_pass.click(test_pass_aff,[test_pass],[tab_p]) | |
with gr.Column(): | |
with gr.Group(): | |
with gr.Row(): | |
with gr.Column(scale=4): | |
txt_input = gr.Textbox(label='Your prompt:', lines=4, interactive = True) | |
neg_input = gr.Textbox(label='Negative prompt:', lines=4, interactive = True) | |
with gr.Column(scale=4): | |
with gr.Row(): | |
width = gr.Slider(label="Width", info="If 0, the default value is used.", maximum=2024, step=32, value=0, interactive = True) | |
height = gr.Slider(label="Height", info="If 0, the default value is used.", maximum=2024, step=32, value=0, interactive = True) | |
with gr.Row(): | |
choice_ratio = gr.Dropdown(label="Ratio Width/Height", | |
info="OverWrite Width and Height (W*H<1024*1024)", | |
show_label=True, choices=list(list_ratios) , interactive = True, value=list_ratios[0][1]) | |
choice_ratio.change(ratio_chosen,[choice_ratio,width,height],[width,height]) | |
with gr.Row(): | |
steps = gr.Slider(label="Number of inference steps", info="If 0, the default value is used.", maximum=100, step=1, value=0, interactive = True) | |
cfg = gr.Slider(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=0, interactive = True) | |
seed = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1, interactive = True) | |
add_param=gr.Button("Add to the list") | |
del_param=gr.Button("Delete to the list") | |
#gen_button = gr.Button('Generate images', scale=3) | |
#stop_button = gr.Button('Stop', variant='secondary', interactive=False, scale=1) | |
#gen_button.click(lambda: gr.update(interactive=True), None, stop_button) | |
list_param=gr.Dropdown(choices=[["a",[["","",0,0,0,0,-1]]]], value=[["","",0,0,0,0,-1]], visible=False) | |
disp_param = gr.Examples( | |
label="list of prompt", | |
examples=list_param.value, | |
inputs=[txt_input,neg_input,width,height,steps,cfg,seed], | |
outputs=[txt_input,neg_input,width,height,steps,cfg,seed], | |
) | |
with gr.Accordion("Restore Session",open=False) : | |
with gr.Row(): | |
text_info_session=gr.Textbox() | |
with gr.Column(): | |
button_info_session=gr.Button("Get infos sessions") | |
button_info_session.click(print_info_sessions,[],[text_info_session]) | |
id_session=gr.Number(0,interactive = True,label="ID session",show_label=True) | |
button_restore_session=gr.Button("Restore Session") | |
add_param.click(fonc_add_param,[list_param,txt_input,neg_input,width,height,steps,cfg,seed],[disp_param.dataset,list_param]) | |
add_param.click(set_session,[id_session],[id_session]) | |
del_param.click(fonc_del_param,[list_param,txt_input,neg_input,width,height,steps,cfg,seed],[disp_param.dataset,list_param]) | |
with gr.Row():
# Hidden dropdown abused as a state holder: carries the list of models to
# generate with; its .change event (bound below) rebuilds the task list.
list_models_to_gen=gr.Dropdown(choices=[["",[]]], value=[], visible=False)
disp_info=gr.Textbox(label="Info")
with gr.Column():
with gr.Row():
nb_images_by_prompt=gr.Number(2,label="Number of images by prompt:",interactive=True)
nb_of_models_to_gen=gr.Number(10,label="Number of Models:",interactive=True)
# Tag and first-model dropdowns use type="index": handlers receive positions,
# not labels, and index into tags_plus_models.
index_tag=gr.Dropdown(label="Tag",choices=list(list_tags),type="index")
index_first_model=gr.Dropdown(label="First model",choices=list([]), type="index")
# Selecting a tag repopulates the "First model" choices with that tag's models.
index_tag.change(lambda i:gr.Dropdown(choices=list([f"({j+1}/{len(tags_plus_models[i][2])}) {tags_plus_models[i][2][j]}" for j in range(len(tags_plus_models[i][2]))])),
index_tag,index_first_model)
load_info=gr.Button("Load Models")
load_info.click(fonc_load_info,[nb_of_models_to_gen,index_tag,index_first_model],[nb_of_models_to_gen,disp_info,list_models_to_gen])
button_load_random_models=gr.Button("Load Random Models")
button_load_random_models.click(load_random_models,[nb_of_models_to_gen,index_tag],[nb_of_models_to_gen,disp_info,list_models_to_gen])
# Alternative model source: paste a raw text list instead of picking by tag.
with gr.Accordion("Models Custom",open=False) :
with gr.Row():
text_list_model_custom=gr.Textbox(label="List Models Custom")
with gr.Column():
list_model_custom=gr.Dropdown(choices=[["",[]]], value=[], visible=False)
#use_models_custom=gr.Radio("Use Models Custom",value=False)
# Parse the pasted text into the hidden custom-model dropdown.
cut_model_custom=gr.Button("Cut Text Models Custom")
cut_model_custom.click(aff_models_perso,[text_list_model_custom],[list_model_custom])
index_first_model_custom=gr.Dropdown(label="First model",choices=list([]), type="index")
list_model_custom.change(lambda li:gr.Dropdown(choices=list([f"({j+1}/{len(li)}) {li[j]}" for j in range(len(li))])),
[list_model_custom],index_first_model_custom)
load_model_custom=gr.Button("Load Models Custom")
load_model_custom.click(fonc_load_info_custom,[nb_of_models_to_gen,list_model_custom,index_first_model_custom],[nb_of_models_to_gen,disp_info,list_models_to_gen])
# Whenever the model list changes (tag, random or custom path), rebuild the
# pending task list for this session.
list_models_to_gen.change(crea_list_task,[id_session,list_param,list_models_to_gen,nb_images_by_prompt],[])
with gr.Column():
# START/STOP only toggle the hidden `cont` flag; the worker slots poll it.
button_start=gr.Button("START")
button_stop=gr.Button("STOP")
cont=gr.Checkbox(True,visible=False)
button_start.click(lambda:True,[],[cont])
button_stop.click(lambda:False,[],[cont])
#text_actu=gr.Textbox("",label="in progress",interactive=False,lines=6)
# Progress display refreshed every tempo_update_actu seconds via `every=`.
text_actu=gr.Textbox(fonc_update_actu_2,inputs=id_session,every=tempo_update_actu,label="in progress",interactive=False,lines=6)
update_actu=gr.Number(0,visible=False)
#update_actu.change(fonc_update_actu,[text_actu,id_session],[text_actu])
#button_start.click(fonc_update_actu,[text_actu,id_session],[text_actu])
#button_start.click(lambda:gr.Number(0),[],[incr_update_actu])
# --- Sizing controls shared by every gallery, plus image-cache reset buttons ---
with gr.Accordion("Gallery Parameters",open=False) :
with gr.Row():
with gr.Column():
set_height_gallery=gr.Checkbox(True,label="set height",show_label=True)
height_gallery=gr.Number(650,label="height",show_label=True)
col_gallery=gr.Number(5,label="nb columns",show_label=True)
row_gallery=gr.Number(4,label="nb row",show_label=True)
with gr.Column():
# Drop cached images for the current session only.
button_reset_cache_image=gr.Button("Reset Images")
button_reset_cache_image.click(reset_cache_image,[id_session],[])
# Drop cached images for every session (global reset).
button_reset_cache_image_all_session=gr.Button("Reset Images ALL SESSION")
button_reset_cache_image_all_session.click(reset_cache_image_all_sessions,[],[])
with gr.Row():
# Pool of nb_req_simult hidden "worker slots": each slot is an (image,
# slot-id, state) triple.  A slot's state Textbox changing fires fonc_start,
# which runs one generation task and re-arms the slot, so up to
# nb_req_simult requests are in flight concurrently.
outputs=[]
id_modules=[]
states=[]
for i in range(nb_req_simult):
#outputs.append(gr.Image(None,interactive=False,render=False))
#id_modules.append(gr.Number(i,interactive=False,render=False))
outputs.append(gr.Image(None,interactive=False,visible=False))
id_modules.append(gr.Number(i,interactive=False,visible=False))
states.append(gr.Textbox("1",interactive=False,visible=False))
for o,i,s in zip(outputs,id_modules,states):
#o.change(fonc_start,[id_session,i],[o])
#o.change(test_change,[],[])
s.change(fonc_start,[id_session,i,s,cont,list_models_to_gen],[o,s,update_actu])
#button_start.click(lambda : gr.Image(None),[],[o])
# START primes every slot once through fonc_init; gen_event is rebound each
# iteration so only the last handle survives (harmless — never read back).
gen_event = gr.on(triggers=[button_start.click], fn=fonc_init,inputs=[s], outputs=[s])
with gr.Column(scale=2):
# Main gallery: all images of the session.
gallery = gr.Gallery(label="Output", show_download_button=True, elem_classes="gallery",
interactive=False, show_share_button=False, container=True, format="png",
preview=True, object_fit="contain",columns=5,rows=4,height=650)
# Alternate gallery filtered by prompt; hidden until "Load Gallery by prompt".
gallery_by_prompt = gr.Gallery(label="Output", show_download_button=True, elem_classes="gallery",
interactive=False, show_share_button=False, container=True, format="png",
preview=True, object_fit="contain",columns=5,rows=4,visible=False,height=650)
gallery_models=[]
index_gallery=[]
# Wire the shared sizing controls to the main gallery.
set_height_gallery.change(lambda g,h,s: gr.Gallery(g,height=h) if s else gr.Gallery(g,height=None),
[gallery,height_gallery,set_height_gallery],[gallery])
height_gallery.change(lambda g,h: gr.Gallery(g,height=h),[gallery,height_gallery],[gallery])
col_gallery.change(lambda g,h: gr.Gallery(g,columns=h),[gallery,col_gallery],[gallery])
row_gallery.change(lambda g,h: gr.Gallery(g,rows=h),[gallery,row_gallery],[gallery])
# One hidden gallery per "page" of the by-model browser, each with the same
# sizing wiring as the main gallery.
for i in range(nb_gallery_model):
gallery_models.append(gr.Gallery(label="Output", show_download_button=True, elem_classes="gallery",
interactive=False, show_share_button=False, container=True, format="png",
preview=True, object_fit="contain",columns=5,rows=4,visible=False,height=650))
index_gallery.append(gr.Number(i,visible=False))
set_height_gallery.change(lambda g,h,s: gr.Gallery(g,height=h) if s else gr.Gallery(g,height=None),
[gallery_models[i],height_gallery,set_height_gallery],[gallery_models[i]])
height_gallery.change(lambda g,h: gr.Gallery(g,height=h),[gallery_models[i],height_gallery],[gallery_models[i]])
col_gallery.change(lambda g,h: gr.Gallery(g,columns=h),[gallery_models[i],col_gallery],[gallery_models[i]])
row_gallery.change(lambda g,h: gr.Gallery(g,rows=h),[gallery_models[i],row_gallery],[gallery_models[i]])
with gr.Column(scale=3):
button_load_gallery=gr.Button("Load Gallery All")
button_load_gallery.click(fonc_load_gallery,[id_session,gallery],[gallery])
# --- Browse images one model at a time (Prev/Next cycle through models) ---
with gr.Accordion("Gallery by Model",open=True) :
index_gallery_m=gr.Number(0,visible=False)
button_load_gallery_first=gr.Button("Init Gallery by model")
with gr.Row():
button_load_gallery_prev=gr.Button("Prev model")
button_load_gallery_next=gr.Button("Next model")
# direction_gallery records which way the user navigated (set by the
# index_gallery_next/prev handlers, read by fonc_move_gallery_by_model).
direction_gallery=gr.Number(0,visible=False)
button_load_gallery_next.click(index_gallery_next,[index_gallery_m,list_models_to_gen],[index_gallery_m,direction_gallery])
button_load_gallery_prev.click(index_gallery_prev,[index_gallery_m,list_models_to_gen],[index_gallery_m,direction_gallery])
for g,i in zip(gallery_models,index_gallery):
# Moving the model index refreshes each per-model gallery page.
index_gallery_m.change(fonc_move_gallery_by_model,[id_session,g,i,list_models_to_gen,index_gallery_m,direction_gallery],[g])
gen_event_gallery_first = gr.on(triggers=[button_load_gallery_first.click], fn=fonc_load_gallery_by_model,
inputs=[id_session,g,list_models_to_gen,i,index_gallery_m,gallery], outputs=[g,gallery])
# Loading the "all" gallery hides the per-model pages.
gen_event_gallery_first_all = gr.on(triggers=[button_load_gallery.click], fn=lambda g:gr.Gallery(g,visible=False),
inputs=[g], outputs=[g])
text_model_actu_gal = gr.Textbox(label='Model Actu:', lines=1, interactive = False)
index_gallery_m.change(change_text_model_actu_gal,[list_models_to_gen,index_gallery_m],[text_model_actu_gal])
with gr.Row():
with gr.Column():
# Append the currently displayed model to the blacklist text box.
button_add_to_bl=gr.Button("Add to Blacklist")
#button_remove_from_bl=gr.Button("Remove from Blacklist")
text_bl=gr.Textbox(label='Blacklist', lines=5, interactive = True)
button_add_to_bl.click(fonc_add_to_text,[text_bl,list_models_to_gen,index_gallery_m],[text_bl])
#button_remove_from_bl.click(fonc_remove_from_text,[text_bl,list_models_to_gen,index_gallery_m],[text_bl])
with gr.Column():
# Same mechanism for a favourites list.
button_add_to_fav=gr.Button("Add to Favlist")
text_fav=gr.Textbox(label='Favlist', lines=5, interactive = True)
button_add_to_fav.click(fonc_add_to_text,[text_fav,list_models_to_gen,index_gallery_m],[text_fav])
# --- Browse images one prompt at a time ---
with gr.Accordion("Gallery by Prompt",open=False) :
index_gallery_by_prompt=gr.Number(0,visible=False)
button_load_gallery_by_prompt=gr.Button("Load Gallery by prompt")
# "current/total" position indicator, kept in sync with the index below.
text_gallery_by_prompt=gr.Textbox(f"{index_gallery_by_prompt.value+1}/{len(list_param.value)}",show_label=False)
index_gallery_by_prompt.change(lambda i,p:gr.Textbox(f"{i+1}/{len(p)}"),[index_gallery_by_prompt,list_param],[text_gallery_by_prompt])
button_load_gallery_by_prompt.click(load_gallery_by_prompt,
[id_session,gallery_by_prompt,index_gallery_by_prompt,list_param],[gallery_by_prompt])
# Only one gallery family is visible at a time: each "load" button hides
# the other galleries.
gen_event_gallery_by_prompt = gr.on(triggers=[button_load_gallery_by_prompt.click], fn=lambda g:gr.Gallery(g,visible=False),
inputs=[gallery], outputs=[gallery])
gen_event_gallery_first = gr.on(triggers=[button_load_gallery_first.click], fn=lambda g:gr.Gallery(g,visible=False),
inputs=[gallery_by_prompt], outputs=[gallery_by_prompt])
gen_event_gallery = gr.on(triggers=[button_load_gallery.click], fn=lambda g:gr.Gallery(g,visible=False),
inputs=[gallery_by_prompt], outputs=[gallery_by_prompt])
for g,i in zip(gallery_models,index_gallery):
gen_event_gallery_by_prompt = gr.on(triggers=[button_load_gallery_by_prompt.click], fn=lambda g:gr.Gallery(g,visible=False),
inputs=[g], outputs=[g])
with gr.Row():
button_gallery_prev_prompt=gr.Button("Prev prompt")
button_gallery_next_prompt=gr.Button("Next prompt")
# Prompt index wraps around with modulo arithmetic.
button_gallery_next_prompt.click(lambda i,p: (i+1)%len(p),[index_gallery_by_prompt,list_param],[index_gallery_by_prompt])
button_gallery_prev_prompt.click(lambda i,p: (i-1)%len(p),[index_gallery_by_prompt,list_param],[index_gallery_by_prompt])
index_gallery_by_prompt.change(load_gallery_by_prompt,
[id_session,gallery_by_prompt,index_gallery_by_prompt,list_param],[gallery_by_prompt])
set_height_gallery.change(lambda g,h,s: gr.Gallery(g,height=h) if s else gr.Gallery(g,height=None),
[gallery_by_prompt,height_gallery,set_height_gallery],[gallery_by_prompt])
height_gallery.change(lambda g,h: gr.Gallery(g,height=h),[gallery_by_prompt,height_gallery],[gallery_by_prompt])
col_gallery.change(lambda g,h: gr.Gallery(g,columns=h),[gallery_by_prompt,col_gallery],[gallery_by_prompt])
row_gallery.change(lambda g,h: gr.Gallery(g,rows=h),[gallery_by_prompt,row_gallery],[gallery_by_prompt])
# Restore a saved session: reload its prompts, examples and model list.
button_restore_session.click(fonc_restore_session,[id_session],[list_param,disp_param.dataset,list_models_to_gen,nb_of_models_to_gen])
# --- "Warm" tab: hidden until the correct password is entered (test_pass_aff
# toggles its visibility).  Mirrors the main tab's prompt inputs. ---
with gr.Tab(" Warm ",visible=False) as tab_warm:
button_test_pass.click(test_pass_aff,[test_pass],[tab_warm])
with gr.Column():
with gr.Group():
with gr.Row():
with gr.Column(scale=4):
txt_input_w = gr.Textbox(label='Your prompt:', lines=4, interactive = True)
neg_input_w = gr.Textbox(label='Negative prompt:', lines=4, interactive = True)
with gr.Column(scale=4):
with gr.Row():
# NOTE(review): maximum=2024 is not a multiple of step=32 — likely a typo for 2048; confirm.
width_w = gr.Slider(label="Width", info="If 0, the default value is used.", maximum=2024, step=32, value=0, interactive = True)
height_w = gr.Slider(label="Height", info="If 0, the default value is used.", maximum=2024, step=32, value=0, interactive = True)
with gr.Row():
# Preset ratio selector: picking a ratio overwrites the Width/Height sliders.
choice_ratio_w = gr.Dropdown(label="Ratio Width/Height",
info="OverWrite Width and Height (W*H<1024*1024)",
show_label=True, choices=list(list_ratios) , interactive = True, value=list_ratios[0][1])
# BUGFIX: the inputs previously passed `width` (the main tab's slider) instead of
# this tab's `width_w`, so the handler read the wrong component's value.
choice_ratio_w.change(ratio_chosen,[choice_ratio_w,width_w,height_w],[width_w,height_w])
with gr.Row():
steps_w = gr.Slider(label="Number of inference steps", info="If 0, the default value is used.", maximum=100, step=1, value=0, interactive = True)
cfg_w = gr.Slider(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=0, interactive = True)
seed_w = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1, interactive = True)
add_param_w=gr.Button("Add to the list")
del_param_w=gr.Button("Delete to the list")
# Hidden state holder for this tab's prompt/parameter tuples.
list_param_w=gr.Dropdown(choices=[["a",[["","",0,0,0,0,-1]]]], value=[["","",0,0,0,0,-1]], visible=False)
disp_param_w = gr.Examples(
label="list of prompt",
examples=list_param_w.value,
inputs=[txt_input_w,neg_input_w,width_w,height_w,steps_w,cfg_w,seed_w],
outputs=[txt_input_w,neg_input_w,width_w,height_w,steps_w,cfg_w,seed_w],
)
# Flags the session as "warm" when passed to set_session below.
bool_warm=gr.Checkbox(visible=False,value=True)
add_param_w.click(fonc_add_param,[list_param_w,txt_input_w,neg_input_w,width_w,height_w,steps_w,cfg_w,seed_w],[disp_param_w.dataset,list_param_w])
add_param_w.click(set_session,[id_session,bool_warm],[id_session])
del_param_w.click(fonc_del_param,[list_param_w,txt_input_w,neg_input_w,width_w,height_w,steps_w,cfg_w,seed_w],[disp_param_w.dataset,list_param_w])
with gr.Row():
nb_images_by_prompt_w=gr.Number(2,label="Images by prompt",interactive = True)
# Number of worker slots actually used, capped at the pool size.
nb_modules_w=gr.Slider(label="Module use", minimum=1, maximum=nb_req_simult, step=1, value=40, interactive = True)
text_tag_w=gr.Textbox("",label="Tag",interactive = True,lines=1)
with gr.Column():
# LOAD builds the warm task list; START/STOP toggle the hidden cont_w flag.
button_load_task_w=gr.Button("LOAD")
button_load_task_w.click(set_tasks_w,[id_session,list_param_w,nb_images_by_prompt_w],[])
button_start_w=gr.Button("START")
button_stop_w=gr.Button("STOP")
cont_w=gr.Checkbox(True,visible=False)
button_start_w.click(lambda:True,[],[cont_w])
button_stop_w.click(lambda:False,[],[cont_w])
# Warm-tab progress display, refreshed every tempo_update_actu seconds.
text_actu_w=gr.Textbox(fonc_update_actu_w,inputs=id_session,every=tempo_update_actu,label="in progress",interactive=False,lines=6)############
update_actu_w=gr.Number(0,visible=False)
# Same gallery sizing / cache-reset controls as the main tab, warm variants.
with gr.Accordion("Gallery Parameters",open=False) :
with gr.Row():
with gr.Column():
set_height_gallery_w=gr.Checkbox(True,label="set height",show_label=True)
height_gallery_w=gr.Number(650,label="height",show_label=True)
col_gallery_w=gr.Number(5,label="nb columns",show_label=True)
row_gallery_w=gr.Number(4,label="nb row",show_label=True)
with gr.Column():
button_reset_cache_image_w=gr.Button("Reset Images")
button_reset_cache_image_w.click(reset_cache_image,[id_session],[])
button_reset_cache_image_all_session_w=gr.Button("Reset Images ALL SESSION")
button_reset_cache_image_all_session_w.click(reset_cache_image_all_sessions,[],[])
with gr.Row():
# Warm-tab worker-slot pool — same self-re-arming mechanism as the main tab,
# driven by fonc_start_w and bounded at runtime by nb_modules_w.
outputs_w=[]
id_modules_w=[]
states_w=[]
for i in range(nb_req_simult):
outputs_w.append(gr.Image(None,interactive=False,visible=False))
id_modules_w.append(gr.Number(i,interactive=False,visible=False))
states_w.append(gr.Textbox("1",interactive=False,visible=False))
for o,i,s in zip(outputs_w,id_modules_w,states_w):
s.change(fonc_start_w,[id_session,i,s,cont_w,nb_modules_w,text_tag_w],[o,s,update_actu_w])#################
# gen_event is rebound each iteration; only the last handle is kept (unused).
gen_event = gr.on(triggers=[button_start_w.click], fn=fonc_init,inputs=[s], outputs=[s])###################
with gr.Column(scale=2):
# Warm-tab galleries: "all" plus a by-prompt view, mirroring the main tab.
gallery_w = gr.Gallery(label="Output", show_download_button=True, elem_classes="gallery",
interactive=False, show_share_button=False, container=True, format="png",
preview=True, object_fit="contain",columns=5,rows=4,height=650)
gallery_by_prompt_w = gr.Gallery(label="Output", show_download_button=True, elem_classes="gallery",
interactive=False, show_share_button=False, container=True, format="png",
preview=True, object_fit="contain",columns=5,rows=4,visible=False,height=650)
# Sizing wiring for both warm galleries.
set_height_gallery_w.change(lambda g,h,s: gr.Gallery(g,height=h) if s else gr.Gallery(g,height=None),
[gallery_w,height_gallery_w,set_height_gallery_w],[gallery_w])
height_gallery_w.change(lambda g,h: gr.Gallery(g,height=h),[gallery_w,height_gallery_w],[gallery_w])
col_gallery_w.change(lambda g,h: gr.Gallery(g,columns=h),[gallery_w,col_gallery_w],[gallery_w])
row_gallery_w.change(lambda g,h: gr.Gallery(g,rows=h),[gallery_w,row_gallery_w],[gallery_w])
set_height_gallery_w.change(lambda g,h,s: gr.Gallery(g,height=h) if s else gr.Gallery(g,height=None),
[gallery_by_prompt_w,height_gallery_w,set_height_gallery_w],[gallery_by_prompt_w])
height_gallery_w.change(lambda g,h: gr.Gallery(g,height=h),[gallery_by_prompt_w,height_gallery_w],[gallery_by_prompt_w])
col_gallery_w.change(lambda g,h: gr.Gallery(g,columns=h),[gallery_by_prompt_w,col_gallery_w],[gallery_by_prompt_w])
row_gallery_w.change(lambda g,h: gr.Gallery(g,rows=h),[gallery_by_prompt_w,row_gallery_w],[gallery_by_prompt_w])
with gr.Column(scale=3):
button_load_gallery_w=gr.Button("Load Gallery All")
button_load_gallery_w.click(fonc_load_gallery,[id_session,gallery_w],[gallery_w])
with gr.Accordion("Gallery by Prompt",open=False) :
index_gallery_by_prompt_w=gr.Number(0,visible=False)
button_load_gallery_by_prompt_w=gr.Button("Load Gallery by prompt")
# "current/total" prompt position indicator.
text_gallery_by_prompt_w=gr.Textbox(f"{index_gallery_by_prompt_w.value+1}/{len(list_param_w.value)}",show_label=False)
index_gallery_by_prompt_w.change(lambda i,p:gr.Textbox(f"{i+1}/{len(p)}"),[index_gallery_by_prompt_w,list_param_w],[text_gallery_by_prompt_w])
button_load_gallery_by_prompt_w.click(load_gallery_by_prompt,
[id_session,gallery_by_prompt_w,index_gallery_by_prompt_w,list_param_w],[gallery_by_prompt_w])
# Each "load" button hides the other gallery so only one is visible.
gen_event_gallery_by_prompt_w = gr.on(triggers=[button_load_gallery_by_prompt_w.click], fn=lambda g:gr.Gallery(g,visible=False),
inputs=[gallery_w], outputs=[gallery_w])
gen_event_gallery_w = gr.on(triggers=[button_load_gallery_w.click], fn=lambda g:gr.Gallery(g,visible=False),
inputs=[gallery_by_prompt_w], outputs=[gallery_by_prompt_w])
with gr.Row():
button_gallery_prev_prompt_w=gr.Button("Prev prompt")
button_gallery_next_prompt_w=gr.Button("Next prompt")
# Prompt index wraps around with modulo arithmetic.
button_gallery_next_prompt_w.click(lambda i,p: (i+1)%len(p),[index_gallery_by_prompt_w,list_param_w],[index_gallery_by_prompt_w])
button_gallery_prev_prompt_w.click(lambda i,p: (i-1)%len(p),[index_gallery_by_prompt_w,list_param_w],[index_gallery_by_prompt_w])
index_gallery_by_prompt_w.change(load_gallery_by_prompt,
[id_session,gallery_by_prompt_w,index_gallery_by_prompt_w,list_param_w],[gallery_by_prompt_w])
# JavaScript to run when the app loads.
js_code = """
console.log('ghgh');
"""

# BUGFIX: the script was previously injected via gr.Markdown("<script>...</script>"),
# but Gradio sanitizes HTML in Markdown, so the script never executed.  Passing it
# through Blocks(js=...) runs it on page load as intended.
with gr.Blocks(theme="Nymbo/Nymbo_Theme", fill_width=True, js=js_code, css="div.float.svelte-1mwvhlq { position: absolute; top: var(--block-label-margin); left: var(--block-label-margin); background: none; border: none;}") as demo:
    make_me()

# https://www.gradio.app/guides/setting-up-a-demo-for-maximum-performance
# concurrency_count was removed in Gradio 4.x; default_concurrency_limit replaces it.
demo.queue(default_concurrency_limit=200, max_size=200)
demo.launch(max_threads=400)