Spaces:
Running
Running
File size: 61,203 Bytes
351ffe3 c84e807 351ffe3 e68cc2f e86f2e3 1f9c7c5 3983f0d ad248c9 351ffe3 c84e807 e86f2e3 351ffe3 c6b7c7e 3983f0d 101951c c84e807 351ffe3 0a68b0d e6e6106 0a68b0d b3d45b6 0cc191e 351ffe3 0cc191e 351ffe3 0cc191e 351ffe3 e86f2e3 351ffe3 e86f2e3 dedc0a4 351ffe3 57bdf63 351ffe3 c84e807 351ffe3 c84e807 351ffe3 d860607 351ffe3 c84e807 351ffe3 c6b7c7e 3983f0d c84e807 101951c c84e807 ad248c9 351ffe3 101951c 351ffe3 101951c 351ffe3 101951c 351ffe3 101951c 351ffe3 b205d6e 85b6619 37ca3ce b205d6e 098483c 6c1c4b0 c24cfb9 8583df7 5971562 3f38010 ad67e2e 22e71c8 3f38010 ac296b2 3f38010 ad67e2e 3f38010 ad67e2e b3bcfcb ad67e2e c84e807 b3bcfcb 391e734 b3bcfcb c6b7c7e 101951c c6b7c7e 1f9c7c5 c6b7c7e b4f7961 c6b7c7e 3673052 c6b7c7e 1f9c7c5 3673052 c6b7c7e 3673052 c6b7c7e b3bcfcb c84e807 3983f0d 1f9c7c5 3673052 3983f0d a1f7bcb 1f9c7c5 ad248c9 1f9c7c5 a1f7bcb 5600670 3983f0d ad248c9 c84e807 ad248c9 c84e807 ad248c9 c84e807 ad248c9 c84e807 ad248c9 3673052 c84e807 ad248c9 3673052 ad248c9 b3bcfcb 1b8076f 5127b7b 1b8076f 5127b7b dedc0a4 1f9c7c5 3673052 1f9c7c5 3673052 1f9c7c5 3983f0d 1f9c7c5 ac95a3c 3983f0d 93b8a4f 3983f0d 1b8076f 3673052 84dfe4b 3673052 1b8076f 3673052 611f7de 3673052 005e741 3673052 005e741 1f9c7c5 005e741 1f9c7c5 3983f0d 27d6273 3983f0d 14bd628 c6efa57 f6ac67a 3673052 e86f2e3 3673052 14bd628 1b8076f 93b8a4f 1b8076f 1f9c7c5 1b8076f 3673052 3983f0d 1f9c7c5 3673052 1f9c7c5 3673052 1b8076f 3983f0d 1f9c7c5 1b8076f d5288fa dedc0a4 1e4b93f 3673052 1dda856 3983f0d 1e4b93f bed04b1 1e4b93f bed04b1 3673052 bed04b1 3983f0d 8aee81a dedc0a4 a88c048 8aee81a 1f9c7c5 3673052 1f9c7c5 3673052 b205d6e dedc0a4 1f9c7c5 dedc0a4 1f9c7c5 3e98ab2 1f9c7c5 3e98ab2 bed04b1 dedc0a4 0cc191e c84e807 0cc191e c84e807 351ffe3 0cc191e d33624d 1f03a5b 0cc191e 1f03a5b 0cc191e 391e734 0cc191e b3bcfcb 0cc191e 101951c 0cc191e 101951c b3bcfcb 0cc191e b3bcfcb 0cc191e c84e807 0cc191e 3983f0d 0cc191e ad248c9 0cc191e ad248c9 b205d6e 0cc191e 1f9c7c5 0cc191e b205d6e 1f9c7c5 b205d6e 1f9c7c5 b205d6e 1f9c7c5 b205d6e 0cc191e 
b205d6e 1f9c7c5 b205d6e 0cc191e 101951c eeb3566 c84e807 eeb3566 101951c 351ffe3 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 
528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 
1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 |
import os
import gradio as gr
from random import randint
from operator import itemgetter
import bisect
from all_models import tags_plus_models,models,models_plus_tags,find_warm_model_list
from datetime import datetime
from externalmod import gr_Interface_load
import asyncio
import os
from threading import RLock
lock = RLock()  # serializes image-file writes across worker threads (see infer)
HF_TOKEN = os.environ.get("HF_TOKEN") if os.environ.get("HF_TOKEN") else None # If private or gated models aren't used, ENV setting is unnecessary.
# Number of parallel generation "modules" (workers) a session may use.
nb_req_simult=80 ########
# Max in-flight tasks allowed on one warm model before fonc_start skips it.
max_pending=3
# Size of the rotating ring of per-model galleries.
nb_gallery_model=5
# Refresh period (seconds) for the status textboxes.
tempo_update_actu=3.0
#incr_update_actu={}
now2 = 0
# Generation timeouts (seconds): default, warm-only pipeline (fonc_start_w),
# and warm models inside the standard pipeline (fonc_start).
inference_timeout = 300
inference_timeout_w = 70
inference_timeout_wp = 120
MAX_SEED = 2**32-1
# nb_rep replicas of nb_mod_dif distinct models gives nb_models display slots.
nb_rep=2
nb_mod_dif=20
nb_models=nb_mod_dif*nb_rep
# Per-session state, keyed by the stringified session id.
cache_image={}        # generated results (tuples or task dicts with "image"/"model")
cache_id_image={}     # created/cleared per session; not otherwise used in this chunk
cache_list_task={}    # per-model pending task queues (standard pipeline)
cache_text_actu={}    # status counters rendered by fonc_update_actu*
from_reload={}        # True right after a session restore (skips crea_list_task once)
cache_list_task_w={}  # pending task list for the warm-model pipeline
def load_fn(models):
    """Populate the global model registry from a list of model names.

    Sets the globals ``models_load`` (name -> gradio interface),
    ``num_models`` and ``default_models``.  A model that fails to load is
    replaced by a no-op interface so one broken model does not abort startup.
    """
    global models_load
    global num_models
    global default_models
    models_load = {}
    num_models = len(models)
    # BUG FIX: the empty case previously produced a dict ({}) while the
    # non-empty case produced a list slice; use a list in both cases so
    # consumers can rely on a single type.
    default_models = list(models[:num_models]) if num_models != 0 else []
    for i, model in enumerate(models, start=1):
        # Coarse progress marker while loading many models.
        if i % 50 == 0:
            print("\n\n\n-------"+str(i)+'/'+str(len(models))+"-------\n\n\n")
        if model not in models_load:
            try:
                m = gr_Interface_load(f'models/{model}', hf_token=HF_TOKEN)
            except Exception as error:
                # Fallback: dummy text->image interface that always yields None.
                m = gr.Interface(lambda txt: None, ['text'], ['image'])
                print(error)
            models_load.update({model: m})
# Load every model interface once at import time.
load_fn(models)
tags_plus_models_to_list=[]
list_tags=[]
# Build dropdown labels of the form "tag (count)".
for tag_plus_m in tags_plus_models:
    list_tags.append(tag_plus_m[0]+f" ({tag_plus_m[1]})")
# Public demo list: at most the first 10 models.
models_publ=[]
if len(models)>10:
    nb_publ=10
else:
    nb_publ=len(models)
for i in range(nb_publ):
    models_publ.append(models[i])
def test_pass_aff(test):
    """Show the protected tab only when the entered password matches env var 'p'."""
    is_valid = (test == os.getenv('p'))
    return gr.Tab(visible=is_valid)
# https://huggingface.co./docs/api-inference/detailed_parameters
# https://huggingface.co./docs/huggingface_hub/package_reference/inference_client
async def infer(model_str, prompt, nprompt="", height=None, width=None, steps=None, cfg=None, seed=-1, timeout=inference_timeout):
    """Run one generation on ``models_load[model_str]`` in a worker thread.

    Returns the absolute path of the saved PNG on success, or None on
    timeout/failure.  Unset parameters (0 / None) are omitted from kwargs so
    the model's own defaults apply.
    """
    from pathlib import Path
    kwargs = {}
    # Only forward parameters the user actually set (thresholds filter out
    # the sliders' 0 defaults).
    if height is not None and height >= 256: kwargs["height"] = height
    if width is not None and width >= 256: kwargs["width"] = width
    if steps is not None and steps >= 1: kwargs["num_inference_steps"] = steps
    if cfg is not None and cfg > 0: cfg = kwargs["guidance_scale"] = cfg
    # Negative seed means "random": draw one explicitly.
    if seed >= 0: kwargs["seed"] = seed
    else: kwargs["seed"] = randint(1, MAX_SEED-1)
    # Run the blocking model call in a thread so the event loop stays free.
    task = asyncio.create_task(asyncio.to_thread(models_load[model_str].fn,
                                                 prompt=prompt, negative_prompt=nprompt, **kwargs, token=HF_TOKEN))
    await asyncio.sleep(3)
    try:
        result = await asyncio.wait_for(task, timeout=timeout)
    except (Exception, asyncio.TimeoutError) as e:
        # Any failure (not only timeouts) lands here; cancel if still running.
        print(e)
        print(f"Task timed out: {model_str}")
        if not task.done(): task.cancel()
        result = None
    if task.done() and result is not None:
        # Save under the lock; the triple random component makes collisions
        # between concurrent workers effectively impossible.
        with lock:
            nb_rand1=randint(1, MAX_SEED)
            nb_rand2=randint(1, MAX_SEED)
            nb_rand3=randint(1, MAX_SEED)
            png_path = f"image_{nb_rand1}_{nb_rand2}_{nb_rand3}.png"
            result.save(png_path)
            image = str(Path(png_path).resolve())
            return image
    return None
def gen_fn(model_str, prompt, nprompt="", height=None, width=None, steps=None, cfg=None, seed=-1, timeout=inference_timeout):
    """Synchronous wrapper around infer(); returns an image path or None.

    'NA' is the placeholder model name and short-circuits to None.
    """
    if model_str == 'NA':
        return None
    # BUG FIX: the loop was created inside the try block, so a failure in
    # new_event_loop() would have raised NameError in the finally clause.
    loop = asyncio.new_event_loop()
    try:
        result = loop.run_until_complete(infer(model_str, prompt, nprompt,
                                               height, width, steps, cfg, seed, timeout))
    except (Exception, asyncio.CancelledError) as e:
        print(e)
        print(f"Task aborted: {model_str}")
        result = None
    finally:
        loop.close()
    return result
def add_gallery(image, model_str, gallery):
    """Append (image, model_str) to the gallery list, creating it when missing.

    A None image is ignored; the (possibly new) list is returned.
    """
    entries = [] if gallery is None else gallery
    if image is not None:
        entries.append((image, model_str))
    return entries
def reset_gallery(gallery):
    """Return a fresh empty gallery; the argument is ignored."""
    return []
def load_gallery(gallery,id):
    """Rebuild the gallery from session `id`'s cached (image, model) entries."""
    out = reset_gallery(gallery)
    for entry in cache_image[f"{id}"]:
        out = add_gallery(entry[0], entry[1], out)
    return out
def load_gallery_sorted(gallery,id):
    """Rebuild the gallery from session `id`'s cache, ordered by model name."""
    out = reset_gallery(gallery)
    for entry in sorted(cache_image[f"{id}"], key=lambda e: e[1]):
        out = add_gallery(entry[0], entry[1], out)
    return out
def add_cache_image(image, model_str, id, cache_image=cache_image):
    """Store an (image, model_str) tuple in the session's cache; None images are skipped."""
    if image is None:
        return
    cache_image[f"{id}"].append((image, model_str))
    return
def reset_cache_image(id, cache_image=cache_image):
    """Drop every cached image of session `id`, in place."""
    del cache_image[f"{id}"][:]
    return
def reset_cache_image_all_sessions(cache_image=cache_image):
    """Empty every session's image list while keeping the session keys."""
    for session_images in cache_image.values():
        session_images.clear()
    return
def set_session(id,warm=False):
    """Create a fresh session when id==0, otherwise keep the existing one.

    A new session gets empty per-session caches and a random id, returned in
    a hidden gr.Number.
    """
    if id != 0:
        return id
    new_id = randint(1, MAX_SEED)
    key = f"{new_id}"
    cache_image[key] = []
    cache_id_image[key] = []
    cache_list_task[key] = []
    cache_text_actu[key] = {"warm": bool(warm)}
    from_reload[key] = False
    cache_list_task_w[key] = []
    return gr.Number(visible=False, value=new_id)
def fonc_restore_session(id):
    """Rebuild the parameter and model lists of a reloaded session.

    Collects unique [prompt, nprompt, width, height, steps, cfg, seed] rows
    from both the still-pending per-model tasks and the already generated
    images, re-arms the module counter, and flags the session so the next
    crea_list_task call is skipped.
    """
    from_reload[f"{id}"]=True
    list_param=[]
    list_models=[]
    # Pending tasks still queued per model.
    for m in cache_list_task[f"{id}"]:
        if m["model"] not in list_models:
            list_models.append(m["model"])
        for t in m["task"]:
            if [t["prompt"],t["nprompt"],t["width"],t["height"],t["steps"],t["cfg"],t["seed"]] not in list_param:
                list_param.append([t["prompt"],t["nprompt"],t["width"],t["height"],t["steps"],t["cfg"],t["seed"]])
    # Images already produced in this session (stored as task dicts).
    for t in cache_image[f"{id}"]:
        if t["model"] not in list_models :
            list_models.append(t["model"])
        if [t["prompt"],t["nprompt"],t["width"],t["height"],t["steps"],t["cfg"],t["seed"]] not in list_param:
            list_param.append([t["prompt"],t["nprompt"],t["width"],t["height"],t["steps"],t["cfg"],t["seed"]])
    cache_text_actu[f"{id}"]["nb_modules_use"]=nb_req_simult
    cache_text_actu[f"{id}"]["stop"]=False
    return gr.Dropdown(choices=[["a",list_param]], value=list_param) ,gr.Dataset(samples=list_param), list_models , len(list_models)
def print_info_sessions():
    """Return a human-readable summary of all sessions and their image counts."""
    lines = ["number of sessions : " + str(len(cache_image))]
    total = 0
    for key, images in cache_image.items():
        lines.append("session " + key + " : " + str(len(images)))
        total += len(images)
    lines.append("images total = " + str(total))
    return "\n".join(lines) + "\n"
def disp_models(group_model_choice,nb_rep=nb_rep):
    """Show the chosen models, deduplicated, as a quoted list in a Textbox.

    A blank line is inserted every 8/nb_rep entries for readability.
    """
    unique_models = []
    for name in group_model_choice:
        if name not in unique_models:
            unique_models.append(name)
    text = '\n'
    for count, name in enumerate(unique_models, start=1):
        text += "\"" + name + "\",\n"
        if count % (8 / nb_rep) == 0:
            text += "\n"
    return gr.Textbox(label="models", value=text)
def search_models(str_search,tags_plus_models=tags_plus_models):
    """Search model names by substring, plus exact (case-insensitive) tag match."""
    matches = "\n"
    # Substring search over the full model list (tag entry 0 holds all models).
    for name in tags_plus_models[0][2]:
        if name.find(str_search) != -1:
            matches += "\"" + name + "\",\n"
    tag_matches = ""
    for tag_entry in tags_plus_models:
        if str_search.lower() == tag_entry[0].lower() and str_search != "":
            for name in tag_entry[2]:
                tag_matches += "\"" + name + "\",\n"
    if tag_matches != "":
        output = matches + "\n From tags : \n\n" + tag_matches
    else :
        output = matches
    return gr.Textbox(label="out", value=output)
def search_info(txt_search_info,models_plus_tags=models_plus_tags):
    """Look up the tag list of the model named in txt_search_info.

    The model name may be given bare or wrapped in double quotes; the last
    matching entry wins.
    """
    outputList=[]
    if txt_search_info.find("\"")!=-1:
        # Extract the text between the first pair of double quotes.
        start=txt_search_info.find("\"")+1
        end=txt_search_info.find("\"",start)
        m_name=cutStrg(txt_search_info,start,end)
    else :
        m_name = txt_search_info
    for m in models_plus_tags:
        if m_name == m[0]:
            outputList=m[1]
    if len(outputList)==0:
        # BUG FIX: corrected the user-facing message ("Not Find" -> "Not Found").
        outputList.append("Model Not Found")
    return gr.Textbox(label="out",value=outputList)
def ratio_chosen(choice_ratio,width,height):
    """Apply a preset [width, height] ratio; the 'None' preset keeps the sliders."""
    if choice_ratio == [None, None]:
        return width, height
    new_w, new_h = choice_ratio
    return (gr.Slider(label="Width", info="If 0, the default value is used.", maximum=2024, step=32, value=new_w),
            gr.Slider(label="Height", info="If 0, the default value is used.", maximum=2024, step=32, value=new_h))
# Preset aspect ratios for the width/height sliders: [label, [width, height]].
list_ratios=[["None",[None,None]],
    ["4:1 (2048 x 512)",[2048,512]],
    ["12:5 (1536 x 640)",[1536,640]],
    ["~16:9 (1344 x 768)",[1344,768]],
    ["~3:2 (1216 x 832)",[1216,832]],
    ["~4:3 (1152 x 896)",[1152,896]],
    ["1:1 (1024 x 1024)",[1024,1024]],
    ["~3:4 (896 x 1152)",[896,1152]],
    ["~2:3 (832 x 1216)",[832,1216]],
    ["~9:16 (768 x 1344)",[768,1344]],
    ["5:12 (640 x 1536)",[640,1536]],
    ["1:4 (512 x 2048)",[512,2048]]]
def fonc_add_param(lp,txt_input,neg_input,width,height,steps,cfg,seed):
    """Add one parameter row per '/'-separated prompt; drop the placeholder row first."""
    placeholder = ["","",0,0,0,0,-1]
    if lp == [placeholder]:
        lp.remove(placeholder)
    for prompt_part in txt_input.split("/"):
        lp.append([prompt_part,neg_input,width,height,steps,cfg,seed])
    return gr.Dataset(samples=lp) , gr.Dropdown(choices=[["a",lp]], value=lp)
def fonc_del_param(lp,txt_input,neg_input,width,height,steps,cfg,seed):
    """Remove the matching parameter row; restore the placeholder when the list empties."""
    row = [txt_input,neg_input,width,height,steps,cfg,seed]
    if row in lp :
        lp.remove(row)
    if not lp:
        lp.append(["","",0,0,0,0,-1])
    return gr.Dataset(samples=lp) , gr.Dropdown(choices=[["a",lp]], value=lp)
def fonc_load_info(nb_of_models_to_gen,index_tag,index_first_model):
    """Clamp the requested model range to the chosen tag's list and describe it.

    Returns (possibly clamped count, info Textbox, Dropdown with the selection).
    """
    str_temp=""
    list_models_temp=[]
    nb_avail = len(tags_plus_models[index_tag][2])
    if index_first_model + nb_of_models_to_gen > nb_avail:
        # BUG FIX: corrected grammar of the user-facing warnings.
        if nb_of_models_to_gen > nb_avail:
            str_temp += "warning : too many models chosen"
        else:
            str_temp += "warning : first model too close to the last model"
        nb_of_models_to_gen = nb_avail - index_first_model
        str_temp += f" - only {nb_of_models_to_gen} will be used\n\n"
    str_temp += "list of models used (from "
    str_temp += f"{index_first_model+1}/{nb_avail} to {index_first_model+nb_of_models_to_gen}/{nb_avail}) :\n\n"
    for i in range(nb_of_models_to_gen):
        list_models_temp.append(tags_plus_models[index_tag][2][i+index_first_model])
        str_temp += f"\"{tags_plus_models[index_tag][2][i+index_first_model]}\",\n"
    return nb_of_models_to_gen,gr.Textbox(str_temp),gr.Dropdown(choices=[["",list_models_temp]], value=list_models_temp )
def load_random_models(nb_of_models_to_gen,index_tag):
    """Pick nb_of_models_to_gen distinct random models from the chosen tag.

    When the request exceeds the tag's list, the whole list is used instead.
    """
    str_temp=""
    list_models_temp=[]
    pool = tags_plus_models[index_tag][2]
    if nb_of_models_to_gen >= len(pool):
        # BUG FIX: corrected grammar of the user-facing warning.
        str_temp += "warning : too many models chosen"
        nb_of_models_to_gen = len(pool)
        str_temp += f" - only {nb_of_models_to_gen} will be used\n\n"
        list_models_temp = pool
        for m in list_models_temp:
            str_temp += f"\"{m}\",\n"
    else :
        # Sample without replacement by popping random indices from a copy.
        remaining = pool.copy()
        for i in range(nb_of_models_to_gen):
            m = remaining.pop(randint(0, len(remaining)-1))
            list_models_temp.append(m)
            str_temp += f"\"{m}\",\n"
    return nb_of_models_to_gen,gr.Textbox(str_temp),gr.Dropdown(choices=[["",list_models_temp]], value=list_models_temp )
def fonc_load_info_custom(nb_of_models_to_gen,list_model_custom,index_first_model):
    """Clamp the requested range to the custom model list and describe it.

    Returns (possibly clamped count, info Textbox, Dropdown with the selection).
    """
    str_temp=""
    list_models_temp=[]
    nb_avail = len(list_model_custom)
    if index_first_model + nb_of_models_to_gen > nb_avail:
        # BUG FIX: corrected grammar of the user-facing warnings.
        if nb_of_models_to_gen > nb_avail:
            str_temp += "warning : too many models chosen"
        else:
            str_temp += "warning : first model too close to the last model"
        nb_of_models_to_gen = nb_avail - index_first_model
        str_temp += f" - only {nb_of_models_to_gen} will be used\n\n"
    str_temp += "list of models CUSTOM used (from "
    str_temp += f"{index_first_model+1}/{nb_avail} to {index_first_model+nb_of_models_to_gen}/{nb_avail}) :\n\n"
    for i in range(nb_of_models_to_gen):
        list_models_temp.append(list_model_custom[i+index_first_model])
        str_temp += f"\"{list_model_custom[i+index_first_model]}\",\n"
    return nb_of_models_to_gen,gr.Textbox(str_temp),gr.Dropdown(choices=[["",list_models_temp]], value=list_models_temp )
def crea_list_task(id_session,list_param,list_models_to_gen,nb_images_by_prompt):
    """Build the per-model task queue and the status counters for a session.

    Skipped once right after a session restore (from_reload flag).
    """
    key = f"{id_session}"
    if from_reload[key] == True:
        from_reload[key] = False
        return
    cache_list_task[key] = []
    progress = []
    for model_name in list_models_to_gen:
        entry = {"model": model_name, "id_module": -1, "pending_task": 0, "task": []}
        progress.append(0)
        # index_prompt numbers every task of this model sequentially.
        task_index = 0
        for p in list_param:
            for _ in range(nb_images_by_prompt):
                entry["task"].append({"prompt": p[0], "nprompt": p[1], "width": p[2],
                                      "height": p[3], "steps": p[4], "cfg": p[5],
                                      "seed": p[6], "index_prompt": task_index})
                task_index += 1
        cache_list_task[key].append(entry)
    nb_tasks = len(list_models_to_gen) * len(list_param) * nb_images_by_prompt
    cache_text_actu[key] = {"nb_modules_use": nb_req_simult, "stop": False, "nb_fail": 0,
                            "warm": False,
                            "nb_models_to_do": len(list_models_to_gen),
                            "nb_models_tot": len(list_models_to_gen),
                            "nb_tasks_to_do": nb_tasks,
                            "nb_tasks_tot": nb_tasks,
                            "progress": progress,
                            'nb_tasks_by_model': nb_images_by_prompt * len(list_param),
                            "nb_warm_in_use": 0}
def fonc_update_actu(text_actu,id):
    """Render a status report: modules, models, per-model progress digits, images, fails."""
    info = cache_text_actu[str(id)]
    s = f"modules: {info['nb_modules_use']}/{nb_req_simult}\n"
    s += f"models remaining: {info['nb_models_to_do']}/{info['nb_models_tot']}\n"
    for pos, digit in enumerate(info['progress'], start=1):
        s += str(digit)
        if pos % 10 == 0:
            s += " "
        if pos % 50 == 0:
            s += "\n"
    s += "\n"
    s += f"images remaining: {info['nb_tasks_to_do']}/{info['nb_tasks_tot']}\n"
    s += f"fail attempt: {info['nb_fail']}"
    return gr.Textbox(s)
def fonc_update_actu_2(id):
    """Status text for standard (non-warm) sessions.

    Ends with a random number so the Textbox value always changes and the UI
    keeps refreshing.
    """
    if id == 0 :
        return gr.Textbox("waiting...")
    info = cache_text_actu[str(id)]
    if info['warm'] == True:
        return gr.Textbox("waiting...")
    s = ""
    active = 0
    for pos, digit in enumerate(info['progress'], start=1):
        if digit == 1:
            active += 1
        s += str(digit)
        if pos % 10 == 0:
            s += " "
        if pos % 50 == 0:
            s += "\n"
    s += "\n"
    s += f"modules: {info['nb_modules_use']}/{nb_req_simult} ({active}/{info['nb_modules_use']})\n"
    s += f"models remaining: {info['nb_models_to_do']}/{info['nb_models_tot']}\n"
    s += f"images remaining(done): {info['nb_tasks_to_do']}({info['nb_tasks_tot']-info['nb_tasks_to_do']})/{info['nb_tasks_tot']}\n"
    s += f"fail attempt: {info['nb_fail']}\n"
    s += f"warm task pending = {info['nb_warm_in_use']}\n"
    s += f"{randint(1,MAX_SEED)}"
    return gr.Textbox(s)
def cutStrg(longStrg, start, end):
    """Return the substring of longStrg from start (inclusive) to end (exclusive).

    Replaces a character-by-character concatenation loop with a slice; as a
    side benefit an out-of-range end no longer raises IndexError (slices clamp).
    """
    return longStrg[start:end]
def aff_models_perso(txt_list_perso,models=models):
    """Extract double-quoted model names from free text, keeping only known models."""
    selected = []
    start = txt_list_perso.find('\"')
    if start != -1:
        scanning = True
        while scanning:
            # start sits on an opening quote; the name runs to the next quote.
            start += 1
            end = txt_list_perso.find('\"', start)
            if end != -1:
                candidate = cutStrg(txt_list_perso, start, end)
                if candidate in models:
                    selected.append(candidate)
            else:
                scanning = False
            # Jump to the quote that opens the next name.
            start = txt_list_perso.find('\"', end + 1)
            if start == -1:
                scanning = False
    return gr.Dropdown(choices=[["",selected]], value=selected )
# NOTE(review): duplicate of the add_gallery defined earlier in this file;
# this later definition is the one that remains bound at runtime.
def add_gallery(image, model_str, gallery):
    """Append (image, model_str) to gallery (created if None); ignore None images."""
    if gallery is None: gallery = []
    #with lock:
    if image is not None: gallery.append((image, model_str))
    return gallery
def reset_gallery(gallery):
    """Return a fresh empty gallery list; the argument is ignored."""
    return []
def fonc_load_gallery(id_session,gallery):
    """Fill the gallery with every cached image of the session and make it visible."""
    gallery = reset_gallery(gallery)
    for entry in cache_image[f"{id_session}"]:
        gallery = add_gallery(entry["image"], entry["model"], gallery)
    return gr.Gallery(gallery,visible=True)
def fonc_move_gallery_by_model(id_session,gallery,index_g,models,index_m,direction):
    """Rotate the ring of nb_gallery_model gallery slots one step.

    index_g is this slot's position, index_m the current model index,
    direction +1 or -1.  The slot that lands delta positions ahead in the
    moving direction is pre-filled with that model's images; the slot just
    left behind is hidden and the slot now centered is shown.
    """
    delta=int((nb_gallery_model-1)/2)
    list_image_temp=[]
    # Pre-load the slot moving to the far edge of the ring.
    if index_g==(index_m+(delta*direction))%nb_gallery_model :
        gallery = reset_gallery(gallery)
        for i in range(len(cache_image[f"{id_session}"])):
            if cache_image[f"{id_session}"][i]["model"]==models[(index_m+(delta*direction))%len(models)]:
                list_image_temp.append([cache_image[f"{id_session}"][i]["image"],cache_image[f"{id_session}"][i]["model"],cache_image[f"{id_session}"][i]["index_prompt"]])
        # Keep the images ordered by their prompt index.
        for temp in sorted(list_image_temp,key=itemgetter(2)):
            gallery=add_gallery(temp[0],temp[1],gallery)
    if index_g==(index_m-direction)%nb_gallery_model:
        # The slot we just moved away from: hide it.
        return gr.Gallery(gallery,visible=False)
    elif index_g==index_m%nb_gallery_model:
        # The slot now in focus: show it.
        return gr.Gallery(gallery,visible=True)
    else:
        return gallery
def fonc_start(id_session,id_module,s,cont,list_models_to_gen):
    """One worker step of the standard pipeline.

    Claims a task (warm models first, then any idle model), generates the
    image, and updates the session's queues and counters.  Returns
    (image_or_None, marker Textbox with '1' appended, random trigger Number).
    """
    if cont==False:
        # Manual stop: release this module slot.
        cache_text_actu[f"{id_session}"]["nb_modules_use"]-=1
        print("manual stop")
        return None,gr.Textbox(s),gr.Number(randint(1,MAX_SEED))
    task_actu={}
    model_actu=""
    use_warm_model=False
    print(f"in fonc : id module={id_module}\n")
    warm_models , models_plus_tags_temp = find_warm_model_list("John6666", ["stable-diffusion-xl"], "", "last_modified", 10000)
    # Pass 1: prefer a warm model with capacity (< max_pending in flight).
    for model_plus_tasks in cache_list_task[f"{id_session}"]:
        if model_actu == "":
            if model_plus_tasks["model"] in warm_models:
                if model_plus_tasks["pending_task"]<max_pending:
                    try:
                        task_actu=model_plus_tasks["task"].pop()
                    except:
                        # NOTE(review): bare except; empty task list is the expected case.
                        continue
                    model_actu=model_plus_tasks["model"]
                    model_plus_tasks["pending_task"]+=1
                    use_warm_model=True
                    cache_text_actu[f"{id_session}"]["nb_warm_in_use"]+=1
                    print(f"warm model : {model_actu}\n")
                    break
    # Pass 2: otherwise take any model with no task currently in flight.
    if model_actu == "":
        for model_plus_tasks in cache_list_task[f"{id_session}"]:
            if model_plus_tasks["pending_task"]==0:
                try:
                    task_actu=model_plus_tasks["task"].pop()
                except:
                    continue
                model_actu=model_plus_tasks["model"]
                model_plus_tasks["pending_task"]+=1
                print(f"find model : {model_actu}\n")
                # First task claimed for this model: mark its progress digit to 1.
                if len(model_plus_tasks["task"])==cache_text_actu[f"{id_session}"]["nb_tasks_by_model"]-1:
                    i=0
                    for model in list_models_to_gen:
                        if model_actu==model:
                            cache_text_actu[f"{id_session}"]['progress'][i]=1
                        i+=1
                break
    if model_actu=="":
        # Nothing claimable: release this module slot.
        cache_text_actu[f"{id_session}"]["nb_modules_use"]-=1
        print("Stop with :"+s+"\n")
        return None,gr.Textbox(s),gr.Number(randint(1,MAX_SEED))
    print("begin gen image:")
    print(model_actu)
    print(task_actu)
    # Warm models in this pipeline use the longer warm-priority timeout.
    if use_warm_model:
        result=gen_fn(model_actu, task_actu["prompt"], task_actu["nprompt"], task_actu["height"], task_actu["width"], task_actu["steps"], task_actu["cfg"], task_actu["seed"],inference_timeout_wp)
    else:
        result=gen_fn(model_actu, task_actu["prompt"], task_actu["nprompt"], task_actu["height"], task_actu["width"], task_actu["steps"], task_actu["cfg"], task_actu["seed"])
    print("reception")
    if result!=None:
        #result=gr.Image(result)
        id_image=len(cache_image[f"{id_session}"])
        i=0
        for model_plus_tasks in cache_list_task[f"{id_session}"]:
            if model_plus_tasks["model"]==model_actu:
                model_plus_tasks["pending_task"]-=1
                cache_text_actu[f"{id_session}"]["nb_tasks_to_do"]-=1
                i=0
                # Progress digit 2..9 scales with this model's completion ratio.
                for model in list_models_to_gen:
                    if model_actu==model:
                        cache_text_actu[f"{id_session}"]['progress'][i]=int(((1-((len(model_plus_tasks["task"])+model_plus_tasks["pending_task"])/cache_text_actu[f"{id_session}"]["nb_tasks_by_model"]))*7)//1)+2
                    i+=1
                # All of this model's tasks done: retire it.
                if len(model_plus_tasks["task"])+model_plus_tasks["pending_task"]==0:
                    cache_list_task[f"{id_session}"].remove(model_plus_tasks)
                    cache_text_actu[f"{id_session}"]["nb_models_to_do"]-=1
        task_actu["id_image"]=id_image
        task_actu["model"]=model_actu
        task_actu["image"]=result
        cache_image[f"{id_session}"].append(task_actu)
        print("image saved\n")
    else:
        # NOTE(review): relies on model_plus_tasks leaking from the claim loop
        # above (the claimed entry, thanks to the break) — fragile but correct here.
        model_plus_tasks["task"].append(task_actu)
        model_plus_tasks["pending_task"]-=1
        cache_text_actu[f"{id_session}"]["nb_fail"]+=1
        print("fail to generate\n")
    num_task_to_do=0
    for model_plus_tasks in cache_list_task[f"{id_session}"]:
        for task in model_plus_tasks["task"]:
            num_task_to_do+=1
    if use_warm_model:
        cache_text_actu[f"{id_session}"]["nb_warm_in_use"]-=1
    print(f"\n {num_task_to_do} tasks to do\n")
    return result , gr.Textbox(s+"1"),gr.Number(randint(1,MAX_SEED))
def fonc_init(s):
    """Append a '1' progress marker to the module-tracking string."""
    return gr.Textbox(f"{s}1")
def fonc_load_gallery_by_model(id_session,gallery,models,index_g,index_m,gallery_all):
    """Load the images of the model assigned to gallery slot index_g.

    The ring of nb_gallery_model slots is centered on index_m; the offset
    arithmetic appears to map the slot to a model index in [-delta, +delta]
    around the current model (TODO confirm against the ring size).  Only the
    centered slot is shown; the combined gallery is hidden.
    """
    delta=int((nb_gallery_model-1)/2)
    gallery = reset_gallery(gallery)
    list_image_temp=[]
    for i in range(len(cache_image[f"{id_session}"])):
        if cache_image[f"{id_session}"][i]["model"]==models[((index_m+index_g+delta)%nb_gallery_model)-delta]:
            list_image_temp.append([cache_image[f"{id_session}"][i]["image"],cache_image[f"{id_session}"][i]["model"],cache_image[f"{id_session}"][i]["index_prompt"]])
    # Keep the slot's images ordered by prompt index.
    for temp in sorted(list_image_temp,key=itemgetter(2)):
        gallery=add_gallery(temp[0],temp[1],gallery)
    return gr.Gallery(gallery,visible=(index_g==(index_m%nb_gallery_model))), gr.Gallery(gallery_all,visible=False)
def load_gallery_by_prompt(id_session,gallery,index_p,list_p):
    """Show every cached image of the prompt at index_p, sorted by model name."""
    gallery = reset_gallery(gallery)
    wanted_prompt = list_p[index_p][0]
    for entry in cache_image[f"{id_session}"]:
        if entry["prompt"] == wanted_prompt:
            gallery = add_gallery(entry["image"], entry["model"], gallery)
    if len(gallery) != 0:
        gallery = sorted(gallery, key=itemgetter(1))
    return gr.Gallery(gallery, visible=True)
def index_gallery_next(i,list_models):
    """Advance the gallery model index (wrapping); direction is +1."""
    return gr.Number((i + 1) % len(list_models)), gr.Number(1)
def index_gallery_prev(i,list_models):
    """Step the gallery model index back (wrapping); direction is -1."""
    return gr.Number((i - 1) % len(list_models)), gr.Number(-1)
def change_text_model_actu_gal(list_models,index):
    """Label of the form '(k/N) model_name' for the current gallery model."""
    total = len(list_models)
    return gr.Textbox(f"({(index % total)+1}/{total}) {list_models[index]}")
def fonc_add_to_text(text,list_models,index):
    """Append the current model's quoted name to the text box content."""
    return gr.Textbox(f"{text}\"{list_models[index]}\",\n")
def load_model_publ(choice_model_publ):
    """Reset the public image slot and remember the chosen model name."""
    blank_image = gr.Image(None, label=choice_model_publ, interactive=False)
    hidden_name = gr.Textbox(choice_model_publ, visible=False, show_label=False)
    return blank_image, hidden_name
def set_tasks_w(id_session,list_p,nb_i):
    """Queue warm-pipeline tasks ([remaining, params, in_flight]) and init counters."""
    key = f"{id_session}"
    for p in list_p:
        params = {"prompt": p[0], "nprompt": p[1], "width": p[2], "height": p[3],
                  "steps": p[4], "cfg": p[5], "seed": p[6]}
        cache_list_task_w[key].append([nb_i, params, 0])
    nb_tasks = len(list_p) * nb_i
    cache_text_actu[key] = {"nb_modules_use": nb_req_simult, "stop": False, "nb_fail": 0,
                            "warm": True, "nb_prompt_to_do": len(list_p),
                            "nb_prompt_tot": len(list_p),
                            "nb_tasks_to_do": nb_tasks, "nb_tasks_tot": nb_tasks}
    return
def fonc_start_w(id_session,id_module,s,cont,nb_modules,tag):
    """One worker step of the warm-model pipeline.

    Claims a pending task, picks a random warm model known to the local
    registry, generates, and updates the session's counters.  Returns
    (image_or_None, marker Textbox with '1' appended, random trigger Number).
    """
    if cont==False or id_module>=nb_modules:
        cache_text_actu[f"{id_session}"]["nb_modules_use"]-=1
        print("manual stop")
        return None,gr.Textbox(s),gr.Number(randint(1,MAX_SEED))
    # Claim one task: each entry is [remaining_count, params_dict, in_flight_count].
    find_task=False
    task={}
    for t in cache_list_task_w[f"{id_session}"]:
        if not find_task:
            if t[0]>0:
                t[0]-=1
                t[2]+=1
                task=t[1].copy()
                find_task=True
    if not find_task:
        cache_text_actu[f"{id_session}"]["nb_modules_use"]-=1
        return None,gr.Textbox(s),gr.Number(randint(1,MAX_SEED))
    if tag == "":
        tagT=["stable-diffusion-xl"]
    else:
        tagT=["stable-diffusion-xl",tag]
    models_temp , models_plus_tags_temp = find_warm_model_list("John6666", tagT, "", "last_modified", 10000)
    # Keep only warm models that are also in the local registry.
    models_rand=[]
    for m in models_temp:
        if m in models:
            models_rand.append(m)
    if len(models_rand)!=0:
        # BUG FIX: previously indexed models_temp with an index bounded by
        # len(models_rand), which could select a model absent from the registry.
        model_actu=models_rand[randint(0,len(models_rand)-1)]
        print(f"find model : {model_actu}")
    else:
        print("no warm model")
        cache_text_actu[f"{id_session}"]["nb_modules_use"]-=1
        return None,gr.Textbox(s),gr.Number(randint(1,MAX_SEED))
    print("begin gen image:")
    print(model_actu)
    print(task)
    result=gen_fn(model_actu, task["prompt"], task["nprompt"], task["height"], task["width"], task["steps"], task["cfg"], task["seed"],inference_timeout_w)
    print("reception")
    if result!=None:
        id_image=len(cache_image[f"{id_session}"])
        for t in cache_list_task_w[f"{id_session}"]:
            if t[1]==task:
                t[2]-=1
                # Prompt fully done (nothing remaining nor in flight): retire it.
                if t[0]+t[2]<=0:
                    cache_list_task_w[f"{id_session}"].remove(t)
                    cache_text_actu[f"{id_session}"]["nb_prompt_to_do"]-=1
        cache_text_actu[f"{id_session}"]["nb_tasks_to_do"]-=1
        task["id_image"]=id_image
        task["model"]=model_actu
        task["image"]=result
        cache_image[f"{id_session}"].append(task)
        print("image saved\n")
        print(task)
    else:
        # Failure: return the claimed task to the queue.
        for t in cache_list_task_w[f"{id_session}"]:
            if t[1]==task:
                t[0]+=1
                t[2]-=1
        cache_text_actu[f"{id_session}"]["nb_fail"]+=1
        print("fail to generate\n")
    nb_task_to_do=cache_text_actu[str(id_session)]["nb_tasks_to_do"]
    print(f"\n {nb_task_to_do} tasks to do\n")
    return result , gr.Textbox(s+"1"),gr.Number(randint(1,MAX_SEED))
def fonc_update_actu_w(id):
    """Status text for warm sessions; ends with a random number so the value always changes."""
    if id == 0 :
        return gr.Textbox("waiting...")
    info = cache_text_actu[str(id)]
    if info['warm'] == False:
        return gr.Textbox("waiting...")
    report = f"modules: {info['nb_modules_use']}/{nb_req_simult} \n"
    report += f"prompts remaining: {info['nb_prompt_to_do']}/{info['nb_prompt_tot']}\n"
    report += f"images remaining(done): {info['nb_tasks_to_do']}({info['nb_tasks_tot']-info['nb_tasks_to_do']})/{info['nb_tasks_tot']}\n"
    report += f"fail attempt: {info['nb_fail']}\n"
    report += f"{randint(1,MAX_SEED)}"
    return gr.Textbox(report)
def make_me():
    """Assemble the whole Gradio interface.

    Three tabs are created:
      * " "      -- public single-model text-to-image demo,
      * " Sort " -- hidden batch-generation / sorting workbench,
      * " Warm " -- hidden bulk generation restricted to warm models.
    The two hidden tabs are revealed by `test_pass_aff` when the pass
    phrase typed in `test_pass` is accepted.
    """
    # ---------- public tab ----------
    with gr.Tab(" "):
        with gr.Column():
            with gr.Row():
                with gr.Column(scale=4):
                    prompt_publ=gr.Textbox(label='Your prompt:', lines=4, interactive = True)
                    choice_model_publ=gr.Dropdown(label="List of Models", choices=list(models_publ),value=models_publ[0])
                gen_button_publ = gr.Button('Generate images',scale=2)
            image_publ=gr.Image(None,label=models_publ[0],interactive=False)
            # Hidden copy of the selected model name, kept in sync on change.
            current_models_publ=gr.Textbox(models_publ[0],visible=False,show_label=False)
            choice_model_publ.change(load_model_publ,[choice_model_publ],[image_publ,current_models_publ])
            gen_event_publ = gr.on(triggers=[gen_button_publ.click, prompt_publ.submit], fn=gen_fn,
                                   inputs=[choice_model_publ, prompt_publ], outputs=[image_publ])
        with gr.Row():
            with gr.Column(scale=4):
                # Pass-phrase box: unlocks the hidden tabs below.
                test_pass=gr.Textbox(show_label=False,lines=1, interactive = True)
            button_test_pass=gr.Button(" ",scale=1)
    # ---------- hidden " Sort " tab ----------
    with gr.Tab(" Sort ",visible=False) as tab_p:
        button_test_pass.click(test_pass_aff,[test_pass],[tab_p])
        with gr.Column():
            # Prompt / generation-parameter entry.
            with gr.Group():
                with gr.Row():
                    with gr.Column(scale=4):
                        txt_input = gr.Textbox(label='Your prompt:', lines=4, interactive = True)
                        neg_input = gr.Textbox(label='Negative prompt:', lines=4, interactive = True)
                    with gr.Column(scale=4):
                        with gr.Row():
                            width = gr.Slider(label="Width", info="If 0, the default value is used.", maximum=2024, step=32, value=0, interactive = True)
                            height = gr.Slider(label="Height", info="If 0, the default value is used.", maximum=2024, step=32, value=0, interactive = True)
                        with gr.Row():
                            choice_ratio = gr.Dropdown(label="Ratio Width/Height",
                                                       info="OverWrite Width and Height (W*H<1024*1024)",
                                                       show_label=True, choices=list(list_ratios) , interactive = True, value=list_ratios[0][1])
                            choice_ratio.change(ratio_chosen,[choice_ratio,width,height],[width,height])
                        with gr.Row():
                            steps = gr.Slider(label="Number of inference steps", info="If 0, the default value is used.", maximum=100, step=1, value=0, interactive = True)
                            cfg = gr.Slider(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=0, interactive = True)
                        seed = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1, interactive = True)
                add_param=gr.Button("Add to the list")
                del_param=gr.Button("Delete to the list")
            #gen_button = gr.Button('Generate images', scale=3)
            #stop_button = gr.Button('Stop', variant='secondary', interactive=False, scale=1)
            #gen_button.click(lambda: gr.update(interactive=True), None, stop_button)
            # Hidden Dropdown used purely as a state holder for the prompt
            # list (each entry: [prompt, nprompt, width, height, steps, cfg, seed]).
            list_param=gr.Dropdown(choices=[["a",[["","",0,0,0,0,-1]]]], value=[["","",0,0,0,0,-1]], visible=False)
            disp_param = gr.Examples(
                label="list of prompt",
                examples=list_param.value,
                inputs=[txt_input,neg_input,width,height,steps,cfg,seed],
                outputs=[txt_input,neg_input,width,height,steps,cfg,seed],
            )
            with gr.Accordion("Restore Session",open=False) :
                with gr.Row():
                    text_info_session=gr.Textbox()
                    with gr.Column():
                        button_info_session=gr.Button("Get infos sessions")
                        button_info_session.click(print_info_sessions,[],[text_info_session])
                        id_session=gr.Number(0,interactive = True,label="ID session",show_label=True)
                        button_restore_session=gr.Button("Restore Session")
            add_param.click(fonc_add_param,[list_param,txt_input,neg_input,width,height,steps,cfg,seed],[disp_param.dataset,list_param])
            add_param.click(set_session,[id_session],[id_session])
            del_param.click(fonc_del_param,[list_param,txt_input,neg_input,width,height,steps,cfg,seed],[disp_param.dataset,list_param])
            with gr.Row():
                # Hidden state holder: list of models selected for generation.
                list_models_to_gen=gr.Dropdown(choices=[["",[]]], value=[], visible=False)
                disp_info=gr.Textbox(label="Info")
                with gr.Column():
                    with gr.Row():
                        nb_images_by_prompt=gr.Number(2,label="Number of images by prompt:",interactive=True)
                        nb_of_models_to_gen=gr.Number(10,label="Number of Models:",interactive=True)
                    index_tag=gr.Dropdown(label="Tag",choices=list(list_tags),type="index")
                    index_first_model=gr.Dropdown(label="First model",choices=list([]), type="index")
                    # Selecting a tag repopulates the first-model dropdown from
                    # tags_plus_models.
                    index_tag.change(lambda i:gr.Dropdown(choices=list([f"({j+1}/{len(tags_plus_models[i][2])}) {tags_plus_models[i][2][j]}" for j in range(len(tags_plus_models[i][2]))])),
                                     index_tag,index_first_model)
                    load_info=gr.Button("Load Models")
                    load_info.click(fonc_load_info,[nb_of_models_to_gen,index_tag,index_first_model],[nb_of_models_to_gen,disp_info,list_models_to_gen])
                    button_load_random_models=gr.Button("Load Random Models")
                    button_load_random_models.click(load_random_models,[nb_of_models_to_gen,index_tag],[nb_of_models_to_gen,disp_info,list_models_to_gen])
                with gr.Accordion("Models Custom",open=False) :
                    with gr.Row():
                        text_list_model_custom=gr.Textbox(label="List Models Custom")
                        with gr.Column():
                            list_model_custom=gr.Dropdown(choices=[["",[]]], value=[], visible=False)
                            #use_models_custom=gr.Radio("Use Models Custom",value=False)
                            cut_model_custom=gr.Button("Cut Text Models Custom")
                            cut_model_custom.click(aff_models_perso,[text_list_model_custom],[list_model_custom])
                            index_first_model_custom=gr.Dropdown(label="First model",choices=list([]), type="index")
                            list_model_custom.change(lambda li:gr.Dropdown(choices=list([f"({j+1}/{len(li)}) {li[j]}" for j in range(len(li))])),
                                                     [list_model_custom],index_first_model_custom)
                            load_model_custom=gr.Button("Load Models Custom")
                            load_model_custom.click(fonc_load_info_custom,[nb_of_models_to_gen,list_model_custom,index_first_model_custom],[nb_of_models_to_gen,disp_info,list_models_to_gen])
            # (Re)build the task queue whenever the model list changes.
            list_models_to_gen.change(crea_list_task,[id_session,list_param,list_models_to_gen,nb_images_by_prompt],[])
            with gr.Column():
                button_start=gr.Button("START")
                button_stop=gr.Button("STOP")
                # Hidden flag polled by the worker slots: True = keep generating.
                cont=gr.Checkbox(True,visible=False)
                button_start.click(lambda:True,[],[cont])
                button_stop.click(lambda:False,[],[cont])
                #text_actu=gr.Textbox("",label="in progress",interactive=False,lines=6)
                # Progress box, re-evaluated every `tempo_update_actu` seconds.
                text_actu=gr.Textbox(fonc_update_actu_2,inputs=id_session,every=tempo_update_actu,label="in progress",interactive=False,lines=6)
                update_actu=gr.Number(0,visible=False)
                #update_actu.change(fonc_update_actu,[text_actu,id_session],[text_actu])
                #button_start.click(fonc_update_actu,[text_actu,id_session],[text_actu])
                #button_start.click(lambda:gr.Number(0),[],[incr_update_actu])
                with gr.Accordion("Gallery Parameters",open=False) :
                    with gr.Row():
                        with gr.Column():
                            set_height_gallery=gr.Checkbox(True,label="set height",show_label=True)
                            height_gallery=gr.Number(650,label="height",show_label=True)
                            col_gallery=gr.Number(5,label="nb columns",show_label=True)
                            row_gallery=gr.Number(4,label="nb row",show_label=True)
                        with gr.Column():
                            button_reset_cache_image=gr.Button("Reset Images")
                            button_reset_cache_image.click(reset_cache_image,[id_session],[])
                            button_reset_cache_image_all_session=gr.Button("Reset Images ALL SESSION")
                            button_reset_cache_image_all_session.click(reset_cache_image_all_sessions,[],[])
            with gr.Row():
                # One hidden (image, module-id, state) trio per concurrent
                # worker slot. START fires fonc_init on every state Textbox;
                # each state change re-triggers fonc_start, which in turn
                # mutates the state again -- forming the generation loop.
                outputs=[]
                id_modules=[]
                states=[]
                for i in range(nb_req_simult):
                    #outputs.append(gr.Image(None,interactive=False,render=False))
                    #id_modules.append(gr.Number(i,interactive=False,render=False))
                    outputs.append(gr.Image(None,interactive=False,visible=False))
                    id_modules.append(gr.Number(i,interactive=False,visible=False))
                    states.append(gr.Textbox("1",interactive=False,visible=False))
                for o,i,s in zip(outputs,id_modules,states):
                    #o.change(fonc_start,[id_session,i],[o])
                    #o.change(test_change,[],[])
                    s.change(fonc_start,[id_session,i,s,cont,list_models_to_gen],[o,s,update_actu])
                    #button_start.click(lambda : gr.Image(None),[],[o])
                    gen_event = gr.on(triggers=[button_start.click], fn=fonc_init,inputs=[s], outputs=[s])
                with gr.Column(scale=2):
                    gallery = gr.Gallery(label="Output", show_download_button=True, elem_classes="gallery",
                                         interactive=False, show_share_button=False, container=True, format="png",
                                         preview=True, object_fit="contain",columns=5,rows=4,height=650)
                    gallery_by_prompt = gr.Gallery(label="Output", show_download_button=True, elem_classes="gallery",
                                                   interactive=False, show_share_button=False, container=True, format="png",
                                                   preview=True, object_fit="contain",columns=5,rows=4,visible=False,height=650)
                    gallery_models=[]
                    index_gallery=[]
                    set_height_gallery.change(lambda g,h,s: gr.Gallery(g,height=h) if s else gr.Gallery(g,height=None),
                                              [gallery,height_gallery,set_height_gallery],[gallery])
                    height_gallery.change(lambda g,h: gr.Gallery(g,height=h),[gallery,height_gallery],[gallery])
                    col_gallery.change(lambda g,h: gr.Gallery(g,columns=h),[gallery,col_gallery],[gallery])
                    row_gallery.change(lambda g,h: gr.Gallery(g,rows=h),[gallery,row_gallery],[gallery])
                    # Fixed pool of per-model galleries, shown one at a time.
                    for i in range(nb_gallery_model):
                        gallery_models.append(gr.Gallery(label="Output", show_download_button=True, elem_classes="gallery",
                                                         interactive=False, show_share_button=False, container=True, format="png",
                                                         preview=True, object_fit="contain",columns=5,rows=4,visible=False,height=650))
                        index_gallery.append(gr.Number(i,visible=False))
                        set_height_gallery.change(lambda g,h,s: gr.Gallery(g,height=h) if s else gr.Gallery(g,height=None),
                                                  [gallery_models[i],height_gallery,set_height_gallery],[gallery_models[i]])
                        height_gallery.change(lambda g,h: gr.Gallery(g,height=h),[gallery_models[i],height_gallery],[gallery_models[i]])
                        col_gallery.change(lambda g,h: gr.Gallery(g,columns=h),[gallery_models[i],col_gallery],[gallery_models[i]])
                        row_gallery.change(lambda g,h: gr.Gallery(g,rows=h),[gallery_models[i],row_gallery],[gallery_models[i]])
                with gr.Column(scale=3):
                    button_load_gallery=gr.Button("Load Gallery All")
                    button_load_gallery.click(fonc_load_gallery,[id_session,gallery],[gallery])
                    with gr.Accordion("Gallery by Model",open=True) :
                        index_gallery_m=gr.Number(0,visible=False)
                        button_load_gallery_first=gr.Button("Init Gallery by model")
                        with gr.Row():
                            button_load_gallery_prev=gr.Button("Prev model")
                            button_load_gallery_next=gr.Button("Next model")
                        # Direction of the last prev/next move, consumed by
                        # fonc_move_gallery_by_model.
                        direction_gallery=gr.Number(0,visible=False)
                        button_load_gallery_next.click(index_gallery_next,[index_gallery_m,list_models_to_gen],[index_gallery_m,direction_gallery])
                        button_load_gallery_prev.click(index_gallery_prev,[index_gallery_m,list_models_to_gen],[index_gallery_m,direction_gallery])
                        for g,i in zip(gallery_models,index_gallery):
                            index_gallery_m.change(fonc_move_gallery_by_model,[id_session,g,i,list_models_to_gen,index_gallery_m,direction_gallery],[g])
                            gen_event_gallery_first = gr.on(triggers=[button_load_gallery_first.click], fn=fonc_load_gallery_by_model,
                                                            inputs=[id_session,g,list_models_to_gen,i,index_gallery_m,gallery], outputs=[g,gallery])
                            gen_event_gallery_first_all = gr.on(triggers=[button_load_gallery.click], fn=lambda g:gr.Gallery(g,visible=False),
                                                                inputs=[g], outputs=[g])
                        text_model_actu_gal = gr.Textbox(label='Model Actu:', lines=1, interactive = False)
                        index_gallery_m.change(change_text_model_actu_gal,[list_models_to_gen,index_gallery_m],[text_model_actu_gal])
                        with gr.Row():
                            with gr.Column():
                                button_add_to_bl=gr.Button("Add to Blacklist")
                                #button_remove_from_bl=gr.Button("Remove from Blacklist")
                                text_bl=gr.Textbox(label='Blacklist', lines=5, interactive = True)
                                button_add_to_bl.click(fonc_add_to_text,[text_bl,list_models_to_gen,index_gallery_m],[text_bl])
                                #button_remove_from_bl.click(fonc_remove_from_text,[text_bl,list_models_to_gen,index_gallery_m],[text_bl])
                            with gr.Column():
                                button_add_to_fav=gr.Button("Add to Favlist")
                                text_fav=gr.Textbox(label='Favlist', lines=5, interactive = True)
                                button_add_to_fav.click(fonc_add_to_text,[text_fav,list_models_to_gen,index_gallery_m],[text_fav])
                    with gr.Accordion("Gallery by Prompt",open=False) :
                        index_gallery_by_prompt=gr.Number(0,visible=False)
                        button_load_gallery_by_prompt=gr.Button("Load Gallery by prompt")
                        text_gallery_by_prompt=gr.Textbox(f"{index_gallery_by_prompt.value+1}/{len(list_param.value)}",show_label=False)
                        index_gallery_by_prompt.change(lambda i,p:gr.Textbox(f"{i+1}/{len(p)}"),[index_gallery_by_prompt,list_param],[text_gallery_by_prompt])
                        button_load_gallery_by_prompt.click(load_gallery_by_prompt,
                                                            [id_session,gallery_by_prompt,index_gallery_by_prompt,list_param],[gallery_by_prompt])
                        # Loading one gallery view hides the competing views.
                        gen_event_gallery_by_prompt = gr.on(triggers=[button_load_gallery_by_prompt.click], fn=lambda g:gr.Gallery(g,visible=False),
                                                            inputs=[gallery], outputs=[gallery])
                        gen_event_gallery_first = gr.on(triggers=[button_load_gallery_first.click], fn=lambda g:gr.Gallery(g,visible=False),
                                                        inputs=[gallery_by_prompt], outputs=[gallery_by_prompt])
                        gen_event_gallery = gr.on(triggers=[button_load_gallery.click], fn=lambda g:gr.Gallery(g,visible=False),
                                                  inputs=[gallery_by_prompt], outputs=[gallery_by_prompt])
                        for g,i in zip(gallery_models,index_gallery):
                            gen_event_gallery_by_prompt = gr.on(triggers=[button_load_gallery_by_prompt.click], fn=lambda g:gr.Gallery(g,visible=False),
                                                                inputs=[g], outputs=[g])
                        with gr.Row():
                            button_gallery_prev_prompt=gr.Button("Prev prompt")
                            button_gallery_next_prompt=gr.Button("Next prompt")
                        button_gallery_next_prompt.click(lambda i,p: (i+1)%len(p),[index_gallery_by_prompt,list_param],[index_gallery_by_prompt])
                        button_gallery_prev_prompt.click(lambda i,p: (i-1)%len(p),[index_gallery_by_prompt,list_param],[index_gallery_by_prompt])
                        index_gallery_by_prompt.change(load_gallery_by_prompt,
                                                       [id_session,gallery_by_prompt,index_gallery_by_prompt,list_param],[gallery_by_prompt])
                        set_height_gallery.change(lambda g,h,s: gr.Gallery(g,height=h) if s else gr.Gallery(g,height=None),
                                                  [gallery_by_prompt,height_gallery,set_height_gallery],[gallery_by_prompt])
                        height_gallery.change(lambda g,h: gr.Gallery(g,height=h),[gallery_by_prompt,height_gallery],[gallery_by_prompt])
                        col_gallery.change(lambda g,h: gr.Gallery(g,columns=h),[gallery_by_prompt,col_gallery],[gallery_by_prompt])
                        row_gallery.change(lambda g,h: gr.Gallery(g,rows=h),[gallery_by_prompt,row_gallery],[gallery_by_prompt])
        button_restore_session.click(fonc_restore_session,[id_session],[list_param,disp_param.dataset,list_models_to_gen,nb_of_models_to_gen])
    # ---------- hidden " Warm " tab ----------
    with gr.Tab(" Warm ",visible=False) as tab_warm:
        button_test_pass.click(test_pass_aff,[test_pass],[tab_warm])
        with gr.Column():
            with gr.Group():
                with gr.Row():
                    with gr.Column(scale=4):
                        txt_input_w = gr.Textbox(label='Your prompt:', lines=4, interactive = True)
                        neg_input_w = gr.Textbox(label='Negative prompt:', lines=4, interactive = True)
                    with gr.Column(scale=4):
                        with gr.Row():
                            width_w = gr.Slider(label="Width", info="If 0, the default value is used.", maximum=2024, step=32, value=0, interactive = True)
                            height_w = gr.Slider(label="Height", info="If 0, the default value is used.", maximum=2024, step=32, value=0, interactive = True)
                        with gr.Row():
                            choice_ratio_w = gr.Dropdown(label="Ratio Width/Height",
                                                         info="OverWrite Width and Height (W*H<1024*1024)",
                                                         show_label=True, choices=list(list_ratios) , interactive = True, value=list_ratios[0][1])
                            # NOTE(review): second input is `width` (from the
                            # Sort tab), not `width_w` -- looks like a
                            # copy/paste slip; confirm intended behavior.
                            choice_ratio_w.change(ratio_chosen,[choice_ratio_w,width,height_w],[width_w,height_w])
                        with gr.Row():
                            steps_w = gr.Slider(label="Number of inference steps", info="If 0, the default value is used.", maximum=100, step=1, value=0, interactive = True)
                            cfg_w = gr.Slider(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=0, interactive = True)
                        seed_w = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1, interactive = True)
                add_param_w=gr.Button("Add to the list")
                del_param_w=gr.Button("Delete to the list")
            # Hidden state holder for the warm-tab prompt list.
            list_param_w=gr.Dropdown(choices=[["a",[["","",0,0,0,0,-1]]]], value=[["","",0,0,0,0,-1]], visible=False)
            disp_param_w = gr.Examples(
                label="list of prompt",
                examples=list_param_w.value,
                inputs=[txt_input_w,neg_input_w,width_w,height_w,steps_w,cfg_w,seed_w],
                outputs=[txt_input_w,neg_input_w,width_w,height_w,steps_w,cfg_w,seed_w],
            )
            # Passed to set_session to mark this session as a "warm" session.
            bool_warm=gr.Checkbox(visible=False,value=True)
            add_param_w.click(fonc_add_param,[list_param_w,txt_input_w,neg_input_w,width_w,height_w,steps_w,cfg_w,seed_w],[disp_param_w.dataset,list_param_w])
            add_param_w.click(set_session,[id_session,bool_warm],[id_session])
            del_param_w.click(fonc_del_param,[list_param_w,txt_input_w,neg_input_w,width_w,height_w,steps_w,cfg_w,seed_w],[disp_param_w.dataset,list_param_w])
            with gr.Row():
                nb_images_by_prompt_w=gr.Number(2,label="Images by prompt",interactive = True)
                nb_modules_w=gr.Slider(label="Module use", minimum=1, maximum=nb_req_simult, step=1, value=40, interactive = True)
                text_tag_w=gr.Textbox("",label="Tag",interactive = True,lines=1)
                with gr.Column():
                    button_load_task_w=gr.Button("LOAD")
                    button_load_task_w.click(set_tasks_w,[id_session,list_param_w,nb_images_by_prompt_w],[])
                    button_start_w=gr.Button("START")
                    button_stop_w=gr.Button("STOP")
                    # Hidden flag polled by the warm worker slots.
                    cont_w=gr.Checkbox(True,visible=False)
                    button_start_w.click(lambda:True,[],[cont_w])
                    button_stop_w.click(lambda:False,[],[cont_w])
                    # Progress box, re-evaluated every `tempo_update_actu` seconds.
                    text_actu_w=gr.Textbox(fonc_update_actu_w,inputs=id_session,every=tempo_update_actu,label="in progress",interactive=False,lines=6)
                    update_actu_w=gr.Number(0,visible=False)
                    with gr.Accordion("Gallery Parameters",open=False) :
                        with gr.Row():
                            with gr.Column():
                                set_height_gallery_w=gr.Checkbox(True,label="set height",show_label=True)
                                height_gallery_w=gr.Number(650,label="height",show_label=True)
                                col_gallery_w=gr.Number(5,label="nb columns",show_label=True)
                                row_gallery_w=gr.Number(4,label="nb row",show_label=True)
                            with gr.Column():
                                button_reset_cache_image_w=gr.Button("Reset Images")
                                button_reset_cache_image_w.click(reset_cache_image,[id_session],[])
                                button_reset_cache_image_all_session_w=gr.Button("Reset Images ALL SESSION")
                                button_reset_cache_image_all_session_w.click(reset_cache_image_all_sessions,[],[])
            with gr.Row():
                # Hidden worker slots; same START -> fonc_init -> state-change
                # -> fonc_start_w loop as in the Sort tab.
                outputs_w=[]
                id_modules_w=[]
                states_w=[]
                for i in range(nb_req_simult):
                    outputs_w.append(gr.Image(None,interactive=False,visible=False))
                    id_modules_w.append(gr.Number(i,interactive=False,visible=False))
                    states_w.append(gr.Textbox("1",interactive=False,visible=False))
                for o,i,s in zip(outputs_w,id_modules_w,states_w):
                    s.change(fonc_start_w,[id_session,i,s,cont_w,nb_modules_w,text_tag_w],[o,s,update_actu_w])
                    gen_event = gr.on(triggers=[button_start_w.click], fn=fonc_init,inputs=[s], outputs=[s])
                with gr.Column(scale=2):
                    gallery_w = gr.Gallery(label="Output", show_download_button=True, elem_classes="gallery",
                                           interactive=False, show_share_button=False, container=True, format="png",
                                           preview=True, object_fit="contain",columns=5,rows=4,height=650)
                    gallery_by_prompt_w = gr.Gallery(label="Output", show_download_button=True, elem_classes="gallery",
                                                     interactive=False, show_share_button=False, container=True, format="png",
                                                     preview=True, object_fit="contain",columns=5,rows=4,visible=False,height=650)
                    set_height_gallery_w.change(lambda g,h,s: gr.Gallery(g,height=h) if s else gr.Gallery(g,height=None),
                                                [gallery_w,height_gallery_w,set_height_gallery_w],[gallery_w])
                    height_gallery_w.change(lambda g,h: gr.Gallery(g,height=h),[gallery_w,height_gallery_w],[gallery_w])
                    col_gallery_w.change(lambda g,h: gr.Gallery(g,columns=h),[gallery_w,col_gallery_w],[gallery_w])
                    row_gallery_w.change(lambda g,h: gr.Gallery(g,rows=h),[gallery_w,row_gallery_w],[gallery_w])
                    set_height_gallery_w.change(lambda g,h,s: gr.Gallery(g,height=h) if s else gr.Gallery(g,height=None),
                                                [gallery_by_prompt_w,height_gallery_w,set_height_gallery_w],[gallery_by_prompt_w])
                    height_gallery_w.change(lambda g,h: gr.Gallery(g,height=h),[gallery_by_prompt_w,height_gallery_w],[gallery_by_prompt_w])
                    col_gallery_w.change(lambda g,h: gr.Gallery(g,columns=h),[gallery_by_prompt_w,col_gallery_w],[gallery_by_prompt_w])
                    row_gallery_w.change(lambda g,h: gr.Gallery(g,rows=h),[gallery_by_prompt_w,row_gallery_w],[gallery_by_prompt_w])
                with gr.Column(scale=3):
                    button_load_gallery_w=gr.Button("Load Gallery All")
                    button_load_gallery_w.click(fonc_load_gallery,[id_session,gallery_w],[gallery_w])
                    with gr.Accordion("Gallery by Prompt",open=False) :
                        index_gallery_by_prompt_w=gr.Number(0,visible=False)
                        button_load_gallery_by_prompt_w=gr.Button("Load Gallery by prompt")
                        text_gallery_by_prompt_w=gr.Textbox(f"{index_gallery_by_prompt_w.value+1}/{len(list_param_w.value)}",show_label=False)
                        index_gallery_by_prompt_w.change(lambda i,p:gr.Textbox(f"{i+1}/{len(p)}"),[index_gallery_by_prompt_w,list_param_w],[text_gallery_by_prompt_w])
                        button_load_gallery_by_prompt_w.click(load_gallery_by_prompt,
                                                              [id_session,gallery_by_prompt_w,index_gallery_by_prompt_w,list_param_w],[gallery_by_prompt_w])
                        # Loading one gallery view hides the other.
                        gen_event_gallery_by_prompt_w = gr.on(triggers=[button_load_gallery_by_prompt_w.click], fn=lambda g:gr.Gallery(g,visible=False),
                                                              inputs=[gallery_w], outputs=[gallery_w])
                        gen_event_gallery_w = gr.on(triggers=[button_load_gallery_w.click], fn=lambda g:gr.Gallery(g,visible=False),
                                                    inputs=[gallery_by_prompt_w], outputs=[gallery_by_prompt_w])
                        with gr.Row():
                            button_gallery_prev_prompt_w=gr.Button("Prev prompt")
                            button_gallery_next_prompt_w=gr.Button("Next prompt")
                        button_gallery_next_prompt_w.click(lambda i,p: (i+1)%len(p),[index_gallery_by_prompt_w,list_param_w],[index_gallery_by_prompt_w])
                        button_gallery_prev_prompt_w.click(lambda i,p: (i-1)%len(p),[index_gallery_by_prompt_w,list_param_w],[index_gallery_by_prompt_w])
                        index_gallery_by_prompt_w.change(load_gallery_by_prompt,
                                                         [id_session,gallery_by_prompt_w,index_gallery_by_prompt_w,list_param_w],[gallery_by_prompt_w])
# JS snippet injected below via Markdown.  NOTE(review): Gradio sanitizes
# raw <script> tags embedded in Markdown, so this console.log most likely
# never executes in the browser -- confirm before relying on it.
js_code = """
console.log('ghgh');
"""
# App entry point: build the Blocks app, attach the UI, enable queuing and
# launch.  The custom CSS hides the floating component label overlay.
with gr.Blocks(theme="Nymbo/Nymbo_Theme", fill_width=True, css="div.float.svelte-1mwvhlq { position: absolute; top: var(--block-label-margin); left: var(--block-label-margin); background: none; border: none;}") as demo:
    gr.Markdown("<script>" + js_code + "</script>")
    make_me()  # build all tabs, galleries and worker slots

# https://www.gradio.app/guides/setting-up-a-demo-for-maximum-performance
#demo.queue(concurrency_count=999) # concurrency_count is deprecated in 4.x
demo.queue(default_concurrency_limit=200, max_size=200)
demo.launch(max_threads=400)