John6666 committed
Commit 8217c9e
1 Parent(s): 501231e

Upload convert_url_to_diffusers_flux_gr.py

Files changed (1)
  1. convert_url_to_diffusers_flux_gr.py +20 -32
convert_url_to_diffusers_flux_gr.py CHANGED
@@ -607,7 +607,8 @@ with torch.no_grad():
         for filepath in glob.glob(f"{path}/*.safetensors"):
             progress(0.25, desc=f"Processing temporary files: {str(filepath)}")
             print(f"Processing temporary files: {str(filepath)}")
-            sharded_sd = extract_normalized_flux_state_dict_unet(str(filepath), dtype, dequant)
+            sharded_sd = extract_norm_flux_module_sd(str(filepath), dtype, dequant,
+                                                     "Transformer", keys_flux_transformer)
             for k, v in sharded_sd.items():
                 sharded_sd[k] = v.to(device="cpu")
             save_file(sharded_sd, str(filepath))
@@ -622,55 +623,42 @@ with torch.no_grad():
             print(e)
         return sd
 
-def download_repo(repo_name, path, download_sf=False, progress=gr.Progress(track_tqdm=True)):
+def download_repo(repo_name, path, use_original=["vae", "text_encoder"], progress=gr.Progress(track_tqdm=True)):
     from huggingface_hub import snapshot_download
     print(f"Downloading {repo_name}.")
     try:
-        if download_sf:
+        if "text_encoder_2" not in use_original:
             snapshot_download(repo_id=repo_name, local_dir=path, ignore_patterns=["transformer/", "*.sft", ".*", "README*", "*.md", "*.index", "*.jpg", "*.png", "*.webp"])
         else:
             snapshot_download(repo_id=repo_name, local_dir=path, ignore_patterns=["transformer/", "text_encoder_2/", "*.sft", ".*", "README*", "*.md", "*.index", "*.jpg", "*.png", "*.webp"])
     except Exception as e:
         print(e)
 
-def copy_nontensor_files(from_path, to_path, copy_sf=False):
+def copy_nontensor_files(from_path, to_path, use_original=["vae", "text_encoder"]):
     import shutil
-    if copy_sf:
-        print(f"Copying non-tensor files {from_path} to {to_path}")
-        shutil.copytree(from_path, to_path, ignore=shutil.ignore_patterns("*.safetensors", "*.bin", "*.sft", ".*", "README*", "*.md", "*.index", "*.jpg", "*.png", "*.webp"), dirs_exist_ok=True)
+    if "text_encoder_2" in use_original:
         te_from = str(Path(from_path, "text_encoder_2"))
         te_to = str(Path(to_path, "text_encoder_2"))
         print(f"Copying Text Encoder 2 files {te_from} to {te_to}")
         shutil.copytree(te_from, te_to, ignore=shutil.ignore_patterns(".*", "README*", "*.md", "*.jpg", "*.png", "*.webp"), dirs_exist_ok=True)
+    if "text_encoder" in use_original:
         te1_from = str(Path(from_path, "text_encoder"))
         te1_to = str(Path(to_path, "text_encoder"))
         print(f"Copying Text Encoder 1 files {te1_from} to {te1_to}")
         shutil.copytree(te1_from, te1_to, ignore=shutil.ignore_patterns(".*", "README*", "*.md", "*.jpg", "*.png", "*.webp"), dirs_exist_ok=True)
-        tn2_from = str(Path(from_path, "tokenizer_2"))
-        tn2_to = str(Path(to_path, "tokenizer_2"))
-        print(f"Copying Tokenizer 2 files {tn2_from} to {tn2_to}")
-        shutil.copytree(tn2_from, tn2_to, ignore=shutil.ignore_patterns(".*", "README*", "*.md", "*.jpg", "*.png", "*.webp"), dirs_exist_ok=True)
+    if "vae" in use_original:
         vae_from = str(Path(from_path, "vae"))
         vae_to = str(Path(to_path, "vae"))
         print(f"Copying VAE files {vae_from} to {vae_to}")
         shutil.copytree(vae_from, vae_to, ignore=shutil.ignore_patterns(".*", "README*", "*.md", "*.jpg", "*.png", "*.webp"), dirs_exist_ok=True)
-    else:
-        print(f"Copying non-tensor files {from_path} to {to_path}")
-        shutil.copytree(from_path, to_path, ignore=shutil.ignore_patterns("*.safetensors", "*.bin", "*.sft", ".*", "README*", "*.md", "*.index", "*.jpg", "*.png", "*.webp"), dirs_exist_ok=True)
-        te1_from = str(Path(from_path, "text_encoder"))
-        te1_to = str(Path(to_path, "text_encoder"))
-        print(f"Copying Text Encoder 1 files {te1_from} to {te1_to}")
-        shutil.copytree(te1_from, te1_to, ignore=shutil.ignore_patterns(".*", "README*", "*.md", "*.jpg", "*.png", "*.webp"), dirs_exist_ok=True)
-        tn2_from = str(Path(from_path, "tokenizer_2"))
-        tn2_to = str(Path(to_path, "tokenizer_2"))
-        print(f"Copying Tokenizer 2 files {tn2_from} to {tn2_to}")
-        shutil.copytree(tn2_from, tn2_to, ignore=shutil.ignore_patterns(".*", "README*", "*.md", "*.jpg", "*.png", "*.webp"), dirs_exist_ok=True)
-        vae_from = str(Path(from_path, "vae"))
-        vae_to = str(Path(to_path, "vae"))
-        print(f"Copying VAE files {vae_from} to {vae_to}")
-        shutil.copytree(vae_from, vae_to, ignore=shutil.ignore_patterns(".*", "README*", "*.md", "*.jpg", "*.png", "*.webp"), dirs_exist_ok=True)
-
-def save_flux_other_diffusers(path: str, model_type: str = "dev", copy_te: bool = False, progress=gr.Progress(track_tqdm=True)):
+    tn2_from = str(Path(from_path, "tokenizer_2"))
+    tn2_to = str(Path(to_path, "tokenizer_2"))
+    print(f"Copying Tokenizer 2 files {tn2_from} to {tn2_to}")
+    shutil.copytree(tn2_from, tn2_to, ignore=shutil.ignore_patterns(".*", "README*", "*.md", "*.jpg", "*.png", "*.webp"), dirs_exist_ok=True)
+    print(f"Copying non-tensor files {from_path} to {to_path}")
+    shutil.copytree(from_path, to_path, ignore=shutil.ignore_patterns("*.safetensors", "*.bin", "*.sft", ".*", "README*", "*.md", "*.index", "*.jpg", "*.png", "*.webp"), dirs_exist_ok=True)
+
+def save_flux_other_diffusers(path: str, model_type: str = "dev", use_original: list = ["vae", "text_encoder"], progress=gr.Progress(track_tqdm=True)):
     import shutil
     progress(0, desc="Loading FLUX.1 Components.")
     print("Loading FLUX.1 Components.")
@@ -679,10 +667,10 @@ def save_flux_other_diffusers(path: str, model_type: str = "dev", copy_te: bool
     else: repo = flux_dev_repo
     os.makedirs(temppath, exist_ok=True)
     os.makedirs(path, exist_ok=True)
-    download_repo(repo, temppath, copy_te)
+    download_repo(repo, temppath, use_original)
     progress(0.5, desc="Saving FLUX.1 Components.")
     print("Saving FLUX.1 Components.")
-    copy_nontensor_files(temppath, path, copy_te)
+    copy_nontensor_files(temppath, path, use_original)
     shutil.rmtree(temppath)
 
 with torch.no_grad():
@@ -711,7 +699,6 @@ with torch.no_grad(): # Much lower memory consumption, but higher disk load
         vae_sd_pattern = "diffusion_pytorch_model{suffix}.safetensors"
         vae_sd_size = "10GB"
         metadata = {"format": "pt", **read_safetensors_metadata(loadpath)}
-        save_flux_other_diffusers(savepath, model_type, "text_encoder_2" in use_original)
         if "vae" not in use_original:
             vae_sd = extract_norm_flux_module_sd(loadpath, torch.bfloat16, dequant, "VAE",
                                                  keys_flux_vae)
@@ -744,6 +731,7 @@ with torch.no_grad(): # Much lower memory consumption, but higher disk load
         del unet_sd
         torch.cuda.empty_cache()
         gc.collect()
+        save_flux_other_diffusers(savepath, model_type, use_original)
 
 with torch.no_grad(): # lowest memory consumption, but higheest disk load
     def flux_to_diffusers_lowmem2(loadpath: str, savepath: str, dtype: torch.dtype = torch.bfloat16,
@@ -765,7 +753,6 @@ with torch.no_grad(): # lowest memory consumption, but higheest disk load
         vae_sd_pattern = "diffusion_pytorch_model{suffix}.safetensors"
         vae_sd_size = "10GB"
         metadata = {"format": "pt", **read_safetensors_metadata(loadpath)}
-        save_flux_other_diffusers(savepath, model_type, "text_encoder_2" in use_original)
        if "vae" not in use_original:
             vae_sd = extract_norm_flux_module_sd(loadpath, torch.bfloat16, dequant, "VAE",
                                                  keys_flux_vae)
@@ -799,6 +786,7 @@ with torch.no_grad(): # lowest memory consumption, but higheest disk load
         del unet_sd
         torch.cuda.empty_cache()
        gc.collect()
+        save_flux_other_diffusers(savepath, model_type, use_original)
 
 def convert_url_to_diffusers_flux(url, civitai_key="", is_upload_sf=False, data_type="bf16",
                                   model_type="dev", dequant=False, use_original=["vae", "text_encoder"],
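For reference, a minimal usage sketch of the refactored helpers after this commit. The function names and the use_original list are taken from the diff above; the output path and surrounding setup are hypothetical, and the helpers are assumed to be in scope (they live in convert_url_to_diffusers_flux_gr.py).

# Hypothetical usage sketch; "./flux_dev_out" is an example path, not from the commit.
use_original = ["vae", "text_encoder"]  # base-repo components to reuse instead of the converted ones

# download_repo() now keeps or skips text_encoder_2/ based on the use_original list
# (previously a boolean flag), and copy_nontensor_files() copies each of text_encoder_2,
# text_encoder, and vae only if it is listed, plus tokenizer_2 and the remaining
# non-tensor files unconditionally. The lowmem converters call this after the
# transformer shards are written rather than right after reading metadata.
save_flux_other_diffusers("./flux_dev_out", model_type="dev", use_original=use_original)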