guaguale committed
Commit db03e98
1 Parent(s): 98176e0

End of training

Files changed (39)
  1. README.md +22 -0
  2. checkpoint-100/optimizer.bin +3 -0
  3. checkpoint-100/random_states_0.pkl +3 -0
  4. checkpoint-100/scheduler.bin +3 -0
  5. checkpoint-100/unet/config.json +68 -0
  6. checkpoint-100/unet/diffusion_pytorch_model.bin +3 -0
  7. checkpoint-200/optimizer.bin +3 -0
  8. checkpoint-200/random_states_0.pkl +3 -0
  9. checkpoint-200/scheduler.bin +3 -0
  10. checkpoint-200/unet/config.json +68 -0
  11. checkpoint-200/unet/diffusion_pytorch_model.bin +3 -0
  12. checkpoint-300/optimizer.bin +3 -0
  13. checkpoint-300/random_states_0.pkl +3 -0
  14. checkpoint-300/scheduler.bin +3 -0
  15. checkpoint-300/unet/config.json +68 -0
  16. checkpoint-300/unet/diffusion_pytorch_model.bin +3 -0
  17. checkpoint-400/optimizer.bin +3 -0
  18. checkpoint-400/random_states_0.pkl +3 -0
  19. checkpoint-400/scheduler.bin +3 -0
  20. checkpoint-400/unet/config.json +68 -0
  21. checkpoint-400/unet/diffusion_pytorch_model.bin +3 -0
  22. logs/dreambooth/1688970869.4839497/events.out.tfevents.1688970869.aiplatform-wlf2-hi-66.idchb2az2.hb2.kwaidc.com.50946.1 +3 -0
  23. logs/dreambooth/1688970869.489733/hparams.yml +59 -0
  24. logs/dreambooth/1688972456.9792664/events.out.tfevents.1688972456.aiplatform-wlf2-hi-66.idchb2az2.hb2.kwaidc.com.56348.1 +3 -0
  25. logs/dreambooth/1688972457.007087/hparams.yml +59 -0
  26. logs/dreambooth/events.out.tfevents.1688970869.aiplatform-wlf2-hi-66.idchb2az2.hb2.kwaidc.com.50946.0 +3 -0
  27. logs/dreambooth/events.out.tfevents.1688972456.aiplatform-wlf2-hi-66.idchb2az2.hb2.kwaidc.com.56348.0 +3 -0
  28. model_index.json +33 -0
  29. scheduler/scheduler_config.json +16 -0
  30. text_encoder/config.json +25 -0
  31. text_encoder/pytorch_model.bin +3 -0
  32. tokenizer/clip_tokenizer_roberta.py +246 -0
  33. tokenizer/special_tokens_map.json +7 -0
  34. tokenizer/tokenizer_config.json +22 -0
  35. tokenizer/vocab.txt +0 -0
  36. unet/config.json +68 -0
  37. unet/diffusion_pytorch_model.bin +3 -0
  38. vae/config.json +31 -0
  39. vae/diffusion_pytorch_model.bin +3 -0
README.md ADDED
@@ -0,0 +1,22 @@
+
+ ---
+ license: creativeml-openrail-m
+ base_model: /mmu_vcg_ssd/liuhao12/workspace/1_diffusion/models/sd-zhuxiongwei-320nodes-task3_2-0612/checkpoint-28000/
+ instance_prompt: a male idol sks with blonde hair, wearing a black jacket and fringes on the sides of the jacket
+ tags:
+ - stable-diffusion
+ - stable-diffusion-diffusers
+ - text-to-image
+ - diffusers
+ - dreambooth
+ inference: true
+ ---
+
+ # DreamBooth - guaguale/model_kthv_vcg
+
+ This is a DreamBooth model derived from /mmu_vcg_ssd/liuhao12/workspace/1_diffusion/models/sd-zhuxiongwei-320nodes-task3_2-0612/checkpoint-28000/. The weights were trained on the prompt "a male idol sks with blonde hair, wearing a black jacket and fringes on the sides of the jacket" using [DreamBooth](https://dreambooth.github.io/).
+ You can find some example images below.
+
+
+
+ DreamBooth for the text encoder was enabled: False.
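Editor's note: for readers who want to try the resulting checkpoint, here is a minimal inference sketch. It is not part of the commit; it assumes the weights are published on the Hub as guaguale/model_kthv_vcg (per the README title) and that the custom tokenizer registered via auto_map in tokenizer/tokenizer_config.json is fetched with trust_remote_code=True.

```python
# Hypothetical usage sketch, not part of this commit.
import torch
from transformers import AutoTokenizer
from diffusers import StableDiffusionPipeline

repo = "guaguale/model_kthv_vcg"  # assumed Hub id, taken from the README title

# The custom CLIPTokenizerRoberta lives in tokenizer/clip_tokenizer_roberta.py,
# so remote code must be trusted for AutoTokenizer to import it.
tokenizer = AutoTokenizer.from_pretrained(repo, subfolder="tokenizer", trust_remote_code=True)

# Passing the tokenizer explicitly skips the non-standard tokenizer entry in
# model_index.json; the rest of the components load as regular diffusers classes.
pipe = StableDiffusionPipeline.from_pretrained(
    repo,
    tokenizer=tokenizer,
    torch_dtype=torch.float16,
    safety_checker=None,  # model_index.json ships without a safety checker
).to("cuda")

prompt = ("a male idol sks with blonde hair, wearing a black jacket "
          "and fringes on the sides of the jacket")
image = pipe(prompt, num_inference_steps=30).images[0]
image.save("kthv_sample.png")
```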
checkpoint-100/optimizer.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ef4cdbdf574dd96ac31e908ebfcc46dd0f8fc0c8c33a46139a078aec4eb42020
+ size 6927867155
checkpoint-100/random_states_0.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eda0380f8ecd0ad2f102ef43f43221c26981ea704c8c98265f822cb662511803
+ size 21795
checkpoint-100/scheduler.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:878061482b24a0d2dfca88a7fe89a6bc573620995897edea291db23629a0e128
+ size 563
checkpoint-100/unet/config.json ADDED
@@ -0,0 +1,68 @@
+ {
+   "_class_name": "UNet2DConditionModel",
+   "_diffusers_version": "0.18.0.dev0",
+   "_name_or_path": "/mmu_vcg_ssd/liuhao12/workspace/1_diffusion/models/sd-zhuxiongwei-320nodes-task3_2-0612/checkpoint-28000/",
+   "act_fn": "silu",
+   "addition_embed_type": null,
+   "addition_embed_type_num_heads": 64,
+   "attention_head_dim": [
+     5,
+     10,
+     20,
+     20
+   ],
+   "block_out_channels": [
+     320,
+     640,
+     1280,
+     1280
+   ],
+   "center_input_sample": false,
+   "class_embed_type": null,
+   "class_embeddings_concat": false,
+   "conv_in_kernel": 3,
+   "conv_out_kernel": 3,
+   "cross_attention_dim": 1024,
+   "cross_attention_norm": null,
+   "down_block_types": [
+     "CrossAttnDownBlock2D",
+     "CrossAttnDownBlock2D",
+     "CrossAttnDownBlock2D",
+     "DownBlock2D"
+   ],
+   "downsample_padding": 1,
+   "dual_cross_attention": false,
+   "encoder_hid_dim": null,
+   "encoder_hid_dim_type": null,
+   "flip_sin_to_cos": true,
+   "freq_shift": 0,
+   "in_channels": 4,
+   "layers_per_block": 2,
+   "mid_block_only_cross_attention": null,
+   "mid_block_scale_factor": 1,
+   "mid_block_type": "UNetMidBlock2DCrossAttn",
+   "norm_eps": 1e-05,
+   "norm_num_groups": 32,
+   "num_attention_heads": null,
+   "num_class_embeds": null,
+   "only_cross_attention": false,
+   "out_channels": 4,
+   "projection_class_embeddings_input_dim": null,
+   "resnet_out_scale_factor": 1.0,
+   "resnet_skip_time_act": false,
+   "resnet_time_scale_shift": "default",
+   "sample_size": 96,
+   "time_cond_proj_dim": null,
+   "time_embedding_act_fn": null,
+   "time_embedding_dim": null,
+   "time_embedding_type": "positional",
+   "timestep_post_act": null,
+   "up_block_types": [
+     "UpBlock2D",
+     "CrossAttnUpBlock2D",
+     "CrossAttnUpBlock2D",
+     "CrossAttnUpBlock2D"
+   ],
+   "upcast_attention": true,
+   "use_linear_projection": true
+ }
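Editor's note: for orientation (my gloss, not part of the commit), cross_attention_dim 1024, upcast_attention, and a latent sample_size of 96 (96 × 8 = 768 pixels) match the Stable Diffusion 2.x 768-v UNet shape. A small illustrative sketch of rebuilding an untrained UNet from this config alone:

```python
# Illustrative only: instantiate the architecture described by this config,
# assuming a local clone of the repo so the json path resolves.
from diffusers import UNet2DConditionModel

config = UNet2DConditionModel.load_config("checkpoint-100/unet/config.json")
unet = UNet2DConditionModel.from_config(config)  # random weights, committed shape
print(unet.config.sample_size * 8)  # 768: native pixel resolution of this UNet
```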
checkpoint-100/unet/diffusion_pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:55a76b1bdb4d8105851d70eba86e70f42359483ed7e992d7b0419e7c86254e19
+ size 3463934693
checkpoint-200/optimizer.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8bd1fe38209d776217702bd9a7a433f4613c06ad38de92e9160980e4af1c5231
+ size 6927867155
checkpoint-200/random_states_0.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dc22051193f62ee40964548a14b4dd7039a425f18717df0a53432df32124302b
+ size 21795
checkpoint-200/scheduler.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3c00027191b55e858392f3d774de4170f8e7fd7f13505e8196a7ca5732db61ee
+ size 563
checkpoint-200/unet/config.json ADDED
@@ -0,0 +1,68 @@
+ {
+   "_class_name": "UNet2DConditionModel",
+   "_diffusers_version": "0.18.0.dev0",
+   "_name_or_path": "/mmu_vcg_ssd/liuhao12/workspace/1_diffusion/models/sd-zhuxiongwei-320nodes-task3_2-0612/checkpoint-28000/",
+   "act_fn": "silu",
+   "addition_embed_type": null,
+   "addition_embed_type_num_heads": 64,
+   "attention_head_dim": [
+     5,
+     10,
+     20,
+     20
+   ],
+   "block_out_channels": [
+     320,
+     640,
+     1280,
+     1280
+   ],
+   "center_input_sample": false,
+   "class_embed_type": null,
+   "class_embeddings_concat": false,
+   "conv_in_kernel": 3,
+   "conv_out_kernel": 3,
+   "cross_attention_dim": 1024,
+   "cross_attention_norm": null,
+   "down_block_types": [
+     "CrossAttnDownBlock2D",
+     "CrossAttnDownBlock2D",
+     "CrossAttnDownBlock2D",
+     "DownBlock2D"
+   ],
+   "downsample_padding": 1,
+   "dual_cross_attention": false,
+   "encoder_hid_dim": null,
+   "encoder_hid_dim_type": null,
+   "flip_sin_to_cos": true,
+   "freq_shift": 0,
+   "in_channels": 4,
+   "layers_per_block": 2,
+   "mid_block_only_cross_attention": null,
+   "mid_block_scale_factor": 1,
+   "mid_block_type": "UNetMidBlock2DCrossAttn",
+   "norm_eps": 1e-05,
+   "norm_num_groups": 32,
+   "num_attention_heads": null,
+   "num_class_embeds": null,
+   "only_cross_attention": false,
+   "out_channels": 4,
+   "projection_class_embeddings_input_dim": null,
+   "resnet_out_scale_factor": 1.0,
+   "resnet_skip_time_act": false,
+   "resnet_time_scale_shift": "default",
+   "sample_size": 96,
+   "time_cond_proj_dim": null,
+   "time_embedding_act_fn": null,
+   "time_embedding_dim": null,
+   "time_embedding_type": "positional",
+   "timestep_post_act": null,
+   "up_block_types": [
+     "UpBlock2D",
+     "CrossAttnUpBlock2D",
+     "CrossAttnUpBlock2D",
+     "CrossAttnUpBlock2D"
+   ],
+   "upcast_attention": true,
+   "use_linear_projection": true
+ }
checkpoint-200/unet/diffusion_pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fbb07e1a1d3cfb0072d7a732cbb11b4570df7a48b419b45c899f6aa8f6db140c
+ size 3463934693
checkpoint-300/optimizer.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d1ef2a432b652fe07a5c531732e55c760081d17c6fd06900fd7c096caf47945f
+ size 6927867155
checkpoint-300/random_states_0.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:812f07e2d774cd786218853bf6d2e2760179215c695aa4fcb5cecd8141a3421d
+ size 21795
checkpoint-300/scheduler.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d36051b747762d98e03d466e94fa9a2058769c7532f96eee1210bfd1f7d36e2f
+ size 563
checkpoint-300/unet/config.json ADDED
@@ -0,0 +1,68 @@
+ {
+   "_class_name": "UNet2DConditionModel",
+   "_diffusers_version": "0.18.0.dev0",
+   "_name_or_path": "/mmu_vcg_ssd/liuhao12/workspace/1_diffusion/models/sd-zhuxiongwei-320nodes-task3_2-0612/checkpoint-28000/",
+   "act_fn": "silu",
+   "addition_embed_type": null,
+   "addition_embed_type_num_heads": 64,
+   "attention_head_dim": [
+     5,
+     10,
+     20,
+     20
+   ],
+   "block_out_channels": [
+     320,
+     640,
+     1280,
+     1280
+   ],
+   "center_input_sample": false,
+   "class_embed_type": null,
+   "class_embeddings_concat": false,
+   "conv_in_kernel": 3,
+   "conv_out_kernel": 3,
+   "cross_attention_dim": 1024,
+   "cross_attention_norm": null,
+   "down_block_types": [
+     "CrossAttnDownBlock2D",
+     "CrossAttnDownBlock2D",
+     "CrossAttnDownBlock2D",
+     "DownBlock2D"
+   ],
+   "downsample_padding": 1,
+   "dual_cross_attention": false,
+   "encoder_hid_dim": null,
+   "encoder_hid_dim_type": null,
+   "flip_sin_to_cos": true,
+   "freq_shift": 0,
+   "in_channels": 4,
+   "layers_per_block": 2,
+   "mid_block_only_cross_attention": null,
+   "mid_block_scale_factor": 1,
+   "mid_block_type": "UNetMidBlock2DCrossAttn",
+   "norm_eps": 1e-05,
+   "norm_num_groups": 32,
+   "num_attention_heads": null,
+   "num_class_embeds": null,
+   "only_cross_attention": false,
+   "out_channels": 4,
+   "projection_class_embeddings_input_dim": null,
+   "resnet_out_scale_factor": 1.0,
+   "resnet_skip_time_act": false,
+   "resnet_time_scale_shift": "default",
+   "sample_size": 96,
+   "time_cond_proj_dim": null,
+   "time_embedding_act_fn": null,
+   "time_embedding_dim": null,
+   "time_embedding_type": "positional",
+   "timestep_post_act": null,
+   "up_block_types": [
+     "UpBlock2D",
+     "CrossAttnUpBlock2D",
+     "CrossAttnUpBlock2D",
+     "CrossAttnUpBlock2D"
+   ],
+   "upcast_attention": true,
+   "use_linear_projection": true
+ }
checkpoint-300/unet/diffusion_pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:125105bdccd8a7c1de99cb3b45a4e5f231d478fc3defe7ef7cd7cd5667a2f503
+ size 3463934693
checkpoint-400/optimizer.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:53ced7b3f46a63b3d4dea4a19e28ce766ce05f410aeb93f217c71ed616485c91
+ size 6927867155
checkpoint-400/random_states_0.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3adc458a7296ff6fce79e92035fd359d86973839b4c13cdfe54d2821b4228d40
+ size 21795
checkpoint-400/scheduler.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e83682523b5e77c0572155519eacc24a8feb77bb159e1dfe8e440f68f187fd1e
+ size 563
checkpoint-400/unet/config.json ADDED
@@ -0,0 +1,68 @@
+ {
+   "_class_name": "UNet2DConditionModel",
+   "_diffusers_version": "0.18.0.dev0",
+   "_name_or_path": "/mmu_vcg_ssd/liuhao12/workspace/1_diffusion/models/sd-zhuxiongwei-320nodes-task3_2-0612/checkpoint-28000/",
+   "act_fn": "silu",
+   "addition_embed_type": null,
+   "addition_embed_type_num_heads": 64,
+   "attention_head_dim": [
+     5,
+     10,
+     20,
+     20
+   ],
+   "block_out_channels": [
+     320,
+     640,
+     1280,
+     1280
+   ],
+   "center_input_sample": false,
+   "class_embed_type": null,
+   "class_embeddings_concat": false,
+   "conv_in_kernel": 3,
+   "conv_out_kernel": 3,
+   "cross_attention_dim": 1024,
+   "cross_attention_norm": null,
+   "down_block_types": [
+     "CrossAttnDownBlock2D",
+     "CrossAttnDownBlock2D",
+     "CrossAttnDownBlock2D",
+     "DownBlock2D"
+   ],
+   "downsample_padding": 1,
+   "dual_cross_attention": false,
+   "encoder_hid_dim": null,
+   "encoder_hid_dim_type": null,
+   "flip_sin_to_cos": true,
+   "freq_shift": 0,
+   "in_channels": 4,
+   "layers_per_block": 2,
+   "mid_block_only_cross_attention": null,
+   "mid_block_scale_factor": 1,
+   "mid_block_type": "UNetMidBlock2DCrossAttn",
+   "norm_eps": 1e-05,
+   "norm_num_groups": 32,
+   "num_attention_heads": null,
+   "num_class_embeds": null,
+   "only_cross_attention": false,
+   "out_channels": 4,
+   "projection_class_embeddings_input_dim": null,
+   "resnet_out_scale_factor": 1.0,
+   "resnet_skip_time_act": false,
+   "resnet_time_scale_shift": "default",
+   "sample_size": 96,
+   "time_cond_proj_dim": null,
+   "time_embedding_act_fn": null,
+   "time_embedding_dim": null,
+   "time_embedding_type": "positional",
+   "timestep_post_act": null,
+   "up_block_types": [
+     "UpBlock2D",
+     "CrossAttnUpBlock2D",
+     "CrossAttnUpBlock2D",
+     "CrossAttnUpBlock2D"
+   ],
+   "upcast_attention": true,
+   "use_linear_projection": true
+ }
checkpoint-400/unet/diffusion_pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:131c3d388d7a4965f8dee913869a8fdfa5fd95c7ffd2809ae53fe5278d185139
+ size 3463934693
logs/dreambooth/1688970869.4839497/events.out.tfevents.1688970869.aiplatform-wlf2-hi-66.idchb2az2.hb2.kwaidc.com.50946.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:373ad5271a35642666e04ff8b6674a1b907d8d49c0628b4cab2c5ab86eace634
+ size 2822
logs/dreambooth/1688970869.489733/hparams.yml ADDED
@@ -0,0 +1,59 @@
+ adam_beta1: 0.9
+ adam_beta2: 0.999
+ adam_epsilon: 1.0e-08
+ adam_weight_decay: 0.01
+ allow_tf32: false
+ center_crop: false
+ checkpointing_steps: 100
+ checkpoints_total_limit: null
+ class_data_dir: null
+ class_labels_conditioning: null
+ class_prompt: null
+ dataloader_num_workers: 0
+ enable_xformers_memory_efficient_attention: false
+ gradient_accumulation_steps: 1
+ gradient_checkpointing: false
+ hub_model_id: null
+ hub_token: null
+ instance_data_dir: kthv
+ instance_prompt: a male idol sks with blonde hair, wearing a black jacket and fringes
+   on the sides of the jacket
+ learning_rate: 5.0e-06
+ local_rank: 0
+ logging_dir: logs
+ lr_num_cycles: 1
+ lr_power: 1.0
+ lr_scheduler: constant
+ lr_warmup_steps: 0
+ max_grad_norm: 1.0
+ max_train_steps: 400
+ mixed_precision: null
+ num_class_images: 100
+ num_train_epochs: 400
+ num_validation_images: 4
+ offset_noise: false
+ output_dir: model_kthv_vcg
+ pre_compute_text_embeddings: false
+ pretrained_model_name_or_path: /mmu_vcg_ssd/liuhao12/workspace/1_diffusion/models/sd-zhuxiongwei-320nodes-task3_2-0612/checkpoint-28000/
+ prior_generation_precision: null
+ prior_loss_weight: 1.0
+ push_to_hub: true
+ report_to: tensorboard
+ resolution: 512
+ resume_from_checkpoint: null
+ revision: null
+ sample_batch_size: 4
+ scale_lr: false
+ seed: null
+ set_grads_to_none: false
+ skip_save_text_encoder: false
+ text_encoder_use_attention_mask: false
+ tokenizer_max_length: null
+ tokenizer_name: null
+ train_batch_size: 2
+ train_text_encoder: false
+ use_8bit_adam: false
+ validation_images: null
+ validation_prompt: null
+ validation_steps: 100
+ with_prior_preservation: false
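Editor's note: these hyperparameters map one-to-one onto the CLI flags of the diffusers DreamBooth example script. As a hedged reconstruction (the actual launch command is not recorded in the commit), assuming the stock examples/dreambooth/train_dreambooth.py, the run could have been started like this:

```python
# Hypothetical reconstruction of the launch command implied by hparams.yml.
import subprocess

cmd = [
    "accelerate", "launch", "train_dreambooth.py",
    "--pretrained_model_name_or_path=/mmu_vcg_ssd/liuhao12/workspace/1_diffusion/models/sd-zhuxiongwei-320nodes-task3_2-0612/checkpoint-28000/",
    "--instance_data_dir=kthv",
    ("--instance_prompt=a male idol sks with blonde hair, wearing a black "
     "jacket and fringes on the sides of the jacket"),
    "--output_dir=model_kthv_vcg",
    "--resolution=512",
    "--train_batch_size=2",
    "--learning_rate=5e-6",
    "--lr_scheduler=constant",
    "--lr_warmup_steps=0",
    "--max_train_steps=400",
    "--checkpointing_steps=100",  # matches the checkpoint-100..400 dirs above
    "--push_to_hub",
]
subprocess.run(cmd, check=True)
```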
logs/dreambooth/1688972456.9792664/events.out.tfevents.1688972456.aiplatform-wlf2-hi-66.idchb2az2.hb2.kwaidc.com.56348.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8bf8d1fd35c679b59462d5a020dceb9733d3c67fbaf088f51509f999a0ff9b9d
+ size 2822
logs/dreambooth/1688972457.007087/hparams.yml ADDED
@@ -0,0 +1,59 @@
+ adam_beta1: 0.9
+ adam_beta2: 0.999
+ adam_epsilon: 1.0e-08
+ adam_weight_decay: 0.01
+ allow_tf32: false
+ center_crop: false
+ checkpointing_steps: 100
+ checkpoints_total_limit: null
+ class_data_dir: null
+ class_labels_conditioning: null
+ class_prompt: null
+ dataloader_num_workers: 0
+ enable_xformers_memory_efficient_attention: false
+ gradient_accumulation_steps: 1
+ gradient_checkpointing: false
+ hub_model_id: null
+ hub_token: null
+ instance_data_dir: kthv
+ instance_prompt: a male idol sks with blonde hair, wearing a black jacket and fringes
+   on the sides of the jacket
+ learning_rate: 5.0e-06
+ local_rank: 0
+ logging_dir: logs
+ lr_num_cycles: 1
+ lr_power: 1.0
+ lr_scheduler: constant
+ lr_warmup_steps: 0
+ max_grad_norm: 1.0
+ max_train_steps: 400
+ mixed_precision: null
+ num_class_images: 100
+ num_train_epochs: 400
+ num_validation_images: 4
+ offset_noise: false
+ output_dir: model_kthv_vcg
+ pre_compute_text_embeddings: false
+ pretrained_model_name_or_path: /mmu_vcg_ssd/liuhao12/workspace/1_diffusion/models/sd-zhuxiongwei-320nodes-task3_2-0612/checkpoint-28000/
+ prior_generation_precision: null
+ prior_loss_weight: 1.0
+ push_to_hub: true
+ report_to: tensorboard
+ resolution: 512
+ resume_from_checkpoint: null
+ revision: null
+ sample_batch_size: 4
+ scale_lr: false
+ seed: null
+ set_grads_to_none: false
+ skip_save_text_encoder: false
+ text_encoder_use_attention_mask: false
+ tokenizer_max_length: null
+ tokenizer_name: null
+ train_batch_size: 2
+ train_text_encoder: false
+ use_8bit_adam: false
+ validation_images: null
+ validation_prompt: null
+ validation_steps: 100
+ with_prior_preservation: false
logs/dreambooth/events.out.tfevents.1688970869.aiplatform-wlf2-hi-66.idchb2az2.hb2.kwaidc.com.50946.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2c93326165a4799324c74277d55c5c8264349ace45d481d340c06dbc1d9f8da4
+ size 33434
logs/dreambooth/events.out.tfevents.1688972456.aiplatform-wlf2-hi-66.idchb2az2.hb2.kwaidc.com.56348.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fb33c6d713daca8944b8339903fe6d3b11e6ed8e8b11e6d0c80040b9fde7b50a
+ size 33434
model_index.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "_class_name": "StableDiffusionPipeline",
+   "_diffusers_version": "0.18.0.dev0",
+   "feature_extractor": [
+     null,
+     null
+   ],
+   "requires_safety_checker": false,
+   "safety_checker": [
+     null,
+     null
+   ],
+   "scheduler": [
+     "diffusers",
+     "EulerDiscreteScheduler"
+   ],
+   "text_encoder": [
+     "transformers",
+     "CLIPTextModel"
+   ],
+   "tokenizer": [
+     "transformers_modules.clip_tokenizer_roberta",
+     "CLIPTokenizerRoberta"
+   ],
+   "unet": [
+     "diffusers",
+     "UNet2DConditionModel"
+   ],
+   "vae": [
+     "diffusers",
+     "AutoencoderKL"
+   ]
+ }
scheduler/scheduler_config.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "_class_name": "EulerDiscreteScheduler",
+   "_diffusers_version": "0.18.0.dev0",
+   "beta_end": 0.012,
+   "beta_schedule": "scaled_linear",
+   "beta_start": 0.00085,
+   "clip_sample": false,
+   "interpolation_type": "linear",
+   "num_train_timesteps": 1000,
+   "prediction_type": "v_prediction",
+   "set_alpha_to_one": false,
+   "skip_prk_steps": true,
+   "steps_offset": 1,
+   "trained_betas": null,
+   "use_karras_sigmas": false
+ }
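Editor's note: the prediction_type of "v_prediction" matters when swapping samplers, since a scheduler configured for epsilon prediction will produce noise-like output with these weights. A small illustrative sketch (not part of the commit) of loading the shipped scheduler and checking this setting:

```python
# Illustrative only: load the committed scheduler config and inspect it.
from diffusers import EulerDiscreteScheduler

scheduler = EulerDiscreteScheduler.from_pretrained(
    "guaguale/model_kthv_vcg", subfolder="scheduler"  # assumed Hub id
)
assert scheduler.config.prediction_type == "v_prediction"
print(scheduler.config.num_train_timesteps)  # 1000
```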
text_encoder/config.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "_name_or_path": "/mmu_vcg_ssd/liuhao12/workspace/1_diffusion/models/sd-zhuxiongwei-320nodes-task3_2-0612/checkpoint-28000/text_encoder/",
+   "architectures": [
+     "CLIPTextModel"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 0,
+   "dropout": 0.0,
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_size": 1024,
+   "initializer_factor": 1.0,
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 77,
+   "model_type": "clip_text_model",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "pad_token_id": 1,
+   "projection_dim": 512,
+   "torch_dtype": "float32",
+   "transformers_version": "4.28.0",
+   "vocab_size": 49408
+ }
text_encoder/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aa8eed19b546f8b2b13c44c2dfe4e6be358dfcf3fa396ddf596979976c61814f
+ size 1412070305
tokenizer/clip_tokenizer_roberta.py ADDED
@@ -0,0 +1,246 @@
+ from transformers.models.bert.tokenization_bert import *
+ import os
+
+
+ class CLIPTokenizerRoberta(PreTrainedTokenizer):
+     r"""
+     Construct a BERT tokenizer. Based on WordPiece.
+
+     This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+     this superclass for more information regarding those methods.
+
+     Args:
+         vocab_file (`str`):
+             File containing the vocabulary.
+         do_lower_case (`bool`, *optional*, defaults to `True`):
+             Whether or not to lowercase the input when tokenizing.
+         do_basic_tokenize (`bool`, *optional*, defaults to `True`):
+             Whether or not to do basic tokenization before WordPiece.
+         never_split (`Iterable`, *optional*):
+             Collection of tokens which will never be split during tokenization. Only has an effect when
+             `do_basic_tokenize=True`
+         unk_token (`str`, *optional*, defaults to `"[UNK]"`):
+             The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+             token instead.
+         sep_token (`str`, *optional*, defaults to `"[SEP]"`):
+             The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+             sequence classification or for a text and a question for question answering. It is also used as the last
+             token of a sequence built with special tokens.
+         pad_token (`str`, *optional*, defaults to `"[PAD]"`):
+             The token used for padding, for example when batching sequences of different lengths.
+         cls_token (`str`, *optional*, defaults to `"[CLS]"`):
+             The classifier token which is used when doing sequence classification (classification of the whole sequence
+             instead of per-token classification). It is the first token of the sequence when built with special tokens.
+         mask_token (`str`, *optional*, defaults to `"[MASK]"`):
+             The token used for masking values. This is the token used when training this model with masked language
+             modeling. This is the token which the model will try to predict.
+         tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
+             Whether or not to tokenize Chinese characters.
+
+             This should likely be deactivated for Japanese (see this
+             [issue](https://github.com/huggingface/transformers/issues/328)).
+         strip_accents (`bool`, *optional*):
+             Whether or not to strip all accents. If this option is not specified, then it will be determined by the
+             value for `lowercase` (as in the original BERT).
+     """
+
+     vocab_files_names = VOCAB_FILES_NAMES
+     pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+     pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
+     max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+
+     def __init__(
+         self,
+         vocab_file,
+         do_lower_case=True,
+         do_basic_tokenize=True,
+         never_split=None,
+         unk_token="[UNK]",
+         sep_token="[SEP]",
+         pad_token="[PAD]",
+         cls_token="[CLS]",
+         mask_token="[MASK]",
+         tokenize_chinese_chars=True,
+         strip_accents=None,
+         **kwargs
+     ):
+         super().__init__(
+             do_lower_case=do_lower_case,
+             do_basic_tokenize=do_basic_tokenize,
+             never_split=never_split,
+             unk_token=unk_token,
+             sep_token=sep_token,
+             pad_token=pad_token,
+             cls_token=cls_token,
+             mask_token=mask_token,
+             tokenize_chinese_chars=tokenize_chinese_chars,
+             strip_accents=strip_accents,
+             **kwargs,
+         )
+
+         if not os.path.isfile(vocab_file):
+             raise ValueError(
+                 f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
+                 " model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
+             )
+         self.vocab = load_vocab(vocab_file)
+         self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
+         self.do_basic_tokenize = do_basic_tokenize
+         if do_basic_tokenize:
+             self.basic_tokenizer = BasicTokenizer(
+                 do_lower_case=do_lower_case,
+                 never_split=never_split,
+                 tokenize_chinese_chars=tokenize_chinese_chars,
+                 strip_accents=strip_accents,
+             )
+         self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token)
+
+     @property
+     def do_lower_case(self):
+         return self.basic_tokenizer.do_lower_case
+
+     @property
+     def vocab_size(self):
+         return len(self.vocab)
+
+     def get_vocab(self):
+         return dict(self.vocab, **self.added_tokens_encoder)
+
+     def _tokenize(self, text):
+         split_tokens = []
+         if self.do_basic_tokenize:
+             for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):
+
+                 # If the token is part of the never_split set
+                 if token in self.basic_tokenizer.never_split:
+                     split_tokens.append(token)
+                 else:
+                     split_tokens += self.wordpiece_tokenizer.tokenize(token)
+         else:
+             split_tokens = self.wordpiece_tokenizer.tokenize(text)
+         return split_tokens
+
+     def _convert_token_to_id(self, token):
+         """Converts a token (str) in an id using the vocab."""
+         return self.vocab.get(token, self.vocab.get(self.unk_token))
+
+     def _convert_id_to_token(self, index):
+         """Converts an index (integer) in a token (str) using the vocab."""
+         return self.ids_to_tokens.get(index, self.unk_token)
+
+     def convert_tokens_to_string(self, tokens):
+         """Converts a sequence of tokens (string) in a single string."""
+         out_string = " ".join(tokens).replace(" ##", "").strip()
+         return out_string
+
+     def build_inputs_with_special_tokens(
+         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+     ) -> List[int]:
+         """
+         Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
+         adding special tokens. A BERT sequence has the following format:
+
+         - single sequence: `[CLS] X [SEP]`
+         - pair of sequences: `[CLS] A [SEP] B [SEP]`
+
+         Args:
+             token_ids_0 (`List[int]`):
+                 List of IDs to which the special tokens will be added.
+             token_ids_1 (`List[int]`, *optional*):
+                 Optional second list of IDs for sequence pairs.
+
+         Returns:
+             `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+         """
+         sep = [49407]
+         cls = [49406]
+
+         if token_ids_1 is None:
+             return cls + token_ids_0 + sep
+         # return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+         # cls = [self.cls_token_id]
+         # sep = [self.sep_token_id]
+
+         return cls + token_ids_0 + sep + token_ids_1 + sep
+
+     def get_special_tokens_mask(
+         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None,
+         already_has_special_tokens: bool = False
+     ) -> List[int]:
+         """
+         Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+         special tokens using the tokenizer `prepare_for_model` method.
+
+         Args:
+             token_ids_0 (`List[int]`):
+                 List of IDs.
+             token_ids_1 (`List[int]`, *optional*):
+                 Optional second list of IDs for sequence pairs.
+             already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+                 Whether or not the token list is already formatted with special tokens for the model.
+
+         Returns:
+             `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+         """
+
+         if already_has_special_tokens:
+             return super().get_special_tokens_mask(
+                 token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+             )
+
+         if token_ids_1 is not None:
+             return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
+         return [1] + ([0] * len(token_ids_0)) + [1]
+
+     def create_token_type_ids_from_sequences(
+         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+     ) -> List[int]:
+         """
+         Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
+         pair mask has the following format:
+
+         ```
+         0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+         | first sequence    | second sequence |
+         ```
+
+         If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
+
+         Args:
+             token_ids_0 (`List[int]`):
+                 List of IDs.
+             token_ids_1 (`List[int]`, *optional*):
+                 Optional second list of IDs for sequence pairs.
+
+         Returns:
+             `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+         """
+         # sep = [self.sep_token_id]
+         # cls = [self.cls_token_id]
+         sep = [49407]
+         cls = [49406]
+         if token_ids_1 is None:
+             return len(cls + token_ids_0 + sep) * [0]
+         return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
+
+     def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+         index = 0
+         if os.path.isdir(save_directory):
+             vocab_file = os.path.join(
+                 save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+             )
+         else:
+             vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
+         with open(vocab_file, "w", encoding="utf-8") as writer:
+             for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
+                 if index != token_index:
+                     logger.warning(
+                         f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
+                         " Please check that the vocabulary is not corrupted!"
+                     )
+                     index = token_index
+                 writer.write(token + "\n")
+                 index += 1
+         return (vocab_file,)
+
+
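Editor's note: this class is essentially BertTokenizer (WordPiece over tokenizer/vocab.txt) with CLIP's BOS/EOS ids 49406/49407 hard-coded when building model inputs; it also follows the pre-4.34 transformers pattern of calling super().__init__() before loading the vocab, consistent with the transformers_version of 4.28.0 pinned in text_encoder/config.json. An illustrative usage sketch (not part of the commit):

```python
# Illustrative only: exercise CLIPTokenizerRoberta directly, assuming this is
# run from the tokenizer/ directory of a local clone so that the module and
# vocab.txt are both on the path.
from clip_tokenizer_roberta import CLIPTokenizerRoberta

tok = CLIPTokenizerRoberta("vocab.txt", model_max_length=77)
ids = tok("a male idol sks with blonde hair")["input_ids"]
# WordPiece ids in the middle, CLIP-style special ids at the ends:
assert ids[0] == 49406 and ids[-1] == 49407
```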
tokenizer/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
tokenizer/tokenizer_config.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "auto_map": {
+     "AutoTokenizer": [
+       "clip_tokenizer_roberta.CLIPTokenizerRoberta",
+       null
+     ]
+   },
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_basic_tokenize": true,
+   "do_lower_case": true,
+   "mask_token": "[MASK]",
+   "model_max_length": 77,
+   "never_split": null,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "CLIPTokenizerRoberta",
+   "unk_token": "[UNK]",
+   "use_fast": true
+ }
tokenizer/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
unet/config.json ADDED
@@ -0,0 +1,68 @@
+ {
+   "_class_name": "UNet2DConditionModel",
+   "_diffusers_version": "0.18.0.dev0",
+   "_name_or_path": "/mmu_vcg_ssd/liuhao12/workspace/1_diffusion/models/sd-zhuxiongwei-320nodes-task3_2-0612/checkpoint-28000/",
+   "act_fn": "silu",
+   "addition_embed_type": null,
+   "addition_embed_type_num_heads": 64,
+   "attention_head_dim": [
+     5,
+     10,
+     20,
+     20
+   ],
+   "block_out_channels": [
+     320,
+     640,
+     1280,
+     1280
+   ],
+   "center_input_sample": false,
+   "class_embed_type": null,
+   "class_embeddings_concat": false,
+   "conv_in_kernel": 3,
+   "conv_out_kernel": 3,
+   "cross_attention_dim": 1024,
+   "cross_attention_norm": null,
+   "down_block_types": [
+     "CrossAttnDownBlock2D",
+     "CrossAttnDownBlock2D",
+     "CrossAttnDownBlock2D",
+     "DownBlock2D"
+   ],
+   "downsample_padding": 1,
+   "dual_cross_attention": false,
+   "encoder_hid_dim": null,
+   "encoder_hid_dim_type": null,
+   "flip_sin_to_cos": true,
+   "freq_shift": 0,
+   "in_channels": 4,
+   "layers_per_block": 2,
+   "mid_block_only_cross_attention": null,
+   "mid_block_scale_factor": 1,
+   "mid_block_type": "UNetMidBlock2DCrossAttn",
+   "norm_eps": 1e-05,
+   "norm_num_groups": 32,
+   "num_attention_heads": null,
+   "num_class_embeds": null,
+   "only_cross_attention": false,
+   "out_channels": 4,
+   "projection_class_embeddings_input_dim": null,
+   "resnet_out_scale_factor": 1.0,
+   "resnet_skip_time_act": false,
+   "resnet_time_scale_shift": "default",
+   "sample_size": 96,
+   "time_cond_proj_dim": null,
+   "time_embedding_act_fn": null,
+   "time_embedding_dim": null,
+   "time_embedding_type": "positional",
+   "timestep_post_act": null,
+   "up_block_types": [
+     "UpBlock2D",
+     "CrossAttnUpBlock2D",
+     "CrossAttnUpBlock2D",
+     "CrossAttnUpBlock2D"
+   ],
+   "upcast_attention": true,
+   "use_linear_projection": true
+ }
unet/diffusion_pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6accf3e1612c886205a9bd59a84e8577f5ec5d7f6db9f6b0e27135ad88d9532a
+ size 3463934693
vae/config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "_class_name": "AutoencoderKL",
+   "_diffusers_version": "0.18.0.dev0",
+   "_name_or_path": "/mmu_vcg_ssd/liuhao12/workspace/1_diffusion/models/sd-zhuxiongwei-320nodes-task3_2-0612/checkpoint-28000/vae/",
+   "act_fn": "silu",
+   "block_out_channels": [
+     128,
+     256,
+     512,
+     512
+   ],
+   "down_block_types": [
+     "DownEncoderBlock2D",
+     "DownEncoderBlock2D",
+     "DownEncoderBlock2D",
+     "DownEncoderBlock2D"
+   ],
+   "in_channels": 3,
+   "latent_channels": 4,
+   "layers_per_block": 2,
+   "norm_num_groups": 32,
+   "out_channels": 3,
+   "sample_size": 768,
+   "scaling_factor": 0.18215,
+   "up_block_types": [
+     "UpDecoderBlock2D",
+     "UpDecoderBlock2D",
+     "UpDecoderBlock2D",
+     "UpDecoderBlock2D"
+   ]
+ }
vae/diffusion_pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:388eade249e1a73e99e8759547c71b5227af1cfea9978267a0fea48f1379de7a
+ size 334715569