p1atdev committed (verified)
Commit aaa0a3a · Parent: 4c72058

Upload config.yml

Files changed (1):
  1. shortcut-07/config.yml +94 -0
shortcut-07/config.yml ADDED
@@ -0,0 +1,94 @@
+ model:
+   checkpoint_path: "./models/aura_flow_0.3.bnb_nf4.safetensors"
+   pretrained_model_name_or_path: fal/AuraFlow-v0.3
+
+   dtype: bfloat16
+
+   denoiser:
+     use_flash_attn: true
+
+     use_shortcut: true
+
+     flow_matching_ratio: 0.5
+     shortcut_max_steps: 128
+
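With use_shortcut enabled, half of each batch (flow_matching_ratio: 0.5) is trained on plain flow-matching targets and the other half on shortcut self-consistency targets whose step sizes come from a dyadic grid of at most shortcut_max_steps: 128 steps. A minimal sketch of how such a split could be sampled, assuming a torch-based trainer (the function and names are illustrative, not the repository's actual code):

import torch

def sample_shortcut_batch(batch_size: int,
                          flow_matching_ratio: float = 0.5,
                          shortcut_max_steps: int = 128):
    """Split a batch between flow-matching and shortcut self-consistency targets."""
    # Bernoulli split: True -> plain flow-matching sample, False -> shortcut sample
    is_fm = torch.rand(batch_size) < flow_matching_ratio

    # Dyadic step sizes 1/128, 2/128, ..., 64/128 (log2(128) = 7 levels), so a
    # step of size 2d can be supervised by two consecutive steps of size d.
    num_levels = shortcut_max_steps.bit_length() - 1  # log2 for a power of two
    levels = torch.randint(0, num_levels, (batch_size,))
    d = 2.0 ** levels.float() / shortcut_max_steps

    d = torch.where(is_fm, torch.zeros_like(d), d)  # d == 0 flags flow matching
    t = torch.rand(batch_size)                      # noise level in [0, 1)
    return t, d, is_fm

Here a step size of zero marks the flow-matching branch, while nonzero d would feed the denoiser's step-size conditioning.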
+ peft:
+   type: lora
+   rank: 4
+   alpha: 1.0
+   dropout: 0.0
+
+   dtype: bfloat16
+
+   # include the AdaLN-Zero modulation layers
+   include_keys:
+     [".attn.", ".mlp.", ".mlpC.", ".mlpX.", ".modC.", ".modX.", ".modCX."]
+   exclude_keys: ["text_encoder", "vae", "t_embedder", "final_linear", ".modF."]
+
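include_keys / exclude_keys select which modules receive LoRA adapters: the AdaLN-Zero modulation layers (.modC., .modX., .modCX.) are included alongside attention and MLP blocks, while the final modulation (.modF.), embedders, and the non-denoiser components are excluded. A sketch of the kind of substring filter this implies (the matching rule and the example module paths are assumptions, not the trainer's actual code):

def is_target_module(name: str,
                     include_keys: list[str],
                     exclude_keys: list[str]) -> bool:
    """Select a module for LoRA injection by substring match on its qualified name."""
    return (any(k in name for k in include_keys)
            and not any(k in name for k in exclude_keys))

include_keys = [".attn.", ".mlp.", ".mlpC.", ".mlpX.", ".modC.", ".modX.", ".modCX."]
exclude_keys = ["text_encoder", "vae", "t_embedder", "final_linear", ".modF."]

print(is_target_module("denoiser.layers.0.modC.1", include_keys, exclude_keys))  # True
print(is_target_module("denoiser.modF.1", include_keys, exclude_keys))           # False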
+ dataset:
+   folder: "data/pexels-1k-random"
+   num_repeats: 2
+   batch_size: 2
+
+   bucket_base_size: 1024
+   step: 128
+   min_size: 384
+   do_upscale: false
+
+   caption_processors: []
+
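The dataset block implies aspect-ratio bucketing: images are grouped into resolution buckets of roughly bucket_base_size² pixels, with side lengths on a step: 128 grid, no side below min_size: 384, and (with do_upscale: false) no upscaling of small images into larger buckets. A sketch of one common way to enumerate such buckets (an assumption, not the repository's implementation):

def make_buckets(base_size: int = 1024, step: int = 128, min_size: int = 384):
    """Enumerate (width, height) buckets with at most base_size**2 pixels."""
    max_area = base_size * base_size
    buckets = set()
    w = min_size
    while w <= max_area // min_size:
        # largest step multiple that keeps the area within budget
        h = (max_area // w) // step * step
        if h >= min_size:
            buckets.add((w, h))
            buckets.add((h, w))  # mirrored bucket for the other orientation
        w += step
    return sorted(buckets)

print(make_buckets()[:5])
# [(384, 2688), (512, 2048), (640, 1536), (768, 1280), (896, 1152)]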
+ optimizer:
+   name: "schedulefree.RAdamScheduleFree"
+   args:
+     lr: 0.001
+
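The optimizer is named by its dotted import path, so the trainer presumably resolves schedulefree.RAdamScheduleFree and passes args as keyword arguments. One practical detail of the schedulefree library: its optimizers must be toggled between train and eval modes around optimization steps and checkpointing. A minimal usage sketch (the toy model is illustrative):

import torch
import schedulefree

model = torch.nn.Linear(16, 16)
optimizer = schedulefree.RAdamScheduleFree(model.parameters(), lr=0.001)

optimizer.train()  # schedule-free optimizers keep parameter averages; switch to train mode first
for _ in range(10):
    loss = model(torch.randn(4, 16)).pow(2).mean()
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()

optimizer.eval()   # switch back before evaluation or saving a checkpoint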
+ tracker:
+   project_name: "auraflow-shortcut-1"
+   loggers:
+     - wandb
+
+ saving:
+   strategy:
+     per_epochs: 1
+     per_steps: null
+     save_last: true
+
+   callbacks:
+     - type: "hf_hub"
+       # - type: "safetensors"
+       name: "shortcut-07"
+       save_dir: "./output/shortcut-07"
+
+       hub_id: "p1atdev/afv03-lora"
+       dir_in_repo: "shortcut-07"
+
+ preview:
+   strategy:
+     per_epochs: 1
+     per_steps: 100
+
+   callbacks:
+     # - type: "local"
+     #   save_dir: "./output/shortcut-07/preview"
+
+     - type: "discord"
+       url: "masked"
+
+   data:
+     path: "./projects/shortcut/preview.yml"
+
+ seed: 42
+ num_train_epochs: 5
+
+ trainer:
+   # debug_mode: "1step"
+
+   gradient_checkpointing: true
+   gradient_accumulation_steps: 4
+
+   torch_compile: true
+   torch_compile_args:
+     mode: max-autotune
+     fullgraph: true
+
+   fp32_matmul_precision: "medium"
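The trainer block combines gradient checkpointing, 4-step gradient accumulation (effective batch size 2 × 4 = 8), and torch.compile with its most aggressive autotuning. The two PyTorch knobs map directly onto standard APIs; a sketch of the equivalent calls (the toy module is illustrative):

import torch

# fp32_matmul_precision: "medium" lets float32 matmuls use lower-precision
# internals (bfloat16/TF32 on supported GPUs) in exchange for speed
torch.set_float32_matmul_precision("medium")

model = torch.nn.Sequential(
    torch.nn.Linear(64, 64),
    torch.nn.GELU(),
    torch.nn.Linear(64, 64),
)

# torch_compile / torch_compile_args from the config
compiled = torch.compile(model, mode="max-autotune", fullgraph=True)

out = compiled(torch.randn(8, 64))  # the first call triggers compilation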