Delta-Vector committed
Commit 085b855
1 Parent(s): a036e5b

Upload folder using huggingface_hub

dataset.toml ADDED
@@ -0,0 +1,14 @@
+ [general]
+ shuffle_caption = false
+ caption_extension = '.txt'
+ keep_tokens = 1
+
+ [[datasets]]
+ resolution = 512
+ batch_size = 1
+ keep_tokens = 1
+
+ [[datasets.subsets]]
+ image_dir = '/home/mango/Trainers/loras/fluxgym/datasets/rae-taylor'
+ class_tokens = 'RA3'
+ num_repeats = 10
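For context, the [[datasets.subsets]] block above points sd-scripts at a local image folder with per-image .txt captions. A minimal pre-flight check of that folder could look like the sketch below; check_dataset.sh is a hypothetical helper, not part of this commit, and it assumes the image_dir from dataset.toml exists on the training host.

#!/usr/bin/env bash
# check_dataset.sh -- hypothetical helper, not part of this commit.
# Counts images and matching .txt captions in the subset's image_dir.
set -euo pipefail

IMAGE_DIR="/home/mango/Trainers/loras/fluxgym/datasets/rae-taylor"   # from dataset.toml

images=$(find "$IMAGE_DIR" -maxdepth 1 -type f \( -iname '*.png' -o -iname '*.jpg' -o -iname '*.jpeg' -o -iname '*.webp' \) | wc -l)
captions=$(find "$IMAGE_DIR" -maxdepth 1 -type f -iname '*.txt' | wc -l)

echo "images:   $images"
echo "captions: $captions"

# With num_repeats = 10 and batch_size = 1, one epoch is roughly images * 10 steps.
echo "approx steps per epoch: $((images * 10))"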
rae-taylor-000004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2933730656cca150f70ee7da87d84dfe6f85dd1d4c66d407091814b528cdc0bc
+ size 18211324
rae-taylor-000008.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f5e7729cf1ec603b1d3824671de069fa1d1bf3773362f80c21f832d0cc6ff8ba
+ size 18211324
rae-taylor-000012.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2025b85634c71fd6c193767ebc3973240dbaad4b43ad0ee1f18ca3b9f36a930e
+ size 18211324
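The three .safetensors entries above are Git LFS pointers rather than the weights themselves. After pulling the real files (for example with git lfs pull), one way to confirm a checkpoint matches its pointer is to compare hashes; the expected digest below is taken from the rae-taylor-000012 pointer in this commit.

# Hedged example: verify a pulled checkpoint against its LFS pointer oid.
sha256sum rae-taylor-000012.safetensors
# expected: 2025b85634c71fd6c193767ebc3973240dbaad4b43ad0ee1f18ca3b9f36a930e  (size 18211324 bytes)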
sample_prompts.txt ADDED
@@ -0,0 +1 @@
+ RA3
train.sh ADDED
@@ -0,0 +1,39 @@
+ accelerate launch \
+ --mixed_precision bf16 \
+ --num_cpu_threads_per_process 1 \
+ sd-scripts/flux_train_network.py \
+ --pretrained_model_name_or_path "/home/mango/Trainers/loras/fluxgym/models/unet/flux1-dev.sft" \
+ --clip_l "/home/mango/Trainers/loras/fluxgym/models/clip/clip_l.safetensors" \
+ --t5xxl "/home/mango/Trainers/loras/fluxgym/models/clip/t5xxl_fp16.safetensors" \
+ --ae "/home/mango/Trainers/loras/fluxgym/models/vae/ae.sft" \
+ --cache_latents_to_disk \
+ --save_model_as safetensors \
+ --sdpa --persistent_data_loader_workers \
+ --max_data_loader_n_workers 2 \
+ --seed 42 \
+ --gradient_checkpointing \
+ --mixed_precision bf16 \
+ --save_precision bf16 \
+ --network_module networks.lora_flux \
+ --network_dim 4 \
+ --optimizer_type adafactor \
+ --optimizer_args "relative_step=False" "scale_parameter=False" "warmup_init=False" \
+ --split_mode \
+ --network_args "train_blocks=single" \
+ --lr_scheduler constant_with_warmup \
+ --max_grad_norm 0.0 \
+ --learning_rate 8e-4 \
+ --cache_text_encoder_outputs \
+ --cache_text_encoder_outputs_to_disk \
+ --fp8_base \
+ --highvram \
+ --max_train_epochs 16 \
+ --save_every_n_epochs 4 \
+ --dataset_config "/home/mango/Trainers/loras/fluxgym/outputs/rae-taylor/dataset.toml" \
+ --output_dir "/home/mango/Trainers/loras/fluxgym/outputs/rae-taylor" \
+ --output_name rae-taylor \
+ --timestep_sampling shift \
+ --discrete_flow_shift 3.1582 \
+ --model_prediction_type raw \
+ --guidance_scale 1 \
+ --loss_type l2 \
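As a usage sketch (the working directory and log path are assumptions, not part of this commit): the script invokes sd-scripts/flux_train_network.py by a relative path, so it is presumably run from the fluxgym root where sd-scripts is checked out, for example:

cd /home/mango/Trainers/loras/fluxgym                                      # assumed fluxgym root containing sd-scripts/
bash outputs/rae-taylor/train.sh 2>&1 | tee outputs/rae-taylor/train.log   # log path is illustrative

With --save_every_n_epochs 4 and --max_train_epochs 16, intermediate LoRAs are written every four epochs, which is consistent with the rae-taylor-000004/000008/000012 checkpoints uploaded in this commit.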