serkandyck committed
Commit bc23c96
1 Parent(s): 13a76da

Upload ai-toolkit.log with huggingface_hub

Files changed (1)
  1. ai-toolkit.log +134 -0
ai-toolkit.log CHANGED
@@ -1,3 +1,137 @@
  The cache for model files in Transformers v4.22.0 has been updated. Migrating your old cache. This is a one-time only operation. You can interrupt this and resume the migration later on by calling `transformers.utils.move_cache()`.
  Running 1 job
 
+ /usr/local/lib/python3.10/dist-packages/controlnet_aux/mediapipe_face/mediapipe_face_common.py:7: UserWarning: The module 'mediapipe' is not installed. The package will have limited functionality. Please install it using the command: pip install 'mediapipe'
+   warnings.warn(
+ /usr/local/lib/python3.10/dist-packages/controlnet_aux/segment_anything/modeling/tiny_vit_sam.py:654: UserWarning: Overwriting tiny_vit_5m_224 in registry with controlnet_aux.segment_anything.modeling.tiny_vit_sam.tiny_vit_5m_224. This is because the name being registered conflicts with an existing name. Please check if this is not expected.
+   return register_model(fn_wrapper)
+ /usr/local/lib/python3.10/dist-packages/controlnet_aux/segment_anything/modeling/tiny_vit_sam.py:654: UserWarning: Overwriting tiny_vit_11m_224 in registry with controlnet_aux.segment_anything.modeling.tiny_vit_sam.tiny_vit_11m_224. This is because the name being registered conflicts with an existing name. Please check if this is not expected.
+   return register_model(fn_wrapper)
+ /usr/local/lib/python3.10/dist-packages/controlnet_aux/segment_anything/modeling/tiny_vit_sam.py:654: UserWarning: Overwriting tiny_vit_21m_224 in registry with controlnet_aux.segment_anything.modeling.tiny_vit_sam.tiny_vit_21m_224. This is because the name being registered conflicts with an existing name. Please check if this is not expected.
+   return register_model(fn_wrapper)
+ /usr/local/lib/python3.10/dist-packages/controlnet_aux/segment_anything/modeling/tiny_vit_sam.py:654: UserWarning: Overwriting tiny_vit_21m_384 in registry with controlnet_aux.segment_anything.modeling.tiny_vit_sam.tiny_vit_21m_384. This is because the name being registered conflicts with an existing name. Please check if this is not expected.
+   return register_model(fn_wrapper)
+ /usr/local/lib/python3.10/dist-packages/controlnet_aux/segment_anything/modeling/tiny_vit_sam.py:654: UserWarning: Overwriting tiny_vit_21m_512 in registry with controlnet_aux.segment_anything.modeling.tiny_vit_sam.tiny_vit_21m_512. This is because the name being registered conflicts with an existing name. Please check if this is not expected.
+   return register_model(fn_wrapper)
+ You set `add_prefix_space`. The tokenizer needs to be converted from the slow tokenizers
+ {
+   "type": "sd_trainer",
+   "training_folder": "output",
+   "device": "cuda:0",
+   "network": {
+     "type": "lora",
+     "linear": 16,
+     "linear_alpha": 16
+   },
+   "save": {
+     "dtype": "float16",
+     "save_every": 250,
+     "max_step_saves_to_keep": 4,
+     "push_to_hub": false
+   },
+   "datasets": [
+     {
+       "folder_path": "/workspace/ai-toolkit/images",
+       "caption_ext": "txt",
+       "caption_dropout_rate": 0.05,
+       "shuffle_tokens": false,
+       "cache_latents_to_disk": true,
+       "resolution": [
+         512,
+         768,
+         1024
+       ]
+     }
+   ],
+   "train": {
+     "batch_size": 1,
+     "steps": 2000,
+     "gradient_accumulation_steps": 1,
+     "train_unet": true,
+     "train_text_encoder": false,
+     "gradient_checkpointing": true,
+     "noise_scheduler": "flowmatch",
+     "optimizer": "adamw8bit",
+     "lr": 0.0001,
+     "ema_config": {
+       "use_ema": true,
+       "ema_decay": 0.99
+     },
+     "dtype": "bf16"
+   },
+   "model": {
+     "name_or_path": "black-forest-labs/FLUX.1-dev",
+     "is_flux": true,
+     "quantize": true
+   },
+   "sample": {
+     "sampler": "flowmatch",
+     "sample_every": 250,
+     "width": 1024,
+     "height": 1024,
+     "prompts": [
+       "woman with red hair, playing chess at the park, bomb going off in the background",
+       "a woman holding a coffee cup, in a beanie, sitting at a cafe",
+       "a horse is a DJ at a night club, fish eye lens, smoke machine, lazer lights, holding a martini",
+       "a man showing off his cool new t shirt at the beach, a shark is jumping out of the water in the background",
+       "a bear building a log cabin in the snow covered mountains",
+       "woman playing the guitar, on stage, singing a song, laser lights, punk rocker",
+       "hipster man with a beard, building a chair, in a wood shop",
+       "photo of a man, white background, medium shot, modeling clothing, studio lighting, white backdrop",
+       "a man holding a sign that says, 'this is a sign'",
+       "a bulldog, in a post apocalyptic world, with a shotgun, in a leather jacket, in a desert, with a motorcycle"
+     ],
+     "neg": "",
+     "seed": 42,
+     "walk_seed": true,
+     "guidance_scale": 4,
+     "sample_steps": 20
+   }
+ }
+ Using EMA
+
+ #############################################
+ # Running job: my_first_flux_lora_v1
+ #############################################
+
+
+ Running 1 process
+ Loading Flux model
+ Loading transformer
+ Quantizing transformer
+ Loading vae
+ Loading t5
+
+
+ Quantizing T5
+ Loading clip
+ making pipe
+ preparing
+ create LoRA network. base dim (rank): 16, alpha: 16
+ neuron dropout: p=None, rank dropout: p=None, module dropout: p=None
+ create LoRA for Text Encoder: 0 modules.
+ create LoRA for U-Net: 494 modules.
+ enable LoRA for U-Net
+ Error running job: [Errno 2] No such file or directory: '/workspace/ai-toolkit/images'
+
+ ========================================
+ Result:
+ - 0 completed jobs
+ - 1 failure
+ ========================================
+ Traceback (most recent call last):
+   File "/workspace/ai-toolkit/run.py", line 90, in <module>
+     main()
+   File "/workspace/ai-toolkit/run.py", line 86, in main
+     raise e
+   File "/workspace/ai-toolkit/run.py", line 78, in main
+     job.run()
+   File "/workspace/ai-toolkit/jobs/ExtensionJob.py", line 22, in run
+     process.run()
+   File "/workspace/ai-toolkit/jobs/process/BaseSDTrainProcess.py", line 1567, in run
+     self.data_loader = get_dataloader_from_datasets(self.datasets, self.train_config.batch_size, self.sd)
+   File "/workspace/ai-toolkit/toolkit/data_loader.py", line 571, in get_dataloader_from_datasets
+     dataset = AiToolkitDataset(config, batch_size=batch_size, sd=sd)
+   File "/workspace/ai-toolkit/toolkit/data_loader.py", line 411, in __init__
+     with open(self.dataset_path, 'r') as f:
+ FileNotFoundError: [Errno 2] No such file or directory: '/workspace/ai-toolkit/images'
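
Note: the job fails because the dataset folder referenced by `folder_path` in the config above, `/workspace/ai-toolkit/images`, does not exist when the data loader is built. A minimal pre-flight check along these lines can catch that before launching the run; this is a sketch, not part of ai-toolkit, and the accepted image extensions are an assumption (only `folder_path` and `caption_ext` come from the config in the log).

```python
# Sketch of a pre-flight check for the dataset folder used in the config above.
# Assumptions: the path and caption extension are copied from the logged config;
# the image extension list is a guess, not ai-toolkit's own.
from pathlib import Path

dataset_dir = Path("/workspace/ai-toolkit/images")  # "folder_path" from the config
caption_ext = "txt"                                 # "caption_ext" from the config

if not dataset_dir.is_dir():
    raise SystemExit(
        f"Dataset folder not found: {dataset_dir} - create it and copy your "
        f"training images (plus .{caption_ext} caption files) into it first."
    )

image_exts = {".jpg", ".jpeg", ".png", ".webp"}     # assumed extensions
images = [p for p in dataset_dir.iterdir() if p.suffix.lower() in image_exts]
captions = list(dataset_dir.glob(f"*.{caption_ext}"))
print(f"Found {len(images)} images and {len(captions)} caption files in {dataset_dir}")
```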