Upload folder using huggingface_hub

- config.json +1 -1
- generation_config.json +1 -1
- model-00001-of-00004.safetensors +1 -1
- model-00002-of-00004.safetensors +1 -1
- model-00003-of-00004.safetensors +1 -1
- model-00004-of-00004.safetensors +1 -1
- sft_args.json +4 -4
- tokenizer_config.json +1 -0
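A commit like this one can be produced with huggingface_hub's upload_folder, which pushes every file in a local directory as a single commit. A minimal sketch, assuming a placeholder repo id ("your-username/qwen2-vl-7b-instruct-sft") and the local training output folder:

import huggingface_hub

api = huggingface_hub.HfApi()
# Upload the whole checkpoint folder (config, tokenizer, safetensors shards, sft_args.json)
# in one commit. repo_id and folder_path are placeholders for this sketch.
api.upload_folder(
    folder_path="output/qwen2-vl-7b-instruct/v80-20241225-091123",
    repo_id="your-username/qwen2-vl-7b-instruct-sft",
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)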
config.json CHANGED
@@ -31,7 +31,7 @@
  "sliding_window": 32768,
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
- "transformers_version": "4.
+ "transformers_version": "4.47.1",
  "use_cache": true,
  "use_sliding_window": false,
  "video_token_id": 151656,
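The only change in config.json is the transformers_version stamp, now 4.47.1. A small sketch, assuming the file has been downloaded locally, for comparing the recorded version with the installed transformers:

import json
import transformers

# config.json records the transformers version used to save this checkpoint.
with open("config.json") as f:
    recorded = json.load(f)["transformers_version"]

print("recorded:", recorded, "installed:", transformers.__version__)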
generation_config.json CHANGED
@@ -7,5 +7,5 @@
  "temperature": 0.01,
  "top_k": 1,
  "top_p": 0.001,
- "transformers_version": "4.
+ "transformers_version": "4.47.1"
 }
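As in config.json, only the transformers_version field changes here; the decoding settings themselves (temperature 0.01, top_k 1, top_p 0.001) make sampling effectively greedy. A minimal sketch for loading them with transformers, using the placeholder repo id from above:

from transformers import GenerationConfig

# Placeholder repo id; loads the generation_config.json shown in this commit.
gen_config = GenerationConfig.from_pretrained("your-username/qwen2-vl-7b-instruct-sft")
# With temperature=0.01, top_k=1 and top_p=0.001, sampling is effectively greedy.
print(gen_config.temperature, gen_config.top_k, gen_config.top_p)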
model-00001-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:4dcf3603db2bc71e78ddb56e4de61079f55dc5620e43ef5e28da83c811e10b12
 size 4966659944
model-00002-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:f0d57bb9ef6cafa76eefe491651610880ef72367ebf0245ba4ea71aced264dfd
 size 4991495816
model-00003-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:2a741637874c27f4de16398104cc60c7c4f4f042c090e9380b84d99d054e347f
 size 4932751040
model-00004-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:40e890fc26d8da0dc34729795e838dfc555960790a93cbb2ed0eb9c509a53f00
 size 1691924384
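Each of the four entries above is a Git LFS pointer recording the SHA-256 digest (oid) and byte size of the actual weight shard. A quick sketch for checking a locally downloaded shard against its recorded oid, using the last pointer as the example:

import hashlib

# Expected digest taken from the model-00004-of-00004.safetensors pointer above.
expected = "40e890fc26d8da0dc34729795e838dfc555960790a93cbb2ed0eb9c509a53f00"

h = hashlib.sha256()
with open("model-00004-of-00004.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        h.update(chunk)

assert h.hexdigest() == expected, "checksum mismatch"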
sft_args.json CHANGED
@@ -10,7 +10,7 @@
  "additional_trainable_parameters": [],
  "tuner_backend": "peft",
  "template_type": "qwen2-vl",
- "output_dir": "/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/public-project/zhengkaipeng-240108120123/swift/output/qwen2-vl-7b-instruct/
+ "output_dir": "/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/public-project/zhengkaipeng-240108120123/swift/output/qwen2-vl-7b-instruct/v80-20241225-091123",
  "add_output_dir_suffix": true,
  "ddp_backend": "nccl",
  "ddp_find_unused_parameters": null,
@@ -36,7 +36,7 @@
  "dataset_seed": 42,
  "dataset_test_ratio": 0.01,
  "use_loss_scale": false,
- "loss_scale_config_path": "/
+ "loss_scale_config_path": "/opt/conda/lib/python3.11/site-packages/swift/llm/agent/default_loss_scale_config.json",
  "system": null,
  "tools_prompt": "react_en",
  "max_length": 2048,
@@ -165,7 +165,7 @@
  "use_flash_attn": null,
  "ignore_args_error": false,
  "check_model_is_latest": true,
- "logging_dir": "/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/public-project/zhengkaipeng-240108120123/swift/output/qwen2-vl-7b-instruct/
+ "logging_dir": "/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/public-project/zhengkaipeng-240108120123/swift/output/qwen2-vl-7b-instruct/v80-20241225-091123/runs",
  "report_to": [
    "tensorboard"
  ],
@@ -242,5 +242,5 @@
  "load_in_8bit": false,
  "train_sampler_random": true,
  "train_type": "sft",
- "training_args": "Seq2SeqTrainingArguments(output_dir='/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/public-project/zhengkaipeng-240108120123/swift/output/qwen2-vl-7b-instruct/
+ "training_args": "Seq2SeqTrainingArguments(output_dir='/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/public-project/zhengkaipeng-240108120123/swift/output/qwen2-vl-7b-instruct/v80-20241225-091123', overwrite_output_dir=False, do_train=False, do_eval=True, do_predict=False, eval_strategy=<IntervalStrategy.STEPS: 'steps'>, prediction_loss_only=False, per_device_train_batch_size=1, per_device_eval_batch_size=1, per_gpu_train_batch_size=None, per_gpu_eval_batch_size=None, gradient_accumulation_steps=16, eval_accumulation_steps=None, eval_delay=0, torch_empty_cache_steps=None, learning_rate=0.0001, weight_decay=0.1, adam_beta1=0.9, adam_beta2=0.95, adam_epsilon=1e-08, max_grad_norm=1, num_train_epochs=3, max_steps=-1, lr_scheduler_type=<SchedulerType.COSINE: 'cosine'>, lr_scheduler_kwargs={}, warmup_ratio=0.05, warmup_steps=0, log_level='passive', log_level_replica='warning', log_on_each_node=True, logging_dir='/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/public-project/zhengkaipeng-240108120123/swift/output/qwen2-vl-7b-instruct/v80-20241225-091123/runs', logging_strategy=<IntervalStrategy.STEPS: 'steps'>, logging_first_step=True, logging_steps=5, logging_nan_inf_filter=True, save_strategy=<SaveStrategy.STEPS: 'steps'>, save_steps=50, save_total_limit=2, save_safetensors=True, save_on_each_node=False, save_only_model=False, restore_callback_states_from_checkpoint=False, no_cuda=False, use_cpu=False, use_mps_device=False, seed=42, data_seed=42, jit_mode_eval=False, use_ipex=False, bf16=True, fp16=False, fp16_opt_level='O1', half_precision_backend='auto', bf16_full_eval=False, fp16_full_eval=False, tf32=None, local_rank=0, ddp_backend='nccl', tpu_num_cores=None, tpu_metrics_debug=False, debug=[], dataloader_drop_last=False, eval_steps=50, dataloader_num_workers=1, dataloader_prefetch_factor=None, past_index=-1, run_name='/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/public-project/zhengkaipeng-240108120123/swift/output/qwen2-vl-7b-instruct/v80-20241225-091123', disable_tqdm=False, remove_unused_columns=False, label_names=None, load_best_model_at_end=False, metric_for_best_model='loss', greater_is_better=False, ignore_data_skip=False, fsdp=[], fsdp_min_num_params=0, fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, fsdp_transformer_layer_cls_to_wrap=None, accelerator_config=AcceleratorConfig(split_batches=False, dispatch_batches=False, even_batches=True, use_seedable_sampler=True, non_blocking=False, gradient_accumulation_kwargs=None, use_configured_state=False), deepspeed=None, label_smoothing_factor=0.0, optim=<OptimizerNames.ADAMW_TORCH: 'adamw_torch'>, optim_args=None, adafactor=False, group_by_length=False, length_column_name='length', report_to=['tensorboard'], ddp_find_unused_parameters=False, ddp_bucket_cap_mb=None, ddp_broadcast_buffers=False, dataloader_pin_memory=True, dataloader_persistent_workers=False, skip_memory_metrics=True, use_legacy_prediction_loop=False, push_to_hub=False, resume_from_checkpoint=None, hub_model_id=None, hub_strategy=<HubStrategy.EVERY_SAVE: 'every_save'>, hub_token=None, hub_private_repo=False, hub_always_push=False, gradient_checkpointing=True, gradient_checkpointing_kwargs=None, include_inputs_for_metrics=False, include_for_metrics=[], eval_do_concat_batches=True, fp16_backend='auto', evaluation_strategy=None, push_to_hub_model_id=None, push_to_hub_organization=None, push_to_hub_token=None, mp_parameters='', auto_find_batch_size=False, full_determinism=False, torchdynamo=None, ray_scope='last', ddp_timeout=1800, torch_compile=False, torch_compile_backend=None, torch_compile_mode=None, dispatch_batches=None, split_batches=None, include_tokens_per_second=False, include_num_input_tokens_seen=False, neftune_noise_alpha=None, optim_target_modules=None, batch_eval_metrics=False, eval_on_start=False, use_liger_kernel=False, eval_use_gather_object=False, average_tokens_across_devices=False, sortish_sampler=False, predict_with_generate=False, generation_max_length=None, generation_num_beams=None, generation_config=GenerationConfig {\n \"bos_token_id\": 151643,\n \"do_sample\": true,\n \"eos_token_id\": 151645,\n \"max_new_tokens\": 2048,\n \"pad_token_id\": 151643,\n \"temperature\": 0.01,\n \"top_k\": 1,\n \"top_p\": 0.001\n}\n, acc_strategy='token', loss_name=None, additional_saved_files=[], train_sampler_random=True, metric_warmup_step=0, train_dataset_sample=-1)"
 }
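sft_args.json records the full ms-swift SFT configuration for this run. A small sketch for inspecting a few of the recorded hyperparameters, assuming the file has been downloaded locally:

import json

with open("sft_args.json") as f:
    sft_args = json.load(f)

# A handful of the settings visible in the diff above.
for key in ("template_type", "tuner_backend", "max_length", "output_dir", "loss_scale_config_path"):
    print(key, "=", sft_args[key])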
tokenizer_config.json CHANGED
@@ -134,6 +134,7 @@
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|im_end|>",
  "errors": "replace",
+ "extra_special_tokens": {},
  "model_max_length": 32768,
  "pad_token": "<|endoftext|>",
  "padding_side": "left",
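The tokenizer update only adds an empty extra_special_tokens entry (a field written by recent transformers releases); padding stays on the left with "<|endoftext|>" as the pad token. A minimal sketch for loading it, again with the placeholder repo id:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("your-username/qwen2-vl-7b-instruct-sft")
print(tokenizer.padding_side)  # "left", per tokenizer_config.json above
print(tokenizer.pad_token)     # "<|endoftext|>"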