nomnoos37 committed
Commit 4acbdda · verified · 1 Parent(s): a909feb

nomnoos37/stt-turbo-0112-v1.8

README.md ADDED
@@ -0,0 +1,72 @@
+ ---
+ library_name: peft
+ language:
+ - ko
+ license: mit
+ base_model: openai/whisper-large-v3-turbo
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: Whisper Turbo ko
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # Whisper Turbo ko
+
+ This model is a fine-tuned version of [openai/whisper-large-v3-turbo](https://huggingface.co/openai/whisper-large-v3-turbo) on a custom dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.0940
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
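+
+ A minimal usage sketch, assuming the LoRA adapter in this repository (`nomnoos37/stt-turbo-0112-v1.8`) is loaded on top of the base model with PEFT and a CUDA GPU is available; the file name `sample.wav` is a placeholder:
+
+ ```python
+ import torch
+ import librosa
+ from peft import PeftModel
+ from transformers import WhisperForConditionalGeneration, WhisperProcessor
+
+ base_id = "openai/whisper-large-v3-turbo"
+ adapter_id = "nomnoos37/stt-turbo-0112-v1.8"  # this repository
+
+ processor = WhisperProcessor.from_pretrained(base_id)
+ model = WhisperForConditionalGeneration.from_pretrained(base_id, torch_dtype=torch.float16)
+ model = PeftModel.from_pretrained(model, adapter_id)
+ model.to("cuda").eval()  # assumes a CUDA GPU is available
+
+ # Placeholder clip: any 16 kHz mono Korean audio works here.
+ audio, _ = librosa.load("sample.wav", sr=16000)
+ inputs = processor(audio, sampling_rate=16000, return_tensors="pt")
+ input_features = inputs.input_features.to("cuda", dtype=torch.float16)
+
+ with torch.no_grad():
+     generated_ids = model.generate(input_features=input_features, language="ko", task="transcribe")
+ print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])
+ ```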
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training (a hedged mapping to `Seq2SeqTrainingArguments` is sketched after the list):
+ - learning_rate: 0.001
+ - train_batch_size: 64
+ - eval_batch_size: 256
+ - seed: 42
+ - optimizer: AdamW (torch) with betas=(0.9, 0.999) and epsilon=1e-08; no additional optimizer arguments
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_steps: 200
+ - training_steps: 1000
+ - mixed_precision_training: Native AMP
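+
+ A hedged sketch of how these values might map onto `transformers.Seq2SeqTrainingArguments`, assuming a single GPU (so per-device and total batch sizes coincide); the `output_dir` name is an assumption, and the 100-step evaluation interval is taken from the results table below:
+
+ ```python
+ from transformers import Seq2SeqTrainingArguments
+
+ training_args = Seq2SeqTrainingArguments(
+     output_dir="whisper-turbo-ko",   # assumed name, not taken from the repo
+     learning_rate=1e-3,
+     per_device_train_batch_size=64,
+     per_device_eval_batch_size=256,
+     seed=42,
+     optim="adamw_torch",             # AdamW with betas=(0.9, 0.999), eps=1e-08 by default
+     lr_scheduler_type="linear",
+     warmup_steps=200,
+     max_steps=1000,
+     fp16=True,                       # "Native AMP" mixed precision
+     eval_strategy="steps",
+     eval_steps=100,
+ )
+ ```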
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss |
+ |:-------------:|:------:|:----:|:---------------:|
+ | 0.3061 | 0.5405 | 100 | 0.6169 |
+ | 0.107 | 1.0811 | 200 | 0.3840 |
+ | 0.0871 | 1.6216 | 300 | 0.3024 |
+ | 0.0801 | 2.1622 | 400 | 0.2429 |
+ | 0.0608 | 2.7027 | 500 | 0.2094 |
+ | 0.0527 | 3.2432 | 600 | 0.1674 |
+ | 0.0377 | 3.7838 | 700 | 0.1404 |
+ | 0.0316 | 4.3243 | 800 | 0.1230 |
+ | 0.0302 | 4.8649 | 900 | 0.1004 |
+ | 0.0227 | 5.4054 | 1000 | 0.0940 |
+
+ ### Framework versions
+
+ - PEFT 0.14.0
+ - Transformers 4.47.1
+ - Pytorch 2.5.1+cu124
+ - Datasets 3.2.0
+ - Tokenizers 0.21.0
adapter_config.json ADDED
@@ -0,0 +1,35 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": {
+     "base_model_class": "WhisperForConditionalGeneration",
+     "parent_library": "transformers.models.whisper.modeling_whisper"
+   },
+   "base_model_name_or_path": "openai/whisper-large-v3-turbo",
+   "bias": "none",
+   "eva_config": null,
+   "exclude_modules": null,
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 64,
+   "lora_bias": false,
+   "lora_dropout": 0.05,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 64,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "q_proj",
+     "v_proj"
+   ],
+   "task_type": null,
+   "use_dora": false,
+   "use_rslora": false
+ }
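A hedged sketch of how an equivalent adapter configuration could be built with `peft` (the field values are taken from the JSON above; the `get_peft_model` call and parameter-count check are illustrative only):

```python
from peft import LoraConfig, get_peft_model
from transformers import WhisperForConditionalGeneration

# Mirrors the key fields of adapter_config.json: rank 64, alpha 64, dropout 0.05,
# LoRA applied to the attention query/value projections, no bias terms adapted.
lora_config = LoraConfig(
    r=64,
    lora_alpha=64,
    lora_dropout=0.05,
    bias="none",
    target_modules=["q_proj", "v_proj"],
)

base = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large-v3-turbo")
peft_model = get_peft_model(base, lora_config)
peft_model.print_trainable_parameters()
```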
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8c0875d5362a15baa21014bfba943ffed9c107637eff470b81149d67bb08ae8d
+ size 52451624
preprocessor_config.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "chunk_length": 30,
+   "feature_extractor_type": "WhisperFeatureExtractor",
+   "feature_size": 128,
+   "hop_length": 160,
+   "n_fft": 400,
+   "n_samples": 480000,
+   "nb_max_frames": 3000,
+   "padding_side": "right",
+   "padding_value": 0.0,
+   "processor_class": "WhisperProcessor",
+   "return_attention_mask": false,
+   "sampling_rate": 16000
+ }
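A small sketch showing how the derived values in this config relate to one another once the feature extractor is loaded from this repository; the assertions only restate numbers already present above:

```python
from transformers import WhisperFeatureExtractor

fe = WhisperFeatureExtractor.from_pretrained("nomnoos37/stt-turbo-0112-v1.8")

# 30 s of 16 kHz audio -> 480000 samples -> 3000 frames of 128 log-mel features
assert fe.n_samples == fe.chunk_length * fe.sampling_rate   # 30 * 16000 = 480000
assert fe.nb_max_frames == fe.n_samples // fe.hop_length    # 480000 // 160 = 3000
assert fe.feature_size == 128
```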
runs/Jan13_08-45-04_gglabs-a6000/events.out.tfevents.1736775904.gglabs-a6000 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:91670b65d50208ca6a8ee3ce489390431b22095db144931b2c564eafee1a7077
+ size 11460
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:284045918a3b838a97423af0f34a207c439f10488c9506185ac5c82eff13fe5e
+ size 5496