mariagrandury committed
Commit c35ca9d · verified · 1 Parent(s): c11bd09

Model save

README.md CHANGED
@@ -1,4 +1,5 @@
 ---
+library_name: transformers
 base_model: dccuchile/distilbert-base-spanish-uncased
 tags:
 - generated_from_trainer
@@ -14,8 +15,8 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [dccuchile/distilbert-base-spanish-uncased](https://huggingface.co/dccuchile/distilbert-base-spanish-uncased) on an unknown dataset.
 It achieves the following results on the evaluation set:
-- Loss: 2.1904
-- Classification Report: {'ar': {'precision': 0.03571428571428571, 'recall': 0.00966183574879227, 'f1-score': 0.015209125475285171, 'support': 207.0}, 'cl': {'precision': 0.0, 'recall': 0.0, 'f1-score': 0.0, 'support': 263.0}, 'co': {'precision': 0.0, 'recall': 0.0, 'f1-score': 0.0, 'support': 171.0}, 'es': {'precision': 0.2221276595744681, 'recall': 0.786144578313253, 'f1-score': 0.34638354346383543, 'support': 332.0}, 'mx': {'precision': 0.0, 'recall': 0.0, 'f1-score': 0.0, 'support': 203.0}, 'pe': {'precision': 0.0, 'recall': 0.0, 'f1-score': 0.0, 'support': 161.0}, 'pr': {'precision': 0.010101010101010102, 'recall': 0.04, 'f1-score': 0.016129032258064516, 'support': 50.0}, 'uy': {'precision': 0.07142857142857142, 'recall': 0.05217391304347826, 'f1-score': 0.06030150753768844, 'support': 115.0}, 've': {'precision': 0.0, 'recall': 0.0, 'f1-score': 0.0, 'support': 22.0}, 'accuracy': 0.1778215223097113, 'macro avg': {'precision': 0.03770794742425948, 'recall': 0.09866448078950261, 'f1-score': 0.04866924541498595, 'support': 1524.0}, 'weighted avg': {'precision': 0.05896232043367241, 'recall': 0.1778215223097113, 'f1-score': 0.08260416691805432, 'support': 1524.0}}
+- Loss: 2.1812
+- Classification Report: {'ar': {'precision': 0.10526315789473684, 'recall': 0.08695652173913043, 'f1-score': 0.09523809523809523, 'support': 207.0}, 'cl': {'precision': 0.17346938775510204, 'recall': 0.19391634980988592, 'f1-score': 0.18312387791741472, 'support': 263.0}, 'co': {'precision': 0.06451612903225806, 'recall': 0.011695906432748537, 'f1-score': 0.019801980198019802, 'support': 171.0}, 'es': {'precision': 0.24783362218370883, 'recall': 0.4307228915662651, 'f1-score': 0.3146314631463146, 'support': 332.0}, 'mx': {'precision': 0.15086206896551724, 'recall': 0.1724137931034483, 'f1-score': 0.16091954022988506, 'support': 203.0}, 'pe': {'precision': 0.061946902654867256, 'recall': 0.043478260869565216, 'f1-score': 0.051094890510948905, 'support': 161.0}, 'pr': {'precision': 0.5, 'recall': 0.02, 'f1-score': 0.038461538461538464, 'support': 50.0}, 'uy': {'precision': 0.3181818181818182, 'recall': 0.06086956521739131, 'f1-score': 0.10218978102189781, 'support': 115.0}, 've': {'precision': 0.012195121951219513, 'recall': 0.045454545454545456, 'f1-score': 0.019230769230769232, 'support': 22.0}, 'accuracy': 0.17388451443569553, 'macro avg': {'precision': 0.18158535651324753, 'recall': 0.11838975935477558, 'f1-score': 0.10941021510609822, 'support': 1524.0}, 'weighted avg': {'precision': 0.1726919923848946, 'recall': 0.17388451443569553, 'f1-score': 0.151384890214964, 'support': 1524.0}}
 
 ## Model description
 
@@ -38,21 +39,21 @@ The following hyperparameters were used during training:
 - train_batch_size: 4
 - eval_batch_size: 4
 - seed: 42
-- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+- optimizer: Use OptimizerNames.ADAMW_TORCH with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
 - lr_scheduler_type: linear
 - lr_scheduler_warmup_steps: 2
 - training_steps: 2
 
 ### Training results
 
-| Training Loss | Epoch  | Step | Validation Loss | Classification Report |
-|:-------------:|:------:|:----:|:---------------:|:---------------------:|
-| 2.0868        | 0.0022 | 2    | 2.1904          | {'ar': {'precision': 0.03571428571428571, 'recall': 0.00966183574879227, 'f1-score': 0.015209125475285171, 'support': 207.0}, 'cl': {'precision': 0.0, 'recall': 0.0, 'f1-score': 0.0, 'support': 263.0}, 'co': {'precision': 0.0, 'recall': 0.0, 'f1-score': 0.0, 'support': 171.0}, 'es': {'precision': 0.2221276595744681, 'recall': 0.786144578313253, 'f1-score': 0.34638354346383543, 'support': 332.0}, 'mx': {'precision': 0.0, 'recall': 0.0, 'f1-score': 0.0, 'support': 203.0}, 'pe': {'precision': 0.0, 'recall': 0.0, 'f1-score': 0.0, 'support': 161.0}, 'pr': {'precision': 0.010101010101010102, 'recall': 0.04, 'f1-score': 0.016129032258064516, 'support': 50.0}, 'uy': {'precision': 0.07142857142857142, 'recall': 0.05217391304347826, 'f1-score': 0.06030150753768844, 'support': 115.0}, 've': {'precision': 0.0, 'recall': 0.0, 'f1-score': 0.0, 'support': 22.0}, 'accuracy': 0.1778215223097113, 'macro avg': {'precision': 0.03770794742425948, 'recall': 0.09866448078950261, 'f1-score': 0.04866924541498595, 'support': 1524.0}, 'weighted avg': {'precision': 0.05896232043367241, 'recall': 0.1778215223097113, 'f1-score': 0.08260416691805432, 'support': 1524.0}} |
+| Training Loss | Epoch  | Step | Validation Loss | Classification Report |
+|:-------------:|:------:|:----:|:---------------:|:---------------------:|
+| 2.1638        | 0.0022 | 2    | 2.1812          | {'ar': {'precision': 0.10526315789473684, 'recall': 0.08695652173913043, 'f1-score': 0.09523809523809523, 'support': 207.0}, 'cl': {'precision': 0.17346938775510204, 'recall': 0.19391634980988592, 'f1-score': 0.18312387791741472, 'support': 263.0}, 'co': {'precision': 0.06451612903225806, 'recall': 0.011695906432748537, 'f1-score': 0.019801980198019802, 'support': 171.0}, 'es': {'precision': 0.24783362218370883, 'recall': 0.4307228915662651, 'f1-score': 0.3146314631463146, 'support': 332.0}, 'mx': {'precision': 0.15086206896551724, 'recall': 0.1724137931034483, 'f1-score': 0.16091954022988506, 'support': 203.0}, 'pe': {'precision': 0.061946902654867256, 'recall': 0.043478260869565216, 'f1-score': 0.051094890510948905, 'support': 161.0}, 'pr': {'precision': 0.5, 'recall': 0.02, 'f1-score': 0.038461538461538464, 'support': 50.0}, 'uy': {'precision': 0.3181818181818182, 'recall': 0.06086956521739131, 'f1-score': 0.10218978102189781, 'support': 115.0}, 've': {'precision': 0.012195121951219513, 'recall': 0.045454545454545456, 'f1-score': 0.019230769230769232, 'support': 22.0}, 'accuracy': 0.17388451443569553, 'macro avg': {'precision': 0.18158535651324753, 'recall': 0.11838975935477558, 'f1-score': 0.10941021510609822, 'support': 1524.0}, 'weighted avg': {'precision': 0.1726919923848946, 'recall': 0.17388451443569553, 'f1-score': 0.151384890214964, 'support': 1524.0}} |
 
 
 ### Framework versions
 
-- Transformers 4.40.0
-- Pytorch 2.6.0
+- Transformers 4.48.3
+- Pytorch 2.5.1+cu124
 - Datasets 3.3.2
-- Tokenizers 0.19.1
+- Tokenizers 0.21.0
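
The hyperparameters and optimizer entry in this diff map directly onto `transformers.TrainingArguments`, and the classification-report dicts have the shape produced by scikit-learn's `classification_report(..., output_dict=True)`. The training script itself is not part of this commit, so the following is only a minimal sketch of a compatible setup: the output directory, label order, and `compute_metrics` wiring are assumptions, and the learning rate is not visible in the hunk shown above.

```python
from sklearn.metrics import classification_report
from transformers import TrainingArguments

# Sketch of arguments matching the values listed in the model card diff.
training_args = TrainingArguments(
    output_dir="outputs",            # hypothetical; not shown in the diff
    per_device_train_batch_size=4,   # train_batch_size: 4
    per_device_eval_batch_size=4,    # eval_batch_size: 4
    seed=42,
    optim="adamw_torch",             # OptimizerNames.ADAMW_TORCH
    lr_scheduler_type="linear",
    warmup_steps=2,                  # lr_scheduler_warmup_steps: 2
    max_steps=2,                     # training_steps: 2
)

def compute_metrics(eval_pred):
    """Build the per-country report logged in the Training results table."""
    logits, labels = eval_pred
    preds = logits.argmax(axis=-1)
    # Assumed label order; the card only lists these country codes.
    label_names = ["ar", "cl", "co", "es", "mx", "pe", "pr", "uy", "ve"]
    report = classification_report(
        labels, preds, target_names=label_names, output_dict=True, zero_division=0
    )
    return {"classification_report": report}
```

Handed to a `Trainer` together with the fine-tuned model and an evaluation split, a `compute_metrics` like this would emit the nested dict recorded in the card.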
config.json CHANGED
@@ -42,6 +42,6 @@
   "sinusoidal_pos_embds": true,
   "tie_weights_": true,
   "torch_dtype": "float32",
-  "transformers_version": "4.40.0",
+  "transformers_version": "4.48.3",
   "vocab_size": 31002
 }
logs/events.out.tfevents.1740153701.a2d653e866f9.226.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e0c5779ecb9ee243486f17dd0b0ba9984622c9a5a631723be1a0cc1688f9e340
+size 6355
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a3f2f5c1f4883cc8a2a79c0416f0ce2222e6c44889cb4583ab083139c9c85b76
+oid sha256:a59e6079206454140cd9de2a558ebfe6c234d41668e5122b99f4a63e3862ff46
 size 269328660
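
The `model.safetensors` entry above is a Git LFS pointer file (version, `oid`, `size`), not the binary weights themselves. A minimal sketch of checking a locally fetched copy against the new pointer, assuming the real file was pulled into the working directory with `git lfs pull`:

```python
import hashlib
from pathlib import Path

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    """Stream the file through SHA-256 so large weight files need not fit in memory."""
    digest = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Expected oid from the new LFS pointer in this commit.
expected = "a59e6079206454140cd9de2a558ebfe6c234d41668e5122b99f4a63e3862ff46"
local_file = Path("model.safetensors")  # assumed local path
print(sha256_of(local_file) == expected)
```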
tokenizer_config.json CHANGED
@@ -41,10 +41,11 @@
       "special": true
     }
   },
-  "clean_up_tokenization_spaces": true,
+  "clean_up_tokenization_spaces": false,
   "cls_token": "[CLS]",
   "do_basic_tokenize": true,
   "do_lower_case": true,
+  "extra_special_tokens": {},
   "mask_token": "[MASK]",
   "model_max_length": 512,
   "never_split": null,
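
This change flips `clean_up_tokenization_spaces` from `true` to `false` and adds an empty `extra_special_tokens` map. The cleanup flag only affects how `decode` post-processes text, namely whether spaces before punctuation are collapsed. A minimal sketch of the difference, using the base checkpoint since the fine-tuned repo id is not shown in the diff:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("dccuchile/distilbert-base-spanish-uncased")

ids = tok("¿ cómo estás ?")["input_ids"]
# With cleanup enabled, spaces before punctuation are collapsed on decode;
# with it disabled (as in this commit), the decoded text keeps them.
print(tok.decode(ids, skip_special_tokens=True, clean_up_tokenization_spaces=True))
print(tok.decode(ids, skip_special_tokens=True, clean_up_tokenization_spaces=False))
```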
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:13dc96ab2a6e490bd24dc84105ed78eb7583d16848e06ecfeff4aeac77acce6e
-size 4984
+oid sha256:6a931bb071306bc2c9f6731ed0c069d5875fa5eab471d84947b67f65d2ef43f3
+size 5368