cybersyn committed
Commit 93146e3 · 1 Parent(s): c0d6930

Upload TFRobertaForSequenceClassification

Files changed (3)
  1. README.md +1 -1
  2. config.json +2 -2
  3. tf_model.h5 +1 -1
README.md CHANGED
@@ -32,7 +32,7 @@ More information needed
 ### Training hyperparameters
 
 The following hyperparameters were used during training:
- - optimizer: {'name': 'AdamW', 'weight_decay': 0.004, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': True, 'is_legacy_optimizer': False, 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 5e-05, 'decay_steps': 915, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False}
+ - optimizer: {'name': 'AdamW', 'weight_decay': 0.004, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': True, 'is_legacy_optimizer': False, 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 732, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False}
 - training_precision: float32
 
 ### Training results
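
The optimizer entry above is the serialized Keras optimizer config logged in the model card. The training script itself is not part of this commit, but as a minimal sketch, the new values (2e-05 initial learning rate, 732 decay steps) correspond roughly to the following TensorFlow setup; the variable names are illustrative:

```python
import tensorflow as tf

# Minimal sketch reconstructing the logged optimizer config (not the original
# training script): AdamW with a linear PolynomialDecay learning-rate schedule.
lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
    initial_learning_rate=2e-5,  # was 5e-5 before this commit
    decay_steps=732,             # was 915 before this commit
    end_learning_rate=0.0,
    power=1.0,                   # power=1.0 gives linear decay
    cycle=False,
)

optimizer = tf.keras.optimizers.AdamW(
    learning_rate=lr_schedule,
    weight_decay=0.004,
    beta_1=0.9,
    beta_2=0.999,
    epsilon=1e-7,
    amsgrad=False,
)
```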
config.json CHANGED
@@ -3,13 +3,13 @@
   "architectures": [
     "RobertaForSequenceClassification"
   ],
-  "attention_probs_dropout_prob": 0.2,
+  "attention_probs_dropout_prob": 0.1,
   "bos_token_id": 0,
   "classifier_dropout": null,
   "eos_token_id": 2,
   "gradient_checkpointing": false,
   "hidden_act": "gelu",
-  "hidden_dropout_prob": 0.2,
+  "hidden_dropout_prob": 0.1,
   "hidden_size": 768,
   "id2label": {
     "0": "LABEL_0",
tf_model.h5 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:af2984c52a21af4b61a2403f77994ed62f9081f369cbd262a8106ff42461b3e2
+oid sha256:06efd782c40ed801fe11ae8dc7e24a04f27e72b3f433640d166c8a2b0954dcf8
 size 435453760
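
The tf_model.h5 entry is a Git LFS pointer, so only the sha256 oid changes here; the file size stays the same. A small sketch for verifying a downloaded copy against the new oid (the local file path is an assumption):

```python
import hashlib

EXPECTED_OID = "06efd782c40ed801fe11ae8dc7e24a04f27e72b3f433640d166c8a2b0954dcf8"

# Hash the downloaded weights in chunks to avoid loading ~435 MB into memory at once.
digest = hashlib.sha256()
with open("tf_model.h5", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

assert digest.hexdigest() == EXPECTED_OID, "tf_model.h5 does not match the LFS oid in this commit"
```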