Tags: Text Generation · Transformers · PyTorch · Polish · gpt2 · text-generation-inference · Inference Endpoints
s3nh committed a3b5179 (1 parent: 1fc9ff2)

Upload ./ with huggingface_hub

Files changed (7)
  1. config.json +39 -0
  2. optimizer.pt +3 -0
  3. pytorch_model.bin +3 -0
  4. rng_state.pth +3 -0
  5. scheduler.pt +3 -0
  6. trainer_state.json +34 -0
  7. training_args.bin +3 -0
config.json ADDED
@@ -0,0 +1,39 @@
+ {
+   "_name_or_path": "MBZUAI/LaMini-GPT-774M",
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.1,
+   "bos_token_id": 50256,
+   "embd_pdrop": 0.1,
+   "eos_token_id": 50256,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_ctx": 1024,
+   "n_embd": 1280,
+   "n_head": 20,
+   "n_inner": null,
+   "n_layer": 36,
+   "n_positions": 1024,
+   "reorder_and_upcast_attn": false,
+   "resid_pdrop": 0.1,
+   "scale_attn_by_inverse_layer_idx": false,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "task_specific_params": {
+     "text-generation": {
+       "do_sample": true,
+       "max_length": 50
+     }
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.20.1",
+   "use_cache": false,
+   "vocab_size": 50258
+ }
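The config describes a GPT-2 Large-shaped causal LM (36 layers, 1280-dim embeddings, 20 heads, 1024-token context) fine-tuned from MBZUAI/LaMini-GPT-774M, with task_specific_params suggesting sampled generation capped at 50 tokens. A minimal loading sketch, assuming the committed files sit in a local ./checkpoint directory and that the tokenizer is pulled from the base model, since no tokenizer files are part of this commit; the Polish prompt is illustrative:

# Minimal sketch: load this checkpoint and generate text.
# Assumes the files above were downloaded to ./checkpoint; the
# tokenizer comes from the base model because this commit ships none.
import torch
from transformers import AutoTokenizer, GPT2LMHeadModel

tokenizer = AutoTokenizer.from_pretrained("MBZUAI/LaMini-GPT-774M")
model = GPT2LMHeadModel.from_pretrained("./checkpoint", torch_dtype=torch.float32)
model.eval()

# Mirror task_specific_params from config.json: do_sample=True, max_length=50.
inputs = tokenizer("Napisz krótki wiersz o wiośnie.", return_tensors="pt")
with torch.no_grad():
    out = model.generate(**inputs, do_sample=True, max_length=50)
print(tokenizer.decode(out[0], skip_special_tokens=True))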
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:885c2094f4b09c85accee562c99f3d3a61cae9b070e2e59c2a769fdcce9ed030
+ size 6192513077
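Each large binary in this commit is stored as a Git LFS pointer: a three-line stub giving the spec version, the SHA-256 of the real payload, and its byte size. The ~6.19 GB here is AdamW optimizer state, roughly two float32 moment buffers per model parameter, about twice the size of the weight file below. A sketch of reading such a stub, assuming it has been saved locally (the path is hypothetical):

# Sketch: split a Git LFS pointer stub into its key/value fields.
# "optimizer.pt" here is the 3-line pointer text, not the 6.2 GB payload.
def parse_lfs_pointer(path: str) -> dict:
    fields = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

ptr = parse_lfs_pointer("optimizer.pt")
print(ptr["oid"])        # sha256:885c2094...
print(int(ptr["size"]))  # 6192513077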
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a8c2ab09aadf7c38cf33bf03798b6fc11bc5a56d3d109742090b713878f15f79
+ size 3134036617
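At 3,134,036,617 bytes, pytorch_model.bin is consistent with a float32 state dict for this config. A back-of-the-envelope check from the config fields (n_inner is null, which for GPT-2 defaults to 4*n_embd); the small surplus over 4 bytes per parameter is mostly the per-layer causal-mask buffers that transformers 4.20-era checkpoints also serialize:

# Rough parameter count for the GPT-2 config committed above.
n_layer, n_embd, n_ctx, vocab = 36, 1280, 1024, 50258
n_inner = 4 * n_embd  # "n_inner": null -> GPT-2 default of 4 * n_embd

embed = vocab * n_embd + n_ctx * n_embd                       # wte + wpe
attn = (n_embd * 3 * n_embd + 3 * n_embd) + (n_embd * n_embd + n_embd)
mlp = (n_embd * n_inner + n_inner) + (n_inner * n_embd + n_embd)
norms = 2 * (2 * n_embd)                                      # ln_1 + ln_2
params = embed + n_layer * (attn + mlp + norms) + 2 * n_embd  # + final ln_f
print(f"{params:,}")     # 774,031,360 -> the "774M" in the model name
print(params * 4 / 1e9)  # ~3.10 GB in float32 vs. the 3.13 GB file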
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:169b4ab88f7671b7f367bcb7651bdcd9065127a6128a0d175979ac545364a631
+ size 14503
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8b2b6e9c1347e28d86675cb1800fb1df7ef5bb26a25547ec553145e502ff902f
+ size 623
trainer_state.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 3.658536585365854,
+   "global_step": 19500,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.94,
+       "learning_rate": 9.063614186526554e-05,
+       "loss": 0.9314,
+       "step": 5000
+     },
+     {
+       "epoch": 1.88,
+       "learning_rate": 8.125351848376806e-05,
+       "loss": 0.6435,
+       "step": 10000
+     },
+     {
+       "epoch": 2.81,
+       "learning_rate": 7.18708951022706e-05,
+       "loss": 0.5023,
+       "step": 15000
+     }
+   ],
+   "max_steps": 53300,
+   "num_train_epochs": 10,
+   "total_flos": 1.697286170345472e+17,
+   "trial_name": null,
+   "trial_params": null
+ }
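The logged state is internally consistent: global_step 19500 at epoch 3.6585 implies 5330 optimizer steps per epoch, and num_train_epochs 10 gives exactly max_steps 53300. The three logged learning rates also fall on one straight line, matching a linear-decay schedule from a peak near 1e-4; the warmup length below is inferred from the numbers, not stated anywhere in the commit:

# Sketch: reproduce the logged learning rates under an assumed linear
# schedule (peak 1e-4, ~10 warmup steps, decaying to 0 at step 53300).
peak_lr, warmup, max_steps = 1e-4, 10, 53300

def lr_at(step: int) -> float:
    if step < warmup:
        return peak_lr * step / warmup
    return peak_lr * (max_steps - step) / (max_steps - warmup)

for step in (5000, 10000, 15000):
    print(step, lr_at(step))
# 5000  ~9.0636e-05   (logged: 9.063614186526554e-05)
# 10000 ~8.1254e-05   (logged: 8.125351848376806e-05)
# 15000 ~7.1871e-05   (logged: 7.18708951022706e-05)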
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a33d5e3e527355b12ec1190e535877d4b2c247c2d3485ed61d0247851d1c1c9e
+ size 3247
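training_args.bin is only 3.2 kB because it is a pickled TrainingArguments object rather than weights. A sketch for inspecting it, assuming a transformers version compatible with the 4.20.1 that wrote it is installed, since unpickling must resolve the class:

# Sketch: inspect the pickled TrainingArguments. transformers must be
# importable for pickle to resolve the class; on recent PyTorch the
# weights_only=False flag is required because this is not a tensor file.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs, args.warmup_steps)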