ayjays132 committed
Commit 7b9aefb · verified · 1 Parent(s): b5996ba

Upload 8 files

added_tokens.json CHANGED
The diff for this file is too large to render. See raw diff
 
config.json CHANGED
@@ -1,84 +1,77 @@
 {
-  "_name_or_path": "ayjays132/CustomGPT2Conversational",
-  "activation_function": "gelu_new",
-  "architectures": [
-    "GPT2LMHeadModel"
-  ],
-  "attn_pdrop": 0.1,
-  "bos_token_id": 50256,
+  "model_type": "gpt2",
+  "architectures": ["GPT2LMHeadModel"],
+  "tokenizer_config": {
+    "bos_token_id": 50256,
+    "eos_token_id": 50256,
+    "n_positions": 2048
+  },
   "config": {
     "activation_function": "gelu_new",
-    "attn_pdrop": 0.1,
-    "embd_pdrop": 0.1,
-    "gradient_checkpointing": true,
-    "initializer_range": 0.02,
-    "layer_norm_epsilon": 1e-05,
     "n_ctx": 2048,
     "n_embd": 2048,
     "n_head": 16,
-    "n_layer": 36,
+    "n_layer": 24,
     "n_positions": 2048,
+    "n_special": 0,
+    "attn_pdrop": 0.1,
+    "embd_pdrop": 0.1,
+    "initializer_range": 0.02,
+    "layer_norm_epsilon": 1e-05,
     "resid_pdrop": 0.1,
-    "scale_attn_weights": true,
-    "use_cache": true,
-    "vocab_size": 50257
+    "summary_activation": null,
+    "summary_first_dropout": 0.1,
+    "summary_proj_to_labels": true,
+    "summary_type": "cls_index",
+    "summary_use_proj": true
   },
-  "embd_pdrop": 0.1,
-  "eos_token_id": 50256,
-  "initializer_range": 0.02,
-  "language": "en",
-  "layer_norm_epsilon": 1e-05,
-  "library_name": "transformers",
-  "license": "apache-2.0",
-  "metrics": [
-    "perplexity",
-    "accuracy"
-  ],
-  "model_type": "gpt2",
-  "n_embd": 768,
-  "n_head": 12,
-  "n_inner": null,
-  "n_layer": 12,
-  "n_positions": 1024,
-  "pipeline_tag": "conversational",
-  "reorder_and_upcast_attn": false,
-  "resid_pdrop": 0.1,
-  "scale_attn_by_inverse_layer_idx": false,
-  "scale_attn_weights": true,
-  "summary_activation": null,
-  "summary_first_dropout": 0.1,
-  "summary_proj_to_labels": true,
-  "summary_type": "cls_index",
-  "summary_use_proj": true,
-  "tags": [
-    "conversational",
-    "state-of-the-art"
-  ],
   "task_specific_params": {
     "conversational": {
-      "do_sample": true,
-      "early_stopping": true,
-      "frequency_penalty": 0.5,
-      "length_penalty": 2.0,
       "max_length": 1024,
       "min_length": 20,
-      "no_repeat_ngram_size": 3,
+      "length_penalty": 1.5,
       "num_beams": 5,
-      "presence_penalty": 0.5,
+      "early_stopping": true,
+      "no_repeat_ngram_size": 3,
       "temperature": 0.7,
-      "top_k": 40,
-      "top_p": 0.95
+      "top_k": 50,
+      "top_p": 0.9
     }
   },
-  "tokenizer_config": {
-    "bos_token_id": 50256,
-    "eos_token_id": 50256,
-    "n_positions": 2048,
-    "padding_side": "left",
-    "truncation_side": "right"
-  },
-  "torch_dtype": "float32",
-  "transformers_version": "4.37.2",
-  "use_cache": true,
-  "vocab_size": 50257
+  "transformers_version": "4.34.0",
+  "language": ["en"],
+  "tags": ["conversational"],
+  "metrics": ["perplexity", "accuracy"],
+  "pipeline_tag": "conversational",
+  "library_name": "transformers",
+  "datasets": ["vicgalle/alpaca-gpt4"],
+  "license": "apache-2.0",
+  "custom_params": {
+    "adaptation_rate": 0.05,
+    "desired_improvement_rate": 0.02,
+    "ecosystem_dynamics": {
+      "environmental_volatility": 0.1,
+      "resource_pool": 1
+    },
+    "growth_improvement_threshold": 0.01,
+    "hidden_dim": 2048,
+    "initial_neuron_count": 5000,
+    "innovative_growth_net": {
+      "adaptation_rate": 0.05,
+      "initial_capacity": 250000,
+      "input_size": 2048
+    },
+    "input_dimension": 768,
+    "low_stability_threshold": 0.01,
+    "max_complexity": 10000,
+    "max_neurons": 250000,
+    "max_sequence_length": 1024,
+    "min_epochs_before_growth": 5,
+    "model_filename": "pytorch_model.bin",
+    "num_embeddings": 25000,
+    "pruning_improvement_threshold": 0.005,
+    "some_adaptation_rate": 0.05,
+    "stability_threshold": 0.02,
+    "start_token_index": 2
+  }
 }
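
The new config keeps its decoding defaults under task_specific_params.conversational. As a minimal, hedged sketch (the repo id is assumed from the old "_name_or_path" and is not part of this diff; whether the checkpoint actually loads depends on the weights uploaded alongside this commit), those defaults could be forwarded to generate() like this:

from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer

repo_id = "ayjays132/CustomGPT2Conversational"  # assumption: the Hub repo this commit targets

config = AutoConfig.from_pretrained(repo_id)
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id)

# Reuse the beam-search defaults declared above for the "conversational" task.
gen_kwargs = config.task_specific_params["conversational"]

inputs = tokenizer("Hello, how are you today?", return_tensors="pt")
outputs = model.generate(**inputs, **gen_kwargs)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))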
special_tokens_map.json CHANGED
@@ -1,125 +1,51 @@
 {
   "additional_special_tokens": [
-    "<extra_id_0>",
-    "<extra_id_1>",
-    "<extra_id_2>",
-    "<extra_id_3>",
-    "<extra_id_4>",
-    "<extra_id_5>",
-    "<extra_id_6>",
-    "<extra_id_7>",
-    "<extra_id_8>",
-    "<extra_id_9>",
-    "<extra_id_10>",
-    "<extra_id_11>",
-    "<extra_id_12>",
-    "<extra_id_13>",
-    "<extra_id_14>",
-    "<extra_id_15>",
-    "<extra_id_16>",
-    "<extra_id_17>",
-    "<extra_id_18>",
-    "<extra_id_19>",
-    "<extra_id_20>",
-    "<extra_id_21>",
-    "<extra_id_22>",
-    "<extra_id_23>",
-    "<extra_id_24>",
-    "<extra_id_25>",
-    "<extra_id_26>",
-    "<extra_id_27>",
-    "<extra_id_28>",
-    "<extra_id_29>",
-    "<extra_id_30>",
-    "<extra_id_31>",
-    "<extra_id_32>",
-    "<extra_id_33>",
-    "<extra_id_34>",
-    "<extra_id_35>",
-    "<extra_id_36>",
-    "<extra_id_37>",
-    "<extra_id_38>",
-    "<extra_id_39>",
-    "<extra_id_40>",
-    "<extra_id_41>",
-    "<extra_id_42>",
-    "<extra_id_43>",
-    "<extra_id_44>",
-    "<extra_id_45>",
-    "<extra_id_46>",
-    "<extra_id_47>",
-    "<extra_id_48>",
-    "<extra_id_49>",
-    "<extra_id_50>",
-    "<extra_id_51>",
-    "<extra_id_52>",
-    "<extra_id_53>",
-    "<extra_id_54>",
-    "<extra_id_55>",
-    "<extra_id_56>",
-    "<extra_id_57>",
-    "<extra_id_58>",
-    "<extra_id_59>",
-    "<extra_id_60>",
-    "<extra_id_61>",
-    "<extra_id_62>",
-    "<extra_id_63>",
-    "<extra_id_64>",
-    "<extra_id_65>",
-    "<extra_id_66>",
-    "<extra_id_67>",
-    "<extra_id_68>",
-    "<extra_id_69>",
-    "<extra_id_70>",
-    "<extra_id_71>",
-    "<extra_id_72>",
-    "<extra_id_73>",
-    "<extra_id_74>",
-    "<extra_id_75>",
-    "<extra_id_76>",
-    "<extra_id_77>",
-    "<extra_id_78>",
-    "<extra_id_79>",
-    "<extra_id_80>",
-    "<extra_id_81>",
-    "<extra_id_82>",
-    "<extra_id_83>",
-    "<extra_id_84>",
-    "<extra_id_85>",
-    "<extra_id_86>",
-    "<extra_id_87>",
-    "<extra_id_88>",
-    "<extra_id_89>",
-    "<extra_id_90>",
-    "<extra_id_91>",
-    "<extra_id_92>",
-    "<extra_id_93>",
-    "<extra_id_94>",
-    "<extra_id_95>",
-    "<extra_id_96>",
-    "<extra_id_97>",
-    "<extra_id_98>",
-    "<extra_id_99>"
+    {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    }
   ],
-  "eos_token": {
-    "content": "</s>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "pad_token": {
-    "content": "<pad>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "unk_token": {
-    "content": "<unk>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  }
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
 }
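
The special-token map now registers BERT-style markers ([PAD], [CLS], [SEP], [MASK], [UNK]) alongside GPT-2's <|endoftext|>. A small, hedged sketch of how a downstream user might check them and keep the embedding matrix in sync (repo id again assumed, not stated in this diff):

from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "ayjays132/CustomGPT2Conversational"  # assumption: the Hub repo this commit targets

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id)

# The new map should surface as named special tokens.
print(tokenizer.pad_token, tokenizer.cls_token, tokenizer.sep_token, tokenizer.mask_token)

# If the added tokens are not already reflected in the checkpoint, align the embeddings.
if len(tokenizer) != model.get_input_embeddings().num_embeddings:
    model.resize_token_embeddings(len(tokenizer))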
tokenizer.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3738aeb98a27be9bd050d9078f85b6b6fd7ab94dc5f112b747adcdc714b0a421
-size 73332137
+oid sha256:e173edee502038fb1ca5955c8c0989741bae978783c0b0f04e922eabd97c08ae
+size 9133154
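
The replaced tokenizer.json LFS object is roughly 9 MB instead of 73 MB, which usually indicates a different underlying vocabulary. A quick, hedged sanity check (same assumed repo id) is to confirm the tokenizer still loads and report its size:

from transformers import AutoTokenizer

repo_id = "ayjays132/CustomGPT2Conversational"  # assumption: the Hub repo this commit targets

tokenizer = AutoTokenizer.from_pretrained(repo_id)
print(len(tokenizer))  # compare against the embedding size the uploaded weights expect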
tokenizer_config.json CHANGED
The diff for this file is too large to render. See raw diff
 
vocab.json CHANGED
The diff for this file is too large to render. See raw diff
 
vocab.txt ADDED
The diff for this file is too large to render. See raw diff