Upload 6 files
- added_tokens.json +3 -0
- config.json +50 -58
- merges.txt +0 -0
- special_tokens_map.json +24 -0
- tokenizer_config.json +30 -0
- vocab.json +0 -0
added_tokens.json
ADDED
@@ -0,0 +1,3 @@
+{
+  "<pad>": 50257
+}
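This file registers a dedicated padding token on top of GPT-2's 50,257-token base vocabulary. A minimal sketch of how such a token is typically added (the base "gpt2" checkpoint stands in for this repo) and why it lands at id 50257:

from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # stand-in for this repo
model = AutoModelForCausalLM.from_pretrained("gpt2")

# GPT-2's base vocab covers ids 0..50256 (<|endoftext|> is last), so the first
# added token receives id 50257, matching added_tokens.json above.
tokenizer.add_special_tokens({"pad_token": "<pad>"})
model.resize_token_embeddings(len(tokenizer))  # grow the embedding matrix to 50258 rows

assert tokenizer.pad_token_id == 50257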
config.json
CHANGED
@@ -1,23 +1,23 @@
 {
-  [old lines 2-8 truncated in the rendered diff]
+  "model_type": "gpt2",
+  "architectures": ["GPT2LMHeadModel"],
+  "tokenizer_config": {
+    "bos_token_id": 50256,
+    "eos_token_id": 50256,
+    "n_positions": 2048
+  },
   "config": {
     "activation_function": "gelu_new",
-    "attn_pdrop": 0.1,
-    "embd_pdrop": 0.1,
-    "initializer_range": 0.02,
-    "layer_norm_epsilon": 1e-05,
     "n_ctx": 2048,
     "n_embd": 2048,
     "n_head": 16,
     "n_layer": 24,
     "n_positions": 2048,
     "n_special": 0,
+    "attn_pdrop": 0.1,
+    "embd_pdrop": 0.1,
+    "initializer_range": 0.02,
+    "layer_norm_epsilon": 1e-05,
     "resid_pdrop": 0.1,
     "summary_activation": null,
     "summary_first_dropout": 0.1,
@@ -25,61 +25,53 @@
     "summary_type": "cls_index",
     "summary_use_proj": true
   },
-  "datasets": [
-    "vicgalle/alpaca-gpt4"
-  ],
-  "embd_pdrop": 0.1,
-  "eos_token_id": 50256,
-  "initializer_range": 0.02,
-  "language": [
-    "en"
-  ],
-  "layer_norm_epsilon": 1e-05,
-  "library_name": "transformers",
-  "license": "apache-2.0",
-  "metrics": [
-    "perplexity",
-    "accuracy"
-  ],
-  "model_type": "gpt2",
-  "n_embd": 768,
-  "n_head": 12,
-  "n_inner": null,
-  "n_layer": 12,
-  "n_positions": 1024,
-  "pipeline_tag": "conversational",
-  "reorder_and_upcast_attn": false,
-  "resid_pdrop": 0.1,
-  "scale_attn_by_inverse_layer_idx": false,
-  "scale_attn_weights": true,
-  "summary_activation": null,
-  "summary_first_dropout": 0.1,
-  "summary_proj_to_labels": true,
-  "summary_type": "cls_index",
-  "summary_use_proj": true,
-  "tags": [
-    "conversational"
-  ],
   "task_specific_params": {
     "conversational": {
-      "early_stopping": true,
-      "length_penalty": 1.5,
       "max_length": 1024,
       "min_length": 20,
-      [old line 69 truncated in the rendered diff]
+      "length_penalty": 1.5,
       "num_beams": 5,
+      "early_stopping": true,
+      "no_repeat_ngram_size": 3,
       "temperature": 0.7,
       "top_k": 50,
       "top_p": 0.9
     }
   },
-  [old lines 76-84 truncated in the rendered diff]
+  "transformers_version": "4.34.0",
+  "language": ["en"],
+  "tags": ["conversational"],
+  "metrics": ["perplexity", "accuracy"],
+  "pipeline_tag": "conversational",
+  "library_name": "transformers",
+  "datasets": ["vicgalle/alpaca-gpt4"],
+  "license": "apache-2.0",
+  "custom_params": {
+    "adaptation_rate": 0.05,
+    "desired_improvement_rate": 0.02,
+    "ecosystem_dynamics": {
+      "environmental_volatility": 0.1,
+      "resource_pool": 1
+    },
+    "growth_improvement_threshold": 0.01,
+    "hidden_dim": 2048,
+    "initial_neuron_count": 5000,
+    "innovative_growth_net": {
+      "adaptation_rate": 0.05,
+      "initial_capacity": 250000,
+      "input_size": 2048
+    },
+    "input_dimension": 768,
+    "low_stability_threshold": 0.01,
+    "max_complexity": 10000,
+    "max_neurons": 250000,
+    "max_sequence_length": 1024,
+    "min_epochs_before_growth": 5,
+    "model_filename": "pytorch_model.bin",
+    "num_embeddings": 25000,
+    "pruning_improvement_threshold": 0.005,
+    "some_adaptation_rate": 0.05,
+    "stability_threshold": 0.02,
+    "start_token_index": 2
+  }
 }
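The keys under task_specific_params["conversational"] map one-to-one onto generate() keyword arguments. A minimal sketch of applying them, with values copied from the config above (the base "gpt2" checkpoint stands in for this repo; note that temperature, top_k, and top_p only take effect when do_sample=True is also passed):

from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # stand-in for this repo
model = AutoModelForCausalLM.from_pretrained("gpt2")

gen_kwargs = {  # copied from task_specific_params["conversational"]
    "max_length": 1024,
    "min_length": 20,
    "length_penalty": 1.5,
    "num_beams": 5,
    "early_stopping": True,
    "no_repeat_ngram_size": 3,
    "temperature": 0.7,
    "top_k": 50,
    "top_p": 0.9,
}
inputs = tokenizer("Hello, how are you?", return_tensors="pt")
output = model.generate(**inputs, **gen_kwargs)  # beam search with the config's settings
print(tokenizer.decode(output[0], skip_special_tokens=True))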
merges.txt
ADDED
The diff for this file is too large to render.
See raw diff
special_tokens_map.json
ADDED
@@ -0,0 +1,24 @@
+{
+  "bos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<pad>",
+  "unk_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
tokenizer_config.json
ADDED
@@ -0,0 +1,30 @@
+{
+  "add_bos_token": false,
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "50256": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "50257": {
+      "content": "<pad>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<|endoftext|>",
+  "clean_up_tokenization_spaces": true,
+  "eos_token": "<|endoftext|>",
+  "errors": "replace",
+  "model_max_length": 2048,
+  "pad_token": "<pad>",
+  "tokenizer_class": "GPT2Tokenizer",
+  "unk_token": "<|endoftext|>"
+}
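Once all six files sit in one directory, the tokenizer can be loaded and the special-token wiring across added_tokens.json, special_tokens_map.json, and tokenizer_config.json sanity-checked. A minimal sketch, assuming a hypothetical local path holding these files:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./model-dir")  # placeholder path containing the 6 files

assert tok.pad_token == "<pad>" and tok.pad_token_id == 50257      # from added_tokens.json
assert tok.eos_token == "<|endoftext|>" and tok.eos_token_id == 50256
assert tok.bos_token == tok.unk_token == "<|endoftext|>"           # from special_tokens_map.json
assert tok.model_max_length == 2048                                # from tokenizer_config.json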
vocab.json
ADDED
The diff for this file is too large to render.
See raw diff