llm-finetune committed
Commit 619513e
1 Parent(s): c9e7fd1

Training in progress, step 50

adapter_config.json CHANGED
@@ -1,7 +1,7 @@
 {
   "alpha_pattern": {},
   "auto_mapping": null,
-  "base_model_name_or_path": "google/gemma-2b",
+  "base_model_name_or_path": "google/gemma-2b-it",
   "bias": "none",
   "fan_in_fan_out": false,
   "inference_mode": true,
@@ -19,13 +19,13 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "o_proj",
     "q_proj",
+    "o_proj",
     "v_proj",
+    "k_proj",
     "gate_proj",
-    "up_proj",
     "down_proj",
-    "k_proj"
+    "up_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c61a3ee963f34def488179af3eaff235654b8aac679c18d4772fea1d50f7f69c
+oid sha256:b45cac2d6e36260458c56551f43695b32fa256b9ba17dd8942b8c1156eda269d
 size 78480072
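The safetensors change is just the Git LFS pointer moving to the step-50 adapter weights (same 78480072-byte size, new hash). A minimal loading sketch; the repo id below is hypothetical, standing in for wherever this checkpoint is hosted:

```python
# Sketch of loading the updated adapter on top of its base model.
# "llm-finetune/gemma-2b-it-adapter" is a placeholder repo id.
from transformers import AutoModelForCausalLM
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("google/gemma-2b-it")
model = PeftModel.from_pretrained(base, "llm-finetune/gemma-2b-it-adapter")
model.eval()
```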
special_tokens_map.json CHANGED
@@ -1,4 +1,8 @@
 {
+  "additional_special_tokens": [
+    "<start_of_turn>",
+    "<end_of_turn>"
+  ],
   "bos_token": {
     "content": "<bos>",
     "lstrip": false,
tokenizer.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d0d908b4f9326e0998815690e325b6abbd378978553e10627924dd825db7e243
-size 17477553
+oid sha256:05e97791a5e007260de1db7e1692e53150e08cea481e2bf25435553380c147ee
+size 17477929
tokenizer_config.json CHANGED
@@ -33,9 +33,30 @@
       "rstrip": false,
       "single_word": false,
       "special": true
+    },
+    "106": {
+      "content": "<start_of_turn>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "107": {
+      "content": "<end_of_turn>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
     }
   },
+  "additional_special_tokens": [
+    "<start_of_turn>",
+    "<end_of_turn>"
+  ],
   "bos_token": "<bos>",
+  "chat_template": "{{ bos_token }}{% if messages[0]['role'] == 'system' %}{{ raise_exception('System role not supported') }}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if (message['role'] == 'assistant') %}{% set role = 'model' %}{% else %}{% set role = message['role'] %}{% endif %}{{ '<start_of_turn>' + role + '\n' + message['content'] | trim + '<end_of_turn>\n' }}{% endfor %}{% if add_generation_prompt %}{{'<start_of_turn>model\n'}}{% endif %}",
   "clean_up_tokenization_spaces": false,
   "eos_token": "<eos>",
   "legacy": null,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cb853e01a9c5175f939130aa30db1ee797daa0c624fbc40a3e4e02eb365af8a7
+oid sha256:24e89271976e51b82cc711d1eac5a1458be40f824c92e8b5a741f48cddb9f45e
 size 4856
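training_args.bin is the pickled TrainingArguments that the Trainer saves alongside each checkpoint, so only its hash changes between runs. A sketch of inspecting it; recent PyTorch needs weights_only=False to unpickle arbitrary objects, which is only appropriate for files you trust:

```python
# Sketch: inspect the serialized TrainingArguments. weights_only=False is
# required on recent PyTorch because this file is a pickled Python object.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.per_device_train_batch_size, args.max_steps)
```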