cheonyumin committed
Commit • 14393f3 • 1 Parent(s): 56dbe66

Upload model

- README.md +9 -10
- adapter_config.json +14 -6
- adapter_model.safetensors +3 -0
README.md
CHANGED
```diff
@@ -1,6 +1,6 @@
 ---
 library_name: peft
-base_model: google/flan-t5-large
+base_model: meta-llama/Llama-2-7b-chat-hf
 ---
 
 # Model Card for Model ID
@@ -18,6 +18,7 @@ base_model: google/flan-t5-large
 
 
 - **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
 - **Shared by [optional]:** [More Information Needed]
 - **Model type:** [More Information Needed]
 - **Language(s) (NLP):** [More Information Needed]
@@ -76,7 +77,7 @@ Use the code below to get started with the model.
 
 ### Training Data
 
-<!-- This should link to a
+<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
 
 [More Information Needed]
 
@@ -107,7 +108,7 @@ Use the code below to get started with the model.
 
 #### Testing Data
 
-<!-- This should link to a
+<!-- This should link to a Dataset Card if possible. -->
 
 [More Information Needed]
 
@@ -200,20 +201,18 @@ Carbon emissions can be estimated using the [Machine Learning Impact calculator]
 
 ## Training procedure
 
-
 The following `bitsandbytes` quantization config was used during training:
 - quant_method: bitsandbytes
-- load_in_8bit: True
-- load_in_4bit: False
+- load_in_8bit: False
+- load_in_4bit: True
 - llm_int8_threshold: 6.0
 - llm_int8_skip_modules: None
 - llm_int8_enable_fp32_cpu_offload: False
 - llm_int8_has_fp16_weight: False
-- bnb_4bit_quant_type: fp4
+- bnb_4bit_quant_type: nf4
 - bnb_4bit_use_double_quant: False
-- bnb_4bit_compute_dtype:
+- bnb_4bit_compute_dtype: bfloat16
 
 ### Framework versions
 
-
-- PEFT 0.6.0.dev0
+- PEFT 0.7.0
```
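For reference, a minimal sketch of how the quantization settings recorded in the updated card translate into a `transformers` `BitsAndBytesConfig` when loading the new base model; `device_map="auto"` is an added convenience and not part of the recorded config, everything else mirrors the values listed above:

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Mirrors the config recorded in the card: 4-bit NF4 quantization,
# no double quantization, bfloat16 compute dtype.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=False,
    bnb_4bit_compute_dtype=torch.bfloat16,
)

base_model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-2-7b-chat-hf",  # new `base_model` from this commit
    quantization_config=bnb_config,
    device_map="auto",  # assumption for convenience, not in the recorded config
)
```

NF4 with a bfloat16 compute dtype is the standard QLoRA pairing; note that the recorded config leaves double quantization off.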
adapter_config.json
CHANGED
```diff
@@ -1,23 +1,31 @@
 {
   "alpha_pattern": {},
   "auto_mapping": null,
-  "base_model_name_or_path": "
+  "base_model_name_or_path": "meta-llama/Llama-2-7b-chat-hf",
   "bias": "none",
   "fan_in_fan_out": false,
   "inference_mode": true,
   "init_lora_weights": true,
   "layers_pattern": null,
   "layers_to_transform": null,
-  "lora_alpha":
+  "loftq_config": {},
+  "lora_alpha": 16,
   "lora_dropout": 0.05,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
   "modules_to_save": null,
   "peft_type": "LORA",
-  "r":
+  "r": 8,
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "
-    "
+    "gate_proj",
+    "k_proj",
+    "q_proj",
+    "o_proj",
+    "down_proj",
+    "v_proj",
+    "up_proj"
   ],
-  "task_type": "
+  "task_type": "CAUSAL_LM"
 }
```
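The updated adapter_config.json maps one-to-one onto a `peft` `LoraConfig`; as a sketch, the equivalent construction in Python (every keyword below is a key serialized in the JSON above):

```python
from peft import LoraConfig

# Rank-8 LoRA with alpha 16 and dropout 0.05, applied to all seven
# attention and MLP projection matrices of Llama-2.
lora_config = LoraConfig(
    r=8,
    lora_alpha=16,
    lora_dropout=0.05,
    bias="none",
    target_modules=[
        "gate_proj", "k_proj", "q_proj", "o_proj",
        "down_proj", "v_proj", "up_proj",
    ],
    task_type="CAUSAL_LM",
)
```

Targeting all seven projection layers rather than only q_proj/v_proj is the QLoRA-style choice; at r=8 across the 32 decoder layers of Llama-2-7b this comes to roughly 20M trainable parameters, about 80 MB in fp32, consistent with the adapter file added below.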
adapter_model.safetensors
ADDED
```diff
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:25b277708658e3250ef9a3759cc97230aa54da5adddd42cfc5b9e5c815de4ec1
+size 80013120
```
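The three added lines are a Git LFS pointer, not the weights themselves: the Hub stores the ~80 MB safetensors blob in LFS and resolves the pointer at download time. As a sketch, loading the uploaded adapter on top of the quantized base model from the earlier snippet; the repo id is a placeholder, since the actual Hub repo name is not shown in this commit view:

```python
from peft import PeftModel

# "cheonyumin/<repo-name>" is a placeholder; substitute the Hub repo this
# commit was pushed to. `base_model` is the 4-bit Llama-2 loaded earlier.
model = PeftModel.from_pretrained(base_model, "cheonyumin/<repo-name>")
model.eval()
```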