shrenikb committed
Commit e0a7469
1 Parent(s): 8ec5332

Upload model

Files changed (3):
  1. README.md +2 -20
  2. adapter_config.json +8 -2
  3. adapter_model.safetensors +2 -2
README.md CHANGED
@@ -81,7 +81,7 @@ Use the code below to get started with the model.
 
 [More Information Needed]
 
-### Training Procedure
+### Training Procedure
 
 <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
 
@@ -197,24 +197,6 @@ Carbon emissions can be estimated using the [Machine Learning Impact calculator]
 ## Model Card Contact
 
 [More Information Needed]
-
-
-## Training procedure
-
-
-The following `bitsandbytes` quantization config was used during training:
-- quant_method: bitsandbytes
-- load_in_8bit: True
-- load_in_4bit: False
-- llm_int8_threshold: 6.0
-- llm_int8_skip_modules: None
-- llm_int8_enable_fp32_cpu_offload: False
-- llm_int8_has_fp16_weight: False
-- bnb_4bit_quant_type: fp4
-- bnb_4bit_use_double_quant: False
-- bnb_4bit_compute_dtype: float32
-
 ### Framework versions
 
-
-- PEFT 0.6.2
+- PEFT 0.10.0
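For context, the README block removed above documented an 8-bit `bitsandbytes` setup. Below is a minimal sketch of how that removed config maps onto `transformers.BitsAndBytesConfig`; the base-model ID is taken from the updated adapter_config.json, and the loading call itself is illustrative, not part of this commit:

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Field-for-field mirror of the quantization block deleted from the README.
bnb_config = BitsAndBytesConfig(
    load_in_8bit=True,
    load_in_4bit=False,
    llm_int8_threshold=6.0,
    llm_int8_skip_modules=None,
    llm_int8_enable_fp32_cpu_offload=False,
    llm_int8_has_fp16_weight=False,
    bnb_4bit_quant_type="fp4",             # 4-bit settings are inert when load_in_8bit=True
    bnb_4bit_use_double_quant=False,
    bnb_4bit_compute_dtype=torch.float32,
)

base = AutoModelForCausalLM.from_pretrained(
    "huggyllama/llama-7b",                 # base model named in adapter_config.json
    quantization_config=bnb_config,
    device_map="auto",
)
```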
adapter_config.json CHANGED
@@ -1,15 +1,19 @@
 {
   "alpha_pattern": {},
   "auto_mapping": null,
-  "base_model_name_or_path": "chavinlo/alpaca-native",
+  "base_model_name_or_path": "huggyllama/llama-7b",
   "bias": "none",
   "fan_in_fan_out": false,
   "inference_mode": true,
   "init_lora_weights": true,
+  "layer_replication": null,
   "layers_pattern": null,
   "layers_to_transform": null,
+  "loftq_config": {},
   "lora_alpha": 16,
   "lora_dropout": 0.05,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
   "modules_to_save": null,
   "peft_type": "LORA",
   "r": 16,
@@ -18,5 +22,7 @@
   "target_modules": [
     "q_proj"
   ],
-  "task_type": "CAUSAL_LM"
+  "task_type": "CAUSAL_LM",
+  "use_dora": false,
+  "use_rslora": false
 }
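The updated adapter_config.json is a standard PEFT LoRA config (rank 16 on `q_proj` only) re-serialized by PEFT 0.10.0, which is why the extra fields (`layer_replication`, `loftq_config`, `megatron_*`, `use_dora`, `use_rslora`) now appear. A hedged sketch of the equivalent `peft.LoraConfig` and of applying the uploaded adapter; the adapter repo ID is a placeholder, not stated in the commit:

```python
from peft import LoraConfig, PeftModel
from transformers import AutoModelForCausalLM

# Equivalent of the committed adapter_config.json (null/default fields omitted).
lora_config = LoraConfig(
    r=16,
    lora_alpha=16,
    lora_dropout=0.05,
    bias="none",
    target_modules=["q_proj"],
    task_type="CAUSAL_LM",
    use_rslora=False,  # newer PEFT field, defaults to False
    use_dora=False,    # newer PEFT field, defaults to False
)

# Loading the uploaded adapter on top of the new base model.
base = AutoModelForCausalLM.from_pretrained("huggyllama/llama-7b")
model = PeftModel.from_pretrained(base, "shrenikb/YOUR-ADAPTER-REPO")  # placeholder repo ID
```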
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4c75e528c56c1bc428ef088191fb835ff6826a2da5f22c0a0a649f67bd0c5bee
-size 12589328
+oid sha256:ef5c2ed2a916cc4a7a7a1e9a13c9c4e084bcfe81c665bb553a818b94f0285032
+size 16785792
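The new file size is consistent with a rank-16 LoRA on `q_proj` across all 32 decoder layers of a 7B LLaMA (hidden size 4096), stored in fp32. A quick sanity check; the layer count and hidden size are the standard huggyllama/llama-7b figures, assumed here rather than stated in the commit:

```python
layers, hidden, r = 32, 4096, 16  # standard llama-7b dimensions (assumed)
params = layers * 2 * hidden * r  # lora_A (r x hidden) + lora_B (hidden x r) per q_proj
print(params)                     # 4194304 trainable parameters
print(params * 4)                 # 16777216 bytes in fp32
# Committed file: 16,785,792 bytes; the ~8.6 KB difference is safetensors header/metadata.
```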