paultrust committed
Commit 4b16f03
Parent: 784c045

Upload model

Files changed (3)
  1. README.md +20 -0
  2. adapter_config.json +5 -3
  3. adapter_model.bin +2 -2
README.md ADDED
@@ -0,0 +1,20 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: True
+ - bnb_4bit_compute_dtype: bfloat16
+ ### Framework versions
+
+
+ - PEFT 0.4.0.dev0
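
For context, the settings recorded in the README above map one-to-one onto a `transformers` `BitsAndBytesConfig`. A minimal sketch, assuming the standard `bitsandbytes` integration (only the values come from the card; constructing the config this way is illustrative, not part of the commit):

```python
import torch
from transformers import BitsAndBytesConfig

# Values copied verbatim from the quantization config listed in the README.
bnb_config = BitsAndBytesConfig(
    load_in_8bit=False,
    load_in_4bit=True,
    llm_int8_threshold=6.0,
    llm_int8_skip_modules=None,
    llm_int8_enable_fp32_cpu_offload=False,
    llm_int8_has_fp16_weight=False,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)
```

NF4 quantization with double quantization and a bfloat16 compute dtype is the usual QLoRA recipe, which matches the 4-bit settings recorded here.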
adapter_config.json CHANGED
@@ -1,17 +1,19 @@
  {
- "base_model_name_or_path": "EleutherAI/gpt-neo-1.3B",
+ "base_model_name_or_path": "tiiuae/falcon-7b-instruct",
  "bias": "none",
  "fan_in_fan_out": false,
  "inference_mode": true,
  "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
  "lora_alpha": 32,
  "lora_dropout": 0.05,
  "modules_to_save": null,
  "peft_type": "LORA",
  "r": 16,
+ "revision": null,
  "target_modules": [
- "q_proj",
- "v_proj"
+ "query_key_value"
  ],
  "task_type": "CAUSAL_LM"
  }
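
This change retargets the LoRA adapter from GPT-Neo's separate `q_proj`/`v_proj` attention projections to Falcon's fused `query_key_value` projection. A minimal sketch of attaching the updated adapter to its new base model with PEFT, assuming a hypothetical adapter repo id (the commit does not name the final repository):

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import PeftModel

# Base model taken from the updated adapter_config.json; the adapter repo id
# below is a hypothetical placeholder.
base_model_id = "tiiuae/falcon-7b-instruct"
adapter_id = "your-username/your-falcon-adapter"  # hypothetical

base = AutoModelForCausalLM.from_pretrained(
    base_model_id,
    quantization_config=BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_use_double_quant=True,
        bnb_4bit_compute_dtype=torch.bfloat16,
    ),
    device_map="auto",
)
model = PeftModel.from_pretrained(base, adapter_id)  # attaches the LoRA weights
```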
adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:b4ecb0f85edf72c2e9cd9aa65e87adde0daa154e64240b87dd117cc54df401d3
3
- size 12617681
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2cc1fb99c8cf09fc0a4185dfd0781d3249a18f7f30e8ea70d8f50b40dc244311
3
+ size 18898161