Ashishkr committed
Commit 9a4f868
Parent: f66cec6

Upload model trained with Unsloth


Upload model trained with Unsloth 2x faster

Files changed (3)
  1. README.md +5 -0
  2. adapter_config.json +33 -0
  3. adapter_model.safetensors +3 -0
README.md CHANGED
@@ -15,3 +15,8 @@ base_model: unsloth/gemma-7b-bnb-4bit
 
 - **Developed by:** Ashishkr
 - **License:** apache-2.0
+- **Finetuned from model:** unsloth/gemma-7b-bnb-4bit
+
+This gemma model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library.
+
+[<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
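For context, a minimal sketch of how an adapter like this would typically be attached to its 4-bit base model with transformers and peft. The adapter repo id is a placeholder, since the commit page does not name the repository:

```python
# Minimal sketch: load the LoRA adapter from this commit onto its base model.
# "Ashishkr/<adapter-repo>" is a placeholder -- substitute the actual repo id.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "unsloth/gemma-7b-bnb-4bit"   # base model named in the README
adapter_id = "Ashishkr/<adapter-repo>"  # placeholder repo id

tokenizer = AutoTokenizer.from_pretrained(base_id)
model = AutoModelForCausalLM.from_pretrained(base_id, device_map="auto")
model = PeftModel.from_pretrained(model, adapter_id)  # attach the adapter
```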
adapter_config.json ADDED
@@ -0,0 +1,33 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": "unsloth/gemma-7b-bnb-4bit",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 4096,
+  "lora_dropout": 0,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 1024,
+  "rank_pattern": {},
+  "revision": "unsloth",
+  "target_modules": [
+    "down_proj",
+    "o_proj",
+    "v_proj",
+    "gate_proj",
+    "up_proj",
+    "k_proj",
+    "q_proj"
+  ],
+  "task_type": "CAUSAL_LM",
+  "use_dora": false,
+  "use_rslora": false
+}
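The same settings, expressed as a hedged sketch of a peft LoraConfig; field names mirror the JSON above. Note the unusually large rank of 1024 with alpha 4096:

```python
# Sketch: the adapter_config.json above, reconstructed as a peft LoraConfig.
from peft import LoraConfig

config = LoraConfig(
    r=1024,           # LoRA rank -- unusually large for an adapter
    lora_alpha=4096,  # scaling factor (alpha / r = 4)
    lora_dropout=0,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=[
        "down_proj", "o_proj", "v_proj",
        "gate_proj", "up_proj", "k_proj", "q_proj",
    ],
)
```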
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:99cd2ee8ca81d0e34a53943d8cfc0085574a5964f63247ae55e6f20d0874c058
+size 12801070560
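The weights file is stored as a Git LFS pointer; at roughly 12.8 GB (per the size field) the adapter is unusually heavy, a consequence of r=1024 across all seven projection modules. A minimal sketch for fetching the file and checking it against the recorded sha256; the repo id is again a placeholder:

```python
# Sketch: download the adapter weights and verify the LFS-recorded sha256.
# "Ashishkr/<adapter-repo>" is a placeholder -- substitute the actual repo id.
import hashlib
from huggingface_hub import hf_hub_download

path = hf_hub_download("Ashishkr/<adapter-repo>", "adapter_model.safetensors")

sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        sha.update(chunk)

expected = "99cd2ee8ca81d0e34a53943d8cfc0085574a5964f63247ae55e6f20d0874c058"
assert sha.hexdigest() == expected, "checksum mismatch"
```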