munish0838 commited on
Commit
d80efe0
1 Parent(s): c19e623

Create README.md

Browse files
Files changed (1) hide show
  1. README.md +134 -0
README.md ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: llama3
3
+ pipeline_tag: text-generation
4
+ base_model: OwenArli/ArliAI-Llama-3-8B-Dolfin-v0.2-Instruct
5
+ ---
6
+
7
+ # QuantFactory/ArliAI-Llama-3-8B-Dolfin-v0.2-Instruct-GGUF
8
+ This is quantized version of [OwenArli/ArliAI-Llama-3-8B-Dolfin-v0.2-Instruct](https://huggingface.co/OwenArli/ArliAI-Llama-3-8B-Dolfin-v0.2-Instruct) created using llama.cpp
9
+
10
+ # Model Description
11
+ Based on Meta-Llama-3-8b-Instruct, and is governed by Meta Llama 3 License agreement:
12
+ https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct
13
+
14
 + v0.2 version with an improved Dolphin-based dataset, but using only 150K samples for testing instead of the full 850K. It doesn't seem to work that well, so I will need to add the rest of the dataset.
15
+
16
+ We are happy for anyone to try it out and give some feedback.
17
+
18
+
19
+ Training:
20
 + - 4096 sequence length, while the base model is 8192 sequence length. From testing, it still performs just fine at the full 8192 context length.
21
 + - Trained on a modified and improved version of Eric Hartford's (Cognitive Computations) Dolphin dataset. https://huggingface.co/datasets/cognitivecomputations/dolphin
22
+ - Training duration is around 1 day on 2x RTX3090 on our own machine, using 4-bit loading and Qlora 64-rank 128-alpha resulting in ~2% trainable weights.
23
+
24
+
25
 + The goal for this model is to make it less censored and great at general tasks, like the previous Dolphin-based models by Eric Hartford.
26
+
27
+
28
+ Instruct format:
29
+ ```
30
+ <|begin_of_text|><|start_header_id|>system<|end_header_id|>
31
+
32
+ {{ system_prompt }}<|eot_id|><|start_header_id|>user<|end_header_id|>
33
+
34
+ {{ user_message_1 }}<|eot_id|><|start_header_id|>assistant<|end_header_id|>
35
+
36
+ {{ model_answer_1 }}<|eot_id|><|start_header_id|>user<|end_header_id|>
37
+
38
+ {{ user_message_2 }}<|eot_id|><|start_header_id|>assistant<|end_header_id|>
39
+ ```
40
+
41
+
42
+ Quants:
43
+
44
+
45
+
46
+ [<img src="https://raw.githubusercontent.com/OpenAccess-AI-Collective/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/OpenAccess-AI-Collective/axolotl)
47
+
48
+ Axolotl Config:
49
+ ```
50
+ base_model: /home/owen/models/Meta-Llama-3-8B-Instruct
51
+ model_type: LlamaForCausalLM
52
+ tokenizer_type: AutoTokenizer
53
+
54
+ train_on_inputs: false
55
+ group_by_length: false
56
+ load_in_8bit: false
57
+ load_in_4bit: true
58
+ strict: false
59
+ sequence_len: 4096
60
+ bf16: true
61
+ fp16: false
62
+ tf32: false
63
+ flash_attention: true
64
+
65
+ # Data
66
+ datasets:
67
+ - path: /home/owen/datasets/cleaned-dolphin201-sharegpt2-uuid-improved.jsonl
68
+ type:
69
+ field_instruction: input
70
+ field_output: output
71
+ format: "<|start_header_id|>user<|end_header_id|>\n\n{instruction}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n"
72
+ no_input_format: "<|start_header_id|>user<|end_header_id|>\n\n{instruction}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n"
73
+
74
+ warmup_steps: 10
75
+ dataset_prepared_path: ./last_run_prepared
76
+
77
+ # Iterations
78
+ num_epochs: 1
79
+ saves_per_epoch: 4
80
+
81
+ # Evaluation
82
+ val_set_size: 0.01
83
+ eval_table_size:
84
+ eval_table_max_new_tokens:
85
+ eval_sample_packing: false
86
+ evals_per_epoch: 4
87
+
88
+ # LoRA
89
+ output_dir: ./qlora-out
90
+ adapter: qlora
91
+ lora_model_dir:
92
+ lora_r: 64
93
+ lora_alpha: 128
94
+ lora_dropout: 0.05
95
+ lora_target_linear: true
96
+ lora_fan_in_fan_out:
97
+ lora_target_modules:
98
+ save_safetensors: true
99
+
100
+ # Sampling
101
+ sample_packing: true
102
+ pad_to_sequence_len: true
103
+
104
+ # Batching
105
+ gradient_accumulation_steps: 32
106
+ micro_batch_size: 2
107
+ gradient_checkpointing: true
108
+ gradient_checkpointing_kwargs:
109
+ use_reentrant: true
110
+
111
+ # wandb
112
+ wandb_mode: # "offline" to save run metadata locally and not sync to the server, "disabled" to turn off wandb
113
+ wandb_project: llama-3-8b-instruct-dolphin-q
114
+ wandb_entity: # A wandb Team name if using a Team
115
+ wandb_watch:
116
+ wandb_name: 64-128-4096-1ep-v0.2
117
+ wandb_run_id: # Set the ID of your wandb run
118
+ wandb_log_model: # "checkpoint" to log model to wandb Artifacts every `save_steps` or "end" to log only at the end of training
119
+
120
+ # Optimizer
121
+ optimizer: paged_adamw_8bit
122
+ lr_scheduler: cosine
123
+ learning_rate: 0.0002
124
+
125
+ # Misc
126
+ early_stopping_patience:
127
+ resume_from_checkpoint:
128
+ logging_steps: 1
129
+ debug:
130
+ deepspeed: /home/owen/axolotl/deepspeed_configs/zero3_bf16.json
131
+ weight_decay: 0.1
132
+ special_tokens:
133
+ pad_token: <|end_of_text|>
134
+ ```