Glavin001 committed
Commit b6d9372
1 Parent(s): 3747e51

Upload 2 files

Files changed (2)
  1. cfg.yaml +111 -0
  2. config.json +32 -0
cfg.yaml ADDED
@@ -0,0 +1,111 @@
+ architecture:
+   backbone_dtype: int4
+   force_embedding_gradients: false
+   gradient_checkpointing: true
+   intermediate_dropout: 0.0
+   pretrained: true
+   pretrained_weights: ''
+ augmentation:
+   random_parent_probability: 0.0
+   skip_parent_probability: 0.0
+   token_mask_probability: 0.0
+ dataset:
+   add_eos_token_to_answer: true
+   add_eos_token_to_prompt: true
+   answer_column: output
+   chatbot_author: H2O.ai
+   chatbot_name: h2oGPT
+   data_sample: 1.0
+   data_sample_choice:
+   - Train
+   - Validation
+   limit_chained_samples: false
+   mask_prompt_labels: true
+   parent_id_column: None
+   personalize: false
+   prompt_column:
+   - instruction
+   text_answer_separator: <|answer|>
+   text_prompt_start: <|prompt|>
+   train_dataframe: data/user/startup-interviews/json-train.pq
+   validation_dataframe: None
+   validation_size: 0.02
+   validation_strategy: automatic
+ environment:
+   compile_model: false
+   find_unused_parameters: false
+   gpus:
+   - '0'
+   huggingface_branch: main
+   mixed_precision: true
+   number_of_workers: 8
+   seed: -1
+   trust_remote_code: true
+   use_fsdp: false
+ experiment_name: startup-interviews-13b-int4-2epochs.1
+ llm_backbone: huggyllama/llama-13b
+ logging:
+   logger: Neptune
+   neptune_project: glavin001/startup-interviews
+   number_of_texts: 10
+ output_directory: output/user/startup-interviews-13b-int4-2epochs.1/
+ prediction:
+   batch_size_inference: 0
+   do_sample: false
+   max_length_inference: 1024
+   metric: BLEU
+   min_length_inference: 2
+   num_beams: 1
+   num_history: 2
+   repetition_penalty: 1.2
+   stop_tokens: ''
+   temperature: 0.3
+   top_k: 0
+   top_p: 1.0
+ problem_type: text_causal_language_modeling
+ tokenizer:
+   add_prefix_space: false
+   add_prompt_answer_tokens: false
+   max_length: 2048
+   max_length_answer: 2048
+   max_length_prompt: 192
+   padding_quantile: 1.0
+   use_fast: false
+ training:
+   adaptive_kl_control: true
+   advantages_gamma: 0.99
+   advantages_lambda: 0.95
+   batch_size: 6
+   differential_learning_rate: 1.0e-05
+   differential_learning_rate_layers: []
+   drop_last_batch: true
+   epochs: 2
+   evaluate_before_training: true
+   evaluation_epochs: 0.2
+   grad_accumulation: 1
+   gradient_clip: 0.0
+   initial_kl_coefficient: 0.2
+   kl_horizon: 10000
+   kl_target: 6.0
+   learning_rate: 0.0001
+   lora: true
+   lora_alpha: 16
+   lora_dropout: 0.05
+   lora_r: 4
+   lora_target_modules: q_proj,k_proj,v_proj,o_proj,gate_proj,down_proj,up_proj
+   loss_function: TokenAveragedCrossEntropy
+   offload_reward_model: false
+   optimizer: AdamW
+   ppo_batch_size: 1
+   ppo_clip_policy: 0.2
+   ppo_clip_value: 0.2
+   ppo_epochs: 4
+   ppo_generate_temperature: 1.0
+   reward_model: OpenAssistant/reward-model-deberta-v3-large-v2
+   save_best_checkpoint: false
+   scaling_factor_value_loss: 0.1
+   schedule: Cosine
+   train_validation_data: false
+   use_rlhf: false
+   warmup_epochs: 0.05
+   weight_decay: 0.0
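The `architecture` and `training` sections above amount to QLoRA-style fine-tuning: the llama-13b backbone loaded in 4-bit (`backbone_dtype: int4`) with small LoRA adapters (`lora_r: 4`, `lora_alpha: 16`, `lora_dropout: 0.05`) attached to the listed projection modules. Below is a minimal sketch of an equivalent setup using transformers, peft, and bitsandbytes; it is illustrative only and not h2o-llmstudio's actual training code, which builds the model internally from this config.

```python
# Minimal sketch of the fine-tuning setup described by cfg.yaml above.
# Illustrative only: h2o-llmstudio constructs this internally.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import LoraConfig, get_peft_model

model = AutoModelForCausalLM.from_pretrained(
    "huggyllama/llama-13b",                                      # llm_backbone
    quantization_config=BitsAndBytesConfig(load_in_4bit=True),   # backbone_dtype: int4
    device_map="auto",
)
model.gradient_checkpointing_enable()  # gradient_checkpointing: true

lora_config = LoraConfig(
    r=4,                # lora_r
    lora_alpha=16,      # lora_alpha
    lora_dropout=0.05,  # lora_dropout
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj",
                    "gate_proj", "down_proj", "up_proj"],  # lora_target_modules
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()  # only the LoRA adapters are trainable
```

Per the `dataset` section, each training example is serialized as `<|prompt|>{instruction}</s><|answer|>{output}</s>` (from `prompt_column`, `answer_column`, `text_prompt_start`, `text_answer_separator`, and the two `add_eos_token` flags), and the prompt tokens are excluded from the loss (`mask_prompt_labels: true`).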
config.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "_name_or_path": "huggyllama/llama-13b",
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "bos_token_id": 1,
+   "custom_pipelines": {
+     "text-generation": {
+       "impl": "h2oai_pipeline.H2OTextGenerationPipeline",
+       "pt": "AutoModelForCausalLM"
+     }
+   },
+   "eos_token_id": 2,
+   "hidden_act": "silu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 5120,
+   "initializer_range": 0.02,
+   "intermediate_size": 13824,
+   "max_position_embeddings": 2048,
+   "max_sequence_length": 2048,
+   "model_type": "llama",
+   "num_attention_heads": 40,
+   "num_hidden_layers": 40,
+   "pad_token_id": 0,
+   "rms_norm_eps": 1e-06,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "transformers_version": "4.30.1",
+   "use_cache": true,
+   "vocab_size": 32000
+ }
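The `custom_pipelines` entry registers the repo's own `h2oai_pipeline.H2OTextGenerationPipeline` for the text-generation task, so transformers can resolve it when `trust_remote_code=True` (matching `trust_remote_code: true` in the `environment` section of cfg.yaml). A minimal usage sketch follows; the model id is a placeholder for whatever repo these files were uploaded to.

```python
# Minimal usage sketch for the custom pipeline declared in config.json.
import torch
from transformers import pipeline

generate_text = pipeline(
    "text-generation",
    model="Glavin001/startup-interviews-13b",  # placeholder: use this repo's actual id
    torch_dtype=torch.float16,
    device_map="auto",
    trust_remote_code=True,  # required to load h2oai_pipeline.H2OTextGenerationPipeline
)

# The exported pipeline is expected to wrap the input in the
# <|prompt|>...<|answer|> format defined in cfg.yaml.
res = generate_text("What makes a startup succeed?", max_new_tokens=256)
print(res[0]["generated_text"])
```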