areegtarek committed
Update README.md

README.md CHANGED

@@ -10,6 +10,28 @@ tags: []

## Model Details

The change adds the LoRA (PEFT) and training configuration used to fine-tune the model. A cleaned-up version of the added snippet is shown below; the imports and the opening `config = LoraConfig(` call are reconstructed because the added lines begin mid-call, and `model` / `model_name` are assumed to be defined earlier in the card.

```python
from peft import LoraConfig, get_peft_model
from transformers import TrainingArguments

# LoRA adapter configuration (opening of the call reconstructed from context).
config = LoraConfig(
    r = 32,                               # LoRA rank
    lora_alpha = 64,                      # LoRA scaling factor
    target_modules = ["q_proj", "k_proj", "v_proj", "o_proj",
                      "gate_proj", "up_proj", "down_proj"],
    lora_dropout = 0.1,
    bias = "none",                        # do not train bias parameters
)
model = get_peft_model(model, config)     # `model` is defined earlier in the card
model.print_trainable_parameters()

training_args = TrainingArguments(
    output_dir = f"{model_name}-logo",    # `model_name` is defined earlier in the card
    dataloader_pin_memory = False,
    logging_steps = 1,
    remove_unused_columns = False,
    push_to_hub = False,
    label_names = ["labels"],
    num_train_epochs = 10,
    per_device_train_batch_size = 1,
    gradient_accumulation_steps = 1,
    warmup_steps = 0.1,                   # a fractional value is usually passed as warmup_ratio
    save_total_limit = 5,
    max_grad_norm = 0.3,
)
```

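For reference, a minimal usage sketch (an assumption, not part of the original card) of how the PEFT-wrapped model and `training_args` would typically be passed to a Hugging Face `Trainer`; `train_dataset` stands in for a tokenized training set that is not shown here:

```python
from transformers import Trainer, default_data_collator

trainer = Trainer(
    model = model,                      # PEFT-wrapped model from the snippet above
    args = training_args,
    train_dataset = train_dataset,      # hypothetical tokenized training set (not in the card)
    data_collator = default_data_collator,
)
trainer.train()
```
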
### Model Description