alexkueck committed on
Commit
ca91fc3
·
1 Parent(s): 31c7c6f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +31 -0
app.py CHANGED
@@ -16,6 +16,13 @@ import sklearn
16
  from sklearn.model_selection import train_test_split
17
  from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score
18
  import pprint
 
 
 
 
 
 
 
19
 
20
 
21
  #####################################################
@@ -97,6 +104,30 @@ tokenizer,model,device = load_tokenizer_and_model(base_model, True)
97
  tokenizer,model,device = load_tokenizer_and_model_Blaize(base_model, True)
98
  tokenizer.pad_token_id = 0
99
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
100
  ####################################################
101
  #Datensets für Finetuning laden
102
  dataset_neu = daten_laden("alexkueck/tis")
 
16
  from sklearn.model_selection import train_test_split
17
  from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score
18
  import pprint
19
+ from peft import (
20
+ prepare_model_for_int8_training,
21
+ LoraConfig,
22
+ get_peft_model,
23
+ get_peft_model_state_dict,
24
+ )
25
+
26
 
27
 
28
  #####################################################
 
104
  tokenizer,model,device = load_tokenizer_and_model_Blaize(base_model, True)
105
  tokenizer.pad_token_id = 0
106
 
107
+ #speziell für Blaize Model:
108
+ TARGET_MODULES = [
109
+ "q_proj",
110
+ "k_proj",
111
+ "v_proj",
112
+ "down_proj",
113
+ "gate_proj",
114
+ "up_proj",
115
+ ]
116
+
117
+ config = LoraConfig(
118
+ r=8,
119
+ lora_alpha=16,
120
+ target_modules=TARGET_MODULES,
121
+ lora_dropout=0.05,
122
+ bias="none",
123
+ task_type="CAUSAL_LM",
124
+ )
125
+ #config.save_pretrained(OUTPUT_DIR)
126
+ model = get_peft_model(model, config)
127
+ tokenizer.pad_token_id = 0
128
+
129
+
130
+
131
  ####################################################
132
  #Datensets für Finetuning laden
133
  dataset_neu = daten_laden("alexkueck/tis")