dh-mc committed
Commit 6d1c39a · 1 Parent(s): 3a9dd7a

initial code

.env.example ADDED
@@ -0,0 +1,12 @@
+ HF_TOKEN=
+ MODEL_NAME=unsloth/Qwen2-0.5B-Instruct-bnb-4bit
+ LOAD_IN_4BIT=true
+ NUM_TRAIN_EPOCHS=10
+
+ DATA_PATH=datasets/mac/mac.tsv
+ RESULTS_PATH=results/mac-results-colab.csv
+
+ EVAL_BASE_MODEL=true
+ DO_FINE_TUNING=false
+ EVAL_FINE_TUNED=false
+ SAVE_FINE_TUNED=false
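
Note: tune_mac.py (added below) loads these values with python-dotenv and compares the boolean toggles against the literal string "true", so any other value (including "True" or an empty string) turns the corresponding step off. A minimal sketch of that parsing, assuming the file has been copied to .env:

# Sketch only: mirrors how llm_toolkit/tune_mac.py reads the settings above.
import os

from dotenv import find_dotenv, load_dotenv

# Fall back to .env.example when no .env exists, as tune_mac.py does.
load_dotenv(find_dotenv(".env") or find_dotenv(".env.example"), override=False)

load_in_4bit = os.getenv("LOAD_IN_4BIT") == "true"  # only the literal "true" enables 4-bit loading
eval_base_model = os.getenv("EVAL_BASE_MODEL") == "true"
do_fine_tuning = os.getenv("DO_FINE_TUNING") == "true"
num_train_epochs = int(os.getenv("NUM_TRAIN_EPOCHS") or 0)

print(load_in_4bit, eval_base_model, do_fine_tuning, num_train_epochs)
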
.gitignore ADDED
@@ -0,0 +1,150 @@
+ *.out
+ *.log
+ */outputs/
+ */models/
+ */wandb/
+ */cs605-nlp-assignment-2*/
+ */augmented_data/
+ */inflaton/
+ */llama.cpp/
+
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ pip-wheel-metadata/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ # *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # JetBrains
+ .idea
+
+ *.db
+
+ .DS_Store
+ /outputs
+ /models
+ /llama.cpp
+ /huggingface_tokenizers_cache
datasets/mac/mac-test.tsv ADDED
The diff for this file is too large to render. See raw diff
 
datasets/mac/mac-train.tsv ADDED
The diff for this file is too large to render. See raw diff
 
datasets/mac/mac.tsv ADDED
The diff for this file is too large to render. See raw diff
 
llm_toolkit/translation_engine.py ADDED
@@ -0,0 +1,271 @@
+ import os
+ import pandas as pd
+ from datasets import load_dataset
+ import torch
+ from unsloth import FastLanguageModel, is_bfloat16_supported
+ from trl import SFTTrainer
+ from transformers import TrainingArguments, TextStreamer
+ from tqdm import tqdm
+ from llm_toolkit.translation_utils import *
+
+ print(f"loading {__file__}")
+
+
+ def get_model_names(
+     model_name, save_method="merged_4bit_forced", quantization_method="q5_k_m"
+ ):
+     hub_model = model_name.split("/")[-1] + "-MAC-"
+     local_model = "models/" + hub_model
+
+     return {
+         "local": local_model + save_method,
+         "local-gguf": local_model + quantization_method,
+         "hub": hub_model + save_method,
+         "hub-gguf": hub_model + "gguf-" + quantization_method,
+     }
+
+
+ def load_model(
+     model_name,
+     max_seq_length=2048,
+     dtype=None,
+     load_in_4bit=False,
+ ):
+     print(f"loading model: {model_name}")
+
+     model, tokenizer = FastLanguageModel.from_pretrained(
+         model_name=model_name,  # YOUR MODEL YOU USED FOR TRAINING
+         max_seq_length=max_seq_length,
+         dtype=dtype,
+         load_in_4bit=load_in_4bit,
+         trust_remote_code=True,
+     )
+     FastLanguageModel.for_inference(model)
+
+     return model, tokenizer
+
+
+ def test_model(model, tokenizer, prompt):
+     inputs = tokenizer(
+         [prompt],
+         return_tensors="pt",
+     ).to("cuda")
+
+     text_streamer = TextStreamer(tokenizer)
+
+     _ = model.generate(
+         **inputs, max_new_tokens=128, streamer=text_streamer, use_cache=True
+     )
+
+
+ def load_trainer(
+     model,
+     tokenizer,
+     dataset,
+     num_train_epochs,
+     max_seq_length=2048,
+     fp16=False,
+     bf16=False,
+     output_dir="./outputs",
+ ):
+     model = FastLanguageModel.get_peft_model(
+         model,
+         r=16,  # Choose any number > 0 ! Suggested 8, 16, 32, 64, 128
+         target_modules=[
+             "q_proj",
+             "k_proj",
+             "v_proj",
+             "o_proj",
+             "gate_proj",
+             "up_proj",
+             "down_proj",
+         ],
+         lora_alpha=16,
+         lora_dropout=0,  # Supports any, but = 0 is optimized
+         bias="none",  # Supports any, but = "none" is optimized
+         # [NEW] "unsloth" uses 30% less VRAM, fits 2x larger batch sizes!
+         use_gradient_checkpointing="unsloth",  # True or "unsloth" for very long context
+         random_state=3407,
+         use_rslora=False,  # We support rank stabilized LoRA
+         loftq_config=None,  # And LoftQ
+     )
+
+     trainer = SFTTrainer(
+         model=model,
+         tokenizer=tokenizer,
+         train_dataset=dataset,
+         dataset_text_field="text",
+         max_seq_length=max_seq_length,
+         dataset_num_proc=2,
+         packing=False,  # Can make training 5x faster for short sequences.
+         args=TrainingArguments(
+             per_device_train_batch_size=2,
+             gradient_accumulation_steps=4,
+             warmup_steps=5,
+             num_train_epochs=num_train_epochs,
+             learning_rate=2e-4,
+             fp16=not is_bfloat16_supported(),
+             bf16=is_bfloat16_supported(),
+             logging_steps=100,
+             optim="adamw_8bit",
+             weight_decay=0.01,
+             lr_scheduler_type="linear",
+             seed=3407,
+             output_dir=output_dir,
+         ),
+     )
+
+     return trainer
+
+
+ def load_translation_dataset(data_path, tokenizer=None):
+     train_data_file = data_path.replace(".tsv", "-train.tsv")
+     test_data_file = data_path.replace(".tsv", "-test.tsv")
+
+     if not os.path.exists(train_data_file):
+         print("generating train/test data files")
+         dataset = load_dataset(
+             "csv", data_files=data_path, delimiter="\t", split="train"
+         )
+         print(len(dataset))
+         dataset = dataset.filter(lambda x: x["chinese"] and x["english"])
+
+         datasets = dataset.train_test_split(test_size=0.2)
+         print(len(dataset))
+
+         # Convert to pandas DataFrame
+         train_df = pd.DataFrame(datasets["train"])
+         test_df = pd.DataFrame(datasets["test"])
+
+         # Save to TSV
+         train_df.to_csv(train_data_file, sep="\t", index=False)
+         test_df.to_csv(test_data_file, sep="\t", index=False)
+
+     print("loading train/test data files")
+     datasets = load_dataset(
+         "csv",
+         data_files={"train": train_data_file, "test": test_data_file},
+         delimiter="\t",
+     )
+
+     if tokenizer:
+         translation_prompt = "Please translate the following Chinese text into English and provide only the translated content, nothing else.\n{}"
+
+         def formatting_prompts_func(examples):
+             inputs = examples["chinese"]
+             outputs = examples["english"]
+
+             messages = [
+                 {
+                     "role": "system",
+                     "content": "You are an expert in translating Chinese to English.",
+                 },
+                 None,
+             ]
+
+             model_name = os.getenv("MODEL_NAME")
+
+             if "mistral" in model_name.lower():
+                 messages = messages[1:]
+
+             texts = []
+             prompts = []
+             for input, output in zip(inputs, outputs):
+                 prompt = translation_prompt.format(input)
+                 messages[-1] = {"role": "user", "content": prompt}
+
+                 prompt = tokenizer.apply_chat_template(
+                     messages, tokenize=False, add_generation_prompt=True
+                 )
+                 prompts.append(prompt)
+                 texts.append(prompt + output + tokenizer.eos_token)
+             return {"text": texts, "prompt": prompts}
+
+         datasets = datasets.map(
+             formatting_prompts_func,
+             batched=True,
+         )
+
+     print(datasets)
+     return datasets
+
+
+ def eval_model(model, tokenizer, eval_dataset):
+     total = len(eval_dataset)
+     predictions = []
+     for i in tqdm(range(total)):
+         inputs = tokenizer(
+             eval_dataset["prompt"][i : i + 1],
+             return_tensors="pt",
+         ).to("cuda")
+
+         outputs = model.generate(**inputs, max_new_tokens=4096, use_cache=False)
+         decoded_output = tokenizer.batch_decode(outputs)
+         debug = i == 0
+         decoded_output = [
+             extract_answer(output, debug=debug) for output in decoded_output
+         ]
+         predictions.extend(decoded_output)
+
+     return predictions
+
+
+ def save_model(
+     model,
+     tokenizer,
+     include_gguf=True,
+     include_merged=True,
+     publish=True,
+ ):
+     try:
+         token = os.getenv("HF_TOKEN") or None
+         model_name = os.getenv("MODEL_NAME")
+
+         save_method = "lora"
+         quantization_method = "q5_k_m"
+
+         model_names = get_model_names(
+             model_name, save_method=save_method, quantization_method=quantization_method
+         )
+
+         model.save_pretrained(model_names["local"])
+         tokenizer.save_pretrained(model_names["local"])
+
+         if publish:
+             model.push_to_hub(
+                 model_names["hub"],
+                 token=token,
+             )
+             tokenizer.push_to_hub(
+                 model_names["hub"],
+                 token=token,
+             )
+
+         if include_merged:
+             model.save_pretrained_merged(
+                 model_names["local"] + "-merged", tokenizer, save_method=save_method
+             )
+             if publish:
+                 model.push_to_hub_merged(
+                     model_names["hub"] + "-merged",
+                     tokenizer,
+                     save_method="lora",
+                     token=token,
+                 )
+
+         if include_gguf:
+             model.save_pretrained_gguf(
+                 model_names["local-gguf"],
+                 tokenizer,
+                 quantization_method=quantization_method,
+             )
+
+             if publish:
+                 model.push_to_hub_gguf(
+                     model_names["hub-gguf"],
+                     tokenizer,
+                     quantization_method=quantization_method,
+                     token=token,
+                 )
+     except Exception as e:
+         print(e)
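
For orientation, the helpers above are meant to be used together roughly as follows (this mirrors the flow of llm_toolkit/tune_mac.py below; the model name and data path are simply the defaults from .env.example):

# Illustrative sketch, not part of the commit: evaluate a base model on the MAC test split.
import os

from llm_toolkit.translation_engine import (
    eval_model,
    load_model,
    load_translation_dataset,
    test_model,
)
from llm_toolkit.translation_utils import calc_metrics

# formatting_prompts_func reads MODEL_NAME from the environment, so set it first.
os.environ.setdefault("MODEL_NAME", "unsloth/Qwen2-0.5B-Instruct-bnb-4bit")

model, tokenizer = load_model("unsloth/Qwen2-0.5B-Instruct-bnb-4bit", load_in_4bit=True)
datasets = load_translation_dataset("datasets/mac/mac.tsv", tokenizer)

# Stream one translation to sanity-check the chat-template prompt.
test_model(model, tokenizer, datasets["test"]["prompt"][0])

# Translate the whole test split and score it.
predictions = eval_model(model, tokenizer, datasets["test"])
print(calc_metrics(datasets["test"]["english"], predictions))
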
llm_toolkit/translation_utils.py ADDED
@@ -0,0 +1,224 @@
+ import os
+ import re
+ import pandas as pd
+ import evaluate
+ import seaborn as sns
+ import matplotlib.pyplot as plt
+
+
+ bleu = evaluate.load("bleu")
+ rouge = evaluate.load("rouge")
+ meteor = evaluate.load("meteor")
+ accuracy = evaluate.load("accuracy")
+
+
+ def extract_answer(text, debug=False):
+     if text:
+         # Remove the begin and end tokens
+         text = re.sub(
+             r".*?(assistant|\[/INST\]).+?\b", "", text, flags=re.DOTALL | re.MULTILINE
+         )
+         if debug:
+             print("--------\nstep 1:", text)
+
+         text = re.sub(r"<.+?>.*", "", text, flags=re.DOTALL | re.MULTILINE)
+         if debug:
+             print("--------\nstep 2:", text)
+
+         text = re.sub(
+             r".*?end_header_id\|>\n\n", "", text, flags=re.DOTALL | re.MULTILINE
+         )
+         if debug:
+             print("--------\nstep 3:", text)
+
+     return text
+
+
+ def calc_metrics(references, predictions, debug=False):
+     assert len(references) == len(
+         predictions
+     ), f"lengths are different: {len(references)} != {len(predictions)}"
+
+     predictions = [extract_answer(text) for text in predictions]
+
+     correct = [1 if ref == pred else 0 for ref, pred in zip(references, predictions)]
+     accuracy = sum(correct) / len(references)
+
+     results = {"accuracy": accuracy}
+     if debug:
+         correct_ids = [i for i, c in enumerate(correct) if c == 1]
+         results["correct_ids"] = correct_ids
+
+     results["meteor"] = meteor.compute(predictions=predictions, references=references)[
+         "meteor"
+     ]
+
+     results["bleu_scores"] = bleu.compute(
+         predictions=predictions, references=references, max_order=4
+     )
+     results["rouge_scores"] = rouge.compute(
+         predictions=predictions, references=references
+     )
+     return results
+
+
+ def save_results(model_name, results_path, dataset, predictions, debug=False):
+     if not os.path.exists(results_path):
+         # Get the directory part of the file path
+         dir_path = os.path.dirname(results_path)
+
+         # Create all directories in the path (if they don't exist)
+         os.makedirs(dir_path, exist_ok=True)
+         df = dataset.to_pandas()
+         df.drop(columns=["text", "prompt"], inplace=True)
+     else:
+         df = pd.read_csv(results_path, on_bad_lines="warn")
+
+     df[model_name] = predictions
+
+     if debug:
+         print(df.head(1))
+
+     df.to_csv(results_path, index=False)
+
+
+ def get_metrics(df):
+     metrics_df = pd.DataFrame(df.columns.T)[2:]
+     metrics_df.rename(columns={0: "model"}, inplace=True)
+     metrics_df["model"] = metrics_df["model"].apply(lambda x: x.split("/")[-1])
+     metrics_df.reset_index(inplace=True)
+     metrics_df = metrics_df.drop(columns=["index"])
+
+     accuracy = []
+     meteor = []
+     bleu_1 = []
+     rouge_l = []
+     all_metrics = []
+     for col in df.columns[2:]:
+         metrics = calc_metrics(df["english"], df[col], debug=True)
+         print(f"{col}: {metrics}")
+
+         accuracy.append(metrics["accuracy"])
+         meteor.append(metrics["meteor"])
+         bleu_1.append(metrics["bleu_scores"]["bleu"])
+         rouge_l.append(metrics["rouge_scores"]["rougeL"])
+         all_metrics.append(metrics)
+
+     metrics_df["accuracy"] = accuracy
+     metrics_df["meteor"] = meteor
+     metrics_df["bleu_1"] = bleu_1
+     metrics_df["rouge_l"] = rouge_l
+     metrics_df["all_metrics"] = all_metrics
+
+     return metrics_df
+
+
+ def plot_metrics(metrics_df, figsize=(14, 5), ylim=(0, 0.44)):
+     plt.figure(figsize=figsize)
+     df_melted = pd.melt(
+         metrics_df, id_vars="model", value_vars=["meteor", "bleu_1", "rouge_l"]
+     )
+
+     barplot = sns.barplot(x="variable", y="value", hue="model", data=df_melted)
+
+     # Set different hatches for each model
+     hatches = ["/", "\\", "|", "-", "+", "x", "o", "O", ".", "*", "//", "\\\\"]
+
+     # Create a dictionary to map models to hatches
+     model_hatches = {
+         model: hatches[i % len(hatches)]
+         for i, model in enumerate(metrics_df["model"].unique())
+     }
+
+     # Apply hatches based on the model
+     num_vars = len(df_melted["variable"].unique())
+     for i, bar in enumerate(barplot.patches):
+         model = df_melted["model"].iloc[i // num_vars]
+         bar.set_hatch(model_hatches[model])
+
+     # Manually update legend to match the bar hatches
+     handles, labels = barplot.get_legend_handles_labels()
+     for handle, model in zip(handles, metrics_df["model"].unique()):
+         handle.set_hatch(model_hatches[model])
+
+     barplot.set_xticklabels(["METEOR", "BLEU-1", "ROUGE-L"])
+     for p in barplot.patches:
+         if p.get_height() == 0:
+             continue
+         barplot.annotate(
+             f"{p.get_height():.2f}",
+             (p.get_x() + p.get_width() / 2.0, p.get_height()),
+             ha="center",
+             va="center",
+             xytext=(0, 10),
+             textcoords="offset points",
+         )
+
+     barplot.set(ylim=ylim, ylabel="Scores", xlabel="Metrics")
+     plt.legend(bbox_to_anchor=(0.5, -0.1), loc="upper center")
+     plt.show()
+
+
+ def plot_times(perf_df, ylim=0.421):
+     # Adjusted code to put "train-time" bars in red at the bottom
+
+     fig, ax1 = plt.subplots(figsize=(12, 10))
+
+     color_train = "tab:red"
+     color_eval = "orange"
+     ax1.set_xlabel("Models")
+     ax1.set_ylabel("Time (mins)")
+     ax1.set_xticks(range(len(perf_df["model"])))  # Set x-ticks positions
+     ax1.set_xticklabels(perf_df["model"], rotation=90)
+
+     # Plot "train-time" first so it's at the bottom
+     ax1.bar(
+         perf_df["model"],
+         perf_df["train-time(mins)"],
+         color=color_train,
+         label="train-time",
+     )
+
+     # Then, plot "eval-time" on top of "train-time"
+     ax1.bar(
+         perf_df["model"],
+         perf_df["eval-time(mins)"],
+         bottom=perf_df["train-time(mins)"],
+         color=color_eval,
+         label="eval-time",
+     )
+
+     ax1.tick_params(axis="y")
+     ax1.legend(loc="upper left")
+
+     if "meteor" in perf_df.columns:
+         ax2 = ax1.twinx()
+         color_meteor = "tab:blue"
+         ax2.set_ylabel("METEOR", color=color_meteor)
+         ax2.plot(
+             perf_df["model"],
+             perf_df["meteor"],
+             color=color_meteor,
+             marker="o",
+             label="meteor",
+         )
+         ax2.tick_params(axis="y", labelcolor=color_meteor)
+         ax2.legend(loc="upper right")
+         ax2.set_ylim(ax2.get_ylim()[0], ylim)
+
+     # Show numbers in bars
+     for p in ax1.patches:
+         height = p.get_height()
+         if height == 0:  # Skip bars with height 0
+             continue
+         ax1.annotate(
+             f"{height:.2f}",
+             (p.get_x() + p.get_width() / 2.0, p.get_y() + height),
+             ha="center",
+             va="center",
+             xytext=(0, -10),
+             textcoords="offset points",
+         )
+
+     fig.tight_layout()
+     plt.show()
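
A short sketch of how these helpers can be applied to the results file written by save_results (the CSV layout, chinese and english columns followed by one column per evaluated model, follows from save_results and get_metrics above):

# Illustrative sketch, not part of the commit: score and plot all model columns in the results CSV.
import pandas as pd

from llm_toolkit.translation_utils import get_metrics, plot_metrics

df = pd.read_csv("results/mac-results-colab.csv", on_bad_lines="warn")
metrics_df = get_metrics(df)  # accuracy / METEOR / BLEU-1 / ROUGE-L for every column after "english"
print(metrics_df)
plot_metrics(metrics_df)  # grouped bar chart with one hatch pattern per model
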
llm_toolkit/tune_mac.py ADDED
@@ -0,0 +1,144 @@
+ import os
+ import sys
+ import torch
+ from dotenv import find_dotenv, load_dotenv
+
+ found_dotenv = find_dotenv(".env")
+
+ if len(found_dotenv) == 0:
+     found_dotenv = find_dotenv(".env.example")
+ print(f"loading env vars from: {found_dotenv}")
+ load_dotenv(found_dotenv, override=False)
+
+ path = os.path.dirname(found_dotenv)
+ print(f"Adding {path} to sys.path")
+ sys.path.append(path)
+
+ from llm_toolkit.translation_engine import *
+ from llm_toolkit.translation_utils import *
+
+
+ model_name = os.getenv("MODEL_NAME")
+ load_in_4bit = os.getenv("LOAD_IN_4BIT") == "true"
+ eval_base_model = os.getenv("EVAL_BASE_MODEL") == "true"
+ eval_fine_tuned = os.getenv("EVAL_FINE_TUNED") == "true"
+ do_fine_tuning = os.getenv("DO_FINE_TUNING") == "true"
+ save_fine_tuned_model = os.getenv("SAVE_FINE_TUNED") == "true"
+ num_train_epochs = int(os.getenv("NUM_TRAIN_EPOCHS") or 0)
+ data_path = os.getenv("DATA_PATH")
+ results_path = os.getenv("RESULTS_PATH")
+
+ max_seq_length = 2048  # Choose any! We auto support RoPE Scaling internally!
+ dtype = (
+     None  # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
+ )
+
+ print(
+     model_name,
+     load_in_4bit,
+     max_seq_length,
+     num_train_epochs,
+     dtype,
+     data_path,
+     results_path,
+     eval_base_model,
+     do_fine_tuning,
+     eval_fine_tuned,
+     save_fine_tuned_model,
+ )
+
+ gpu_stats = torch.cuda.get_device_properties(0)
+ start_gpu_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)
+ max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3)
+ print(f"(1) GPU = {gpu_stats.name}. Max memory = {max_memory} GB.")
+ print(f"{start_gpu_memory} GB of memory reserved.")
+
+ model, tokenizer = load_model(model_name, load_in_4bit=load_in_4bit)
+
+ gpu_stats = torch.cuda.get_device_properties(0)
+ start_gpu_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)
+ max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3)
+ print(f"(2) GPU = {gpu_stats.name}. Max memory = {max_memory} GB.")
+ print(f"{start_gpu_memory} GB of memory reserved.")
+
+ datasets = load_translation_dataset(data_path, tokenizer)
+
+ if eval_base_model:
+     print("Evaluating base model: " + model_name)
+     predictions = eval_model(model, tokenizer, datasets["test"])
+
+     # calc_metrics(datasets["test"]["english"], predictions, debug=True)
+
+     save_results(
+         model_name,
+         results_path,
+         datasets["test"],
+         predictions,
+         debug=True,
+     )
+
+ gpu_stats = torch.cuda.get_device_properties(0)
+ start_gpu_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)
+ max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3)
+ print(f"(3) GPU = {gpu_stats.name}. Max memory = {max_memory} GB.")
+ print(f"{start_gpu_memory} GB of memory reserved.")
+
+
+ if not do_fine_tuning:
+     sys.exit(0)
+
+ trainer = load_trainer(
+     model,
+     tokenizer,
+     datasets["train"],
+     num_train_epochs,
+     fp16=not is_bfloat16_supported(),
+     bf16=is_bfloat16_supported(),
+ )
+
+ gpu_stats = torch.cuda.get_device_properties(0)
+ start_gpu_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)
+ max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3)
+ print(f"(4) GPU = {gpu_stats.name}. Max memory = {max_memory} GB.")
+ print(f"{start_gpu_memory} GB of memory reserved.")
+
+ trainer_stats = trainer.train()
+
+ # @title Show final memory and time stats
+ used_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)
+ used_memory_for_lora = round(used_memory - start_gpu_memory, 3)
+ used_percentage = round(used_memory / max_memory * 100, 3)
+ lora_percentage = round(used_memory_for_lora / max_memory * 100, 3)
+ print(f"(5) GPU = {gpu_stats.name}. Max memory = {max_memory} GB.")
+ print(f"{trainer_stats.metrics['train_runtime']} seconds used for training.")
+ print(
+     f"{round(trainer_stats.metrics['train_runtime']/60, 2)} minutes used for training."
+ )
+ print(f"Peak reserved memory = {used_memory} GB.")
+ print(f"Peak reserved memory for training = {used_memory_for_lora} GB.")
+ print(f"Peak reserved memory % of max memory = {used_percentage} %.")
+ print(f"Peak reserved memory for training % of max memory = {lora_percentage} %.")
+
+ if eval_fine_tuned:
+     print("Evaluating fine-tuned model: " + model_name)
+     FastLanguageModel.for_inference(model)  # Enable native 2x faster inference
+     predictions = eval_model(model, tokenizer, datasets["test"])
+
+     # calc_metrics(datasets["test"]["english"], predictions, debug=True)
+
+     save_results(
+         model_name + "(finetuned)",
+         results_path,
+         datasets["test"],
+         predictions,
+         debug=True,
+     )
+
+ gpu_stats = torch.cuda.get_device_properties(0)
+ start_gpu_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)
+ max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3)
+ print(f"(6) GPU = {gpu_stats.name}. Max memory = {max_memory} GB.")
+ print(f"{start_gpu_memory} GB of memory reserved.")
+
+ if save_fine_tuned_model:
+     save_model(model, tokenizer)
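
The GPU memory report in this script is repeated six times with only the step number changing. One possible refactor (not part of this commit) is to wrap it in a small helper, roughly:

# Possible helper, not in this commit: the repeated memory report from tune_mac.py as a function.
import torch


def report_gpu_memory(step):
    gpu_stats = torch.cuda.get_device_properties(0)
    reserved_gb = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)
    total_gb = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3)
    print(f"({step}) GPU = {gpu_stats.name}. Max memory = {total_gb} GB.")
    print(f"{reserved_gb} GB of memory reserved.")
    return reserved_gb, total_gb


# e.g. start_gpu_memory, max_memory = report_gpu_memory(1)
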
notebooks/00_fine-tune-with-colab.ipynb ADDED
@@ -0,0 +1 @@
+ {"cells":[{"cell_type":"code","execution_count":null,"metadata":{"application/vnd.databricks.v1+cell":{"cellMetadata":{},"inputWidgets":{},"nuid":"0ea8b46b-839b-445b-8043-ccdf4e920ace","showTitle":false,"title":""},"id":"YLH80COBzi_F"},"outputs":[],"source":["%load_ext autoreload\n","%autoreload 2"]},{"cell_type":"code","source":["from google.colab import drive\n","drive.mount('/content/drive')"],"metadata":{"id":"63B5exAuzq4M"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["workding_dir = \"/content/drive/MyDrive/logical-reasoning/\""],"metadata":{"id":"zFulf0bg0H-9"},"execution_count":null,"outputs":[]},{"cell_type":"code","execution_count":null,"metadata":{"application/vnd.databricks.v1+cell":{"cellMetadata":{},"inputWidgets":{},"nuid":"6d394937-6c99-4a7c-9d32-7600a280032f","showTitle":false,"title":""},"id":"Rzln0ffbzi_H"},"outputs":[],"source":["import os\n","import sys\n","from pathlib import Path\n","\n","os.chdir(workding_dir)\n","sys.path.append(workding_dir)\n","print(\"workding dir:\", workding_dir)"]},{"cell_type":"code","source":["%%capture\n","# Installs Unsloth, Xformers (Flash Attention) and all other packages!\n","!pip install \"unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git\"\n","!pip install --no-deps xformers \"trl<0.9.0\" peft accelerate bitsandbytes\n","\n","\n","!pip install -r requirements.txt"],"metadata":{"id":"blBRUfev1ccE"},"execution_count":null,"outputs":[]},{"cell_type":"code","execution_count":null,"metadata":{"application/vnd.databricks.v1+cell":{"cellMetadata":{},"inputWidgets":{},"nuid":"9f67ec60-2f24-411c-84eb-0dd664b44775","showTitle":false,"title":""},"id":"DIUiweYYzi_I"},"outputs":[],"source":["from dotenv import find_dotenv, load_dotenv\n","\n","found_dotenv = find_dotenv(\".env\")\n","\n","if len(found_dotenv) == 0:\n"," found_dotenv = find_dotenv(\".env.example\")\n","print(f\"loading env vars from: {found_dotenv}\")\n","load_dotenv(found_dotenv, override=True)"]},{"cell_type":"code","execution_count":null,"metadata":{"application/vnd.databricks.v1+cell":{"cellMetadata":{},"inputWidgets":{},"nuid":"f1597656-8042-4878-9d3b-9ebfb8dd86dc","showTitle":false,"title":""},"id":"wLbMLrZbzi_I"},"outputs":[],"source":["import os\n","\n","model_name = os.getenv(\"MODEL_NAME\")\n","token = os.getenv(\"HF_TOKEN\") or None\n","load_in_4bit = os.getenv(\"LOAD_IN_4BIT\") == \"true\"\n","local_model = os.getenv(\"LOCAL_MODEL\")\n","hub_model = os.getenv(\"HUB_MODEL\")\n","num_train_epochs = int(os.getenv(\"NUM_TRAIN_EPOCHS\") or 0)\n","data_path = os.getenv(\"DATA_PATH\")\n","results_path = os.getenv(\"RESULTS_PATH\")\n","\n","max_seq_length = 2048 # Choose any! We auto support RoPE Scaling internally!\n","dtype = (\n"," None # None for auto detection. 
Float16 for Tesla T4, V100, Bfloat16 for Ampere+\n",")\n","\n","model_name, load_in_4bit, local_model, hub_model, max_seq_length, num_train_epochs, dtype, data_path, results_path"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"6nmbA8Tpzi_J"},"outputs":[],"source":["!nvidia-smi"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"4hQO8gkFzi_K","colab":{"base_uri":"https://localhost:8080/"},"outputId":"d36c7526-b607-48f4-8311-4a3b7ba31d56"},"outputs":[{"output_type":"stream","name":"stdout","text":["Current Directory:\n","/content/drive/.shortcut-targets-by-id/1E09lTnfbsjtTgQg65dQ3y9D2R6l8waxR/logical-reasoning\n","Wed Jun 26 22:59:53 2024 \n","+---------------------------------------------------------------------------------------+\n","| NVIDIA-SMI 535.104.05 Driver Version: 535.104.05 CUDA Version: 12.2 |\n","|-----------------------------------------+----------------------+----------------------+\n","| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC |\n","| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. |\n","| | | MIG M. |\n","|=========================================+======================+======================|\n","| 0 Tesla T4 Off | 00000000:00:04.0 Off | 0 |\n","| N/A 68C P8 11W / 70W | 0MiB / 15360MiB | 0% Default |\n","| | | N/A |\n","+-----------------------------------------+----------------------+----------------------+\n"," \n","+---------------------------------------------------------------------------------------+\n","| Processes: |\n","| GPU GI CI PID Type Process name GPU Memory |\n","| ID ID Usage |\n","|=======================================================================================|\n","| No running processes found |\n","+---------------------------------------------------------------------------------------+\n","Linux f4b16e94a91c 6.1.85+ #1 SMP PREEMPT_DYNAMIC Fri May 24 14:06:39 UTC 2024 x86_64 x86_64 x86_64 GNU/Linux\n","PRETTY_NAME=\"Ubuntu 22.04.3 LTS\"\n","NAME=\"Ubuntu\"\n","VERSION_ID=\"22.04\"\n","VERSION=\"22.04.3 LTS (Jammy Jellyfish)\"\n","VERSION_CODENAME=jammy\n","ID=ubuntu\n","ID_LIKE=debian\n","HOME_URL=\"https://www.ubuntu.com/\"\n","SUPPORT_URL=\"https://help.ubuntu.com/\"\n","BUG_REPORT_URL=\"https://bugs.launchpad.net/ubuntu/\"\n","PRIVACY_POLICY_URL=\"https://www.ubuntu.com/legal/terms-and-policies/privacy-policy\"\n","UBUNTU_CODENAME=jammy\n","Architecture: x86_64\n"," CPU op-mode(s): 32-bit, 64-bit\n"," Address sizes: 46 bits physical, 48 bits virtual\n"," Byte Order: Little Endian\n","CPU(s): 2\n"," On-line CPU(s) list: 0,1\n","Vendor ID: GenuineIntel\n"," Model name: Intel(R) Xeon(R) CPU @ 2.00GHz\n"," CPU family: 6\n"," Model: 85\n"," Thread(s) per core: 2\n"," Core(s) per socket: 1\n"," Socket(s): 1\n"," Stepping: 3\n"," BogoMIPS: 4000.29\n"," Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 cl\n"," flush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc re\n"," p_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3\n"," fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand\n"," hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp \n"," fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f a\n"," vx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveop\n"," t xsavec xgetbv1 xsaves arat md_clear arch_capabilities\n","Virtualization features: \n"," Hypervisor vendor: KVM\n"," Virtualization type: full\n","Caches (sum of all): \n"," L1d: 
32 KiB (1 instance)\n"," L1i: 32 KiB (1 instance)\n"," L2: 1 MiB (1 instance)\n"," L3: 38.5 MiB (1 instance)\n","NUMA: \n"," NUMA node(s): 1\n"," NUMA node0 CPU(s): 0,1\n","Vulnerabilities: \n"," Gather data sampling: Not affected\n"," Itlb multihit: Not affected\n"," L1tf: Mitigation; PTE Inversion\n"," Mds: Vulnerable; SMT Host state unknown\n"," Meltdown: Vulnerable\n"," Mmio stale data: Vulnerable\n"," Reg file data sampling: Not affected\n"," Retbleed: Vulnerable\n"," Spec rstack overflow: Not affected\n"," Spec store bypass: Vulnerable\n"," Spectre v1: Vulnerable: __user pointer sanitization and usercopy barriers only; no swa\n"," pgs barriers\n"," Spectre v2: Vulnerable; IBPB: disabled; STIBP: disabled; PBRSB-eIBRS: Not affected; BH\n"," I: Vulnerable (Syscall hardening enabled)\n"," Srbds: Not affected\n"," Tsx async abort: Vulnerable\n","MemTotal: 13290452 kB\n","Tuning unsloth/Qwen2-0.5B-Instruct-bnb-4bit\n","loading env vars from: /content/drive/.shortcut-targets-by-id/1E09lTnfbsjtTgQg65dQ3y9D2R6l8waxR/logical-reasoning/.env\n","Adding /content/drive/.shortcut-targets-by-id/1E09lTnfbsjtTgQg65dQ3y9D2R6l8waxR/logical-reasoning to sys.path\n","🦥 Unsloth: Will patch your computer to enable 2x faster free finetuning.\n","2024-06-26 23:00:00.295266: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:9261] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n","2024-06-26 23:00:00.295314: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:607] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n","2024-06-26 23:00:00.296863: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1515] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n","2024-06-26 23:00:00.304716: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n","To enable the following instructions: AVX2 AVX512F FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n","2024-06-26 23:00:01.409557: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n","[nltk_data] Downloading package wordnet to /root/nltk_data...\n","[nltk_data] Package wordnet is already up-to-date!\n","[nltk_data] Downloading package punkt to /root/nltk_data...\n","[nltk_data] Package punkt is already up-to-date!\n","[nltk_data] Downloading package omw-1.4 to /root/nltk_data...\n","[nltk_data] Package omw-1.4 is already up-to-date!\n","loading /content/drive/.shortcut-targets-by-id/1E09lTnfbsjtTgQg65dQ3y9D2R6l8waxR/logical-reasoning/llm_toolkit/translation_engine.py\n","unsloth/Qwen2-0.5B-Instruct-bnb-4bit True 2048 10 None datasets/mac/mac.tsv results/mac-results-colab.csv True True False\n","(1) GPU = Tesla T4. Max memory = 14.748 GB.\n","0.0 GB of memory reserved.\n","loading model: unsloth/Qwen2-0.5B-Instruct-bnb-4bit\n","==((====))== Unsloth: Fast Qwen2 patching release 2024.6\n"," \\\\ /| GPU: Tesla T4. Max memory: 14.748 GB. Platform = Linux.\n","O^O/ \\_/ \\ Pytorch: 2.3.0+cu121. CUDA = 7.5. CUDA Toolkit = 12.1.\n","\\ / Bfloat16 = FALSE. Xformers = 0.0.26.post1. 
FA = False.\n"," \"-____-\" Free Apache license: http://github.com/unslothai/unsloth\n","Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.\n","Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.\n","(2) GPU = Tesla T4. Max memory = 14.748 GB.\n","0.633 GB of memory reserved.\n","loading train/test data files\n","DatasetDict({\n"," train: Dataset({\n"," features: ['chinese', 'english', 'text', 'prompt'],\n"," num_rows: 4528\n"," })\n"," test: Dataset({\n"," features: ['chinese', 'english', 'text', 'prompt'],\n"," num_rows: 1133\n"," })\n","})\n","Evaluating base model: unsloth/Qwen2-0.5B-Instruct-bnb-4bit\n"," 0% 0/1133 [00:00<?, ?it/s]--------\n","step 1: Old耿举起枪,眯缝起了三角眼,砰的一声扣动扳机,子弹轰隆隆地落下,一颗颗的金麻雀扑棱棱地落在柳树上,嘎吱嘎呀地发出声响。<|im_end|>\n","--------\n","step 2: Old耿举起枪,眯缝起了三角眼,砰的一声扣动扳机,子弹轰隆隆地落下,一颗颗的金麻雀扑棱棱地落在柳树上,嘎吱嘎呀地发出声响。\n","--------\n","step 3: Old耿举起枪,眯缝起了三角眼,砰的一声扣动扳机,子弹轰隆隆地落下,一颗颗的金麻雀扑棱棱地落在柳树上,嘎吱嘎呀地发出声响。\n","100% 1133/1133 [1:01:12<00:00, 3.24s/it]\n"," chinese ... unsloth/Qwen2-0.5B-Instruct-bnb-4bit\n","0 老耿端起枪,眯缝起一只三角眼,一搂扳机响了枪,冰雹般的金麻雀劈哩啪啦往下落,铁砂子在柳枝间飞... ... Old耿举起枪,眯缝起了三角眼,砰的一声扣动扳机,子弹轰隆隆地落下,一颗颗的金麻雀扑棱棱地落...\n","\n","[1 rows x 3 columns]\n","(3) GPU = Tesla T4. Max memory = 14.748 GB.\n","0.955 GB of memory reserved.\n","Unsloth 2024.6 patched 24 layers with 0 QKV layers, 24 O layers and 24 MLP layers.\n","/usr/local/lib/python3.10/dist-packages/multiprocess/popen_fork.py:66: RuntimeWarning: os.fork() was called. os.fork() is incompatible with multithreaded code, and JAX is multithreaded, so this will likely lead to a deadlock.\n"," self.pid = os.fork()\n","Map (num_proc=2): 100% 4528/4528 [00:03<00:00, 1364.29 examples/s]\n","(4) GPU = Tesla T4. 
Max memory = 14.748 GB.\n","0.955 GB of memory reserved.\n","==((====))== Unsloth - 2x faster free finetuning | Num GPUs = 1\n"," \\\\ /| Num examples = 4,528 | Num Epochs = 10\n","O^O/ \\_/ \\ Batch size per device = 2 | Gradient Accumulation steps = 4\n","\\ / Total batch size = 8 | Total steps = 5,660\n"," \"-____-\" Number of trainable parameters = 8,798,208\n","{'loss': 1.9475, 'grad_norm': 0.953141450881958, 'learning_rate': 0.00019667550839964635, 'epoch': 0.18}\n","{'loss': 1.7755, 'grad_norm': 0.8021742701530457, 'learning_rate': 0.00019313881520778074, 'epoch': 0.35}\n","{'loss': 1.7138, 'grad_norm': 0.9069837927818298, 'learning_rate': 0.00018960212201591512, 'epoch': 0.53}\n","{'loss': 1.7147, 'grad_norm': 0.7456142902374268, 'learning_rate': 0.00018606542882404953, 'epoch': 0.71}\n","{'loss': 1.6848, 'grad_norm': 0.8153131008148193, 'learning_rate': 0.0001825287356321839, 'epoch': 0.88}\n","{'loss': 1.6068, 'grad_norm': 0.7906574010848999, 'learning_rate': 0.00017899204244031832, 'epoch': 1.06}\n","{'loss': 1.4909, 'grad_norm': 0.8980427384376526, 'learning_rate': 0.0001754553492484527, 'epoch': 1.24}\n","{'loss': 1.5395, 'grad_norm': 0.9705032110214233, 'learning_rate': 0.0001719186560565871, 'epoch': 1.41}\n","{'loss': 1.4708, 'grad_norm': 1.0619163513183594, 'learning_rate': 0.0001683819628647215, 'epoch': 1.59}\n","{'loss': 1.4672, 'grad_norm': 0.981268048286438, 'learning_rate': 0.0001648452696728559, 'epoch': 1.77}\n","{'loss': 1.5264, 'grad_norm': 1.004439353942871, 'learning_rate': 0.00016130857648099029, 'epoch': 1.94}\n","{'loss': 1.3477, 'grad_norm': 1.1568070650100708, 'learning_rate': 0.00015777188328912467, 'epoch': 2.12}\n","{'loss': 1.2139, 'grad_norm': 1.1788620948791504, 'learning_rate': 0.00015423519009725908, 'epoch': 2.3}\n","{'loss': 1.2553, 'grad_norm': 1.2886534929275513, 'learning_rate': 0.00015069849690539346, 'epoch': 2.47}\n","{'loss': 1.2538, 'grad_norm': 1.2783722877502441, 'learning_rate': 0.00014716180371352787, 'epoch': 2.65}\n","{'loss': 1.2643, 'grad_norm': 1.1668360233306885, 'learning_rate': 0.00014362511052166225, 'epoch': 2.83}\n","{'loss': 1.2326, 'grad_norm': 1.0913087129592896, 'learning_rate': 0.00014008841732979666, 'epoch': 3.0}\n","{'loss': 0.9765, 'grad_norm': 1.468590259552002, 'learning_rate': 0.00013655172413793104, 'epoch': 3.18}\n","{'loss': 0.9891, 'grad_norm': 1.523878574371338, 'learning_rate': 0.00013301503094606545, 'epoch': 3.36}\n","{'loss': 0.9958, 'grad_norm': 1.5432509183883667, 'learning_rate': 0.0001294783377541998, 'epoch': 3.53}\n","{'loss': 0.9778, 'grad_norm': 1.551354169845581, 'learning_rate': 0.00012594164456233422, 'epoch': 3.71}\n","{'loss': 1.0205, 'grad_norm': 1.5879628658294678, 'learning_rate': 0.0001224049513704686, 'epoch': 3.89}\n","{'loss': 0.9267, 'grad_norm': 1.5796101093292236, 'learning_rate': 0.00011886825817860301, 'epoch': 4.06}\n","{'loss': 0.7359, 'grad_norm': 1.7161469459533691, 'learning_rate': 0.0001153315649867374, 'epoch': 4.24}\n","{'loss': 0.7356, 'grad_norm': 1.6962307691574097, 'learning_rate': 0.0001117948717948718, 'epoch': 4.42}\n","{'loss': 0.7703, 'grad_norm': 2.1471188068389893, 'learning_rate': 0.0001082581786030062, 'epoch': 4.59}\n","{'loss': 0.7684, 'grad_norm': 2.1669650077819824, 'learning_rate': 0.00010472148541114059, 'epoch': 4.77}\n"," 49% 2747/5660 [1:25:40<1:23:13, 1.71s/it]"]}],"source":["%%time\n","\n","!chmod +x 
./scripts/tune.sh\n","!./scripts/tune.sh"]}],"metadata":{"accelerator":"GPU","application/vnd.databricks.v1+notebook":{"dashboards":[],"environmentMetadata":null,"language":"python","notebookMetadata":{"pythonIndentUnit":4},"notebookName":"07_MAC_+_Qwen2-7B-Instructi_Unsloth_train","widgets":{}},"colab":{"gpuType":"T4","provenance":[]},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.10.14"}},"nbformat":4,"nbformat_minor":0}
results/mac-results-colab.csv ADDED
The diff for this file is too large to render. See raw diff
 
scripts/tune-mac.sh ADDED
@@ -0,0 +1,35 @@
+ #!/bin/sh
+
+ BASEDIR=$(dirname "$0")
+ cd $BASEDIR/..
+ echo Current Directory:
+ pwd
+
+ nvidia-smi
+ uname -a
+ cat /etc/os-release
+ lscpu
+ grep MemTotal /proc/meminfo
+
+ # pip install -r requirements.txt
+ # FLASH_ATTENTION_FORCE_BUILD=TRUE pip install --upgrade flash-attn
+
+ export MODEL_NAME=unsloth/Qwen2-0.5B-Instruct-bnb-4bit
+ echo Tuning $MODEL_NAME
+ python llm_toolkit/tune_mac.py
+
+ export MODEL_NAME=unsloth/Qwen2-1.5B-Instruct-bnb-4bit
+ echo Tuning $MODEL_NAME
+ python llm_toolkit/tune_mac.py
+
+ export MODEL_NAME=unsloth/Qwen2-7B-Instruct
+ echo Tuning $MODEL_NAME
+ python llm_toolkit/tune_mac.py
+
+ export MODEL_NAME=unsloth/mistral-7b-instruct-v0.3
+ echo Tuning $MODEL_NAME
+ python llm_toolkit/tune_mac.py
+
+ export MODEL_NAME=gradientai/Llama-3-8B-Instruct-Gradient-1048k
+ echo Tuning $MODEL_NAME
+ python llm_toolkit/tune_mac.py
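
The same sweep can be driven from Python instead of the shell script; a rough equivalent (assuming it is started from the repository root, like the script above) would be:

# Rough Python equivalent of scripts/tune-mac.sh: run tune_mac.py once per model.
import os
import subprocess

models = [
    "unsloth/Qwen2-0.5B-Instruct-bnb-4bit",
    "unsloth/Qwen2-1.5B-Instruct-bnb-4bit",
    "unsloth/Qwen2-7B-Instruct",
    "unsloth/mistral-7b-instruct-v0.3",
    "gradientai/Llama-3-8B-Instruct-Gradient-1048k",
]

for model_name in models:
    print(f"Tuning {model_name}")
    env = dict(os.environ, MODEL_NAME=model_name)
    # check=False so one failing model does not stop the sweep, matching the shell script.
    subprocess.run(["python", "llm_toolkit/tune_mac.py"], env=env, check=False)
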