Sebastien De Greef committed on
Commit
16c10c3
·
1 Parent(s): 1a34914

minor fixes to app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -16
app.py CHANGED
@@ -48,12 +48,13 @@ class PrinterCallback(TrainerCallback):
48
  def on_log(self, args, state, control, logs=None, **kwargs):
49
  _ = logs.pop("total_flos", None)
50
  if state.is_local_process_zero:
51
- print(logs)
 
52
  def on_step_end(self, args, state, control, **kwargs):
53
  if state.is_local_process_zero:
54
  self.step = state.global_step
55
  self.progress.update(self.step)
56
- print("**Step ", state.global_step)
57
 
58
 
59
 
@@ -68,8 +69,6 @@ def formatting_prompts_func(examples, prompt):
68
  text = prompt.format(instruction, input, output) + EOS_TOKEN
69
  texts.append(text)
70
  return { "text" : texts, }
71
- pass
72
-
73
 
74
  def load_model(initial_model_name, load_in_4bit, max_sequence_length):
75
  global model, tokenizer, max_seq_length
@@ -187,11 +186,11 @@ def save_model(model_name, hub_model_name, hub_token, gguf_16bit, gguf_8bit, ggu
187
  return "Model saved", gr.update(visible=True, interactive=True)
188
 
189
  # Create the Gradio interface
190
- with gr.Blocks() as demo:
191
  with gr.Column():
192
  gr.Image("unsloth.png", width="300px", interactive=False, show_download_button=False, show_label=False)
193
  with gr.Column():
194
- gr.Markdown(f"**GPU Information:** {gpu_stats.name} ({max_memory} GB)")
195
  with gr.Tab("Base Model Parameters"):
196
 
197
  with gr.Row():
@@ -299,17 +298,17 @@ with gr.Blocks() as demo:
299
  hub_token = gr.Textbox(label="Hub Token", interactive=True, type="password")
300
  gr.Markdown("---")
301
 
302
- with gr.Row():
303
- gr.Markdown("### Ollama options")
304
- with gr.Column():
305
- ollama_create_local = gr.Checkbox(label="Create in Ollama (local)", value=False, interactive=True)
306
- ollama_push_to_hub = gr.Checkbox(label="Push to Ollama", value=False, interactive=True)
307
- with gr.Column():
308
- ollama_model_name = gr.Textbox(label="Ollama Model Name", value="user/model_name")
309
- ollama_pub_key = gr.Button("Ollama Pub Key")
310
- gr.Markdown("---")
311
  save_button = gr.Button("Save Model", visible=True, interactive=True)
312
- save_button.click(save_model, inputs=[model_name, hub_model_name, hub_token, gguf_16bit, gguf_8bit, gguf_4bit, gguf_custom, gguf_custom_value, merge_16bit, merge_4bit, just_lora, push_to_hub], outputs=[save_button])
313
 
314
  with gr.Tab("Inference"):
315
  with gr.Row():
 
48
  def on_log(self, args, state, control, logs=None, **kwargs):
49
  _ = logs.pop("total_flos", None)
50
  if state.is_local_process_zero:
51
+ #print(logs)
52
+ pass
53
  def on_step_end(self, args, state, control, **kwargs):
54
  if state.is_local_process_zero:
55
  self.step = state.global_step
56
  self.progress.update(self.step)
57
+ #print("**Step ", state.global_step)
58
 
59
 
60
 
 
69
  text = prompt.format(instruction, input, output) + EOS_TOKEN
70
  texts.append(text)
71
  return { "text" : texts, }
 
 
72
 
73
  def load_model(initial_model_name, load_in_4bit, max_sequence_length):
74
  global model, tokenizer, max_seq_length
 
186
  return "Model saved", gr.update(visible=True, interactive=True)
187
 
188
  # Create the Gradio interface
189
+ with gr.Blocks(title="Unsloth fine-tuning") as demo:
190
  with gr.Column():
191
  gr.Image("unsloth.png", width="300px", interactive=False, show_download_button=False, show_label=False)
192
  with gr.Column():
193
+ gr.Markdown(f"**GPU Information:** {gpu_stats.name} ({max_memory} GB)\n\n[Unsloth Docs](http://docs.unsloth.com/)\n\n[Unsloth GitHub](https://github.com/unslothai/unsloth)")
194
  with gr.Tab("Base Model Parameters"):
195
 
196
  with gr.Row():
 
298
  hub_token = gr.Textbox(label="Hub Token", interactive=True, type="password")
299
  gr.Markdown("---")
300
 
301
+ # with gr.Row():
302
+ # gr.Markdown("### Ollama options")
303
+ # with gr.Column():
304
+ # ollama_create_local = gr.Checkbox(label="Create in Ollama (local)", value=False, interactive=True)
305
+ # ollama_push_to_hub = gr.Checkbox(label="Push to Ollama", value=False, interactive=True)
306
+ # with gr.Column():
307
+ # ollama_model_name = gr.Textbox(label="Ollama Model Name", value="user/model_name")
308
+ # ollama_pub_key = gr.Button("Ollama Pub Key")
309
+ save_output = gr.Markdown("---")
310
  save_button = gr.Button("Save Model", visible=True, interactive=True)
311
+ save_button.click(save_model, inputs=[model_name, hub_model_name, hub_token, gguf_16bit, gguf_8bit, gguf_4bit, gguf_custom, gguf_custom_value, merge_16bit, merge_4bit, just_lora, push_to_hub], outputs=[save_output, save_button])
312
 
313
  with gr.Tab("Inference"):
314
  with gr.Row():