1024m committed on
Commit
0911374
·
verified ·
1 Parent(s): 8afbb35

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -79,9 +79,9 @@ def generate_response(message, temperature, max_new_tokens, top_p, task):
79
  print(f"Prompt: {prompt}")
80
  start_time = time.time()
81
  inputs = tokenizer.encode(prompt, return_tensors="pt").to(model.device)
82
- outputs = model.generate(input_ids=inputs, max_new_tokens=max_new_tokens, use_cache=True, temperature=temperature, min_p=top_p, pad_token_id=tokenizer.eos_token_id)
83
  response = tokenizer.decode(outputs[0], skip_special_tokens=True)
84
- processed_response = response.split("RESPONSE :assistant")[-1].strip()
85
  end_time = time.time()
86
  time_taken = end_time - start_time
87
  print(f"Output: {processed_response}")
@@ -90,7 +90,7 @@ def generate_response(message, temperature, max_new_tokens, top_p, task):
90
  current_time_pst = datetime.now(pst_timezone).strftime("%Y-%m-%d %H:%M:%S %Z%z")
91
  print(f"Current timestamp (PST): {current_time_pst}")
92
  return processed_response
93
- with gr.Blocks() as demo:
94
  gr.Markdown("# Phi-4-Hindi Demo")
95
  with gr.Row():
96
  with gr.Column():
 
79
  print(f"Prompt: {prompt}")
80
  start_time = time.time()
81
  inputs = tokenizer.encode(prompt, return_tensors="pt").to(model.device)
82
+ outputs = model.generate(input_ids=inputs, do_sample=True, max_new_tokens=max_new_tokens, use_cache=True, temperature=temperature, min_p=top_p, pad_token_id=tokenizer.eos_token_id)
83
  response = tokenizer.decode(outputs[0], skip_special_tokens=True)
84
+ processed_response = response.split("RESPONSE :")[-1].strip()
85
  end_time = time.time()
86
  time_taken = end_time - start_time
87
  print(f"Output: {processed_response}")
 
90
  current_time_pst = datetime.now(pst_timezone).strftime("%Y-%m-%d %H:%M:%S %Z%z")
91
  print(f"Current timestamp (PST): {current_time_pst}")
92
  return processed_response
93
+ with gr.Blocks(theme='1024m/1024m-1') as demo:
94
  gr.Markdown("# Phi-4-Hindi Demo")
95
  with gr.Row():
96
  with gr.Column():