Update app.py
app.py CHANGED

@@ -1,4 +1,4 @@
-
+import gradio as gr
 import torch
 import time
 from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
@@ -10,7 +10,7 @@ model_name = "large-traversaal/Phi-4-Hindi"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloat16, device_map="auto")
 print("Model and tokenizer loaded successfully!")
-option_mapping = {"translation": "### TRANSLATION ###", "mcq": "### MCQ ###", "nli": "### NLI ###", "summarization": "### SUMMARIZATION ###",
+option_mapping = {"translation": "### TRANSLATION ###", "mcq": "### MCQ ###", "nli": "### NLI ###", "summarization": "### SUMMARIZATION ###", "Boolean": "### BOOLEAN ###",
                   "long response": "### LONG RESPONSE ###", "direct response": "### DIRECT RESPONSE ###", "paraphrase": "### PARAPHRASE ###", "code": "### CODE ###"}
 def generate_response(message, temperature, max_new_tokens, top_p, task):
     append_text = option_mapping.get(task, "")
@@ -41,7 +41,7 @@ with gr.Blocks() as demo:
     with gr.Row():
         with gr.Column():
             input_text = gr.Textbox(label="Input", placeholder="Enter your text here...", lines=5)
-            task_dropdown = gr.Dropdown(choices=["translation", "mcq", "nli", "summarization", "long response", "direct response", "paraphrase", "code"], value="long response", label="Task")
+            task_dropdown = gr.Dropdown(choices=["boolean", "translation", "mcq", "nli", "summarization", "long response", "direct response", "paraphrase", "code"], value="long response", label="Task")
     with gr.Row():
         with gr.Column():
             temperature = gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.01, label="Temperature")
@@ -111,4 +111,4 @@ with gr.Blocks(theme='1024m/1024m-1') as demo:
     send_btn.click(fn=generate_response, inputs=[input_text, temperature, max_new_tokens, top_p, task_dropdown], outputs=output_text)
     clear_btn.click(fn=lambda: ("", ""), inputs=None, outputs=[input_text, output_text])
 if __name__ == "__main__":
-    demo.queue().launch()
+    demo.queue().launch()
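Only the head of generate_response survives in this diff, but the pieces it references (option_mapping, the TextIteratorStreamer import, the temperature/top_p/max_new_tokens controls) make the intended flow clear: look up the task tag, attach it to the user message, and stream tokens back to the Gradio textbox. A minimal sketch of that flow follows, assuming the tag is appended after the message and that generation runs in a background thread while the streamer yields text; the exact prompt template for large-traversaal/Phi-4-Hindi is not shown in the diff, so the formatting below is an assumption, not the committed code:

import threading

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

model_name = "large-traversaal/Phi-4-Hindi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloat16, device_map="auto")

option_mapping = {"translation": "### TRANSLATION ###", "Boolean": "### BOOLEAN ###"}  # abridged

def generate_response(message, temperature, max_new_tokens, top_p, task):
    append_text = option_mapping.get(task, "")  # unknown tasks fall back to no tag
    prompt = f"{message}\n{append_text}"        # assumed placement of the task tag
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    generation_kwargs = dict(
        **inputs,
        streamer=streamer,
        do_sample=temperature > 0,  # greedy decoding when the slider sits at 0.0
        temperature=temperature,
        top_p=top_p,
        max_new_tokens=max_new_tokens,
    )
    # generate() blocks, so it runs in a thread while the streamer yields text
    threading.Thread(target=model.generate, kwargs=generation_kwargs).start()
    partial = ""
    for new_text in streamer:
        partial += new_text
        yield partial  # Gradio renders each growing prefix as it arrives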
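One detail worth flagging in the new revision: the dropdown gains a lowercase "boolean" choice, while the key added to option_mapping is capitalized "Boolean". Because the lookup is option_mapping.get(task, "") with an empty-string fallback, selecting the new task would silently append no tag at all. A hypothetical one-line normalization, not part of this commit, would make the lookup case-insensitive:

option_mapping = {"Boolean": "### BOOLEAN ###", "translation": "### TRANSLATION ###"}  # abridged

# Hypothetical fix, not in the commit: fold keys to lowercase once so the
# dropdown's "boolean" resolves to "### BOOLEAN ###" instead of "".
option_mapping = {key.lower(): tag for key, tag in option_mapping.items()}

assert option_mapping.get("boolean", "") == "### BOOLEAN ###"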