alpcansoydas committed on
Commit
210dbcf
1 Parent(s): 0e021ed

Upload app.py

Files changed (1)
  1. app.py +89 -0
app.py ADDED
@@ -0,0 +1,89 @@
+ import gradio as gr
+ from langchain.prompts import PromptTemplate
+ from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint
+ from langchain_core.output_parsers import JsonOutputParser
+ from langdetect import detect
+ import time
+
+ # Initialize the LLM endpoint and supporting components
+ llm = HuggingFaceEndpoint(
+     repo_id="mistralai/Mistral-7B-Instruct-v0.3",
+     task="text-generation",
+     max_new_tokens=4096,
+     temperature=0.5,
+     do_sample=False,
+ )
+ llm_engine_hf = ChatHuggingFace(llm=llm)
+
+ # Prompt that asks for a sentiment label
+ template_classify = '''
+ Please carefully read the following text. The text is written in {LANG}:
+
+ <text>
+ {TEXT}
+ </text>
+
+ After reading it, classify it into one of three groups: Positive, Negative, or Neutral.
+ Your final response MUST contain only the label, no other text.
+ Example:
+ Positive
+ Negative
+ Neutral
+ '''
+
+ # Prompt that wraps the label in JSON
+ template_json = '''
+ Your task is to read the following text, convert it to JSON format using 'Answer' as the key, and return it.
+ <text>
+ {RESPONSE}
+ </text>
+
+ Your final response MUST contain only the JSON, no other text.
+ Example:
+ {{"Answer":"Positive"}}
+ '''
+
+ json_output_parser = JsonOutputParser()
+
+ # Detect the language, ask the LLM for a sentiment label,
+ # then ask it to wrap the label in JSON and parse the result.
+ def classify_text(text):
+     start = time.time()
+     lang = detect(text)
+
+     prompt_classify = PromptTemplate(
+         template=template_classify,
+         input_variables=["LANG", "TEXT"]
+     )
+     formatted_prompt = prompt_classify.format(TEXT=text, LANG=lang)
+     classify = llm.invoke(formatted_prompt)
+
+     prompt_json = PromptTemplate(
+         template=template_json,
+         input_variables=["RESPONSE"]
+     )
+     formatted_prompt = prompt_json.format(RESPONSE=classify)
+     response = llm.invoke(formatted_prompt)
+
+     parsed_output = json_output_parser.parse(response)
+     end = time.time()
+     duration = end - start
+     return parsed_output, duration
+
+ # Format the classification result for the Gradio UI
+ def gradio_app(text):
+     classification, time_taken = classify_text(text)
+     return classification, f"Time taken: {time_taken:.2f} seconds"
+
+ def create_gradio_interface():
+     with gr.Blocks() as iface:
+         text_input = gr.Textbox(label="Text to Classify")
+         output_text = gr.Textbox(label="Classification")
+         time_taken = gr.Textbox(label="Time Taken (seconds)")
+         submit_btn = gr.Button("Classify")
+
+         submit_btn.click(fn=gradio_app, inputs=text_input, outputs=[output_text, time_taken])
+
+     iface.launch()
+
+ if __name__ == "__main__":
+     create_gradio_interface()
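
For a quick check outside the Gradio UI, the classifier can also be called directly. The snippet below is a minimal sketch, assuming the file above is saved as app.py and a Hugging Face API token is available to HuggingFaceEndpoint (for example via the HUGGINGFACEHUB_API_TOKEN environment variable); the sample input and printed output are illustrative only.

from app import classify_text

result, seconds = classify_text("I really enjoyed this film!")
print(result)                                # expected shape: {"Answer": "Positive"}
print(f"Time taken: {seconds:.2f} seconds")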