naofunyannn committed
Commit 5763fd0 · verified · 1 Parent(s): b27c67e

Update app.py

Files changed (1)
  1. app.py +9 -21
app.py CHANGED
@@ -12,6 +12,7 @@ MODEL = "LLaMAX/LLaMAX2-7B-Alpaca"
 RELATIVE_MODEL="LLaMAX/LLaMAX2-7B"
 
 TITLE = "<h1><center>LLaMAX Translator</center></h1>"
+DESCRIPTION = "<center>Do tài chính có hạn nên dự án đang chỉ dùng CPU để xử lý yêu cầu. Để xử lý với tốc độ nhanh hơn thông qua GPU, vui lòng truy cập vào notebook Kaggle sau <a href='https://www.kaggle.com/code/naofunyannn/llamax-translation'>LLaMAX3 Translator</a></center>"
 
 
 model = AutoModelForCausalLM.from_pretrained(
@@ -49,8 +50,8 @@ def chunk_text():
 
 # Function to calculate BLEU score
 def calculate_bleu_score(candidate: str, references: list):
-    candidate_tokens = candidate.split() # Tokenizing the candidate output
-    bleu_score = sentence_bleu(references, candidate_tokens) # Calculating BLEU score
+    candidate_tokens = candidate.split()
+    bleu_score = sentence_bleu(references, candidate_tokens)
     return bleu_score
 
 @spaces.GPU(duration=60)
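
For context, the `sentence_bleu` used above is NLTK's (`nltk.translate.bleu_score`): it expects the candidate as a list of tokens and `references` as a list of token lists. Below is a minimal, self-contained sketch of how the helper behaves outside the Space; the example strings and the smoothing function are illustrative additions, not part of this commit.

# Illustrative only: exercises calculate_bleu_score() with a made-up reference.
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction

def calculate_bleu_score(candidate: str, references: list):
    candidate_tokens = candidate.split()
    # Smoothing is an extra here; it avoids zero scores when a higher n-gram order has no overlap.
    return sentence_bleu(references, candidate_tokens,
                         smoothing_function=SmoothingFunction().method1)

references = [["the", "cat", "sits", "on", "the", "mat"]]  # one tokenized reference translation
print(calculate_bleu_score("the cat is on the mat", references))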
@@ -83,19 +84,9 @@ def translate(
 
     resp = tokenizer.decode(outputs[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)
 
-    #yield resp[len(prompt):]
     # Calculate BLEU score
-    '''
-    references = [
-        'this is a dog'.split(),
-        'it is dog'.split(),
-        'dog it is'.split(),
-        'a dog, it is'.split()
-    ]
-    bleu_score = calculate_bleu_score(resp[len(prompt):], references) # Calculate BLEU score
-    '''
-    references = [resp[len(prompt):].split()] # Use the generated response as the reference
-    bleu_score = calculate_bleu_score(resp[len(prompt):], references) # Calculate BLEU score
+    references = [resp[len(prompt):].split()]
+    bleu_score = calculate_bleu_score(resp[len(prompt):], references)
 
     yield resp[len(prompt):], bleu_score
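
A note on the reference used in this hunk: `references` is built from the generated response itself, so `sentence_bleu` compares the output against an identical token list and the reported score is 1.0 by construction. A two-line illustration (the tokens are arbitrary):

from nltk.translate.bleu_score import sentence_bleu

tokens = "xin chào thế giới".split()
print(sentence_bleu([tokens], tokens))  # any hypothesis scored against itself gives 1.0

An independent reference translation is needed for the BLEU value shown in the UI to be informative.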
@@ -114,7 +105,7 @@ CSS = """
 """
 
 LICENSE = """
-Model: <a href="https://huggingface.co/LLaMAX/LLaMAX3-8B-Alpaca">LLaMAX3-8B-Alpaca</a>
+Model: <a href="https://huggingface.co/LLaMAX/LLaMAX2-7B-Alpaca">LLaMAX2-7B-Alpaca</a>
 """
 
 LANG_LIST = ['Akrikaans', 'Amharic', 'Arabic', 'Armenian', 'Assamese', 'Asturian', 'Azerbaijani', \
@@ -145,9 +136,7 @@ with gr.Blocks(theme="soft", css=CSS) as demo:
         with gr.Column(scale=4):
             source_text = gr.Textbox(
                 label="Văn bản gốc",
-                value="LLaMAX is a language model with powerful multilingual capabilities without loss instruction-following capabilities. "+\
-                      "LLaMAX supports translation between more than 100 languages, "+\
-                      "surpassing the performance of similarly scaled LLMs.",
+                value="Hello",
                 lines=10,
             )
             output_text = gr.Textbox(
@@ -156,7 +145,7 @@ with gr.Blocks(theme="soft", css=CSS) as demo:
                 show_copy_button=True,
             )
 
-            bleu_score_output = gr.Textbox( # New holder area for BLEU score
+            bleu_score_output = gr.Textbox(
                 label="BLEU Score",
                 lines=10,
                 interactive=False,
@@ -181,7 +170,7 @@ with gr.Blocks(theme="soft", css=CSS) as demo:
                 step=8,
             )
             temperature = gr.Slider(
-                label="Temperature",
+                label="Độ sáng tạo",
                 minimum=0,
                 maximum=1,
                 value=0.3,
@@ -234,7 +223,6 @@ Write a response that ensuring accuracy and maintaining the tone and style of th
     gr.Markdown(LICENSE)
 
     #source_text.change(lang_detector, source_text, source_lang)
-    #submit.click(fn=translate, inputs=[source_text, source_lang, target_lang, inst, prompt, max_length, temperature, top_p, rp], outputs=[output_text])
     submit.click(fn=translate, inputs=[source_text, source_lang, target_lang, inst, prompt, max_length, temperature, top_p, rp], outputs=[output_text, bleu_score_output])
 
 if __name__ == "__main__":
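
The last hunk routes `translate`'s two yielded values into two components. Below is a minimal, self-contained Gradio sketch of the same two-output wiring; `fake_translate` is a hypothetical stand-in for the Space's `translate()` and the labels are illustrative.

import gradio as gr

def fake_translate(text: str):
    # placeholder for translate(): a real handler returns (translation, bleu_score)
    return text.upper(), 1.0

with gr.Blocks() as demo:
    source_text = gr.Textbox(label="Văn bản gốc", lines=4)
    output_text = gr.Textbox(label="Translation", lines=4, show_copy_button=True)
    bleu_score_output = gr.Textbox(label="BLEU Score", interactive=False)
    submit = gr.Button("Submit")
    # listing two components in `outputs` maps them to the two returned values, in order
    submit.click(fn=fake_translate, inputs=[source_text],
                 outputs=[output_text, bleu_score_output])

if __name__ == "__main__":
    demo.launch()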
 