gsarti committed on
Commit
7d34870
·
1 Parent(s): 80a4ebb

First part functional

Browse files
Files changed (6) hide show
  1. .gitignore +1 -0
  2. README.md +1 -2
  3. app.py +205 -9
  4. requirements.txt +3 -1
  5. style.py +46 -0
  6. translations.yaml +107 -0
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ __pycache__
README.md CHANGED
@@ -4,11 +4,10 @@ emoji: 🧩
4
  colorFrom: blue
5
  colorTo: red
6
  sdk: gradio
7
- sdk_version: 4.40.0
8
  app_file: app.py
9
  pinned: false
10
  license: apache-2.0
11
- fullWidth: true
12
  short_description: Solve Italian verbalized rebuses with Phi-3 Mini.
13
  models:
14
  - gsarti/phi3-mini-rebus-solver-fp16
 
4
  colorFrom: blue
5
  colorTo: red
6
  sdk: gradio
7
+ sdk_version: 4.42.0
8
  app_file: app.py
9
  pinned: false
10
  license: apache-2.0
 
11
  short_description: Solve Italian verbalized rebuses with Phi-3 Mini.
12
  models:
13
  - gsarti/phi3-mini-rebus-solver-fp16
app.py CHANGED
@@ -1,23 +1,219 @@
 
1
  import spaces
2
  import gradio as gr
3
  from transformers import AutoTokenizer, AutoModelForCausalLM
 
 
 
 
 
4
 
5
  template = """<s><|user|>
6
  Risolvi gli indizi tra parentesi per ottenere una prima lettura, e usa la chiave di lettura per ottenere la soluzione del rebus.
7
 
8
- {input}<|end|>
 
9
  <|assistant|>"""
10
 
11
- tokenizer = AutoTokenizer.from_pretrained("gsarti/phi3-mini-rebus-solver-fp16")
12
- model = AutoModelForCausalLM.from_pretrained("gsarti/phi3-mini-rebus-solver-fp16")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
13
 
14
  @spaces.GPU
15
  def solve_verbalized_rebus(example, history):
16
  input = template.format(input=example)
17
- inputs = tokenizer(input, return_tensors="pt")["input_ids"]
18
- outputs = model.generate(input_ids = inputs, max_new_tokens = 500, use_cache = True)
19
- model_generations = tokenizer.batch_decode(outputs)
20
- return model_generations[0]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
21
 
22
- demo = gr.ChatInterface(fn=solve_verbalized_rebus, examples=["Rebus: [Materiale espulso dai vulcani] R O [Strumento del calzolaio] [Si trovano ai lati del bacino] C I [Si ingrassano con la polenta] E I N [Contiene scorte di cibi] B [Isola in francese]\nChiave risolutiva: 1 ' 5 6 5 3 3 1 14"], title="Verbalized Rebus Solver")
23
- demo.launch()
 
1
+ import re
2
  import spaces
3
  import gradio as gr
4
  from transformers import AutoTokenizer, AutoModelForCausalLM
5
+ from unidecode import unidecode
6
+ from gradio_i18n import gettext, Translate
7
+ from datasets import load_dataset
8
+
9
+ from style import custom_css, solution_style, letter_style, definition_style
10
 
11
  template = """<s><|user|>
12
  Risolvi gli indizi tra parentesi per ottenere una prima lettura, e usa la chiave di lettura per ottenere la soluzione del rebus.
13
 
14
+ Rebus: {rebus}
15
+ Chiave di lettura: {key}<|end|>
16
  <|assistant|>"""
17
 
18
+ eureka5_test_data = load_dataset(
19
+ 'gsarti/eureka-rebus', 'llm_sft',
20
+ data_files=["id_test.jsonl", "ood_test.jsonl"],
21
+ split = "train"
22
+ )
23
+
24
+ OUTPUTS_BASE_URL = "https://raw.githubusercontent.com/gsarti/verbalized-rebus/main/outputs/"
25
+
26
+ model_outputs = load_dataset(
27
+ "csv",
28
+ data_files={
29
+ "gpt4": OUTPUTS_BASE_URL + "prompted_models/gpt4o_results.csv",
30
+ "claude3_5_sonnet": OUTPUTS_BASE_URL + "prompted_models/claude3_5_sonnet_results.csv",
31
+ "llama3_70b": OUTPUTS_BASE_URL + "prompted_models/llama3_70b_results.csv",
32
+ "qwen_72b": OUTPUTS_BASE_URL + "prompted_models/qwen_72b_results.csv",
33
+ "phi3_mini": OUTPUTS_BASE_URL + "phi3_mini/phi3_mini_results_step_5070.csv",
34
+ "gemma2": OUTPUTS_BASE_URL + "gemma2_2b/gemma2_2b_results_step_5070.csv",
35
+ "llama3_1_8b": OUTPUTS_BASE_URL + "llama3.1_8b/llama3.1_8b_results_step_5070.csv"
36
+ }
37
+ )
38
+
39
def extract(span_text: str, tag: str = "span") -> str:
    """Return the concatenated inner text of every <tag>...</tag> in *span_text*.

    Matching is non-greedy, so each tag occurrence contributes only its own
    contents. Returns "" when no tag is present (join of an empty list).
    """
    contents = re.findall(rf"<{tag}[^>]*>(.*?)<\/{tag}>", span_text)
    return "".join(contents)
43
+
44
+
45
def parse_rebus(ex_idx: int):
    """Parse EurekaRebus test example *ex_idx* (1-based) into display-ready parts.

    Reads the user and assistant turns of the example's conversation and
    produces HTML-highlighted strings for the guessing-game UI.

    Returns a dict with:
        rebus       -- rebus with letters/definitions wrapped in styled spans
        key         -- solution key with numbers highlighted
        key_split   -- raw (unstyled) solution key, space-separated lengths
        fp_elements -- (clue, answer) pairs for the first-pass resolution
        fp          -- ground-truth first pass
        fp_empty    -- rebus with definitions blanked out as "___"
        s_elements  -- (length, word) pairs of the solution
        s           -- ground-truth solution string
    """
    user_turn = eureka5_test_data[ex_idx - 1]["conversations"][0]["value"]
    model_turn = eureka5_test_data[ex_idx - 1]["conversations"][1]["value"]
    rebus = user_turn.split("Rebus: ")[1].split("\n")[0]
    # Temporarily replace bracketed definitions with a placeholder so that the
    # letter-styling regex only touches the loose rebus letters.
    rebus_letters = re.sub(r"\[.*?\]", "<<<>>>", rebus)
    rebus_letters = re.sub(r"([a-zA-Z]+)", rf"""{letter_style}\1</span>""", rebus_letters)
    fp_empty = rebus_letters.replace("<<<>>>", f"{definition_style}___</span>")
    key = user_turn.split("Chiave di lettura: ")[1].split("\n")[0]
    key_split = key
    key_highlighted = re.sub(r"(\d+)", rf"""{solution_style}\1</span>""", key)
    fp_elements = re.findall(r"- (.*) = (.*)", model_turn)
    definitions = [x[0] for x in fp_elements if x[0].startswith("[")]
    # Fix: the original reused `i` as the loop index, shadowing the variable
    # that held the user turn; a dedicated name avoids the hazard.
    for idx, el in enumerate(fp_elements):
        if el[0].startswith("["):
            # Bracketed clue: highlight the definition text.
            fp_elements[idx] = (
                re.sub(r"\[(.*?)\]", rf"""{definition_style}[\1]</span>""", fp_elements[idx][0]),
                fp_elements[idx][1],
            )
        else:
            # Literal letters: highlight both sides with the letter style.
            fp_elements[idx] = (
                f"{letter_style}{fp_elements[idx][0]}</span>",
                f"{letter_style}{fp_elements[idx][1]}</span>",
            )
    fp = re.findall(r"Prima lettura: (.*)", model_turn)[0]
    s_elements = re.findall(r"(\d+) = (.*)", model_turn)
    s = re.findall(r"Soluzione: (.*)", model_turn)[0]
    # Restore the definitions (one placeholder each, left to right), then style them.
    for d in definitions:
        rebus_letters = rebus_letters.replace("<<<>>>", d, 1)
    rebus_highlighted = re.sub(r"\[(.*?)\]", rf"""{definition_style}[\1]</span>""", rebus_letters)
    return {
        "rebus": rebus_highlighted,
        "key": key_highlighted,
        "key_split": key_split,
        "fp_elements": fp_elements,
        "fp": fp,
        "fp_empty": fp_empty,
        "s_elements": s_elements,
        "s": s,
    }
81
+
82
+
83
+ #tokenizer = AutoTokenizer.from_pretrained("gsarti/phi3-mini-rebus-solver-fp16")
84
+ #model = AutoModelForCausalLM.from_pretrained("gsarti/phi3-mini-rebus-solver-fp16")
85
 
86
  @spaces.GPU
87
  def solve_verbalized_rebus(example, history):
88
  input = template.format(input=example)
89
+ #inputs = tokenizer(input, return_tensors="pt")["input_ids"]
90
+ #outputs = model.generate(input_ids = inputs, max_new_tokens = 500, use_cache = True)
91
+ #model_generations = tokenizer.batch_decode(outputs)
92
+ #return model_generations[0]
93
+ return input
94
+
95
+ #demo = gr.ChatInterface(fn=solve_verbalized_rebus, examples=["Rebus: [Materiale espulso dai vulcani] R O [Strumento del calzolaio] [Si trovano ai lati del bacino] C I [Si ingrassano con la polenta] E I N [Contiene scorte di cibi] B [Isola in francese]\nChiave risolutiva: 1 ' 5 6 5 3 3 1 14"], title="Verbalized Rebus Solver")
96
+ #demo.launch()
97
+
98
# Main demo UI: a bilingual (en/it) Gradio Blocks app with a rebus
# guessing-game tab and a placeholder model-evaluation tab.
with gr.Blocks(css=custom_css) as demo:
    # Language selector drives the gradio_i18n Translate context below;
    # every gettext(...) key resolves against translations.yaml.
    lang = gr.Dropdown([("English", "en"), ("Italian", "it")], value="it", label="Select language:", interactive=True)
    with Translate("translations.yaml", lang, placeholder_langs=["en", "it"]):
        gr.Markdown(gettext("Title"))
        gr.Markdown(gettext("Intro"))
        with gr.Tab(gettext("GuessingGame")):
            with gr.Row():
                with gr.Column():
                    # 1-based index into the 2000-example EurekaRebus test split.
                    example_id = gr.Number(1, label=gettext("CurrentExample"), minimum=1, maximum=2000, step=1, interactive=True)
                with gr.Column():
                    show_length_hints = gr.Checkbox(False, label=gettext("ShowLengthHints"), interactive=True)

            # Re-render the whole example area whenever the selected example,
            # the hint toggle, or the UI language changes.
            @gr.render(inputs=[example_id, show_length_hints], triggers=[demo.load, example_id.change, show_length_hints.change, lang.change])
            def show_example(example_number, show_length_hints):
                parsed_rebus = parse_rebus(example_number)
                gr.Markdown(gettext("Instructions"))
                # NOTE(review): the trailing comma makes this statement a
                # 1-tuple expression; harmless, but probably unintended.
                gr.Markdown(gettext("Rebus") + f"{parsed_rebus['rebus']}</h4>"),
                gr.Markdown(gettext("Key") + f"{parsed_rebus['key']}</h4>")
                gr.Markdown("<br><br>")
                with gr.Row():
                    answers: list[gr.Textbox] = []
                    with gr.Column(scale=2):
                        gr.Markdown(gettext("ProceedToResolution"))
                        # One row per first-pass element: definitions get a
                        # guess textbox; literal letters are shown as-is.
                        for el_key, el_value in parsed_rebus['fp_elements']:
                            with gr.Row():
                                with gr.Column(scale=0.2, min_width=250):
                                    gr.Markdown(f"<p>{el_key} = </p>")
                                    if el_key.startswith('<span class="definition"') and show_length_hints:
                                        gr.Markdown(f"<p>({len(el_value)} lettere)</p>")
                                with gr.Column(scale=0.2, min_width=150):
                                    if el_key.startswith('<span class="definition"'):
                                        definition_answer = gr.Textbox(show_label=False, placeholder="Guess...", interactive=True, max_lines=3)
                                        answers.append(definition_answer)
                                    else:
                                        gr.Markdown(el_value)
                        gr.Markdown("<hr>")
                    with gr.Column(scale=3):
                        # Hidden state components: raw length key and the empty
                        # first pass ("___" placeholders) consumed by update_fp.
                        key_value = gr.Markdown(parsed_rebus['key_split'], visible=False)
                        fp_empty = gr.Markdown(parsed_rebus['fp_empty'], visible=False)
                        fp = gr.Markdown(gettext("FirstPass") + f"{parsed_rebus['fp_empty']}</h4><br>")
                        solution_words: list[gr.Markdown] = []
                        clean_solution_words: list[str] = []
                        clean_fp = extract(fp.value)
                        curr_idx = 0
                        # Slice the (initially empty) first pass into solution
                        # words according to the length key.
                        # NOTE(review): int(n_char) assumes every key token is
                        # numeric — confirm keys never contain "'" tokens as in
                        # the old chat example ("1 ' 5 6 ...").
                        for n_char in parsed_rebus['key_split'].split():
                            word = clean_fp[curr_idx:curr_idx + int(n_char)].upper()
                            clean_solution_words.append(word)
                            solution_word = gr.Markdown(gettext("SolutionWord") + f"{n_char}: {solution_style}{word}</span></h4>")
                            curr_idx += int(n_char)
                            solution_words.append(solution_word)
                        gr.Markdown("<br>")
                        solution = gr.Markdown(gettext("Solution") + f"{solution_style}{' '.join(clean_solution_words)}</span></h4>")
                        correct_solution = gr.Markdown(gettext("CorrectSolution") + f"{solution_style}{parsed_rebus['s'].upper()}</span></h4>", visible=False)
                        # Hidden checkbox used purely as a boolean toggle state.
                        correct_solution_shown = gr.Checkbox(False, visible=False)
                        gr.Markdown("<hr>")
                        # Pre-rendered (hidden) per-model solutions for this example.
                        prompted_models = gr.Markdown(gettext("PromptedModels"), visible=False)
                        gpt4_solution = gr.Markdown(gettext("GPT4Solution") + f"{solution_style}{model_outputs['gpt4'][example_number - 1]['solution']}</span></h4>", visible=False)
                        claude_solution = gr.Markdown(gettext("ClaudeSolution") + f"{solution_style}{model_outputs['claude3_5_sonnet'][example_number - 1]['solution']}</span></h4>", visible=False)
                        llama3_70b_solution = gr.Markdown(gettext("LLaMA370BSolution") + f"{solution_style}{model_outputs['llama3_70b'][example_number - 1]['solution']}</span></h4>", visible=False)
                        qwen_72b_solution = gr.Markdown(gettext("Qwen72BSolution") + f"{solution_style}{model_outputs['qwen_72b'][example_number - 1]['solution']}</span></h4>", visible=False)
                        models_separator = gr.Markdown("<hr>", visible=False)
                        trained_models = gr.Markdown(gettext("TrainedModels"), visible=False)
                        llama3_1_8b_solution = gr.Markdown(gettext("LLaMA318BSolution") + f"{solution_style}{model_outputs['llama3_1_8b'][example_number - 1]['solution']}</span></h4>", visible=False)
                        phi3_mini_solution = gr.Markdown(gettext("Phi3MiniSolution") + f"{solution_style}{model_outputs['phi3_mini'][example_number - 1]['solution']}</span></h4>", visible=False)
                        gemma2_solution = gr.Markdown(gettext("Gemma22BSolution") + f"{solution_style}{model_outputs['gemma2'][example_number - 1]['solution']}</span></h4>", visible=False)
                        models_solutions_shown = gr.Checkbox(False, visible=False)
                with gr.Row():
                    btn_check = gr.Button(gettext("CheckSolution"), variant="primary")
                    btn_show = gr.Button(gettext("ShowSolution"))
                    btn_show_models_solutions = gr.Button(gettext("ShowModelsSolutions"))

                # Rebuild the first pass and solution words from the user's
                # current guesses. When wired below as an event handler, Gradio
                # overrides the component defaults with the components' values.
                def update_fp(fp_empty=fp_empty, key_value=key_value, *answers):
                    len_solutions = key_value.split()
                    for answer in answers:
                        if answer is not None and answer != "":
                            # Fill placeholders left-to-right, one per guess.
                            fp_empty = fp_empty.replace("___", answer, 1)
                    curr_idx = 0
                    new_solutions = []
                    new_solutions_clean = []
                    clean_fp_empty = extract(fp_empty)
                    for n_char in len_solutions:
                        word = clean_fp_empty[curr_idx:curr_idx + int(n_char)].upper()
                        new_solutions_clean.append(word)
                        new_solutions.append(gr.Markdown(gettext("SolutionWord") + f"{n_char}: {solution_style}{word}</span></h4>"))
                        curr_idx += int(n_char)
                    return [
                        gr.Markdown(gettext("FirstPass") + f"{fp_empty}</h4><br>"),
                        gr.Markdown(gettext("Solution") + f"{solution_style}{' '.join(new_solutions_clean)}</span></h4>")
                    ] + new_solutions

                # Compare the accent-stripped (unidecode) user solution with
                # the ground truth and report via a toast notification.
                def check_solution(solution, correct_solution):
                    solution = unidecode(extract(solution))
                    correct_solution = unidecode(extract(correct_solution, "h4"))
                    if solution == correct_solution:
                        gr.Info(gettext("CorrectSolutionMsg"))
                    else:
                        gr.Info(gettext("IncorrectSolutionMsg"))

                # Toggle the correct-solution reveal, flipping the button label.
                def show_solution(correct_solution, btn_show, shown):
                    if shown:
                        return gr.Markdown(correct_solution, visible=False), gr.Button(gettext("ShowSolution")), gr.Checkbox(False, visible=False)
                    else:
                        return gr.Markdown(correct_solution, visible=True), gr.Button(gettext("HideSolution")), gr.Checkbox(True, visible=False)

                # Toggle visibility of all per-model solutions (and their
                # section headers/separator) at once.
                def show_models_solutions(models_solutions_shown, btn_show_models_solutions, gpt4_solution, claude_solution, llama3_70b_solution, qwen_72b_solution, llama3_1_8b_solution, phi3_mini_solution, gemma2_solution, prompted_models, trained_models, models_separator):
                    if models_solutions_shown:
                        return gr.Markdown(gpt4_solution, visible=False), gr.Markdown(claude_solution, visible=False), gr.Markdown(llama3_70b_solution, visible=False), gr.Markdown(qwen_72b_solution, visible=False), gr.Markdown(llama3_1_8b_solution, visible=False), gr.Markdown(phi3_mini_solution, visible=False), gr.Markdown(gemma2_solution, visible=False), gr.Markdown(prompted_models, visible=False), gr.Markdown(trained_models, visible=False), gr.Markdown(models_separator, visible=False), gr.Button(gettext("ShowModelsSolutions")), gr.Checkbox(False, visible=False)
                    else:
                        return gr.Markdown(gpt4_solution, visible=True), gr.Markdown(claude_solution, visible=True), gr.Markdown(llama3_70b_solution, visible=True), gr.Markdown(qwen_72b_solution, visible=True), gr.Markdown(llama3_1_8b_solution, visible=True), gr.Markdown(phi3_mini_solution, visible=True), gr.Markdown(gemma2_solution, visible=True), gr.Markdown(prompted_models, visible=True), gr.Markdown(trained_models, visible=True), gr.Markdown(models_separator, visible=True), gr.Button(gettext("HideModelsSolutions")), gr.Checkbox(True, visible=False)

                # Event wiring must live inside the @gr.render function so the
                # listeners bind to this render's components.
                for answer in answers:
                    answer.change(update_fp, [fp_empty, key_value, *answers], [fp, solution, *solution_words])

                btn_check.click(check_solution, [solution, correct_solution], None)
                btn_show.click(show_solution, [correct_solution, btn_show, correct_solution_shown], [correct_solution, btn_show, correct_solution_shown])
                btn_show_models_solutions.click(show_models_solutions, [models_solutions_shown, btn_show_models_solutions, gpt4_solution, claude_solution, llama3_70b_solution, qwen_72b_solution, llama3_1_8b_solution, phi3_mini_solution, gemma2_solution, prompted_models, trained_models, models_separator], [gpt4_solution, claude_solution, llama3_70b_solution, qwen_72b_solution, llama3_1_8b_solution, phi3_mini_solution, gemma2_solution, prompted_models, trained_models, models_separator, btn_show_models_solutions, models_solutions_shown])

        with gr.Tab(gettext("ModelEvaluation")):
            gr.Markdown("<i>This section is under construction! Check again later 🙏</i>")


# show_api=False hides the auto-generated API docs page for this Space.
demo.launch(show_api=False)
 
requirements.txt CHANGED
@@ -1,2 +1,4 @@
1
  spaces
2
- transformers
 
 
 
1
  spaces
2
+ transformers
3
+ gradio-i18n
4
+ Unidecode
style.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ custom_css = """
2
+ .demo-title {
3
+ text-align: center;
4
+ display: block;
5
+ margin-bottom: 0;
6
+ font-size: 1.7em;
7
+ }
8
+ .demo-subtitle {
9
+ text-align: center;
10
+ display: block;
11
+ margin-top: 0;
12
+ font-size: 1.3em;
13
+ }
14
+
15
+ h4 {
16
+ text-align: center;
17
+ display: block;
18
+ }
19
+ .summary-label {
20
+ display: inline;
21
+ }
22
+ .prose a:visited {
23
+ color: var(--link-text-color);
24
+ }
25
+ .footer-container {
26
+ align-items: center;
27
+ }
28
+ .footer-custom-block {
29
+ display: flex;
30
+ justify-content: center;
31
+ align-items: center;
32
+ }
33
+ .footer-custom-block b {
34
+ margin-right: 10px;
35
+ }
36
+ .footer-custom-block img {
37
+ margin-right: 15px;
38
+ }
39
+ ol {
40
+ padding-left: 30px;
41
+ }
42
+ """
43
+
44
+ solution_style = """<span class="number" style="color: #1a73e8; font-weight: var(--weight-semibold)">"""
45
+ letter_style = """<span class="letter" style="color: #f48126; font-weight: var(--weight-semibold)">"""
46
+ definition_style = """<span class="definition" style="color: #5fb77d; font-weight: var(--weight-semibold)">"""
translations.yaml ADDED
@@ -0,0 +1,107 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ en:
2
+ Language: 'Select a language:'
3
+ en: English
4
+ it: Italian
5
+ Title: "<h1 class='demo-title'>\U0001F9E9 Italian Verbalized Rebus Solver \U0001F1EE\
6
+ \U0001F1F9</h1>"
7
+ Intro: "This demo will allow you to test your skills at solving Italian verbalized\
8
+ \ rebus puzzles, and evaluate the predictions of our small but capable <a href=\"\
9
+ https://huggingface.co/gsarti/phi3-mini-rebus-solver-fp16\" target='_blank'>Phi-3\
10
+ \ Verbalized Rebus Solver</a> model.\n\nIn the tab <code>\U0001F9E9 Guessing game</code>,\
11
+ \ you will be presented with a rebus puzzle from the test set of <a href=\"https://huggingface.co/datasets/gsarti/eureka-rebus\"\
12
+ \ target='_blank'>EurekaRebus</a>, and you will be asked to solve it step by step.\
13
+ \ The model will then generate a response, and you will be able to compare it\
14
+ \ with the ground truth solution.\n\nIn the tab <code>\U0001F916 Model evaluation</code>,\
15
+ \ you will be able to evaluate the model on a custom rebus puzzle of your choice.\
16
+ \ You can input a rebus puzzle and a key, and the model will generate a response.\
17
+ \ You can then provide feedback on model mistakes, which will be used for future\
18
+ \ model training and evaluations.\n\nWe are grateful to the [Associazione Culturale\
19
+ \ \"Biblioteca Enigmistica Italiana - G. Panini\"](http://www.enignet.it/home)\
20
+ \ for making the Eureka5 rebus collection openly accessible.\n\nFor more details,\
21
+ \ check out our <a href=\"https://arxiv.org/abs/2408.00584\" target='_blank'>paper</a>,\
22
+ \ <a href=\"https://github.com/gsarti/verbalized-rebus\" target='_blank'>code</a>\
23
+ \ and <a href=\"https://huggingface.co/collections/gsarti/verbalized-rebus-clic-it-2024-66ab8f11cb04e68bdf4fb028\"\
24
+ \ target='_blank'>materials</a>."
25
+ GuessingGame: "\U0001F9E9 Guessing game"
26
+ ModelEvaluation: "\U0001F916 Model evaluation"
27
+ CurrentExample: Current Example (1-2000)
28
+ ShowLengthHints: Show length hints
29
+ Instructions: "<h4>Solve the hints between parentheses to obtain a first pass, and\
30
+ \ use the solution key to derive the rebus solution.</h4><br>"
31
+ Rebus: "<h4>Rebus: "
32
+ Key: "<h4>Solution key: "
33
+ ProceedToResolution: "<h4>Let's proceed step by step to solve the rebus (<b>using Italian words</b>):</h4>"
34
+ FirstPass: "<h4>First pass: "
35
+ SolutionWord: "<h4>Solution word with length "
36
+ Solution: "<h4>Solution: "
37
+ CheckSolution: "Check solution"
38
+ ShowSolution: "Show solution"
39
+ HideSolution: "Hide solution"
40
+ IncorrectSolutionMsg: "The solution is incorrect, try again!"
41
+ CorrectSolutionMsg: "Well done! This is the correct solution."
42
+ CorrectSolution: "<h4>Correct solution: "
43
+ GPT4Solution: "<h4>GPT-4o solution: "
44
+ ClaudeSolution: "<h4>Claude-3.5 Sonnet solution: "
45
+ LLaMA370BSolution: "<h4>LLaMA-3 70B solution: "
46
+ Qwen72BSolution: "<h4>Qwen 72B solution: "
47
+ LLaMA318BSolution: "<h4>LLaMA-3.1 8B solution: "
48
+ Phi3MiniSolution: "<h4>Phi-3 Mini solution: "
49
+ Gemma22BSolution: "<h4>Gemma-2 2B solution: "
50
+ ShowModelsSolutions: "Show model solutions"
51
+ HideModelsSolutions: "Hide model solutions"
52
+ PromptedModels: "<b>Prompted models</b>"
53
+ TrainedModels: "<b>Trained models</b>"
54
+ it:
55
+ Language: 'Seleziona una lingua:'
56
+ en: Inglese
57
+ it: Italiano
58
+ Title: "<h1 class='demo-title'>\U0001F9E9 Risolutore di Rebus Verbalizzati in Italiano\
59
+ \ \U0001F1EE\U0001F1F9</h1>"
60
+ Intro: "Questa demo ti permetter\xE0 di testare le tue abilit\xE0 nella risoluzione\
61
+ \ di rebus verbalizzati in italiano, e valutare le predizioni del nostro piccolo\
62
+ \ ma capace modello <a href=\"https://huggingface.co/gsarti/phi3-mini-rebus-solver-fp16\"\
63
+ \ target='_blank'>Phi-3</a>.\n\nNella finestra <code>\U0001F9E9 Indovina il rebus</code>,\
64
+ \ ti verr\xE0 presentato un rebus scelto casualmente dalla collezione <a href=\"\
65
+ https://huggingface.co/datasets/gsarti/eureka-rebus\" target='_blank'>EurekaRebus</a>,\
66
+ \ e ti verr\xE0 chiesto di risolverlo passo passo. Il modello generer\xE0 quindi\
67
+ \ una risposta, e potrai confrontare la tua risposta con la soluzione.\n\nNella\
68
+ \ finestra <code>\U0001F916 Valuta il modello</code>, potrai valutare il modello\
69
+ \ su un rebus verbalizzato creato da te, fornendolo assieme ad una chiave di lettura\
70
+ \ e chiedendo al modello di risolverlo. Potrai quindi fornire feedback sugli errori\
71
+ \ del modello, che verr\xE0 poi utilizzato per migliorarne le prestazioni future.\n\
72
+ \nRingraziamo l'<a href=\"http://www.enignet.it/\" target='_blank'>Associazione\
73
+ \ Culturale \"Biblioteca Enigmistica Italiana - G. Panini\"</a> per aver reso\
74
+ \ disponibile la collezione di rebus Eureka5.\n\nPer maggiori dettagli, puoi consultare\
75
+ \ il nostro <a href=\"https://arxiv.org/abs/2408.00584\" target='_blank'>articolo</a>,\
76
+ \ con il relativo <a href=\"https://github.com/gsarti/verbalized-rebus\" target='_blank'>codice</a>\
77
+ \ e <a href=\"https://huggingface.co/collections/gsarti/verbalized-rebus-clic-it-2024-66ab8f11cb04e68bdf4fb028\"\
78
+ \ target='_blank'>materiali</a>."
79
+ GuessingGame: "\U0001F9E9 Indovina il rebus"
80
+ ModelEvaluation: "\U0001F916 Valuta il modello"
81
+ CurrentExample: Esempio attuale (1-2000)
82
+ ShowLengthHints: Mostra suggerimenti sulla lunghezza
83
+ Instructions: "<h4>Risolvi gli indizi tra parentesi per ottenere una prima lettura, e\
84
+ \ usa la chiave di lettura per ottenere la soluzione del rebus.</h4><br>"
85
+ Rebus: "<h4>Rebus: "
86
+ Key: '<h4>Chiave di lettura: '
87
+ ProceedToResolution: "<h4>Procediamo alla risoluzione del rebus passo per passo:</h4>"
88
+ FirstPass: "<h4>Prima lettura: "
89
+ SolutionWord: "<h4>Parola risolutiva con lunghezza "
90
+ Solution: "<h4>Soluzione: "
91
+ CheckSolution: "Verifica soluzione"
92
+ ShowSolution: "Mostra soluzione"
93
+ HideSolution: "Nascondi soluzione"
94
+ IncorrectSolutionMsg: "La soluzione è sbagliata, riprova!"
95
+ CorrectSolutionMsg: "Esatto! Questa è la soluzione corretta."
96
+ CorrectSolution: "<h4>Soluzione corretta: "
97
+ GPT4Solution: "<h4>Soluzione di GPT-4o: "
98
+ ClaudeSolution: "<h4>Soluzione di Claude-3.5 Sonnet: "
99
+ LLaMA370BSolution: "<h4>Soluzione di LLaMA-3 70B: "
100
+ Qwen72BSolution: "<h4>Soluzione di Qwen 72B: "
101
+ LLaMA318BSolution: "<h4>Soluzione di LLaMA-3.1 8B: "
102
+ Phi3MiniSolution: "<h4>Soluzione di Phi-3 Mini: "
103
+ Gemma22BSolution: "<h4>Soluzione di Gemma-2 2B: "
104
+ ShowModelsSolutions: "Mostra soluzioni dei modelli"
105
+ HideModelsSolutions: "Nascondi soluzioni dei modelli"
106
+ PromptedModels: "<b>Modelli promptati</b>"
107
+ TrainedModels: "<b>Modelli addestrati</b>"