---
license: cc-by-nc-4.0
language:
- ro
base_model:
- google/gemma-7b
datasets:
- OpenLLM-Ro/ro_sft_alpaca
- OpenLLM-Ro/ro_sft_alpaca_gpt4
- OpenLLM-Ro/ro_sft_dolly
- OpenLLM-Ro/ro_sft_selfinstruct_gpt4
- OpenLLM-Ro/ro_sft_norobots
- OpenLLM-Ro/ro_sft_orca
- OpenLLM-Ro/ro_sft_camel
model-index:
- name: OpenLLM-Ro/RoGemma-7b-Instruct-2024-10-09
  results:
  - task:
      type: text-generation
    dataset:
      name: RoMT-Bench
      type: RoMT-Bench
    metrics:
    - name: Score
      type: Score
      value: 5.24
  - task:
      type: text-generation
    dataset:
      name: RoCulturaBench
      type: RoCulturaBench
    metrics:
    - name: Score
      type: Score
      value: 3.51
  - task:
      type: text-generation
    dataset:
      name: Romanian_Academic_Benchmarks
      type: Romanian_Academic_Benchmarks
    metrics:
    - name: Average accuracy
      type: accuracy
      value: 50.48
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_arc_challenge
      type: OpenLLM-Ro/ro_arc_challenge
    metrics:
    - name: Average accuracy
      type: accuracy
      value: 52.01
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_mmlu
      type: OpenLLM-Ro/ro_mmlu
    metrics:
    - name: Average accuracy
      type: accuracy
      value: 52.37
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_winogrande
      type: OpenLLM-Ro/ro_winogrande
    metrics:
    - name: Average accuracy
      type: accuracy
      value: 66.97
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_hellaswag
      type: OpenLLM-Ro/ro_hellaswag
    metrics:
    - name: Average accuracy
      type: accuracy
      value: 56.34
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_gsm8k
      type: OpenLLM-Ro/ro_gsm8k
    metrics:
    - name: Average accuracy
      type: accuracy
      value: 25.98
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_truthfulqa
      type: OpenLLM-Ro/ro_truthfulqa
    metrics:
    - name: Average accuracy
      type: accuracy
      value: 49.18
  - task:
      type: text-generation
    dataset:
      name: LaRoSeDa_binary
      type: LaRoSeDa_binary
    metrics:
    - name: Average macro-f1
      type: macro-f1
      value: 86.96
  - task:
      type: text-generation
    dataset:
      name: LaRoSeDa_multiclass
      type: LaRoSeDa_multiclass
    metrics:
    - name: Average macro-f1
      type: macro-f1
      value: 56.72
  - task:
      type: text-generation
    dataset:
      name: LaRoSeDa_binary_finetuned
      type: LaRoSeDa_binary_finetuned
    metrics:
    - name: Average macro-f1
      type: macro-f1
      value: 98.80
  - task:
      type: text-generation
    dataset:
      name: LaRoSeDa_multiclass_finetuned
      type: LaRoSeDa_multiclass_finetuned
    metrics:
    - name: Average macro-f1
      type: macro-f1
      value: 85.81
  - task:
      type: text-generation
    dataset:
      name: WMT_EN-RO
      type: WMT_EN-RO
    metrics:
    - name: Average bleu
      type: bleu
      value: 24.45
  - task:
      type: text-generation
    dataset:
      name: WMT_RO-EN
      type: WMT_RO-EN
    metrics:
    - name: Average bleu
      type: bleu
      value: 14.20
  - task:
      type: text-generation
    dataset:
      name: WMT_EN-RO_finetuned
      type: WMT_EN-RO_finetuned
    metrics:
    - name: Average bleu
      type: bleu
      value: 25.96
  - task:
      type: text-generation
    dataset:
      name: WMT_RO-EN_finetuned
      type: WMT_RO-EN_finetuned
    metrics:
    - name: Average bleu
      type: bleu
      value: 39.07
  - task:
      type: text-generation
    dataset:
      name: XQuAD
      type: XQuAD
    metrics:
    - name: Average exact_match
      type: exact_match
      value: 26.03
  - task:
      type: text-generation
    dataset:
      name: XQuAD
      type: XQuAD
    metrics:
    - name: Average f1
      type: f1
      value: 41.58
  - task:
      type: text-generation
    dataset:
      name: XQuAD_finetuned
      type: XQuAD_finetuned
    metrics:
    - name: Average exact_match
      type: exact_match
      value: 46.72
  - task:
      type: text-generation
    dataset:
      name: XQuAD_finetuned
      type: XQuAD_finetuned
    metrics:
    - name: Average f1
      type: f1
      value: 60.79
  - task:
      type: text-generation
    dataset:
      name: STS
      type: STS
    metrics:
    - name: Average spearman
      type: spearman
      value: 73.23
  - task:
      type: text-generation
    dataset:
      name: STS
      type: STS
    metrics:
    - name: Average pearson
      type: pearson
      value: 71.58
  - task:
      type: text-generation
    dataset:
      name: STS_finetuned
      type: STS_finetuned
    metrics:
    - name: Average spearman
      type: spearman
      value: 88.42
  - task:
      type: text-generation
    dataset:
      name: STS_finetuned
      type: STS_finetuned
    metrics:
    - name: Average pearson
      type: pearson
      value: 88.45
  - task:
      type: text-generation
    dataset:
      name: RoMT-Bench
      type: RoMT-Bench
    metrics:
    - name: First turn
      type: Score
      value: 5.55
    - name: Second turn
      type: Score
      value: 4.94
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_arc_challenge
      type: OpenLLM-Ro/ro_arc_challenge
    metrics:
    - name: 0-shot
      type: accuracy
      value: 49.53
    - name: 1-shot
      type: accuracy
      value: 52.53
    - name: 3-shot
      type: accuracy
      value: 51.50
    - name: 5-shot
      type: accuracy
      value: 53.56
    - name: 10-shot
      type: accuracy
      value: 52.53
    - name: 25-shot
      type: accuracy
      value: 52.44
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_mmlu
      type: OpenLLM-Ro/ro_mmlu
    metrics:
    - name: 0-shot
      type: accuracy
      value: 51.81
    - name: 1-shot
      type: accuracy
      value: 52.45
    - name: 3-shot
      type: accuracy
      value: 52.52
    - name: 5-shot
      type: accuracy
      value: 52.70
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_winogrande
      type: OpenLLM-Ro/ro_winogrande
    metrics:
    - name: 0-shot
      type: accuracy
      value: 66.54
    - name: 1-shot
      type: accuracy
      value: 66.69
    - name: 3-shot
      type: accuracy
      value: 67.09
    - name: 5-shot
      type: accuracy
      value: 67.56
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_hellaswag
      type: OpenLLM-Ro/ro_hellaswag
    metrics:
    - name: 0-shot
      type: accuracy
      value: 58.80
    - name: 1-shot
      type: accuracy
      value: 57.04
    - name: 3-shot
      type: accuracy
      value: 55.85
    - name: 5-shot
      type: accuracy
      value: 54.15
    - name: 10-shot
      type: accuracy
      value: 55.88
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_gsm8k
      type: OpenLLM-Ro/ro_gsm8k
    metrics:
    - name: 1-shot
      type: accuracy
      value: 22.06
    - name: 3-shot
      type: accuracy
      value: 25.40
    - name: 5-shot
      type: accuracy
      value: 30.48
  - task:
      type: text-generation
    dataset:
      name: LaRoSeDa_binary
      type: LaRoSeDa_binary
    metrics:
    - name: 0-shot
      type: macro-f1
      value: 87.28
    - name: 1-shot
      type: macro-f1
      value: 86.40
    - name: 3-shot
      type: macro-f1
      value: 87.95
    - name: 5-shot
      type: macro-f1
      value: 86.20
  - task:
      type: text-generation
    dataset:
      name: LaRoSeDa_multiclass
      type: LaRoSeDa_multiclass
    metrics:
    - name: 0-shot
      type: macro-f1
      value: 38.35
    - name: 1-shot
      type: macro-f1
      value: 63.86
    - name: 3-shot
      type: macro-f1
      value: 62.03
    - name: 5-shot
      type: macro-f1
      value: 62.62
  - task:
      type: text-generation
    dataset:
      name: WMT_EN-RO
      type: WMT_EN-RO
    metrics:
    - name: 0-shot
      type: bleu
      value: 11.39
    - name: 1-shot
      type: bleu
      value: 28.08
    - name: 3-shot
      type: bleu
      value: 29.18
    - name: 5-shot
      type: bleu
      value: 29.13
  - task:
      type: text-generation
    dataset:
      name: WMT_RO-EN
      type: WMT_RO-EN
    metrics:
    - name: 0-shot
      type: bleu
      value: 1.92
    - name: 1-shot
      type: bleu
      value: 9.39
    - name: 3-shot
      type: bleu
      value: 21.81
    - name: 5-shot
      type: bleu
      value: 23.66
  - task:
      type: text-generation
    dataset:
      name: XQuAD_EM
      type: XQuAD_EM
    metrics:
    - name: 0-shot
      type: exact_match
      value: 32.77
    - name: 1-shot
      type: exact_match
      value: 20.25
    - name: 3-shot
      type: exact_match
      value: 18.49
    - name: 5-shot
      type: exact_match
      value: 32.60
  - task:
      type: text-generation
    dataset:
      name: XQuAD_F1
      type: XQuAD_F1
    metrics:
    - name: 0-shot
      type: f1
      value: 47.98
    - name: 1-shot
      type: f1
      value: 34.92
    - name: 3-shot
      type: f1
      value: 33.27
    - name: 5-shot
      type: f1
      value: 50.14
  - task:
      type: text-generation
    dataset:
      name: STS_Spearman
      type: STS_Spearman
    metrics:
    - name: 1-shot
      type: spearman
      value: 71.75
    - name: 3-shot
      type: spearman
      value: 71.83
    - name: 5-shot
      type: spearman
      value: 76.11
  - task:
      type: text-generation
    dataset:
      name: STS_Pearson
      type: STS_Pearson
    metrics:
    - name: 1-shot
      type: pearson
      value: 69.97
    - name: 3-shot
      type: pearson
      value: 69.87
    - name: 5-shot
      type: pearson
      value: 74.89
---

# Model Card for RoGemma-7b-Instruct-2024-10-09

<!-- Provide a quick summary of what the model is/does. -->

RoGemma is a family of pretrained and fine-tuned generative text models for Romanian. This is the repository for the **instruct 7B model**. Links to other models can be found at the bottom of this page.

## Model Details

### Model Description

<!-- Provide a longer summary of what this model is. -->
OpenLLM-Ro represents the first open-source effort to build an LLM specialized for Romanian. OpenLLM-Ro has developed and publicly released a collection of Romanian LLMs, both as foundational models and as instruct and chat variants.


- **Developed by:** OpenLLM-Ro
<!-- - **Funded by [optional]:** [More Information Needed] -->
<!-- - **Shared by [optional]:** [More Information Needed] -->
<!-- - **Model type:** [More Information Needed] -->
- **Language(s):** Romanian
- **License:** cc-by-nc-4.0
- **Finetuned from model:** [gemma-7b](https://huggingface.co/google/gemma-7b)
- **Trained using:** [RoAlpaca](https://huggingface.co/datasets/OpenLLM-Ro/ro_sft_alpaca), [RoAlpacaGPT4](https://huggingface.co/datasets/OpenLLM-Ro/ro_sft_alpaca_gpt4), [RoDolly](https://huggingface.co/datasets/OpenLLM-Ro/ro_sft_dolly), [RoSelfInstruct](https://huggingface.co/datasets/OpenLLM-Ro/ro_sft_selfinstruct_gpt4), [RoNoRobots](https://huggingface.co/datasets/OpenLLM-Ro/ro_sft_norobots), [RoOrca](https://huggingface.co/datasets/OpenLLM-Ro/ro_sft_orca), [RoCamel](https://huggingface.co/datasets/OpenLLM-Ro/ro_sft_camel) (see the loading sketch below)
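
The SFT datasets listed above are all published on the Hugging Face Hub, so they can be inspected directly with the `datasets` library. The snippet below is a minimal sketch, not part of the official training code; the `train` split name is an assumption, so check each dataset page for its actual splits.

```python
# Minimal sketch: inspect one of the SFT datasets listed above.
# The "train" split name is an assumption; verify it on the dataset page.
from datasets import load_dataset

ds = load_dataset("OpenLLM-Ro/ro_sft_alpaca", split="train")
print(ds)     # column names and number of rows
print(ds[0])  # one Romanian instruction-tuning example
```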

### Model Sources

<!-- Provide the basic links for the model. -->

- **Repository:** https://github.com/OpenLLM-Ro/LLaMA-Factory
- **Paper:** https://arxiv.org/abs/2406.18266

## Intended Use

### Intended Use Cases

RoGemma is intended for research use in Romanian. Base models can be adapted for a variety of natural language tasks, while instruction- and chat-tuned models are intended for assistant-like chat.

### Out-of-Scope Use

<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->

Use in any manner that violates the license or any applicable laws or regulations, and use in languages other than Romanian.


## How to Get Started with the Model

Use the code below to get started with the model.

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("OpenLLM-Ro/RoGemma-7b-Instruct-2024-10-09")
model = AutoModelForCausalLM.from_pretrained("OpenLLM-Ro/RoGemma-7b-Instruct-2024-10-09")

# Build a single-turn chat and render it as a prompt with the model's chat template.
instruction = "Ce jocuri de societate pot juca cu prietenii mei?"  # "What board games can I play with my friends?"
chat = [
    {"role": "user", "content": instruction},
]
prompt = tokenizer.apply_chat_template(chat, tokenize=False, system_message="")

# Tokenize the rendered prompt (the template already contains the special tokens) and generate.
inputs = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt")
outputs = model.generate(input_ids=inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0]))
```
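
For GPU inference, the snippet above can be adapted to load the weights in half precision with automatic device placement. This is a generic `transformers` pattern rather than something prescribed by this card: `torch_dtype` and `device_map` are standard `from_pretrained` arguments, and `device_map="auto"` additionally requires the `accelerate` package.

```python
# Minimal sketch: half-precision GPU inference (generic transformers usage,
# not an officially documented recipe for this model).
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

model_id = "OpenLLM-Ro/RoGemma-7b-Instruct-2024-10-09"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # roughly halves memory vs. float32
    device_map="auto",           # places weights on available devices; needs `accelerate`
)

chat = [{"role": "user", "content": "Ce jocuri de societate pot juca cu prietenii mei?"}]
# apply_chat_template can tokenize directly; add_generation_prompt appends the
# assistant-turn opener so the model answers instead of continuing the user turn.
input_ids = tokenizer.apply_chat_template(
    chat, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
outputs = model.generate(input_ids=input_ids, max_new_tokens=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```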

## Academic Benchmarks

<table>
<tbody>
<tr>
<td><strong>Model</strong></td>
<td><strong><center>Average</center></strong></td>
<td><strong><center>ARC</center></strong></td>
<td><strong><center>MMLU</center></strong></td>
<td><strong><center>Winogrande</center></strong></td>
<td><strong><center>Hellaswag</center></strong></td>
<td><strong><center>GSM8k</center></strong></td>
<td><strong><center>TruthfulQA</center></strong></td>
</tr>
<tr>
<td>gemma-1.1-7b-it</td><td><center>41.44</center></td><td><center>40.32</center></td><td><center>47.22</center></td><td><center>55.01</center></td><td><center>47.03</center></td><td><center>9.50</center></td><td><center>49.58</center></td>
</tr>
<tr>
<td>RoGemma-7b-Instruct-2024-06-28</td><td><center><strong>53.41</strong></center></td><td><center><strong>52.44</strong></center></td><td><center>54.44</center></td><td><center><strong>69.36</strong></center></td><td><center><strong>61.96</strong></center></td><td><center>31.06</center></td><td><center><strong>51.23</strong></center></td>
</tr>
<tr>
<td><em>RoGemma-7b-Instruct-2024-10-09</em></td><td><center><em>50.48</em></center></td><td><center><em>52.01</em></center></td><td><center><em>52.37</em></center></td><td><center><em>66.97</em></center></td><td><center><em>56.34</em></center></td><td><center><em>25.98</em></center></td><td><center><em>49.18</em></center></td>
</tr>
<tr>
<td>RoGemma-7b-Instruct-DPO-2024-10-09</td><td><center>48.27</center></td><td><center>46.66</center></td><td><center><strong>54.45</strong></center></td><td><center>63.73</center></td><td><center>49.33</center></td><td><center><strong>34.98</strong></center></td><td><center>40.45</center></td>
</tr>
</tbody>
</table>

## Downstream Tasks

<table>
<tbody>
<tr>
<td></td>
<td colspan="4"><center><strong>LaRoSeDa</strong></center></td>
<td colspan="4"><center><strong>WMT</strong></center></td>
</tr>
<tr>
<td></td>
<td colspan="2"><center><strong>Few-shot</strong></center></td>
<td colspan="2"><center><strong>Finetuned</strong></center></td>
<td colspan="2"><center><strong>Few-shot</strong></center></td>
<td colspan="2"><center><strong>Finetuned</strong></center></td>
</tr>
<tr>
<td><strong>Model</strong></td>
<td><center><strong>Binary<br>(Macro F1)</strong></center></td>
<td><center><strong>Multiclass<br>(Macro F1)</strong></center></td>
<td><center><strong>Binary<br>(Macro F1)</strong></center></td>
<td><center><strong>Multiclass<br>(Macro F1)</strong></center></td>
<td><center><strong>EN-RO<br>(Bleu)</strong></center></td>
<td><center><strong>RO-EN<br>(Bleu)</strong></center></td>
<td><center><strong>EN-RO<br>(Bleu)</strong></center></td>
<td><center><strong>RO-EN<br>(Bleu)</strong></center></td>
</tr>
<tr>
<td>gemma-1.1-7b-it</td><td><center>87.54</center></td><td><center>51.48</center></td><td><center>83.87</center></td><td><center>85.61</center></td><td><center>17.96</center></td><td><center><strong>27.74</strong></center></td><td><center>25.48</center></td><td><center>36.11</center></td>
</tr>
<tr>
<td>RoGemma-7b-Instruct-2024-06-28</td><td><center><strong>97.86</strong></center></td><td><center><strong>65.70</strong></center></td><td><center>98.43</center></td><td><center><strong>87.17</strong></center></td><td><center><strong>27.91</strong></center></td><td><center>23.08</center></td><td><center><strong>27.99</strong></center></td><td><center><strong>39.51</strong></center></td>
</tr>
<tr>
<td><em>RoGemma-7b-Instruct-2024-10-09</em></td><td><center><em>86.96</em></center></td><td><center><em>56.72</em></center></td><td><center><em><strong>98.80</strong></em></center></td><td><center><em>85.81</em></center></td><td><center><em>24.45</em></center></td><td><center><em>14.20</em></center></td><td><center><em>25.96</em></center></td><td><center><em>39.07</em></center></td>
</tr>
<tr>
<td>RoGemma-7b-Instruct-DPO-2024-10-09</td><td><center>96.45</center></td><td><center>63.23</center></td><td><center>-</center></td><td><center>-</center></td><td><center>20.73</center></td><td><center>7.87</center></td><td><center>-</center></td><td><center>-</center></td>
</tr>
</tbody>
</table>


<table>
<tbody>
<tr>
<td></td>
<td colspan="4"><center><strong>XQuAD</strong></center></td>
<td colspan="4"><center><strong>STS</strong></center></td>
</tr>
<tr>
<td></td>
<td colspan="2"><center><strong>Few-shot</strong></center></td>
<td colspan="2"><center><strong>Finetuned</strong></center></td>
<td colspan="2"><center><strong>Few-shot</strong></center></td>
<td colspan="2"><center><strong>Finetuned</strong></center></td>
</tr>
<tr>
<td><strong>Model</strong></td>
<td><center><strong>(EM)</strong></center></td>
<td><center><strong>(F1)</strong></center></td>
<td><center><strong>(EM)</strong></center></td>
<td><center><strong>(F1)</strong></center></td>
<td><center><strong>(Spearman)</strong></center></td>
<td><center><strong>(Pearson)</strong></center></td>
<td><center><strong>(Spearman)</strong></center></td>
<td><center><strong>(Pearson)</strong></center></td>
</tr>
<tr>
<td>gemma-1.1-7b-it</td><td><center><strong>42.10</strong></center></td><td><center><strong>62.30</strong></center></td><td><center><strong>60.34</strong></center></td><td><center><strong>77.40</strong></center></td><td><center>49.10</center></td><td><center>50.23</center></td><td><center>83.43</center></td><td><center>83.64</center></td>
</tr>
<tr>
<td>RoGemma-7b-Instruct-2024-06-28</td><td><center>17.75</center></td><td><center>28.11</center></td><td><center>52.02</center></td><td><center>68.43</center></td><td><center><strong>73.96</strong></center></td><td><center><strong>75.16</strong></center></td><td><center>86.45</center></td><td><center>86.31</center></td>
</tr>
<tr>
<td><em>RoGemma-7b-Instruct-2024-10-09</em></td><td><center><em>26.03</em></center></td><td><center><em>41.58</em></center></td><td><center><em>46.72</em></center></td><td><center><em>60.79</em></center></td><td><center><em>73.23</em></center></td><td><center><em>71.58</em></center></td><td><center><em><strong>88.42</strong></em></center></td><td><center><em><strong>88.45</strong></em></center></td>
</tr>
<tr>
<td>RoGemma-7b-Instruct-DPO-2024-10-09</td><td><center>19.14</center></td><td><center>38.10</center></td><td><center>-</center></td><td><center>-</center></td><td><center>69.38</center></td><td><center>69.34</center></td><td><center>-</center></td><td><center>-</center></td>
</tr>
</tbody>
</table>

## MT-Bench

<table>
<tbody>
<tr>
<td><strong>Model</strong></td>
<td><strong><center>Average</center></strong></td>
<td><strong><center>1st turn</center></strong></td>
<td><strong><center>2nd turn</center></strong></td>
<td><strong><center>Answers in Ro</center></strong></td>
</tr>
<tr>
<td>gemma-1.1-7b-it</td><td><center>4.83</center></td><td><center>5.11</center></td><td><center>4.55</center></td><td><center><strong>160/160</strong></center></td>
</tr>
<tr>
<td>RoGemma-7b-Instruct-2024-06-28</td><td><center>5.26</center></td><td><center><strong>5.92</strong></center></td><td><center>4.60</center></td><td><center><strong>160/160</strong></center></td>
</tr>
<tr>
<td><em>RoGemma-7b-Instruct-2024-10-09</em></td><td><center><em>5.24</em></center></td><td><center><em>5.55</em></center></td><td><center><em>4.94</em></center></td><td><center><em><strong>160/160</strong></em></center></td>
</tr>
<tr>
<td>RoGemma-7b-Instruct-DPO-2024-10-09</td><td><center><strong>5.47</strong></center></td><td><center><strong>5.92</strong></center></td><td><center><strong>5.03</strong></center></td><td><center><strong>160/160</strong></center></td>
</tr>
</tbody>
</table>

## RoCulturaBench

<table>
<tbody>
<tr>
<td><strong>Model</strong></td>
<td><strong><center>Average</center></strong></td>
<td><strong><center>Answers in Ro</center></strong></td>
</tr>
<tr>
<td>gemma-1.1-7b-it</td><td><center>3.38</center></td><td><center><strong>100/100</strong></center></td>
</tr>
<tr>
<td>RoGemma-7b-Instruct-2024-06-28</td><td><center>3.26</center></td><td><center><strong>100/100</strong></center></td>
</tr>
<tr>
<td><em>RoGemma-7b-Instruct-2024-10-09</em></td><td><center><em>3.51</em></center></td><td><center><em><strong>100/100</strong></em></center></td>
</tr>
<tr>
<td>RoGemma-7b-Instruct-DPO-2024-10-09</td><td><center><strong>3.94</strong></center></td><td><center><strong>100/100</strong></center></td>
</tr>
</tbody>
</table>

## RoGemma Model Family

| Model | Link |
|--------------------|:--------:|
|RoGemma-7b-Instruct-2024-06-28| [link](https://huggingface.co/OpenLLM-Ro/RoGemma-7b-Instruct-2024-06-28) |
|*RoGemma-7b-Instruct-2024-10-09*| [link](https://huggingface.co/OpenLLM-Ro/RoGemma-7b-Instruct-2024-10-09) |
|RoGemma-7b-Instruct-DPO-2024-10-09| [link](https://huggingface.co/OpenLLM-Ro/RoGemma-7b-Instruct-DPO-2024-10-09) |

## Citation

```
@misc{masala2024vorbecstiromanecsterecipetrain,
      title={"Vorbe\c{s}ti Rom\^ane\c{s}te?" A Recipe to Train Powerful Romanian LLMs with English Instructions},
      author={Mihai Masala and Denis C. Ilie-Ablachim and Alexandru Dima and Dragos Corlatescu and Miruna Zavelca and Ovio Olaru and Simina Terian-Dan and Andrei Terian-Dan and Marius Leordeanu and Horia Velicu and Marius Popescu and Mihai Dascalu and Traian Rebedea},
      year={2024},
      eprint={2406.18266},
      archivePrefix={arXiv},
      primaryClass={cs.CL},
      url={https://arxiv.org/abs/2406.18266},
}
```
<!-- **APA:**

[More Information Needed] -->