Files changed (1)
  1. README.md +110 -2
README.md CHANGED
@@ -10,8 +10,103 @@ base_model:
  - cognitivecomputations/dolphin-2.9.4-llama3.1-8b
  - NousResearch/Hermes-3-Llama-3.1-8B
  - NeverSleep/Lumimaid-v0.2-8B
- - Sao10K/L3-8B-Stheno-v3.2
+ - Sao10K/L3-8B-Stheno-v3.2
  - DreadPoor/ScaduTorrent-8B-model_stock
+ model-index:
+ - name: Trinas_Nectar-8B-model_stock
+   results:
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: IFEval (0-Shot)
+       type: HuggingFaceH4/ifeval
+       args:
+         num_few_shot: 0
+     metrics:
+     - type: inst_level_strict_acc and prompt_level_strict_acc
+       value: 72.59
+       name: strict accuracy
+     source:
+       url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=DreadPoor/Trinas_Nectar-8B-model_stock
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: BBH (3-Shot)
+       type: BBH
+       args:
+         num_few_shot: 3
+     metrics:
+     - type: acc_norm
+       value: 31.98
+       name: normalized accuracy
+     source:
+       url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=DreadPoor/Trinas_Nectar-8B-model_stock
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: MATH Lvl 5 (4-Shot)
+       type: hendrycks/competition_math
+       args:
+         num_few_shot: 4
+     metrics:
+     - type: exact_match
+       value: 13.75
+       name: exact match
+     source:
+       url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=DreadPoor/Trinas_Nectar-8B-model_stock
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: GPQA (0-shot)
+       type: Idavidrein/gpqa
+       args:
+         num_few_shot: 0
+     metrics:
+     - type: acc_norm
+       value: 4.81
+       name: acc_norm
+     source:
+       url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=DreadPoor/Trinas_Nectar-8B-model_stock
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: MuSR (0-shot)
+       type: TAUR-Lab/MuSR
+       args:
+         num_few_shot: 0
+     metrics:
+     - type: acc_norm
+       value: 11.41
+       name: acc_norm
+     source:
+       url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=DreadPoor/Trinas_Nectar-8B-model_stock
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: MMLU-PRO (5-shot)
+       type: TIGER-Lab/MMLU-Pro
+       config: main
+       split: test
+       args:
+         num_few_shot: 5
+     metrics:
+     - type: acc
+       value: 29.09
+       name: accuracy
+     source:
+       url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=DreadPoor/Trinas_Nectar-8B-model_stock
+       name: Open LLM Leaderboard
  ---

  # Trinas_Nectar-8B-model_stock
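The `model-index` block added above follows the Hugging Face model-card metadata schema, so the reported scores can be read back programmatically. Below is a minimal sketch, assuming a local copy of the card saved as `README.md` and PyYAML installed; splitting on `---` is a simplification that works only because the metadata is the first block in the file.

```python
# Minimal sketch: read the model-index metrics from a local copy of the card.
# Assumes the file is named README.md and PyYAML is installed (pip install pyyaml).
import yaml

with open("README.md", encoding="utf-8") as f:
    text = f.read()

# The card metadata sits between the leading pair of '---' fences.
_, frontmatter, _ = text.split("---", 2)
meta = yaml.safe_load(frontmatter)

# Walk the single model entry and print one line per benchmark result.
for result in meta["model-index"][0]["results"]:
    dataset = result["dataset"]["name"]
    for metric in result["metrics"]:
        print(f"{dataset}: {metric['type']} = {metric['value']}")
```

Run against this card, that would print one line per benchmark (IFEval through MMLU-PRO) with the same values shown in the results table after the second hunk.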
@@ -34,4 +129,17 @@ base_model: Sao10K/L3-8B-Stheno-v3.2
  normalize: false
  int8_mask: true
  dtype: bfloat16
- ```
+ ```
+ # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard)
+ Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_DreadPoor__Trinas_Nectar-8B-model_stock)
+
+ |Metric             |Value|
+ |-------------------|----:|
+ |Avg.               |27.27|
+ |IFEval (0-Shot)    |72.59|
+ |BBH (3-Shot)       |31.98|
+ |MATH Lvl 5 (4-Shot)|13.75|
+ |GPQA (0-shot)      | 4.81|
+ |MuSR (0-shot)      |11.41|
+ |MMLU-PRO (5-shot)  |29.09|
+
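The `Avg.` row in the added table is the unweighted mean of the six benchmark scores. A quick sketch to confirm the arithmetic, with the values copied from the table:

```python
# Verify that Avg. is the plain mean of the six reported benchmark scores.
scores = {
    "IFEval (0-Shot)": 72.59,
    "BBH (3-Shot)": 31.98,
    "MATH Lvl 5 (4-Shot)": 13.75,
    "GPQA (0-shot)": 4.81,
    "MuSR (0-shot)": 11.41,
    "MMLU-PRO (5-shot)": 29.09,
}
avg = sum(scores.values()) / len(scores)
print(round(avg, 2))  # prints 27.27, matching the Avg. row
```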