alozowski committed on
Commit
fa767c6
1 Parent(s): e510947

Set model params from 0 to -1

Browse files
Files changed (28) hide show
  1. AIJUUD/JUUD_Barun_GPT_V1_eval_request_False_float16_Original.json +1 -1
  2. Aoyinke/qwen2-merged_eval_request_False_bfloat16_Original.json +1 -1
  3. Chan-Y/MathLlama3.1_eval_request_False_float16_Adapter.json +1 -1
  4. Chan-Y/MathLlama3.1_eval_request_False_float16_Original.json +1 -1
  5. Corianas/llama-3-reactor_eval_request_False_float16_Original.json +1 -1
  6. acbdkk/SupaMATH3_eval_request_False_float16_Original.json +1 -1
  7. alcholjung/llama3_medical_tuned_eval_request_False_float16_Adapter.json +1 -1
  8. basit0513/LLM_eval_request_False_4bit_Adapter.json +1 -1
  9. basit0513/LLM_eval_request_False_4bit_Original.json +1 -1
  10. basit0513/LLM_eval_request_False_float16_Adapter.json +1 -1
  11. brahmairesearch/cerberus-v0.1_eval_request_False_float16_Original.json +1 -1
  12. bunnycore/L3-uncensored-lora_model_eval_request_False_4bit_Adapter.json +1 -1
  13. bunnycore/Phi-3-mini-CoT-lora_eval_request_False_4bit_Adapter.json +1 -1
  14. cognitivecomputations/dolphin-2.9.2-Phi-3-Medium_eval_request_False_bfloat16_Original.json +1 -1
  15. dustinwloring1988/llama3-merge-4_eval_request_False_float16_Original.json +1 -1
  16. google/flan-ul2_eval_request_False_bfloat16_Original.json +1 -1
  17. langgptai/Qwen-las-v0.1_eval_request_False_bfloat16_Adapter.json +1 -1
  18. leejaymin/etri-ones-solar_eval_request_False_float16_Delta.json +1 -1
  19. pankajmathur/model_007_eval_request_False_bfloat16_Original.json +1 -1
  20. pankajmathur/model_007_preview_eval_request_False_bfloat16_Original.json +1 -1
  21. pankajmathur/model_009_eval_request_False_bfloat16_Original.json +1 -1
  22. pankajmathur/model_101_eval_request_False_bfloat16_Original.json +1 -1
  23. pankajmathur/model_420_eval_request_False_bfloat16_Original.json +1 -1
  24. pankajmathur/model_420_preview_eval_request_False_bfloat16_Original.json +1 -1
  25. pankajmathur/model_51_eval_request_False_bfloat16_Original.json +1 -1
  26. qcymaq/finetune-llm-wukong_eval_request_False_4bit_Adapter.json +1 -1
  27. sci-m-wang/Phi-3-mini-4k-instruct-sa-v0.1_eval_request_False_bfloat16_Adapter.json +1 -1
  28. xDAN2099/xDAN-L1-Llama3-Self-Alignment-Collections-0627-epoch0.6_eval_request_False_bfloat16_Original.json +1 -1
AIJUUD/JUUD_Barun_GPT_V1_eval_request_False_float16_Original.json CHANGED
@@ -3,7 +3,7 @@
3
  "base_model": "",
4
  "revision": "683bde57913b834f71ca454f490fe76556e50785",
5
  "precision": "float16",
6
- "params": 0,
7
  "architectures": "Qwen2ForCausalLM",
8
  "weight_type": "Original",
9
  "status": "RUNNING",
 
3
  "base_model": "",
4
  "revision": "683bde57913b834f71ca454f490fe76556e50785",
5
  "precision": "float16",
6
+ "params": -1,
7
  "architectures": "Qwen2ForCausalLM",
8
  "weight_type": "Original",
9
  "status": "RUNNING",
Aoyinke/qwen2-merged_eval_request_False_bfloat16_Original.json CHANGED
@@ -3,7 +3,7 @@
3
  "base_model": "Qwen/Qwen2-7B",
4
  "revision": "bb4b4e1fc8a1fef90735e28b336babe8e7450bd7",
5
  "precision": "bfloat16",
6
- "params": 0,
7
  "architectures": "MixtralForCausalLM",
8
  "weight_type": "Original",
9
  "status": "FAILED",
 
3
  "base_model": "Qwen/Qwen2-7B",
4
  "revision": "bb4b4e1fc8a1fef90735e28b336babe8e7450bd7",
5
  "precision": "bfloat16",
6
+ "params": -1,
7
  "architectures": "MixtralForCausalLM",
8
  "weight_type": "Original",
9
  "status": "FAILED",
Chan-Y/MathLlama3.1_eval_request_False_float16_Adapter.json CHANGED
@@ -3,7 +3,7 @@
3
  "base_model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
4
  "revision": "ef5c5f13ab01d574ee0512d618b41177fec19a66",
5
  "precision": "float16",
6
- "params": 0,
7
  "architectures": "?",
8
  "weight_type": "Adapter",
9
  "status": "FAILED",
 
3
  "base_model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
4
  "revision": "ef5c5f13ab01d574ee0512d618b41177fec19a66",
5
  "precision": "float16",
6
+ "params": -1,
7
  "architectures": "?",
8
  "weight_type": "Adapter",
9
  "status": "FAILED",
Chan-Y/MathLlama3.1_eval_request_False_float16_Original.json CHANGED
@@ -3,7 +3,7 @@
3
  "base_model": " meta-llama/Meta-Llama-3.1-8B-Instruct ",
4
  "revision": "ef5c5f13ab01d574ee0512d618b41177fec19a66",
5
  "precision": "float16",
6
- "params": 0,
7
  "architectures": "LlamaForCausalLM",
8
  "weight_type": "Original",
9
  "status": "FAILED",
 
3
  "base_model": " meta-llama/Meta-Llama-3.1-8B-Instruct ",
4
  "revision": "ef5c5f13ab01d574ee0512d618b41177fec19a66",
5
  "precision": "float16",
6
+ "params": -1,
7
  "architectures": "LlamaForCausalLM",
8
  "weight_type": "Original",
9
  "status": "FAILED",
Corianas/llama-3-reactor_eval_request_False_float16_Original.json CHANGED
@@ -3,7 +3,7 @@
3
  "base_model": "",
4
  "revision": "bef2eac42fd89baa0064badbc9c7958ad9ccbed3",
5
  "precision": "float16",
6
- "params": 0,
7
  "architectures": "LlamaForCausalLM",
8
  "weight_type": "Original",
9
  "status": "FINISHED",
 
3
  "base_model": "",
4
  "revision": "bef2eac42fd89baa0064badbc9c7958ad9ccbed3",
5
  "precision": "float16",
6
+ "params": -1,
7
  "architectures": "LlamaForCausalLM",
8
  "weight_type": "Original",
9
  "status": "FINISHED",
acbdkk/SupaMATH3_eval_request_False_float16_Original.json CHANGED
@@ -3,7 +3,7 @@
3
  "base_model": "unsloth/qwen2-7b-bnb-4bit",
4
  "revision": "7ac41286818edb0b9581f9e7408bb17f77fab307",
5
  "precision": "float16",
6
- "params": 0,
7
  "architectures": "Qwen2ForCausalLM",
8
  "weight_type": "Original",
9
  "status": "FAILED",
 
3
  "base_model": "unsloth/qwen2-7b-bnb-4bit",
4
  "revision": "7ac41286818edb0b9581f9e7408bb17f77fab307",
5
  "precision": "float16",
6
+ "params": -1,
7
  "architectures": "Qwen2ForCausalLM",
8
  "weight_type": "Original",
9
  "status": "FAILED",
alcholjung/llama3_medical_tuned_eval_request_False_float16_Adapter.json CHANGED
@@ -3,7 +3,7 @@
3
  "base_model": "meta-llama/Meta-Llama-3-8B",
4
  "revision": "3c438cf81f25565bba0bfa70f868540be0e34ca3",
5
  "precision": "float16",
6
- "params": 0,
7
  "architectures": "?",
8
  "weight_type": "Adapter",
9
  "status": "RUNNING",
 
3
  "base_model": "meta-llama/Meta-Llama-3-8B",
4
  "revision": "3c438cf81f25565bba0bfa70f868540be0e34ca3",
5
  "precision": "float16",
6
+ "params": -1,
7
  "architectures": "?",
8
  "weight_type": "Adapter",
9
  "status": "RUNNING",
basit0513/LLM_eval_request_False_4bit_Adapter.json CHANGED
@@ -3,7 +3,7 @@
3
  "base_model": "NousResearch/Llama-2-7b-chat-hf",
4
  "revision": "a078a50de68f1a344a8c9c379468b84fdb8c8315",
5
  "precision": "4bit",
6
- "params": 0,
7
  "architectures": "?",
8
  "weight_type": "Adapter",
9
  "status": "FAILED",
 
3
  "base_model": "NousResearch/Llama-2-7b-chat-hf",
4
  "revision": "a078a50de68f1a344a8c9c379468b84fdb8c8315",
5
  "precision": "4bit",
6
+ "params": -1,
7
  "architectures": "?",
8
  "weight_type": "Adapter",
9
  "status": "FAILED",
basit0513/LLM_eval_request_False_4bit_Original.json CHANGED
@@ -3,7 +3,7 @@
3
  "base_model": "NousResearch/Llama-2-7b-chat-hf",
4
  "revision": "a078a50de68f1a344a8c9c379468b84fdb8c8315",
5
  "precision": "4bit",
6
- "params": 0,
7
  "architectures": "LlamaForCausalLM",
8
  "weight_type": "Original",
9
  "status": "FAILED",
 
3
  "base_model": "NousResearch/Llama-2-7b-chat-hf",
4
  "revision": "a078a50de68f1a344a8c9c379468b84fdb8c8315",
5
  "precision": "4bit",
6
+ "params": -1,
7
  "architectures": "LlamaForCausalLM",
8
  "weight_type": "Original",
9
  "status": "FAILED",
basit0513/LLM_eval_request_False_float16_Adapter.json CHANGED
@@ -3,7 +3,7 @@
3
  "base_model": "NousResearch/Llama-2-7b-chat-hf",
4
  "revision": "a078a50de68f1a344a8c9c379468b84fdb8c8315",
5
  "precision": "float16",
6
- "params": 0,
7
  "architectures": "?",
8
  "weight_type": "Adapter",
9
  "status": "FAILED",
 
3
  "base_model": "NousResearch/Llama-2-7b-chat-hf",
4
  "revision": "a078a50de68f1a344a8c9c379468b84fdb8c8315",
5
  "precision": "float16",
6
+ "params": -1,
7
  "architectures": "?",
8
  "weight_type": "Adapter",
9
  "status": "FAILED",
brahmairesearch/cerberus-v0.1_eval_request_False_float16_Original.json CHANGED
@@ -3,7 +3,7 @@
3
  "base_model": "llama3",
4
  "revision": "23971e593a9b34fc06c42c42a4044ebf560349f3",
5
  "precision": "float16",
6
- "params": 0,
7
  "architectures": "LlamaForCausalLM",
8
  "weight_type": "Original",
9
  "status": "FAILED",
 
3
  "base_model": "llama3",
4
  "revision": "23971e593a9b34fc06c42c42a4044ebf560349f3",
5
  "precision": "float16",
6
+ "params": -1,
7
  "architectures": "LlamaForCausalLM",
8
  "weight_type": "Original",
9
  "status": "FAILED",
bunnycore/L3-uncensored-lora_model_eval_request_False_4bit_Adapter.json CHANGED
@@ -3,7 +3,7 @@
3
  "base_model": "unsloth/llama-3-8b-bnb-4bit",
4
  "revision": "1d05ba04b3b22a839581fe7bb7e34dbd0f1a187e",
5
  "precision": "4bit",
6
- "params": 0,
7
  "architectures": "?",
8
  "weight_type": "Adapter",
9
  "status": "FAILED",
 
3
  "base_model": "unsloth/llama-3-8b-bnb-4bit",
4
  "revision": "1d05ba04b3b22a839581fe7bb7e34dbd0f1a187e",
5
  "precision": "4bit",
6
+ "params": -1,
7
  "architectures": "?",
8
  "weight_type": "Adapter",
9
  "status": "FAILED",
bunnycore/Phi-3-mini-CoT-lora_eval_request_False_4bit_Adapter.json CHANGED
@@ -3,7 +3,7 @@
3
  "base_model": "unsloth/Phi-3-mini-4k-instruct-bnb-4bit",
4
  "revision": "f14dbe2295103ff29e392353beecedb6ebefbc55",
5
  "precision": "4bit",
6
- "params": 0,
7
  "architectures": "?",
8
  "weight_type": "Adapter",
9
  "status": "FAILED",
 
3
  "base_model": "unsloth/Phi-3-mini-4k-instruct-bnb-4bit",
4
  "revision": "f14dbe2295103ff29e392353beecedb6ebefbc55",
5
  "precision": "4bit",
6
+ "params": -1,
7
  "architectures": "?",
8
  "weight_type": "Adapter",
9
  "status": "FAILED",
cognitivecomputations/dolphin-2.9.2-Phi-3-Medium_eval_request_False_bfloat16_Original.json CHANGED
@@ -3,7 +3,7 @@
3
  "base_model": "",
4
  "revision": "0470c5b912b51fa6e27d87a8ea7feafacd8cb101",
5
  "precision": "bfloat16",
6
- "params": 0,
7
  "architectures": "MistralForCausalLM",
8
  "weight_type": "Original",
9
  "status": "FINISHED",
 
3
  "base_model": "",
4
  "revision": "0470c5b912b51fa6e27d87a8ea7feafacd8cb101",
5
  "precision": "bfloat16",
6
+ "params": -1,
7
  "architectures": "MistralForCausalLM",
8
  "weight_type": "Original",
9
  "status": "FINISHED",
dustinwloring1988/llama3-merge-4_eval_request_False_float16_Original.json CHANGED
@@ -3,7 +3,7 @@
3
  "base_model": "",
4
  "revision": "07b4bad25553ae3d7d11a86c29517b784b8d86d3",
5
  "precision": "float16",
6
- "params": 0,
7
  "architectures": "LlamaForCausalLM",
8
  "weight_type": "Original",
9
  "status": "FAILED",
 
3
  "base_model": "",
4
  "revision": "07b4bad25553ae3d7d11a86c29517b784b8d86d3",
5
  "precision": "float16",
6
+ "params": -1,
7
  "architectures": "LlamaForCausalLM",
8
  "weight_type": "Original",
9
  "status": "FAILED",
google/flan-ul2_eval_request_False_bfloat16_Original.json CHANGED
@@ -3,7 +3,7 @@
3
  "base_model": "",
4
  "revision": "452d74ce28ac4a7f211d6ba3ef0717027f7a8074",
5
  "precision": "bfloat16",
6
- "params": 0,
7
  "architectures": "T5ForConditionalGeneration",
8
  "weight_type": "Original",
9
  "status": "FAILED",
 
3
  "base_model": "",
4
  "revision": "452d74ce28ac4a7f211d6ba3ef0717027f7a8074",
5
  "precision": "bfloat16",
6
+ "params": -1,
7
  "architectures": "T5ForConditionalGeneration",
8
  "weight_type": "Original",
9
  "status": "FAILED",
langgptai/Qwen-las-v0.1_eval_request_False_bfloat16_Adapter.json CHANGED
@@ -3,7 +3,7 @@
3
  "base_model": "Qwen/Qwen1.5-4B-Chat",
4
  "revision": "b6c6c48f777668ca93a19487ec71a4e8f2473dd7",
5
  "precision": "bfloat16",
6
- "params": 0,
7
  "architectures": "?",
8
  "weight_type": "Adapter",
9
  "status": "FAILED",
 
3
  "base_model": "Qwen/Qwen1.5-4B-Chat",
4
  "revision": "b6c6c48f777668ca93a19487ec71a4e8f2473dd7",
5
  "precision": "bfloat16",
6
+ "params": -1,
7
  "architectures": "?",
8
  "weight_type": "Adapter",
9
  "status": "FAILED",
leejaymin/etri-ones-solar_eval_request_False_float16_Delta.json CHANGED
@@ -3,7 +3,7 @@
3
  "base_model": "upstage/SOLAR-10.7B-v1.0",
4
  "revision": "40f78570d3ffb88649a52e4933a95525871ba599",
5
  "precision": "float16",
6
- "params": 0,
7
  "architectures": "LlamaForCausalLM",
8
  "weight_type": "Delta",
9
  "status": "FAILED",
 
3
  "base_model": "upstage/SOLAR-10.7B-v1.0",
4
  "revision": "40f78570d3ffb88649a52e4933a95525871ba599",
5
  "precision": "float16",
6
+ "params": -1,
7
  "architectures": "LlamaForCausalLM",
8
  "weight_type": "Delta",
9
  "status": "FAILED",
pankajmathur/model_007_eval_request_False_bfloat16_Original.json CHANGED
@@ -3,7 +3,7 @@
3
  "base_model": "",
4
  "revision": "c4261ec1043c2c27d844bdfdac3c6d24baf4e85c",
5
  "precision": "bfloat16",
6
- "params": 0,
7
  "architectures": "LlamaForCausalLM",
8
  "weight_type": "Original",
9
  "status": "FAILED",
 
3
  "base_model": "",
4
  "revision": "c4261ec1043c2c27d844bdfdac3c6d24baf4e85c",
5
  "precision": "bfloat16",
6
+ "params": -1,
7
  "architectures": "LlamaForCausalLM",
8
  "weight_type": "Original",
9
  "status": "FAILED",
pankajmathur/model_007_preview_eval_request_False_bfloat16_Original.json CHANGED
@@ -3,7 +3,7 @@
3
  "base_model": "",
4
  "revision": "b138ff68415f86ee8a781e747b325718d7827d6b",
5
  "precision": "bfloat16",
6
- "params": 0,
7
  "architectures": "LlamaForCausalLM",
8
  "weight_type": "Original",
9
  "status": "FAILED",
 
3
  "base_model": "",
4
  "revision": "b138ff68415f86ee8a781e747b325718d7827d6b",
5
  "precision": "bfloat16",
6
+ "params": -1,
7
  "architectures": "LlamaForCausalLM",
8
  "weight_type": "Original",
9
  "status": "FAILED",
pankajmathur/model_009_eval_request_False_bfloat16_Original.json CHANGED
@@ -3,7 +3,7 @@
3
  "base_model": "",
4
  "revision": "6f3324d49125daffe68ae30ec56f2d9fdae233f7",
5
  "precision": "bfloat16",
6
- "params": 0,
7
  "architectures": "LlamaForCausalLM",
8
  "weight_type": "Original",
9
  "status": "FAILED",
 
3
  "base_model": "",
4
  "revision": "6f3324d49125daffe68ae30ec56f2d9fdae233f7",
5
  "precision": "bfloat16",
6
+ "params": -1,
7
  "architectures": "LlamaForCausalLM",
8
  "weight_type": "Original",
9
  "status": "FAILED",
pankajmathur/model_101_eval_request_False_bfloat16_Original.json CHANGED
@@ -3,7 +3,7 @@
3
  "base_model": "",
4
  "revision": "3be9263c488692cd037bdddafcaf484d6acc54af",
5
  "precision": "bfloat16",
6
- "params": 0,
7
  "architectures": "LlamaForCausalLM",
8
  "weight_type": "Original",
9
  "status": "FAILED",
 
3
  "base_model": "",
4
  "revision": "3be9263c488692cd037bdddafcaf484d6acc54af",
5
  "precision": "bfloat16",
6
+ "params": -1,
7
  "architectures": "LlamaForCausalLM",
8
  "weight_type": "Original",
9
  "status": "FAILED",
pankajmathur/model_420_eval_request_False_bfloat16_Original.json CHANGED
@@ -3,7 +3,7 @@
3
  "base_model": "",
4
  "revision": "bbd7997e1fd3955c5b67ade070a4cf80988624d2",
5
  "precision": "bfloat16",
6
- "params": 0,
7
  "architectures": "LlamaForCausalLM",
8
  "weight_type": "Original",
9
  "status": "FAILED",
 
3
  "base_model": "",
4
  "revision": "bbd7997e1fd3955c5b67ade070a4cf80988624d2",
5
  "precision": "bfloat16",
6
+ "params": -1,
7
  "architectures": "LlamaForCausalLM",
8
  "weight_type": "Original",
9
  "status": "FAILED",
pankajmathur/model_420_preview_eval_request_False_bfloat16_Original.json CHANGED
@@ -3,7 +3,7 @@
3
  "base_model": "",
4
  "revision": "fc4e7e9167dc130c649734772e94dd93e838c8c1",
5
  "precision": "bfloat16",
6
- "params": 0,
7
  "architectures": "LlamaForCausalLM",
8
  "weight_type": "Original",
9
  "status": "FAILED",
 
3
  "base_model": "",
4
  "revision": "fc4e7e9167dc130c649734772e94dd93e838c8c1",
5
  "precision": "bfloat16",
6
+ "params": -1,
7
  "architectures": "LlamaForCausalLM",
8
  "weight_type": "Original",
9
  "status": "FAILED",
pankajmathur/model_51_eval_request_False_bfloat16_Original.json CHANGED
@@ -3,7 +3,7 @@
3
  "base_model": "",
4
  "revision": "c285b3dd15327ffe294c4d7e4fbf80eafacd9808",
5
  "precision": "bfloat16",
6
- "params": 0,
7
  "architectures": "LlamaForCausalLM",
8
  "weight_type": "Original",
9
  "status": "FAILED",
 
3
  "base_model": "",
4
  "revision": "c285b3dd15327ffe294c4d7e4fbf80eafacd9808",
5
  "precision": "bfloat16",
6
+ "params": -1,
7
  "architectures": "LlamaForCausalLM",
8
  "weight_type": "Original",
9
  "status": "FAILED",
qcymaq/finetune-llm-wukong_eval_request_False_4bit_Adapter.json CHANGED
@@ -3,7 +3,7 @@
3
  "base_model": "openai-community/gpt2-xl",
4
  "revision": "69d69b174236ac26c0893fa1b5a0847f8163e4d5",
5
  "precision": "4bit",
6
- "params": 0,
7
  "architectures": "?",
8
  "weight_type": "Adapter",
9
  "status": "RUNNING",
 
3
  "base_model": "openai-community/gpt2-xl",
4
  "revision": "69d69b174236ac26c0893fa1b5a0847f8163e4d5",
5
  "precision": "4bit",
6
+ "params": -1,
7
  "architectures": "?",
8
  "weight_type": "Adapter",
9
  "status": "RUNNING",
sci-m-wang/Phi-3-mini-4k-instruct-sa-v0.1_eval_request_False_bfloat16_Adapter.json CHANGED
@@ -3,7 +3,7 @@
3
  "base_model": "microsoft/Phi-3-mini-4k-instruct",
4
  "revision": "f63c0a59bed81cc4ddcfac250cc0ab746449288a",
5
  "precision": "bfloat16",
6
- "params": 0,
7
  "architectures": "?",
8
  "weight_type": "Adapter",
9
  "status": "FAILED",
 
3
  "base_model": "microsoft/Phi-3-mini-4k-instruct",
4
  "revision": "f63c0a59bed81cc4ddcfac250cc0ab746449288a",
5
  "precision": "bfloat16",
6
+ "params": -1,
7
  "architectures": "?",
8
  "weight_type": "Adapter",
9
  "status": "FAILED",
xDAN2099/xDAN-L1-Llama3-Self-Alignment-Collections-0627-epoch0.6_eval_request_False_bfloat16_Original.json CHANGED
@@ -3,7 +3,7 @@
3
  "base_model": "",
4
  "revision": "997b10e32ad866362059dfa9863bc2c223bf8e41",
5
  "precision": "bfloat16",
6
- "params": 0,
7
  "architectures": "LlamaForCausalLM",
8
  "weight_type": "Original",
9
  "status": "FAILED",
 
3
  "base_model": "",
4
  "revision": "997b10e32ad866362059dfa9863bc2c223bf8e41",
5
  "precision": "bfloat16",
6
+ "params": -1,
7
  "architectures": "LlamaForCausalLM",
8
  "weight_type": "Original",
9
  "status": "FAILED",