pythia-ggml / pythia-2.8b-f16.meta
Upload new model file: 'pythia-2.8b-f16.bin' (commit b607e6d)
{
  "model": "GptNeoX",
  "quantization": "F16",
  "quantization_version": "Not_Quantized",
  "container": "GGML",
  "converter": "llm-rs",
  "hash": "dfc00e8525dce7c447e100b814d6e38b171711462654e5659f231c6eda649eff",
  "base_model": "EleutherAI/pythia-2.8b"
}
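
A minimal sketch of how the "hash" field could be checked against the downloaded weights. This assumes the value is a SHA-256 digest of 'pythia-2.8b-f16.bin' (the 64-character hex string suggests SHA-256, but the metadata does not name the algorithm) and that both files sit in the current working directory; the file names and paths below are illustrative, not part of this repository's tooling.

import hashlib
import json

# Illustrative paths; adjust to wherever the files were downloaded.
META_PATH = "pythia-2.8b-f16.meta"
MODEL_PATH = "pythia-2.8b-f16.bin"

def sha256_of_file(path: str, chunk_size: int = 1 << 20) -> str:
    # Stream the file in chunks so a multi-gigabyte model never has to fit in memory.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

with open(META_PATH, "r", encoding="utf-8") as f:
    meta = json.load(f)

expected = meta["hash"]
actual = sha256_of_file(MODEL_PATH)
print("expected:", expected)
print("actual:  ", actual)
print("match" if actual == expected else "MISMATCH")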