{
  "bits": 4,
  "group_size": 32,
  "damp_percent": 0.01,
  "desc_act": false,
  "static_groups": false,
  "sym": true,
  "true_sequential": true,
  "model_name_or_path": "quantized/line-corporation/japanese-large-lm-3.6b-instruction-sft/gptq-4bit-32g-actorder_False",
  "model_file_base_name": "gptq_model-4bit-32g"
}
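
This field set matches the `quantize_config.json` that the auto-gptq library writes alongside GPTQ-quantized weights: 4-bit symmetric quantization with one scale per group of 32 weights, 1% Hessian dampening during calibration, and no activation-order reordering (`desc_act: false`, matching the `actorder_False` suffix in the path). Below is a minimal sketch, assuming the auto-gptq library, of how the same settings are expressed in code and how the checkpoint would be loaded; the device choice is an assumption.

```python
from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig

# Mirror of the quantize_config.json above.
quantize_config = BaseQuantizeConfig(
    bits=4,                # weight precision: 4-bit integers
    group_size=32,         # one scale/zero-point per group of 32 weights
    damp_percent=0.01,     # Hessian dampening during GPTQ calibration
    desc_act=False,        # no activation-order reordering ("actorder_False")
    static_groups=False,
    sym=True,              # symmetric quantization range
    true_sequential=True,  # quantize transformer layers one at a time
)

# Loading the quantized checkpoint; from_quantized() reads the
# quantize_config.json in the directory, so passing the config
# object again is not required.
model = AutoGPTQForCausalLM.from_quantized(
    "quantized/line-corporation/japanese-large-lm-3.6b-instruction-sft/gptq-4bit-32g-actorder_False",
    model_basename="gptq_model-4bit-32g",  # matches model_file_base_name
    device="cuda:0",  # assumption: a single CUDA device is available
)
```

The relatively small `group_size` of 32 (versus the more common 128) means more quantization scales are stored, which generally improves accuracy at the cost of a slightly larger file.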