quant_stage:
  quant_modifiers:
    GPTQModifier:
      sequential_update: false
      dampening_frac: 0.01        # fraction of the Hessian diagonal added for numerical stability
      block_size: 128             # GPTQ column block size for weight updates
      ignore: [lm_head]           # leave the LM head unquantized
      config_groups:
        group_0:
          targets: [Linear]       # apply to Linear layers
          # 4-bit symmetric integer weights, group-wise scales with group size 128
          weights: {num_bits: 4, type: int, symmetric: true, strategy: group, group_size: 128}
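# ---
# Minimal usage sketch (not part of the original recipe): this appears to be an
# llm-compressor recipe, which would typically be passed to its `oneshot` entry
# point together with a model and a calibration dataset. The model path, dataset
# name, and output directory below are placeholders.
#
#   from llmcompressor.transformers import oneshot
#
#   oneshot(
#       model="path/to/model",          # placeholder: model to quantize
#       dataset="open_platypus",        # placeholder: calibration dataset
#       recipe="recipe.yaml",           # this file
#       num_calibration_samples=512,
#       max_seq_length=2048,
#       output_dir="model-quantized",   # placeholder output directory
#   )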