test_stage:
  quant_modifiers:
    vLLMQuantizationModifier:
      ignore: [lm_head]
      config_groups:
        group_0:
          weights: {num_bits: 8, type: int, symmetric: true, strategy: tensor}
          input_activations: {num_bits: 8, type: int, symmetric: true, strategy: tensor}
          targets: [Linear]
    SparseGPTModifier: {sparsity: 0.0, quantize: true, sequential_update: false}
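# Usage sketch (assumption, kept as comments so the recipe stays valid YAML):
# a recipe like this is normally applied one-shot through the surrounding
# SparseML / llm-compressor tooling, roughly along the lines of
#
#   from sparseml.transformers import oneshot   # import path varies by version
#   oneshot(model="<model_stub>", recipe="<path_to_this_recipe>.yaml",
#           dataset="<calibration_dataset>", output_dir="<output_dir>")
#
# The exact entry point, import path, and argument names depend on the
# installed library version; the placeholders above are illustrative only.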