---
base_model:
  - nvidia/NVLM-D-72B
---

Tested using this vLLM PR: https://github.com/vllm-project/vllm/pull/9045
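
For reference, here is a minimal inference sketch against a vLLM build that includes the PR above. The checkpoint id is an assumption: point it at the local `SAVE_DIR` produced by the script below or at the Hub repo where the quantized weights are published, and adjust `tensor_parallel_size` to your hardware.

```python
from vllm import LLM, SamplingParams

# Assumptions: "NVLM-D-72B-FP8-dynamic" is the directory written by the
# creation script below (or the published Hub repo id), and 4 GPUs are
# available for tensor parallelism.
llm = LLM(
    model="NVLM-D-72B-FP8-dynamic",
    trust_remote_code=True,
    tensor_parallel_size=4,
)

params = SamplingParams(temperature=0.0, max_tokens=64)
outputs = llm.generate(["What is the capital of France?"], params)
print(outputs[0].outputs[0].text)
```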

## Creation

```python
from transformers import AutoProcessor, AutoModelForCausalLM

from llmcompressor.modifiers.quantization import QuantizationModifier
from llmcompressor.transformers import oneshot, wrap_hf_model_class

MODEL_ID = "nvidia/NVLM-D-72B"

# Load model.
model_class = wrap_hf_model_class(AutoModelForCausalLM)
model = model_class.from_pretrained(MODEL_ID, device_map="auto", torch_dtype="auto", trust_remote_code=True)
processor = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True)

# Configure the quantization algorithm and scheme.
# In this case, we:
#   * quantize the weights to FP8 with per-channel scales via PTQ
#   * quantize the activations to FP8 with dynamic per-token scales
recipe = QuantizationModifier(
    targets="Linear",
    scheme="FP8_DYNAMIC",
    # Keep the LM head, the mlp1 projector, and the vision tower unquantized.
    ignore=["re:.*lm_head", "re:mlp1.*", "re:vision_model.*"],
)

# Apply quantization and save to disk in compressed-tensors format.
SAVE_DIR = MODEL_ID.split("/")[1] + "-FP8-dynamic"
oneshot(model=model, recipe=recipe, output_dir=SAVE_DIR)
processor.save_pretrained(SAVE_DIR)
```
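
As a quick sanity check, a sketch (assuming the compressed-tensors export records its scheme under `quantization_config` in the saved `config.json`) that confirms the FP8 dynamic recipe was applied as intended:

```python
from transformers import AutoConfig

# Load the saved config and print the recorded quantization scheme; the
# ignored modules (lm_head, mlp1.*, vision_model.*) should appear in its
# ignore list.
config = AutoConfig.from_pretrained(SAVE_DIR, trust_remote_code=True)
print(config.quantization_config)
```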