adamo1139 committed on
Commit
7ea1280
1 Parent(s): e0b5893

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +34 -3
README.md CHANGED
@@ -1,3 +1,34 @@
1
- ---
2
- license: apache-2.0
3
- ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ ---
4
+
5
+ Script used to create this model. llmcompressor was installed from source, since it depends on a change that had not yet been compiled into a release at the time.
6
+
7
+ ```
8
# Quantize Qwen2-VL-7B-Instruct to FP8 with dynamic activation scales
# using llmcompressor's one-shot PTQ flow, then save the result.
from transformers import AutoProcessor, Qwen2VLForConditionalGeneration

from llmcompressor.modifiers.quantization import QuantizationModifier
from llmcompressor.transformers import oneshot, wrap_hf_model_class

MODEL_ID = "Qwen/Qwen2-VL-7B-Instruct"

# Wrap the multimodal model class so llmcompressor can handle it, then
# load the checkpoint and its matching processor.
wrapped_cls = wrap_hf_model_class(Qwen2VLForConditionalGeneration)
model = wrapped_cls.from_pretrained(MODEL_ID, device_map="auto", torch_dtype="auto")
processor = AutoProcessor.from_pretrained(MODEL_ID)

# Simple PTQ recipe: FP8_DYNAMIC on every Linear layer, excluding the
# LM head and all `visual.*` modules from quantization.
ptq_recipe = QuantizationModifier(
    targets="Linear",
    scheme="FP8_DYNAMIC",
    ignore=["re:.*lm_head", "re:visual.*"],
)

# Run the one-shot quantization pass on the loaded model.
oneshot(model=model, recipe=ptq_recipe)

# Persist quantized weights and processor config side by side.
SAVE_DIR = MODEL_ID.split("/")[1] + "-FP8-Dynamic"
model.save_pretrained(SAVE_DIR)
processor.save_pretrained(SAVE_DIR)
34
+ ```