<?xml version="1.0"?>
<net name="Model3" version="11">
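	<!-- Annotation: this IR is a small MLP: Linear(4096 -> 3072) + bias, Gelu (erf),
	     Linear(3072 -> 3072) + bias. Batch and sequence dimensions are dynamic (dim = -1).
	     Weights are stored as per-channel symmetric INT8 (see the nncf section in rt_info)
	     and decompressed back to floating point by the Convert/Multiply subgraphs below. -->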
	<layers>
		<layer id="0" name="input" type="Parameter" version="opset1">
			<data shape="?,?,4096" element_type="f32" />
			<output>
				<port id="0" precision="FP32" names="input">
					<dim>-1</dim>
					<dim>-1</dim>
					<dim>4096</dim>
				</port>
			</output>
		</layer>
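		<!-- Layers 1-5: weight decompression for self.0.weight. The INT8 constant is
		     converted to f16, multiplied by a per-output-channel scale (shape 3072x1,
		     broadcast over the 4096 input channels), then converted to f32 for the MatMul. -->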
		<layer id="1" name="self.0.weight" type="Const" version="opset1">
			<data element_type="i8" shape="3072, 4096" offset="0" size="12582912" />
			<output>
				<port id="0" precision="I8">
					<dim>3072</dim>
					<dim>4096</dim>
				</port>
			</output>
		</layer>
		<layer id="2" name="Convert_166582" type="Convert" version="opset1">
			<data destination_type="f16" />
			<input>
				<port id="0" precision="I8">
					<dim>3072</dim>
					<dim>4096</dim>
				</port>
			</input>
			<output>
				<port id="1" precision="FP16">
					<dim>3072</dim>
					<dim>4096</dim>
				</port>
			</output>
		</layer>
		<layer id="3" name="self.0.weight/scale" type="Const" version="opset1">
			<data element_type="f16" shape="3072, 1" offset="12582912" size="6144" />
			<output>
				<port id="0" precision="FP16">
					<dim>3072</dim>
					<dim>1</dim>
				</port>
			</output>
		</layer>
		<layer id="4" name="self.0.weight/fq_weights_1" type="Multiply" version="opset1">
			<data auto_broadcast="numpy" />
			<input>
				<port id="0" precision="FP16">
					<dim>3072</dim>
					<dim>4096</dim>
				</port>
				<port id="1" precision="FP16">
					<dim>3072</dim>
					<dim>1</dim>
				</port>
			</input>
			<output>
				<port id="2" precision="FP16">
					<dim>3072</dim>
					<dim>4096</dim>
				</port>
			</output>
		</layer>
		<layer id="5" name="self.0.weight/fq_weights_1/convert" type="Convert" version="opset1">
			<data destination_type="f32" />
			<input>
				<port id="0" precision="FP16">
					<dim>3072</dim>
					<dim>4096</dim>
				</port>
			</input>
			<output>
				<port id="1" precision="FP32">
					<dim>3072</dim>
					<dim>4096</dim>
				</port>
			</output>
		</layer>
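		<!-- transpose_b="true": the weight is stored as [out, in] = [3072, 4096],
		     so the MatMul computes input x weight^T, giving [-1, -1, 3072]. -->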
		<layer id="6" name="__module.0/aten::linear/MatMul" type="MatMul" version="opset1">
			<data transpose_a="false" transpose_b="true" />
			<input>
				<port id="0" precision="FP32">
					<dim>-1</dim>
					<dim>-1</dim>
					<dim>4096</dim>
				</port>
				<port id="1" precision="FP32">
					<dim>3072</dim>
					<dim>4096</dim>
				</port>
			</input>
			<output>
				<port id="2" precision="FP32">
					<dim>-1</dim>
					<dim>-1</dim>
					<dim>3072</dim>
				</port>
			</output>
		</layer>
		<layer id="7" name="Constant_110947" type="Const" version="opset1">
			<data element_type="f32" shape="1, 1, 3072" offset="12589056" size="12288" />
			<output>
				<port id="0" precision="FP32">
					<dim>1</dim>
					<dim>1</dim>
					<dim>3072</dim>
				</port>
			</output>
		</layer>
		<layer id="8" name="__module.0/aten::linear/Add" type="Add" version="opset1">
			<data auto_broadcast="numpy" />
			<input>
				<port id="0" precision="FP32">
					<dim>-1</dim>
					<dim>-1</dim>
					<dim>3072</dim>
				</port>
				<port id="1" precision="FP32">
					<dim>1</dim>
					<dim>1</dim>
					<dim>3072</dim>
				</port>
			</input>
			<output>
				<port id="2" precision="FP32" names="11">
					<dim>-1</dim>
					<dim>-1</dim>
					<dim>3072</dim>
				</port>
			</output>
		</layer>
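		<!-- approximation_mode="ERF": the exact erf-based Gelu, not the tanh approximation. -->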
		<layer id="9" name="__module.1/aten::gelu/Gelu" type="Gelu" version="opset7">
			<data approximation_mode="ERF" />
			<input>
				<port id="0" precision="FP32">
					<dim>-1</dim>
					<dim>-1</dim>
					<dim>3072</dim>
				</port>
			</input>
			<output>
				<port id="1" precision="FP32" names="13">
					<dim>-1</dim>
					<dim>-1</dim>
					<dim>3072</dim>
				</port>
			</output>
		</layer>
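		<!-- Layers 10-14: the same INT8 decompression pattern, applied to the second
		     Linear weight self.2.weight (3072x3072). -->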
		<layer id="10" name="self.2.weight" type="Const" version="opset1">
			<data element_type="i8" shape="3072, 3072" offset="12601344" size="9437184" />
			<output>
				<port id="0" precision="I8">
					<dim>3072</dim>
					<dim>3072</dim>
				</port>
			</output>
		</layer>
		<layer id="11" name="Convert_166589" type="Convert" version="opset1">
			<data destination_type="f16" />
			<input>
				<port id="0" precision="I8">
					<dim>3072</dim>
					<dim>3072</dim>
				</port>
			</input>
			<output>
				<port id="1" precision="FP16">
					<dim>3072</dim>
					<dim>3072</dim>
				</port>
			</output>
		</layer>
		<layer id="12" name="self.2.weight/scale" type="Const" version="opset1">
			<data element_type="f16" shape="3072, 1" offset="22038528" size="6144" />
			<output>
				<port id="0" precision="FP16">
					<dim>3072</dim>
					<dim>1</dim>
				</port>
			</output>
		</layer>
		<layer id="13" name="self.2.weight/fq_weights_1" type="Multiply" version="opset1">
			<data auto_broadcast="numpy" />
			<input>
				<port id="0" precision="FP16">
					<dim>3072</dim>
					<dim>3072</dim>
				</port>
				<port id="1" precision="FP16">
					<dim>3072</dim>
					<dim>1</dim>
				</port>
			</input>
			<output>
				<port id="2" precision="FP16">
					<dim>3072</dim>
					<dim>3072</dim>
				</port>
			</output>
		</layer>
		<layer id="14" name="self.2.weight/fq_weights_1/convert" type="Convert" version="opset1">
			<data destination_type="f32" />
			<input>
				<port id="0" precision="FP16">
					<dim>3072</dim>
					<dim>3072</dim>
				</port>
			</input>
			<output>
				<port id="1" precision="FP32">
					<dim>3072</dim>
					<dim>3072</dim>
				</port>
			</output>
		</layer>
		<layer id="15" name="__module.2/aten::linear/MatMul" type="MatMul" version="opset1">
			<data transpose_a="false" transpose_b="true" />
			<input>
				<port id="0" precision="FP32">
					<dim>-1</dim>
					<dim>-1</dim>
					<dim>3072</dim>
				</port>
				<port id="1" precision="FP32">
					<dim>3072</dim>
					<dim>3072</dim>
				</port>
			</input>
			<output>
				<port id="2" precision="FP32">
					<dim>-1</dim>
					<dim>-1</dim>
					<dim>3072</dim>
				</port>
			</output>
		</layer>
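		<!-- Layers 16-18: second bias constant, bias add producing the tensor named
		     "last_hidden_state", and the graph Result. -->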
		<layer id="16" name="Constant_110948" type="Const" version="opset1">
			<data element_type="f32" shape="1, 1, 3072" offset="22044672" size="12288" />
			<output>
				<port id="0" precision="FP32">
					<dim>1</dim>
					<dim>1</dim>
					<dim>3072</dim>
				</port>
			</output>
		</layer>
		<layer id="17" name="__module.2/aten::linear/Add" type="Add" version="opset1">
			<data auto_broadcast="numpy" />
			<input>
				<port id="0" precision="FP32">
					<dim>-1</dim>
					<dim>-1</dim>
					<dim>3072</dim>
				</port>
				<port id="1" precision="FP32">
					<dim>1</dim>
					<dim>1</dim>
					<dim>3072</dim>
				</port>
			</input>
			<output>
				<port id="2" precision="FP32" names="last_hidden_state">
					<dim>-1</dim>
					<dim>-1</dim>
					<dim>3072</dim>
				</port>
			</output>
		</layer>
		<layer id="18" name="Result_109307" type="Result" version="opset1">
			<input>
				<port id="0" precision="FP32">
					<dim>-1</dim>
					<dim>-1</dim>
					<dim>3072</dim>
				</port>
			</input>
		</layer>
	</layers>
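	<!-- Each edge wires from-layer/from-port to to-layer/to-port, connecting the
	     dataflow between the layers declared above. -->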
	<edges>
		<edge from-layer="0" from-port="0" to-layer="6" to-port="0" />
		<edge from-layer="1" from-port="0" to-layer="2" to-port="0" />
		<edge from-layer="2" from-port="1" to-layer="4" to-port="0" />
		<edge from-layer="3" from-port="0" to-layer="4" to-port="1" />
		<edge from-layer="4" from-port="2" to-layer="5" to-port="0" />
		<edge from-layer="5" from-port="1" to-layer="6" to-port="1" />
		<edge from-layer="6" from-port="2" to-layer="8" to-port="0" />
		<edge from-layer="7" from-port="0" to-layer="8" to-port="1" />
		<edge from-layer="8" from-port="2" to-layer="9" to-port="0" />
		<edge from-layer="9" from-port="1" to-layer="15" to-port="0" />
		<edge from-layer="10" from-port="0" to-layer="11" to-port="0" />
		<edge from-layer="11" from-port="1" to-layer="13" to-port="0" />
		<edge from-layer="12" from-port="0" to-layer="13" to-port="1" />
		<edge from-layer="13" from-port="2" to-layer="14" to-port="0" />
		<edge from-layer="14" from-port="1" to-layer="15" to-port="1" />
		<edge from-layer="15" from-port="2" to-layer="17" to-port="0" />
		<edge from-layer="16" from-port="0" to-layer="17" to-port="1" />
		<edge from-layer="17" from-port="2" to-layer="18" to-port="0" />
	</edges>
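	<!-- rt_info records the toolchain versions and the NNCF weight-compression
	     settings used at export: mode int8_sym, ratio 1.0, group_size -1
	     (i.e. per-channel scales), with int8_asym as the backup mode. -->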
	<rt_info>
		<Runtime_version value="2025.0.0-17908-513dcc5c7b7-releases/2025/0" />
		<conversion_parameters>
			<framework value="pytorch" />
			<is_python_object value="True" />
		</conversion_parameters>
		<nncf>
			<friendly_names_were_updated value="True" />
			<weight_compression>
				<advanced_parameters value="{'statistics_path': None, 'awq_params': {'subset_size': 32, 'percent_to_apply': 0.002, 'alpha_min': 0.0, 'alpha_max': 1.0, 'steps': 100}, 'scale_estimation_params': {'subset_size': 64, 'initial_steps': 5, 'scale_steps': 5, 'weight_penalty': -1.0}, 'gptq_params': {'damp_percent': 0.1, 'block_size': 128, 'subset_size': 128}, 'lora_correction_params': {'adapter_rank': 8, 'num_iterations': 3, 'apply_regularization': True, 'subset_size': 128, 'use_int8_adapters': True}}" />
				<all_layers value="False" />
				<awq value="False" />
				<backup_mode value="int8_asym" />
				<gptq value="False" />
				<group_size value="-1" />
				<ignored_scope value="[]" />
				<lora_correction value="False" />
				<mode value="int8_sym" />
				<ratio value="1.0" />
				<scale_estimation value="False" />
				<sensitivity_metric value="weight_quantization_error" />
			</weight_compression>
		</nncf>
		<optimum>
			<optimum_intel_version value="1.22.0.dev0+e465c7f7" />
			<optimum_version value="1.24.0.dev0" />
			<pytorch_version value="2.5.1+cpu" />
			<transformers_version value="4.47.0" />
		</optimum>
	</rt_info>
</net>