{
  "_name_or_path": "/data5/zww//Llama-3.2-90B-Vision-Instruct",
  "architectures": [
    "MllamaForConditionalGeneration"
  ],
  "image_token_index": 128256,
  "model_type": "mllama",
  "quantization_config": {
    "amp": true,
    "autoround_version": "0.4.0.dev",
    "backend": "auto_round:gptq:exllamav2",
    "batch_size": 1,
    "bits": 4,
    "data_type": "int",
    "dataset": "liuhaotian/llava",
    "enable_minmax_tuning": true,
    "enable_norm_bias_tuning": false,
    "enable_quanted_input": true,
    "gradient_accumulate_steps": 8,
    "group_size": 128,
    "iters": 200,
    "low_gpu_mem_usage": true,
    "lr": 0.005,
    "minmax_lr": 0.005,
    "nsamples": 512,
    "quant_method": "intel/auto-round",
    "scale_dtype": "torch.float16",
    "seqlen": 512,
    "sym": true,
    "to_quant_block_names": [
      [
        "language_model.model.layers.0",
        "language_model.model.layers.1",
        "language_model.model.layers.2",
        "language_model.model.layers.3",
        "language_model.model.layers.4",
        "language_model.model.layers.5",
        "language_model.model.layers.6",
        "language_model.model.layers.7",
        "language_model.model.layers.8",
        "language_model.model.layers.9",
        "language_model.model.layers.10",
        "language_model.model.layers.11",
        "language_model.model.layers.12",
        "language_model.model.layers.13",
        "language_model.model.layers.14",
        "language_model.model.layers.15",
        "language_model.model.layers.16",
        "language_model.model.layers.17",
        "language_model.model.layers.18",
        "language_model.model.layers.19",
        "language_model.model.layers.20",
        "language_model.model.layers.21",
        "language_model.model.layers.22",
        "language_model.model.layers.23",
        "language_model.model.layers.24",
        "language_model.model.layers.25",
        "language_model.model.layers.26",
        "language_model.model.layers.27",
        "language_model.model.layers.28",
        "language_model.model.layers.29",
        "language_model.model.layers.30",
        "language_model.model.layers.31",
        "language_model.model.layers.32",
        "language_model.model.layers.33",
        "language_model.model.layers.34",
        "language_model.model.layers.35",
        "language_model.model.layers.36",
        "language_model.model.layers.37",
        "language_model.model.layers.38",
        "language_model.model.layers.39",
        "language_model.model.layers.40",
        "language_model.model.layers.41",
        "language_model.model.layers.42",
        "language_model.model.layers.43",
        "language_model.model.layers.44",
        "language_model.model.layers.45",
        "language_model.model.layers.46",
        "language_model.model.layers.47",
        "language_model.model.layers.48",
        "language_model.model.layers.49",
        "language_model.model.layers.50",
        "language_model.model.layers.51",
        "language_model.model.layers.52",
        "language_model.model.layers.53",
        "language_model.model.layers.54",
        "language_model.model.layers.55",
        "language_model.model.layers.56",
        "language_model.model.layers.57",
        "language_model.model.layers.58",
        "language_model.model.layers.59",
        "language_model.model.layers.60",
        "language_model.model.layers.61",
        "language_model.model.layers.62",
        "language_model.model.layers.63",
        "language_model.model.layers.64",
        "language_model.model.layers.65",
        "language_model.model.layers.66",
        "language_model.model.layers.67",
        "language_model.model.layers.68",
        "language_model.model.layers.69",
        "language_model.model.layers.70",
        "language_model.model.layers.71",
        "language_model.model.layers.72",
        "language_model.model.layers.73",
        "language_model.model.layers.74",
        "language_model.model.layers.75",
        "language_model.model.layers.76",
        "language_model.model.layers.77",
        "language_model.model.layers.78",
        "language_model.model.layers.79",
        "language_model.model.layers.80",
        "language_model.model.layers.81",
        "language_model.model.layers.82",
        "language_model.model.layers.83",
        "language_model.model.layers.84",
        "language_model.model.layers.85",
        "language_model.model.layers.86",
        "language_model.model.layers.87",
        "language_model.model.layers.88",
        "language_model.model.layers.89",
        "language_model.model.layers.90",
        "language_model.model.layers.91",
        "language_model.model.layers.92",
        "language_model.model.layers.93",
        "language_model.model.layers.94",
        "language_model.model.layers.95",
        "language_model.model.layers.96",
        "language_model.model.layers.97",
        "language_model.model.layers.98",
        "language_model.model.layers.99"
      ]
    ]
  },
"text_config": {
"_attn_implementation_autoset": false,
"_name_or_path": "",
"add_cross_attention": false,
"architectures": null,
"bad_words_ids": null,
"begin_suppress_tokens": null,
"bos_token_id": 128000,
"chunk_size_feed_forward": 0,
"cross_attention_hidden_size": null,
"cross_attention_layers": [
3,
8,
13,
18,
23,
28,
33,
38,
43,
48,
53,
58,
63,
68,
73,
78,
83,
88,
93,
98
],
"decoder_start_token_id": null,
"diversity_penalty": 0.0,
"do_sample": false,
"dropout": 0,
"early_stopping": false,
"encoder_no_repeat_ngram_size": 0,
"eos_token_id": [
128001,
128008,
128009
],
"exponential_decay_length_penalty": null,
"finetuning_task": null,
"forced_bos_token_id": null,
"forced_eos_token_id": null,
"hidden_act": "silu",
"hidden_size": 8192,
"id2label": {
"0": "LABEL_0",
"1": "LABEL_1"
},
"initializer_range": 0.02,
"intermediate_size": 28672,
"is_decoder": false,
"is_encoder_decoder": false,
"label2id": {
"LABEL_0": 0,
"LABEL_1": 1
},
"length_penalty": 1.0,
"max_length": 20,
"max_position_embeddings": 131072,
"min_length": 0,
"model_type": "mllama_text_model",
"no_repeat_ngram_size": 0,
"num_attention_heads": 64,
"num_beam_groups": 1,
"num_beams": 1,
"num_hidden_layers": 100,
"num_key_value_heads": 8,
"num_return_sequences": 1,
"output_attentions": false,
"output_hidden_states": false,
"output_scores": false,
"pad_token_id": 128004,
"prefix": null,
"problem_type": null,
"pruned_heads": {},
"remove_invalid_values": false,
"repetition_penalty": 1.0,
"return_dict": true,
"return_dict_in_generate": false,
"rms_norm_eps": 1e-05,
"rope_scaling": {
"factor": 8.0,
"high_freq_factor": 4.0,
"low_freq_factor": 1.0,
"original_max_position_embeddings": 8192,
"rope_type": "llama3"
},
"rope_theta": 500000.0,
"sep_token_id": null,
"suppress_tokens": null,
"task_specific_params": null,
"temperature": 1.0,
"tf_legacy_loss": false,
"tie_encoder_decoder": false,
"tie_word_embeddings": false,
"tokenizer_class": null,
"top_k": 50,
"top_p": 1.0,
"torch_dtype": "bfloat16",
"torchscript": false,
"typical_p": 1.0,
"use_bfloat16": false,
"use_cache": true,
"vocab_size": 128256
},
"torch_dtype": "bfloat16",
"transformers_version": "4.46.1",
"vision_config": {
"_attn_implementation_autoset": false,
"_name_or_path": "",
"add_cross_attention": false,
"architectures": null,
"attention_heads": 16,
"bad_words_ids": null,
"begin_suppress_tokens": null,
"bos_token_id": null,
"chunk_size_feed_forward": 0,
"cross_attention_hidden_size": null,
"decoder_start_token_id": null,
"diversity_penalty": 0.0,
"do_sample": false,
"early_stopping": false,
"encoder_no_repeat_ngram_size": 0,
"eos_token_id": null,
"exponential_decay_length_penalty": null,
"finetuning_task": null,
"forced_bos_token_id": null,
"forced_eos_token_id": null,
"hidden_act": "gelu",
"hidden_size": 1280,
"id2label": {
"0": "LABEL_0",
"1": "LABEL_1"
},
"image_size": 560,
"initializer_range": 0.02,
"intermediate_layers_indices": [
3,
7,
15,
23,
30
],
"intermediate_size": 5120,
"is_decoder": false,
"is_encoder_decoder": false,
"label2id": {
"LABEL_0": 0,
"LABEL_1": 1
},
"length_penalty": 1.0,
"max_length": 20,
"max_num_tiles": 4,
"min_length": 0,
"model_type": "mllama_vision_model",
"no_repeat_ngram_size": 0,
"norm_eps": 1e-05,
"num_beam_groups": 1,
"num_beams": 1,
"num_channels": 3,
"num_global_layers": 8,
"num_hidden_layers": 32,
"num_return_sequences": 1,
"output_attentions": false,
"output_hidden_states": false,
"output_scores": false,
"pad_token_id": null,
"patch_size": 14,
"prefix": null,
"problem_type": null,
"pruned_heads": {},
"remove_invalid_values": false,
"repetition_penalty": 1.0,
"return_dict": true,
"return_dict_in_generate": false,
"sep_token_id": null,
"supported_aspect_ratios": [
[
1,
1
],
[
1,
2
],
[
1,
3
],
[
1,
4
],
[
2,
1
],
[
2,
2
],
[
3,
1
],
[
4,
1
]
],
"suppress_tokens": null,
"task_specific_params": null,
"temperature": 1.0,
"tf_legacy_loss": false,
"tie_encoder_decoder": false,
"tie_word_embeddings": true,
"tokenizer_class": null,
"top_k": 50,
"top_p": 1.0,
"torch_dtype": "bfloat16",
"torchscript": false,
"typical_p": 1.0,
"use_bfloat16": false,
"vision_output_dim": 7680
}
}
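
The `"quant_method": "intel/auto-round"` and `"backend": "auto_round:gptq:exllamav2"` fields mark this as an AutoRound 4-bit checkpoint served through GPTQ ExLlamaV2 kernels, so loading it needs the `auto-round` package alongside `transformers` (4.46.1 per `transformers_version`) and a CUDA device. A minimal inference sketch, assuming a hypothetical repo id `OPEA/Llama-3.2-90B-Vision-Instruct-int4` (substitute the actual repository name):

```python
from auto_round import AutoRoundConfig  # importing this registers the "intel/auto-round" quant_method with transformers
from PIL import Image
from transformers import AutoProcessor, MllamaForConditionalGeneration

model_id = "OPEA/Llama-3.2-90B-Vision-Instruct-int4"  # hypothetical id for illustration
model = MllamaForConditionalGeneration.from_pretrained(
    model_id,
    torch_dtype="auto",   # picks up the bfloat16 dtype recorded in this config
    device_map="auto",    # the exllamav2 backend requires CUDA GPUs
)
processor = AutoProcessor.from_pretrained(model_id)

image = Image.open("example.jpg")  # any local test image
messages = [
    {"role": "user", "content": [
        {"type": "image"},
        {"type": "text", "text": "Describe this image in one sentence."},
    ]}
]
prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
inputs = processor(image, prompt, add_special_tokens=False, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=64)
print(processor.decode(output[0], skip_special_tokens=True))
```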
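A back-of-envelope note on the `text_config` values (my arithmetic, not a figure from the model card): 20 of the 100 decoder layers are cross-attention layers (every fifth index in `cross_attention_layers`), and grouped-query attention with 8 KV heads serving 64 query heads keeps the per-token self-attention KV cache modest for a 90B-parameter model:

```python
# KV-cache size per generated token implied by text_config (self-attention
# layers only; cross-attention K/V are computed once per image, not per token).
hidden_size = 8192
num_attention_heads = 64
num_key_value_heads = 8                                   # GQA: 8 KV heads for 64 query heads
self_attn_layers = 100 - 20                               # 100 layers minus 20 cross-attention layers

head_dim = hidden_size // num_attention_heads             # 8192 / 64 = 128
bytes_per_value = 2                                       # bfloat16
kv_per_token = self_attn_layers * 2 * num_key_value_heads * head_dim * bytes_per_value
print(f"{kv_per_token / 1024:.0f} KiB per token")         # 320 KiB
```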
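The `vision_config` numbers are similarly self-consistent: a 560x560 tile split into 14x14 patches yields 1600 patch tokens (up to `max_num_tiles` = 4 tiles per image), and `vision_output_dim` = 7680 equals `hidden_size` x 6, consistent with the final hidden state being concatenated with the five `intermediate_layers_indices` features. The concatenation detail is my reading of the mllama architecture, so treat this check as an assumption:

```python
# Shape check derived from vision_config; the 6-way feature-concatenation
# interpretation is an assumption, not documented in this file.
image_size, patch_size = 560, 14
patches_per_tile = (image_size // patch_size) ** 2        # 40 * 40 = 1600
hidden_size = 1280
num_intermediate = 5                                      # len(intermediate_layers_indices)
vision_output_dim = hidden_size * (num_intermediate + 1)  # 1280 * 6 = 7680
print(patches_per_tile, vision_output_dim)                # 1600 7680 (matches the config)
```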