{ "_name_or_path": "apple/mobilevitv2-1.0-imagenet1k-256", "architectures": [ "MobileViTV2ForImageClassification" ], "aspp_dropout_prob": 0.1, "aspp_out_channels": 512, "atrous_rates": [ 6, 12, 18 ], "attn_dropout": 0.0, "base_attn_unit_dims": [ 128, 192, 256 ], "classifier_dropout_prob": 0.1, "conv_kernel_size": 3, "expand_ratio": 2.0, "ffn_dropout": 0.0, "ffn_multiplier": 2, "hidden_act": "swish", "id2label": { "0": "Forest", "1": "AnnualCrop", "2": "Highway", "3": "PermanentCrop", "4": "Industrial", "5": "River", "6": "SeaLake", "7": "HerbaceousVegetation", "8": "Pasture", "9": "Residential" }, "image_size": 64, "initializer_range": 0.02, "label2id": { "AnnualCrop": 1, "Forest": 0, "HerbaceousVegetation": 7, "Highway": 2, "Industrial": 4, "Pasture": 8, "PermanentCrop": 3, "Residential": 9, "River": 5, "SeaLake": 6 }, "layer_norm_eps": 1e-05, "mlp_ratio": 2.0, "model_type": "mobilevitv2", "n_attn_blocks": [ 2, 4, 3 ], "num_channels": 13, "output_stride": 32, "patch_size": 2, "problem_type": "single_label_classification", "semantic_loss_ignore_index": 255, "torch_dtype": "float32", "transformers_version": "4.37.2", "width_multiplier": 1.0 }