Commit c326ee0: Training in progress, step 2500, checkpoint
{
  "_name_or_path": "nvidia/mit-b5",
  "architectures": [
    "SegformerForSemanticSegmentation"
  ],
  "attention_probs_dropout_prob": 0.0,
  "classifier_dropout_prob": 0.1,
  "decoder_hidden_size": 768,
  "depths": [
    3,
    6,
    40,
    3
  ],
  "downsampling_rates": [
    1,
    4,
    8,
    16
  ],
  "drop_path_rate": 0.1,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.0,
  "hidden_sizes": [
    64,
    128,
    320,
    512
  ],
  "id2label": {
    "0": "background",
    "1": "double_plant",
    "2": "drydown",
    "3": "endrow",
    "4": "nutrient_deficiency",
    "5": "planter_skip",
    "6": "water",
    "7": "waterway",
    "8": "weed_cluster"
  },
  "image_size": 224,
  "initializer_range": 0.02,
  "label2id": {
    "background": 0,
    "double_plant": 1,
    "drydown": 2,
    "endrow": 3,
    "nutrient_deficiency": 4,
    "planter_skip": 5,
    "water": 6,
    "waterway": 7,
    "weed_cluster": 8
  },
  "layer_norm_eps": 1e-06,
  "mlp_ratios": [
    4,
    4,
    4,
    4
  ],
  "model_type": "segformer",
  "num_attention_heads": [
    1,
    2,
    5,
    8
  ],
  "num_channels": 4,
  "num_encoder_blocks": 4,
  "patch_sizes": [
    7,
    3,
    3,
    3
  ],
  "reshape_last_stage": true,
  "semantic_loss_ignore_index": 255,
  "sr_ratios": [
    8,
    4,
    2,
    1
  ],
  "strides": [
    4,
    2,
    2,
    2
  ],
  "torch_dtype": "float32",
  "transformers_version": "4.40.2"
}
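
A minimal sketch of how a config like the one above can be instantiated and exercised with the Hugging Face transformers API. This is not part of the checkpoint itself: the local path "config.json" and the 512x512 dummy input size are assumptions for illustration, and the model is built with random weights rather than the trained checkpoint. The 4-channel input matches "num_channels": 4, and the 9 classes come from the id2label mapping.

import torch
from transformers import SegformerConfig, SegformerForSemanticSegmentation

# Rebuild the architecture from a local copy of this config (hypothetical path).
config = SegformerConfig.from_json_file("config.json")
model = SegformerForSemanticSegmentation(config)
model.eval()

# Dummy batch: 1 image, 4 channels (e.g. RGB + NIR), 512x512 pixels (assumed size).
pixel_values = torch.randn(1, config.num_channels, 512, 512)

with torch.no_grad():
    outputs = model(pixel_values=pixel_values)

# SegFormer produces logits at 1/4 of the input resolution:
# shape (batch, num_labels, H/4, W/4), here (1, 9, 128, 128).
logits = outputs.logits
print(logits.shape)

# Upsample to the input size, take the per-pixel argmax, and map class indices
# back to names via id2label.
upsampled = torch.nn.functional.interpolate(
    logits, size=pixel_values.shape[-2:], mode="bilinear", align_corners=False
)
pred = upsampled.argmax(dim=1)[0]
print({idx: config.id2label[idx] for idx in pred.unique().tolist()})

Note that because the decode head emits logits at a quarter of the input resolution, the upsampling step above is needed before comparing predictions against full-resolution masks; "semantic_loss_ignore_index": 255 marks pixels excluded from the training loss.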