{
  "architectures": [
    "PerceiverForMaskedLM"
  ],
  "attention_probs_dropout_prob": 0.1,
  "audio_samples_per_frame": 1920,
  "cross_attention_shape_for_attention": "kv",
  "cross_attention_widening_factor": 1,
  "d_latents": 1280,
  "d_model": 768,
  "hidden_act": "gelu",
  "image_size": 56,
  "initializer_range": 0.02,
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 2048,
  "model_type": "perceiver",
  "num_blocks": 1,
  "num_cross_attention_heads": 8,
  "num_frames": 16,
  "num_latents": 256,
  "num_self_attends_per_block": 26,
  "num_self_attention_heads": 8,
  "output_shape": [
    1,
    16,
    224,
    224
  ],
  "qk_channels": 256,
  "samples_per_patch": 16,
  "self_attention_widening_factor": 1,
  "torch_dtype": "float32",
  "train_size": [
    368,
    496
  ],
  "transformers_version": "4.12.0.dev0",
  "use_query_residual": true,
  "v_channels": 1280,
  "vocab_size": 262
}
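
A minimal usage sketch, assuming the Hugging Face transformers library (v4.12 or later, per "transformers_version" above) and that this config is saved locally as config.json; the file path and variable names are illustrative:

from transformers import PerceiverConfig, PerceiverForMaskedLM

# Load the hyperparameters declared above into a PerceiverConfig object.
# (Assumes this file is saved as "config.json" in the working directory.)
config = PerceiverConfig.from_json_file("config.json")

# Build a randomly initialized PerceiverForMaskedLM from the config:
# 256 latents of width 1280, one block of 26 self-attention layers,
# and a 262-entry byte-level vocabulary.
model = PerceiverForMaskedLM(config)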