anicolson committed on
Commit
737a819
1 Parent(s): bdd2189

Upload model

Browse files
config.json CHANGED
@@ -78,7 +78,7 @@
78
  "top_p": 1.0,
79
  "torch_dtype": null,
80
  "torchscript": false,
81
- "transformers_version": "4.29.2",
82
  "type_vocab_size": 2,
83
  "typical_p": 1.0,
84
  "use_bfloat16": false,
@@ -2243,7 +2243,7 @@
2243
  "top_p": 1.0,
2244
  "torch_dtype": "float32",
2245
  "torchscript": false,
2246
- "transformers_version": "4.29.2",
2247
  "typical_p": 1.0,
2248
  "use_bfloat16": false
2249
  },
 
78
  "top_p": 1.0,
79
  "torch_dtype": null,
80
  "torchscript": false,
81
+ "transformers_version": "4.28.1",
82
  "type_vocab_size": 2,
83
  "typical_p": 1.0,
84
  "use_bfloat16": false,
 
2243
  "top_p": 1.0,
2244
  "torch_dtype": "float32",
2245
  "torchscript": false,
2246
+ "transformers_version": "4.28.1",
2247
  "typical_p": 1.0,
2248
  "use_bfloat16": false
2249
  },
generation_config.json CHANGED
@@ -1,5 +1,5 @@
1
  {
2
  "_from_model_config": true,
3
  "pad_token_id": 0,
4
- "transformers_version": "4.29.2"
5
  }
 
1
  {
2
  "_from_model_config": true,
3
  "pad_token_id": 0,
4
+ "transformers_version": "4.28.1"
5
  }
modelling_variable.py CHANGED
@@ -22,7 +22,7 @@ class CvtWithProjectionHeadConfig(transformers.CvtConfig):
22
 
23
 
24
  class ModelOutputWithProjectionEmbedding(transformers.modeling_outputs.ModelOutput):
25
- projected_last_hidden_state: torch.FloatTensor
26
  attention_mask: torch.FloatTensor
27
 
28
 
@@ -86,7 +86,7 @@ class VariableCvtWithProjectionHead(transformers.CvtPreTrainedModel):
86
  return projection
87
 
88
  return ModelOutputWithProjectionEmbedding(
89
- projected_last_hidden_state=projection, attention_mask=attention_mask,
90
  )
91
 
92
 
@@ -223,7 +223,7 @@ class VariableCXREncoderDecoderModel(VisionEncoderDecoderModel):
223
  decoder_hidden_states=decoder_outputs.hidden_states,
224
  decoder_attentions=decoder_outputs.attentions,
225
  cross_attentions=decoder_outputs.cross_attentions,
226
- encoder_last_hidden_state=encoder_outputs.projected_last_hidden_state,
227
  # encoder_hidden_states=encoder_outputs.hidden_states,
228
  # encoder_attentions=encoder_outputs.attentions,
229
  )
 
22
 
23
 
24
  class ModelOutputWithProjectionEmbedding(transformers.modeling_outputs.ModelOutput):
25
+ last_hidden_state: torch.FloatTensor
26
  attention_mask: torch.FloatTensor
27
 
28
 
 
86
  return projection
87
 
88
  return ModelOutputWithProjectionEmbedding(
89
+ last_hidden_state=projection, attention_mask=attention_mask,
90
  )
91
 
92
 
 
223
  decoder_hidden_states=decoder_outputs.hidden_states,
224
  decoder_attentions=decoder_outputs.attentions,
225
  cross_attentions=decoder_outputs.cross_attentions,
226
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
227
  # encoder_hidden_states=encoder_outputs.hidden_states,
228
  # encoder_attentions=encoder_outputs.attentions,
229
  )
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:9128f8c8767baf52490e0f1bc882cf80ee14434b4cc4844f433c49272cd9dd95
3
  size 449713809
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fd3d75dceeec1cb112f40a5b8acb031384617714e2e441947f8a1ef3bc5df878
3
  size 449713809