anicolson committed on
Commit
697b021
1 Parent(s): fd926d9

Upload model

Browse files
config.json CHANGED
@@ -78,7 +78,7 @@
78
  "top_p": 1.0,
79
  "torch_dtype": null,
80
  "torchscript": false,
81
- "transformers_version": "4.29.2",
82
  "type_vocab_size": 2,
83
  "typical_p": 1.0,
84
  "use_bfloat16": false,
@@ -2243,7 +2243,7 @@
2243
  "top_p": 1.0,
2244
  "torch_dtype": "float32",
2245
  "torchscript": false,
2246
- "transformers_version": "4.29.2",
2247
  "typical_p": 1.0,
2248
  "use_bfloat16": false
2249
  },
 
78
  "top_p": 1.0,
79
  "torch_dtype": null,
80
  "torchscript": false,
81
+ "transformers_version": "4.28.1",
82
  "type_vocab_size": 2,
83
  "typical_p": 1.0,
84
  "use_bfloat16": false,
 
2243
  "top_p": 1.0,
2244
  "torch_dtype": "float32",
2245
  "torchscript": false,
2246
+ "transformers_version": "4.28.1",
2247
  "typical_p": 1.0,
2248
  "use_bfloat16": false
2249
  },
generation_config.json CHANGED
@@ -1,5 +1,5 @@
1
  {
2
  "_from_model_config": true,
3
  "pad_token_id": 0,
4
- "transformers_version": "4.29.2"
5
  }
 
1
  {
2
  "_from_model_config": true,
3
  "pad_token_id": 0,
4
+ "transformers_version": "4.28.1"
5
  }
modelling_longitudinal.py CHANGED
@@ -1,11 +1,13 @@
1
  import os
 
2
  from typing import Any, Optional, Tuple, Union
3
 
4
  import torch
5
  import transformers
6
  from peft import LoraConfig, TaskType, get_peft_config, get_peft_model
7
  from torch.nn import CrossEntropyLoss
8
- from transformers import PreTrainedTokenizerFast, VisionEncoderDecoderModel
 
9
  from transformers.configuration_utils import PretrainedConfig
10
  from transformers.modeling_outputs import BaseModelOutput, Seq2SeqLMOutput
11
  from transformers.modeling_utils import PreTrainedModel
@@ -23,7 +25,7 @@ class CvtWithProjectionHeadConfig(transformers.CvtConfig):
23
 
24
 
25
  class ModelOutputWithProjectionEmbedding(transformers.modeling_outputs.ModelOutput):
26
- projected_last_hidden_state: torch.FloatTensor
27
  attention_mask: torch.FloatTensor
28
 
29
 
@@ -87,7 +89,7 @@ class VariableCvtWithProjectionHead(transformers.CvtPreTrainedModel):
87
  return projection
88
 
89
  return ModelOutputWithProjectionEmbedding(
90
- projected_last_hidden_state=projection, attention_mask=attention_mask,
91
  )
92
 
93
 
@@ -103,7 +105,7 @@ class LongitudinalPromptVariableCXREncoderDecoderModel(VisionEncoderDecoderModel
103
  config: Optional[PretrainedConfig] = None,
104
  encoder: Optional[PreTrainedModel] = None,
105
  decoder: Optional[PreTrainedModel] = None,
106
- encoder_decoder_ckpt_path: Optional[str] = None,
107
  ):
108
 
109
  if decoder:
@@ -149,8 +151,11 @@ class LongitudinalPromptVariableCXREncoderDecoderModel(VisionEncoderDecoderModel
149
  self.decoder.config = self.config.decoder
150
 
151
  # Load variable checkpoint:
152
- if encoder_decoder_ckpt_path:
153
- self.load_state_dict(torch.load(encoder_decoder_ckpt_path)['state_dict'])
 
 
 
154
 
155
  # Freeze the encoder:
156
  for p in self.encoder.parameters():
@@ -240,7 +245,7 @@ class LongitudinalPromptVariableCXREncoderDecoderModel(VisionEncoderDecoderModel
240
  decoder_hidden_states=decoder_outputs.hidden_states,
241
  decoder_attentions=decoder_outputs.attentions,
242
  cross_attentions=decoder_outputs.cross_attentions,
243
- encoder_last_hidden_state=encoder_outputs.projected_last_hidden_state,
244
  # encoder_hidden_states=encoder_outputs.hidden_states,
245
  # encoder_attentions=encoder_outputs.attentions,
246
  )
 
1
  import os
2
+ import warnings
3
  from typing import Any, Optional, Tuple, Union
4
 
5
  import torch
6
  import transformers
7
  from peft import LoraConfig, TaskType, get_peft_config, get_peft_model
8
  from torch.nn import CrossEntropyLoss
9
+ from transformers import (AutoModel, PreTrainedTokenizerFast,
10
+ VisionEncoderDecoderModel)
11
  from transformers.configuration_utils import PretrainedConfig
12
  from transformers.modeling_outputs import BaseModelOutput, Seq2SeqLMOutput
13
  from transformers.modeling_utils import PreTrainedModel
 
25
 
26
 
27
  class ModelOutputWithProjectionEmbedding(transformers.modeling_outputs.ModelOutput):
28
+ last_hidden_state: torch.FloatTensor
29
  attention_mask: torch.FloatTensor
30
 
31
 
 
89
  return projection
90
 
91
  return ModelOutputWithProjectionEmbedding(
92
+ last_hidden_state=projection, attention_mask=attention_mask,
93
  )
94
 
95
 
 
105
  config: Optional[PretrainedConfig] = None,
106
  encoder: Optional[PreTrainedModel] = None,
107
  decoder: Optional[PreTrainedModel] = None,
108
+ encoder_decoder_ckpt_name: Optional[str] = None,
109
  ):
110
 
111
  if decoder:
 
151
  self.decoder.config = self.config.decoder
152
 
153
  # Load variable checkpoint:
154
+ if encoder_decoder_ckpt_name:
155
+ encoder_decoder = AutoModel.from_pretrained(encoder_decoder_ckpt_name, trust_remote_code=True)
156
+ self.load_state_dict(encoder_decoder.state_dict())
157
+ else:
158
+ warnings.warn('The encoder-to-decoder model was not warm-started before applying low-rank approximation.')
159
 
160
  # Freeze the encoder:
161
  for p in self.encoder.parameters():
 
245
  decoder_hidden_states=decoder_outputs.hidden_states,
246
  decoder_attentions=decoder_outputs.attentions,
247
  cross_attentions=decoder_outputs.cross_attentions,
248
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
249
  # encoder_hidden_states=encoder_outputs.hidden_states,
250
  # encoder_attentions=encoder_outputs.attentions,
251
  )
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:ec90882d89dcb37bd495b287c8f2f3d655fd16a025f0b8197018fcb9abb43362
3
  size 450315441
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b04999f2bf3a2d29880d0897eeb3cb38496ecb2d4dc322198bc6b972eca5c85e
3
  size 450315441