AlexHung29629 committed on
Commit
5ab87b5
·
verified ·
1 Parent(s): 5c6ab95

Update modeling_llama3.py

Browse files
Files changed (1) hide show
  1. modeling_llama3.py +0 -8
modeling_llama3.py CHANGED
@@ -80,8 +80,6 @@ class Llama3ForCausalLM(Llama3PreTrainedModel, GenerationMixin):
80
  input_ids: Optional[torch.LongTensor] = None,
81
  audio_features: Optional[torch.FloatTensor] = None,
82
  attention_mask: Optional[torch.Tensor] = None,
83
- cross_attention_mask: Optional[torch.Tensor] = None,
84
- cross_attention_states: Optional[torch.Tensor] = None,
85
  position_ids: Optional[torch.LongTensor] = None,
86
  past_key_values: Optional[List[torch.FloatTensor]] = None,
87
  inputs_embeds: Optional[torch.FloatTensor] = None,
@@ -152,8 +150,6 @@ class Llama3ForCausalLM(Llama3PreTrainedModel, GenerationMixin):
152
  input_ids=None,
153
  attention_mask=attention_mask,
154
  position_ids=position_ids,
155
- cross_attention_states=cross_attention_states,
156
- cross_attention_mask=cross_attention_mask,
157
  full_text_row_masked_out_mask=None,
158
  past_key_values=past_key_values,
159
  use_cache=use_cache,
@@ -175,9 +171,6 @@ class Llama3ForCausalLM(Llama3PreTrainedModel, GenerationMixin):
175
  inputs_embeds=None,
176
  attention_mask=None,
177
  position_ids=None,
178
- aspect_ratio_ids=None,
179
- aspect_ratio_mask=None,
180
- cross_attention_mask=None,
181
  past_key_values=None,
182
  use_cache=False,
183
  cache_position=None,
@@ -224,7 +217,6 @@ class Llama3ForCausalLM(Llama3PreTrainedModel, GenerationMixin):
224
  "past_key_values": past_key_values,
225
  "use_cache": use_cache,
226
  "attention_mask": attention_mask,
227
- "cross_attention_mask": cross_attention_mask,
228
  }
229
  )
230
 
 
80
  input_ids: Optional[torch.LongTensor] = None,
81
  audio_features: Optional[torch.FloatTensor] = None,
82
  attention_mask: Optional[torch.Tensor] = None,
 
 
83
  position_ids: Optional[torch.LongTensor] = None,
84
  past_key_values: Optional[List[torch.FloatTensor]] = None,
85
  inputs_embeds: Optional[torch.FloatTensor] = None,
 
150
  input_ids=None,
151
  attention_mask=attention_mask,
152
  position_ids=position_ids,
 
 
153
  full_text_row_masked_out_mask=None,
154
  past_key_values=past_key_values,
155
  use_cache=use_cache,
 
171
  inputs_embeds=None,
172
  attention_mask=None,
173
  position_ids=None,
 
 
 
174
  past_key_values=None,
175
  use_cache=False,
176
  cache_position=None,
 
217
  "past_key_values": past_key_values,
218
  "use_cache": use_cache,
219
  "attention_mask": attention_mask,
 
220
  }
221
  )
222