robinzixuan committed
Commit 15902cc
1 Parent(s): fada12b

Upload modeling_opt.py

Files changed (1)
  1. modeling_opt.py  +7 -14
modeling_opt.py CHANGED
@@ -200,8 +200,7 @@ class OPTAttention(nn.Module):
 
         if (self.head_dim * self.num_heads) != self.embed_dim:
             raise ValueError(
-                f"embed_dim must be divisible by num_heads (got `embed_dim`: {
-                    self.embed_dim}"
+                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
                 f" and `num_heads`: {self.num_heads})."
             )
         self.scaling = self.head_dim**-0.5
@@ -371,16 +370,14 @@ class OPTAttention(nn.Module):
 
         if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
             raise ValueError(
-                f"Attention weights should be of size {
-                    (bsz * self.num_heads, tgt_len, src_len)}, but is"
+                f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
                 f" {attn_weights.size()}"
             )
 
         if attention_mask is not None:
             if attention_mask.size() != (bsz, 1, tgt_len, src_len):
                 raise ValueError(
-                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {
-                        attention_mask.size()}"
+                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
                 )
             attn_weights = attn_weights.view(
                 bsz, self.num_heads, tgt_len, src_len) + attention_mask
@@ -401,8 +398,7 @@ class OPTAttention(nn.Module):
         if layer_head_mask is not None:
             if layer_head_mask.size() != (self.num_heads,):
                 raise ValueError(
-                    f"Head mask for a single layer should be of size {
-                        (self.num_heads,)}, but is"
+                    f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
                     f" {layer_head_mask.size()}"
                 )
             attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(
@@ -436,8 +432,7 @@ class OPTAttention(nn.Module):
 
         if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
             raise ValueError(
-                f"`attn_output` should be of size {
-                    (bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
+                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
                 f" {attn_output.size()}"
             )
 
@@ -1095,8 +1090,7 @@ class OPTDecoder(OPTPreTrainedModel):
                 batch_size, mask_seq_length, device=inputs_embeds.device)
         elif attention_mask.shape[1] != mask_seq_length:
             raise ValueError(
-                f"The provided attention mask has length {
-                    attention_mask.shape[1]}, but its length should be "
+                f"The provided attention mask has length {attention_mask.shape[1]}, but its length should be "
                 f"{mask_seq_length} (sum of the lengths of current and past inputs)"
             )
         causal_attention_mask = _prepare_4d_causal_attention_mask(
@@ -1128,8 +1122,7 @@ class OPTDecoder(OPTPreTrainedModel):
             if attn_mask is not None:
                 if attn_mask.size()[0] != (len(self.layers)):
                     raise ValueError(
-                        f"The `{mask_name}` should be specified for {
-                            len(self.layers)} layers, but it is for"
+                        f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
                         f" {head_mask.size()[0]}."
                     )
 
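Every hunk makes the same change: f-string replacement fields that had been split across lines are collapsed back onto a single line. A line break inside an f-string's `{...}` is only legal syntax from Python 3.12 onward (PEP 701); on 3.11 and earlier the split form is a SyntaxError the moment the module is parsed. The sketch below illustrates the difference, assuming compatibility with pre-3.12 interpreters was the motivation (the commit message only says "Upload modeling_opt.py"); the values assigned to `embed_dim` and `num_heads` are made up for the example.

# Illustration only (assumption: the collapse is about pre-3.12 compatibility).
# On Python <= 3.11 a replacement field cannot contain a line break, so the
# removed form fails to parse:
#
#   f"embed_dim must be divisible by num_heads (got `embed_dim`: {
#       self.embed_dim}"          # SyntaxError before Python 3.12 (PEP 701)
#
# The added form keeps each expression on one line and relies on implicit
# string concatenation, so it parses on any supported Python version.
embed_dim, num_heads = 768, 12    # example values, not read from the model
message = (
    f"embed_dim must be divisible by num_heads (got `embed_dim`: {embed_dim}"
    f" and `num_heads`: {num_heads})."
)
print(message)
# -> embed_dim must be divisible by num_heads (got `embed_dim`: 768 and `num_heads`: 12).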
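A quick way to confirm the uploaded file stays loadable on older interpreters is to parse it without importing torch or transformers. This is a minimal sketch (only the filename comes from the commit; the rest is standard library), and it only proves something when run under the oldest Python version you intend to support:

import ast

# Parse modeling_opt.py without executing it; any remaining multi-line
# replacement field would raise SyntaxError here on Python <= 3.11.
with open("modeling_opt.py", encoding="utf-8") as f:
    ast.parse(f.read(), filename="modeling_opt.py")
print("modeling_opt.py parses cleanly on this interpreter")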