"""Pretrain BERT""" |
|
|
|
from functools import partial |
|
|
|
import torch |
|
import torch.nn.functional as F |
|
|
|
import megatron.initialize |
|
import megatron |
|
from megatron import get_args |
|
from megatron import print_rank_0 |
|
from megatron import get_timers |
|
from megatron.core import tensor_parallel |
|
from megatron.data.dataset_utils import build_train_valid_test_datasets |
|
from megatron.model import BertModel, ModelType |
|
|
|
from megatron.utils import average_losses_across_data_parallel_group |
|
|
|
|
|
def model_provider(pre_process=True, post_process=True):
    """Build the model."""

    print_rank_0('building BERT model ...')

    args = get_args()
    # Token-type (segment) embeddings are only needed when the binary
    # sentence-order head is enabled.
    num_tokentypes = 2 if args.bert_binary_head else 0
    model = BertModel(
        num_tokentypes=num_tokentypes,
        add_binary_head=args.bert_binary_head,
        parallel_output=True,
        pre_process=pre_process,
        post_process=post_process)

    return model


def get_batch(data_iterator):
    """Build the batch."""

    # Items and their type.
    keys = ['text', 'types', 'labels', 'is_random', 'loss_mask', 'padding_mask']
    datatype = torch.int64

    # Broadcast data across the tensor-model-parallel group.
    if data_iterator is not None:
        data = next(data_iterator)
    else:
        data = None
    data_b = tensor_parallel.broadcast_data(keys, data, datatype)

    # Unpack.
    tokens = data_b['text'].long()
    types = data_b['types'].long()
    sentence_order = data_b['is_random'].long()
    loss_mask = data_b['loss_mask'].float()
    lm_labels = data_b['labels'].long()
    padding_mask = data_b['padding_mask'].long()

    return tokens, types, sentence_order, loss_mask, lm_labels, padding_mask


def loss_func(loss_mask, sentence_order, output_tensor):
    """Combine the masked-LM loss with the sentence-order-prediction loss."""
    lm_loss_, sop_logits = output_tensor

    # Average the per-token LM loss over the masked positions only.
    lm_loss_ = lm_loss_.float()
    loss_mask = loss_mask.float()
    lm_loss = torch.sum(
        lm_loss_.view(-1) * loss_mask.reshape(-1)) / loss_mask.sum()

    if sop_logits is not None:
        # Sentence-order prediction is a binary classification task.
        sop_loss = F.cross_entropy(sop_logits.view(-1, 2).float(),
                                   sentence_order.view(-1),
                                   ignore_index=-1)
        sop_loss = sop_loss.float()
        loss = lm_loss + sop_loss
        averaged_losses = average_losses_across_data_parallel_group(
            [lm_loss, sop_loss])
        return loss, {'lm loss': averaged_losses[0],
                      'sop loss': averaged_losses[1]}

    else:
        loss = lm_loss
        averaged_losses = average_losses_across_data_parallel_group(
            [lm_loss])
        return loss, {'lm loss': averaged_losses[0]}


def forward_step(data_iterator, model):
    """Forward step."""
    args = get_args()
    timers = get_timers()

    # Get the batch.
    timers('batch-generator', log_level=2).start()
    tokens, types, sentence_order, loss_mask, lm_labels, padding_mask = get_batch(
        data_iterator)
    timers('batch-generator').stop()

    # Without the binary head there are no token-type embeddings to feed.
    if not args.bert_binary_head:
        types = None

    # Forward pass through the model.
    output_tensor = model(tokens, padding_mask, tokentype_ids=types,
                          lm_labels=lm_labels)

    return output_tensor, partial(loss_func, loss_mask, sentence_order)


def train_valid_test_datasets_provider(train_val_test_num_samples):
    """Build train, valid, and test datasets."""
    args = get_args()

    print_rank_0('> building train, validation, and test datasets '
                 'for BERT ...')
    train_ds, valid_ds, test_ds = build_train_valid_test_datasets(
        data_prefix=args.data_path,
        data_impl=args.data_impl,
        splits_string=args.split,
        train_valid_test_num_samples=train_val_test_num_samples,
        max_seq_length=args.seq_length,
        masked_lm_prob=args.mask_prob,
        short_seq_prob=args.short_seq_prob,
        seed=args.seed,
        skip_warmup=(not args.mmap_warmup),
        binary_head=args.bert_binary_head)
    print_rank_0("> finished creating BERT datasets ...")

    return train_ds, valid_ds, test_ds


if __name__ == "__main__":

    # pretrain() performs Megatron initialization itself, so initialize_megatron()
    # does not need to be called here; tokenizer defaults are passed through
    # args_defaults instead.
    pretrain(train_valid_test_datasets_provider,
             model_provider,
             ModelType.encoder_or_decoder,
             forward_step,
             args_defaults={'tokenizer_type': 'BertWordPieceLowerCase'})
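
# A minimal, hypothetical launch example (illustrative argument values only;
# production runs use a distributed launcher such as torchrun and a fuller
# set of flags). The vocab file and data prefix below are placeholders, not
# real paths:
#
#   python pretrain_bert.py \
#       --num-layers 12 --hidden-size 768 --num-attention-heads 12 \
#       --seq-length 512 --max-position-embeddings 512 \
#       --micro-batch-size 4 --global-batch-size 32 \
#       --lr 0.0001 --train-iters 1000000 \
#       --vocab-file bert-vocab.txt \
#       --data-path my-bert_text_sentence \
#       --split 949,50,1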
|
|