from typing import List, Optional, Tuple, Union

import torch
import torch.nn as nn
from PIL import Image
from torch.nn import CrossEntropyLoss
from transformers import (
    AutoConfig,
    AutoModelForCausalLM,
    MixtralConfig,
    MixtralForCausalLM,
    MixtralModel,
)
from transformers.cache_utils import Cache, DynamicCache
from transformers.modeling_outputs import CausalLMOutputWithPast, MoeCausalLMOutputWithPast

from ..vita_arch import VITAMetaForCausalLM, VITAMetaModel


def load_balancing_loss_func(
    gate_logits: torch.Tensor,
    num_experts: torch.Tensor = None,
    top_k=2,
    attention_mask: Optional[torch.Tensor] = None,
) -> float:
    r"""
    Computes the auxiliary load balancing loss as in Switch Transformer - implemented in PyTorch.

    See Switch Transformer (https://arxiv.org/abs/2101.03961) for more details. This function implements the loss
    function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
    experts is too unbalanced.

    Args:
        gate_logits (Union[`torch.Tensor`, Tuple[`torch.Tensor`]]):
            Logits from the `gate`, should be a tuple of model.config.num_hidden_layers tensors of
            shape [batch_size X sequence_length, num_experts].
        attention_mask (`torch.Tensor`, *optional*):
            The attention_mask used in the forward function,
            shape [batch_size X sequence_length] if not None.
        num_experts (`int`, *optional*):
            Number of experts.

    Returns:
        The auxiliary loss.
    """
    if gate_logits is None or not isinstance(gate_logits, tuple):
        return 0

    if isinstance(gate_logits, tuple):
        compute_device = gate_logits[0].device
        concatenated_gate_logits = torch.cat(
            [layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0
        )

    routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1)

    _, selected_experts = torch.topk(routing_weights, top_k, dim=-1)

    expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts)

    if attention_mask is None:
        # Compute the percentage of tokens routed to each expert
        tokens_per_expert = torch.mean(expert_mask.float(), dim=0)

        # Compute the average probability of routing to these experts
        router_prob_per_expert = torch.mean(routing_weights, dim=0)
    else:
        batch_size, sequence_length = attention_mask.shape
        num_hidden_layers = concatenated_gate_logits.shape[0] // (batch_size * sequence_length)

        # Compute the mask that masks all padding tokens as 0, with the same shape as expert_mask
        expert_attention_mask = (
            attention_mask[None, :, :, None, None]
            .expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts))
            .reshape(-1, top_k, num_experts)
            .to(compute_device)
        )

        # Compute the percentage of tokens routed to each expert
        tokens_per_expert = torch.sum(
            expert_mask.float() * expert_attention_mask, dim=0
        ) / torch.sum(expert_attention_mask, dim=0)

        # Compute the mask that masks all padding tokens as 0, with the same shape as tokens_per_expert
        router_per_expert_attention_mask = (
            attention_mask[None, :, :, None]
            .expand((num_hidden_layers, batch_size, sequence_length, num_experts))
            .reshape(-1, num_experts)
            .to(compute_device)
        )

        # Compute the average probability of routing to these experts
        router_prob_per_expert = torch.sum(
            routing_weights * router_per_expert_attention_mask, dim=0
        ) / torch.sum(router_per_expert_attention_mask, dim=0)

    overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0))
    return overall_loss * num_experts
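

# Illustrative usage sketch (not part of the original logic; shapes are assumptions):
# `gate_logits` is the per-layer tuple the Mixtral backbone returns when
# output_router_logits=True, each element of shape [batch_size * sequence_length, num_experts].
#
#   gate_logits = tuple(torch.randn(4 * 6, 8) for _ in range(32))  # 32 layers, batch=4, seq=6, 8 experts
#   aux = load_balancing_loss_func(gate_logits, num_experts=8, top_k=2)
#   # `aux` is a scalar tensor; passing an attention_mask of shape [4, 6] would additionally
#   # exclude padded positions from both averages.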


def custom_forward(
    self,
    input_ids: torch.LongTensor = None,
    attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.LongTensor] = None,
    past_key_values: Optional[List[torch.FloatTensor]] = None,
    inputs_embeds: Optional[torch.FloatTensor] = None,
    labels: Optional[torch.LongTensor] = None,
    use_cache: Optional[bool] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    output_router_logits: Optional[bool] = None,
    return_dict: Optional[bool] = None,
) -> Union[Tuple, MoeCausalLMOutputWithPast]:
    r"""
    Args:
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

    Returns:

    Example:

    ```python
    >>> from transformers import AutoTokenizer, MixtralForCausalLM

    >>> model = MixtralForCausalLM.from_pretrained("mistralai/Mixtral-8x7B-v0.1")
    >>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mixtral-8x7B-v0.1")

    >>> prompt = "Hey, are you conscious? Can you talk to me?"
    >>> inputs = tokenizer(prompt, return_tensors="pt")

    >>> # Generate
    >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
    >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
    "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
    ```"""
    output_attentions = (
        output_attentions if output_attentions is not None else self.config.output_attentions
    )
    output_router_logits = (
        output_router_logits
        if output_router_logits is not None
        else self.config.output_router_logits
    )
    output_hidden_states = (
        output_hidden_states
        if output_hidden_states is not None
        else self.config.output_hidden_states
    )
    return_dict = return_dict if return_dict is not None else self.config.use_return_dict

    # decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)
    outputs = self.model(
        input_ids=input_ids,
        attention_mask=attention_mask,
        position_ids=position_ids,
        past_key_values=past_key_values,
        inputs_embeds=inputs_embeds,
        use_cache=use_cache,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        output_router_logits=output_router_logits,
        return_dict=return_dict,
    )

    hidden_states = outputs[0]
    logits = self.lm_head(hidden_states)
    # logits = logits.float()

    loss = None
    if labels is not None:
        # Shift so that tokens < n predict n
        shift_logits = logits[..., :-1, :].contiguous()
        shift_labels = labels[..., 1:].contiguous()
        # Flatten the tokens
        loss_fct = CrossEntropyLoss()
        shift_logits = shift_logits.view(-1, self.config.vocab_size)
        shift_labels = shift_labels.view(-1)
        # Enable model parallelism
        shift_labels = shift_labels.to(shift_logits.device)
        loss = loss_fct(shift_logits, shift_labels)

    aux_loss = None
    if output_router_logits:
        aux_loss = load_balancing_loss_func(
            outputs.router_logits if return_dict else outputs[-1],
            self.num_experts,
            self.num_experts_per_tok,
            attention_mask,
        )
        if labels is not None:
            loss += self.router_aux_loss_coef * aux_loss.to(
                loss.device
            )  # make sure the aux loss resides on the same device as the main loss

    if not return_dict:
        output = (logits,) + outputs[1:]
        if output_router_logits:
            output = (aux_loss,) + output
        return (loss,) + output if loss is not None else output

    return MoeCausalLMOutputWithPast(
        loss=loss,
        aux_loss=aux_loss,
        logits=logits,
        past_key_values=outputs.past_key_values,
        hidden_states=outputs.hidden_states,
        attentions=outputs.attentions,
        router_logits=outputs.router_logits,
    )
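

# The assignment below monkey-patches MixtralForCausalLM.forward module-wide, so both the
# stock MixtralForCausalLM and the VITAMixtralForCausalLM subclass defined further down
# pick up this aux-loss-aware forward once this module has been imported.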
MixtralForCausalLM.forward = custom_forward


class VITAMixtralConfig(MixtralConfig):
    model_type = "vita-mixtral"


class VITAMixtralModel(VITAMetaModel, MixtralModel):
    config_class = VITAMixtralConfig

    def __init__(self, config: MixtralConfig):
        super(VITAMixtralModel, self).__init__(config)


class VITAMixtralForCausalLM(MixtralForCausalLM, VITAMetaForCausalLM):
    config_class = VITAMixtralConfig

    def __init__(self, config):
        super(MixtralForCausalLM, self).__init__(config)
        self.model = VITAMixtralModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.router_aux_loss_coef = config.router_aux_loss_coef
        self.num_experts = config.num_local_experts
        self.num_experts_per_tok = config.num_experts_per_tok

        # Initialize weights and apply final processing
        self.post_init()

    def get_model(self):
        return self.model

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        images: Optional[torch.FloatTensor] = None,
        audios: Optional[dict] = None,
        sf_masks: Optional[torch.Tensor] = None,
        output_router_logits: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        if inputs_embeds is None:
            (
                input_ids,
                position_ids,
                attention_mask,
                past_key_values,
                inputs_embeds,
                labels,
            ) = self.prepare_inputs_labels_for_multimodal(
                input_ids, position_ids, attention_mask, past_key_values, labels, images, audios, sf_masks
            )

        return super().forward(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            labels=labels,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            output_router_logits=output_router_logits,
            return_dict=return_dict,
        )
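
    # Illustrative call sketch (tensor names and shapes below are assumptions, not part of
    # the original file):
    #
    #   out = model(
    #       input_ids=input_ids,            # [B, T] text with multimodal placeholder tokens
    #       attention_mask=attention_mask,  # [B, T]
    #       images=image_tensor,            # pixel values, e.g. from process_images() below
    #       audios=audio_dict,              # audio features consumed by the audio encoder
    #       labels=labels,                  # [B, T], -100 marks positions ignored by the loss
    #   )
    #   # prepare_inputs_labels_for_multimodal() (defined in vita_arch) splices the vision and
    #   # audio embeddings into inputs_embeds, so the Mixtral backbone receives inputs_embeds
    #   # rather than the raw input_ids.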

    def prepare_inputs_for_generation_original(
        self,
        input_ids,
        past_key_values=None,
        attention_mask=None,
        inputs_embeds=None,
        output_router_logits=False,
        **kwargs,
    ):
        # Omit tokens covered by past_key_values
        if past_key_values is not None:
            if isinstance(past_key_values, Cache):
                cache_length = past_key_values.get_seq_length()
                past_length = past_key_values.seen_tokens
                max_cache_length = past_key_values.get_max_length()
            else:
                cache_length = past_length = past_key_values[0][0].shape[2]
                max_cache_length = None

            # Keep only the unprocessed tokens:
            # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
            #     some of the inputs are exclusively passed as part of the cache (e.g. when passing inputs_embeds as
            #     input)
            if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
                input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
            # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
            #     input_ids based on the past_length.
            elif past_length < input_ids.shape[1]:
                input_ids = input_ids[:, past_length:]
            # 3 - Otherwise (past_length >= input_ids.shape[1]), assume input_ids only has unprocessed tokens.
            else:
                remove_prefix_length = input_ids.shape[1] - 1
                input_ids = input_ids[:, remove_prefix_length:]

            # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
            if (
                max_cache_length is not None
                and attention_mask is not None
                and cache_length + input_ids.shape[1] > max_cache_length
            ):
                attention_mask = attention_mask[:, -max_cache_length:]

        position_ids = kwargs.get("position_ids", None)
        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -input_ids.shape[1] :]

        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids}

        model_inputs.update(
            {
                "position_ids": position_ids,
                "past_key_values": past_key_values,
                "use_cache": kwargs.get("use_cache"),
                "attention_mask": attention_mask,
                "output_router_logits": output_router_logits,
            }
        )
        return model_inputs

    def prepare_inputs_for_generation(
        self,
        input_ids,
        past_key_values=None,
        inputs_embeds=None,
        attention_mask=None,
        output_router_logits=False,
        **kwargs,
    ):
        images = kwargs.pop("images", None)
        audios = kwargs.pop("audios", None)
        _inputs = self.prepare_inputs_for_generation_original(
            input_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            output_router_logits=output_router_logits,
            **kwargs,
        )
        if images is not None:
            _inputs["images"] = images
        if audios is not None:
            _inputs["audios"] = audios
        return _inputs
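
    # Generation sketch (illustrative only; variable names are assumptions): because this
    # override re-attaches `images` / `audios` to the model inputs at every decoding step,
    # they can be passed straight to `generate()` as keyword arguments:
    #
    #   output_ids = model.generate(
    #       inputs.input_ids,
    #       images=image_tensor,
    #       audios=audio_dict,
    #       max_new_tokens=128,
    #   )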

    def expand2square(self, pil_img, background_color):
        width, height = pil_img.size
        if width == height:
            return pil_img
        elif width > height:
            result = Image.new(pil_img.mode, (width, width), background_color)
            result.paste(pil_img, (0, (width - height) // 2))
            return result
        else:
            result = Image.new(pil_img.mode, (height, height), background_color)
            result.paste(pil_img, ((height - width) // 2, 0))
            return result
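
    # Example sketch: a 640x480 RGB image padded with the processor's mean color becomes a
    # centered 640x640 square (the background color tuple below is illustrative).
    #
    #   img = Image.new("RGB", (640, 480), (255, 0, 0))
    #   padded = model.expand2square(img, (122, 116, 104))
    #   assert padded.size == (640, 640)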

    def process_images(self, images, model_cfg):
        vision_tower = self.get_vision_tower()
        if not vision_tower.is_loaded:
            vision_tower.load_model()
        image_processor = vision_tower.image_processor
        image_aspect_ratio = getattr(model_cfg, "image_aspect_ratio", None)
        new_images = []
        if image_aspect_ratio == "pad":
            for image in images:
                image = self.expand2square(
                    image, tuple(int(x * 255) for x in image_processor.image_mean)
                )
                image = image_processor.preprocess(image, return_tensors="pt")["pixel_values"][0]
                new_images.append(image)
        else:
            return image_processor(images, return_tensors="pt")["pixel_values"]
        if all(x.shape == new_images[0].shape for x in new_images):
            new_images = torch.stack(new_images, dim=0)
        return new_images
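
    # Usage sketch (file name is a placeholder): with image_aspect_ratio == "pad" in the
    # config, each PIL image is padded to a square with the processor's mean color and then
    # preprocessed, yielding a [num_images, 3, H, W] tensor for the `images=` argument.
    #
    #   pil_images = [Image.open("frame.jpg").convert("RGB")]
    #   pixel_values = model.process_images(pil_images, model.config)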


AutoConfig.register("vita-mixtral", VITAMixtralConfig)
AutoModelForCausalLM.register(VITAMixtralConfig, VITAMixtralForCausalLM)
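
# With the two registrations above, a checkpoint whose config.json declares
# model_type == "vita-mixtral" can be loaded through the Auto classes once this module has
# been imported (the path below is a placeholder, not a released checkpoint):
#
#   from transformers import AutoModelForCausalLM
#   model = AutoModelForCausalLM.from_pretrained("/path/to/vita-mixtral-checkpoint")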