AlexHung29629 committed on
Commit debfd7b · verified · 1 Parent(s): b574b5d

Create audio_processing_mllama.py

Files changed (1)
  1. audio_processing_mllama.py +61 -0
audio_processing_mllama.py ADDED
@@ -0,0 +1,61 @@
import math
from typing import Dict, List, Optional, Union

import numpy as np
import transformers
from transformers.tokenization_utils_base import AudioInput
from transformers.models.seamless_m4t.feature_extraction_seamless_m4t import SeamlessM4TFeatureExtractor
from transformers.utils import TensorType
from transformers.feature_extraction_utils import BatchFeature
from transformers import AutoFeatureExtractor


def build_audio_tokens(text: List[str], audio_features: Union[Dict, List[List[np.ndarray]]], audio_token: str = "<|audio|>") -> List[str]:
    """Replace each <|audio|> placeholder with the token sequence for the corresponding clip."""
    if not isinstance(audio_features, list):
        audio_features = audio_features['audio_features']
    bs = len(audio_features)  # works for both the packed array and the nested-list input
    for i in range(bs):
        for j in range(len(audio_features[i])):
            # Emit one <|audio_{j+1}|> token per embedding produced by the j-th clip.
            tgt_token = f"<|audio_{j+1}|>" * get_num_embeddings(audio_features[i][j].shape[0])
            text[i] = text[i].replace(audio_token, tgt_token, 1)
    return text


def get_num_embeddings(num_frames, adapter_kernel_size=7, adapter_stride=4) -> int:
    """Number of audio embeddings produced for a clip of `num_frames` feature frames."""
    return math.ceil((num_frames - adapter_kernel_size) / adapter_stride) + 1 + 2  # 2 = <|begin_of_audio|>, <|end_of_audio|>
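# Worked example of the arithmetic above (illustrative numbers): a clip with 100 feature
# frames yields ceil((100 - 7) / 4) + 1 = 25 adapter outputs, so get_num_embeddings
# returns 27 once <|begin_of_audio|> and <|end_of_audio|> are counted.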
class MllamaAudioFeatureExtractor(SeamlessM4TFeatureExtractor):

    def __call__(
        self,
        batch_audio_clips: List[List[AudioInput]],
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        # Run the parent SeamlessM4T feature extractor on every clip of every sample,
        # then pad and stack the per-clip features into a single array.
        audio_features = [
            [
                super(MllamaAudioFeatureExtractor, self).__call__(
                    audio_j, sampling_rate=16000, return_attention_mask=False
                )['input_features'][0]
                for audio_j in audio_i
            ]
            for audio_i in batch_audio_clips
        ]
        packed_audio_features = self.pack_audio_clips(audio_features)

        encoded_audio_inputs = BatchFeature(
            data={
                "audio_features": packed_audio_features,
            },
            tensor_type=return_tensors,
        )

        return encoded_audio_inputs

    def pack_audio_clips(self, batch_audio_clips: List[List[np.ndarray]]) -> np.ndarray:
        assert batch_audio_clips[0][0].ndim == 2  # sequence length x feature dimension
        # Output shape: (batch_size, max_num_clips, max_frames, feature_dim), zero-padded.
        batch_size = len(batch_audio_clips)
        max_num_clips = max(len(clips) for clips in batch_audio_clips)
        max_frames = max(clip.shape[0] for clips in batch_audio_clips for clip in clips)
        feature_dim = batch_audio_clips[0][0].shape[1]

        stacked_audio_clips = np.zeros((batch_size, max_num_clips, max_frames, feature_dim), dtype=np.float32)
        for i, clips in enumerate(batch_audio_clips):
            for j, clip in enumerate(clips):
                stacked_audio_clips[i, j, :clip.shape[0], :] = clip

        return stacked_audio_clips


AutoFeatureExtractor.register("MllamaAudioFeatureExtractor", MllamaAudioFeatureExtractor)
transformers.MllamaAudioFeatureExtractor = MllamaAudioFeatureExtractor
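A minimal usage sketch of how the pieces fit together, assuming 16 kHz mono clips already loaded as NumPy float32 arrays and a default-configured extractor; the waveforms and prompt below are placeholders, not part of the commit:

import numpy as np

# Placeholder inputs: one sample containing two 16 kHz mono clips.
clip_a = np.random.randn(16000).astype(np.float32)  # ~1 second
clip_b = np.random.randn(32000).astype(np.float32)  # ~2 seconds
batch_audio_clips = [[clip_a, clip_b]]

feature_extractor = MllamaAudioFeatureExtractor()  # default SeamlessM4T settings
audio_inputs = feature_extractor(batch_audio_clips, return_tensors="np")
# audio_inputs["audio_features"]: (batch_size, max_num_clips, max_frames, feature_dim)

# Expand each <|audio|> placeholder into the per-clip audio token sequence.
prompts = ["Transcribe <|audio|> and then summarize <|audio|>."]
prompts = build_audio_tokens(prompts, audio_inputs)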