# xtts2-gpt / gpt_config.py
from dataclasses import asdict, dataclass, field
from typing import Dict, Optional, List
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging
logger = logging.get_logger(__name__)
@dataclass
class XTTSAudioConfig:
"""Configuration for audio processing parameters"""
sample_rate: int = 22050
output_sample_rate: int = 24000
mel_channels: int = 80
hop_length: int = 256
win_length: int = 1024
n_fft: int = 1024
fmin: int = 0
fmax: int = 8000
power: float = 1.0
mel_norms_file: Optional[str] = None
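# Note (annotation, not part of the original upload): with these defaults the
# mel spectrogram runs at roughly sample_rate / hop_length = 22050 / 256 ≈ 86
# frames per second of input audio, while output_sample_rate (24 kHz) is
# presumably the rate of the synthesized waveform. mel_norms_file, if set,
# presumably points to precomputed per-channel mel normalization statistics.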
class XTTSGPTConfig(PretrainedConfig):
"""Configuration class for the GPT component of XTTS"""
model_type = "xtts_gpt"
def __init__(
self,
# Model architecture
vocab_size: int = 256,
hidden_size: int = 1024, # Changed from gpt_n_model_channels
num_hidden_layers: int = 30, # Changed from gpt_layers
num_attention_heads: int = 16, # Changed from gpt_n_heads
n_inner: Optional[int] = None, # Added for GPT-2 compatibility
max_position_embeddings: int = 2048, # Added for positional embeddings
layer_norm_epsilon: float = 1e-5, # Added for layer norm
activation_function: str = "gelu", # Added activation function
resid_pdrop: float = 0.1, # Added dropout rates
embd_pdrop: float = 0.1,
attn_pdrop: float = 0.1,
        # XTTS-specific parameters
num_chars: int = 255,
batch_size: int = 1, # Changed from gpt_batch_size
max_audio_tokens: int = 605, # Changed from gpt_max_audio_tokens
max_text_tokens: int = 402, # Changed from gpt_max_text_tokens
max_prompt_tokens: int = 70, # Changed from gpt_max_prompt_tokens
number_text_tokens: int = 6681, # Changed from gpt_number_text_tokens
start_text_token: Optional[int] = None, # Changed from gpt_start_text_token
stop_text_token: Optional[int] = None, # Changed from gpt_stop_text_token
num_audio_tokens: int = 1026, # Changed from gpt_num_audio_tokens
start_audio_token: int = 1024, # Changed from gpt_start_audio_token
stop_audio_token: int = 1025, # Changed from gpt_stop_audio_token
code_stride_len: int = 1024, # Changed from gpt_code_stride_len
use_masking_gt_prompt_approach: bool = True, # Changed from gpt_use_masking_gt_prompt_approach
use_perceiver_resampler: bool = True, # Changed from gpt_use_perceiver_resampler
checkpointing: bool = False, # Changed from gpt_checkpointing
train_solo_embeddings: bool = False, # Changed from gpt_train_solo_embeddings
# Training parameters
enable_redaction: bool = False,
kv_cache: bool = True,
perceiver_cond_length_compression: int = 256,
label_smoothing: float = 0.0,
# Generation parameters
temperature: float = 0.75,
length_penalty: float = 1.0,
repetition_penalty: float = 5.0,
top_k: int = 50,
top_p: float = 0.85,
cond_len: int = 30, # Changed from gpt_cond_len
cond_chunk_len: int = 4, # Changed from gpt_cond_chunk_len
max_ref_len: int = 30,
sound_norm_refs: bool = False,
# Audio processing
audio_config: Optional[XTTSAudioConfig] = None,
# Constants and limits
duration_const: int = 102400,
char_limits: Optional[Dict[str, int]] = None,
languages: Optional[List[str]] = None,
pad_token_id: Optional[int] = None,
bos_token_id: Optional[int] = None,
eos_token_id: Optional[int] = None,
# GPT-2 compatibility flags
scale_attn_by_inverse_layer_idx: bool = False,
reorder_and_upcast_attn: bool = False,
add_cross_attention: bool = False,
tie_word_embeddings: bool = True,
**kwargs,
):
if char_limits is None:
char_limits = {
"en": 250, "de": 253, "fr": 273, "es": 239,
"it": 213, "pt": 203, "pl": 224, "zh": 82,
"ar": 166, "cs": 186, "ru": 182, "nl": 251,
"tr": 226, "ja": 71, "hu": 224, "ko": 95,
}
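        # char_limits are per-language character limits (presumably the maximum
        # text length handled per generation pass before splitting is needed).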
if languages is None:
languages = [
"en", "es", "fr", "de", "it", "pt", "pl", "tr", "ru", "nl",
"cs", "ar", "zh-cn", "hu", "ko", "ja", "hi"
]
if audio_config is None:
audio_config = XTTSAudioConfig()
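        # Defaults are now resolved; token ids and any remaining kwargs are
        # forwarded to PretrainedConfig so the standard Hugging Face config
        # machinery (serialization, from_pretrained) keeps working.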
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
**kwargs
)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.n_inner = n_inner
self.max_position_embeddings = max_position_embeddings
self.layer_norm_epsilon = layer_norm_epsilon
self.activation_function = activation_function
self.resid_pdrop = resid_pdrop
self.embd_pdrop = embd_pdrop
self.attn_pdrop = attn_pdrop
        # XTTS-specific parameters
self.num_chars = num_chars
self.batch_size = batch_size
self.max_audio_tokens = max_audio_tokens
self.max_text_tokens = max_text_tokens
self.max_prompt_tokens = max_prompt_tokens
self.number_text_tokens = number_text_tokens
self.start_text_token = start_text_token
self.stop_text_token = stop_text_token
self.num_audio_tokens = num_audio_tokens
self.start_audio_token = start_audio_token
self.stop_audio_token = stop_audio_token
self.code_stride_len = code_stride_len
self.use_masking_gt_prompt_approach = use_masking_gt_prompt_approach
self.use_perceiver_resampler = use_perceiver_resampler
self.checkpointing = checkpointing
self.train_solo_embeddings = train_solo_embeddings
# Training parameters
self.enable_redaction = enable_redaction
self.kv_cache = kv_cache
self.perceiver_cond_length_compression = perceiver_cond_length_compression
self.label_smoothing = label_smoothing
# Generation parameters
self.temperature = temperature
self.length_penalty = length_penalty
self.repetition_penalty = repetition_penalty
self.top_k = top_k
self.top_p = top_p
self.cond_len = cond_len
self.cond_chunk_len = cond_chunk_len
self.max_ref_len = max_ref_len
self.sound_norm_refs = sound_norm_refs
# Audio processing
self.audio_config = audio_config
# Constants and limits
self.duration_const = duration_const
self.char_limits = char_limits
self.languages = languages
# GPT-2 compatibility flags
self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
self.reorder_and_upcast_attn = reorder_and_upcast_attn
self.add_cross_attention = add_cross_attention
self.tie_word_embeddings = tie_word_embeddings
    def to_dict(self):
        """Convert the config to a dictionary, serializing the nested audio config."""
        config_dict = super().to_dict()
        # audio_config may already be a plain dict if it was assigned directly by the caller.
        if isinstance(self.audio_config, XTTSAudioConfig):
            config_dict["audio_config"] = asdict(self.audio_config)
        return config_dict
    @classmethod
    def from_dict(cls, config_dict, *args, **kwargs):
        """Create a config from a dictionary, rebuilding the nested audio config."""
        config_dict = dict(config_dict)  # copy so pop() does not mutate the caller's dict
        audio_config = XTTSAudioConfig(**config_dict.pop("audio_config", {}))
        return cls(audio_config=audio_config, **config_dict, **kwargs)
def update_with_tokenizer(self, tokenizer=None):
"""Update configuration values based on tokenizer"""
if tokenizer is not None:
self.number_text_tokens = tokenizer.get_vocab_size()
self.start_text_token = tokenizer.bos_token_id
self.stop_text_token = tokenizer.eos_token_id
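# ---------------------------------------------------------------------------
# Minimal usage sketch (annotation, not part of the original upload). It only
# relies on the classes defined above plus an installed `transformers`; the
# values printed are the class defaults, not recommendations.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # Build a config, optionally overriding the nested audio settings.
    config = XTTSGPTConfig(
        audio_config=XTTSAudioConfig(sample_rate=22050, output_sample_rate=24000),
    )
    # Round-trip through plain dicts: to_dict() flattens the dataclass,
    # from_dict() rebuilds it as an XTTSAudioConfig instance.
    config_dict = config.to_dict()
    restored = XTTSGPTConfig.from_dict(config_dict)
    print(restored.hidden_size, restored.num_hidden_layers)   # 1024 30
    print(type(restored.audio_config).__name__)               # XTTSAudioConfig
    print(restored.audio_config.mel_channels)                 # 80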