atonyxu committed on
Commit
bedd457
1 Parent(s): b5c77b8

Upload 2 files

Browse files
furry/kaji/model/kaji/config.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Fine-tune configuration for the Kaji multi-speaker diff-svc model.

Overrides the shared ``_base_`` configs (architecture, trainer, scheduler,
dataset) with this run's speaker mapping, dataset layout, model sizes,
feature extractors, and checkpointing cadence.
"""

from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint

from fish_diffusion.datasets.audio_folder import AudioFolderDataset

# Base config files merged into this one by the config system.
_base_ = [
    "./_base_/archs/diff_svc_v2.py",
    "./_base_/trainers/base.py",
    "./_base_/schedulers/warmup_cosine_finetune.py",
    "./_base_/datasets/audio_folder.py",
]

# Speaker name -> integer speaker id.  NOTE(review): the names are presumably
# sub-folder names under dataset/train and dataset/valid -- confirm against
# AudioFolderDataset.get_datasets_from_subfolder.
speaker_mapping = {
    "kaji_astro_merc": 0,
    "kaji_astro_delta": 1,
    "kaji_astro_ksi": 2,
    "m4_alto_01": 3,
    "m4_bass_01": 4,
    "m4_soprano_01": 5,
    "m4_tenor_01": 6,
    "m4_tenor_07": 7,
}

train_datasets = AudioFolderDataset.get_datasets_from_subfolder("dataset/train", speaker_mapping)  # Build datasets manually.
valid_datasets = AudioFolderDataset.get_datasets_from_subfolder("dataset/valid", speaker_mapping)  # Build datasets manually.

dataset = dict(
    train=dict(
        _delete_=True,  # Delete the default train dataset inherited from the base config.
        type="ConcatDataset",
        datasets=train_datasets,
        # Are there any other ways to do this?
        collate_fn=AudioFolderDataset.collate_fn,
    ),
    valid=dict(
        _delete_=True,  # Delete the default valid dataset inherited from the base config.
        type="ConcatDataset",
        datasets=valid_datasets,
        collate_fn=AudioFolderDataset.collate_fn,
    ),
)

model = dict(
    # One input slot per speaker defined in speaker_mapping above.
    speaker_encoder=dict(
        input_size=len(speaker_mapping),
    ),
    text_encoder=dict(
        type="NaiveProjectionEncoder",
        input_size=256,
        output_size=256,
    ),
)

preprocessing = dict(
    text_features_extractor=dict(
        type="ChineseHubertSoft",
        pretrained=True,
        gate_size=15,
    ),
    pitch_extractor=dict(
        type="ParselMouthPitchExtractor",
    ),
)

# Run validation and save a checkpoint every 5000 steps.
# (The original comment said 1000, but both intervals below are 5000.)
trainer = dict(
    val_check_interval=5000,
    callbacks=[
        ModelCheckpoint(
            filename="{epoch}-{step}-{valid_loss:.2f}",
            every_n_train_steps=5000,
            save_top_k=-1,  # -1 keeps every checkpoint (no pruning).
        ),
        LearningRateMonitor(logging_interval="step"),
    ],
)
furry/kaji/model/kaji/model.ckpt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cd55f5cbd0163e85ccee93f4a7705d36f2eb5459c48b295f24facfe7f22820bc
3
+ size 717795849