ZeyuXie committed
Commit 09d8de6
1 parent: f5b1a19

Upload 96 files

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full change set.
Files changed (50)
  1. data/meta_data/test-frequency-control_onoffFromGpt_multi-event.json +200 -0
  2. data/meta_data/test-frequency-control_onoffFromGpt_single-event.json +400 -0
  3. data/meta_data/test-onoff-control_multi-event.json +200 -0
  4. data/meta_data/test-onoff-control_single-event.json +400 -0
  5. data/meta_data/train.json +0 -0
  6. picoaudio/audioldm/__init__.py +8 -0
  7. picoaudio/audioldm/__main__.py +183 -0
  8. picoaudio/audioldm/audio/__init__.py +2 -0
  9. picoaudio/audioldm/audio/audio_processing.py +100 -0
  10. picoaudio/audioldm/audio/stft.py +186 -0
  11. picoaudio/audioldm/audio/tools.py +85 -0
  12. picoaudio/audioldm/clap/__init__.py +0 -0
  13. picoaudio/audioldm/clap/encoders.py +170 -0
  14. picoaudio/audioldm/clap/open_clip/__init__.py +25 -0
  15. picoaudio/audioldm/clap/open_clip/bert.py +40 -0
  16. picoaudio/audioldm/clap/open_clip/bpe_simple_vocab_16e6.txt.gz +3 -0
  17. picoaudio/audioldm/clap/open_clip/factory.py +279 -0
  18. picoaudio/audioldm/clap/open_clip/feature_fusion.py +192 -0
  19. picoaudio/audioldm/clap/open_clip/htsat.py +1308 -0
  20. picoaudio/audioldm/clap/open_clip/linear_probe.py +66 -0
  21. picoaudio/audioldm/clap/open_clip/loss.py +398 -0
  22. picoaudio/audioldm/clap/open_clip/model.py +936 -0
  23. picoaudio/audioldm/clap/open_clip/model_configs/HTSAT-base.json +23 -0
  24. picoaudio/audioldm/clap/open_clip/model_configs/HTSAT-large.json +23 -0
  25. picoaudio/audioldm/clap/open_clip/model_configs/HTSAT-tiny-win-1536.json +23 -0
  26. picoaudio/audioldm/clap/open_clip/model_configs/HTSAT-tiny.json +23 -0
  27. picoaudio/audioldm/clap/open_clip/model_configs/PANN-10.json +23 -0
  28. picoaudio/audioldm/clap/open_clip/model_configs/PANN-14-fmax-18k.json +23 -0
  29. picoaudio/audioldm/clap/open_clip/model_configs/PANN-14-fmax-8k-20s.json +23 -0
  30. picoaudio/audioldm/clap/open_clip/model_configs/PANN-14-tiny-transformer.json +23 -0
  31. picoaudio/audioldm/clap/open_clip/model_configs/PANN-14-win-1536.json +23 -0
  32. picoaudio/audioldm/clap/open_clip/model_configs/PANN-14.json +23 -0
  33. picoaudio/audioldm/clap/open_clip/model_configs/PANN-6.json +23 -0
  34. picoaudio/audioldm/clap/open_clip/model_configs/RN101-quickgelu.json +22 -0
  35. picoaudio/audioldm/clap/open_clip/model_configs/RN101.json +21 -0
  36. picoaudio/audioldm/clap/open_clip/model_configs/RN50-quickgelu.json +22 -0
  37. picoaudio/audioldm/clap/open_clip/model_configs/RN50.json +21 -0
  38. picoaudio/audioldm/clap/open_clip/model_configs/RN50x16.json +21 -0
  39. picoaudio/audioldm/clap/open_clip/model_configs/RN50x4.json +21 -0
  40. picoaudio/audioldm/clap/open_clip/model_configs/ViT-B-16.json +16 -0
  41. picoaudio/audioldm/clap/open_clip/model_configs/ViT-B-32-quickgelu.json +17 -0
  42. picoaudio/audioldm/clap/open_clip/model_configs/ViT-B-32.json +16 -0
  43. picoaudio/audioldm/clap/open_clip/model_configs/ViT-L-14.json +16 -0
  44. picoaudio/audioldm/clap/open_clip/openai.py +159 -0
  45. picoaudio/audioldm/clap/open_clip/pann_model.py +704 -0
  46. picoaudio/audioldm/clap/open_clip/pretrained.py +169 -0
  47. picoaudio/audioldm/clap/open_clip/timm_model.py +112 -0
  48. picoaudio/audioldm/clap/open_clip/tokenizer.py +197 -0
  49. picoaudio/audioldm/clap/open_clip/transform.py +45 -0
  50. picoaudio/audioldm/clap/open_clip/utils.py +362 -0
data/meta_data/test-frequency-control_onoffFromGpt_multi-event.json ADDED
@@ -0,0 +1,200 @@
+ {"filepath": "data/multi_event_test/syn_1.wav", "onoffCaption": "cat meowing at 0.5-2.0, 3.0-4.5 and whistling at 5.0-6.5 and explosion at 7.0-8.0, 8.5-9.5", "frequencyCaption": "cat meowing two times and whistling one times and explosion two times"}
+ {"filepath": "data/multi_event_test/syn_6.wav", "onoffCaption": "whistling at 2.0-6.0", "frequencyCaption": "whistling one times"}
+ {"filepath": "data/multi_event_test/syn_8.wav", "onoffCaption": "cow mooing at 1.954-4.954, 6.219-9.219", "frequencyCaption": "cow mooing two times"}
+ {"filepath": "data/multi_event_test/syn_11.wav", "onoffCaption": "burping belching at 0.0-2.0, 2.5-4.5 and dog barking at 5.0-7.0", "frequencyCaption": "burping belching two times and dog barking one times"}
+ {"filepath": "data/multi_event_test/syn_16.wav", "onoffCaption": "duck quacking at 0.0-1.0, 2.0-3.0, 4.0-5.0", "frequencyCaption": "duck quacking three times"}
+ {"filepath": "data/multi_event_test/syn_18.wav", "onoffCaption": "door knocking at 0.138-2.518, 3.708-6.088 and door slamming at 2.798-4.798", "frequencyCaption": "door knocking two times and door slamming one times"}
+ {"filepath": "data/multi_event_test/syn_21.wav", "onoffCaption": "dog barking at 0-1, 2-3", "frequencyCaption": "dog barking two times"}
+ {"filepath": "data/multi_event_test/syn_26.wav", "onoffCaption": "whistling at 0.2-4.2", "frequencyCaption": "whistling one times"}
+ {"filepath": "data/multi_event_test/syn_28.wav", "onoffCaption": "cow mooing at 0.0-1.0 and spraying at 1.0-2.0", "frequencyCaption": "cow mooing one times and spraying one times"}
+ {"filepath": "data/multi_event_test/syn_32.wav", "onoffCaption": "duck quacking at 0.0-2.0, 2.5-4.5, 5.0-7.0", "frequencyCaption": "duck quacking three times"}
+ {"filepath": "data/multi_event_test/syn_35.wav", "onoffCaption": "car horn honking at 0.5-2.5, 3.0-5.0", "frequencyCaption": "car horn honking two times"}
+ {"filepath": "data/multi_event_test/syn_43.wav", "onoffCaption": "dog barking at 0.0-2.0, 2.5-4.5 and burping belching at 5.0-7.0, 7.5-9.5 and explosion at 4.8-7.8", "frequencyCaption": "dog barking two times and burping belching two times and explosion one times"}
+ {"filepath": "data/multi_event_test/syn_44.wav", "onoffCaption": "sneeze at 0.5-1.5", "frequencyCaption": "sneeze one times"}
+ {"filepath": "data/multi_event_test/syn_50.wav", "onoffCaption": "car horn honking at 0.0-2.0, 3.0-5.0 and sneeze at 6.0-7.0 and train horn at 8.0-10.0", "frequencyCaption": "car horn honking two times and sneeze one times and train horn one times"}
+ {"filepath": "data/multi_event_test/syn_57.wav", "onoffCaption": "dog barking at 0.0-2.0, 3.0-5.0 and cow mooing at 6.0-9.0", "frequencyCaption": "dog barking two times and cow mooing one times"}
+ {"filepath": "data/multi_event_test/syn_59.wav", "onoffCaption": "door slamming at 0.0-1.0 and explosion at 1.5-4.5, 5.0-8.0", "frequencyCaption": "door slamming one times and explosion two times"}
+ {"filepath": "data/multi_event_test/syn_60.wav", "onoffCaption": "train horn at 0-3", "frequencyCaption": "train horn one times"}
+ {"filepath": "data/multi_event_test/syn_67.wav", "onoffCaption": "whistling at 0.204-5.379", "frequencyCaption": "whistling one times"}
+ {"filepath": "data/multi_event_test/syn_69.wav", "onoffCaption": "door knocking at 0-1, 2-3, 4-5", "frequencyCaption": "door knocking three times"}
+ {"filepath": "data/multi_event_test/syn_73.wav", "onoffCaption": "door knocking at 0-1, 1-2, 2-3 and sneeze at 3-4, 4-5", "frequencyCaption": "door knocking three times and sneeze two times"}
+ {"filepath": "data/multi_event_test/syn_74.wav", "onoffCaption": "spraying at 0.5-1.0, 1.5-2.0 and gunshot at 3.0-4.0, 5.0-6.0, 7.0-8.0", "frequencyCaption": "spraying two times and gunshot three times"}
+ {"filepath": "data/multi_event_test/syn_82.wav", "onoffCaption": "dog barking at 0.464-2.464, 4.19-6.19", "frequencyCaption": "dog barking two times"}
+ {"filepath": "data/multi_event_test/syn_91.wav", "onoffCaption": "gunshot at 0-1, 2-3", "frequencyCaption": "gunshot two times"}
+ {"filepath": "data/multi_event_test/syn_96.wav", "onoffCaption": "door slamming at 0-1, 2-3, 4-5", "frequencyCaption": "door slamming three times"}
+ {"filepath": "data/multi_event_test/syn_98.wav", "onoffCaption": "thump thud at 1.017-4.684, 5.695-9.362", "frequencyCaption": "thump thud two times"}
+ {"filepath": "data/multi_event_test/syn_101.wav", "onoffCaption": "dog barking at 0.464-2.464", "frequencyCaption": "dog barking one times"}
+ {"filepath": "data/multi_event_test/syn_106.wav", "onoffCaption": "burping belching at 0.0-1.0, 2.0-3.0, 4.0-5.0", "frequencyCaption": "burping belching three times"}
+ {"filepath": "data/multi_event_test/syn_108.wav", "onoffCaption": "sneeze at 0.5-1.5", "frequencyCaption": "sneeze one times"}
+ {"filepath": "data/multi_event_test/syn_112.wav", "onoffCaption": "woman laughing at 0.004-2.372, 3.672-6.653", "frequencyCaption": "woman laughing two times"}
+ {"filepath": "data/multi_event_test/syn_115.wav", "onoffCaption": "duck quacking at 0.3-2.3 and tapping clicking clanking at 2.5-5.5, 6.0-9.0", "frequencyCaption": "duck quacking one times and tapping clicking clanking two times"}
+ {"filepath": "data/multi_event_test/syn_122.wav", "onoffCaption": "door knocking at 0-1, 3-4, 6-7", "frequencyCaption": "door knocking three times"}
+ {"filepath": "data/multi_event_test/syn_125.wav", "onoffCaption": "cow mooing at 1.5-4.5, 5.5-8.5", "frequencyCaption": "cow mooing two times"}
+ {"filepath": "data/multi_event_test/syn_131.wav", "onoffCaption": "whistling at 0-1, 2-3 and cat meowing at 1-2", "frequencyCaption": "whistling two times and cat meowing one times"}
+ {"filepath": "data/multi_event_test/syn_136.wav", "onoffCaption": "sheep goat bleating at 0.5-2.5, 3.5-5.5 and whistling at 6.0-8.0, 8.5-9.5 and woman laughing at 2.0-4.0", "frequencyCaption": "sheep goat bleating two times and whistling two times and woman laughing one times"}
+ {"filepath": "data/multi_event_test/syn_138.wav", "onoffCaption": "gunshot at 0.0-1.0 and tapping clicking clanking at 1.5-5.0", "frequencyCaption": "gunshot one times and tapping clicking clanking one times"}
+ {"filepath": "data/multi_event_test/syn_140.wav", "onoffCaption": "door knocking at 0.00-2.00, 3.00-5.00, 6.00-8.00", "frequencyCaption": "door knocking three times"}
+ {"filepath": "data/multi_event_test/syn_147.wav", "onoffCaption": "door slamming at 0-1, 2-3, 4-5", "frequencyCaption": "door slamming three times"}
+ {"filepath": "data/multi_event_test/syn_149.wav", "onoffCaption": "car horn honking at 0.0-2.0, 3.0-5.0 and spraying at 5.5-6.0, 7.0-7.5", "frequencyCaption": "car horn honking two times and spraying two times"}
+ {"filepath": "data/multi_event_test/syn_153.wav", "onoffCaption": "cat meowing at 0-1.0", "frequencyCaption": "cat meowing one times"}
+ {"filepath": "data/multi_event_test/syn_154.wav", "onoffCaption": "cat meowing at 0.5-1.5 and door knocking at 2-3.5", "frequencyCaption": "cat meowing one times and door knocking one times"}
+ {"filepath": "data/multi_event_test/syn_163.wav", "onoffCaption": "sheep goat bleating at 0-1, 2-3, 4-5", "frequencyCaption": "sheep goat bleating three times"}
+ {"filepath": "data/multi_event_test/syn_164.wav", "onoffCaption": "whistling at 0.204-5.379, 7.724-10.0", "frequencyCaption": "whistling two times"}
+ {"filepath": "data/multi_event_test/syn_170.wav", "onoffCaption": "dog barking at 0.464-2.464, 4.19-6.19", "frequencyCaption": "dog barking two times"}
+ {"filepath": "data/multi_event_test/syn_177.wav", "onoffCaption": "thump thud at 0-1 and cow mooing at 1-2", "frequencyCaption": "thump thud one times and cow mooing one times"}
+ {"filepath": "data/multi_event_test/syn_179.wav", "onoffCaption": "cow mooing at 1.954-4.602, 5.719-8.729", "frequencyCaption": "cow mooing two times"}
+ {"filepath": "data/multi_event_test/syn_181.wav", "onoffCaption": "cow mooing at 1.0-3.0, 4.0-6.0", "frequencyCaption": "cow mooing two times"}
+ {"filepath": "data/multi_event_test/syn_186.wav", "onoffCaption": "gunshot at 0.0-1.0", "frequencyCaption": "gunshot one times"}
+ {"filepath": "data/multi_event_test/syn_188.wav", "onoffCaption": "gunshot at 0-1, 1-2 and duck quacking at 2-3", "frequencyCaption": "gunshot two times and duck quacking one times"}
+ {"filepath": "data/multi_event_test/syn_192.wav", "onoffCaption": "spraying at 0.0-1.0, 2.0-3.0", "frequencyCaption": "spraying two times"}
+ {"filepath": "data/multi_event_test/syn_195.wav", "onoffCaption": "thump thud at 1.017-4.684, 5.695-9.362", "frequencyCaption": "thump thud two times"}
+ {"filepath": "data/multi_event_test/syn_3.wav", "onoffCaption": "tapping clicking clanking at 0.5-3.0, 4.0-7.5", "frequencyCaption": "tapping clicking clanking two times"}
+ {"filepath": "data/multi_event_test/syn_4.wav", "onoffCaption": "door slamming at 0.145-1.085, 2.545-4.463", "frequencyCaption": "door slamming two times"}
+ {"filepath": "data/multi_event_test/syn_13.wav", "onoffCaption": "duck quacking at 0.0-2.0, 2.5-4.5, 5.0-7.0", "frequencyCaption": "duck quacking three times"}
+ {"filepath": "data/multi_event_test/syn_14.wav", "onoffCaption": "sneeze at 0.38-1.38", "frequencyCaption": "sneeze one times"}
+ {"filepath": "data/multi_event_test/syn_23.wav", "onoffCaption": "sneeze at 0.5-1.5, 2.5-3.5", "frequencyCaption": "sneeze two times"}
+ {"filepath": "data/multi_event_test/syn_24.wav", "onoffCaption": "woman laughing at 2.782-5.368", "frequencyCaption": "woman laughing one times"}
+ {"filepath": "data/multi_event_test/syn_30.wav", "onoffCaption": "burping belching at 0.871-3.871, 4.871-7.871", "frequencyCaption": "burping belching two times"}
+ {"filepath": "data/multi_event_test/syn_37.wav", "onoffCaption": "thump thud at 0.0-1.5, 5.0-6.5 and door knocking at 1.5-3.5, 6.5-8.5 and burping belching at 3.5-4.5", "frequencyCaption": "thump thud two times and door knocking two times and burping belching one times"}
+ {"filepath": "data/multi_event_test/syn_39.wav", "onoffCaption": "train horn at 0.0-2.0, 2.5-4.5", "frequencyCaption": "train horn two times"}
+ {"filepath": "data/multi_event_test/syn_41.wav", "onoffCaption": "thump thud at 0.0-1.0", "frequencyCaption": "thump thud one times"}
+ {"filepath": "data/multi_event_test/syn_48.wav", "onoffCaption": "cat meowing at 0-1, 1-2, 2-3", "frequencyCaption": "cat meowing three times"}
+ {"filepath": "data/multi_event_test/syn_52.wav", "onoffCaption": "gunshot at 0.0-1.0 and duck quacking at 1.5-2.5 and tapping clicking clanking at 3.0-4.0", "frequencyCaption": "gunshot one times and duck quacking one times and tapping clicking clanking one times"}
+ {"filepath": "data/multi_event_test/syn_55.wav", "onoffCaption": "sneeze at 1.3-2.403, 4.759-6.442", "frequencyCaption": "sneeze two times"}
+ {"filepath": "data/multi_event_test/syn_62.wav", "onoffCaption": "woman laughing at 0.004-2.372, 3.672-6.653", "frequencyCaption": "woman laughing two times"}
+ {"filepath": "data/multi_event_test/syn_65.wav", "onoffCaption": "gunshot at 0.0-2.0, 2.1-4.1", "frequencyCaption": "gunshot two times"}
+ {"filepath": "data/multi_event_test/syn_71.wav", "onoffCaption": "door slamming at 0.0-1.0, 2.0-3.0 and whistling at 4.0-8.0", "frequencyCaption": "door slamming two times and whistling one times"}
+ {"filepath": "data/multi_event_test/syn_76.wav", "onoffCaption": "dog barking at 0.464-2.464", "frequencyCaption": "dog barking one times"}
+ {"filepath": "data/multi_event_test/syn_78.wav", "onoffCaption": "explosion at 0.0-2.0, 2.5-4.5 and duck quacking at 5.0-7.0, 7.5-9.5", "frequencyCaption": "explosion two times and duck quacking two times"}
+ {"filepath": "data/multi_event_test/syn_80.wav", "onoffCaption": "door slamming at 0.0-1.0 and sheep goat bleating at 2.0-4.0", "frequencyCaption": "door slamming one times and sheep goat bleating one times"}
+ {"filepath": "data/multi_event_test/syn_85.wav", "onoffCaption": "door knocking at 2.047-4.422", "frequencyCaption": "door knocking one times"}
+ {"filepath": "data/multi_event_test/syn_87.wav", "onoffCaption": "explosion at 1.773-4.034, 5.15-7.411", "frequencyCaption": "explosion two times"}
+ {"filepath": "data/multi_event_test/syn_89.wav", "onoffCaption": "car horn honking at 0.0-2.0 and cat meowing at 2.5-4.0", "frequencyCaption": "car horn honking one times and cat meowing one times"}
+ {"filepath": "data/multi_event_test/syn_93.wav", "onoffCaption": "dog barking at 0-2, 2-4", "frequencyCaption": "dog barking two times"}
+ {"filepath": "data/multi_event_test/syn_94.wav", "onoffCaption": "gunshot at 0.0-2.0, 2.5-4.5, 5.0-7.0 and thump thud at 1.0-3.0, 4.0-6.0 and sheep goat bleating at 2.0-4.0, 7.0-9.0", "frequencyCaption": "gunshot three times and thump thud two times and sheep goat bleating two times"}
+ {"filepath": "data/multi_event_test/syn_103.wav", "onoffCaption": "whistling at 2.158-10.0", "frequencyCaption": "whistling one times"}
+ {"filepath": "data/multi_event_test/syn_104.wav", "onoffCaption": "duck quacking at 0-1", "frequencyCaption": "duck quacking one times"}
+ {"filepath": "data/multi_event_test/syn_110.wav", "onoffCaption": "train horn at 0-1 and duck quacking at 1-2 and cow mooing at 2-3", "frequencyCaption": "train horn one times and duck quacking one times and cow mooing one times"}
+ {"filepath": "data/multi_event_test/syn_117.wav", "onoffCaption": "sheep goat bleating at 1.0-3.0, 4.5-6.5", "frequencyCaption": "sheep goat bleating two times"}
+ {"filepath": "data/multi_event_test/syn_119.wav", "onoffCaption": "train horn at 0.0-2.0 and door knocking at 2.5-4.5, 5.0-7.0", "frequencyCaption": "train horn one times and door knocking two times"}
+ {"filepath": "data/multi_event_test/syn_120.wav", "onoffCaption": "burping belching at 0.871-2.871, 3.871-5.871", "frequencyCaption": "burping belching two times"}
+ {"filepath": "data/multi_event_test/syn_127.wav", "onoffCaption": "whistling at 2.158-10.0", "frequencyCaption": "whistling one times"}
+ {"filepath": "data/multi_event_test/syn_129.wav", "onoffCaption": "door knocking at 0-1, 1-2, 2-3", "frequencyCaption": "door knocking three times"}
+ {"filepath": "data/multi_event_test/syn_133.wav", "onoffCaption": "duck quacking at 2.203-4.203, 5.361-7.361", "frequencyCaption": "duck quacking two times"}
+ {"filepath": "data/multi_event_test/syn_134.wav", "onoffCaption": "car horn honking at 1.0-3.0, 4.0-6.0", "frequencyCaption": "car horn honking two times"}
+ {"filepath": "data/multi_event_test/syn_142.wav", "onoffCaption": "sneeze at 0.5-1.5, 2.0-3.0", "frequencyCaption": "sneeze two times"}
+ {"filepath": "data/multi_event_test/syn_145.wav", "onoffCaption": "door knocking at 0.002-2.092, 2.842-5.601 and whistling at 1.9-10.0", "frequencyCaption": "door knocking two times and whistling one times"}
+ {"filepath": "data/multi_event_test/syn_151.wav", "onoffCaption": "dog barking at 0.121-2.121, 3.824-5.824, 7.767-9.767", "frequencyCaption": "dog barking three times"}
+ {"filepath": "data/multi_event_test/syn_156.wav", "onoffCaption": "car horn honking at 0.0-1.0, 2.0-3.0, 4.0-5.0", "frequencyCaption": "car horn honking three times"}
+ {"filepath": "data/multi_event_test/syn_158.wav", "onoffCaption": "tapping clicking clanking at 1.5-4.5, 5.5-8.5", "frequencyCaption": "tapping clicking clanking two times"}
+ {"filepath": "data/multi_event_test/syn_161.wav", "onoffCaption": "spraying at 0-1, 2-3", "frequencyCaption": "spraying two times"}
+ {"filepath": "data/multi_event_test/syn_166.wav", "onoffCaption": "woman laughing at 1.672-3.955", "frequencyCaption": "woman laughing one times"}
+ {"filepath": "data/multi_event_test/syn_168.wav", "onoffCaption": "sheep goat bleating at 0.56-2.56", "frequencyCaption": "sheep goat bleating one times"}
+ {"filepath": "data/multi_event_test/syn_172.wav", "onoffCaption": "door knocking at 0-1, 1-2, 2-3", "frequencyCaption": "door knocking three times"}
+ {"filepath": "data/multi_event_test/syn_175.wav", "onoffCaption": "cow mooing at 0-3 and spraying at 3-6", "frequencyCaption": "cow mooing one times and spraying one times"}
+ {"filepath": "data/multi_event_test/syn_183.wav", "onoffCaption": "explosion at 0.0-2.0, 2.1-4.1", "frequencyCaption": "explosion two times"}
+ {"filepath": "data/multi_event_test/syn_184.wav", "onoffCaption": "sheep goat bleating at 0-1", "frequencyCaption": "sheep goat bleating one times"}
+ {"filepath": "data/multi_event_test/syn_190.wav", "onoffCaption": "whistling at 0.0-1.0", "frequencyCaption": "whistling one times"}
+ {"filepath": "data/multi_event_test/syn_197.wav", "onoffCaption": "tapping clicking clanking at 0.032-2.032, 2.532-4.532", "frequencyCaption": "tapping clicking clanking two times"}
+ {"filepath": "data/multi_event_test/syn_199.wav", "onoffCaption": "duck quacking at 0.0-2.0 and cat meowing at 2.5-4.5", "frequencyCaption": "duck quacking one times and cat meowing one times"}
+ {"filepath": "data/multi_event_test/syn_200.wav", "onoffCaption": "explosion at 1.0-3.0, 4.0-6.0", "frequencyCaption": "explosion two times"}
+ {"filepath": "data/multi_event_test/syn_2.wav", "onoffCaption": "door knocking at 0.0-1.0", "frequencyCaption": "door knocking one times"}
+ {"filepath": "data/multi_event_test/syn_5.wav", "onoffCaption": "burping belching at 0.359-2.774", "frequencyCaption": "burping belching one times"}
+ {"filepath": "data/multi_event_test/syn_12.wav", "onoffCaption": "sheep goat bleating at 0.0-2.0 and sneeze at 2.5-3.5", "frequencyCaption": "sheep goat bleating one times and sneeze one times"}
+ {"filepath": "data/multi_event_test/syn_15.wav", "onoffCaption": "tapping clicking clanking at 2.992-6.432", "frequencyCaption": "tapping clicking clanking one times"}
+ {"filepath": "data/multi_event_test/syn_22.wav", "onoffCaption": "whistling at 2.158-10.0", "frequencyCaption": "whistling one times"}
+ {"filepath": "data/multi_event_test/syn_25.wav", "onoffCaption": "burping belching at 0.871-3.871, 4.391-7.391", "frequencyCaption": "burping belching two times"}
+ {"filepath": "data/multi_event_test/syn_31.wav", "onoffCaption": "woman laughing at 0-1, 2-3", "frequencyCaption": "woman laughing two times"}
+ {"filepath": "data/multi_event_test/syn_36.wav", "onoffCaption": "door slamming at 0.5-1.5, 2.0-3.0, 3.5-4.5", "frequencyCaption": "door slamming three times"}
+ {"filepath": "data/multi_event_test/syn_38.wav", "onoffCaption": "cat meowing at 0-1, 2-3, 4-5", "frequencyCaption": "cat meowing three times"}
+ {"filepath": "data/multi_event_test/syn_40.wav", "onoffCaption": "door knocking at 0.138-2.518, 3.708-6.088 and cow mooing at 6.91-9.447", "frequencyCaption": "door knocking two times and cow mooing one times"}
+ {"filepath": "data/multi_event_test/syn_46.wav", "onoffCaption": "door slamming at 1.145-2.085, 3.545-4.463", "frequencyCaption": "door slamming two times"}
+ {"filepath": "data/multi_event_test/syn_47.wav", "onoffCaption": "spraying at 0.0-1.0, 2.0-3.0, 4.0-5.0 and cow mooing at 6.0-8.0, 8.5-10.0", "frequencyCaption": "spraying three times and cow mooing two times"}
+ {"filepath": "data/multi_event_test/syn_49.wav", "onoffCaption": "sheep goat bleating at 0.5-2.5, 3.0-5.0 and tapping clicking clanking at 0.0-4.0, 5.5-9.5", "frequencyCaption": "sheep goat bleating two times and tapping clicking clanking two times"}
+ {"filepath": "data/multi_event_test/syn_51.wav", "onoffCaption": "train horn at 0.873-4.633, 5.147-8.907", "frequencyCaption": "train horn two times"}
+ {"filepath": "data/multi_event_test/syn_53.wav", "onoffCaption": "dog barking at 0-1, 2-3, 4-5", "frequencyCaption": "dog barking three times"}
+ {"filepath": "data/multi_event_test/syn_54.wav", "onoffCaption": "train horn at 0.0-2.0, 2.5-4.5", "frequencyCaption": "train horn two times"}
+ {"filepath": "data/multi_event_test/syn_63.wav", "onoffCaption": "train horn at 0-1 and cat meowing at 2-3 and dog barking at 4-5", "frequencyCaption": "train horn one times and cat meowing one times and dog barking one times"}
+ {"filepath": "data/multi_event_test/syn_64.wav", "onoffCaption": "sheep goat bleating at 0.56-2.56", "frequencyCaption": "sheep goat bleating one times"}
+ {"filepath": "data/multi_event_test/syn_70.wav", "onoffCaption": "whistling at 2.158-10.0", "frequencyCaption": "whistling one times"}
+ {"filepath": "data/multi_event_test/syn_77.wav", "onoffCaption": "cow mooing at 0.0-3.0", "frequencyCaption": "cow mooing one times"}
+ {"filepath": "data/multi_event_test/syn_79.wav", "onoffCaption": "dog barking at 0-1, 2-3, 4-5", "frequencyCaption": "dog barking three times"}
+ {"filepath": "data/multi_event_test/syn_81.wav", "onoffCaption": "gunshot at 0.0-2.0, 3.0-5.0, 6.0-8.0", "frequencyCaption": "gunshot three times"}
+ {"filepath": "data/multi_event_test/syn_86.wav", "onoffCaption": "whistling at 0-1 and woman laughing at 1-3, 3-5", "frequencyCaption": "whistling one times and woman laughing two times"}
+ {"filepath": "data/multi_event_test/syn_88.wav", "onoffCaption": "sheep goat bleating at 1.0-3.0, 4.0-6.0", "frequencyCaption": "sheep goat bleating two times"}
+ {"filepath": "data/multi_event_test/syn_92.wav", "onoffCaption": "door slamming at 0.0-1.0, 2.0-3.0, 4.0-5.0 and tapping clicking clanking at 6.0-7.0", "frequencyCaption": "door slamming three times and tapping clicking clanking one times"}
+ {"filepath": "data/multi_event_test/syn_95.wav", "onoffCaption": "door slamming at 0-1, 2-3, 4-5", "frequencyCaption": "door slamming three times"}
+ {"filepath": "data/multi_event_test/syn_102.wav", "onoffCaption": "door knocking at 1.973-5.029, 6.285-9.132", "frequencyCaption": "door knocking two times"}
+ {"filepath": "data/multi_event_test/syn_105.wav", "onoffCaption": "train horn at 0.0-1.0, 2.0-3.0, 4.0-5.0", "frequencyCaption": "train horn three times"}
+ {"filepath": "data/multi_event_test/syn_111.wav", "onoffCaption": "whistling at 0.204-2.79, 4.0-6.586 and door slamming at 7.0-8.0", "frequencyCaption": "whistling two times and door slamming one times"}
+ {"filepath": "data/multi_event_test/syn_116.wav", "onoffCaption": "burping belching at 1.0-3.0, 4.0-6.0", "frequencyCaption": "burping belching two times"}
+ {"filepath": "data/multi_event_test/syn_118.wav", "onoffCaption": "sneeze at 0.0-1.0", "frequencyCaption": "sneeze one times"}
+ {"filepath": "data/multi_event_test/syn_121.wav", "onoffCaption": "car horn honking at 0.0-1.0, 2.0-3.0, 4.0-5.0", "frequencyCaption": "car horn honking three times"}
+ {"filepath": "data/multi_event_test/syn_123.wav", "onoffCaption": "sheep goat bleating at 0.65-2.65, 3.65-5.65", "frequencyCaption": "sheep goat bleating two times"}
+ {"filepath": "data/multi_event_test/syn_126.wav", "onoffCaption": "sneeze at 0.373-2.332 and car horn honking at 1.03-5.542, 6.081-10.0", "frequencyCaption": "sneeze one times and car horn honking two times"}
+ {"filepath": "data/multi_event_test/syn_128.wav", "onoffCaption": "sheep goat bleating at 1.0-3.0 and door knocking at 3.5-5.5, 6.0-8.0", "frequencyCaption": "sheep goat bleating one times and door knocking two times"}
+ {"filepath": "data/multi_event_test/syn_132.wav", "onoffCaption": "sheep goat bleating at 0.0-2.0 and spraying at 2.5-3.0, 4.0-4.5, 5.5-6.0 and duck quacking at 6.5-7.5, 8.0-9.0, 9.5-10.0", "frequencyCaption": "sheep goat bleating one times and spraying three times and duck quacking three times"}
+ {"filepath": "data/multi_event_test/syn_135.wav", "onoffCaption": "tapping clicking clanking at 1.0-3.0, 4.0-6.0", "frequencyCaption": "tapping clicking clanking two times"}
+ {"filepath": "data/multi_event_test/syn_139.wav", "onoffCaption": "thump thud at 1.017-4.684, 5.695-9.362", "frequencyCaption": "thump thud two times"}
+ {"filepath": "data/multi_event_test/syn_143.wav", "onoffCaption": "spraying at 0.0-1.0 and explosion at 1.5-4.5", "frequencyCaption": "spraying one times and explosion one times"}
+ {"filepath": "data/multi_event_test/syn_144.wav", "onoffCaption": "duck quacking at 0-1, 2-3, 4-5", "frequencyCaption": "duck quacking three times"}
+ {"filepath": "data/multi_event_test/syn_150.wav", "onoffCaption": "gunshot at 0.0-2.0, 2.5-4.5, 5.0-7.0", "frequencyCaption": "gunshot three times"}
+ {"filepath": "data/multi_event_test/syn_157.wav", "onoffCaption": "train horn at 0-3.5", "frequencyCaption": "train horn one times"}
+ {"filepath": "data/multi_event_test/syn_159.wav", "onoffCaption": "door slamming at 0.145-1.085, 2.545-4.463", "frequencyCaption": "door slamming two times"}
+ {"filepath": "data/multi_event_test/syn_160.wav", "onoffCaption": "spraying at 0.0-1.0 and whistling at 1.0-3.0", "frequencyCaption": "spraying one times and whistling one times"}
+ {"filepath": "data/multi_event_test/syn_167.wav", "onoffCaption": "burping belching at 0.0-2.0, 2.5-4.5 and gunshot at 5.0-7.0", "frequencyCaption": "burping belching two times and gunshot one times"}
+ {"filepath": "data/multi_event_test/syn_169.wav", "onoffCaption": "sneeze at 0.373-2.332, 3.255-5.716", "frequencyCaption": "sneeze two times"}
+ {"filepath": "data/multi_event_test/syn_173.wav", "onoffCaption": "sheep goat bleating at 0-1, 2-3, 4-5", "frequencyCaption": "sheep goat bleating three times"}
+ {"filepath": "data/multi_event_test/syn_174.wav", "onoffCaption": "dog barking at 0.0-2.0, 2.5-4.5, 5.0-7.0", "frequencyCaption": "dog barking three times"}
+ {"filepath": "data/multi_event_test/syn_176.wav", "onoffCaption": "woman laughing at 1.625-3.98, 4.735-6.981", "frequencyCaption": "woman laughing two times"}
+ {"filepath": "data/multi_event_test/syn_182.wav", "onoffCaption": "cow mooing at 0.0-3.0 and gunshot at 4.0-5.0", "frequencyCaption": "cow mooing one times and gunshot one times"}
+ {"filepath": "data/multi_event_test/syn_185.wav", "onoffCaption": "spraying at 0.22-0.847 and door knocking at 2.797-5.334", "frequencyCaption": "spraying one times and door knocking one times"}
+ {"filepath": "data/multi_event_test/syn_189.wav", "onoffCaption": "door slamming at 0.145-1.085, 2.545-4.463", "frequencyCaption": "door slamming two times"}
+ {"filepath": "data/multi_event_test/syn_191.wav", "onoffCaption": "burping belching at 0.0-2.0, 2.5-4.5", "frequencyCaption": "burping belching two times"}
+ {"filepath": "data/multi_event_test/syn_193.wav", "onoffCaption": "cow mooing at 1.0-3.0, 4.0-6.0", "frequencyCaption": "cow mooing two times"}
+ {"filepath": "data/multi_event_test/syn_196.wav", "onoffCaption": "spraying at 0.0-1.0", "frequencyCaption": "spraying one times"}
+ {"filepath": "data/multi_event_test/syn_198.wav", "onoffCaption": "gunshot at 0.0-2.0, 3.0-5.0, 6.0-8.0", "frequencyCaption": "gunshot three times"}
+ {"filepath": "data/multi_event_test/syn_7.wav", "onoffCaption": "spraying at 0.0-1.0 and burping belching at 1.5-2.5", "frequencyCaption": "spraying one times and burping belching one times"}
+ {"filepath": "data/multi_event_test/syn_9.wav", "onoffCaption": "cow mooing at 0.0-3.0", "frequencyCaption": "cow mooing one times"}
+ {"filepath": "data/multi_event_test/syn_10.wav", "onoffCaption": "door knocking at 2-4, 5-7", "frequencyCaption": "door knocking two times"}
+ {"filepath": "data/multi_event_test/syn_17.wav", "onoffCaption": "dog barking at 0-1, 2-3", "frequencyCaption": "dog barking two times"}
+ {"filepath": "data/multi_event_test/syn_19.wav", "onoffCaption": "gunshot at 0.0-1.0 and spraying at 1.5-2.5", "frequencyCaption": "gunshot one times and spraying one times"}
+ {"filepath": "data/multi_event_test/syn_20.wav", "onoffCaption": "tapping clicking clanking at 1.0-3.0, 4.0-6.0", "frequencyCaption": "tapping clicking clanking two times"}
+ {"filepath": "data/multi_event_test/syn_27.wav", "onoffCaption": "dog barking at 0.464-2.464, 4.19-6.19", "frequencyCaption": "dog barking two times"}
+ {"filepath": "data/multi_event_test/syn_29.wav", "onoffCaption": "tapping clicking clanking at 1.0-3.0, 4.0-6.0", "frequencyCaption": "tapping clicking clanking two times"}
+ {"filepath": "data/multi_event_test/syn_33.wav", "onoffCaption": "dog barking at 0.0-2.0, 2.5-4.5 and car horn honking at 5.0-7.0", "frequencyCaption": "dog barking two times and car horn honking one times"}
+ {"filepath": "data/multi_event_test/syn_34.wav", "onoffCaption": "sheep goat bleating at 1.575-3.575", "frequencyCaption": "sheep goat bleating one times"}
+ {"filepath": "data/multi_event_test/syn_42.wav", "onoffCaption": "tapping clicking clanking at 0.0-2.0, 2.5-4.5", "frequencyCaption": "tapping clicking clanking two times"}
+ {"filepath": "data/multi_event_test/syn_45.wav", "onoffCaption": "cat meowing at 0.5-1.5 and train horn at 2.0-6.0", "frequencyCaption": "cat meowing one times and train horn one times"}
+ {"filepath": "data/multi_event_test/syn_56.wav", "onoffCaption": "tapping clicking clanking at 0.961-4.401, 6.37-9.81", "frequencyCaption": "tapping clicking clanking two times"}
+ {"filepath": "data/multi_event_test/syn_58.wav", "onoffCaption": "door slamming at 0.355-2.581", "frequencyCaption": "door slamming one times"}
+ {"filepath": "data/multi_event_test/syn_61.wav", "onoffCaption": "explosion at 0.5-3.5, 4.0-7.0 and train horn at 7.5-10.0 and woman laughing at 1.0-4.0", "frequencyCaption": "explosion two times and train horn one times and woman laughing one times"}
+ {"filepath": "data/multi_event_test/syn_66.wav", "onoffCaption": "sheep goat bleating at 0.56-2.56", "frequencyCaption": "sheep goat bleating one times"}
+ {"filepath": "data/multi_event_test/syn_68.wav", "onoffCaption": "car horn honking at 0.0-2.0, 2.5-4.5, 5.0-7.0", "frequencyCaption": "car horn honking three times"}
+ {"filepath": "data/multi_event_test/syn_72.wav", "onoffCaption": "spraying at 0.0-0.6, 1.0-1.6 and thump thud at 2.0-3.6 and dog barking at 4.0-6.0", "frequencyCaption": "spraying two times and thump thud one times and dog barking one times"}
+ {"filepath": "data/multi_event_test/syn_75.wav", "onoffCaption": "explosion at 0.5-2.5, 2.501-4.501", "frequencyCaption": "explosion two times"}
+ {"filepath": "data/multi_event_test/syn_83.wav", "onoffCaption": "whistling at 2.158-10.0", "frequencyCaption": "whistling one times"}
+ {"filepath": "data/multi_event_test/syn_84.wav", "onoffCaption": "burping belching at 0.871-3.871, 4.642-7.642", "frequencyCaption": "burping belching two times"}
+ {"filepath": "data/multi_event_test/syn_90.wav", "onoffCaption": "gunshot at 0.2-1.2", "frequencyCaption": "gunshot one times"}
+ {"filepath": "data/multi_event_test/syn_97.wav", "onoffCaption": "cat meowing at 0.5-1.5", "frequencyCaption": "cat meowing one times"}
+ {"filepath": "data/multi_event_test/syn_99.wav", "onoffCaption": "duck quacking at 0.0-2.0, 2.0-4.0", "frequencyCaption": "duck quacking two times"}
+ {"filepath": "data/multi_event_test/syn_100.wav", "onoffCaption": "cat meowing at 0.0-2.0 and sheep goat bleating at 3.0-5.0, 6.0-8.0, 9.0-10.0", "frequencyCaption": "cat meowing one times and sheep goat bleating three times"}
+ {"filepath": "data/multi_event_test/syn_107.wav", "onoffCaption": "spraying at 0.0-1.5, 2.0-3.5 and dog barking at 4.0-6.0, 7.0-9.0 and tapping clicking clanking at 1.6-3.1, 3.6-5.1", "frequencyCaption": "spraying two times and dog barking two times and tapping clicking clanking two times"}
+ {"filepath": "data/multi_event_test/syn_109.wav", "onoffCaption": "cow mooing at 0.0-3.0 and gunshot at 3.5-4.5", "frequencyCaption": "cow mooing one times and gunshot one times"}
+ {"filepath": "data/multi_event_test/syn_113.wav", "onoffCaption": "whistling at 0.742-5.917 and tapping clicking clanking at 2.992-6.432", "frequencyCaption": "whistling one times and tapping clicking clanking one times"}
+ {"filepath": "data/multi_event_test/syn_114.wav", "onoffCaption": "car horn honking at 0-2 and door knocking at 2-4", "frequencyCaption": "car horn honking one times and door knocking one times"}
+ {"filepath": "data/multi_event_test/syn_124.wav", "onoffCaption": "gunshot at 0.0-2.0, 3.0-5.0", "frequencyCaption": "gunshot two times"}
+ {"filepath": "data/multi_event_test/syn_130.wav", "onoffCaption": "dog barking at 0.0-2.0, 2.5-4.5, 5.0-7.0", "frequencyCaption": "dog barking three times"}
+ {"filepath": "data/multi_event_test/syn_137.wav", "onoffCaption": "door knocking at 0-1 and cow mooing at 2-3 and gunshot at 4-5", "frequencyCaption": "door knocking one times and cow mooing one times and gunshot one times"}
+ {"filepath": "data/multi_event_test/syn_141.wav", "onoffCaption": "sneeze at 0.33-1.403, 2.759-3.832", "frequencyCaption": "sneeze two times"}
+ {"filepath": "data/multi_event_test/syn_146.wav", "onoffCaption": "sneeze at 0.0-1.0, 2.0-3.0 and cat meowing at 4.0-5.0", "frequencyCaption": "sneeze two times and cat meowing one times"}
+ {"filepath": "data/multi_event_test/syn_148.wav", "onoffCaption": "duck quacking at 0-1, 2-3 and cow mooing at 4-5", "frequencyCaption": "duck quacking two times and cow mooing one times"}
+ {"filepath": "data/multi_event_test/syn_152.wav", "onoffCaption": "tapping clicking clanking at 0.0-1.0, 1.5-2.5 and train horn at 3.0-7.0", "frequencyCaption": "tapping clicking clanking two times and train horn one times"}
+ {"filepath": "data/multi_event_test/syn_155.wav", "onoffCaption": "tapping clicking clanking at 0.0-1.0 and gunshot at 2.0-3.0 and cat meowing at 4.0-5.0", "frequencyCaption": "tapping clicking clanking one times and gunshot one times and cat meowing one times"}
+ {"filepath": "data/multi_event_test/syn_162.wav", "onoffCaption": "gunshot at 0.0-2.0, 2.5-4.5, 5.0-7.0", "frequencyCaption": "gunshot three times"}
+ {"filepath": "data/multi_event_test/syn_165.wav", "onoffCaption": "thump thud at 0.0-1.5, 2.0-3.5 and whistling at 4.0-7.0", "frequencyCaption": "thump thud two times and whistling one times"}
+ {"filepath": "data/multi_event_test/syn_171.wav", "onoffCaption": "spraying at 0.0-0.5, 1.5-2.0, 3.0-3.5 and thump thud at 4.0-5.0 and sheep goat bleating at 5.5-6.5, 7.0-8.0", "frequencyCaption": "spraying three times and thump thud one times and sheep goat bleating two times"}
+ {"filepath": "data/multi_event_test/syn_178.wav", "onoffCaption": "door slamming at 0.355-2.581 and woman laughing at 0.964-3.319", "frequencyCaption": "door slamming one times and woman laughing one times"}
+ {"filepath": "data/multi_event_test/syn_180.wav", "onoffCaption": "spraying at 0.0-1.0 and cow mooing at 2.0-5.0", "frequencyCaption": "spraying one times and cow mooing one times"}
+ {"filepath": "data/multi_event_test/syn_187.wav", "onoffCaption": "sneeze at 1.3-2.403, 4.759-6.442", "frequencyCaption": "sneeze two times"}
+ {"filepath": "data/multi_event_test/syn_194.wav", "onoffCaption": "duck quacking at 0-1, 2-3", "frequencyCaption": "duck quacking two times"}
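Each line added above is one JSON object (JSONL). The "onoffCaption" field lists event labels with on/off timestamps in seconds, clauses joined by " and "; the "frequencyCaption" field restates each event as a spelled-out occurrence count. As a minimal sketch (illustrative, not part of the uploaded code), the caption grammar visible in these examples can be parsed like this in Python:

def parse_onoff_caption(caption: str) -> dict:
    """Map each event label to its (onset, offset) pairs in seconds."""
    events = {}
    for clause in caption.split(" and "):
        # Each clause reads "<label> at <on>-<off>[, <on>-<off>...]".
        label, _, spans = clause.partition(" at ")
        events[label.strip()] = [
            (float(on), float(off))
            for on, _, off in (s.partition("-") for s in spans.split(", "))
        ]
    return events

# Example from the first entry above:
# parse_onoff_caption("cat meowing at 0.5-2.0, 3.0-4.5 and whistling at 5.0-6.5")
# -> {"cat meowing": [(0.5, 2.0), (3.0, 4.5)], "whistling": [(5.0, 6.5)]}

This helper assumes labels never contain the substrings " and " or " at ", which holds for the label set visible in these files.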
data/meta_data/test-frequency-control_onoffFromGpt_single-event.json ADDED
@@ -0,0 +1,400 @@
+ {"filepath": "data/single_event_multi_identity_test/syn_1.wav", "onoffCaption": "cat meowing at 1.674-5.019", "frequencyCaption": "cat meowing one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_6.wav", "onoffCaption": "tapping clicking clanking at 0.536-3.976", "frequencyCaption": "tapping clicking clanking one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_8.wav", "onoffCaption": "door slamming at 0-1", "frequencyCaption": "door slamming one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_11.wav", "onoffCaption": "dog barking at 0.464-2.464, 4.19-6.19", "frequencyCaption": "dog barking two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_16.wav", "onoffCaption": "thump thud at 0-1", "frequencyCaption": "thump thud one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_18.wav", "onoffCaption": "sheep goat bleating at 0.5-2.5", "frequencyCaption": "sheep goat bleating one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_21.wav", "onoffCaption": "sheep goat bleating at 0.56-2.56", "frequencyCaption": "sheep goat bleating one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_26.wav", "onoffCaption": "tapping clicking clanking at 0.536-3.976", "frequencyCaption": "tapping clicking clanking one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_28.wav", "onoffCaption": "sneeze at 0-1, 2-3", "frequencyCaption": "sneeze two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_32.wav", "onoffCaption": "cow mooing at 0-3.309", "frequencyCaption": "cow mooing one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_35.wav", "onoffCaption": "door slamming at 0.145-1.085, 2.545-4.463", "frequencyCaption": "door slamming two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_43.wav", "onoffCaption": "thump thud at 0-1, 2-3", "frequencyCaption": "thump thud two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_44.wav", "onoffCaption": "burping belching at 0-1, 2-3, 4-5", "frequencyCaption": "burping belching three times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_50.wav", "onoffCaption": "car horn honking at 1.0-3.0, 4.0-6.0", "frequencyCaption": "car horn honking two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_57.wav", "onoffCaption": "train horn at 0-3", "frequencyCaption": "train horn one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_59.wav", "onoffCaption": "woman laughing at 2.0-4.5, 5.0-7.5", "frequencyCaption": "woman laughing two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_60.wav", "onoffCaption": "cat meowing at 1-2, 3-4", "frequencyCaption": "cat meowing two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_67.wav", "onoffCaption": "cow mooing at 0-3", "frequencyCaption": "cow mooing one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_69.wav", "onoffCaption": "burping belching at 2.0-3.0", "frequencyCaption": "burping belching one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_73.wav", "onoffCaption": "whistling at 0-1", "frequencyCaption": "whistling one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_74.wav", "onoffCaption": "cat meowing at 0-1", "frequencyCaption": "cat meowing one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_82.wav", "onoffCaption": "thump thud at 1.017-4.684, 5.695-8.362", "frequencyCaption": "thump thud two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_91.wav", "onoffCaption": "duck quacking at 0-1", "frequencyCaption": "duck quacking one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_96.wav", "onoffCaption": "cat meowing at 0-1, 2-3, 4-5", "frequencyCaption": "cat meowing three times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_98.wav", "onoffCaption": "woman laughing at 0.0-2.0, 2.5-4.5", "frequencyCaption": "woman laughing two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_101.wav", "onoffCaption": "burping belching at 0.871-1.871, 2.871-3.871", "frequencyCaption": "burping belching two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_106.wav", "onoffCaption": "gunshot at 0-1", "frequencyCaption": "gunshot one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_108.wav", "onoffCaption": "cat meowing at 0.0-2.0, 2.5-4.5", "frequencyCaption": "cat meowing two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_112.wav", "onoffCaption": "train horn at 0-1", "frequencyCaption": "train horn one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_115.wav", "onoffCaption": "sheep goat bleating at 0.5-2.5, 3.0-5.0", "frequencyCaption": "sheep goat bleating two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_122.wav", "onoffCaption": "tapping clicking clanking at 1.0-4.0, 5.0-8.0", "frequencyCaption": "tapping clicking clanking two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_125.wav", "onoffCaption": "car horn honking at 0.0-2.0, 3.0-5.0", "frequencyCaption": "car horn honking two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_131.wav", "onoffCaption": "cow mooing at 1.954-4.602, 6.719-9.729", "frequencyCaption": "cow mooing two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_136.wav", "onoffCaption": "tapping clicking clanking at 1-3, 6-8", "frequencyCaption": "tapping clicking clanking two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_138.wav", "onoffCaption": "burping belching at 0-1, 2-3, 4-5", "frequencyCaption": "burping belching three times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_140.wav", "onoffCaption": "duck quacking at 0-1, 2-3", "frequencyCaption": "duck quacking two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_147.wav", "onoffCaption": "burping belching at 0.5-2.5, 3.5-5.5", "frequencyCaption": "burping belching two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_149.wav", "onoffCaption": "gunshot at 0-1", "frequencyCaption": "gunshot one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_153.wav", "onoffCaption": "cow mooing at 0-1, 2-3", "frequencyCaption": "cow mooing two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_154.wav", "onoffCaption": "train horn at 0-1, 2-3", "frequencyCaption": "train horn two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_163.wav", "onoffCaption": "cow mooing at 1.954-4.602, 6.719-9.729", "frequencyCaption": "cow mooing two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_164.wav", "onoffCaption": "door slamming at 0.145-1.085, 2.545-4.463", "frequencyCaption": "door slamming two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_170.wav", "onoffCaption": "whistling at 0-1", "frequencyCaption": "whistling one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_177.wav", "onoffCaption": "door knocking at 1-2, 3-4", "frequencyCaption": "door knocking two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_179.wav", "onoffCaption": "gunshot at 0.0-2.0", "frequencyCaption": "gunshot one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_181.wav", "onoffCaption": "door knocking at 0-1.0", "frequencyCaption": "door knocking one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_186.wav", "onoffCaption": "sneeze at 0.3-1.3, 2.3-3.3", "frequencyCaption": "sneeze two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_188.wav", "onoffCaption": "explosion at 0-2", "frequencyCaption": "explosion one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_192.wav", "onoffCaption": "cat meowing at 0.0-2.0, 2.5-4.5", "frequencyCaption": "cat meowing two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_195.wav", "onoffCaption": "duck quacking at 0-1, 2-3", "frequencyCaption": "duck quacking two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_3.wav", "onoffCaption": "burping belching at 0.5-2.5, 3.0-5.0", "frequencyCaption": "burping belching two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_4.wav", "onoffCaption": "cat meowing at 0-1", "frequencyCaption": "cat meowing one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_13.wav", "onoffCaption": "tapping clicking clanking at 0.032-1.032, 2.032-3.032, 4.032-5.032", "frequencyCaption": "tapping clicking clanking three times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_14.wav", "onoffCaption": "tapping clicking clanking at 0-1, 2-3", "frequencyCaption": "tapping clicking clanking two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_23.wav", "onoffCaption": "cow mooing at 1.954-4.602, 6.719-9.729", "frequencyCaption": "cow mooing two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_24.wav", "onoffCaption": "thump thud at 1-2, 3-4", "frequencyCaption": "thump thud two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_30.wav", "onoffCaption": "explosion at 0-1, 2-3", "frequencyCaption": "explosion two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_37.wav", "onoffCaption": "tapping clicking clanking at 0.5-2.5, 3-5", "frequencyCaption": "tapping clicking clanking two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_39.wav", "onoffCaption": "burping belching at 0.5-2.5, 3.0-5.0", "frequencyCaption": "burping belching two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_41.wav", "onoffCaption": "car horn honking at 1-2, 3-4", "frequencyCaption": "car horn honking two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_48.wav", "onoffCaption": "train horn at 0-1", "frequencyCaption": "train horn one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_52.wav", "onoffCaption": "dog barking at 0-1, 2-3", "frequencyCaption": "dog barking two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_55.wav", "onoffCaption": "spraying at 0-1, 2-3, 4-5", "frequencyCaption": "spraying three times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_62.wav", "onoffCaption": "woman laughing at 2.782-5.368, 6.831-8.912", "frequencyCaption": "woman laughing two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_65.wav", "onoffCaption": "tapping clicking clanking at 1.0-3.0, 4.0-6.0", "frequencyCaption": "tapping clicking clanking two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_71.wav", "onoffCaption": "train horn at 0-1", "frequencyCaption": "train horn one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_76.wav", "onoffCaption": "door knocking at 0-1", "frequencyCaption": "door knocking one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_78.wav", "onoffCaption": "door knocking at 1-2, 3-4", "frequencyCaption": "door knocking two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_80.wav", "onoffCaption": "car horn honking at 0-1", "frequencyCaption": "car horn honking one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_85.wav", "onoffCaption": "gunshot at 0.0-2.0, 3.0-5.0, 6.0-8.0", "frequencyCaption": "gunshot three times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_87.wav", "onoffCaption": "thump thud at 1.017-4.684, 5.695-9.362", "frequencyCaption": "thump thud two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_89.wav", "onoffCaption": "door knocking at 1.973-5.029, 6.285-9.132", "frequencyCaption": "door knocking two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_93.wav", "onoffCaption": "whistling at 2.158-10.0", "frequencyCaption": "whistling one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_94.wav", "onoffCaption": "burping belching at 0.871-2.871, 5.218-7.218", "frequencyCaption": "burping belching two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_103.wav", "onoffCaption": "dog barking at 0-1, 2-3, 4-5", "frequencyCaption": "dog barking three times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_104.wav", "onoffCaption": "thump thud at 0-2, 3-5", "frequencyCaption": "thump thud two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_110.wav", "onoffCaption": "whistling at 0-1", "frequencyCaption": "whistling one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_117.wav", "onoffCaption": "tapping clicking clanking at 0-2", "frequencyCaption": "tapping clicking clanking one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_119.wav", "onoffCaption": "duck quacking at 0.235-2.235, 3.085-5.085", "frequencyCaption": "duck quacking two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_120.wav", "onoffCaption": "dog barking at 0-1, 2-3, 4-5", "frequencyCaption": "dog barking three times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_127.wav", "onoffCaption": "duck quacking at 0-1, 2-3", "frequencyCaption": "duck quacking two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_129.wav", "onoffCaption": "burping belching at 0.5-1.5, 2.5-3.5", "frequencyCaption": "burping belching two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_133.wav", "onoffCaption": "train horn at 0-3", "frequencyCaption": "train horn one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_134.wav", "onoffCaption": "spraying at 0.0-1.0, 2.0-3.0, 4.0-5.0", "frequencyCaption": "spraying three times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_142.wav", "onoffCaption": "cow mooing at 1-2, 3-4", "frequencyCaption": "cow mooing two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_145.wav", "onoffCaption": "cow mooing at 0.0-2.0, 3.0-5.0", "frequencyCaption": "cow mooing two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_151.wav", "onoffCaption": "duck quacking at 0-1, 2-3", "frequencyCaption": "duck quacking two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_156.wav", "onoffCaption": "thump thud at 0.0-2.0, 2.5-4.5, 5.0-7.0", "frequencyCaption": "thump thud three times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_158.wav", "onoffCaption": "burping belching at 1.5-3.5", "frequencyCaption": "burping belching one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_161.wav", "onoffCaption": "car horn honking at 1.5-3.5, 4.0-6.0", "frequencyCaption": "car horn honking two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_166.wav", "onoffCaption": "burping belching at 0-1, 1-2, 2-3", "frequencyCaption": "burping belching three times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_168.wav", "onoffCaption": "door slamming at 0-1, 2-3, 4-5", "frequencyCaption": "door slamming three times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_172.wav", "onoffCaption": "woman laughing at 0.0-2.0", "frequencyCaption": "woman laughing one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_175.wav", "onoffCaption": "spraying at 0-1", "frequencyCaption": "spraying one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_183.wav", "onoffCaption": "woman laughing at 2.782-5.368, 6.831-8.912", "frequencyCaption": "woman laughing two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_184.wav", "onoffCaption": "door slamming at 0.145-1.085, 2.545-4.463", "frequencyCaption": "door slamming two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_190.wav", "onoffCaption": "explosion at 1.773-4.034, 5.15-7.411", "frequencyCaption": "explosion two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_197.wav", "onoffCaption": "car horn honking at 1.817-4.404, 5.85-8.437", "frequencyCaption": "car horn honking two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_199.wav", "onoffCaption": "car horn honking at 0.664-3.129", "frequencyCaption": "car horn honking one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_200.wav", "onoffCaption": "train horn at 0-2", "frequencyCaption": "train horn one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_2.wav", "onoffCaption": "cat meowing at 0.5-2.5", "frequencyCaption": "cat meowing one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_5.wav", "onoffCaption": "dog barking at 0.464-2.464, 4.19-6.19", "frequencyCaption": "dog barking two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_12.wav", "onoffCaption": "tapping clicking clanking at 0.536-3.976", "frequencyCaption": "tapping clicking clanking one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_15.wav", "onoffCaption": "explosion at 1.773-4.034, 5.15-7.411", "frequencyCaption": "explosion two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_22.wav", "onoffCaption": "sheep goat bleating at 0-1, 2-3, 4-5", "frequencyCaption": "sheep goat bleating three times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_25.wav", "onoffCaption": "gunshot at 0.0-2.0, 2.1-4.1", "frequencyCaption": "gunshot two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_31.wav", "onoffCaption": "sneeze at 0.0-1.0", "frequencyCaption": "sneeze one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_36.wav", "onoffCaption": "sheep goat bleating at 0-1, 1-2", "frequencyCaption": "sheep goat bleating two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_38.wav", "onoffCaption": "whistling at 0-1", "frequencyCaption": "whistling one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_40.wav", "onoffCaption": "woman laughing at 0.0-2.0, 2.5-4.5", "frequencyCaption": "woman laughing two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_46.wav", "onoffCaption": "tapping clicking clanking at 1.5-5.0", "frequencyCaption": "tapping clicking clanking one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_47.wav", "onoffCaption": "cow mooing at 0.0-2.0, 3.0-5.0, 6.0-8.0", "frequencyCaption": "cow mooing three times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_49.wav", "onoffCaption": "dog barking at 0-1", "frequencyCaption": "dog barking one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_51.wav", "onoffCaption": "whistling at 0-1", "frequencyCaption": "whistling one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_53.wav", "onoffCaption": "whistling at 0-1, 2-3", "frequencyCaption": "whistling two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_54.wav", "onoffCaption": "cow mooing at 1.954-6.383, 7.52-10.0", "frequencyCaption": "cow mooing two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_63.wav", "onoffCaption": "explosion at 0.0-3.0", "frequencyCaption": "explosion one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_64.wav", "onoffCaption": "whistling at 0-1, 2-3", "frequencyCaption": "whistling two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_70.wav", "onoffCaption": "dog barking at 1-2", "frequencyCaption": "dog barking one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_77.wav", "onoffCaption": "train horn at 0-3", "frequencyCaption": "train horn one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_79.wav", "onoffCaption": "train horn at 0-2.5", "frequencyCaption": "train horn one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_81.wav", "onoffCaption": "tapping clicking clanking at 0-1", "frequencyCaption": "tapping clicking clanking one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_86.wav", "onoffCaption": "gunshot at 0.0-2.0, 2.1-4.1", "frequencyCaption": "gunshot two times"}
124
+ {"filepath": "data/single_event_multi_identity_test/syn_88.wav", "onoffCaption": "car horn honking at 0.0-1.0, 2.0-3.0, 4.0-5.0", "frequencyCaption": "car horn honking three times"}
125
+ {"filepath": "data/single_event_multi_identity_test/syn_92.wav", "onoffCaption": "door slamming at 0-1, 2-4", "frequencyCaption": "door slamming two times"}
126
+ {"filepath": "data/single_event_multi_identity_test/syn_95.wav", "onoffCaption": "woman laughing at 1.5-3.5, 4.0-6.0", "frequencyCaption": "woman laughing two times"}
127
+ {"filepath": "data/single_event_multi_identity_test/syn_102.wav", "onoffCaption": "duck quacking at 0-1, 2-3", "frequencyCaption": "duck quacking two times"}
128
+ {"filepath": "data/single_event_multi_identity_test/syn_105.wav", "onoffCaption": "door slamming at 0.355-2.581", "frequencyCaption": "door slamming one times"}
129
+ {"filepath": "data/single_event_multi_identity_test/syn_111.wav", "onoffCaption": "dog barking at 0-1, 2-3", "frequencyCaption": "dog barking two times"}
130
+ {"filepath": "data/single_event_multi_identity_test/syn_116.wav", "onoffCaption": "sheep goat bleating at 0-1", "frequencyCaption": "sheep goat bleating one times"}
131
+ {"filepath": "data/single_event_multi_identity_test/syn_118.wav", "onoffCaption": "sheep goat bleating at 0.5-1.5", "frequencyCaption": "sheep goat bleating one times"}
132
+ {"filepath": "data/single_event_multi_identity_test/syn_121.wav", "onoffCaption": "cat meowing at 0-1", "frequencyCaption": "cat meowing one times"}
133
+ {"filepath": "data/single_event_multi_identity_test/syn_123.wav", "onoffCaption": "sheep goat bleating at 0.0-2.0, 3.0-5.0", "frequencyCaption": "sheep goat bleating two times"}
134
+ {"filepath": "data/single_event_multi_identity_test/syn_126.wav", "onoffCaption": "burping belching at 0.5-2.5, 3.0-5.0", "frequencyCaption": "burping belching two times"}
135
+ {"filepath": "data/single_event_multi_identity_test/syn_128.wav", "onoffCaption": "train horn at 0-1", "frequencyCaption": "train horn one times"}
136
+ {"filepath": "data/single_event_multi_identity_test/syn_132.wav", "onoffCaption": "duck quacking at 0-1, 2-3", "frequencyCaption": "duck quacking two times"}
137
+ {"filepath": "data/single_event_multi_identity_test/syn_135.wav", "onoffCaption": "whistling at 0-1, 2-3", "frequencyCaption": "whistling two times"}
138
+ {"filepath": "data/single_event_multi_identity_test/syn_139.wav", "onoffCaption": "spraying at 0-1, 1-2", "frequencyCaption": "spraying two times"}
139
+ {"filepath": "data/single_event_multi_identity_test/syn_143.wav", "onoffCaption": "door knocking at 0.645-2.772, 3.875-6.782, 7.405-9.692", "frequencyCaption": "door knocking three times"}
140
+ {"filepath": "data/single_event_multi_identity_test/syn_144.wav", "onoffCaption": "spraying at 0-1, 2-3, 4-5", "frequencyCaption": "spraying three times"}
141
+ {"filepath": "data/single_event_multi_identity_test/syn_150.wav", "onoffCaption": "duck quacking at 0-1", "frequencyCaption": "duck quacking one times"}
142
+ {"filepath": "data/single_event_multi_identity_test/syn_157.wav", "onoffCaption": "explosion at 0.5-1.5, 2-3", "frequencyCaption": "explosion two times"}
143
+ {"filepath": "data/single_event_multi_identity_test/syn_159.wav", "onoffCaption": "sneeze at 0.5-1.5, 2.5-3.5", "frequencyCaption": "sneeze two times"}
144
+ {"filepath": "data/single_event_multi_identity_test/syn_160.wav", "onoffCaption": "woman laughing at 0.0-2.0", "frequencyCaption": "woman laughing one times"}
145
+ {"filepath": "data/single_event_multi_identity_test/syn_167.wav", "onoffCaption": "thump thud at 1.017-4.684, 5.695-9.362", "frequencyCaption": "thump thud two times"}
146
+ {"filepath": "data/single_event_multi_identity_test/syn_169.wav", "onoffCaption": "gunshot at 0.0-2.0", "frequencyCaption": "gunshot one times"}
147
+ {"filepath": "data/single_event_multi_identity_test/syn_173.wav", "onoffCaption": "explosion at 0-3", "frequencyCaption": "explosion one times"}
148
+ {"filepath": "data/single_event_multi_identity_test/syn_174.wav", "onoffCaption": "duck quacking at 0.2-2.2, 3.2-5.2", "frequencyCaption": "duck quacking two times"}
149
+ {"filepath": "data/single_event_multi_identity_test/syn_176.wav", "onoffCaption": "gunshot at 0.0-2.0, 2.5-4.5", "frequencyCaption": "gunshot two times"}
150
+ {"filepath": "data/single_event_multi_identity_test/syn_182.wav", "onoffCaption": "car horn honking at 0.653-3.872", "frequencyCaption": "car horn honking one times"}
151
+ {"filepath": "data/single_event_multi_identity_test/syn_185.wav", "onoffCaption": "dog barking at 0-2, 3-5", "frequencyCaption": "dog barking two times"}
152
+ {"filepath": "data/single_event_multi_identity_test/syn_189.wav", "onoffCaption": "burping belching at 0-1, 2-3", "frequencyCaption": "burping belching two times"}
153
+ {"filepath": "data/single_event_multi_identity_test/syn_191.wav", "onoffCaption": "tapping clicking clanking at 0.0-4.0", "frequencyCaption": "tapping clicking clanking one times"}
154
+ {"filepath": "data/single_event_multi_identity_test/syn_193.wav", "onoffCaption": "dog barking at 0-1", "frequencyCaption": "dog barking one times"}
155
+ {"filepath": "data/single_event_multi_identity_test/syn_196.wav", "onoffCaption": "duck quacking at 0-1", "frequencyCaption": "duck quacking one times"}
156
+ {"filepath": "data/single_event_multi_identity_test/syn_198.wav", "onoffCaption": "sheep goat bleating at 0.56-2.56", "frequencyCaption": "sheep goat bleating one times"}
157
+ {"filepath": "data/single_event_multi_identity_test/syn_7.wav", "onoffCaption": "door slamming at 0-2, 2-4", "frequencyCaption": "door slamming two times"}
158
+ {"filepath": "data/single_event_multi_identity_test/syn_9.wav", "onoffCaption": "sneeze at 0.5-1.5, 2.0-3.0", "frequencyCaption": "sneeze two times"}
159
+ {"filepath": "data/single_event_multi_identity_test/syn_10.wav", "onoffCaption": "door slamming at 0.0-1.0", "frequencyCaption": "door slamming one times"}
160
+ {"filepath": "data/single_event_multi_identity_test/syn_17.wav", "onoffCaption": "gunshot at 0.0-2.0", "frequencyCaption": "gunshot one times"}
161
+ {"filepath": "data/single_event_multi_identity_test/syn_19.wav", "onoffCaption": "thump thud at 1.9-4.5, 5.5-8.1", "frequencyCaption": "thump thud two times"}
162
+ {"filepath": "data/single_event_multi_identity_test/syn_20.wav", "onoffCaption": "dog barking at 0.0-2.0, 2.5-4.5, 5.0-7.0", "frequencyCaption": "dog barking three times"}
163
+ {"filepath": "data/single_event_multi_identity_test/syn_27.wav", "onoffCaption": "whistling at 0-1, 2-3", "frequencyCaption": "whistling two times"}
164
+ {"filepath": "data/single_event_multi_identity_test/syn_29.wav", "onoffCaption": "woman laughing at 0-1", "frequencyCaption": "woman laughing one times"}
165
+ {"filepath": "data/single_event_multi_identity_test/syn_33.wav", "onoffCaption": "dog barking at 0.464-2.464, 4.19-6.19", "frequencyCaption": "dog barking two times"}
166
+ {"filepath": "data/single_event_multi_identity_test/syn_34.wav", "onoffCaption": "dog barking at 0-1, 1.5-2.5, 3-4", "frequencyCaption": "dog barking three times"}
167
+ {"filepath": "data/single_event_multi_identity_test/syn_42.wav", "onoffCaption": "door slamming at 0.145-1.085, 2.545-4.463", "frequencyCaption": "door slamming two times"}
168
+ {"filepath": "data/single_event_multi_identity_test/syn_45.wav", "onoffCaption": "woman laughing at 0-1", "frequencyCaption": "woman laughing one times"}
169
+ {"filepath": "data/single_event_multi_identity_test/syn_56.wav", "onoffCaption": "cat meowing at 0-1", "frequencyCaption": "cat meowing one times"}
170
+ {"filepath": "data/single_event_multi_identity_test/syn_58.wav", "onoffCaption": "spraying at 0.5-1.5", "frequencyCaption": "spraying one times"}
171
+ {"filepath": "data/single_event_multi_identity_test/syn_61.wav", "onoffCaption": "sheep goat bleating at 0.8-2.8, 3.8-5.8", "frequencyCaption": "sheep goat bleating two times"}
172
+ {"filepath": "data/single_event_multi_identity_test/syn_66.wav", "onoffCaption": "duck quacking at 0-1", "frequencyCaption": "duck quacking one times"}
173
+ {"filepath": "data/single_event_multi_identity_test/syn_68.wav", "onoffCaption": "door slamming at 0.355-2.581", "frequencyCaption": "door slamming one times"}
174
+ {"filepath": "data/single_event_multi_identity_test/syn_72.wav", "onoffCaption": "duck quacking at 0-1", "frequencyCaption": "duck quacking one times"}
175
+ {"filepath": "data/single_event_multi_identity_test/syn_75.wav", "onoffCaption": "door slamming at 0-1, 2-3, 4-5", "frequencyCaption": "door slamming three times"}
176
+ {"filepath": "data/single_event_multi_identity_test/syn_83.wav", "onoffCaption": "spraying at 0-1, 2-3, 4-5", "frequencyCaption": "spraying three times"}
177
+ {"filepath": "data/single_event_multi_identity_test/syn_84.wav", "onoffCaption": "burping belching at 0-3", "frequencyCaption": "burping belching one times"}
178
+ {"filepath": "data/single_event_multi_identity_test/syn_90.wav", "onoffCaption": "whistling at 1-2", "frequencyCaption": "whistling one times"}
179
+ {"filepath": "data/single_event_multi_identity_test/syn_97.wav", "onoffCaption": "dog barking at 0-1", "frequencyCaption": "dog barking one times"}
180
+ {"filepath": "data/single_event_multi_identity_test/syn_99.wav", "onoffCaption": "gunshot at 0.5-2.5, 3.0-5.0", "frequencyCaption": "gunshot two times"}
181
+ {"filepath": "data/single_event_multi_identity_test/syn_100.wav", "onoffCaption": "gunshot at 0-1, 2-3", "frequencyCaption": "gunshot two times"}
182
+ {"filepath": "data/single_event_multi_identity_test/syn_107.wav", "onoffCaption": "cat meowing at 0-1, 2-3, 4-5", "frequencyCaption": "cat meowing three times"}
183
+ {"filepath": "data/single_event_multi_identity_test/syn_109.wav", "onoffCaption": "cat meowing at 0-1, 2-3, 4-5", "frequencyCaption": "cat meowing three times"}
184
+ {"filepath": "data/single_event_multi_identity_test/syn_113.wav", "onoffCaption": "door slamming at 0.145-1.085, 2.545-4.463", "frequencyCaption": "door slamming two times"}
185
+ {"filepath": "data/single_event_multi_identity_test/syn_114.wav", "onoffCaption": "explosion at 0.0-3.0", "frequencyCaption": "explosion one times"}
186
+ {"filepath": "data/single_event_multi_identity_test/syn_124.wav", "onoffCaption": "woman laughing at 0-1, 1-2", "frequencyCaption": "woman laughing two times"}
187
+ {"filepath": "data/single_event_multi_identity_test/syn_130.wav", "onoffCaption": "gunshot at 0.0-2.0", "frequencyCaption": "gunshot one times"}
188
+ {"filepath": "data/single_event_multi_identity_test/syn_137.wav", "onoffCaption": "train horn at 0-1, 2-3, 4-5", "frequencyCaption": "train horn three times"}
189
+ {"filepath": "data/single_event_multi_identity_test/syn_141.wav", "onoffCaption": "woman laughing at 0.5-3.5", "frequencyCaption": "woman laughing one times"}
190
+ {"filepath": "data/single_event_multi_identity_test/syn_146.wav", "onoffCaption": "sneeze at 0.8-1.8", "frequencyCaption": "sneeze one times"}
191
+ {"filepath": "data/single_event_multi_identity_test/syn_148.wav", "onoffCaption": "dog barking at 0-1, 2-3, 4-5", "frequencyCaption": "dog barking three times"}
192
+ {"filepath": "data/single_event_multi_identity_test/syn_152.wav", "onoffCaption": "dog barking at 0-1", "frequencyCaption": "dog barking one times"}
193
+ {"filepath": "data/single_event_multi_identity_test/syn_155.wav", "onoffCaption": "spraying at 0.033-1.519", "frequencyCaption": "spraying one times"}
194
+ {"filepath": "data/single_event_multi_identity_test/syn_162.wav", "onoffCaption": "explosion at 0-1, 2-3", "frequencyCaption": "explosion two times"}
195
+ {"filepath": "data/single_event_multi_identity_test/syn_165.wav", "onoffCaption": "whistling at 2.158-10.0", "frequencyCaption": "whistling one times"}
196
+ {"filepath": "data/single_event_multi_identity_test/syn_171.wav", "onoffCaption": "duck quacking at 0-1", "frequencyCaption": "duck quacking one times"}
197
+ {"filepath": "data/single_event_multi_identity_test/syn_178.wav", "onoffCaption": "tapping clicking clanking at 1-3, 4-6", "frequencyCaption": "tapping clicking clanking two times"}
198
+ {"filepath": "data/single_event_multi_identity_test/syn_180.wav", "onoffCaption": "cow mooing at 0-3", "frequencyCaption": "cow mooing one times"}
199
+ {"filepath": "data/single_event_multi_identity_test/syn_187.wav", "onoffCaption": "explosion at 1.5-4.5", "frequencyCaption": "explosion one times"}
200
+ {"filepath": "data/single_event_multi_identity_test/syn_194.wav", "onoffCaption": "gunshot at 0-1", "frequencyCaption": "gunshot one times"}
201
+ {"filepath": "data/single_event_single_identity_test/syn_11.wav", "onoffCaption": "door knocking at 1-2, 3-4", "frequencyCaption": "door knocking two times"}
202
+ {"filepath": "data/single_event_single_identity_test/syn_16.wav", "onoffCaption": "burping belching at 0.5-3.5", "frequencyCaption": "burping belching one times"}
203
+ {"filepath": "data/single_event_single_identity_test/syn_18.wav", "onoffCaption": "burping belching at 1.0-2.0", "frequencyCaption": "burping belching one times"}
204
+ {"filepath": "data/single_event_single_identity_test/syn_21.wav", "onoffCaption": "burping belching at 0.5-1.5", "frequencyCaption": "burping belching one times"}
205
+ {"filepath": "data/single_event_single_identity_test/syn_26.wav", "onoffCaption": "spraying at 0-1", "frequencyCaption": "spraying one times"}
206
+ {"filepath": "data/single_event_single_identity_test/syn_28.wav", "onoffCaption": "train horn at 0-3", "frequencyCaption": "train horn one times"}
207
+ {"filepath": "data/single_event_single_identity_test/syn_32.wav", "onoffCaption": "gunshot at 0-1, 2-3, 4-5", "frequencyCaption": "gunshot three times"}
208
+ {"filepath": "data/single_event_single_identity_test/syn_35.wav", "onoffCaption": "woman laughing at 0-2, 3-5", "frequencyCaption": "woman laughing two times"}
209
+ {"filepath": "data/single_event_single_identity_test/syn_43.wav", "onoffCaption": "door slamming at 0.145-1.085, 2.545-4.463", "frequencyCaption": "door slamming two times"}
210
+ {"filepath": "data/single_event_single_identity_test/syn_44.wav", "onoffCaption": "dog barking at 0-1, 2-3, 4-5", "frequencyCaption": "dog barking three times"}
211
+ {"filepath": "data/single_event_single_identity_test/syn_50.wav", "onoffCaption": "door knocking at 0-1, 2-3", "frequencyCaption": "door knocking two times"}
212
+ {"filepath": "data/single_event_single_identity_test/syn_57.wav", "onoffCaption": "train horn at 0-1", "frequencyCaption": "train horn one times"}
213
+ {"filepath": "data/single_event_single_identity_test/syn_59.wav", "onoffCaption": "gunshot at 0-1, 2-3, 4-5", "frequencyCaption": "gunshot three times"}
214
+ {"filepath": "data/single_event_single_identity_test/syn_60.wav", "onoffCaption": "cow mooing at 0.0-2.0, 2.5-4.5", "frequencyCaption": "cow mooing two times"}
215
+ {"filepath": "data/single_event_single_identity_test/syn_67.wav", "onoffCaption": "cow mooing at 1.0-3.0, 4.0-6.0", "frequencyCaption": "cow mooing two times"}
216
+ {"filepath": "data/single_event_single_identity_test/syn_69.wav", "onoffCaption": "burping belching at 0.0-2.0, 2.1-4.1", "frequencyCaption": "burping belching two times"}
217
+ {"filepath": "data/single_event_single_identity_test/syn_73.wav", "onoffCaption": "dog barking at 0-1, 2-3, 4-5", "frequencyCaption": "dog barking three times"}
218
+ {"filepath": "data/single_event_single_identity_test/syn_74.wav", "onoffCaption": "cow mooing at 0-2, 3-5", "frequencyCaption": "cow mooing two times"}
219
+ {"filepath": "data/single_event_single_identity_test/syn_82.wav", "onoffCaption": "woman laughing at 2.0-6.0", "frequencyCaption": "woman laughing one times"}
220
+ {"filepath": "data/single_event_single_identity_test/syn_85.wav", "onoffCaption": "dog barking at 0-1", "frequencyCaption": "dog barking one times"}
221
+ {"filepath": "data/single_event_single_identity_test/syn_91.wav", "onoffCaption": "tapping clicking clanking at 1.0-4.0", "frequencyCaption": "tapping clicking clanking one times"}
222
+ {"filepath": "data/single_event_single_identity_test/syn_96.wav", "onoffCaption": "door knocking at 0-1", "frequencyCaption": "door knocking one times"}
223
+ {"filepath": "data/single_event_single_identity_test/syn_98.wav", "onoffCaption": "door slamming at 0.355-2.581", "frequencyCaption": "door slamming one times"}
224
+ {"filepath": "data/single_event_single_identity_test/syn_101.wav", "onoffCaption": "spraying at 0.0-1.0, 2.0-3.0, 4.0-5.0", "frequencyCaption": "spraying three times"}
225
+ {"filepath": "data/single_event_single_identity_test/syn_106.wav", "onoffCaption": "spraying at 0.0-1.0", "frequencyCaption": "spraying one times"}
226
+ {"filepath": "data/single_event_single_identity_test/syn_108.wav", "onoffCaption": "gunshot at 0.2-2.2, 3.2-5.2", "frequencyCaption": "gunshot two times"}
227
+ {"filepath": "data/single_event_single_identity_test/syn_112.wav", "onoffCaption": "burping belching at 0.871-2.871, 3.871-5.871", "frequencyCaption": "burping belching two times"}
228
+ {"filepath": "data/single_event_single_identity_test/syn_115.wav", "onoffCaption": "explosion at 0-1", "frequencyCaption": "explosion one times"}
229
+ {"filepath": "data/single_event_single_identity_test/syn_122.wav", "onoffCaption": "tapping clicking clanking at 1.0-5.0", "frequencyCaption": "tapping clicking clanking one times"}
230
+ {"filepath": "data/single_event_single_identity_test/syn_125.wav", "onoffCaption": "explosion at 1.0-3.0, 3.5-5.5", "frequencyCaption": "explosion two times"}
231
+ {"filepath": "data/single_event_single_identity_test/syn_131.wav", "onoffCaption": "door slamming at 0-1, 2-3, 4-5", "frequencyCaption": "door slamming three times"}
232
+ {"filepath": "data/single_event_single_identity_test/syn_136.wav", "onoffCaption": "car horn honking at 0-1", "frequencyCaption": "car horn honking one times"}
233
+ {"filepath": "data/single_event_single_identity_test/syn_138.wav", "onoffCaption": "explosion at 0-1", "frequencyCaption": "explosion one times"}
234
+ {"filepath": "data/single_event_single_identity_test/syn_140.wav", "onoffCaption": "train horn at 0-1, 1-2", "frequencyCaption": "train horn two times"}
235
+ {"filepath": "data/single_event_single_identity_test/syn_147.wav", "onoffCaption": "explosion at 0-1, 2-3, 4-5", "frequencyCaption": "explosion three times"}
236
+ {"filepath": "data/single_event_single_identity_test/syn_149.wav", "onoffCaption": "spraying at 0.1-1.1, 1.2-2.2, 3.3-4.3", "frequencyCaption": "spraying three times"}
237
+ {"filepath": "data/single_event_single_identity_test/syn_153.wav", "onoffCaption": "dog barking at 0-1, 2-3", "frequencyCaption": "dog barking two times"}
238
+ {"filepath": "data/single_event_single_identity_test/syn_154.wav", "onoffCaption": "explosion at 0.0-1.0", "frequencyCaption": "explosion one times"}
239
+ {"filepath": "data/single_event_single_identity_test/syn_163.wav", "onoffCaption": "sneeze at 0-1", "frequencyCaption": "sneeze one times"}
240
+ {"filepath": "data/single_event_single_identity_test/syn_164.wav", "onoffCaption": "sneeze at 0-1", "frequencyCaption": "sneeze one times"}
241
+ {"filepath": "data/single_event_single_identity_test/syn_170.wav", "onoffCaption": "burping belching at 0.5-2.5", "frequencyCaption": "burping belching one times"}
242
+ {"filepath": "data/single_event_single_identity_test/syn_175.wav", "onoffCaption": "explosion at 2.941-5.813", "frequencyCaption": "explosion one times"}
243
+ {"filepath": "data/single_event_single_identity_test/syn_177.wav", "onoffCaption": "door knocking at 0-1, 2-3", "frequencyCaption": "door knocking two times"}
244
+ {"filepath": "data/single_event_single_identity_test/syn_179.wav", "onoffCaption": "explosion at 0.0-4.0", "frequencyCaption": "explosion one times"}
245
+ {"filepath": "data/single_event_single_identity_test/syn_181.wav", "onoffCaption": "train horn at 0-3", "frequencyCaption": "train horn one times"}
246
+ {"filepath": "data/single_event_single_identity_test/syn_186.wav", "onoffCaption": "sheep goat bleating at 0.0-2.0, 2.5-4.5", "frequencyCaption": "sheep goat bleating two times"}
247
+ {"filepath": "data/single_event_single_identity_test/syn_188.wav", "onoffCaption": "whistling at 2.158-10.0", "frequencyCaption": "whistling one times"}
248
+ {"filepath": "data/single_event_single_identity_test/syn_190.wav", "onoffCaption": "woman laughing at 0-1, 2-3", "frequencyCaption": "woman laughing two times"}
249
+ {"filepath": "data/single_event_single_identity_test/syn_192.wav", "onoffCaption": "door knocking at 1-2, 3-4", "frequencyCaption": "door knocking two times"}
250
+ {"filepath": "data/single_event_single_identity_test/syn_195.wav", "onoffCaption": "cow mooing at 1.5-3.5, 4.0-6.0", "frequencyCaption": "cow mooing two times"}
251
+ {"filepath": "data/single_event_single_identity_test/syn_13.wav", "onoffCaption": "tapping clicking clanking at 0.536-3.976", "frequencyCaption": "tapping clicking clanking one times"}
252
+ {"filepath": "data/single_event_single_identity_test/syn_14.wav", "onoffCaption": "woman laughing at 0-2", "frequencyCaption": "woman laughing one times"}
253
+ {"filepath": "data/single_event_single_identity_test/syn_23.wav", "onoffCaption": "whistling at 2.158-10.0", "frequencyCaption": "whistling one times"}
254
+ {"filepath": "data/single_event_single_identity_test/syn_24.wav", "onoffCaption": "dog barking at 0.311-2.711", "frequencyCaption": "dog barking one times"}
255
+ {"filepath": "data/single_event_single_identity_test/syn_30.wav", "onoffCaption": "whistling at 2.158-10.0", "frequencyCaption": "whistling one times"}
256
+ {"filepath": "data/single_event_single_identity_test/syn_37.wav", "onoffCaption": "whistling at 0-1", "frequencyCaption": "whistling one times"}
257
+ {"filepath": "data/single_event_single_identity_test/syn_39.wav", "onoffCaption": "whistling at 0-1, 2-3", "frequencyCaption": "whistling two times"}
258
+ {"filepath": "data/single_event_single_identity_test/syn_41.wav", "onoffCaption": "sheep goat bleating at 0.5-2.5, 2.75-4.75", "frequencyCaption": "sheep goat bleating two times"}
259
+ {"filepath": "data/single_event_single_identity_test/syn_46.wav", "onoffCaption": "car horn honking at 1.0-3.0, 4.0-6.0", "frequencyCaption": "car horn honking two times"}
260
+ {"filepath": "data/single_event_single_identity_test/syn_48.wav", "onoffCaption": "thump thud at 1.017-4.684, 5.695-9.362", "frequencyCaption": "thump thud two times"}
261
+ {"filepath": "data/single_event_single_identity_test/syn_52.wav", "onoffCaption": "dog barking at 0-1, 2-3, 4-5", "frequencyCaption": "dog barking three times"}
262
+ {"filepath": "data/single_event_single_identity_test/syn_55.wav", "onoffCaption": "gunshot at 0.0-2.0, 3.0-5.0", "frequencyCaption": "gunshot two times"}
263
+ {"filepath": "data/single_event_single_identity_test/syn_62.wav", "onoffCaption": "burping belching at 1.5-3.5, 4.0-6.0", "frequencyCaption": "burping belching two times"}
264
+ {"filepath": "data/single_event_single_identity_test/syn_65.wav", "onoffCaption": "sheep goat bleating at 1.0-3.0, 4.0-6.0", "frequencyCaption": "sheep goat bleating two times"}
265
+ {"filepath": "data/single_event_single_identity_test/syn_71.wav", "onoffCaption": "tapping clicking clanking at 0.5-3.0", "frequencyCaption": "tapping clicking clanking one times"}
266
+ {"filepath": "data/single_event_single_identity_test/syn_76.wav", "onoffCaption": "sheep goat bleating at 0.5-2.5, 3.0-5.0", "frequencyCaption": "sheep goat bleating two times"}
267
+ {"filepath": "data/single_event_single_identity_test/syn_78.wav", "onoffCaption": "train horn at 0-1, 2-3", "frequencyCaption": "train horn two times"}
268
+ {"filepath": "data/single_event_single_identity_test/syn_80.wav", "onoffCaption": "whistling at 0-1, 2-3", "frequencyCaption": "whistling two times"}
269
+ {"filepath": "data/single_event_single_identity_test/syn_87.wav", "onoffCaption": "car horn honking at 1.817-4.12, 6.106-8.453", "frequencyCaption": "car horn honking two times"}
270
+ {"filepath": "data/single_event_single_identity_test/syn_89.wav", "onoffCaption": "train horn at 0-1", "frequencyCaption": "train horn one times"}
271
+ {"filepath": "data/single_event_single_identity_test/syn_93.wav", "onoffCaption": "dog barking at 0.464-2.464, 4.19-6.19", "frequencyCaption": "dog barking two times"}
272
+ {"filepath": "data/single_event_single_identity_test/syn_94.wav", "onoffCaption": "duck quacking at 0-1", "frequencyCaption": "duck quacking one times"}
273
+ {"filepath": "data/single_event_single_identity_test/syn_103.wav", "onoffCaption": "gunshot at 0.0-2.0", "frequencyCaption": "gunshot one times"}
274
+ {"filepath": "data/single_event_single_identity_test/syn_104.wav", "onoffCaption": "cat meowing at 0-1, 2-3, 4-5", "frequencyCaption": "cat meowing three times"}
275
+ {"filepath": "data/single_event_single_identity_test/syn_110.wav", "onoffCaption": "whistling at 0-1", "frequencyCaption": "whistling one times"}
276
+ {"filepath": "data/single_event_single_identity_test/syn_117.wav", "onoffCaption": "cat meowing at 1.0-3.0, 4.0-6.0", "frequencyCaption": "cat meowing two times"}
277
+ {"filepath": "data/single_event_single_identity_test/syn_119.wav", "onoffCaption": "car horn honking at 0.0-2.0", "frequencyCaption": "car horn honking one times"}
278
+ {"filepath": "data/single_event_single_identity_test/syn_120.wav", "onoffCaption": "door knocking at 0-1", "frequencyCaption": "door knocking one times"}
279
+ {"filepath": "data/single_event_single_identity_test/syn_127.wav", "onoffCaption": "sheep goat bleating at 0.5-2.5, 3-5", "frequencyCaption": "sheep goat bleating two times"}
280
+ {"filepath": "data/single_event_single_identity_test/syn_129.wav", "onoffCaption": "sheep goat bleating at 0-1, 2-3, 4-5", "frequencyCaption": "sheep goat bleating three times"}
281
+ {"filepath": "data/single_event_single_identity_test/syn_133.wav", "onoffCaption": "gunshot at 0.0-2.0, 2.5-4.5", "frequencyCaption": "gunshot two times"}
282
+ {"filepath": "data/single_event_single_identity_test/syn_134.wav", "onoffCaption": "sheep goat bleating at 0-1, 2-3", "frequencyCaption": "sheep goat bleating two times"}
283
+ {"filepath": "data/single_event_single_identity_test/syn_142.wav", "onoffCaption": "dog barking at 0-1, 2-3", "frequencyCaption": "dog barking two times"}
284
+ {"filepath": "data/single_event_single_identity_test/syn_145.wav", "onoffCaption": "train horn at 0-3", "frequencyCaption": "train horn one times"}
285
+ {"filepath": "data/single_event_single_identity_test/syn_151.wav", "onoffCaption": "burping belching at 1-3", "frequencyCaption": "burping belching one times"}
286
+ {"filepath": "data/single_event_single_identity_test/syn_156.wav", "onoffCaption": "cow mooing at 0-3", "frequencyCaption": "cow mooing one times"}
287
+ {"filepath": "data/single_event_single_identity_test/syn_158.wav", "onoffCaption": "door knocking at 0-1", "frequencyCaption": "door knocking one times"}
288
+ {"filepath": "data/single_event_single_identity_test/syn_161.wav", "onoffCaption": "spraying at 0-1, 2-3, 4-5", "frequencyCaption": "spraying three times"}
289
+ {"filepath": "data/single_event_single_identity_test/syn_166.wav", "onoffCaption": "tapping clicking clanking at 0.032-3.472, 4.758-7.489", "frequencyCaption": "tapping clicking clanking two times"}
290
+ {"filepath": "data/single_event_single_identity_test/syn_168.wav", "onoffCaption": "explosion at 2.941-5.813", "frequencyCaption": "explosion one times"}
291
+ {"filepath": "data/single_event_single_identity_test/syn_172.wav", "onoffCaption": "gunshot at 0.0-2.0", "frequencyCaption": "gunshot one times"}
292
+ {"filepath": "data/single_event_single_identity_test/syn_183.wav", "onoffCaption": "duck quacking at 0-1", "frequencyCaption": "duck quacking one times"}
293
+ {"filepath": "data/single_event_single_identity_test/syn_184.wav", "onoffCaption": "spraying at 0-1", "frequencyCaption": "spraying one times"}
294
+ {"filepath": "data/single_event_single_identity_test/syn_197.wav", "onoffCaption": "sheep goat bleating at 0.0-2.0, 3.0-5.0", "frequencyCaption": "sheep goat bleating two times"}
295
+ {"filepath": "data/single_event_single_identity_test/syn_199.wav", "onoffCaption": "dog barking at 0-2, 2-4", "frequencyCaption": "dog barking two times"}
296
+ {"filepath": "data/single_event_single_identity_test/syn_200.wav", "onoffCaption": "thump thud at 2.224-5.891, 7.389-9.889", "frequencyCaption": "thump thud two times"}
297
+ {"filepath": "data/single_event_single_identity_test/syn_12.wav", "onoffCaption": "dog barking at 0-1", "frequencyCaption": "dog barking one times"}
298
+ {"filepath": "data/single_event_single_identity_test/syn_15.wav", "onoffCaption": "dog barking at 0.464-2.464, 4.19-6.19", "frequencyCaption": "dog barking two times"}
299
+ {"filepath": "data/single_event_single_identity_test/syn_22.wav", "onoffCaption": "whistling at 2.603-10.0", "frequencyCaption": "whistling one times"}
300
+ {"filepath": "data/single_event_single_identity_test/syn_25.wav", "onoffCaption": "explosion at 0.0-2.0, 2.5-4.5", "frequencyCaption": "explosion two times"}
301
+ {"filepath": "data/single_event_single_identity_test/syn_31.wav", "onoffCaption": "gunshot at 0.0-2.0, 2.5-4.5, 5.0-7.0", "frequencyCaption": "gunshot three times"}
302
+ {"filepath": "data/single_event_single_identity_test/syn_36.wav", "onoffCaption": "dog barking at 0.5-1.5", "frequencyCaption": "dog barking one times"}
303
+ {"filepath": "data/single_event_single_identity_test/syn_38.wav", "onoffCaption": "dog barking at 0.464-2.464, 4.19-6.19", "frequencyCaption": "dog barking two times"}
304
+ {"filepath": "data/single_event_single_identity_test/syn_40.wav", "onoffCaption": "sheep goat bleating at 0-1", "frequencyCaption": "sheep goat bleating one times"}
305
+ {"filepath": "data/single_event_single_identity_test/syn_47.wav", "onoffCaption": "door slamming at 0-1, 2-3, 4-5", "frequencyCaption": "door slamming three times"}
306
+ {"filepath": "data/single_event_single_identity_test/syn_49.wav", "onoffCaption": "duck quacking at 0-1", "frequencyCaption": "duck quacking one times"}
307
+ {"filepath": "data/single_event_single_identity_test/syn_51.wav", "onoffCaption": "cat meowing at 0.0-1.0, 2.0-3.0, 4.0-5.0", "frequencyCaption": "cat meowing three times"}
308
+ {"filepath": "data/single_event_single_identity_test/syn_53.wav", "onoffCaption": "cat meowing at 0-2", "frequencyCaption": "cat meowing one times"}
309
+ {"filepath": "data/single_event_single_identity_test/syn_54.wav", "onoffCaption": "gunshot at 0-1, 2-3", "frequencyCaption": "gunshot two times"}
310
+ {"filepath": "data/single_event_single_identity_test/syn_63.wav", "onoffCaption": "door slamming at 0.355-2.581", "frequencyCaption": "door slamming one times"}
311
+ {"filepath": "data/single_event_single_identity_test/syn_64.wav", "onoffCaption": "sheep goat bleating at 0-1, 2-3, 4-5", "frequencyCaption": "sheep goat bleating three times"}
312
+ {"filepath": "data/single_event_single_identity_test/syn_70.wav", "onoffCaption": "sneeze at 1.3-2.403, 4.759-6.442", "frequencyCaption": "sneeze two times"}
313
+ {"filepath": "data/single_event_single_identity_test/syn_77.wav", "onoffCaption": "dog barking at 0-1, 2-3", "frequencyCaption": "dog barking two times"}
314
+ {"filepath": "data/single_event_single_identity_test/syn_79.wav", "onoffCaption": "tapping clicking clanking at 0.536-3.976", "frequencyCaption": "tapping clicking clanking one times"}
315
+ {"filepath": "data/single_event_single_identity_test/syn_81.wav", "onoffCaption": "spraying at 0-1", "frequencyCaption": "spraying one times"}
316
+ {"filepath": "data/single_event_single_identity_test/syn_86.wav", "onoffCaption": "door knocking at 1-2", "frequencyCaption": "door knocking one times"}
317
+ {"filepath": "data/single_event_single_identity_test/syn_88.wav", "onoffCaption": "cow mooing at 1.0-3.0, 4.0-6.0", "frequencyCaption": "cow mooing two times"}
318
+ {"filepath": "data/single_event_single_identity_test/syn_92.wav", "onoffCaption": "train horn at 0.0-2.0, 2.5-4.5", "frequencyCaption": "train horn two times"}
319
+ {"filepath": "data/single_event_single_identity_test/syn_95.wav", "onoffCaption": "thump thud at 0.0-1.0", "frequencyCaption": "thump thud one times"}
320
+ {"filepath": "data/single_event_single_identity_test/syn_102.wav", "onoffCaption": "thump thud at 0-1, 2-3", "frequencyCaption": "thump thud two times"}
321
+ {"filepath": "data/single_event_single_identity_test/syn_105.wav", "onoffCaption": "door slamming at 0.145-1.085, 2.545-4.463", "frequencyCaption": "door slamming two times"}
322
+ {"filepath": "data/single_event_single_identity_test/syn_111.wav", "onoffCaption": "door knocking at 0-1, 2-3", "frequencyCaption": "door knocking two times"}
323
+ {"filepath": "data/single_event_single_identity_test/syn_116.wav", "onoffCaption": "gunshot at 0-1, 2-3", "frequencyCaption": "gunshot two times"}
324
+ {"filepath": "data/single_event_single_identity_test/syn_118.wav", "onoffCaption": "cat meowing at 0-3", "frequencyCaption": "cat meowing one times"}
325
+ {"filepath": "data/single_event_single_identity_test/syn_121.wav", "onoffCaption": "door knocking at 1.155-5.305", "frequencyCaption": "door knocking one times"}
326
+ {"filepath": "data/single_event_single_identity_test/syn_126.wav", "onoffCaption": "sheep goat bleating at 0.5-2.5, 3.0-5.0", "frequencyCaption": "sheep goat bleating two times"}
327
+ {"filepath": "data/single_event_single_identity_test/syn_128.wav", "onoffCaption": "tapping clicking clanking at 0-1", "frequencyCaption": "tapping clicking clanking one times"}
328
+ {"filepath": "data/single_event_single_identity_test/syn_132.wav", "onoffCaption": "cat meowing at 0-1, 2-3, 4-5", "frequencyCaption": "cat meowing three times"}
329
+ {"filepath": "data/single_event_single_identity_test/syn_135.wav", "onoffCaption": "door slamming at 0.145-1.085, 2.545-4.463", "frequencyCaption": "door slamming two times"}
330
+ {"filepath": "data/single_event_single_identity_test/syn_139.wav", "onoffCaption": "duck quacking at 0-1, 2-3", "frequencyCaption": "duck quacking two times"}
331
+ {"filepath": "data/single_event_single_identity_test/syn_143.wav", "onoffCaption": "cat meowing at 0-1", "frequencyCaption": "cat meowing one times"}
332
+ {"filepath": "data/single_event_single_identity_test/syn_144.wav", "onoffCaption": "cow mooing at 1.954-4.602, 6.719-9.729", "frequencyCaption": "cow mooing two times"}
333
+ {"filepath": "data/single_event_single_identity_test/syn_150.wav", "onoffCaption": "dog barking at 0-1", "frequencyCaption": "dog barking one times"}
334
+ {"filepath": "data/single_event_single_identity_test/syn_157.wav", "onoffCaption": "sneeze at 0-1", "frequencyCaption": "sneeze one times"}
335
+ {"filepath": "data/single_event_single_identity_test/syn_159.wav", "onoffCaption": "sneeze at 0-1", "frequencyCaption": "sneeze one times"}
336
+ {"filepath": "data/single_event_single_identity_test/syn_160.wav", "onoffCaption": "tapping clicking clanking at 1.5-3.5, 5-7", "frequencyCaption": "tapping clicking clanking two times"}
337
+ {"filepath": "data/single_event_single_identity_test/syn_167.wav", "onoffCaption": "cat meowing at 0-1, 2-3", "frequencyCaption": "cat meowing two times"}
338
+ {"filepath": "data/single_event_single_identity_test/syn_169.wav", "onoffCaption": "train horn at 0-3.5", "frequencyCaption": "train horn one times"}
339
+ {"filepath": "data/single_event_single_identity_test/syn_173.wav", "onoffCaption": "thump thud at 0-1", "frequencyCaption": "thump thud one times"}
340
+ {"filepath": "data/single_event_single_identity_test/syn_174.wav", "onoffCaption": "cat meowing at 0-1.2, 2-3.2, 4-5.2", "frequencyCaption": "cat meowing three times"}
341
+ {"filepath": "data/single_event_single_identity_test/syn_182.wav", "onoffCaption": "door knocking at 1-3, 4-6", "frequencyCaption": "door knocking two times"}
342
+ {"filepath": "data/single_event_single_identity_test/syn_185.wav", "onoffCaption": "gunshot at 0.0-2.0, 3.0-5.0", "frequencyCaption": "gunshot two times"}
343
+ {"filepath": "data/single_event_single_identity_test/syn_189.wav", "onoffCaption": "door knocking at 2.047-4.422", "frequencyCaption": "door knocking one times"}
344
+ {"filepath": "data/single_event_single_identity_test/syn_191.wav", "onoffCaption": "cow mooing at 0-3", "frequencyCaption": "cow mooing one times"}
345
+ {"filepath": "data/single_event_single_identity_test/syn_196.wav", "onoffCaption": "door knocking at 0-1, 2-3, 4-5", "frequencyCaption": "door knocking three times"}
346
+ {"filepath": "data/single_event_single_identity_test/syn_198.wav", "onoffCaption": "explosion at 0.0-1.0", "frequencyCaption": "explosion one times"}
347
+ {"filepath": "data/single_event_single_identity_test/syn_10.wav", "onoffCaption": "duck quacking at 0-1", "frequencyCaption": "duck quacking one times"}
348
+ {"filepath": "data/single_event_single_identity_test/syn_17.wav", "onoffCaption": "burping belching at 0.871-5.871, 7.218-10.0", "frequencyCaption": "burping belching two times"}
349
+ {"filepath": "data/single_event_single_identity_test/syn_19.wav", "onoffCaption": "cat meowing at 0-1, 2-3", "frequencyCaption": "cat meowing two times"}
350
+ {"filepath": "data/single_event_single_identity_test/syn_20.wav", "onoffCaption": "tapping clicking clanking at 1.851-5.291, 7.569-10.0", "frequencyCaption": "tapping clicking clanking two times"}
351
+ {"filepath": "data/single_event_single_identity_test/syn_27.wav", "onoffCaption": "spraying at 0-1, 2-3, 4-5", "frequencyCaption": "spraying three times"}
352
+ {"filepath": "data/single_event_single_identity_test/syn_29.wav", "onoffCaption": "tapping clicking clanking at 1-3, 4-6", "frequencyCaption": "tapping clicking clanking two times"}
353
+ {"filepath": "data/single_event_single_identity_test/syn_33.wav", "onoffCaption": "train horn at 0-3", "frequencyCaption": "train horn one times"}
354
+ {"filepath": "data/single_event_single_identity_test/syn_34.wav", "onoffCaption": "burping belching at 0-1, 2-3", "frequencyCaption": "burping belching two times"}
355
+ {"filepath": "data/single_event_single_identity_test/syn_42.wav", "onoffCaption": "dog barking at 2.579-4.579", "frequencyCaption": "dog barking one times"}
356
+ {"filepath": "data/single_event_single_identity_test/syn_45.wav", "onoffCaption": "cat meowing at 0-1", "frequencyCaption": "cat meowing one times"}
357
+ {"filepath": "data/single_event_single_identity_test/syn_56.wav", "onoffCaption": "train horn at 0-3", "frequencyCaption": "train horn one times"}
358
+ {"filepath": "data/single_event_single_identity_test/syn_58.wav", "onoffCaption": "duck quacking at 0-1, 2-3", "frequencyCaption": "duck quacking two times"}
359
+ {"filepath": "data/single_event_single_identity_test/syn_61.wav", "onoffCaption": "spraying at 0.0-1.0", "frequencyCaption": "spraying one times"}
360
+ {"filepath": "data/single_event_single_identity_test/syn_66.wav", "onoffCaption": "cat meowing at 0-1, 2-3", "frequencyCaption": "cat meowing two times"}
361
+ {"filepath": "data/single_event_single_identity_test/syn_68.wav", "onoffCaption": "duck quacking at 0-1", "frequencyCaption": "duck quacking one times"}
362
+ {"filepath": "data/single_event_single_identity_test/syn_72.wav", "onoffCaption": "sneeze at 1.3-2.403, 4.759-6.442", "frequencyCaption": "sneeze two times"}
363
+ {"filepath": "data/single_event_single_identity_test/syn_75.wav", "onoffCaption": "door slamming at 0.145-1.085, 2.545-4.463", "frequencyCaption": "door slamming two times"}
364
+ {"filepath": "data/single_event_single_identity_test/syn_83.wav", "onoffCaption": "dog barking at 0.464-2.464, 4.19-6.19", "frequencyCaption": "dog barking two times"}
365
+ {"filepath": "data/single_event_single_identity_test/syn_84.wav", "onoffCaption": "woman laughing at 0-2, 2-4", "frequencyCaption": "woman laughing two times"}
366
+ {"filepath": "data/single_event_single_identity_test/syn_90.wav", "onoffCaption": "dog barking at 0-1, 2-3, 4-5", "frequencyCaption": "dog barking three times"}
367
+ {"filepath": "data/single_event_single_identity_test/syn_97.wav", "onoffCaption": "gunshot at 0-1", "frequencyCaption": "gunshot one times"}
368
+ {"filepath": "data/single_event_single_identity_test/syn_99.wav", "onoffCaption": "door knocking at 1-3, 4-6", "frequencyCaption": "door knocking two times"}
369
+ {"filepath": "data/single_event_single_identity_test/syn_100.wav", "onoffCaption": "burping belching at 0.871-2.871, 3.891-5.891", "frequencyCaption": "burping belching two times"}
370
+ {"filepath": "data/single_event_single_identity_test/syn_107.wav", "onoffCaption": "woman laughing at 0-2, 5-7", "frequencyCaption": "woman laughing two times"}
371
+ {"filepath": "data/single_event_single_identity_test/syn_109.wav", "onoffCaption": "cat meowing at 0.0-2.0, 3.0-5.0, 6.0-8.0", "frequencyCaption": "cat meowing three times"}
372
+ {"filepath": "data/single_event_single_identity_test/syn_113.wav", "onoffCaption": "duck quacking at 0-1", "frequencyCaption": "duck quacking one times"}
373
+ {"filepath": "data/single_event_single_identity_test/syn_114.wav", "onoffCaption": "duck quacking at 0-1, 2-3", "frequencyCaption": "duck quacking two times"}
374
+ {"filepath": "data/single_event_single_identity_test/syn_123.wav", "onoffCaption": "woman laughing at 2.777-6.165", "frequencyCaption": "woman laughing one times"}
375
+ {"filepath": "data/single_event_single_identity_test/syn_124.wav", "onoffCaption": "door slamming at 0.145-1.085, 2.545-4.463", "frequencyCaption": "door slamming two times"}
376
+ {"filepath": "data/single_event_single_identity_test/syn_130.wav", "onoffCaption": "duck quacking at 0.0-2.0, 3.0-5.0, 6.0-8.0", "frequencyCaption": "duck quacking three times"}
377
+ {"filepath": "data/single_event_single_identity_test/syn_137.wav", "onoffCaption": "dog barking at 0.5-2.5, 3.0-5.0", "frequencyCaption": "dog barking two times"}
378
+ {"filepath": "data/single_event_single_identity_test/syn_141.wav", "onoffCaption": "woman laughing at 2.782-5.368", "frequencyCaption": "woman laughing one times"}
379
+ {"filepath": "data/single_event_single_identity_test/syn_146.wav", "onoffCaption": "dog barking at 0-1, 2-3", "frequencyCaption": "dog barking two times"}
380
+ {"filepath": "data/single_event_single_identity_test/syn_148.wav", "onoffCaption": "thump thud at 2-3", "frequencyCaption": "thump thud one times"}
381
+ {"filepath": "data/single_event_single_identity_test/syn_152.wav", "onoffCaption": "sheep goat bleating at 0-1, 2-3", "frequencyCaption": "sheep goat bleating two times"}
382
+ {"filepath": "data/single_event_single_identity_test/syn_155.wav", "onoffCaption": "woman laughing at 0.5-2.5, 3.0-5.0", "frequencyCaption": "woman laughing two times"}
383
+ {"filepath": "data/single_event_single_identity_test/syn_162.wav", "onoffCaption": "door knocking at 0-1, 1-2", "frequencyCaption": "door knocking two times"}
384
+ {"filepath": "data/single_event_single_identity_test/syn_165.wav", "onoffCaption": "door slamming at 0.355-2.581", "frequencyCaption": "door slamming one times"}
385
+ {"filepath": "data/single_event_single_identity_test/syn_171.wav", "onoffCaption": "woman laughing at 2.672-5.672", "frequencyCaption": "woman laughing one times"}
386
+ {"filepath": "data/single_event_single_identity_test/syn_176.wav", "onoffCaption": "burping belching at 0.5-3.5, 4.5-7.5", "frequencyCaption": "burping belching two times"}
387
+ {"filepath": "data/single_event_single_identity_test/syn_178.wav", "onoffCaption": "sheep goat bleating at 1-3, 4-7", "frequencyCaption": "sheep goat bleating two times"}
388
+ {"filepath": "data/single_event_single_identity_test/syn_180.wav", "onoffCaption": "cow mooing at 0-3", "frequencyCaption": "cow mooing one times"}
389
+ {"filepath": "data/single_event_single_identity_test/syn_187.wav", "onoffCaption": "gunshot at 0.5-2.5, 3.0-5.0", "frequencyCaption": "gunshot two times"}
390
+ {"filepath": "data/single_event_single_identity_test/syn_193.wav", "onoffCaption": "tapping clicking clanking at 1.851-5.291, 7.569-10.0", "frequencyCaption": "tapping clicking clanking two times"}
391
+ {"filepath": "data/single_event_single_identity_test/syn_194.wav", "onoffCaption": "train horn at 0-3", "frequencyCaption": "train horn one times"}
392
+ {"filepath": "data/single_event_single_identity_test/syn_1.wav", "onoffCaption": "cat meowing at 1.0-3.0, 4.0-6.0", "frequencyCaption": "cat meowing two times"}
393
+ {"filepath": "data/single_event_single_identity_test/syn_2.wav", "onoffCaption": "cat meowing at 0.5-1.5, 2.5-3.5", "frequencyCaption": "cat meowing two times"}
394
+ {"filepath": "data/single_event_single_identity_test/syn_3.wav", "onoffCaption": "burping belching at 0-1, 2-3, 4-5", "frequencyCaption": "burping belching three times"}
395
+ {"filepath": "data/single_event_single_identity_test/syn_4.wav", "onoffCaption": "car horn honking at 0.664-3.129, 4.357-7.014", "frequencyCaption": "car horn honking two times"}
396
+ {"filepath": "data/single_event_single_identity_test/syn_5.wav", "onoffCaption": "dog barking at 0.0-2.0", "frequencyCaption": "dog barking one times"}
397
+ {"filepath": "data/single_event_single_identity_test/syn_6.wav", "onoffCaption": "explosion at 0-1", "frequencyCaption": "explosion one times"}
398
+ {"filepath": "data/single_event_single_identity_test/syn_7.wav", "onoffCaption": "dog barking at 0-1", "frequencyCaption": "dog barking one times"}
399
+ {"filepath": "data/single_event_single_identity_test/syn_8.wav", "onoffCaption": "burping belching at 2.861-8.462", "frequencyCaption": "burping belching one times"}
400
+ {"filepath": "data/single_event_single_identity_test/syn_9.wav", "onoffCaption": "burping belching at 0.569-4.438", "frequencyCaption": "burping belching one times"}
data/meta_data/test-onoff-control_multi-event.json ADDED
@@ -0,0 +1,200 @@
1
+ {"filepath": "data/multi_event_test/syn_1.wav", "onoffCaption": "cat meowing at 0.393-1.783, 3.975-5.365 and whistling at 0.861-5.455 and explosion at 2.089-4.841, 5.738-8.538", "frequencyCaption": "cat meowing two times and whistling one times and explosion two times"}
2
+ {"filepath": "data/multi_event_test/syn_6.wav", "onoffCaption": "whistling at 2.093-10.0", "frequencyCaption": "whistling one times"}
3
+ {"filepath": "data/multi_event_test/syn_8.wav", "onoffCaption": "cow mooing at 1.177-3.977, 5.15-7.774", "frequencyCaption": "cow mooing two times"}
4
+ {"filepath": "data/multi_event_test/syn_11.wav", "onoffCaption": "burping belching at 1.039-3.198, 4.085-6.244 and dog barking at 3.119-5.119", "frequencyCaption": "burping belching two times and dog barking one times"}
5
+ {"filepath": "data/multi_event_test/syn_16.wav", "onoffCaption": "duck quacking at 0.799-2.799, 3.634-5.634, 6.976-8.976", "frequencyCaption": "duck quacking three times"}
6
+ {"filepath": "data/multi_event_test/syn_18.wav", "onoffCaption": "door knocking at 1.225-3.352, 5.173-7.3 and door slamming at 5.439-7.678", "frequencyCaption": "door knocking two times and door slamming one times"}
7
+ {"filepath": "data/multi_event_test/syn_21.wav", "onoffCaption": "dog barking at 2.947-4.947, 6.186-8.186", "frequencyCaption": "dog barking two times"}
8
+ {"filepath": "data/multi_event_test/syn_26.wav", "onoffCaption": "whistling at 2.848-7.442", "frequencyCaption": "whistling one times"}
9
+ {"filepath": "data/multi_event_test/syn_28.wav", "onoffCaption": "cow mooing at 2.639-5.263 and spraying at 8.565-9.667", "frequencyCaption": "cow mooing one times and spraying one times"}
10
+ {"filepath": "data/multi_event_test/syn_32.wav", "onoffCaption": "duck quacking at 0.039-2.039, 3.171-5.171, 5.938-7.938", "frequencyCaption": "duck quacking three times"}
11
+ {"filepath": "data/multi_event_test/syn_35.wav", "onoffCaption": "car horn honking at 2.31-5.271, 5.91-8.871", "frequencyCaption": "car horn honking two times"}
12
+ {"filepath": "data/multi_event_test/syn_43.wav", "onoffCaption": "dog barking at 2.157-4.157, 5.953-7.953 and burping belching at 2.431-5.388, 6.452-8.611 and explosion at 4.8-7.552", "frequencyCaption": "dog barking two times and burping belching two times and explosion one times"}
13
+ {"filepath": "data/multi_event_test/syn_44.wav", "onoffCaption": "sneeze at 2.638-6.791", "frequencyCaption": "sneeze one times"}
14
+ {"filepath": "data/multi_event_test/syn_50.wav", "onoffCaption": "car horn honking at 0.874-3.835, 4.429-7.39 and sneeze at 1.814-5.167 and train horn at 2.818-7.898", "frequencyCaption": "car horn honking two times and sneeze one times and train horn one times"}
15
+ {"filepath": "data/multi_event_test/syn_57.wav", "onoffCaption": "dog barking at 3.007-5.007, 6.103-8.103 and cow mooing at 3.017-5.641", "frequencyCaption": "dog barking two times and cow mooing one times"}
16
+ {"filepath": "data/multi_event_test/syn_59.wav", "onoffCaption": "door slamming at 0.035-2.274 and explosion at 3.857-6.609, 7.377-10.0", "frequencyCaption": "door slamming one times and explosion two times"}
17
+ {"filepath": "data/multi_event_test/syn_60.wav", "onoffCaption": "train horn at 0.062-3.062", "frequencyCaption": "train horn one times"}
18
+ {"filepath": "data/multi_event_test/syn_67.wav", "onoffCaption": "whistling at 1.616-10.0", "frequencyCaption": "whistling one times"}
19
+ {"filepath": "data/multi_event_test/syn_69.wav", "onoffCaption": "door knocking at 0.237-2.801, 4.117-6.681, 7.378-9.942", "frequencyCaption": "door knocking three times"}
+ {"filepath": "data/multi_event_test/syn_73.wav", "onoffCaption": "door knocking at 0.045-2.172, 2.718-5.282, 6.027-8.591 and sneeze at 2.92-6.273, 6.847-9.032", "frequencyCaption": "door knocking three times and sneeze two times"}
+ {"filepath": "data/multi_event_test/syn_74.wav", "onoffCaption": "spraying at 0.38-1.176, 3.06-3.856 and gunshot at 1.729-3.729, 4.367-6.367, 7.031-9.031", "frequencyCaption": "spraying two times and gunshot three times"}
+ {"filepath": "data/multi_event_test/syn_82.wav", "onoffCaption": "dog barking at 0.497-2.497, 4.187-6.187", "frequencyCaption": "dog barking two times"}
+ {"filepath": "data/multi_event_test/syn_91.wav", "onoffCaption": "gunshot at 0.501-2.501, 3.148-5.148", "frequencyCaption": "gunshot two times"}
+ {"filepath": "data/multi_event_test/syn_96.wav", "onoffCaption": "door slamming at 0.154-2.393, 3.23-4.641, 5.232-7.471", "frequencyCaption": "door slamming three times"}
+ {"filepath": "data/multi_event_test/syn_98.wav", "onoffCaption": "thump thud at 1.835-4.135, 6.505-9.18", "frequencyCaption": "thump thud two times"}
+ {"filepath": "data/multi_event_test/syn_101.wav", "onoffCaption": "dog barking at 0.681-2.681", "frequencyCaption": "dog barking one times"}
+ {"filepath": "data/multi_event_test/syn_106.wav", "onoffCaption": "burping belching at 0.093-3.05, 3.962-6.121, 7.309-9.468", "frequencyCaption": "burping belching three times"}
+ {"filepath": "data/multi_event_test/syn_108.wav", "onoffCaption": "sneeze at 3.287-7.44", "frequencyCaption": "sneeze one times"}
+ {"filepath": "data/multi_event_test/syn_112.wav", "onoffCaption": "woman laughing at 1.823-4.587, 6.243-9.007", "frequencyCaption": "woman laughing two times"}
+ {"filepath": "data/multi_event_test/syn_115.wav", "onoffCaption": "duck quacking at 0.044-1.862 and tapping clicking clanking at 0.436-3.876, 5.547-7.6", "frequencyCaption": "duck quacking one times and tapping clicking clanking two times"}
+ {"filepath": "data/multi_event_test/syn_122.wav", "onoffCaption": "door knocking at 1.266-3.83, 4.854-7.418, 7.929-10.0", "frequencyCaption": "door knocking three times"}
+ {"filepath": "data/multi_event_test/syn_125.wav", "onoffCaption": "cow mooing at 2.954-5.754, 6.384-9.008", "frequencyCaption": "cow mooing two times"}
+ {"filepath": "data/multi_event_test/syn_131.wav", "onoffCaption": "whistling at 0.666-5.26, 5.984-8.335 and cat meowing at 0.904-2.294", "frequencyCaption": "whistling two times and cat meowing one times"}
+ {"filepath": "data/multi_event_test/syn_136.wav", "onoffCaption": "sheep goat bleating at 0.226-2.226, 3.707-5.707 and whistling at 1.058-5.652, 6.943-10.0 and woman laughing at 2.749-7.207", "frequencyCaption": "sheep goat bleating two times and whistling two times and woman laughing one times"}
+ {"filepath": "data/multi_event_test/syn_138.wav", "onoffCaption": "gunshot at 0.785-2.785 and tapping clicking clanking at 5.685-9.125", "frequencyCaption": "gunshot one times and tapping clicking clanking one times"}
+ {"filepath": "data/multi_event_test/syn_140.wav", "onoffCaption": "door knocking at 0.341-2.468, 3.382-5.946, 7.206-9.77", "frequencyCaption": "door knocking three times"}
+ {"filepath": "data/multi_event_test/syn_147.wav", "onoffCaption": "door slamming at 0.305-1.716, 2.95-4.361, 5.691-7.102", "frequencyCaption": "door slamming three times"}
+ {"filepath": "data/multi_event_test/syn_149.wav", "onoffCaption": "car horn honking at 0.666-3.35, 5.748-8.432 and spraying at 7.494-8.29, 8.904-9.7", "frequencyCaption": "car horn honking two times and spraying two times"}
+ {"filepath": "data/multi_event_test/syn_153.wav", "onoffCaption": "cat meowing at 3.029-4.355", "frequencyCaption": "cat meowing one times"}
+ {"filepath": "data/multi_event_test/syn_154.wav", "onoffCaption": "cat meowing at 2.044-3.37 and door knocking at 2.866-5.43", "frequencyCaption": "cat meowing one times and door knocking one times"}
+ {"filepath": "data/multi_event_test/syn_163.wav", "onoffCaption": "sheep goat bleating at 0.139-2.139, 3.188-5.188, 6.077-8.077", "frequencyCaption": "sheep goat bleating three times"}
+ {"filepath": "data/multi_event_test/syn_164.wav", "onoffCaption": "whistling at 0.15-4.744, 6.868-8.971", "frequencyCaption": "whistling two times"}
+ {"filepath": "data/multi_event_test/syn_170.wav", "onoffCaption": "dog barking at 0.286-2.286, 3.801-5.801", "frequencyCaption": "dog barking two times"}
+ {"filepath": "data/multi_event_test/syn_177.wav", "onoffCaption": "thump thud at 0.593-2.893 and cow mooing at 4.617-7.241", "frequencyCaption": "thump thud one times and cow mooing one times"}
+ {"filepath": "data/multi_event_test/syn_179.wav", "onoffCaption": "cow mooing at 2.754-5.378, 6.145-8.769", "frequencyCaption": "cow mooing two times"}
+ {"filepath": "data/multi_event_test/syn_181.wav", "onoffCaption": "cow mooing at 3.381-6.181, 7.936-10.0", "frequencyCaption": "cow mooing two times"}
+ {"filepath": "data/multi_event_test/syn_186.wav", "onoffCaption": "gunshot at 0.131-2.131", "frequencyCaption": "gunshot one times"}
+ {"filepath": "data/multi_event_test/syn_188.wav", "onoffCaption": "gunshot at 0.785-2.785, 3.847-5.847 and duck quacking at 2.99-4.99", "frequencyCaption": "gunshot two times and duck quacking one times"}
+ {"filepath": "data/multi_event_test/syn_192.wav", "onoffCaption": "spraying at 1.763-2.865, 5.335-6.437", "frequencyCaption": "spraying two times"}
+ {"filepath": "data/multi_event_test/syn_195.wav", "onoffCaption": "thump thud at 2.422-5.097, 5.945-8.245", "frequencyCaption": "thump thud two times"}
+ {"filepath": "data/multi_event_test/syn_3.wav", "onoffCaption": "tapping clicking clanking at 2.711-6.151, 7.783-10.0", "frequencyCaption": "tapping clicking clanking two times"}
+ {"filepath": "data/multi_event_test/syn_4.wav", "onoffCaption": "door slamming at 3.076-4.487, 6.877-8.288", "frequencyCaption": "door slamming two times"}
+ {"filepath": "data/multi_event_test/syn_13.wav", "onoffCaption": "duck quacking at 0.012-2.012, 3.202-5.202, 7.582-9.582", "frequencyCaption": "duck quacking three times"}
+ {"filepath": "data/multi_event_test/syn_14.wav", "onoffCaption": "sneeze at 1.853-6.006", "frequencyCaption": "sneeze one times"}
+ {"filepath": "data/multi_event_test/syn_23.wav", "onoffCaption": "sneeze at 0.109-4.262, 6.151-8.608", "frequencyCaption": "sneeze two times"}
+ {"filepath": "data/multi_event_test/syn_24.wav", "onoffCaption": "woman laughing at 3.051-7.509", "frequencyCaption": "woman laughing one times"}
+ {"filepath": "data/multi_event_test/syn_30.wav", "onoffCaption": "burping belching at 3.234-6.191, 7.597-10.0", "frequencyCaption": "burping belching two times"}
+ {"filepath": "data/multi_event_test/syn_37.wav", "onoffCaption": "thump thud at 1.883-4.558, 6.153-8.453 and door knocking at 2.227-4.791, 5.771-8.335 and burping belching at 6.746-8.905", "frequencyCaption": "thump thud two times and door knocking two times and burping belching one times"}
+ {"filepath": "data/multi_event_test/syn_39.wav", "onoffCaption": "train horn at 2.197-5.197, 5.755-8.755", "frequencyCaption": "train horn two times"}
+ {"filepath": "data/multi_event_test/syn_41.wav", "onoffCaption": "thump thud at 1.465-3.765", "frequencyCaption": "thump thud one times"}
+ {"filepath": "data/multi_event_test/syn_48.wav", "onoffCaption": "cat meowing at 0.07-1.396, 3.738-5.064, 6.912-8.238", "frequencyCaption": "cat meowing three times"}
+ {"filepath": "data/multi_event_test/syn_52.wav", "onoffCaption": "gunshot at 0.761-2.761 and duck quacking at 0.994-2.994 and tapping clicking clanking at 5.144-8.584", "frequencyCaption": "gunshot one times and duck quacking one times and tapping clicking clanking one times"}
+ {"filepath": "data/multi_event_test/syn_55.wav", "onoffCaption": "sneeze at 2.529-6.682, 7.206-9.677", "frequencyCaption": "sneeze two times"}
+ {"filepath": "data/multi_event_test/syn_62.wav", "onoffCaption": "woman laughing at 0.152-2.916, 5.112-7.934", "frequencyCaption": "woman laughing two times"}
+ {"filepath": "data/multi_event_test/syn_65.wav", "onoffCaption": "gunshot at 3.755-5.755, 6.54-8.54", "frequencyCaption": "gunshot two times"}
+ {"filepath": "data/multi_event_test/syn_71.wav", "onoffCaption": "door slamming at 0.023-2.262, 4.712-6.123 and whistling at 1.979-6.573", "frequencyCaption": "door slamming two times and whistling one times"}
+ {"filepath": "data/multi_event_test/syn_76.wav", "onoffCaption": "dog barking at 0.741-2.741", "frequencyCaption": "dog barking one times"}
+ {"filepath": "data/multi_event_test/syn_78.wav", "onoffCaption": "explosion at 0.11-2.862, 4.292-7.044 and duck quacking at 2.338-4.156, 5.898-7.716", "frequencyCaption": "explosion two times and duck quacking two times"}
+ {"filepath": "data/multi_event_test/syn_80.wav", "onoffCaption": "door slamming at 0.695-2.106 and sheep goat bleating at 0.985-2.985", "frequencyCaption": "door slamming one times and sheep goat bleating one times"}
+ {"filepath": "data/multi_event_test/syn_85.wav", "onoffCaption": "door knocking at 4.074-6.201", "frequencyCaption": "door knocking one times"}
+ {"filepath": "data/multi_event_test/syn_87.wav", "onoffCaption": "explosion at 0.371-3.123, 5.335-8.087", "frequencyCaption": "explosion two times"}
+ {"filepath": "data/multi_event_test/syn_89.wav", "onoffCaption": "car horn honking at 2.099-5.06 and cat meowing at 5.989-7.315", "frequencyCaption": "car horn honking one times and cat meowing one times"}
+ {"filepath": "data/multi_event_test/syn_93.wav", "onoffCaption": "dog barking at 0.988-2.988, 5.289-7.289", "frequencyCaption": "dog barking two times"}
+ {"filepath": "data/multi_event_test/syn_94.wav", "onoffCaption": "gunshot at 1.463-3.463, 4.41-6.41, 7.226-9.226 and thump thud at 1.729-4.404, 6.318-8.993 and sheep goat bleating at 1.895-3.895, 5.909-7.909", "frequencyCaption": "gunshot three times and thump thud two times and sheep goat bleating two times"}
+ {"filepath": "data/multi_event_test/syn_103.wav", "onoffCaption": "whistling at 2.759-10.0", "frequencyCaption": "whistling one times"}
+ {"filepath": "data/multi_event_test/syn_104.wav", "onoffCaption": "duck quacking at 4.149-5.967", "frequencyCaption": "duck quacking one times"}
+ {"filepath": "data/multi_event_test/syn_110.wav", "onoffCaption": "train horn at 0.111-5.191 and duck quacking at 0.894-2.894 and cow mooing at 5.062-7.862", "frequencyCaption": "train horn one times and duck quacking one times and cow mooing one times"}
+ {"filepath": "data/multi_event_test/syn_117.wav", "onoffCaption": "sheep goat bleating at 3.487-5.487, 7.705-9.705", "frequencyCaption": "sheep goat bleating two times"}
+ {"filepath": "data/multi_event_test/syn_119.wav", "onoffCaption": "train horn at 2.056-5.056 and door knocking at 2.912-5.039, 5.997-8.124", "frequencyCaption": "train horn one times and door knocking two times"}
+ {"filepath": "data/multi_event_test/syn_120.wav", "onoffCaption": "burping belching at 2.114-5.071, 5.723-8.68", "frequencyCaption": "burping belching two times"}
+ {"filepath": "data/multi_event_test/syn_127.wav", "onoffCaption": "whistling at 1.653-10.0", "frequencyCaption": "whistling one times"}
+ {"filepath": "data/multi_event_test/syn_129.wav", "onoffCaption": "door knocking at 0.592-2.719, 3.326-5.453, 6.255-8.382", "frequencyCaption": "door knocking three times"}
+ {"filepath": "data/multi_event_test/syn_133.wav", "onoffCaption": "duck quacking at 1.444-3.262, 4.595-6.413", "frequencyCaption": "duck quacking two times"}
+ {"filepath": "data/multi_event_test/syn_134.wav", "onoffCaption": "car horn honking at 0.439-3.123, 5.193-7.877", "frequencyCaption": "car horn honking two times"}
+ {"filepath": "data/multi_event_test/syn_142.wav", "onoffCaption": "sneeze at 0.338-4.491, 5.776-7.91", "frequencyCaption": "sneeze two times"}
+ {"filepath": "data/multi_event_test/syn_145.wav", "onoffCaption": "door knocking at 0.308-2.872, 4.395-6.959 and whistling at 0.583-9.383", "frequencyCaption": "door knocking two times and whistling one times"}
+ {"filepath": "data/multi_event_test/syn_151.wav", "onoffCaption": "dog barking at 0.368-2.368, 3.112-5.112, 5.983-7.983", "frequencyCaption": "dog barking three times"}
+ {"filepath": "data/multi_event_test/syn_156.wav", "onoffCaption": "car horn honking at 0.03-2.714, 3.401-6.085, 6.775-9.459", "frequencyCaption": "car horn honking three times"}
+ {"filepath": "data/multi_event_test/syn_158.wav", "onoffCaption": "tapping clicking clanking at 3.057-6.497, 7.876-10.0", "frequencyCaption": "tapping clicking clanking two times"}
+ {"filepath": "data/multi_event_test/syn_161.wav", "onoffCaption": "spraying at 0.049-1.151, 2.004-2.8", "frequencyCaption": "spraying two times"}
+ {"filepath": "data/multi_event_test/syn_166.wav", "onoffCaption": "woman laughing at 1.442-5.9", "frequencyCaption": "woman laughing one times"}
+ {"filepath": "data/multi_event_test/syn_168.wav", "onoffCaption": "sheep goat bleating at 0.016-2.016", "frequencyCaption": "sheep goat bleating one times"}
+ {"filepath": "data/multi_event_test/syn_172.wav", "onoffCaption": "door knocking at 0.153-2.28, 3.142-5.706, 6.305-8.869", "frequencyCaption": "door knocking three times"}
+ {"filepath": "data/multi_event_test/syn_175.wav", "onoffCaption": "cow mooing at 0.61-3.41 and spraying at 3.012-4.114", "frequencyCaption": "cow mooing one times and spraying one times"}
+ {"filepath": "data/multi_event_test/syn_183.wav", "onoffCaption": "explosion at 0.192-5.114, 5.844-8.596", "frequencyCaption": "explosion two times"}
+ {"filepath": "data/multi_event_test/syn_184.wav", "onoffCaption": "sheep goat bleating at 0.322-2.322", "frequencyCaption": "sheep goat bleating one times"}
+ {"filepath": "data/multi_event_test/syn_190.wav", "onoffCaption": "whistling at 2.571-7.165", "frequencyCaption": "whistling one times"}
+ {"filepath": "data/multi_event_test/syn_197.wav", "onoffCaption": "tapping clicking clanking at 1.043-4.483, 5.786-9.226", "frequencyCaption": "tapping clicking clanking two times"}
+ {"filepath": "data/multi_event_test/syn_199.wav", "onoffCaption": "duck quacking at 3.246-5.246 and cat meowing at 7.245-8.635", "frequencyCaption": "duck quacking one times and cat meowing one times"}
+ {"filepath": "data/multi_event_test/syn_200.wav", "onoffCaption": "explosion at 3.045-5.797, 7.133-9.196", "frequencyCaption": "explosion two times"}
+ {"filepath": "data/multi_event_test/syn_2.wav", "onoffCaption": "door knocking at 2.42-4.984", "frequencyCaption": "door knocking one times"}
+ {"filepath": "data/multi_event_test/syn_5.wav", "onoffCaption": "burping belching at 3.676-5.835", "frequencyCaption": "burping belching one times"}
+ {"filepath": "data/multi_event_test/syn_12.wav", "onoffCaption": "sheep goat bleating at 1.611-3.611 and sneeze at 5.808-9.161", "frequencyCaption": "sheep goat bleating one times and sneeze one times"}
+ {"filepath": "data/multi_event_test/syn_15.wav", "onoffCaption": "tapping clicking clanking at 0.807-4.247", "frequencyCaption": "tapping clicking clanking one times"}
+ {"filepath": "data/multi_event_test/syn_22.wav", "onoffCaption": "whistling at 3.354-7.948", "frequencyCaption": "whistling one times"}
+ {"filepath": "data/multi_event_test/syn_25.wav", "onoffCaption": "burping belching at 2.316-5.273, 6.42-9.377", "frequencyCaption": "burping belching two times"}
+ {"filepath": "data/multi_event_test/syn_31.wav", "onoffCaption": "woman laughing at 0.674-5.132, 6.464-10.0", "frequencyCaption": "woman laughing two times"}
+ {"filepath": "data/multi_event_test/syn_36.wav", "onoffCaption": "door slamming at 0.106-2.345, 2.885-5.124, 5.997-8.236", "frequencyCaption": "door slamming three times"}
+ {"filepath": "data/multi_event_test/syn_38.wav", "onoffCaption": "cat meowing at 0.245-1.571, 3.125-4.451, 5.016-6.342", "frequencyCaption": "cat meowing three times"}
+ {"filepath": "data/multi_event_test/syn_40.wav", "onoffCaption": "door knocking at 2.051-4.178, 4.942-7.506 and cow mooing at 2.928-5.728", "frequencyCaption": "door knocking two times and cow mooing one times"}
+ {"filepath": "data/multi_event_test/syn_46.wav", "onoffCaption": "door slamming at 0.382-1.793, 2.674-4.913", "frequencyCaption": "door slamming two times"}
+ {"filepath": "data/multi_event_test/syn_47.wav", "onoffCaption": "spraying at 0.719-1.515, 2.813-3.915, 4.469-5.265 and cow mooing at 1.592-4.392, 4.998-7.798", "frequencyCaption": "spraying three times and cow mooing two times"}
+ {"filepath": "data/multi_event_test/syn_49.wav", "onoffCaption": "sheep goat bleating at 0.44-2.44, 3.141-5.141 and tapping clicking clanking at 1.283-4.723, 6.144-8.215", "frequencyCaption": "sheep goat bleating two times and tapping clicking clanking two times"}
+ {"filepath": "data/multi_event_test/syn_51.wav", "onoffCaption": "train horn at 0.258-3.258, 4.737-7.277", "frequencyCaption": "train horn two times"}
+ {"filepath": "data/multi_event_test/syn_53.wav", "onoffCaption": "dog barking at 0.072-2.072, 3.076-5.076, 6.003-8.003", "frequencyCaption": "dog barking three times"}
+ {"filepath": "data/multi_event_test/syn_54.wav", "onoffCaption": "train horn at 0.347-3.347, 4.652-7.652", "frequencyCaption": "train horn two times"}
+ {"filepath": "data/multi_event_test/syn_63.wav", "onoffCaption": "train horn at 0.507-3.507 and cat meowing at 7.463-8.789 and dog barking at 7.612-9.612", "frequencyCaption": "train horn one times and cat meowing one times and dog barking one times"}
+ {"filepath": "data/multi_event_test/syn_64.wav", "onoffCaption": "sheep goat bleating at 1.521-3.521", "frequencyCaption": "sheep goat bleating one times"}
+ {"filepath": "data/multi_event_test/syn_70.wav", "onoffCaption": "whistling at 2.267-10.0", "frequencyCaption": "whistling one times"}
+ {"filepath": "data/multi_event_test/syn_77.wav", "onoffCaption": "cow mooing at 0.75-3.55", "frequencyCaption": "cow mooing one times"}
+ {"filepath": "data/multi_event_test/syn_79.wav", "onoffCaption": "dog barking at 1.282-3.282, 4.117-6.117, 6.789-8.789", "frequencyCaption": "dog barking three times"}
+ {"filepath": "data/multi_event_test/syn_81.wav", "onoffCaption": "gunshot at 0.019-2.019, 2.851-4.851, 5.918-7.918", "frequencyCaption": "gunshot three times"}
+ {"filepath": "data/multi_event_test/syn_86.wav", "onoffCaption": "whistling at 1.438-6.032 and woman laughing at 2.351-5.115, 6.601-9.365", "frequencyCaption": "whistling one times and woman laughing two times"}
+ {"filepath": "data/multi_event_test/syn_88.wav", "onoffCaption": "sheep goat bleating at 3.021-5.021, 6.26-8.26", "frequencyCaption": "sheep goat bleating two times"}
+ {"filepath": "data/multi_event_test/syn_92.wav", "onoffCaption": "door slamming at 0.346-1.757, 2.569-3.98, 5.839-7.25 and tapping clicking clanking at 2.508-5.948", "frequencyCaption": "door slamming three times and tapping clicking clanking one times"}
+ {"filepath": "data/multi_event_test/syn_95.wav", "onoffCaption": "door slamming at 2.522-3.933, 5.673-7.084, 8.486-9.897", "frequencyCaption": "door slamming three times"}
+ {"filepath": "data/multi_event_test/syn_102.wav", "onoffCaption": "door knocking at 2.145-4.272, 4.881-7.008", "frequencyCaption": "door knocking two times"}
+ {"filepath": "data/multi_event_test/syn_105.wav", "onoffCaption": "train horn at 0.682-3.682, 4.465-6.698, 7.809-10.0", "frequencyCaption": "train horn three times"}
+ {"filepath": "data/multi_event_test/syn_111.wav", "onoffCaption": "whistling at 0.032-4.626, 6.182-10.0 and door slamming at 0.753-2.164", "frequencyCaption": "whistling two times and door slamming one times"}
+ {"filepath": "data/multi_event_test/syn_116.wav", "onoffCaption": "burping belching at 3.577-5.736, 6.261-9.218", "frequencyCaption": "burping belching two times"}
+ {"filepath": "data/multi_event_test/syn_118.wav", "onoffCaption": "sneeze at 3.124-6.477", "frequencyCaption": "sneeze one times"}
+ {"filepath": "data/multi_event_test/syn_121.wav", "onoffCaption": "car horn honking at 0.782-3.743, 4.51-7.194, 7.76-10.0", "frequencyCaption": "car horn honking three times"}
+ {"filepath": "data/multi_event_test/syn_123.wav", "onoffCaption": "sheep goat bleating at 2.222-4.222, 6.493-8.493", "frequencyCaption": "sheep goat bleating two times"}
+ {"filepath": "data/multi_event_test/syn_126.wav", "onoffCaption": "sneeze at 2.136-6.289 and car horn honking at 2.473-5.434, 7.027-9.711", "frequencyCaption": "sneeze one times and car horn honking two times"}
+ {"filepath": "data/multi_event_test/syn_128.wav", "onoffCaption": "sheep goat bleating at 0.291-2.291 and door knocking at 0.293-2.42, 3.227-5.791", "frequencyCaption": "sheep goat bleating one times and door knocking two times"}
+ {"filepath": "data/multi_event_test/syn_132.wav", "onoffCaption": "sheep goat bleating at 0.295-2.295 and spraying at 0.328-1.124, 2.065-3.167, 4.421-5.217 and duck quacking at 0.387-2.387, 2.967-4.785, 5.384-7.384", "frequencyCaption": "sheep goat bleating one times and spraying three times and duck quacking three times"}
+ {"filepath": "data/multi_event_test/syn_135.wav", "onoffCaption": "tapping clicking clanking at 0.458-3.898, 5.425-8.865", "frequencyCaption": "tapping clicking clanking two times"}
+ {"filepath": "data/multi_event_test/syn_139.wav", "onoffCaption": "thump thud at 2.477-4.777, 6.095-8.77", "frequencyCaption": "thump thud two times"}
+ {"filepath": "data/multi_event_test/syn_143.wav", "onoffCaption": "spraying at 2.679-3.475 and explosion at 5.945-10.0", "frequencyCaption": "spraying one times and explosion one times"}
+ {"filepath": "data/multi_event_test/syn_144.wav", "onoffCaption": "duck quacking at 1.162-2.98, 3.994-5.994, 8.158-9.976", "frequencyCaption": "duck quacking three times"}
+ {"filepath": "data/multi_event_test/syn_150.wav", "onoffCaption": "gunshot at 1.946-3.946, 4.6-6.6, 7.322-9.322", "frequencyCaption": "gunshot three times"}
+ {"filepath": "data/multi_event_test/syn_157.wav", "onoffCaption": "train horn at 1.991-7.071", "frequencyCaption": "train horn one times"}
+ {"filepath": "data/multi_event_test/syn_159.wav", "onoffCaption": "door slamming at 3.182-5.421, 7.675-9.086", "frequencyCaption": "door slamming two times"}
+ {"filepath": "data/multi_event_test/syn_160.wav", "onoffCaption": "spraying at 0.179-0.975 and whistling at 3.947-10.0", "frequencyCaption": "spraying one times and whistling one times"}
+ {"filepath": "data/multi_event_test/syn_167.wav", "onoffCaption": "burping belching at 0.386-3.343, 4.105-6.264 and gunshot at 4.772-6.772", "frequencyCaption": "burping belching two times and gunshot one times"}
+ {"filepath": "data/multi_event_test/syn_169.wav", "onoffCaption": "sneeze at 0.56-4.713, 5.69-7.783", "frequencyCaption": "sneeze two times"}
+ {"filepath": "data/multi_event_test/syn_173.wav", "onoffCaption": "sheep goat bleating at 0.834-2.834, 3.932-5.932, 6.656-8.656", "frequencyCaption": "sheep goat bleating three times"}
+ {"filepath": "data/multi_event_test/syn_174.wav", "onoffCaption": "dog barking at 0.021-2.021, 2.529-4.529, 5.505-7.505", "frequencyCaption": "dog barking three times"}
+ {"filepath": "data/multi_event_test/syn_176.wav", "onoffCaption": "woman laughing at 2.645-5.409, 7.198-9.435", "frequencyCaption": "woman laughing two times"}
+ {"filepath": "data/multi_event_test/syn_182.wav", "onoffCaption": "cow mooing at 0.007-2.807 and gunshot at 1.124-3.124", "frequencyCaption": "cow mooing one times and gunshot one times"}
+ {"filepath": "data/multi_event_test/syn_185.wav", "onoffCaption": "spraying at 2.564-3.666 and door knocking at 6.756-9.32", "frequencyCaption": "spraying one times and door knocking one times"}
+ {"filepath": "data/multi_event_test/syn_189.wav", "onoffCaption": "door slamming at 2.717-4.956, 5.586-6.997", "frequencyCaption": "door slamming two times"}
+ {"filepath": "data/multi_event_test/syn_191.wav", "onoffCaption": "burping belching at 2.833-4.992, 6.271-8.43", "frequencyCaption": "burping belching two times"}
+ {"filepath": "data/multi_event_test/syn_193.wav", "onoffCaption": "cow mooing at 0.942-3.742, 4.83-7.454", "frequencyCaption": "cow mooing two times"}
+ {"filepath": "data/multi_event_test/syn_196.wav", "onoffCaption": "spraying at 3.461-4.563", "frequencyCaption": "spraying one times"}
+ {"filepath": "data/multi_event_test/syn_198.wav", "onoffCaption": "gunshot at 1.546-3.546, 4.501-6.501, 7.428-9.428", "frequencyCaption": "gunshot three times"}
+ {"filepath": "data/multi_event_test/syn_7.wav", "onoffCaption": "spraying at 0.113-0.909 and burping belching at 0.623-3.58", "frequencyCaption": "spraying one times and burping belching one times"}
+ {"filepath": "data/multi_event_test/syn_9.wav", "onoffCaption": "cow mooing at 1.06-3.86", "frequencyCaption": "cow mooing one times"}
+ {"filepath": "data/multi_event_test/syn_10.wav", "onoffCaption": "door knocking at 0.3-2.864, 5.022-7.586", "frequencyCaption": "door knocking two times"}
+ {"filepath": "data/multi_event_test/syn_17.wav", "onoffCaption": "dog barking at 3.791-5.791, 7.757-9.757", "frequencyCaption": "dog barking two times"}
+ {"filepath": "data/multi_event_test/syn_19.wav", "onoffCaption": "gunshot at 0.007-2.007 and spraying at 4.251-5.047", "frequencyCaption": "gunshot one times and spraying one times"}
+ {"filepath": "data/multi_event_test/syn_20.wav", "onoffCaption": "tapping clicking clanking at 0.017-3.457, 5.475-7.882", "frequencyCaption": "tapping clicking clanking two times"}
+ {"filepath": "data/multi_event_test/syn_27.wav", "onoffCaption": "dog barking at 2.012-4.012, 4.76-6.76", "frequencyCaption": "dog barking two times"}
+ {"filepath": "data/multi_event_test/syn_29.wav", "onoffCaption": "tapping clicking clanking at 2.18-5.62, 6.49-9.93", "frequencyCaption": "tapping clicking clanking two times"}
+ {"filepath": "data/multi_event_test/syn_33.wav", "onoffCaption": "dog barking at 2.805-4.805, 5.866-7.866 and car horn honking at 5.136-8.097", "frequencyCaption": "dog barking two times and car horn honking one times"}
+ {"filepath": "data/multi_event_test/syn_34.wav", "onoffCaption": "sheep goat bleating at 1.113-3.113", "frequencyCaption": "sheep goat bleating one times"}
+ {"filepath": "data/multi_event_test/syn_42.wav", "onoffCaption": "tapping clicking clanking at 2.443-5.883, 7.179-9.684", "frequencyCaption": "tapping clicking clanking two times"}
+ {"filepath": "data/multi_event_test/syn_45.wav", "onoffCaption": "cat meowing at 0.324-1.65 and train horn at 4.186-9.266", "frequencyCaption": "cat meowing one times and train horn one times"}
+ {"filepath": "data/multi_event_test/syn_56.wav", "onoffCaption": "tapping clicking clanking at 1.696-5.136, 6.886-9.533", "frequencyCaption": "tapping clicking clanking two times"}
+ {"filepath": "data/multi_event_test/syn_58.wav", "onoffCaption": "door slamming at 2.48-3.891", "frequencyCaption": "door slamming one times"}
+ {"filepath": "data/multi_event_test/syn_61.wav", "onoffCaption": "explosion at 2.489-5.241, 5.792-8.521 and train horn at 2.512-7.592 and woman laughing at 6.424-9.188", "frequencyCaption": "explosion two times and train horn one times and woman laughing one times"}
+ {"filepath": "data/multi_event_test/syn_66.wav", "onoffCaption": "sheep goat bleating at 1.634-3.634", "frequencyCaption": "sheep goat bleating one times"}
+ {"filepath": "data/multi_event_test/syn_68.wav", "onoffCaption": "car horn honking at 0.051-3.012, 4.062-6.746, 7.319-10.0", "frequencyCaption": "car horn honking three times"}
+ {"filepath": "data/multi_event_test/syn_72.wav", "onoffCaption": "spraying at 0.013-0.809, 1.742-2.844 and thump thud at 1.117-3.792 and dog barking at 6.065-8.065", "frequencyCaption": "spraying two times and thump thud one times and dog barking one times"}
+ {"filepath": "data/multi_event_test/syn_75.wav", "onoffCaption": "explosion at 0.266-5.188, 6.431-9.183", "frequencyCaption": "explosion two times"}
+ {"filepath": "data/multi_event_test/syn_83.wav", "onoffCaption": "whistling at 2.863-10.0", "frequencyCaption": "whistling one times"}
+ {"filepath": "data/multi_event_test/syn_84.wav", "onoffCaption": "burping belching at 2.009-4.966, 6.768-8.927", "frequencyCaption": "burping belching two times"}
+ {"filepath": "data/multi_event_test/syn_90.wav", "onoffCaption": "gunshot at 0.175-2.175", "frequencyCaption": "gunshot one times"}
+ {"filepath": "data/multi_event_test/syn_97.wav", "onoffCaption": "cat meowing at 3.666-5.056", "frequencyCaption": "cat meowing one times"}
+ {"filepath": "data/multi_event_test/syn_99.wav", "onoffCaption": "duck quacking at 0.697-2.515, 3.677-5.677", "frequencyCaption": "duck quacking two times"}
+ {"filepath": "data/multi_event_test/syn_100.wav", "onoffCaption": "cat meowing at 0.122-1.512 and sheep goat bleating at 0.564-2.564, 3.078-5.078, 5.762-7.762", "frequencyCaption": "cat meowing one times and sheep goat bleating three times"}
+ {"filepath": "data/multi_event_test/syn_107.wav", "onoffCaption": "spraying at 0.005-1.107, 3.385-4.487 and dog barking at 1.269-3.269, 4.85-6.85 and tapping clicking clanking at 1.455-4.895, 5.47-8.91", "frequencyCaption": "spraying two times and dog barking two times and tapping clicking clanking two times"}
+ {"filepath": "data/multi_event_test/syn_109.wav", "onoffCaption": "cow mooing at 1.573-4.373 and gunshot at 7.482-9.482", "frequencyCaption": "cow mooing one times and gunshot one times"}
+ {"filepath": "data/multi_event_test/syn_113.wav", "onoffCaption": "whistling at 0.12-4.714 and tapping clicking clanking at 0.731-4.171", "frequencyCaption": "whistling one times and tapping clicking clanking one times"}
+ {"filepath": "data/multi_event_test/syn_114.wav", "onoffCaption": "car horn honking at 3.216-5.9 and door knocking at 3.814-6.378", "frequencyCaption": "car horn honking one times and door knocking one times"}
+ {"filepath": "data/multi_event_test/syn_124.wav", "onoffCaption": "gunshot at 2.794-4.794, 5.712-7.712", "frequencyCaption": "gunshot two times"}
+ {"filepath": "data/multi_event_test/syn_130.wav", "onoffCaption": "dog barking at 0.835-2.835, 3.911-5.911, 6.459-8.459", "frequencyCaption": "dog barking three times"}
+ {"filepath": "data/multi_event_test/syn_137.wav", "onoffCaption": "door knocking at 0.152-2.716 and cow mooing at 1.559-4.183 and gunshot at 5.826-7.826", "frequencyCaption": "door knocking one times and cow mooing one times and gunshot one times"}
+ {"filepath": "data/multi_event_test/syn_141.wav", "onoffCaption": "sneeze at 0.816-4.969, 5.643-9.796", "frequencyCaption": "sneeze two times"}
+ {"filepath": "data/multi_event_test/syn_146.wav", "onoffCaption": "sneeze at 0.145-4.298, 5.107-8.031 and cat meowing at 1.128-2.454", "frequencyCaption": "sneeze two times and cat meowing one times"}
+ {"filepath": "data/multi_event_test/syn_148.wav", "onoffCaption": "duck quacking at 3.185-5.003, 5.701-7.701 and cow mooing at 3.469-6.093", "frequencyCaption": "duck quacking two times and cow mooing one times"}
+ {"filepath": "data/multi_event_test/syn_152.wav", "onoffCaption": "tapping clicking clanking at 0.851-4.291, 4.863-7.054 and train horn at 5.524-8.524", "frequencyCaption": "tapping clicking clanking two times and train horn one times"}
+ {"filepath": "data/multi_event_test/syn_155.wav", "onoffCaption": "tapping clicking clanking at 0.869-4.309 and gunshot at 1.402-3.402 and cat meowing at 6.9-8.226", "frequencyCaption": "tapping clicking clanking one times and gunshot one times and cat meowing one times"}
+ {"filepath": "data/multi_event_test/syn_162.wav", "onoffCaption": "gunshot at 0.5-2.5, 3.074-5.074, 5.829-7.829", "frequencyCaption": "gunshot three times"}
+ {"filepath": "data/multi_event_test/syn_165.wav", "onoffCaption": "thump thud at 0.322-2.622, 4.239-6.914 and whistling at 0.361-9.161", "frequencyCaption": "thump thud two times and whistling one times"}
+ {"filepath": "data/multi_event_test/syn_171.wav", "onoffCaption": "spraying at 1.23-2.332, 3.511-4.613, 5.79-6.892 and thump thud at 1.604-3.904 and sheep goat bleating at 1.985-3.985, 4.796-6.796", "frequencyCaption": "spraying three times and thump thud one times and sheep goat bleating two times"}
+ {"filepath": "data/multi_event_test/syn_178.wav", "onoffCaption": "door slamming at 0.233-2.472 and woman laughing at 6.658-10.0", "frequencyCaption": "door slamming one times and woman laughing one times"}
+ {"filepath": "data/multi_event_test/syn_180.wav", "onoffCaption": "spraying at 2.203-3.305 and cow mooing at 4.398-7.198", "frequencyCaption": "spraying one times and cow mooing one times"}
+ {"filepath": "data/multi_event_test/syn_187.wav", "onoffCaption": "sneeze at 2.13-6.283, 6.866-10.0", "frequencyCaption": "sneeze two times"}
+ {"filepath": "data/multi_event_test/syn_194.wav", "onoffCaption": "duck quacking at 2.028-3.846, 5.612-7.43", "frequencyCaption": "duck quacking two times"}
data/meta_data/test-onoff-control_single-event.json ADDED
@@ -0,0 +1,400 @@
1
+ {"filepath": "data/single_event_multi_identity_test/syn_1.wav", "onoffCaption": "cat meowing at 0.258-1.584", "frequencyCaption": "cat meowing one times"}
2
+ {"filepath": "data/single_event_multi_identity_test/syn_6.wav", "onoffCaption": "tapping clicking clanking at 1.246-4.686", "frequencyCaption": "tapping clicking clanking one times"}
3
+ {"filepath": "data/single_event_multi_identity_test/syn_8.wav", "onoffCaption": "door slamming at 2.564-4.803", "frequencyCaption": "door slamming one times"}
4
+ {"filepath": "data/single_event_multi_identity_test/syn_11.wav", "onoffCaption": "dog barking at 0.084-2.084, 2.908-4.908", "frequencyCaption": "dog barking two times"}
5
+ {"filepath": "data/single_event_multi_identity_test/syn_16.wav", "onoffCaption": "thump thud at 0.776-3.451", "frequencyCaption": "thump thud one times"}
6
+ {"filepath": "data/single_event_multi_identity_test/syn_18.wav", "onoffCaption": "sheep goat bleating at 3.833-5.833", "frequencyCaption": "sheep goat bleating one times"}
7
+ {"filepath": "data/single_event_multi_identity_test/syn_21.wav", "onoffCaption": "sheep goat bleating at 2.491-4.491", "frequencyCaption": "sheep goat bleating one times"}
8
+ {"filepath": "data/single_event_multi_identity_test/syn_26.wav", "onoffCaption": "tapping clicking clanking at 0.89-4.33", "frequencyCaption": "tapping clicking clanking one times"}
9
+ {"filepath": "data/single_event_multi_identity_test/syn_28.wav", "onoffCaption": "sneeze at 0.109-4.262, 6.151-8.608", "frequencyCaption": "sneeze two times"}
10
+ {"filepath": "data/single_event_multi_identity_test/syn_32.wav", "onoffCaption": "cow mooing at 1.486-4.11", "frequencyCaption": "cow mooing one times"}
11
+ {"filepath": "data/single_event_multi_identity_test/syn_35.wav", "onoffCaption": "door slamming at 0.085-2.324, 4.153-5.564", "frequencyCaption": "door slamming two times"}
12
+ {"filepath": "data/single_event_multi_identity_test/syn_43.wav", "onoffCaption": "thump thud at 2.551-4.851, 5.601-8.276", "frequencyCaption": "thump thud two times"}
13
+ {"filepath": "data/single_event_multi_identity_test/syn_44.wav", "onoffCaption": "burping belching at 0.979-3.138, 4.115-7.072, 7.609-9.768", "frequencyCaption": "burping belching three times"}
14
+ {"filepath": "data/single_event_multi_identity_test/syn_50.wav", "onoffCaption": "car horn honking at 1.566-4.25, 6.473-9.434", "frequencyCaption": "car horn honking two times"}
15
+ {"filepath": "data/single_event_multi_identity_test/syn_57.wav", "onoffCaption": "train horn at 3.341-8.421", "frequencyCaption": "train horn one times"}
16
+ {"filepath": "data/single_event_multi_identity_test/syn_59.wav", "onoffCaption": "woman laughing at 2.439-5.203, 6.08-8.827", "frequencyCaption": "woman laughing two times"}
17
+ {"filepath": "data/single_event_multi_identity_test/syn_60.wav", "onoffCaption": "cat meowing at 0.074-1.464, 3.742-5.068", "frequencyCaption": "cat meowing two times"}
18
+ {"filepath": "data/single_event_multi_identity_test/syn_67.wav", "onoffCaption": "cow mooing at 3.535-6.159", "frequencyCaption": "cow mooing one times"}
19
+ {"filepath": "data/single_event_multi_identity_test/syn_69.wav", "onoffCaption": "burping belching at 0.799-2.958", "frequencyCaption": "burping belching one times"}
20
+ {"filepath": "data/single_event_multi_identity_test/syn_73.wav", "onoffCaption": "whistling at 2.868-7.462", "frequencyCaption": "whistling one times"}
21
+ {"filepath": "data/single_event_multi_identity_test/syn_74.wav", "onoffCaption": "cat meowing at 1.655-3.045", "frequencyCaption": "cat meowing one times"}
22
+ {"filepath": "data/single_event_multi_identity_test/syn_82.wav", "onoffCaption": "thump thud at 1.925-4.6, 5.398-7.698", "frequencyCaption": "thump thud two times"}
23
+ {"filepath": "data/single_event_multi_identity_test/syn_91.wav", "onoffCaption": "duck quacking at 0.497-2.497", "frequencyCaption": "duck quacking one times"}
24
+ {"filepath": "data/single_event_multi_identity_test/syn_96.wav", "onoffCaption": "cat meowing at 0.044-1.37, 3.201-4.591, 5.458-6.848", "frequencyCaption": "cat meowing three times"}
25
+ {"filepath": "data/single_event_multi_identity_test/syn_98.wav", "onoffCaption": "woman laughing at 2.458-6.916, 7.905-10.0", "frequencyCaption": "woman laughing two times"}
26
+ {"filepath": "data/single_event_multi_identity_test/syn_101.wav", "onoffCaption": "burping belching at 1.697-4.654, 5.403-7.562", "frequencyCaption": "burping belching two times"}
27
+ {"filepath": "data/single_event_multi_identity_test/syn_106.wav", "onoffCaption": "gunshot at 0.047-2.047", "frequencyCaption": "gunshot one times"}
28
+ {"filepath": "data/single_event_multi_identity_test/syn_108.wav", "onoffCaption": "cat meowing at 1.96-3.35, 4.662-5.988", "frequencyCaption": "cat meowing two times"}
29
+ {"filepath": "data/single_event_multi_identity_test/syn_112.wav", "onoffCaption": "train horn at 3.416-8.496", "frequencyCaption": "train horn one times"}
30
+ {"filepath": "data/single_event_multi_identity_test/syn_115.wav", "onoffCaption": "sheep goat bleating at 3.021-5.021, 6.26-8.26", "frequencyCaption": "sheep goat bleating two times"}
31
+ {"filepath": "data/single_event_multi_identity_test/syn_122.wav", "onoffCaption": "tapping clicking clanking at 1.126-4.566, 6.974-9.783", "frequencyCaption": "tapping clicking clanking two times"}
32
+ {"filepath": "data/single_event_multi_identity_test/syn_125.wav", "onoffCaption": "car horn honking at 3.106-5.79, 6.31-9.271", "frequencyCaption": "car horn honking two times"}
33
+ {"filepath": "data/single_event_multi_identity_test/syn_131.wav", "onoffCaption": "cow mooing at 2.423-5.047, 6.252-9.052", "frequencyCaption": "cow mooing two times"}
34
+ {"filepath": "data/single_event_multi_identity_test/syn_136.wav", "onoffCaption": "tapping clicking clanking at 0.672-4.112, 5.733-7.916", "frequencyCaption": "tapping clicking clanking two times"}
35
+ {"filepath": "data/single_event_multi_identity_test/syn_138.wav", "onoffCaption": "burping belching at 0.093-3.05, 3.962-6.121, 7.309-9.468", "frequencyCaption": "burping belching three times"}
36
+ {"filepath": "data/single_event_multi_identity_test/syn_140.wav", "onoffCaption": "duck quacking at 1.928-3.928, 5.108-6.926", "frequencyCaption": "duck quacking two times"}
37
+ {"filepath": "data/single_event_multi_identity_test/syn_147.wav", "onoffCaption": "burping belching at 2.269-4.428, 5.085-8.042", "frequencyCaption": "burping belching two times"}
38
+ {"filepath": "data/single_event_multi_identity_test/syn_149.wav", "onoffCaption": "gunshot at 0.434-2.434", "frequencyCaption": "gunshot one times"}
39
+ {"filepath": "data/single_event_multi_identity_test/syn_153.wav", "onoffCaption": "cow mooing at 3.209-5.833, 6.681-9.481", "frequencyCaption": "cow mooing two times"}
40
+ {"filepath": "data/single_event_multi_identity_test/syn_154.wav", "onoffCaption": "train horn at 3.48-6.48, 7.121-9.68", "frequencyCaption": "train horn two times"}
41
+ {"filepath": "data/single_event_multi_identity_test/syn_163.wav", "onoffCaption": "cow mooing at 1.335-3.959, 6.377-9.177", "frequencyCaption": "cow mooing two times"}
42
+ {"filepath": "data/single_event_multi_identity_test/syn_164.wav", "onoffCaption": "door slamming at 3.391-4.802, 5.918-8.157", "frequencyCaption": "door slamming two times"}
43
+ {"filepath": "data/single_event_multi_identity_test/syn_170.wav", "onoffCaption": "whistling at 0.053-8.853", "frequencyCaption": "whistling one times"}
44
+ {"filepath": "data/single_event_multi_identity_test/syn_177.wav", "onoffCaption": "door knocking at 0.585-2.712, 4.192-6.756", "frequencyCaption": "door knocking two times"}
45
+ {"filepath": "data/single_event_multi_identity_test/syn_179.wav", "onoffCaption": "gunshot at 2.477-4.477", "frequencyCaption": "gunshot one times"}
46
+ {"filepath": "data/single_event_multi_identity_test/syn_181.wav", "onoffCaption": "door knocking at 2.753-5.317", "frequencyCaption": "door knocking one times"}
47
+ {"filepath": "data/single_event_multi_identity_test/syn_186.wav", "onoffCaption": "sneeze at 2.336-6.489, 7.757-10.0", "frequencyCaption": "sneeze two times"}
48
+ {"filepath": "data/single_event_multi_identity_test/syn_188.wav", "onoffCaption": "explosion at 1.933-6.855", "frequencyCaption": "explosion one times"}
49
+ {"filepath": "data/single_event_multi_identity_test/syn_192.wav", "onoffCaption": "cat meowing at 0.139-1.465, 2.845-4.235", "frequencyCaption": "cat meowing two times"}
50
+ {"filepath": "data/single_event_multi_identity_test/syn_195.wav", "onoffCaption": "duck quacking at 3.185-5.003, 5.701-7.701", "frequencyCaption": "duck quacking two times"}
51
+ {"filepath": "data/single_event_multi_identity_test/syn_3.wav", "onoffCaption": "burping belching at 0.203-3.16, 3.696-5.855", "frequencyCaption": "burping belching two times"}
52
+ {"filepath": "data/single_event_multi_identity_test/syn_4.wav", "onoffCaption": "cat meowing at 1.562-2.888", "frequencyCaption": "cat meowing one times"}
53
+ {"filepath": "data/single_event_multi_identity_test/syn_13.wav", "onoffCaption": "tapping clicking clanking at 0.838-4.278, 4.839-6.935, 7.732-9.827", "frequencyCaption": "tapping clicking clanking three times"}
54
+ {"filepath": "data/single_event_multi_identity_test/syn_14.wav", "onoffCaption": "tapping clicking clanking at 0.51-3.95, 5.245-8.17", "frequencyCaption": "tapping clicking clanking two times"}
55
+ {"filepath": "data/single_event_multi_identity_test/syn_23.wav", "onoffCaption": "cow mooing at 0.467-3.267, 4.388-7.012", "frequencyCaption": "cow mooing two times"}
56
+ {"filepath": "data/single_event_multi_identity_test/syn_24.wav", "onoffCaption": "thump thud at 3.239-5.539, 6.108-8.783", "frequencyCaption": "thump thud two times"}
57
+ {"filepath": "data/single_event_multi_identity_test/syn_30.wav", "onoffCaption": "explosion at 2.75-5.502, 7.44-10.0", "frequencyCaption": "explosion two times"}
58
+ {"filepath": "data/single_event_multi_identity_test/syn_37.wav", "onoffCaption": "tapping clicking clanking at 2.357-5.797, 7.176-9.79", "frequencyCaption": "tapping clicking clanking two times"}
59
+ {"filepath": "data/single_event_multi_identity_test/syn_39.wav", "onoffCaption": "burping belching at 1.038-3.197, 4.613-7.57", "frequencyCaption": "burping belching two times"}
60
+ {"filepath": "data/single_event_multi_identity_test/syn_41.wav", "onoffCaption": "car horn honking at 2.524-5.485, 6.594-9.278", "frequencyCaption": "car horn honking two times"}
61
+ {"filepath": "data/single_event_multi_identity_test/syn_48.wav", "onoffCaption": "train horn at 2.211-7.291", "frequencyCaption": "train horn one times"}
62
+ {"filepath": "data/single_event_multi_identity_test/syn_52.wav", "onoffCaption": "dog barking at 2.157-4.157, 5.953-7.953", "frequencyCaption": "dog barking two times"}
63
+ {"filepath": "data/single_event_multi_identity_test/syn_55.wav", "onoffCaption": "spraying at 1.616-2.718, 3.653-4.449, 5.396-6.498", "frequencyCaption": "spraying three times"}
64
+ {"filepath": "data/single_event_multi_identity_test/syn_62.wav", "onoffCaption": "woman laughing at 0.881-5.339, 6.657-9.421", "frequencyCaption": "woman laughing two times"}
65
+ {"filepath": "data/single_event_multi_identity_test/syn_65.wav", "onoffCaption": "tapping clicking clanking at 1.976-5.416, 6.573-9.12", "frequencyCaption": "tapping clicking clanking two times"}
66
+ {"filepath": "data/single_event_multi_identity_test/syn_71.wav", "onoffCaption": "train horn at 2.442-7.522", "frequencyCaption": "train horn one times"}
67
+ {"filepath": "data/single_event_multi_identity_test/syn_76.wav", "onoffCaption": "door knocking at 0.618-3.182", "frequencyCaption": "door knocking one times"}
68
+ {"filepath": "data/single_event_multi_identity_test/syn_78.wav", "onoffCaption": "door knocking at 0.065-2.192, 3.439-6.003", "frequencyCaption": "door knocking two times"}
69
+ {"filepath": "data/single_event_multi_identity_test/syn_80.wav", "onoffCaption": "car horn honking at 3.533-6.494", "frequencyCaption": "car horn honking one times"}
70
+ {"filepath": "data/single_event_multi_identity_test/syn_85.wav", "onoffCaption": "gunshot at 1.931-3.931, 4.716-6.716, 7.891-9.891", "frequencyCaption": "gunshot three times"}
71
+ {"filepath": "data/single_event_multi_identity_test/syn_87.wav", "onoffCaption": "thump thud at 1.759-4.059, 6.133-8.808", "frequencyCaption": "thump thud two times"}
72
+ {"filepath": "data/single_event_multi_identity_test/syn_89.wav", "onoffCaption": "door knocking at 0.065-2.192, 3.164-5.728", "frequencyCaption": "door knocking two times"}
73
+ {"filepath": "data/single_event_multi_identity_test/syn_93.wav", "onoffCaption": "whistling at 0.042-8.842", "frequencyCaption": "whistling one times"}
74
+ {"filepath": "data/single_event_multi_identity_test/syn_94.wav", "onoffCaption": "burping belching at 0.167-2.326, 3.873-6.83", "frequencyCaption": "burping belching two times"}
75
+ {"filepath": "data/single_event_multi_identity_test/syn_103.wav", "onoffCaption": "dog barking at 1.282-3.282, 4.117-6.117, 6.789-8.789", "frequencyCaption": "dog barking three times"}
76
+ {"filepath": "data/single_event_multi_identity_test/syn_104.wav", "onoffCaption": "thump thud at 1.988-4.663, 7.028-9.328", "frequencyCaption": "thump thud two times"}
77
+ {"filepath": "data/single_event_multi_identity_test/syn_110.wav", "onoffCaption": "whistling at 1.555-6.149", "frequencyCaption": "whistling one times"}
78
+ {"filepath": "data/single_event_multi_identity_test/syn_117.wav", "onoffCaption": "tapping clicking clanking at 0.487-3.927", "frequencyCaption": "tapping clicking clanking one times"}
79
+ {"filepath": "data/single_event_multi_identity_test/syn_119.wav", "onoffCaption": "duck quacking at 2.537-4.355, 5.889-7.889", "frequencyCaption": "duck quacking two times"}
80
+ {"filepath": "data/single_event_multi_identity_test/syn_120.wav", "onoffCaption": "dog barking at 0.013-2.013, 3.064-5.064, 5.694-7.694", "frequencyCaption": "dog barking three times"}
81
+ {"filepath": "data/single_event_multi_identity_test/syn_127.wav", "onoffCaption": "duck quacking at 0.78-2.78, 5.24-7.058", "frequencyCaption": "duck quacking two times"}
82
+ {"filepath": "data/single_event_multi_identity_test/syn_129.wav", "onoffCaption": "burping belching at 1.965-4.922, 6.696-8.855", "frequencyCaption": "burping belching two times"}
83
+ {"filepath": "data/single_event_multi_identity_test/syn_133.wav", "onoffCaption": "train horn at 3.059-8.139", "frequencyCaption": "train horn one times"}
84
+ {"filepath": "data/single_event_multi_identity_test/syn_134.wav", "onoffCaption": "spraying at 0.184-0.98, 2.498-3.6, 4.402-5.198", "frequencyCaption": "spraying three times"}
85
+ {"filepath": "data/single_event_multi_identity_test/syn_142.wav", "onoffCaption": "cow mooing at 2.715-5.339, 6.568-9.368", "frequencyCaption": "cow mooing two times"}
86
+ {"filepath": "data/single_event_multi_identity_test/syn_145.wav", "onoffCaption": "cow mooing at 0.071-2.695, 4.586-7.386", "frequencyCaption": "cow mooing two times"}
87
+ {"filepath": "data/single_event_multi_identity_test/syn_151.wav", "onoffCaption": "duck quacking at 0.425-2.425, 4.73-6.548", "frequencyCaption": "duck quacking two times"}
88
+ {"filepath": "data/single_event_multi_identity_test/syn_156.wav", "onoffCaption": "thump thud at 0.071-2.746, 3.838-6.138, 7.435-9.735", "frequencyCaption": "thump thud three times"}
89
+ {"filepath": "data/single_event_multi_identity_test/syn_158.wav", "onoffCaption": "burping belching at 0.027-2.186", "frequencyCaption": "burping belching one times"}
90
+ {"filepath": "data/single_event_multi_identity_test/syn_161.wav", "onoffCaption": "car horn honking at 0.937-3.898, 5.036-7.72", "frequencyCaption": "car horn honking two times"}
91
+ {"filepath": "data/single_event_multi_identity_test/syn_166.wav", "onoffCaption": "burping belching at 0.323-3.28, 4.07-6.229, 7.049-9.208", "frequencyCaption": "burping belching three times"}
92
+ {"filepath": "data/single_event_multi_identity_test/syn_168.wav", "onoffCaption": "door slamming at 0.115-1.526, 2.595-4.834, 5.389-7.628", "frequencyCaption": "door slamming three times"}
93
+ {"filepath": "data/single_event_multi_identity_test/syn_172.wav", "onoffCaption": "woman laughing at 3.125-5.889", "frequencyCaption": "woman laughing one times"}
94
+ {"filepath": "data/single_event_multi_identity_test/syn_175.wav", "onoffCaption": "spraying at 0.007-0.803", "frequencyCaption": "spraying one times"}
95
+ {"filepath": "data/single_event_multi_identity_test/syn_183.wav", "onoffCaption": "woman laughing at 2.259-6.717, 7.786-10.0", "frequencyCaption": "woman laughing two times"}
96
+ {"filepath": "data/single_event_multi_identity_test/syn_184.wav", "onoffCaption": "door slamming at 3.397-4.808, 6.096-8.335", "frequencyCaption": "door slamming two times"}
97
+ {"filepath": "data/single_event_multi_identity_test/syn_190.wav", "onoffCaption": "explosion at 0.228-5.15, 6.074-8.826", "frequencyCaption": "explosion two times"}
98
+ {"filepath": "data/single_event_multi_identity_test/syn_197.wav", "onoffCaption": "car horn honking at 3.732-6.416, 7.567-10.0", "frequencyCaption": "car horn honking two times"}
99
+ {"filepath": "data/single_event_multi_identity_test/syn_199.wav", "onoffCaption": "car horn honking at 1.911-4.872", "frequencyCaption": "car horn honking one times"}
100
+ {"filepath": "data/single_event_multi_identity_test/syn_200.wav", "onoffCaption": "train horn at 0.413-3.413", "frequencyCaption": "train horn one times"}
101
+ {"filepath": "data/single_event_multi_identity_test/syn_2.wav", "onoffCaption": "cat meowing at 1.299-2.689", "frequencyCaption": "cat meowing one times"}
102
+ {"filepath": "data/single_event_multi_identity_test/syn_5.wav", "onoffCaption": "dog barking at 3.791-5.791, 6.571-8.571", "frequencyCaption": "dog barking two times"}
103
+ {"filepath": "data/single_event_multi_identity_test/syn_12.wav", "onoffCaption": "tapping clicking clanking at 1.245-4.685", "frequencyCaption": "tapping clicking clanking one times"}
104
+ {"filepath": "data/single_event_multi_identity_test/syn_15.wav", "onoffCaption": "explosion at 3.815-6.567, 7.214-9.546", "frequencyCaption": "explosion two times"}
105
+ {"filepath": "data/single_event_multi_identity_test/syn_22.wav", "onoffCaption": "sheep goat bleating at 0.26-2.26, 3.592-5.592, 7.325-9.325", "frequencyCaption": "sheep goat bleating three times"}
106
+ {"filepath": "data/single_event_multi_identity_test/syn_25.wav", "onoffCaption": "gunshot at 0.166-2.166, 3.749-5.749", "frequencyCaption": "gunshot two times"}
107
+ {"filepath": "data/single_event_multi_identity_test/syn_31.wav", "onoffCaption": "sneeze at 3.917-8.07", "frequencyCaption": "sneeze one times"}
108
+ {"filepath": "data/single_event_multi_identity_test/syn_36.wav", "onoffCaption": "sheep goat bleating at 2.86-4.86, 7.119-9.119", "frequencyCaption": "sheep goat bleating two times"}
109
+ {"filepath": "data/single_event_multi_identity_test/syn_38.wav", "onoffCaption": "whistling at 2.996-7.59", "frequencyCaption": "whistling one times"}
110
+ {"filepath": "data/single_event_multi_identity_test/syn_40.wav", "onoffCaption": "woman laughing at 0.024-4.482, 5.882-8.646", "frequencyCaption": "woman laughing two times"}
111
+ {"filepath": "data/single_event_multi_identity_test/syn_46.wav", "onoffCaption": "tapping clicking clanking at 2.067-5.507", "frequencyCaption": "tapping clicking clanking one times"}
112
+ {"filepath": "data/single_event_multi_identity_test/syn_47.wav", "onoffCaption": "cow mooing at 0.008-2.808, 3.956-6.58, 7.995-10.0", "frequencyCaption": "cow mooing three times"}
113
+ {"filepath": "data/single_event_multi_identity_test/syn_49.wav", "onoffCaption": "dog barking at 3.464-5.464", "frequencyCaption": "dog barking one times"}
114
+ {"filepath": "data/single_event_multi_identity_test/syn_51.wav", "onoffCaption": "whistling at 0.26-9.06", "frequencyCaption": "whistling one times"}
115
+ {"filepath": "data/single_event_multi_identity_test/syn_53.wav", "onoffCaption": "whistling at 0.748-5.342, 6.45-8.456", "frequencyCaption": "whistling two times"}
116
+ {"filepath": "data/single_event_multi_identity_test/syn_54.wav", "onoffCaption": "cow mooing at 0.48-3.28, 4.237-6.861", "frequencyCaption": "cow mooing two times"}
117
+ {"filepath": "data/single_event_multi_identity_test/syn_63.wav", "onoffCaption": "explosion at 1.214-6.136", "frequencyCaption": "explosion one times"}
118
+ {"filepath": "data/single_event_multi_identity_test/syn_64.wav", "onoffCaption": "whistling at 0.012-4.606, 5.649-8.052", "frequencyCaption": "whistling two times"}
119
+ {"filepath": "data/single_event_multi_identity_test/syn_70.wav", "onoffCaption": "dog barking at 4.239-6.239", "frequencyCaption": "dog barking one times"}
120
+ {"filepath": "data/single_event_multi_identity_test/syn_77.wav", "onoffCaption": "train horn at 2.336-7.416", "frequencyCaption": "train horn one times"}
121
+ {"filepath": "data/single_event_multi_identity_test/syn_79.wav", "onoffCaption": "train horn at 2.15-7.23", "frequencyCaption": "train horn one times"}
122
+ {"filepath": "data/single_event_multi_identity_test/syn_81.wav", "onoffCaption": "tapping clicking clanking at 3.241-6.681", "frequencyCaption": "tapping clicking clanking one times"}
123
+ {"filepath": "data/single_event_multi_identity_test/syn_86.wav", "onoffCaption": "gunshot at 0.406-2.406, 4.136-6.136", "frequencyCaption": "gunshot two times"}
124
+ {"filepath": "data/single_event_multi_identity_test/syn_88.wav", "onoffCaption": "car horn honking at 0.051-3.012, 4.062-6.746, 7.319-10.0", "frequencyCaption": "car horn honking three times"}
125
+ {"filepath": "data/single_event_multi_identity_test/syn_92.wav", "onoffCaption": "door slamming at 1.032-2.443, 4.422-6.661", "frequencyCaption": "door slamming two times"}
126
+ {"filepath": "data/single_event_multi_identity_test/syn_95.wav", "onoffCaption": "woman laughing at 0.147-4.605, 5.939-8.703", "frequencyCaption": "woman laughing two times"}
127
+ {"filepath": "data/single_event_multi_identity_test/syn_102.wav", "onoffCaption": "duck quacking at 0.363-2.363, 2.979-4.797", "frequencyCaption": "duck quacking two times"}
128
+ {"filepath": "data/single_event_multi_identity_test/syn_105.wav", "onoffCaption": "door slamming at 0.253-1.664", "frequencyCaption": "door slamming one times"}
129
+ {"filepath": "data/single_event_multi_identity_test/syn_111.wav", "onoffCaption": "dog barking at 0.562-2.562, 4.25-6.25", "frequencyCaption": "dog barking two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_116.wav", "onoffCaption": "sheep goat bleating at 2.658-4.658", "frequencyCaption": "sheep goat bleating one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_118.wav", "onoffCaption": "sheep goat bleating at 2.634-4.634", "frequencyCaption": "sheep goat bleating one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_121.wav", "onoffCaption": "cat meowing at 2.182-3.508", "frequencyCaption": "cat meowing one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_123.wav", "onoffCaption": "sheep goat bleating at 2.042-4.042, 5.044-7.044", "frequencyCaption": "sheep goat bleating two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_126.wav", "onoffCaption": "burping belching at 0.139-3.096, 4.403-6.562", "frequencyCaption": "burping belching two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_128.wav", "onoffCaption": "train horn at 1.814-4.814", "frequencyCaption": "train horn one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_132.wav", "onoffCaption": "duck quacking at 1.582-3.582, 4.673-6.491", "frequencyCaption": "duck quacking two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_135.wav", "onoffCaption": "whistling at 1.414-6.008, 7.012-9.463", "frequencyCaption": "whistling two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_139.wav", "onoffCaption": "spraying at 1.819-2.615, 3.181-4.283", "frequencyCaption": "spraying two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_143.wav", "onoffCaption": "door knocking at 0.495-3.059, 4.039-6.166, 7.128-9.692", "frequencyCaption": "door knocking three times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_144.wav", "onoffCaption": "spraying at 0.584-1.686, 2.49-3.286, 3.892-4.688", "frequencyCaption": "spraying three times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_150.wav", "onoffCaption": "duck quacking at 2.654-4.654", "frequencyCaption": "duck quacking one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_157.wav", "onoffCaption": "explosion at 2.478-5.23, 6.261-9.209", "frequencyCaption": "explosion two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_159.wav", "onoffCaption": "sneeze at 1.342-4.695, 6.662-9.384", "frequencyCaption": "sneeze two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_160.wav", "onoffCaption": "woman laughing at 0.352-3.116", "frequencyCaption": "woman laughing one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_167.wav", "onoffCaption": "thump thud at 0.177-2.852, 4.459-6.759", "frequencyCaption": "thump thud two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_169.wav", "onoffCaption": "gunshot at 0.088-2.088", "frequencyCaption": "gunshot one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_173.wav", "onoffCaption": "explosion at 0.195-5.117", "frequencyCaption": "explosion one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_174.wav", "onoffCaption": "duck quacking at 0.089-2.089, 4.166-5.984", "frequencyCaption": "duck quacking two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_176.wav", "onoffCaption": "gunshot at 3.54-5.54, 7.238-9.238", "frequencyCaption": "gunshot two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_182.wav", "onoffCaption": "car horn honking at 0.14-2.824", "frequencyCaption": "car horn honking one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_185.wav", "onoffCaption": "dog barking at 3.434-5.434, 6.333-8.333", "frequencyCaption": "dog barking two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_189.wav", "onoffCaption": "burping belching at 0.432-3.389, 4.403-6.562", "frequencyCaption": "burping belching two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_191.wav", "onoffCaption": "tapping clicking clanking at 2.168-5.608", "frequencyCaption": "tapping clicking clanking one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_193.wav", "onoffCaption": "dog barking at 3.219-5.219", "frequencyCaption": "dog barking one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_196.wav", "onoffCaption": "duck quacking at 1.8-3.618", "frequencyCaption": "duck quacking one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_198.wav", "onoffCaption": "sheep goat bleating at 0.073-2.073", "frequencyCaption": "sheep goat bleating one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_7.wav", "onoffCaption": "door slamming at 2.809-4.22, 6.263-8.502", "frequencyCaption": "door slamming two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_9.wav", "onoffCaption": "sneeze at 0.07-4.223, 4.927-7.216", "frequencyCaption": "sneeze two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_10.wav", "onoffCaption": "door slamming at 2.191-3.602", "frequencyCaption": "door slamming one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_17.wav", "onoffCaption": "gunshot at 0.033-2.033", "frequencyCaption": "gunshot one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_19.wav", "onoffCaption": "thump thud at 2.571-4.871, 6.726-9.401", "frequencyCaption": "thump thud two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_20.wav", "onoffCaption": "dog barking at 2.557-4.557, 5.093-7.093, 7.963-9.963", "frequencyCaption": "dog barking three times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_27.wav", "onoffCaption": "whistling at 2.141-6.735, 7.84-10.0", "frequencyCaption": "whistling two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_29.wav", "onoffCaption": "woman laughing at 3.051-7.509", "frequencyCaption": "woman laughing one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_33.wav", "onoffCaption": "dog barking at 1.397-3.397, 5.014-7.014", "frequencyCaption": "dog barking two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_34.wav", "onoffCaption": "dog barking at 0.691-2.691, 4.339-6.339, 7.597-9.597", "frequencyCaption": "dog barking three times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_42.wav", "onoffCaption": "door slamming at 0.111-1.522, 2.919-5.158", "frequencyCaption": "door slamming two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_45.wav", "onoffCaption": "woman laughing at 0.913-5.371", "frequencyCaption": "woman laughing one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_56.wav", "onoffCaption": "cat meowing at 3.25-4.576", "frequencyCaption": "cat meowing one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_58.wav", "onoffCaption": "spraying at 2.012-2.808", "frequencyCaption": "spraying one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_61.wav", "onoffCaption": "sheep goat bleating at 0.44-2.44, 3.141-5.141", "frequencyCaption": "sheep goat bleating two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_66.wav", "onoffCaption": "duck quacking at 0.199-2.199", "frequencyCaption": "duck quacking one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_68.wav", "onoffCaption": "door slamming at 0.555-1.966", "frequencyCaption": "door slamming one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_72.wav", "onoffCaption": "duck quacking at 3.008-5.008", "frequencyCaption": "duck quacking one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_75.wav", "onoffCaption": "door slamming at 2.007-4.246, 5.403-6.814, 7.324-9.563", "frequencyCaption": "door slamming three times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_83.wav", "onoffCaption": "spraying at 0.42-1.522, 2.179-2.975, 4.216-5.012", "frequencyCaption": "spraying three times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_84.wav", "onoffCaption": "burping belching at 1.998-4.955", "frequencyCaption": "burping belching one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_90.wav", "onoffCaption": "whistling at 0.292-9.092", "frequencyCaption": "whistling one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_97.wav", "onoffCaption": "dog barking at 1.995-3.995", "frequencyCaption": "dog barking one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_99.wav", "onoffCaption": "gunshot at 1.846-3.846, 5.067-7.067", "frequencyCaption": "gunshot two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_100.wav", "onoffCaption": "gunshot at 2.965-4.965, 5.836-7.836", "frequencyCaption": "gunshot two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_107.wav", "onoffCaption": "cat meowing at 0.382-1.772, 4.195-5.521, 7.481-8.871", "frequencyCaption": "cat meowing three times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_109.wav", "onoffCaption": "cat meowing at 1.827-3.217, 5.396-6.722, 8.387-9.777", "frequencyCaption": "cat meowing three times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_113.wav", "onoffCaption": "door slamming at 1.281-3.52, 4.645-6.056", "frequencyCaption": "door slamming two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_114.wav", "onoffCaption": "explosion at 2.267-7.189", "frequencyCaption": "explosion one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_124.wav", "onoffCaption": "woman laughing at 0.666-5.124, 7.521-10.0", "frequencyCaption": "woman laughing two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_130.wav", "onoffCaption": "gunshot at 2.672-4.672", "frequencyCaption": "gunshot one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_137.wav", "onoffCaption": "train horn at 0.682-3.682, 4.465-6.698, 7.809-10.0", "frequencyCaption": "train horn three times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_141.wav", "onoffCaption": "woman laughing at 0.105-2.869", "frequencyCaption": "woman laughing one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_146.wav", "onoffCaption": "sneeze at 1.102-4.455", "frequencyCaption": "sneeze one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_148.wav", "onoffCaption": "dog barking at 0.061-2.061, 3.265-5.265, 6.197-8.197", "frequencyCaption": "dog barking three times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_152.wav", "onoffCaption": "dog barking at 0.127-2.127", "frequencyCaption": "dog barking one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_155.wav", "onoffCaption": "spraying at 3.549-4.651", "frequencyCaption": "spraying one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_162.wav", "onoffCaption": "explosion at 0.391-3.143, 3.673-5.706", "frequencyCaption": "explosion two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_165.wav", "onoffCaption": "whistling at 3.448-8.042", "frequencyCaption": "whistling one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_171.wav", "onoffCaption": "duck quacking at 2.752-4.752", "frequencyCaption": "duck quacking one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_178.wav", "onoffCaption": "tapping clicking clanking at 1.713-5.153, 6.827-9.222", "frequencyCaption": "tapping clicking clanking two times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_180.wav", "onoffCaption": "cow mooing at 0.181-2.981", "frequencyCaption": "cow mooing one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_187.wav", "onoffCaption": "explosion at 2.424-5.176", "frequencyCaption": "explosion one times"}
+ {"filepath": "data/single_event_multi_identity_test/syn_194.wav", "onoffCaption": "gunshot at 2.339-4.339", "frequencyCaption": "gunshot one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_11.wav", "onoffCaption": "door knocking at 3.808-5.935, 6.708-8.835", "frequencyCaption": "door knocking two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_16.wav", "onoffCaption": "burping belching at 2.569-5.526", "frequencyCaption": "burping belching one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_18.wav", "onoffCaption": "burping belching at 2.907-5.066", "frequencyCaption": "burping belching one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_21.wav", "onoffCaption": "burping belching at 0.64-2.799", "frequencyCaption": "burping belching one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_26.wav", "onoffCaption": "spraying at 0.127-0.923", "frequencyCaption": "spraying one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_28.wav", "onoffCaption": "train horn at 3.589-8.669", "frequencyCaption": "train horn one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_32.wav", "onoffCaption": "gunshot at 1.173-3.173, 3.96-5.96, 6.617-8.617", "frequencyCaption": "gunshot three times"}
+ {"filepath": "data/single_event_single_identity_test/syn_35.wav", "onoffCaption": "woman laughing at 0.948-5.406, 7.602-10.0", "frequencyCaption": "woman laughing two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_43.wav", "onoffCaption": "door slamming at 3.246-4.657, 6.312-7.723", "frequencyCaption": "door slamming two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_44.wav", "onoffCaption": "dog barking at 1.211-3.211, 4.206-6.206, 6.728-8.728", "frequencyCaption": "dog barking three times"}
+ {"filepath": "data/single_event_single_identity_test/syn_50.wav", "onoffCaption": "door knocking at 0.488-3.052, 5.244-7.808", "frequencyCaption": "door knocking two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_57.wav", "onoffCaption": "train horn at 0.177-5.257", "frequencyCaption": "train horn one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_59.wav", "onoffCaption": "gunshot at 0.24-2.24, 3.277-5.277, 7.394-9.394", "frequencyCaption": "gunshot three times"}
+ {"filepath": "data/single_event_single_identity_test/syn_60.wav", "onoffCaption": "cow mooing at 1.847-4.471, 6.336-8.96", "frequencyCaption": "cow mooing two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_67.wav", "onoffCaption": "cow mooing at 2.819-5.443, 6.06-8.684", "frequencyCaption": "cow mooing two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_69.wav", "onoffCaption": "burping belching at 1.971-4.928, 6.428-9.385", "frequencyCaption": "burping belching two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_73.wav", "onoffCaption": "dog barking at 0.094-2.094, 3.294-5.294, 6.771-8.771", "frequencyCaption": "dog barking three times"}
+ {"filepath": "data/single_event_single_identity_test/syn_74.wav", "onoffCaption": "cow mooing at 2.351-4.975, 5.558-8.182", "frequencyCaption": "cow mooing two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_82.wav", "onoffCaption": "woman laughing at 2.876-7.334", "frequencyCaption": "woman laughing one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_85.wav", "onoffCaption": "dog barking at 2.785-4.785", "frequencyCaption": "dog barking one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_91.wav", "onoffCaption": "tapping clicking clanking at 1.295-4.735", "frequencyCaption": "tapping clicking clanking one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_96.wav", "onoffCaption": "door knocking at 0.452-2.579", "frequencyCaption": "door knocking one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_98.wav", "onoffCaption": "door slamming at 2.339-4.578", "frequencyCaption": "door slamming one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_101.wav", "onoffCaption": "spraying at 0.013-1.115, 1.805-2.907, 5.09-6.192", "frequencyCaption": "spraying three times"}
+ {"filepath": "data/single_event_single_identity_test/syn_106.wav", "onoffCaption": "spraying at 2.518-3.314", "frequencyCaption": "spraying one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_108.wav", "onoffCaption": "gunshot at 3.946-5.946, 6.959-8.959", "frequencyCaption": "gunshot two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_112.wav", "onoffCaption": "burping belching at 3.346-6.303, 7.74-10.0", "frequencyCaption": "burping belching two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_115.wav", "onoffCaption": "explosion at 0.084-5.006", "frequencyCaption": "explosion one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_122.wav", "onoffCaption": "tapping clicking clanking at 0.407-3.847", "frequencyCaption": "tapping clicking clanking one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_125.wav", "onoffCaption": "explosion at 0.371-3.123, 5.335-8.087", "frequencyCaption": "explosion two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_131.wav", "onoffCaption": "door slamming at 0.346-1.757, 2.569-3.98, 5.839-7.25", "frequencyCaption": "door slamming three times"}
+ {"filepath": "data/single_event_single_identity_test/syn_136.wav", "onoffCaption": "car horn honking at 0.066-2.75", "frequencyCaption": "car horn honking one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_138.wav", "onoffCaption": "explosion at 2.129-4.881", "frequencyCaption": "explosion one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_140.wav", "onoffCaption": "train horn at 1.872-6.952, 7.829-10.0", "frequencyCaption": "train horn two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_147.wav", "onoffCaption": "explosion at 0.38-3.132, 4.352-7.104, 7.977-10.0", "frequencyCaption": "explosion three times"}
+ {"filepath": "data/single_event_single_identity_test/syn_149.wav", "onoffCaption": "spraying at 0.031-1.133, 1.86-2.962, 3.961-5.063", "frequencyCaption": "spraying three times"}
+ {"filepath": "data/single_event_single_identity_test/syn_153.wav", "onoffCaption": "dog barking at 0.435-2.435, 4.016-6.016", "frequencyCaption": "dog barking two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_154.wav", "onoffCaption": "explosion at 1.704-6.626", "frequencyCaption": "explosion one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_163.wav", "onoffCaption": "sneeze at 2.736-6.889", "frequencyCaption": "sneeze one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_164.wav", "onoffCaption": "sneeze at 2.624-6.777", "frequencyCaption": "sneeze one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_170.wav", "onoffCaption": "burping belching at 3.451-6.408", "frequencyCaption": "burping belching one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_175.wav", "onoffCaption": "explosion at 1.902-6.824", "frequencyCaption": "explosion one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_177.wav", "onoffCaption": "door knocking at 3.219-5.346, 7.058-9.185", "frequencyCaption": "door knocking two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_179.wav", "onoffCaption": "explosion at 2.521-5.273", "frequencyCaption": "explosion one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_181.wav", "onoffCaption": "train horn at 0.212-3.212", "frequencyCaption": "train horn one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_186.wav", "onoffCaption": "sheep goat bleating at 0.651-2.651, 3.512-5.512", "frequencyCaption": "sheep goat bleating two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_188.wav", "onoffCaption": "whistling at 0.87-9.67", "frequencyCaption": "whistling one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_190.wav", "onoffCaption": "woman laughing at 0.484-3.248, 4.163-6.927", "frequencyCaption": "woman laughing two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_192.wav", "onoffCaption": "door knocking at 1.863-3.99, 5.187-7.314", "frequencyCaption": "door knocking two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_195.wav", "onoffCaption": "cow mooing at 0.958-3.582, 5.272-7.896", "frequencyCaption": "cow mooing two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_13.wav", "onoffCaption": "tapping clicking clanking at 3.109-6.549", "frequencyCaption": "tapping clicking clanking one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_14.wav", "onoffCaption": "woman laughing at 0.127-2.891", "frequencyCaption": "woman laughing one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_23.wav", "onoffCaption": "whistling at 0.074-8.874", "frequencyCaption": "whistling one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_24.wav", "onoffCaption": "dog barking at 0.91-2.91", "frequencyCaption": "dog barking one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_30.wav", "onoffCaption": "whistling at 0.978-5.572", "frequencyCaption": "whistling one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_37.wav", "onoffCaption": "whistling at 2.107-6.701", "frequencyCaption": "whistling one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_39.wav", "onoffCaption": "whistling at 0.165-4.759, 5.362-9.956", "frequencyCaption": "whistling two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_41.wav", "onoffCaption": "sheep goat bleating at 0.023-2.023, 3.507-5.507", "frequencyCaption": "sheep goat bleating two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_46.wav", "onoffCaption": "car horn honking at 1.978-4.939, 5.578-8.539", "frequencyCaption": "car horn honking two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_48.wav", "onoffCaption": "thump thud at 1.392-4.067, 5.357-8.032", "frequencyCaption": "thump thud two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_52.wav", "onoffCaption": "dog barking at 0.25-2.25, 3.486-5.486, 6.439-8.439", "frequencyCaption": "dog barking three times"}
+ {"filepath": "data/single_event_single_identity_test/syn_55.wav", "onoffCaption": "gunshot at 2.722-4.722, 6.936-8.936", "frequencyCaption": "gunshot two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_62.wav", "onoffCaption": "burping belching at 0.459-3.416, 4.188-7.145", "frequencyCaption": "burping belching two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_65.wav", "onoffCaption": "sheep goat bleating at 0.55-2.55, 4.457-6.457", "frequencyCaption": "sheep goat bleating two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_71.wav", "onoffCaption": "tapping clicking clanking at 3.396-6.836", "frequencyCaption": "tapping clicking clanking one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_76.wav", "onoffCaption": "sheep goat bleating at 0.056-2.056, 3.47-5.47", "frequencyCaption": "sheep goat bleating two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_78.wav", "onoffCaption": "train horn at 0.083-5.163, 6.748-10.0", "frequencyCaption": "train horn two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_80.wav", "onoffCaption": "whistling at 1.269-5.863, 6.498-10.0", "frequencyCaption": "whistling two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_87.wav", "onoffCaption": "car horn honking at 1.885-4.569, 5.797-8.481", "frequencyCaption": "car horn honking two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_89.wav", "onoffCaption": "train horn at 0.507-3.507", "frequencyCaption": "train horn one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_93.wav", "onoffCaption": "dog barking at 3.063-5.063, 6.381-8.381", "frequencyCaption": "dog barking two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_94.wav", "onoffCaption": "duck quacking at 2.332-4.15", "frequencyCaption": "duck quacking one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_103.wav", "onoffCaption": "gunshot at 1.066-3.066", "frequencyCaption": "gunshot one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_104.wav", "onoffCaption": "cat meowing at 0.488-1.878, 4.297-5.687, 6.263-7.653", "frequencyCaption": "cat meowing three times"}
+ {"filepath": "data/single_event_single_identity_test/syn_110.wav", "onoffCaption": "whistling at 0.407-5.001", "frequencyCaption": "whistling one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_117.wav", "onoffCaption": "cat meowing at 1.091-2.481, 3.509-4.899", "frequencyCaption": "cat meowing two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_119.wav", "onoffCaption": "car horn honking at 0.202-2.886", "frequencyCaption": "car horn honking one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_120.wav", "onoffCaption": "door knocking at 2.729-5.293", "frequencyCaption": "door knocking one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_127.wav", "onoffCaption": "sheep goat bleating at 2.262-4.262, 5.801-7.801", "frequencyCaption": "sheep goat bleating two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_129.wav", "onoffCaption": "sheep goat bleating at 0.602-2.602, 4.548-6.548, 7.151-9.151", "frequencyCaption": "sheep goat bleating three times"}
+ {"filepath": "data/single_event_single_identity_test/syn_133.wav", "onoffCaption": "gunshot at 1.679-3.679, 5.98-7.98", "frequencyCaption": "gunshot two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_134.wav", "onoffCaption": "sheep goat bleating at 0.091-2.091, 3.322-5.322", "frequencyCaption": "sheep goat bleating two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_142.wav", "onoffCaption": "dog barking at 0.622-2.622, 5.087-7.087", "frequencyCaption": "dog barking two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_145.wav", "onoffCaption": "train horn at 2.269-5.269", "frequencyCaption": "train horn one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_151.wav", "onoffCaption": "burping belching at 0.193-2.352", "frequencyCaption": "burping belching one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_156.wav", "onoffCaption": "cow mooing at 1.573-4.373", "frequencyCaption": "cow mooing one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_158.wav", "onoffCaption": "door knocking at 1.174-3.301", "frequencyCaption": "door knocking one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_161.wav", "onoffCaption": "spraying at 0.159-1.261, 2.033-3.135, 4.44-5.542", "frequencyCaption": "spraying three times"}
+ {"filepath": "data/single_event_single_identity_test/syn_166.wav", "onoffCaption": "tapping clicking clanking at 1.641-5.081, 6.146-9.586", "frequencyCaption": "tapping clicking clanking two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_168.wav", "onoffCaption": "explosion at 3.277-8.199", "frequencyCaption": "explosion one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_172.wav", "onoffCaption": "gunshot at 1.58-3.58", "frequencyCaption": "gunshot one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_183.wav", "onoffCaption": "duck quacking at 0.511-2.511", "frequencyCaption": "duck quacking one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_184.wav", "onoffCaption": "spraying at 0.044-0.84", "frequencyCaption": "spraying one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_197.wav", "onoffCaption": "sheep goat bleating at 2.317-4.317, 6.052-8.052", "frequencyCaption": "sheep goat bleating two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_199.wav", "onoffCaption": "dog barking at 3.728-5.728, 6.93-8.93", "frequencyCaption": "dog barking two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_200.wav", "onoffCaption": "thump thud at 1.717-4.017, 5.949-8.249", "frequencyCaption": "thump thud two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_12.wav", "onoffCaption": "dog barking at 2.048-4.048", "frequencyCaption": "dog barking one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_15.wav", "onoffCaption": "dog barking at 0.046-2.046, 4.09-6.09", "frequencyCaption": "dog barking two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_22.wav", "onoffCaption": "whistling at 2.136-6.73", "frequencyCaption": "whistling one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_25.wav", "onoffCaption": "explosion at 1.944-4.696, 6.227-8.979", "frequencyCaption": "explosion two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_31.wav", "onoffCaption": "gunshot at 0.269-2.269, 3.559-5.559, 6.243-8.243", "frequencyCaption": "gunshot three times"}
+ {"filepath": "data/single_event_single_identity_test/syn_36.wav", "onoffCaption": "dog barking at 0.991-2.991", "frequencyCaption": "dog barking one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_38.wav", "onoffCaption": "dog barking at 3.368-5.368, 6.043-8.043", "frequencyCaption": "dog barking two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_40.wav", "onoffCaption": "sheep goat bleating at 0.185-2.185", "frequencyCaption": "sheep goat bleating one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_47.wav", "onoffCaption": "door slamming at 0.106-2.345, 2.885-5.124, 5.997-8.236", "frequencyCaption": "door slamming three times"}
+ {"filepath": "data/single_event_single_identity_test/syn_49.wav", "onoffCaption": "duck quacking at 0.37-2.37", "frequencyCaption": "duck quacking one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_51.wav", "onoffCaption": "cat meowing at 0.245-1.571, 3.125-4.451, 5.016-6.342", "frequencyCaption": "cat meowing three times"}
+ {"filepath": "data/single_event_single_identity_test/syn_53.wav", "onoffCaption": "cat meowing at 0.277-1.603", "frequencyCaption": "cat meowing one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_54.wav", "onoffCaption": "gunshot at 0.17-2.17, 4.644-6.644", "frequencyCaption": "gunshot two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_63.wav", "onoffCaption": "door slamming at 1.788-4.027", "frequencyCaption": "door slamming one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_64.wav", "onoffCaption": "sheep goat bleating at 1.736-3.736, 4.735-6.735, 7.944-9.944", "frequencyCaption": "sheep goat bleating three times"}
+ {"filepath": "data/single_event_single_identity_test/syn_70.wav", "onoffCaption": "sneeze at 0.231-4.384, 5.433-9.586", "frequencyCaption": "sneeze two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_77.wav", "onoffCaption": "dog barking at 3.416-5.416, 5.973-7.973", "frequencyCaption": "dog barking two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_79.wav", "onoffCaption": "tapping clicking clanking at 0.931-4.371", "frequencyCaption": "tapping clicking clanking one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_81.wav", "onoffCaption": "spraying at 2.201-2.997", "frequencyCaption": "spraying one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_86.wav", "onoffCaption": "door knocking at 0.221-2.785", "frequencyCaption": "door knocking one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_88.wav", "onoffCaption": "cow mooing at 2.087-4.887, 6.12-8.92", "frequencyCaption": "cow mooing two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_92.wav", "onoffCaption": "train horn at 0.429-5.509, 6.408-10.0", "frequencyCaption": "train horn two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_95.wav", "onoffCaption": "thump thud at 2.906-5.581", "frequencyCaption": "thump thud one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_102.wav", "onoffCaption": "thump thud at 2.581-4.881, 6.222-8.522", "frequencyCaption": "thump thud two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_105.wav", "onoffCaption": "door slamming at 0.833-3.072, 4.449-6.688", "frequencyCaption": "door slamming two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_111.wav", "onoffCaption": "door knocking at 1.124-3.688, 6.152-8.716", "frequencyCaption": "door knocking two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_116.wav", "onoffCaption": "gunshot at 0.875-2.875, 4.735-6.735", "frequencyCaption": "gunshot two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_118.wav", "onoffCaption": "cat meowing at 0.483-1.809", "frequencyCaption": "cat meowing one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_121.wav", "onoffCaption": "door knocking at 1.619-4.183", "frequencyCaption": "door knocking one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_126.wav", "onoffCaption": "sheep goat bleating at 3.885-5.885, 7.836-9.836", "frequencyCaption": "sheep goat bleating two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_128.wav", "onoffCaption": "tapping clicking clanking at 2.571-6.011", "frequencyCaption": "tapping clicking clanking one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_132.wav", "onoffCaption": "cat meowing at 2.927-4.317, 5.007-6.397, 6.922-8.312", "frequencyCaption": "cat meowing three times"}
+ {"filepath": "data/single_event_single_identity_test/syn_135.wav", "onoffCaption": "door slamming at 3.195-5.434, 6.893-9.132", "frequencyCaption": "door slamming two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_139.wav", "onoffCaption": "duck quacking at 2.765-4.583, 6.906-8.724", "frequencyCaption": "duck quacking two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_143.wav", "onoffCaption": "cat meowing at 2.231-3.621", "frequencyCaption": "cat meowing one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_144.wav", "onoffCaption": "cow mooing at 0.562-3.186, 4.31-6.934", "frequencyCaption": "cow mooing two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_150.wav", "onoffCaption": "dog barking at 0.436-2.436", "frequencyCaption": "dog barking one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_157.wav", "onoffCaption": "sneeze at 3.222-7.375", "frequencyCaption": "sneeze one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_159.wav", "onoffCaption": "sneeze at 2.417-6.57", "frequencyCaption": "sneeze one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_160.wav", "onoffCaption": "tapping clicking clanking at 0.262-3.702, 5.703-9.143", "frequencyCaption": "tapping clicking clanking two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_167.wav", "onoffCaption": "cat meowing at 0.205-1.595, 2.703-4.093", "frequencyCaption": "cat meowing two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_169.wav", "onoffCaption": "train horn at 3.293-8.373", "frequencyCaption": "train horn one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_173.wav", "onoffCaption": "thump thud at 3.392-5.692", "frequencyCaption": "thump thud one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_174.wav", "onoffCaption": "cat meowing at 2.478-3.804, 4.701-6.027, 7.098-8.424", "frequencyCaption": "cat meowing three times"}
+ {"filepath": "data/single_event_single_identity_test/syn_182.wav", "onoffCaption": "door knocking at 2.598-4.725, 5.428-7.555", "frequencyCaption": "door knocking two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_185.wav", "onoffCaption": "gunshot at 3.329-5.329, 6.811-8.811", "frequencyCaption": "gunshot two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_189.wav", "onoffCaption": "door knocking at 2.566-4.693", "frequencyCaption": "door knocking one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_191.wav", "onoffCaption": "cow mooing at 2.094-4.894", "frequencyCaption": "cow mooing one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_196.wav", "onoffCaption": "door knocking at 0.398-2.525, 3.558-5.685, 6.802-8.929", "frequencyCaption": "door knocking three times"}
+ {"filepath": "data/single_event_single_identity_test/syn_198.wav", "onoffCaption": "explosion at 3.575-6.327", "frequencyCaption": "explosion one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_10.wav", "onoffCaption": "duck quacking at 0.107-1.925", "frequencyCaption": "duck quacking one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_17.wav", "onoffCaption": "burping belching at 0.839-2.998, 4.442-6.601", "frequencyCaption": "burping belching two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_19.wav", "onoffCaption": "cat meowing at 2.357-3.683, 5.023-6.349", "frequencyCaption": "cat meowing two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_20.wav", "onoffCaption": "tapping clicking clanking at 2.446-5.886, 7.886-10.0", "frequencyCaption": "tapping clicking clanking two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_27.wav", "onoffCaption": "spraying at 0.301-1.403, 2.423-3.525, 4.539-5.641", "frequencyCaption": "spraying three times"}
+ {"filepath": "data/single_event_single_identity_test/syn_29.wav", "onoffCaption": "tapping clicking clanking at 0.69-4.13, 5.59-9.03", "frequencyCaption": "tapping clicking clanking two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_33.wav", "onoffCaption": "train horn at 2.016-7.096", "frequencyCaption": "train horn one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_34.wav", "onoffCaption": "burping belching at 3.636-5.795, 7.726-9.885", "frequencyCaption": "burping belching two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_42.wav", "onoffCaption": "dog barking at 2.092-4.092", "frequencyCaption": "dog barking one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_45.wav", "onoffCaption": "cat meowing at 2.902-4.228", "frequencyCaption": "cat meowing one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_56.wav", "onoffCaption": "train horn at 0.125-3.125", "frequencyCaption": "train horn one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_58.wav", "onoffCaption": "duck quacking at 0.179-2.179, 4.629-6.629", "frequencyCaption": "duck quacking two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_61.wav", "onoffCaption": "spraying at 2.685-3.787", "frequencyCaption": "spraying one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_66.wav", "onoffCaption": "cat meowing at 0.1-1.426, 2.691-4.017", "frequencyCaption": "cat meowing two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_68.wav", "onoffCaption": "duck quacking at 0.259-2.077", "frequencyCaption": "duck quacking one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_72.wav", "onoffCaption": "sneeze at 0.32-3.673, 4.809-8.162", "frequencyCaption": "sneeze two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_75.wav", "onoffCaption": "door slamming at 3.048-4.459, 6.382-7.793", "frequencyCaption": "door slamming two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_83.wav", "onoffCaption": "dog barking at 1.005-3.005, 5.367-7.367", "frequencyCaption": "dog barking two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_84.wav", "onoffCaption": "woman laughing at 0.34-4.798, 6.685-10.0", "frequencyCaption": "woman laughing two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_90.wav", "onoffCaption": "dog barking at 0.965-2.965, 3.842-5.842, 7.713-9.713", "frequencyCaption": "dog barking three times"}
+ {"filepath": "data/single_event_single_identity_test/syn_97.wav", "onoffCaption": "gunshot at 1.924-3.924", "frequencyCaption": "gunshot one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_99.wav", "onoffCaption": "door knocking at 3.167-5.294, 6.941-9.068", "frequencyCaption": "door knocking two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_100.wav", "onoffCaption": "burping belching at 2.361-4.52, 5.23-7.389", "frequencyCaption": "burping belching two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_107.wav", "onoffCaption": "woman laughing at 2.849-5.613, 6.83-9.594", "frequencyCaption": "woman laughing two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_109.wav", "onoffCaption": "cat meowing at 0.321-1.647, 2.314-3.64, 4.695-6.021", "frequencyCaption": "cat meowing three times"}
+ {"filepath": "data/single_event_single_identity_test/syn_113.wav", "onoffCaption": "duck quacking at 1.194-3.012", "frequencyCaption": "duck quacking one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_114.wav", "onoffCaption": "duck quacking at 0.737-2.737, 3.972-5.972", "frequencyCaption": "duck quacking two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_123.wav", "onoffCaption": "woman laughing at 3.064-7.522", "frequencyCaption": "woman laughing one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_124.wav", "onoffCaption": "door slamming at 0.317-2.556, 3.904-6.143", "frequencyCaption": "door slamming two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_130.wav", "onoffCaption": "duck quacking at 1.714-3.532, 4.074-5.892, 6.517-8.335", "frequencyCaption": "duck quacking three times"}
+ {"filepath": "data/single_event_single_identity_test/syn_137.wav", "onoffCaption": "dog barking at 0.126-2.126, 2.714-4.714", "frequencyCaption": "dog barking two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_141.wav", "onoffCaption": "woman laughing at 3.098-7.556", "frequencyCaption": "woman laughing one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_146.wav", "onoffCaption": "dog barking at 0.087-2.087, 4.127-6.127", "frequencyCaption": "dog barking two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_148.wav", "onoffCaption": "thump thud at 2.712-5.387", "frequencyCaption": "thump thud one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_152.wav", "onoffCaption": "sheep goat bleating at 1.645-3.645, 5.29-7.29", "frequencyCaption": "sheep goat bleating two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_155.wav", "onoffCaption": "woman laughing at 0.079-4.537, 5.539-9.997", "frequencyCaption": "woman laughing two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_162.wav", "onoffCaption": "door knocking at 0.465-2.592, 4.247-6.374", "frequencyCaption": "door knocking two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_165.wav", "onoffCaption": "door slamming at 0.439-2.678", "frequencyCaption": "door slamming one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_171.wav", "onoffCaption": "woman laughing at 0.467-3.231", "frequencyCaption": "woman laughing one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_176.wav", "onoffCaption": "burping belching at 0.432-2.591, 5.061-7.22", "frequencyCaption": "burping belching two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_178.wav", "onoffCaption": "sheep goat bleating at 4.036-6.036, 6.704-8.704", "frequencyCaption": "sheep goat bleating two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_180.wav", "onoffCaption": "cow mooing at 0.178-2.802", "frequencyCaption": "cow mooing one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_187.wav", "onoffCaption": "gunshot at 0.523-2.523, 3.427-5.427", "frequencyCaption": "gunshot two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_193.wav", "onoffCaption": "tapping clicking clanking at 1.074-4.514, 6.811-10.0", "frequencyCaption": "tapping clicking clanking two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_194.wav", "onoffCaption": "train horn at 1.729-6.809", "frequencyCaption": "train horn one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_1.wav", "onoffCaption": "cat meowing at 0.393-1.783, 3.975-5.365", "frequencyCaption": "cat meowing two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_2.wav", "onoffCaption": "cat meowing at 2.278-3.668, 5.204-6.594", "frequencyCaption": "cat meowing two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_3.wav", "onoffCaption": "burping belching at 0.042-2.999, 4.324-7.281, 7.849-10.0", "frequencyCaption": "burping belching three times"}
+ {"filepath": "data/single_event_single_identity_test/syn_4.wav", "onoffCaption": "car horn honking at 0.38-3.341, 4.605-7.566", "frequencyCaption": "car horn honking two times"}
+ {"filepath": "data/single_event_single_identity_test/syn_5.wav", "onoffCaption": "dog barking at 0.088-2.088", "frequencyCaption": "dog barking one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_6.wav", "onoffCaption": "explosion at 2.796-7.718", "frequencyCaption": "explosion one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_7.wav", "onoffCaption": "dog barking at 2.565-4.565", "frequencyCaption": "dog barking one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_8.wav", "onoffCaption": "burping belching at 0.45-3.407", "frequencyCaption": "burping belching one times"}
+ {"filepath": "data/single_event_single_identity_test/syn_9.wav", "onoffCaption": "burping belching at 0.775-3.732", "frequencyCaption": "burping belching one times"}
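Note on the metadata schema: each line above is a standalone JSON object with a filepath, an onoffCaption of the form "<event> at <onset>-<offset>[, <onset>-<offset>...]", and a frequencyCaption of the form "<event> <count> times". A minimal parsing sketch (assuming train.json below follows the same JSON-lines schema and that each caption names a single event, as in these test entries):

import json

def parse_onoff_caption(caption):
    # "dog barking at 0.562-2.562, 4.25-6.25" -> ("dog barking", [(0.562, 2.562), (4.25, 6.25)])
    event, _, spans = caption.rpartition(" at ")
    timestamps = [tuple(float(t) for t in span.split("-")) for span in spans.split(", ")]
    return event, timestamps

with open("data/meta_data/train.json") as f:
    for line in f:
        entry = json.loads(line)
        event, timestamps = parse_onoff_caption(entry["onoffCaption"])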
data/meta_data/train.json ADDED
The diff for this file is too large to render. See raw diff.
picoaudio/audioldm/__init__.py ADDED
@@ -0,0 +1,8 @@
+ from .ldm import LatentDiffusion
+ from .utils import seed_everything, save_wave, get_time, get_duration
+ from .pipeline import *
+
+
+
+
+
picoaudio/audioldm/__main__.py ADDED
@@ -0,0 +1,183 @@
+ #!/usr/bin/python3
+ import os
+ from audioldm import text_to_audio, style_transfer, build_model, save_wave, get_time, round_up_duration, get_duration
+ import argparse
+
+ CACHE_DIR = os.getenv(
+     "AUDIOLDM_CACHE_DIR",
+     os.path.join(os.path.expanduser("~"), ".cache/audioldm"))
+
+ parser = argparse.ArgumentParser()
+
+ parser.add_argument(
+     "--mode",
+     type=str,
+     required=False,
+     default="generation",
+     help="generation: text-to-audio generation; transfer: style transfer",
+     choices=["generation", "transfer"]
+ )
+
+ parser.add_argument(
+     "-t",
+     "--text",
+     type=str,
+     required=False,
+     default="",
+     help="Text prompt to the model for audio generation",
+ )
+
+ parser.add_argument(
+     "-f",
+     "--file_path",
+     type=str,
+     required=False,
+     default=None,
+     help="(--mode transfer): Original audio file for style transfer; or (--mode generation): the guidance audio file for generating similar audio",
+ )
+
+ parser.add_argument(
+     "--transfer_strength",
+     type=float,
+     required=False,
+     default=0.5,
+     help="A value between 0 and 1. 0 means original audio without transfer, 1 means completely transfer to the audio indicated by text",
+ )
+
+ parser.add_argument(
+     "-s",
+     "--save_path",
+     type=str,
+     required=False,
+     help="The path to save model output",
+     default="./output",
+ )
+
+ parser.add_argument(
+     "--model_name",
+     type=str,
+     required=False,
+     help="The checkpoint you are going to use",
+     default="audioldm-s-full",
+     choices=["audioldm-s-full", "audioldm-l-full", "audioldm-s-full-v2"]
+ )
+
+ parser.add_argument(
+     "-ckpt",
+     "--ckpt_path",
+     type=str,
+     required=False,
+     help="The path to the pretrained .ckpt model",
+     default=None,
+ )
+
+ parser.add_argument(
+     "-b",
+     "--batchsize",
+     type=int,
+     required=False,
+     default=1,
+     help="How many samples to generate at the same time",
+ )
+
+ parser.add_argument(
+     "--ddim_steps",
+     type=int,
+     required=False,
+     default=200,
+     help="The number of sampling steps for DDIM",
+ )
+
+ parser.add_argument(
+     "-gs",
+     "--guidance_scale",
+     type=float,
+     required=False,
+     default=2.5,
+     help="Guidance scale (large => better quality and relevance to text; small => better diversity)",
+ )
+
+ parser.add_argument(
+     "-dur",
+     "--duration",
+     type=float,
+     required=False,
+     default=10.0,
+     help="The duration of the samples",
+ )
+
+ parser.add_argument(
+     "-n",
+     "--n_candidate_gen_per_text",
+     type=int,
+     required=False,
+     default=3,
+     help="Automatic quality control. This number controls the number of candidates (e.g., generate three audios and choose the best to show you). A larger value usually leads to better quality with heavier computation",
+ )
+
+ parser.add_argument(
+     "--seed",
+     type=int,
+     required=False,
+     default=42,
+     help="Changing this value (any integer) will lead to a different generation result.",
+ )
+
+ args = parser.parse_args()
+
+ if(args.ckpt_path is not None):
+     print("Warning: ckpt_path has no effect after version 0.0.20.")
+
+ assert args.duration % 2.5 == 0, "Duration must be a multiple of 2.5"
+
+ mode = args.mode
+ if(mode == "generation" and args.file_path is not None):
+     mode = "generation_audio_to_audio"
+     if(len(args.text) > 0):
+         print("Warning: You have specified the --file_path. --text will be ignored")
+         args.text = ""
+
+ save_path = os.path.join(args.save_path, mode)
+
+ if(args.file_path is not None):
+     save_path = os.path.join(save_path, os.path.basename(args.file_path.split(".")[0]))
+
+ text = args.text
+ random_seed = args.seed
+ duration = args.duration
+ guidance_scale = args.guidance_scale
+ n_candidate_gen_per_text = args.n_candidate_gen_per_text
+
+ os.makedirs(save_path, exist_ok=True)
+ audioldm = build_model(model_name=args.model_name)
+
+ if(args.mode == "generation"):
+     waveform = text_to_audio(
+         audioldm,
+         text,
+         args.file_path,
+         random_seed,
+         duration=duration,
+         guidance_scale=guidance_scale,
+         ddim_steps=args.ddim_steps,
+         n_candidate_gen_per_text=n_candidate_gen_per_text,
+         batchsize=args.batchsize,
+     )
+
+ elif(args.mode == "transfer"):
+     assert args.file_path is not None
+     assert os.path.exists(args.file_path), "The original audio file \'%s\' for style transfer does not exist." % args.file_path
+     waveform = style_transfer(
+         audioldm,
+         text,
+         args.file_path,
+         args.transfer_strength,
+         random_seed,
+         duration=duration,
+         guidance_scale=guidance_scale,
+         ddim_steps=args.ddim_steps,
+         batchsize=args.batchsize,
+     )
+     waveform = waveform[:,None,:]
+
+ save_wave(waveform, save_path, name="%s_%s" % (get_time(), text))
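The script above is a thin argparse wrapper around the pipeline functions it imports. A minimal programmatic sketch of the same generation path (argument order follows the text_to_audio call above; checkpoint resolution inside build_model is assumed to work as in the packaged release):

from audioldm import build_model, text_to_audio, save_wave, get_time

audioldm = build_model(model_name="audioldm-s-full")
waveform = text_to_audio(
    audioldm,
    "a dog barking two times",  # text prompt
    None,                       # file_path: no guidance audio
    42,                         # random seed
    duration=10.0,              # must be a multiple of 2.5, per the assert above
    guidance_scale=2.5,
    ddim_steps=200,
    n_candidate_gen_per_text=3,
    batchsize=1,
)
save_wave(waveform, "./output", name="%s_%s" % (get_time(), "a dog barking two times"))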
picoaudio/audioldm/audio/__init__.py ADDED
@@ -0,0 +1,2 @@
+ from .tools import wav_to_fbank, read_wav_file
+ from .stft import TacotronSTFT
picoaudio/audioldm/audio/audio_processing.py ADDED
@@ -0,0 +1,100 @@
+ import torch
+ import numpy as np
+ import librosa.util as librosa_util
+ from scipy.signal import get_window
+
+
+ def window_sumsquare(
+     window,
+     n_frames,
+     hop_length,
+     win_length,
+     n_fft,
+     dtype=np.float32,
+     norm=None,
+ ):
+     """
+     # from librosa 0.6
+     Compute the sum-square envelope of a window function at a given hop length.
+
+     This is used to estimate modulation effects induced by windowing
+     observations in short-time Fourier transforms.
+
+     Parameters
+     ----------
+     window : string, tuple, number, callable, or list-like
+         Window specification, as in `get_window`
+
+     n_frames : int > 0
+         The number of analysis frames
+
+     hop_length : int > 0
+         The number of samples to advance between frames
+
+     win_length : [optional]
+         The length of the window function. By default, this matches `n_fft`.
+
+     n_fft : int > 0
+         The length of each analysis frame.
+
+     dtype : np.dtype
+         The data type of the output
+
+     Returns
+     -------
+     wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))`
+         The sum-squared envelope of the window function
+     """
+     if win_length is None:
+         win_length = n_fft
+
+     n = n_fft + hop_length * (n_frames - 1)
+     x = np.zeros(n, dtype=dtype)
+
+     # Compute the squared window at the desired length
+     win_sq = get_window(window, win_length, fftbins=True)
+     win_sq = librosa_util.normalize(win_sq, norm=norm) ** 2
+     win_sq = librosa_util.pad_center(win_sq, size=n_fft)  # size= keyword, matching the usage in stft.py
+
+     # Fill the envelope
+     for i in range(n_frames):
+         sample = i * hop_length
+         x[sample : min(n, sample + n_fft)] += win_sq[: max(0, min(n_fft, n - sample))]
+     return x
+
+
+ def griffin_lim(magnitudes, stft_fn, n_iters=30):
+     """
+     PARAMS
+     ------
+     magnitudes: spectrogram magnitudes
+     stft_fn: STFT class with transform (STFT) and inverse (ISTFT) methods
+     """
+
+     angles = np.angle(np.exp(2j * np.pi * np.random.rand(*magnitudes.size())))
+     angles = angles.astype(np.float32)
+     angles = torch.autograd.Variable(torch.from_numpy(angles))
+     signal = stft_fn.inverse(magnitudes, angles).squeeze(1)
+
+     for i in range(n_iters):
+         _, angles = stft_fn.transform(signal)
+         signal = stft_fn.inverse(magnitudes, angles).squeeze(1)
+     return signal
+
+
+ def dynamic_range_compression(x, normalize_fun=torch.log, C=1, clip_val=1e-5):
+     """
+     PARAMS
+     ------
+     C: compression factor
+     """
+     return normalize_fun(torch.clamp(x, min=clip_val) * C)
+
+
+ def dynamic_range_decompression(x, C=1):
+     """
+     PARAMS
+     ------
+     C: compression factor used to compress
+     """
+     return torch.exp(x) / C
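A small usage sketch for these helpers (the window and frame parameters below are illustrative only): window_sumsquare returns an envelope of length n_fft + hop_length * (n_frames - 1), and dynamic_range_compression/decompression invert each other for inputs above clip_val.

import torch
from audioldm.audio.audio_processing import (
    dynamic_range_compression,
    dynamic_range_decompression,
    window_sumsquare,
)

# Sum-square envelope of a Hann window over 100 frames
wss = window_sumsquare("hann", n_frames=100, hop_length=160, win_length=1024, n_fft=1024)
print(wss.shape)  # (16864,) == (1024 + 160 * 99,)

# log-compress then exp-decompress is an identity for values above clip_val
x = torch.rand(4, 64) + 0.1
y = dynamic_range_decompression(dynamic_range_compression(x))
print(torch.allclose(x, y, atol=1e-6))  # True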
picoaudio/audioldm/audio/stft.py ADDED
@@ -0,0 +1,186 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn.functional as F
3
+ import numpy as np
4
+ from scipy.signal import get_window
+ from librosa.util import pad_center, tiny
+ from librosa.filters import mel as librosa_mel_fn
+
+ from audioldm.audio.audio_processing import (
+     dynamic_range_compression,
+     dynamic_range_decompression,
+     window_sumsquare,
+ )
+
+
+ class STFT(torch.nn.Module):
+     """adapted from Prem Seetharaman's https://github.com/pseeth/pytorch-stft"""
+
+     def __init__(self, filter_length, hop_length, win_length, window="hann"):
+         super(STFT, self).__init__()
+         self.filter_length = filter_length
+         self.hop_length = hop_length
+         self.win_length = win_length
+         self.window = window
+         self.forward_transform = None
+         scale = self.filter_length / self.hop_length
+         fourier_basis = np.fft.fft(np.eye(self.filter_length))
+
+         cutoff = int((self.filter_length / 2 + 1))
+         fourier_basis = np.vstack(
+             [np.real(fourier_basis[:cutoff, :]), np.imag(fourier_basis[:cutoff, :])]
+         )
+
+         forward_basis = torch.FloatTensor(fourier_basis[:, None, :])
+         inverse_basis = torch.FloatTensor(
+             np.linalg.pinv(scale * fourier_basis).T[:, None, :]
+         )
+
+         if window is not None:
+             assert filter_length >= win_length
+             # get window and zero center pad it to filter_length
+             fft_window = get_window(window, win_length, fftbins=True)
+             fft_window = pad_center(fft_window, size=filter_length)
+             fft_window = torch.from_numpy(fft_window).float()
+
+             # window the bases
+             forward_basis *= fft_window
+             inverse_basis *= fft_window
+
+         self.register_buffer("forward_basis", forward_basis.float())
+         self.register_buffer("inverse_basis", inverse_basis.float())
+
+     def transform(self, input_data):
+         device = self.forward_basis.device
+         input_data = input_data.to(device)
+
+         num_batches = input_data.size(0)
+         num_samples = input_data.size(1)
+
+         self.num_samples = num_samples
+
+         # similar to librosa, reflect-pad the input
+         input_data = input_data.view(num_batches, 1, num_samples)
+         input_data = F.pad(
+             input_data.unsqueeze(1),
+             (int(self.filter_length / 2), int(self.filter_length / 2), 0, 0),
+             mode="reflect",
+         )
+         input_data = input_data.squeeze(1)
+
+         forward_transform = F.conv1d(
+             input_data,
+             torch.autograd.Variable(self.forward_basis, requires_grad=False),
+             stride=self.hop_length,
+             padding=0,
+         )
+
+         cutoff = int((self.filter_length / 2) + 1)
+         real_part = forward_transform[:, :cutoff, :]
+         imag_part = forward_transform[:, cutoff:, :]
+
+         magnitude = torch.sqrt(real_part**2 + imag_part**2)
+         phase = torch.autograd.Variable(torch.atan2(imag_part.data, real_part.data))
+
+         return magnitude, phase
+
+     def inverse(self, magnitude, phase):
+         device = self.forward_basis.device
+         magnitude, phase = magnitude.to(device), phase.to(device)
+
+         recombine_magnitude_phase = torch.cat(
+             [magnitude * torch.cos(phase), magnitude * torch.sin(phase)], dim=1
+         )
+
+         inverse_transform = F.conv_transpose1d(
+             recombine_magnitude_phase,
+             torch.autograd.Variable(self.inverse_basis, requires_grad=False),
+             stride=self.hop_length,
+             padding=0,
+         )
+
+         if self.window is not None:
+             window_sum = window_sumsquare(
+                 self.window,
+                 magnitude.size(-1),
+                 hop_length=self.hop_length,
+                 win_length=self.win_length,
+                 n_fft=self.filter_length,
+                 dtype=np.float32,
+             )
+             # remove modulation effects
+             approx_nonzero_indices = torch.from_numpy(
+                 np.where(window_sum > tiny(window_sum))[0]
+             )
+             window_sum = torch.autograd.Variable(
+                 torch.from_numpy(window_sum), requires_grad=False
+             )
+             inverse_transform[:, :, approx_nonzero_indices] /= window_sum[
+                 approx_nonzero_indices
+             ]
+
+             # scale by hop ratio
+             inverse_transform *= float(self.filter_length) / self.hop_length
+
+         inverse_transform = inverse_transform[:, :, int(self.filter_length / 2) :]
+         inverse_transform = inverse_transform[:, :, : -int(self.filter_length / 2)]
+
+         return inverse_transform
+
+     def forward(self, input_data):
+         self.magnitude, self.phase = self.transform(input_data)
+         reconstruction = self.inverse(self.magnitude, self.phase)
+         return reconstruction
+
+
+ class TacotronSTFT(torch.nn.Module):
+     def __init__(
+         self,
+         filter_length,
+         hop_length,
+         win_length,
+         n_mel_channels,
+         sampling_rate,
+         mel_fmin,
+         mel_fmax,
+     ):
+         super(TacotronSTFT, self).__init__()
+         self.n_mel_channels = n_mel_channels
+         self.sampling_rate = sampling_rate
+         self.stft_fn = STFT(filter_length, hop_length, win_length)
+         mel_basis = librosa_mel_fn(
+             sr=sampling_rate,
+             n_fft=filter_length,
+             n_mels=n_mel_channels,
+             fmin=mel_fmin,
+             fmax=mel_fmax,
+         )
+         mel_basis = torch.from_numpy(mel_basis).float()
+         self.register_buffer("mel_basis", mel_basis)
+
+     def spectral_normalize(self, magnitudes, normalize_fun):
+         output = dynamic_range_compression(magnitudes, normalize_fun)
+         return output
+
+     def spectral_de_normalize(self, magnitudes):
+         output = dynamic_range_decompression(magnitudes)
+         return output
+
+     def mel_spectrogram(self, y, normalize_fun=torch.log):
+         """Computes mel-spectrograms from a batch of waveforms.
+         PARAMS
+         ------
+         y: Variable(torch.FloatTensor) with shape (B, T) in range [-1, 1]
+
+         RETURNS
+         -------
+         mel_output: torch.FloatTensor of shape (B, n_mel_channels, T)
+         """
+         assert torch.min(y.data) >= -1, torch.min(y.data)
+         assert torch.max(y.data) <= 1, torch.max(y.data)
+
+         magnitudes, phases = self.stft_fn.transform(y)
+         magnitudes = magnitudes.data
+         mel_output = torch.matmul(self.mel_basis, magnitudes)
+         mel_output = self.spectral_normalize(mel_output, normalize_fun)
+         energy = torch.norm(magnitudes, dim=1)
+
+         log_magnitudes = self.spectral_normalize(magnitudes, normalize_fun)
+
+         return mel_output, log_magnitudes, energy
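
For reference, a minimal sketch of how TacotronSTFT is typically driven. The hyperparameters below (1024-point FFT, hop 160, 64 mel bins at 16 kHz, fmax 8 kHz) are assumptions matching AudioLDM's usual defaults, not values pinned by this diff:

import torch
from audioldm.audio.stft import TacotronSTFT  # assumes picoaudio/ is on PYTHONPATH

fn_STFT = TacotronSTFT(
    filter_length=1024, hop_length=160, win_length=1024,
    n_mel_channels=64, sampling_rate=16000, mel_fmin=0, mel_fmax=8000,
)
wav = torch.rand(1, 16000) * 2 - 1   # (B, T), values in [-1, 1]
mel, log_mag, energy = fn_STFT.mel_spectrogram(wav)
print(mel.shape)                     # torch.Size([1, 64, 101])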
picoaudio/audioldm/audio/tools.py ADDED
@@ -0,0 +1,85 @@
+ import torch
+ import numpy as np
+ import torchaudio
+
+
+ def get_mel_from_wav(audio, _stft):
+     audio = torch.clip(torch.FloatTensor(audio).unsqueeze(0), -1, 1)
+     audio = torch.autograd.Variable(audio, requires_grad=False)
+     melspec, log_magnitudes_stft, energy = _stft.mel_spectrogram(audio)
+     melspec = torch.squeeze(melspec, 0).numpy().astype(np.float32)
+     log_magnitudes_stft = (
+         torch.squeeze(log_magnitudes_stft, 0).numpy().astype(np.float32)
+     )
+     energy = torch.squeeze(energy, 0).numpy().astype(np.float32)
+     return melspec, log_magnitudes_stft, energy
+
+
+ def _pad_spec(fbank, target_length=1024):
+     n_frames = fbank.shape[0]
+     p = target_length - n_frames
+     # cut and pad
+     if p > 0:
+         m = torch.nn.ZeroPad2d((0, 0, 0, p))
+         fbank = m(fbank)
+     elif p < 0:
+         fbank = fbank[0:target_length, :]
+
+     if fbank.size(-1) % 2 != 0:
+         fbank = fbank[..., :-1]
+
+     return fbank
+
+
+ def pad_wav(waveform, segment_length):
+     waveform_length = waveform.shape[-1]
+     assert waveform_length > 100, "Waveform is too short, %s" % waveform_length
+     if segment_length is None or waveform_length == segment_length:
+         return waveform
+     elif waveform_length > segment_length:
+         return waveform[:segment_length]
+     elif waveform_length < segment_length:
+         temp_wav = np.zeros((1, segment_length))
+         temp_wav[:, :waveform_length] = waveform
+         return temp_wav
+
+
+ def normalize_wav(waveform):
+     waveform = waveform - np.mean(waveform)
+     waveform = waveform / (np.max(np.abs(waveform)) + 1e-8)
+     return waveform * 0.5
+
+
+ def read_wav_file(filename, segment_length):
+     # waveform, sr = librosa.load(filename, sr=None, mono=True)  # 4 times slower
+     waveform, sr = torchaudio.load(filename)  # Faster!!!
+     waveform = torchaudio.functional.resample(waveform, orig_freq=sr, new_freq=16000)
+     waveform = waveform.numpy()[0, ...]
+     waveform = normalize_wav(waveform)
+     waveform = waveform[None, ...]
+     waveform = pad_wav(waveform, segment_length)
+
+     waveform = waveform / np.max(np.abs(waveform))
+     waveform = 0.5 * waveform
+
+     return waveform
+
+
+ def wav_to_fbank(filename, target_length=1024, fn_STFT=None):
+     assert fn_STFT is not None
+
+     # mixup
+     waveform = read_wav_file(filename, target_length * 160)  # hop size is 160
+
+     waveform = waveform[0, ...]
+     waveform = torch.FloatTensor(waveform)
+
+     fbank, log_magnitudes_stft, energy = get_mel_from_wav(waveform, fn_STFT)
+
+     fbank = torch.FloatTensor(fbank.T)
+     log_magnitudes_stft = torch.FloatTensor(log_magnitudes_stft.T)
+
+     fbank, log_magnitudes_stft = _pad_spec(fbank, target_length), _pad_spec(
+         log_magnitudes_stft, target_length
+     )
+
+     return fbank, log_magnitudes_stft, waveform
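
Building on the sketch above, wav_to_fbank turns a file path into a fixed-size (frames, mel) tensor; target_length * 160 samples are read because the STFT hop size is 160. The file name here is a placeholder:

fbank, log_mag, wav = wav_to_fbank("example.wav", target_length=1024, fn_STFT=fn_STFT)
print(fbank.shape)   # torch.Size([1024, 64]) -> (frames, mel bins)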
picoaudio/audioldm/clap/__init__.py ADDED
File without changes
picoaudio/audioldm/clap/encoders.py ADDED
@@ -0,0 +1,170 @@
+ import torch
+ import torch.nn as nn
+ from audioldm.clap.open_clip import create_model
+ from audioldm.clap.training.data import get_audio_features
+ import torchaudio
+ from transformers import RobertaTokenizer
+ import torch.nn.functional as F
+
+
+ class CLAPAudioEmbeddingClassifierFreev2(nn.Module):
+     def __init__(
+         self,
+         pretrained_path="",
+         key="class",
+         sampling_rate=16000,
+         embed_mode="audio",
+         amodel="HTSAT-tiny",
+         unconditional_prob=0.1,
+         random_mute=False,
+         max_random_mute_portion=0.5,
+         training_mode=True,
+     ):
+         super().__init__()
+
+         self.key = key
+         self.device = "cpu"
+         self.precision = "fp32"
+         self.amodel = amodel  # or 'PANN-14'
+         self.tmodel = "roberta"  # the best text encoder in our training
+         self.enable_fusion = False  # False if you do not want to use the fusion model
+         self.fusion_type = "aff_2d"
+         self.pretrained = pretrained_path
+         self.embed_mode = embed_mode
+         self.embed_mode_orig = embed_mode
+         self.sampling_rate = sampling_rate
+         self.unconditional_prob = unconditional_prob
+         self.random_mute = random_mute
+         self.tokenize = RobertaTokenizer.from_pretrained("roberta-base")
+         self.max_random_mute_portion = max_random_mute_portion
+         self.training_mode = training_mode
+         self.model, self.model_cfg = create_model(
+             self.amodel,
+             self.tmodel,
+             self.pretrained,
+             precision=self.precision,
+             device=self.device,
+             enable_fusion=self.enable_fusion,
+             fusion_type=self.fusion_type,
+         )
+         for p in self.model.parameters():
+             p.requires_grad = False
+
+         self.model.eval()
+
+     def get_unconditional_condition(self, batchsize):
+         self.unconditional_token = self.model.get_text_embedding(
+             self.tokenizer(["", ""])
+         )[0:1]
+         return torch.cat([self.unconditional_token.unsqueeze(0)] * batchsize, dim=0)
+
+     def batch_to_list(self, batch):
+         ret = []
+         for i in range(batch.size(0)):
+             ret.append(batch[i])
+         return ret
+
+     def make_decision(self, probability):
+         if float(torch.rand(1)) < probability:
+             return True
+         else:
+             return False
+
+     def random_uniform(self, start, end):
+         val = torch.rand(1).item()
+         return start + (end - start) * val
+
+     def _random_mute(self, waveform):
+         # waveform: [bs, t-steps]
+         t_steps = waveform.size(-1)
+         for i in range(waveform.size(0)):
+             mute_size = int(
+                 self.random_uniform(0, end=int(t_steps * self.max_random_mute_portion))
+             )
+             mute_start = int(self.random_uniform(0, t_steps - mute_size))
+             waveform[i, mute_start : mute_start + mute_size] = 0
+         return waveform
+
+     def cos_similarity(self, waveform, text):
+         # waveform: [bs, t_steps]
+         with torch.no_grad():
+             self.embed_mode = "audio"
+             audio_emb = self(waveform.cuda())
+             self.embed_mode = "text"
+             text_emb = self(text)
+             # F.cosine_similarity returns a tensor; the stray trailing
+             # ", audio_emb, text_emb" in the original made `similarity` a
+             # tuple, which would break the .squeeze() call below.
+             similarity = F.cosine_similarity(audio_emb, text_emb, dim=2)
+         return similarity.squeeze()
+
+     def forward(self, batch, key=None):
+         # If you want this conditioner to be unconditional, set self.unconditional_prob = 1.0
+         # If you want this conditioner to be fully conditional, set self.unconditional_prob = 0.0
+         if self.model.training and not self.training_mode:
+             print(
+                 "The pretrained CLAP model should always be in eval mode. Reloading model just in case you change the parameters."
+             )
+             self.model, self.model_cfg = create_model(
+                 self.amodel,
+                 self.tmodel,
+                 self.pretrained,
+                 precision=self.precision,
+                 device="cuda",
+                 enable_fusion=self.enable_fusion,
+                 fusion_type=self.fusion_type,
+             )
+             for p in self.model.parameters():
+                 p.requires_grad = False
+             self.model.eval()
+
+         # the 'fusion' truncate mode can be changed to 'rand_trunc' if run in unfusion mode
+         if self.embed_mode == "audio":
+             with torch.no_grad():
+                 audio_dict_list = []
+                 assert (
+                     self.sampling_rate == 16000
+                 ), "We only support a 16000 Hz sampling rate"
+                 if self.random_mute:
+                     batch = self._random_mute(batch)
+                 # batch: [bs, 1, t-samples]
+                 batch = torchaudio.functional.resample(
+                     batch, orig_freq=self.sampling_rate, new_freq=48000
+                 )
+                 for waveform in self.batch_to_list(batch):
+                     audio_dict = {}
+                     audio_dict = get_audio_features(
+                         audio_dict,
+                         waveform,
+                         480000,
+                         data_truncating="fusion",
+                         data_filling="repeatpad",
+                         audio_cfg=self.model_cfg["audio_cfg"],
+                     )
+                     audio_dict_list.append(audio_dict)
+                 # [bs, 512]
+                 embed = self.model.get_audio_embedding(audio_dict_list)
+         elif self.embed_mode == "text":
+             with torch.no_grad():
+                 # the 'fusion' truncate mode can be changed to 'rand_trunc' if run in unfusion mode
+                 text_data = self.tokenizer(batch)
+                 embed = self.model.get_text_embedding(text_data)
+
+         embed = embed.unsqueeze(1)
+         self.unconditional_token = self.model.get_text_embedding(
+             self.tokenizer(["", ""])
+         )[0:1]
+
+         for i in range(embed.size(0)):
+             if self.make_decision(self.unconditional_prob):
+                 embed[i] = self.unconditional_token
+
+         # [bs, 1, 512]
+         return embed.detach()
+
+     def tokenizer(self, text):
+         result = self.tokenize(
+             text,
+             padding="max_length",
+             truncation=True,
+             max_length=512,
+             return_tensors="pt",
+         )
+         return {k: v.squeeze(0) for k, v in result.items()}
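
A hypothetical usage sketch for the text branch of this conditioner; ckpt_path must point to a real CLAP checkpoint, and unconditional_prob=0.0 disables the random unconditional replacement:

clap = CLAPAudioEmbeddingClassifierFreev2(
    pretrained_path=ckpt_path, embed_mode="text", unconditional_prob=0.0
)
emb = clap(["a dog barks twice"])   # -> tensor of shape [1, 1, 512]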
picoaudio/audioldm/clap/open_clip/__init__.py ADDED
@@ -0,0 +1,25 @@
+ from .factory import (
+     list_models,
+     create_model,
+     create_model_and_transforms,
+     add_model_config,
+ )
+ from .loss import ClipLoss, gather_features, LPLoss, lp_gather_features, LPMetrics
+ from .model import (
+     CLAP,
+     CLAPTextCfg,
+     CLAPVisionCfg,
+     CLAPAudioCfp,
+     convert_weights_to_fp16,
+     trace_model,
+ )
+ from .openai import load_openai_model, list_openai_models
+ from .pretrained import (
+     list_pretrained,
+     list_pretrained_tag_models,
+     list_pretrained_model_tags,
+     get_pretrained_url,
+     download_pretrained,
+ )
+ from .tokenizer import SimpleTokenizer, tokenize
+ from .transform import image_transform
picoaudio/audioldm/clap/open_clip/bert.py ADDED
@@ -0,0 +1,40 @@
+ from transformers import BertTokenizer, BertModel
+
+ tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
+ model = BertModel.from_pretrained("bert-base-uncased")
+ text = "Replace me by any text you'd like."
+
+
+ def bert_embeddings(text):
+     # text = "Replace me by any text you'd like."
+     encoded_input = tokenizer(text, return_tensors="pt")
+     output = model(**encoded_input)
+     return output
+
+
+ from transformers import RobertaTokenizer, RobertaModel
+
+ tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
+ model = RobertaModel.from_pretrained("roberta-base")
+ text = "Replace me by any text you'd like."
+
+
+ def Roberta_embeddings(text):
+     # text = "Replace me by any text you'd like."
+     encoded_input = tokenizer(text, return_tensors="pt")
+     output = model(**encoded_input)
+     return output
+
+
+ from transformers import BartTokenizer, BartModel
+
+ tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
+ model = BartModel.from_pretrained("facebook/bart-base")
+ text = "Replace me by any text you'd like."
+
+
+ def bart_embeddings(text):
+     # text = "Replace me by any text you'd like."
+     encoded_input = tokenizer(text, return_tensors="pt")
+     output = model(**encoded_input)
+     return output
picoaudio/audioldm/clap/open_clip/bpe_simple_vocab_16e6.txt.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:924691ac288e54409236115652ad4aa250f48203de50a9e4722a6ecd48d6804a
+ size 1356917
picoaudio/audioldm/clap/open_clip/factory.py ADDED
@@ -0,0 +1,279 @@
+ import json
+ import logging
+ import os
+ import pathlib
+ import re
+ from copy import deepcopy
+ from pathlib import Path
+
+ import torch
+
+ from .model import CLAP, convert_weights_to_fp16
+ from .openai import load_openai_model
+ from .pretrained import get_pretrained_url, download_pretrained
+ from .transform import image_transform
+
+ _MODEL_CONFIG_PATHS = [Path(__file__).parent / "model_configs/"]
+ _MODEL_CONFIGS = {}  # directory (model_name: config) of model architecture configs
+ CACHE_DIR = os.getenv("AUDIOLDM_CACHE_DIR", "~/.cache/audioldm")
+
+
+ def _natural_key(string_):
+     return [int(s) if s.isdigit() else s for s in re.split(r"(\d+)", string_.lower())]
+
+
+ def _rescan_model_configs():
+     global _MODEL_CONFIGS
+
+     config_ext = (".json",)
+     config_files = []
+     for config_path in _MODEL_CONFIG_PATHS:
+         if config_path.is_file() and config_path.suffix in config_ext:
+             config_files.append(config_path)
+         elif config_path.is_dir():
+             for ext in config_ext:
+                 config_files.extend(config_path.glob(f"*{ext}"))
+
+     for cf in config_files:
+         if os.path.basename(cf)[0] == ".":
+             continue  # Ignore hidden files
+
+         with open(cf, "r") as f:
+             model_cfg = json.load(f)
+             if all(a in model_cfg for a in ("embed_dim", "audio_cfg", "text_cfg")):
+                 _MODEL_CONFIGS[cf.stem] = model_cfg
+
+     _MODEL_CONFIGS = {
+         k: v
+         for k, v in sorted(_MODEL_CONFIGS.items(), key=lambda x: _natural_key(x[0]))
+     }
+
+
+ _rescan_model_configs()  # initial populate of model config registry
+
+
+ def load_state_dict(checkpoint_path: str, map_location="cpu", skip_params=True):
+     checkpoint = torch.load(checkpoint_path, map_location=map_location)
+     if isinstance(checkpoint, dict) and "state_dict" in checkpoint:
+         state_dict = checkpoint["state_dict"]
+     else:
+         state_dict = checkpoint
+     if skip_params:
+         if next(iter(state_dict.items()))[0].startswith("module"):
+             state_dict = {k[7:]: v for k, v in state_dict.items()}
+     # for k in state_dict:
+     #     if k.startswith('transformer'):
+     #         v = state_dict.pop(k)
+     #         state_dict['text_branch.' + k[12:]] = v
+     return state_dict
+
+
+ def create_model(
+     amodel_name: str,
+     tmodel_name: str,
+     pretrained: str = "",
+     precision: str = "fp32",
+     device: torch.device = torch.device("cpu"),
+     jit: bool = False,
+     force_quick_gelu: bool = False,
+     openai_model_cache_dir: str = os.path.expanduser(f"{CACHE_DIR}/clip"),
+     skip_params=True,
+     pretrained_audio: str = "",
+     pretrained_text: str = "",
+     enable_fusion: bool = False,
+     fusion_type: str = "None"
+     # pretrained_image: bool = False,
+ ):
+     amodel_name = amodel_name.replace(
+         "/", "-"
+     )  # for callers using old naming with / in ViT names
+     pretrained_orig = pretrained
+     pretrained = pretrained.lower()
+     if pretrained == "openai":
+         if amodel_name in _MODEL_CONFIGS:
+             logging.info(f"Loading {amodel_name} model config.")
+             model_cfg = deepcopy(_MODEL_CONFIGS[amodel_name])
+         else:
+             logging.error(
+                 f"Model config for {amodel_name} not found; available models {list_models()}."
+             )
+             raise RuntimeError(f"Model config for {amodel_name} not found.")
+
+         logging.info(f"Loading pretrained ViT-B-16 text encoder from OpenAI.")
+         # Hard code in the model name
+         model_cfg["text_cfg"]["model_type"] = tmodel_name
+         model = load_openai_model(
+             "ViT-B-16",
+             model_cfg,
+             device=device,
+             jit=jit,
+             cache_dir=openai_model_cache_dir,
+             enable_fusion=enable_fusion,
+             fusion_type=fusion_type,
+         )
+         # See https://discuss.pytorch.org/t/valueerror-attemting-to-unscale-fp16-gradients/81372
+         if precision == "amp" or precision == "fp32":
+             model = model.float()
+     else:
+         if amodel_name in _MODEL_CONFIGS:
+             logging.info(f"Loading {amodel_name} model config.")
+             model_cfg = deepcopy(_MODEL_CONFIGS[amodel_name])
+         else:
+             logging.error(
+                 f"Model config for {amodel_name} not found; available models {list_models()}."
+             )
+             raise RuntimeError(f"Model config for {amodel_name} not found.")
+
+         if force_quick_gelu:
+             # override for use of QuickGELU on non-OpenAI transformer models
+             model_cfg["quick_gelu"] = True
+
+         # if pretrained_image:
+         #     if 'timm_amodel_name' in model_cfg.get('vision_cfg', {}):
+         #         # pretrained weight loading for timm models set via vision_cfg
+         #         model_cfg['vision_cfg']['timm_model_pretrained'] = True
+         #     else:
+         #         assert False, 'pretrained image towers currently only supported for timm models'
+         model_cfg["text_cfg"]["model_type"] = tmodel_name
+         model_cfg["enable_fusion"] = enable_fusion
+         model_cfg["fusion_type"] = fusion_type
+         model = CLAP(**model_cfg)
+
+         if pretrained:
+             checkpoint_path = ""
+             url = get_pretrained_url(amodel_name, pretrained)
+             if url:
+                 checkpoint_path = download_pretrained(url, root=openai_model_cache_dir)
+             elif os.path.exists(pretrained_orig):
+                 checkpoint_path = pretrained_orig
+             if checkpoint_path:
+                 logging.info(
+                     f"Loading pretrained {amodel_name}-{tmodel_name} weights ({pretrained})."
+                 )
+                 ckpt = load_state_dict(checkpoint_path, skip_params=True)
+                 model.load_state_dict(ckpt)
+                 param_names = [n for n, p in model.named_parameters()]
+                 # for n in param_names:
+                 #     print(n, "\t", "Loaded" if n in ckpt else "Unloaded")
+             else:
+                 logging.warning(
+                     f"Pretrained weights ({pretrained}) not found for model {amodel_name}."
+                 )
+                 raise RuntimeError(
+                     f"Pretrained weights ({pretrained}) not found for model {amodel_name}."
+                 )
+
+         if pretrained_audio:
+             if amodel_name.startswith("PANN"):
+                 if "Cnn14_mAP" in pretrained_audio:  # official checkpoint
+                     audio_ckpt = torch.load(pretrained_audio, map_location="cpu")
+                     audio_ckpt = audio_ckpt["model"]
+                     keys = list(audio_ckpt.keys())
+                     for key in keys:
+                         if (
+                             "spectrogram_extractor" not in key
+                             and "logmel_extractor" not in key
+                         ):
+                             v = audio_ckpt.pop(key)
+                             audio_ckpt["audio_branch." + key] = v
+                 elif os.path.basename(pretrained_audio).startswith(
+                     "PANN"
+                 ):  # checkpoint trained via the HTSAT codebase
+                     audio_ckpt = torch.load(pretrained_audio, map_location="cpu")
+                     audio_ckpt = audio_ckpt["state_dict"]
+                     keys = list(audio_ckpt.keys())
+                     for key in keys:
+                         if key.startswith("sed_model"):
+                             v = audio_ckpt.pop(key)
+                             audio_ckpt["audio_branch." + key[10:]] = v
+                 elif os.path.basename(pretrained_audio).startswith(
+                     "finetuned"
+                 ):  # checkpoint trained via the linear probe codebase
+                     audio_ckpt = torch.load(pretrained_audio, map_location="cpu")
+                 else:
+                     raise ValueError("Unknown audio checkpoint")
+             elif amodel_name.startswith("HTSAT"):
+                 if "HTSAT_AudioSet_Saved" in pretrained_audio:  # official checkpoint
+                     audio_ckpt = torch.load(pretrained_audio, map_location="cpu")
+                     audio_ckpt = audio_ckpt["state_dict"]
+                     keys = list(audio_ckpt.keys())
+                     for key in keys:
+                         if key.startswith("sed_model") and (
+                             "spectrogram_extractor" not in key
+                             and "logmel_extractor" not in key
+                         ):
+                             v = audio_ckpt.pop(key)
+                             audio_ckpt["audio_branch." + key[10:]] = v
+                 elif os.path.basename(pretrained_audio).startswith(
+                     "HTSAT"
+                 ):  # checkpoint trained via the HTSAT codebase
+                     audio_ckpt = torch.load(pretrained_audio, map_location="cpu")
+                     audio_ckpt = audio_ckpt["state_dict"]
+                     keys = list(audio_ckpt.keys())
+                     for key in keys:
+                         if key.startswith("sed_model"):
+                             v = audio_ckpt.pop(key)
+                             audio_ckpt["audio_branch." + key[10:]] = v
+                 elif os.path.basename(pretrained_audio).startswith(
+                     "finetuned"
+                 ):  # checkpoint trained via the linear probe codebase
+                     audio_ckpt = torch.load(pretrained_audio, map_location="cpu")
+                 else:
+                     raise ValueError("Unknown audio checkpoint")
+             else:
+                 # raising a bare string is invalid in Python 3; use a proper exception
+                 raise ValueError(
+                     "this audio encoder pretrained checkpoint is not supported"
+                 )
+
+             model.load_state_dict(audio_ckpt, strict=False)
+             logging.info(
+                 f"Loading pretrained {amodel_name} weights ({pretrained_audio})."
+             )
+             param_names = [n for n, p in model.named_parameters()]
+             for n in param_names:
+                 print(n, "\t", "Loaded" if n in audio_ckpt else "Unloaded")
+
+         model.to(device=device)
+         if precision == "fp16":
+             assert device.type != "cpu"
+             convert_weights_to_fp16(model)
+
+         if jit:
+             model = torch.jit.script(model)
+
+     return model, model_cfg
+
+
+ def create_model_and_transforms(
+     model_name: str,
+     pretrained: str = "",
+     precision: str = "fp32",
+     device: torch.device = torch.device("cpu"),
+     jit: bool = False,
+     force_quick_gelu: bool = False,
+     # pretrained_image: bool = False,
+ ):
+     # NOTE: create_model returns (model, model_cfg); the original bound the whole
+     # tuple to `model`, breaking the attribute accesses below. This helper appears
+     # to be a leftover from the image CLIP factory.
+     model, _ = create_model(
+         model_name,
+         pretrained,
+         precision,
+         device,
+         jit,
+         force_quick_gelu=force_quick_gelu,
+         # pretrained_image=pretrained_image
+     )
+     preprocess_train = image_transform(model.visual.image_size, is_train=True)
+     preprocess_val = image_transform(model.visual.image_size, is_train=False)
+     return model, preprocess_train, preprocess_val
+
+
+ def list_models():
+     """enumerate available model architectures based on config files"""
+     return list(_MODEL_CONFIGS.keys())
+
+
+ def add_model_config(path):
+     """add model config path or file and update registry"""
+     if not isinstance(path, Path):
+         path = Path(path)
+     _MODEL_CONFIG_PATHS.append(path)
+     _rescan_model_configs()
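
A minimal sketch of the factory call as the CLAP encoder in this repo uses it (the checkpoint path is a placeholder; a missing path raises RuntimeError):

import torch
model, model_cfg = create_model(
    "HTSAT-tiny", "roberta", "/path/to/clap_ckpt.pt",
    precision="fp32", device=torch.device("cpu"),
    enable_fusion=False, fusion_type="aff_2d",
)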
picoaudio/audioldm/clap/open_clip/feature_fusion.py ADDED
@@ -0,0 +1,192 @@
+ """
+ Feature Fusion for Variable-Length Data Processing
+ AFF/iAFF is referred and modified from https://github.com/YimianDai/open-aff/blob/master/aff_pytorch/aff_net/fusion.py
+ According to the paper: Yimian Dai et al, Attentional Feature Fusion, IEEE Winter Conference on Applications of Computer Vision, WACV 2021
+ """
+
+ import torch
+ import torch.nn as nn
+
+
+ class DAF(nn.Module):
+     """
+     Direct addition fusion (DirectAddFuse)
+     """
+
+     def __init__(self):
+         super(DAF, self).__init__()
+
+     def forward(self, x, residual):
+         return x + residual
+
+
+ class iAFF(nn.Module):
+     """
+     Iterative attentional multi-feature fusion (iAFF)
+     """
+
+     def __init__(self, channels=64, r=4, type="2D"):
+         super(iAFF, self).__init__()
+         inter_channels = int(channels // r)
+
+         if type == "1D":
+             # local attention
+             self.local_att = nn.Sequential(
+                 nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
+                 nn.BatchNorm1d(inter_channels),
+                 nn.ReLU(inplace=True),
+                 nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
+                 nn.BatchNorm1d(channels),
+             )
+
+             # global attention
+             self.global_att = nn.Sequential(
+                 nn.AdaptiveAvgPool1d(1),
+                 nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
+                 nn.BatchNorm1d(inter_channels),
+                 nn.ReLU(inplace=True),
+                 nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
+                 nn.BatchNorm1d(channels),
+             )
+
+             # second local attention
+             self.local_att2 = nn.Sequential(
+                 nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
+                 nn.BatchNorm1d(inter_channels),
+                 nn.ReLU(inplace=True),
+                 nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
+                 nn.BatchNorm1d(channels),
+             )
+             # second global attention
+             self.global_att2 = nn.Sequential(
+                 nn.AdaptiveAvgPool1d(1),
+                 nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
+                 nn.BatchNorm1d(inter_channels),
+                 nn.ReLU(inplace=True),
+                 nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
+                 nn.BatchNorm1d(channels),
+             )
+         elif type == "2D":
+             # local attention
+             self.local_att = nn.Sequential(
+                 nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
+                 nn.BatchNorm2d(inter_channels),
+                 nn.ReLU(inplace=True),
+                 nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
+                 nn.BatchNorm2d(channels),
+             )
+
+             # global attention
+             self.global_att = nn.Sequential(
+                 nn.AdaptiveAvgPool2d(1),
+                 nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
+                 nn.BatchNorm2d(inter_channels),
+                 nn.ReLU(inplace=True),
+                 nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
+                 nn.BatchNorm2d(channels),
+             )
+
+             # second local attention
+             self.local_att2 = nn.Sequential(
+                 nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
+                 nn.BatchNorm2d(inter_channels),
+                 nn.ReLU(inplace=True),
+                 nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
+                 nn.BatchNorm2d(channels),
+             )
+             # second global attention
+             self.global_att2 = nn.Sequential(
+                 nn.AdaptiveAvgPool2d(1),
+                 nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
+                 nn.BatchNorm2d(inter_channels),
+                 nn.ReLU(inplace=True),
+                 nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
+                 nn.BatchNorm2d(channels),
+             )
+         else:
+             # raising a bare string is invalid in Python 3; use a proper exception
+             raise ValueError(f"fusion type {type} is not supported")
+
+         self.sigmoid = nn.Sigmoid()
+
+     def forward(self, x, residual):
+         flag = False
+         xa = x + residual
+         if xa.size(0) == 1:
+             # BatchNorm needs batch size > 1; duplicate the sample and undo it below
+             xa = torch.cat([xa, xa], dim=0)
+             flag = True
+         xl = self.local_att(xa)
+         xg = self.global_att(xa)
+         xlg = xl + xg
+         wei = self.sigmoid(xlg)
+         xi = x * wei + residual * (1 - wei)
+
+         xl2 = self.local_att2(xi)
+         # NOTE: global_att (not global_att2) is reused here, matching the upstream
+         # CLAP code; global_att2 is defined above but never used.
+         xg2 = self.global_att(xi)
+         xlg2 = xl2 + xg2
+         wei2 = self.sigmoid(xlg2)
+         xo = x * wei2 + residual * (1 - wei2)
+         if flag:
+             xo = xo[0].unsqueeze(0)
+         return xo
+
+
+ class AFF(nn.Module):
+     """
+     Attentional multi-feature fusion (AFF)
+     """
+
+     def __init__(self, channels=64, r=4, type="2D"):
+         super(AFF, self).__init__()
+         inter_channels = int(channels // r)
+
+         if type == "1D":
+             self.local_att = nn.Sequential(
+                 nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
+                 nn.BatchNorm1d(inter_channels),
+                 nn.ReLU(inplace=True),
+                 nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
+                 nn.BatchNorm1d(channels),
+             )
+             self.global_att = nn.Sequential(
+                 nn.AdaptiveAvgPool1d(1),
+                 nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
+                 nn.BatchNorm1d(inter_channels),
+                 nn.ReLU(inplace=True),
+                 nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
+                 nn.BatchNorm1d(channels),
+             )
+         elif type == "2D":
+             self.local_att = nn.Sequential(
+                 nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
+                 nn.BatchNorm2d(inter_channels),
+                 nn.ReLU(inplace=True),
+                 nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
+                 nn.BatchNorm2d(channels),
+             )
+             self.global_att = nn.Sequential(
+                 nn.AdaptiveAvgPool2d(1),
+                 nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
+                 nn.BatchNorm2d(inter_channels),
+                 nn.ReLU(inplace=True),
+                 nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
+                 nn.BatchNorm2d(channels),
+             )
+         else:
+             # raising a bare string is invalid in Python 3; use a proper exception
+             raise ValueError(f"fusion type {type} is not supported")
+
+         self.sigmoid = nn.Sigmoid()
+
+     def forward(self, x, residual):
+         flag = False
+         xa = x + residual
+         if xa.size(0) == 1:
+             # BatchNorm needs batch size > 1; duplicate the sample and undo it below
+             xa = torch.cat([xa, xa], dim=0)
+             flag = True
+         xl = self.local_att(xa)
+         xg = self.global_att(xa)
+         xlg = xl + xg
+         wei = self.sigmoid(xlg)
+         xo = 2 * x * wei + 2 * residual * (1 - wei)
+         if flag:
+             xo = xo[0].unsqueeze(0)
+         return xo
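
A quick shape check for the 2-D attentional fusion: both inputs must share (B, C, H, W), and the output keeps that shape (the values here are illustrative):

import torch
fuse = AFF(channels=96, type="2D")
a = torch.randn(2, 96, 8, 8)
b = torch.randn(2, 96, 8, 8)
print(fuse(a, b).shape)   # torch.Size([2, 96, 8, 8])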
picoaudio/audioldm/clap/open_clip/htsat.py ADDED
@@ -0,0 +1,1308 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Ke Chen
2
3
+ # HTS-AT: A HIERARCHICAL TOKEN-SEMANTIC AUDIO TRANSFORMER FOR SOUND CLASSIFICATION AND DETECTION
4
+ # Some layers designed on the model
5
+ # below codes are based and referred from https://github.com/microsoft/Swin-Transformer
6
+ # Swin Transformer for Computer Vision: https://arxiv.org/pdf/2103.14030.pdf
7
+
8
+ import torch
9
+ import torch.nn as nn
10
+ import torch.nn.functional as F
11
+ from itertools import repeat
12
+ import collections.abc
13
+ import math
14
+ import warnings
15
+
16
+ from torch.nn.init import _calculate_fan_in_and_fan_out
17
+ import torch.utils.checkpoint as checkpoint
18
+
19
+ import random
20
+
21
+ from torchlibrosa.stft import Spectrogram, LogmelFilterBank
22
+ from torchlibrosa.augmentation import SpecAugmentation
23
+
24
+ from itertools import repeat
25
+ from .utils import do_mixup, interpolate
26
+
27
+ from .feature_fusion import iAFF, AFF, DAF
28
+
29
+ # from PyTorch internals
30
+ def _ntuple(n):
31
+ def parse(x):
32
+ if isinstance(x, collections.abc.Iterable):
33
+ return x
34
+ return tuple(repeat(x, n))
35
+
36
+ return parse
37
+
38
+
39
+ to_1tuple = _ntuple(1)
40
+ to_2tuple = _ntuple(2)
41
+ to_3tuple = _ntuple(3)
42
+ to_4tuple = _ntuple(4)
43
+ to_ntuple = _ntuple
44
+
45
+
46
+ def drop_path(x, drop_prob: float = 0.0, training: bool = False):
47
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
48
+ This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
49
+ the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
50
+ See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
51
+ changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
52
+ 'survival rate' as the argument.
53
+ """
54
+ if drop_prob == 0.0 or not training:
55
+ return x
56
+ keep_prob = 1 - drop_prob
57
+ shape = (x.shape[0],) + (1,) * (
58
+ x.ndim - 1
59
+ ) # work with diff dim tensors, not just 2D ConvNets
60
+ random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
61
+ random_tensor.floor_() # binarize
62
+ output = x.div(keep_prob) * random_tensor
63
+ return output
64
+
65
+
66
+ class DropPath(nn.Module):
67
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
68
+
69
+ def __init__(self, drop_prob=None):
70
+ super(DropPath, self).__init__()
71
+ self.drop_prob = drop_prob
72
+
73
+ def forward(self, x):
74
+ return drop_path(x, self.drop_prob, self.training)
75
+
76
+
77
+ class PatchEmbed(nn.Module):
78
+ """2D Image to Patch Embedding"""
79
+
80
+ def __init__(
81
+ self,
82
+ img_size=224,
83
+ patch_size=16,
84
+ in_chans=3,
85
+ embed_dim=768,
86
+ norm_layer=None,
87
+ flatten=True,
88
+ patch_stride=16,
89
+ enable_fusion=False,
90
+ fusion_type="None",
91
+ ):
92
+ super().__init__()
93
+ img_size = to_2tuple(img_size)
94
+ patch_size = to_2tuple(patch_size)
95
+ patch_stride = to_2tuple(patch_stride)
96
+ self.img_size = img_size
97
+ self.patch_size = patch_size
98
+ self.patch_stride = patch_stride
99
+ self.grid_size = (
100
+ img_size[0] // patch_stride[0],
101
+ img_size[1] // patch_stride[1],
102
+ )
103
+ self.num_patches = self.grid_size[0] * self.grid_size[1]
104
+ self.flatten = flatten
105
+ self.in_chans = in_chans
106
+ self.embed_dim = embed_dim
107
+
108
+ self.enable_fusion = enable_fusion
109
+ self.fusion_type = fusion_type
110
+
111
+ padding = (
112
+ (patch_size[0] - patch_stride[0]) // 2,
113
+ (patch_size[1] - patch_stride[1]) // 2,
114
+ )
115
+
116
+ if (self.enable_fusion) and (self.fusion_type == "channel_map"):
117
+ self.proj = nn.Conv2d(
118
+ in_chans * 4,
119
+ embed_dim,
120
+ kernel_size=patch_size,
121
+ stride=patch_stride,
122
+ padding=padding,
123
+ )
124
+ else:
125
+ self.proj = nn.Conv2d(
126
+ in_chans,
127
+ embed_dim,
128
+ kernel_size=patch_size,
129
+ stride=patch_stride,
130
+ padding=padding,
131
+ )
132
+ self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
133
+
134
+ if (self.enable_fusion) and (
135
+ self.fusion_type in ["daf_2d", "aff_2d", "iaff_2d"]
136
+ ):
137
+ self.mel_conv2d = nn.Conv2d(
138
+ in_chans,
139
+ embed_dim,
140
+ kernel_size=(patch_size[0], patch_size[1] * 3),
141
+ stride=(patch_stride[0], patch_stride[1] * 3),
142
+ padding=padding,
143
+ )
144
+ if self.fusion_type == "daf_2d":
145
+ self.fusion_model = DAF()
146
+ elif self.fusion_type == "aff_2d":
147
+ self.fusion_model = AFF(channels=embed_dim, type="2D")
148
+ elif self.fusion_type == "iaff_2d":
149
+ self.fusion_model = iAFF(channels=embed_dim, type="2D")
150
+
151
+ def forward(self, x, longer_idx=None):
152
+ if (self.enable_fusion) and (
153
+ self.fusion_type in ["daf_2d", "aff_2d", "iaff_2d"]
154
+ ):
155
+ global_x = x[:, 0:1, :, :]
156
+
157
+ # global processing
158
+ B, C, H, W = global_x.shape
159
+ assert (
160
+ H == self.img_size[0] and W == self.img_size[1]
161
+ ), f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
162
+ global_x = self.proj(global_x)
163
+ TW = global_x.size(-1)
164
+ if len(longer_idx) > 0:
165
+ # local processing
166
+ local_x = x[longer_idx, 1:, :, :].contiguous()
167
+ B, C, H, W = local_x.shape
168
+ local_x = local_x.view(B * C, 1, H, W)
169
+ local_x = self.mel_conv2d(local_x)
170
+ local_x = local_x.view(
171
+ B, C, local_x.size(1), local_x.size(2), local_x.size(3)
172
+ )
173
+ local_x = local_x.permute((0, 2, 3, 1, 4)).contiguous().flatten(3)
174
+ TB, TC, TH, _ = local_x.size()
175
+ if local_x.size(-1) < TW:
176
+ local_x = torch.cat(
177
+ [
178
+ local_x,
179
+ torch.zeros(
180
+ (TB, TC, TH, TW - local_x.size(-1)),
181
+ device=global_x.device,
182
+ ),
183
+ ],
184
+ dim=-1,
185
+ )
186
+ else:
187
+ local_x = local_x[:, :, :, :TW]
188
+
189
+ global_x[longer_idx] = self.fusion_model(global_x[longer_idx], local_x)
190
+ x = global_x
191
+ else:
192
+ B, C, H, W = x.shape
193
+ assert (
194
+ H == self.img_size[0] and W == self.img_size[1]
195
+ ), f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
196
+ x = self.proj(x)
197
+
198
+ if self.flatten:
199
+ x = x.flatten(2).transpose(1, 2) # BCHW -> BNC
200
+ x = self.norm(x)
201
+ return x
202
+
203
+
204
+ class Mlp(nn.Module):
205
+ """MLP as used in Vision Transformer, MLP-Mixer and related networks"""
206
+
207
+ def __init__(
208
+ self,
209
+ in_features,
210
+ hidden_features=None,
211
+ out_features=None,
212
+ act_layer=nn.GELU,
213
+ drop=0.0,
214
+ ):
215
+ super().__init__()
216
+ out_features = out_features or in_features
217
+ hidden_features = hidden_features or in_features
218
+ self.fc1 = nn.Linear(in_features, hidden_features)
219
+ self.act = act_layer()
220
+ self.fc2 = nn.Linear(hidden_features, out_features)
221
+ self.drop = nn.Dropout(drop)
222
+
223
+ def forward(self, x):
224
+ x = self.fc1(x)
225
+ x = self.act(x)
226
+ x = self.drop(x)
227
+ x = self.fc2(x)
228
+ x = self.drop(x)
229
+ return x
230
+
231
+
232
+ def _no_grad_trunc_normal_(tensor, mean, std, a, b):
233
+ # Cut & paste from PyTorch official master until it's in a few official releases - RW
234
+ # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
235
+ def norm_cdf(x):
236
+ # Computes standard normal cumulative distribution function
237
+ return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
238
+
239
+ if (mean < a - 2 * std) or (mean > b + 2 * std):
240
+ warnings.warn(
241
+ "mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
242
+ "The distribution of values may be incorrect.",
243
+ stacklevel=2,
244
+ )
245
+
246
+ with torch.no_grad():
247
+ # Values are generated by using a truncated uniform distribution and
248
+ # then using the inverse CDF for the normal distribution.
249
+ # Get upper and lower cdf values
250
+ l = norm_cdf((a - mean) / std)
251
+ u = norm_cdf((b - mean) / std)
252
+
253
+ # Uniformly fill tensor with values from [l, u], then translate to
254
+ # [2l-1, 2u-1].
255
+ tensor.uniform_(2 * l - 1, 2 * u - 1)
256
+
257
+ # Use inverse cdf transform for normal distribution to get truncated
258
+ # standard normal
259
+ tensor.erfinv_()
260
+
261
+ # Transform to proper mean, std
262
+ tensor.mul_(std * math.sqrt(2.0))
263
+ tensor.add_(mean)
264
+
265
+ # Clamp to ensure it's in the proper range
266
+ tensor.clamp_(min=a, max=b)
267
+ return tensor
268
+
269
+
270
+ def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0):
271
+ # type: (Tensor, float, float, float, float) -> Tensor
272
+ r"""Fills the input Tensor with values drawn from a truncated
273
+ normal distribution. The values are effectively drawn from the
274
+ normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
275
+ with values outside :math:`[a, b]` redrawn until they are within
276
+ the bounds. The method used for generating the random values works
277
+ best when :math:`a \leq \text{mean} \leq b`.
278
+ Args:
279
+ tensor: an n-dimensional `torch.Tensor`
280
+ mean: the mean of the normal distribution
281
+ std: the standard deviation of the normal distribution
282
+ a: the minimum cutoff value
283
+ b: the maximum cutoff value
284
+ Examples:
285
+ >>> w = torch.empty(3, 5)
286
+ >>> nn.init.trunc_normal_(w)
287
+ """
288
+ return _no_grad_trunc_normal_(tensor, mean, std, a, b)
289
+
290
+
291
+ def variance_scaling_(tensor, scale=1.0, mode="fan_in", distribution="normal"):
292
+ fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
293
+ if mode == "fan_in":
294
+ denom = fan_in
295
+ elif mode == "fan_out":
296
+ denom = fan_out
297
+ elif mode == "fan_avg":
298
+ denom = (fan_in + fan_out) / 2
299
+
300
+ variance = scale / denom
301
+
302
+ if distribution == "truncated_normal":
303
+ # constant is stddev of standard normal truncated to (-2, 2)
304
+ trunc_normal_(tensor, std=math.sqrt(variance) / 0.87962566103423978)
305
+ elif distribution == "normal":
306
+ tensor.normal_(std=math.sqrt(variance))
307
+ elif distribution == "uniform":
308
+ bound = math.sqrt(3 * variance)
309
+ tensor.uniform_(-bound, bound)
310
+ else:
311
+ raise ValueError(f"invalid distribution {distribution}")
312
+
313
+
314
+ def lecun_normal_(tensor):
315
+ variance_scaling_(tensor, mode="fan_in", distribution="truncated_normal")
316
+
317
+
318
+ def window_partition(x, window_size):
319
+ """
320
+ Args:
321
+ x: (B, H, W, C)
322
+ window_size (int): window size
323
+ Returns:
324
+ windows: (num_windows*B, window_size, window_size, C)
325
+ """
326
+ B, H, W, C = x.shape
327
+ x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
328
+ windows = (
329
+ x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
330
+ )
331
+ return windows
332
+
333
+
334
+ def window_reverse(windows, window_size, H, W):
335
+ """
336
+ Args:
337
+ windows: (num_windows*B, window_size, window_size, C)
338
+ window_size (int): Window size
339
+ H (int): Height of image
340
+ W (int): Width of image
341
+ Returns:
342
+ x: (B, H, W, C)
343
+ """
344
+ B = int(windows.shape[0] / (H * W / window_size / window_size))
345
+ x = windows.view(
346
+ B, H // window_size, W // window_size, window_size, window_size, -1
347
+ )
348
+ x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
349
+ return x
350
+
351
+
352
+ class WindowAttention(nn.Module):
353
+ r"""Window based multi-head self attention (W-MSA) module with relative position bias.
354
+ It supports both of shifted and non-shifted window.
355
+ Args:
356
+ dim (int): Number of input channels.
357
+ window_size (tuple[int]): The height and width of the window.
358
+ num_heads (int): Number of attention heads.
359
+ qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
360
+ qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
361
+ attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
362
+ proj_drop (float, optional): Dropout ratio of output. Default: 0.0
363
+ """
364
+
365
+ def __init__(
366
+ self,
367
+ dim,
368
+ window_size,
369
+ num_heads,
370
+ qkv_bias=True,
371
+ qk_scale=None,
372
+ attn_drop=0.0,
373
+ proj_drop=0.0,
374
+ ):
375
+
376
+ super().__init__()
377
+ self.dim = dim
378
+ self.window_size = window_size # Wh, Ww
379
+ self.num_heads = num_heads
380
+ head_dim = dim // num_heads
381
+ self.scale = qk_scale or head_dim**-0.5
382
+
383
+ # define a parameter table of relative position bias
384
+ self.relative_position_bias_table = nn.Parameter(
385
+ torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)
386
+ ) # 2*Wh-1 * 2*Ww-1, nH
387
+
388
+ # get pair-wise relative position index for each token inside the window
389
+ coords_h = torch.arange(self.window_size[0])
390
+ coords_w = torch.arange(self.window_size[1])
391
+ coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
392
+ coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
393
+ relative_coords = (
394
+ coords_flatten[:, :, None] - coords_flatten[:, None, :]
395
+ ) # 2, Wh*Ww, Wh*Ww
396
+ relative_coords = relative_coords.permute(
397
+ 1, 2, 0
398
+ ).contiguous() # Wh*Ww, Wh*Ww, 2
399
+ relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
400
+ relative_coords[:, :, 1] += self.window_size[1] - 1
401
+ relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
402
+ relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
403
+ self.register_buffer("relative_position_index", relative_position_index)
404
+
405
+ self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
406
+ self.attn_drop = nn.Dropout(attn_drop)
407
+ self.proj = nn.Linear(dim, dim)
408
+ self.proj_drop = nn.Dropout(proj_drop)
409
+
410
+ trunc_normal_(self.relative_position_bias_table, std=0.02)
411
+ self.softmax = nn.Softmax(dim=-1)
412
+
413
+ def forward(self, x, mask=None):
414
+ """
415
+ Args:
416
+ x: input features with shape of (num_windows*B, N, C)
417
+ mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
418
+ """
419
+ B_, N, C = x.shape
420
+ qkv = (
421
+ self.qkv(x)
422
+ .reshape(B_, N, 3, self.num_heads, C // self.num_heads)
423
+ .permute(2, 0, 3, 1, 4)
424
+ )
425
+ q, k, v = (
426
+ qkv[0],
427
+ qkv[1],
428
+ qkv[2],
429
+ ) # make torchscript happy (cannot use tensor as tuple)
430
+
431
+ q = q * self.scale
432
+ attn = q @ k.transpose(-2, -1)
433
+
434
+ relative_position_bias = self.relative_position_bias_table[
435
+ self.relative_position_index.view(-1)
436
+ ].view(
437
+ self.window_size[0] * self.window_size[1],
438
+ self.window_size[0] * self.window_size[1],
439
+ -1,
440
+ ) # Wh*Ww,Wh*Ww,nH
441
+ relative_position_bias = relative_position_bias.permute(
442
+ 2, 0, 1
443
+ ).contiguous() # nH, Wh*Ww, Wh*Ww
444
+ attn = attn + relative_position_bias.unsqueeze(0)
445
+
446
+ if mask is not None:
447
+ nW = mask.shape[0]
448
+ attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(
449
+ 1
450
+ ).unsqueeze(0)
451
+ attn = attn.view(-1, self.num_heads, N, N)
452
+ attn = self.softmax(attn)
453
+ else:
454
+ attn = self.softmax(attn)
455
+
456
+ attn = self.attn_drop(attn)
457
+
458
+ x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
459
+ x = self.proj(x)
460
+ x = self.proj_drop(x)
461
+ return x, attn
462
+
463
+ def extra_repr(self):
464
+ return f"dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}"
465
+
466
+
467
+ # We use the model based on Swintransformer Block, therefore we can use the swin-transformer pretrained model
468
+ class SwinTransformerBlock(nn.Module):
469
+ r"""Swin Transformer Block.
470
+ Args:
471
+ dim (int): Number of input channels.
472
+ input_resolution (tuple[int]): Input resulotion.
473
+ num_heads (int): Number of attention heads.
474
+ window_size (int): Window size.
475
+ shift_size (int): Shift size for SW-MSA.
476
+ mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
477
+ qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
478
+ qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
479
+ drop (float, optional): Dropout rate. Default: 0.0
480
+ attn_drop (float, optional): Attention dropout rate. Default: 0.0
481
+ drop_path (float, optional): Stochastic depth rate. Default: 0.0
482
+ act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
483
+ norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
484
+ """
485
+
486
+ def __init__(
487
+ self,
488
+ dim,
489
+ input_resolution,
490
+ num_heads,
491
+ window_size=7,
492
+ shift_size=0,
493
+ mlp_ratio=4.0,
494
+ qkv_bias=True,
495
+ qk_scale=None,
496
+ drop=0.0,
497
+ attn_drop=0.0,
498
+ drop_path=0.0,
499
+ act_layer=nn.GELU,
500
+ norm_layer=nn.LayerNorm,
501
+ norm_before_mlp="ln",
502
+ ):
503
+ super().__init__()
504
+ self.dim = dim
505
+ self.input_resolution = input_resolution
506
+ self.num_heads = num_heads
507
+ self.window_size = window_size
508
+ self.shift_size = shift_size
509
+ self.mlp_ratio = mlp_ratio
510
+ self.norm_before_mlp = norm_before_mlp
511
+ if min(self.input_resolution) <= self.window_size:
512
+ # if window size is larger than input resolution, we don't partition windows
513
+ self.shift_size = 0
514
+ self.window_size = min(self.input_resolution)
515
+ assert (
516
+ 0 <= self.shift_size < self.window_size
517
+ ), "shift_size must in 0-window_size"
518
+
519
+ self.norm1 = norm_layer(dim)
520
+ self.attn = WindowAttention(
521
+ dim,
522
+ window_size=to_2tuple(self.window_size),
523
+ num_heads=num_heads,
524
+ qkv_bias=qkv_bias,
525
+ qk_scale=qk_scale,
526
+ attn_drop=attn_drop,
527
+ proj_drop=drop,
528
+ )
529
+
530
+ self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
531
+ if self.norm_before_mlp == "ln":
532
+ self.norm2 = nn.LayerNorm(dim)
533
+ elif self.norm_before_mlp == "bn":
534
+ self.norm2 = lambda x: nn.BatchNorm1d(dim)(x.transpose(1, 2)).transpose(
535
+ 1, 2
536
+ )
537
+ else:
538
+ raise NotImplementedError
539
+ mlp_hidden_dim = int(dim * mlp_ratio)
540
+ self.mlp = Mlp(
541
+ in_features=dim,
542
+ hidden_features=mlp_hidden_dim,
543
+ act_layer=act_layer,
544
+ drop=drop,
545
+ )
546
+
547
+ if self.shift_size > 0:
548
+ # calculate attention mask for SW-MSA
549
+ H, W = self.input_resolution
550
+ img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1
551
+ h_slices = (
552
+ slice(0, -self.window_size),
553
+ slice(-self.window_size, -self.shift_size),
554
+ slice(-self.shift_size, None),
555
+ )
556
+ w_slices = (
557
+ slice(0, -self.window_size),
558
+ slice(-self.window_size, -self.shift_size),
559
+ slice(-self.shift_size, None),
560
+ )
561
+ cnt = 0
562
+ for h in h_slices:
563
+ for w in w_slices:
564
+ img_mask[:, h, w, :] = cnt
565
+ cnt += 1
566
+
567
+ mask_windows = window_partition(
568
+ img_mask, self.window_size
569
+ ) # nW, window_size, window_size, 1
570
+ mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
571
+ attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
572
+ attn_mask = attn_mask.masked_fill(
573
+ attn_mask != 0, float(-100.0)
574
+ ).masked_fill(attn_mask == 0, float(0.0))
575
+ else:
576
+ attn_mask = None
577
+
578
+ self.register_buffer("attn_mask", attn_mask)
579
+
580
+ def forward(self, x):
581
+ # pdb.set_trace()
582
+ H, W = self.input_resolution
583
+ # print("H: ", H)
584
+ # print("W: ", W)
585
+ # pdb.set_trace()
586
+ B, L, C = x.shape
587
+ # assert L == H * W, "input feature has wrong size"
588
+
589
+ shortcut = x
590
+ x = self.norm1(x)
591
+ x = x.view(B, H, W, C)
592
+
593
+ # cyclic shift
594
+ if self.shift_size > 0:
595
+ shifted_x = torch.roll(
596
+ x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)
597
+ )
598
+ else:
599
+ shifted_x = x
600
+
601
+ # partition windows
602
+ x_windows = window_partition(
603
+ shifted_x, self.window_size
604
+ ) # nW*B, window_size, window_size, C
605
+ x_windows = x_windows.view(
606
+ -1, self.window_size * self.window_size, C
607
+ ) # nW*B, window_size*window_size, C
608
+
609
+ # W-MSA/SW-MSA
610
+ attn_windows, attn = self.attn(
611
+ x_windows, mask=self.attn_mask
612
+ ) # nW*B, window_size*window_size, C
613
+
614
+ # merge windows
615
+ attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
616
+ shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C
617
+
618
+ # reverse cyclic shift
619
+ if self.shift_size > 0:
620
+ x = torch.roll(
621
+ shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)
622
+ )
623
+ else:
624
+ x = shifted_x
625
+ x = x.view(B, H * W, C)
626
+
627
+ # FFN
628
+ x = shortcut + self.drop_path(x)
629
+ x = x + self.drop_path(self.mlp(self.norm2(x)))
630
+
631
+ return x, attn
632
+
633
+ def extra_repr(self):
634
+ return (
635
+ f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, "
636
+ f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"
637
+ )
638
+
639
+
640
+ class PatchMerging(nn.Module):
641
+ r"""Patch Merging Layer.
642
+ Args:
643
+ input_resolution (tuple[int]): Resolution of input feature.
644
+ dim (int): Number of input channels.
645
+ norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
646
+ """
647
+
648
+ def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
649
+ super().__init__()
650
+ self.input_resolution = input_resolution
651
+ self.dim = dim
652
+ self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
653
+ self.norm = norm_layer(4 * dim)
654
+
655
+ def forward(self, x):
656
+ """
657
+ x: B, H*W, C
658
+ """
659
+ H, W = self.input_resolution
660
+ B, L, C = x.shape
661
+ assert L == H * W, "input feature has wrong size"
662
+ assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) are not even."
663
+
664
+ x = x.view(B, H, W, C)
665
+
666
+ x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C
667
+ x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C
668
+ x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C
669
+ x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C
670
+ x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C
671
+ x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C
672
+
673
+ x = self.norm(x)
674
+ x = self.reduction(x)
675
+
676
+ return x
677
+
678
+ def extra_repr(self):
679
+ return f"input_resolution={self.input_resolution}, dim={self.dim}"
680
+
681
+
682
+class BasicLayer(nn.Module):
+    """A basic Swin Transformer layer for one stage.
+    Args:
+        dim (int): Number of input channels.
+        input_resolution (tuple[int]): Input resolution.
+        depth (int): Number of blocks.
+        num_heads (int): Number of attention heads.
+        window_size (int): Local window size.
+        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
+        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
+        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
+        drop (float, optional): Dropout rate. Default: 0.0
+        attn_drop (float, optional): Attention dropout rate. Default: 0.0
+        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
+        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
+        downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
+        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
+    """
+
+    def __init__(
+        self,
+        dim,
+        input_resolution,
+        depth,
+        num_heads,
+        window_size,
+        mlp_ratio=4.0,
+        qkv_bias=True,
+        qk_scale=None,
+        drop=0.0,
+        attn_drop=0.0,
+        drop_path=0.0,
+        norm_layer=nn.LayerNorm,
+        downsample=None,
+        use_checkpoint=False,
+        norm_before_mlp="ln",
+    ):
+        super().__init__()
+        self.dim = dim
+        self.input_resolution = input_resolution
+        self.depth = depth
+        self.use_checkpoint = use_checkpoint
+
+        # build blocks
+        self.blocks = nn.ModuleList(
+            [
+                SwinTransformerBlock(
+                    dim=dim,
+                    input_resolution=input_resolution,
+                    num_heads=num_heads,
+                    window_size=window_size,
+                    shift_size=0 if (i % 2 == 0) else window_size // 2,
+                    mlp_ratio=mlp_ratio,
+                    qkv_bias=qkv_bias,
+                    qk_scale=qk_scale,
+                    drop=drop,
+                    attn_drop=attn_drop,
+                    drop_path=drop_path[i]
+                    if isinstance(drop_path, list)
+                    else drop_path,
+                    norm_layer=norm_layer,
+                    norm_before_mlp=norm_before_mlp,
+                )
+                for i in range(depth)
+            ]
+        )
+
+        # patch merging layer
+        if downsample is not None:
+            self.downsample = downsample(
+                input_resolution, dim=dim, norm_layer=norm_layer
+            )
+        else:
+            self.downsample = None
+
+    def forward(self, x):
+        attns = []
+        for blk in self.blocks:
+            if self.use_checkpoint:
+                # blk returns (x, attn); unpack both so x stays a tensor under checkpointing
+                x, attn = checkpoint.checkpoint(blk, x)
+            else:
+                x, attn = blk(x)
+                if not self.training:
+                    attns.append(attn.unsqueeze(0))
+        if self.downsample is not None:
+            x = self.downsample(x)
+        if not self.training:
+            attn = torch.cat(attns, dim=0)
+            attn = torch.mean(attn, dim=0)
+        return x, attn
+
+    def extra_repr(self):
+        return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
+
+
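Illustrative sketch of how a stage alternates plain and shifted windows (toy sizes, assumed values):

import torch

stage = BasicLayer(dim=96, input_resolution=(8, 8), depth=2, num_heads=4, window_size=4)
# even blocks are unshifted; odd blocks shift by window_size // 2
print([blk.shift_size for blk in stage.blocks])  # [0, 2]
x, attn = stage(torch.randn(2, 64, 96))
print(x.shape)  # torch.Size([2, 64, 96]); no downsample was configured here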
+# The Core of HTSAT
+class HTSAT_Swin_Transformer(nn.Module):
+    r"""HTSAT based on the Swin Transformer
+    Args:
+        spec_size (int | tuple(int)): Input Spectrogram size. Default 256
+        patch_size (int | tuple(int)): Patch size. Default: 4
+        patch_stride (int | tuple(int)): Patch Stride for Frequency and Time Axis. Default: 4
+        in_chans (int): Number of input image channels. Default: 1 (mono)
+        num_classes (int): Number of classes for classification head. Default: 527
+        embed_dim (int): Patch embedding dimension. Default: 96
+        depths (tuple(int)): Depth of each HTSAT-Swin Transformer layer.
+        num_heads (tuple(int)): Number of attention heads in different layers.
+        window_size (int): Window size. Default: 8
+        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
+        qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
+        qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None
+        drop_rate (float): Dropout rate. Default: 0
+        attn_drop_rate (float): Attention dropout rate. Default: 0
+        drop_path_rate (float): Stochastic depth rate. Default: 0.1
+        norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
+        ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
+        patch_norm (bool): If True, add normalization after patch embedding. Default: True
+        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
+        config (module): The configuration Module from config.py
+    """
+
+    def __init__(
+        self,
+        spec_size=256,
+        patch_size=4,
+        patch_stride=(4, 4),
+        in_chans=1,
+        num_classes=527,
+        embed_dim=96,
+        depths=[2, 2, 6, 2],
+        num_heads=[4, 8, 16, 32],
+        window_size=8,
+        mlp_ratio=4.0,
+        qkv_bias=True,
+        qk_scale=None,
+        drop_rate=0.0,
+        attn_drop_rate=0.0,
+        drop_path_rate=0.1,
+        norm_layer=nn.LayerNorm,
+        ape=False,
+        patch_norm=True,
+        use_checkpoint=False,
+        norm_before_mlp="ln",
+        config=None,
+        enable_fusion=False,
+        fusion_type="None",
+        **kwargs,
+    ):
+        super(HTSAT_Swin_Transformer, self).__init__()
+
+        self.config = config
+        self.spec_size = spec_size
+        self.patch_stride = patch_stride
+        self.patch_size = patch_size
+        self.window_size = window_size
+        self.embed_dim = embed_dim
+        self.depths = depths
+        self.ape = ape
+        self.in_chans = in_chans
+        self.num_classes = num_classes
+        self.num_heads = num_heads
+        self.num_layers = len(self.depths)
+        self.num_features = int(self.embed_dim * 2 ** (self.num_layers - 1))
+
+        self.drop_rate = drop_rate
+        self.attn_drop_rate = attn_drop_rate
+        self.drop_path_rate = drop_path_rate
+
+        self.qkv_bias = qkv_bias
+        self.qk_scale = None
+
+        self.patch_norm = patch_norm
+        self.norm_layer = norm_layer if self.patch_norm else None
+        self.norm_before_mlp = norm_before_mlp
+        self.mlp_ratio = mlp_ratio
+
+        self.use_checkpoint = use_checkpoint
+
+        self.enable_fusion = enable_fusion
+        self.fusion_type = fusion_type
+
+        # process mel-spec ; used only once
+        self.freq_ratio = self.spec_size // self.config.mel_bins
+        window = "hann"
+        center = True
+        pad_mode = "reflect"
+        ref = 1.0
+        amin = 1e-10
+        top_db = None
+        self.interpolate_ratio = 32  # Downsampled ratio
+        # Spectrogram extractor
+        self.spectrogram_extractor = Spectrogram(
+            n_fft=config.window_size,
+            hop_length=config.hop_size,
+            win_length=config.window_size,
+            window=window,
+            center=center,
+            pad_mode=pad_mode,
+            freeze_parameters=True,
+        )
+        # Logmel feature extractor
+        self.logmel_extractor = LogmelFilterBank(
+            sr=config.sample_rate,
+            n_fft=config.window_size,
+            n_mels=config.mel_bins,
+            fmin=config.fmin,
+            fmax=config.fmax,
+            ref=ref,
+            amin=amin,
+            top_db=top_db,
+            freeze_parameters=True,
+        )
+        # Spec augmenter
+        self.spec_augmenter = SpecAugmentation(
+            time_drop_width=64,
+            time_stripes_num=2,
+            freq_drop_width=8,
+            freq_stripes_num=2,
+        )  # 2 2
+        self.bn0 = nn.BatchNorm2d(self.config.mel_bins)
+
+        # split the spectrogram into non-overlapping patches
+        self.patch_embed = PatchEmbed(
+            img_size=self.spec_size,
+            patch_size=self.patch_size,
+            in_chans=self.in_chans,
+            embed_dim=self.embed_dim,
+            norm_layer=self.norm_layer,
+            patch_stride=patch_stride,
+            enable_fusion=self.enable_fusion,
+            fusion_type=self.fusion_type,
+        )
+
+        num_patches = self.patch_embed.num_patches
+        patches_resolution = self.patch_embed.grid_size
+        self.patches_resolution = patches_resolution
+
+        # absolute position embedding
+        if self.ape:
+            self.absolute_pos_embed = nn.Parameter(
+                torch.zeros(1, num_patches, self.embed_dim)
+            )
+            trunc_normal_(self.absolute_pos_embed, std=0.02)
+
+        self.pos_drop = nn.Dropout(p=self.drop_rate)
+
+        # stochastic depth
+        dpr = [
+            x.item() for x in torch.linspace(0, self.drop_path_rate, sum(self.depths))
+        ]  # stochastic depth decay rule
+
+        # build layers
+        self.layers = nn.ModuleList()
+        for i_layer in range(self.num_layers):
+            layer = BasicLayer(
+                dim=int(self.embed_dim * 2**i_layer),
+                input_resolution=(
+                    patches_resolution[0] // (2**i_layer),
+                    patches_resolution[1] // (2**i_layer),
+                ),
+                depth=self.depths[i_layer],
+                num_heads=self.num_heads[i_layer],
+                window_size=self.window_size,
+                mlp_ratio=self.mlp_ratio,
+                qkv_bias=self.qkv_bias,
+                qk_scale=self.qk_scale,
+                drop=self.drop_rate,
+                attn_drop=self.attn_drop_rate,
+                drop_path=dpr[
+                    sum(self.depths[:i_layer]) : sum(self.depths[: i_layer + 1])
+                ],
+                norm_layer=self.norm_layer,
+                downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
+                use_checkpoint=use_checkpoint,
+                norm_before_mlp=self.norm_before_mlp,
+            )
+            self.layers.append(layer)
+
+        self.norm = self.norm_layer(self.num_features)
+        self.avgpool = nn.AdaptiveAvgPool1d(1)
+        self.maxpool = nn.AdaptiveMaxPool1d(1)
+
+        SF = (
+            self.spec_size
+            // (2 ** (len(self.depths) - 1))
+            // self.patch_stride[0]
+            // self.freq_ratio
+        )
+        self.tscam_conv = nn.Conv2d(
+            in_channels=self.num_features,
+            out_channels=self.num_classes,
+            kernel_size=(SF, 3),
+            padding=(0, 1),
+        )
+        self.head = nn.Linear(num_classes, num_classes)
+
+        if (self.enable_fusion) and (
+            self.fusion_type in ["daf_1d", "aff_1d", "iaff_1d"]
+        ):
+            self.mel_conv1d = nn.Sequential(
+                nn.Conv1d(64, 64, kernel_size=5, stride=3, padding=2),
+                nn.BatchNorm1d(64),
+            )
+            if self.fusion_type == "daf_1d":
+                self.fusion_model = DAF()
+            elif self.fusion_type == "aff_1d":
+                self.fusion_model = AFF(channels=64, type="1D")
+            elif self.fusion_type == "iaff_1d":
+                self.fusion_model = iAFF(channels=64, type="1D")
+
+        self.apply(self._init_weights)
+
+    def _init_weights(self, m):
+        if isinstance(m, nn.Linear):
+            trunc_normal_(m.weight, std=0.02)
+            if isinstance(m, nn.Linear) and m.bias is not None:
+                nn.init.constant_(m.bias, 0)
+        elif isinstance(m, nn.LayerNorm):
+            nn.init.constant_(m.bias, 0)
+            nn.init.constant_(m.weight, 1.0)
+
+    @torch.jit.ignore
+    def no_weight_decay(self):
+        return {"absolute_pos_embed"}
+
+    @torch.jit.ignore
+    def no_weight_decay_keywords(self):
+        return {"relative_position_bias_table"}
+
+    def forward_features(self, x, longer_idx=None):
+        # A deprecated optimization for using a hierarchical output from different blocks
+
+        frames_num = x.shape[2]
+        x = self.patch_embed(x, longer_idx=longer_idx)
+        if self.ape:
+            x = x + self.absolute_pos_embed
+        x = self.pos_drop(x)
+        for i, layer in enumerate(self.layers):
+            x, attn = layer(x)
+        # for x
+        x = self.norm(x)
+        B, N, C = x.shape
+        SF = frames_num // (2 ** (len(self.depths) - 1)) // self.patch_stride[0]
+        ST = frames_num // (2 ** (len(self.depths) - 1)) // self.patch_stride[1]
+        x = x.permute(0, 2, 1).contiguous().reshape(B, C, SF, ST)
+        B, C, F, T = x.shape
+        # group 2D CNN
+        c_freq_bin = F // self.freq_ratio
+        x = x.reshape(B, C, F // c_freq_bin, c_freq_bin, T)
+        x = x.permute(0, 1, 3, 2, 4).contiguous().reshape(B, C, c_freq_bin, -1)
+        # get latent_output
+        fine_grained_latent_output = torch.mean(x, dim=2)
+        fine_grained_latent_output = interpolate(
+            fine_grained_latent_output.permute(0, 2, 1).contiguous(),
+            8 * self.patch_stride[1],
+        )
+
+        latent_output = self.avgpool(torch.flatten(x, 2))
+        latent_output = torch.flatten(latent_output, 1)
+
+        # display the attention map, if needed
+
+        x = self.tscam_conv(x)
+        x = torch.flatten(x, 2)  # B, C, T
+
+        fpx = interpolate(
+            torch.sigmoid(x).permute(0, 2, 1).contiguous(), 8 * self.patch_stride[1]
+        )
+
+        x = self.avgpool(x)
+        x = torch.flatten(x, 1)
+
+        output_dict = {
+            "framewise_output": fpx,  # already sigmoided
+            "clipwise_output": torch.sigmoid(x),
+            "fine_grained_embedding": fine_grained_latent_output,
+            "embedding": latent_output,
+        }
+
+        return output_dict
+
+    def crop_wav(self, x, crop_size, spe_pos=None):
+        time_steps = x.shape[2]
+        tx = torch.zeros(x.shape[0], x.shape[1], crop_size, x.shape[3]).to(x.device)
+        for i in range(len(x)):
+            if spe_pos is None:
+                crop_pos = random.randint(0, time_steps - crop_size - 1)
+            else:
+                crop_pos = spe_pos
+            tx[i][0] = x[i, 0, crop_pos : crop_pos + crop_size, :]
+        return tx
+
+    # Reshape the waveform spectrogram to an image-like size, if you want to use the pretrained swin transformer model
+    def reshape_wav2img(self, x):
+        B, C, T, F = x.shape
+        target_T = int(self.spec_size * self.freq_ratio)
+        target_F = self.spec_size // self.freq_ratio
+        assert (
+            T <= target_T and F <= target_F
+        ), "the wav size should be less than or equal to the swin input size"
+        # to avoid bicubic zero error
+        if T < target_T:
+            x = nn.functional.interpolate(
+                x, (target_T, x.shape[3]), mode="bicubic", align_corners=True
+            )
+        if F < target_F:
+            x = nn.functional.interpolate(
+                x, (x.shape[2], target_F), mode="bicubic", align_corners=True
+            )
+        x = x.permute(0, 1, 3, 2).contiguous()
+        x = x.reshape(
+            x.shape[0],
+            x.shape[1],
+            x.shape[2],
+            self.freq_ratio,
+            x.shape[3] // self.freq_ratio,
+        )
+        # print(x.shape)
+        x = x.permute(0, 1, 3, 2, 4).contiguous()
+        x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3], x.shape[4])
+        return x
+
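For intuition, the folding performed by reshape_wav2img, sketched with assumed values spec_size=256 and mel_bins=64 (so freq_ratio=4):

import torch

x = torch.randn(2, 1, 1024, 64)  # B, C, T, F: a long mel spectrogram
x = x.permute(0, 1, 3, 2).reshape(2, 1, 64, 4, 256)  # cut the time axis into freq_ratio chunks
x = x.permute(0, 1, 3, 2, 4).reshape(2, 1, 256, 256)  # stack chunks along frequency
print(x.shape)  # torch.Size([2, 1, 256, 256]): a square "image" for the Swin backbone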
+    # Repeat the waveform spectrogram to an image-like size, if you want to use the pretrained swin transformer model
+    def repeat_wat2img(self, x, cur_pos):
+        B, C, T, F = x.shape
+        target_T = int(self.spec_size * self.freq_ratio)
+        target_F = self.spec_size // self.freq_ratio
+        assert (
+            T <= target_T and F <= target_F
+        ), "the wav size should be less than or equal to the swin input size"
+        # to avoid bicubic zero error
+        if T < target_T:
+            x = nn.functional.interpolate(
+                x, (target_T, x.shape[3]), mode="bicubic", align_corners=True
+            )
+        if F < target_F:
+            x = nn.functional.interpolate(
+                x, (x.shape[2], target_F), mode="bicubic", align_corners=True
+            )
+        x = x.permute(0, 1, 3, 2).contiguous()  # B C F T
+        x = x[:, :, :, cur_pos : cur_pos + self.spec_size]
+        x = x.repeat(repeats=(1, 1, 4, 1))
+        return x
+
+    def forward(
+        self, x: torch.Tensor, mixup_lambda=None, infer_mode=False, device=None
+    ):  # out_feat_keys: List[str] = None):
+
+        if self.enable_fusion and x["longer"].sum() == 0:
+            # if no audio is longer than 10s, then randomly select one audio to be longer
+            x["longer"][torch.randint(0, x["longer"].shape[0], (1,))] = True
+
+        if not self.enable_fusion:
+            x = x["waveform"].to(device=device, non_blocking=True)
+            x = self.spectrogram_extractor(x)  # (batch_size, 1, time_steps, freq_bins)
+            x = self.logmel_extractor(x)  # (batch_size, 1, time_steps, mel_bins)
+            x = x.transpose(1, 3)
+            x = self.bn0(x)
+            x = x.transpose(1, 3)
+            if self.training:
+                x = self.spec_augmenter(x)
+
+            if self.training and mixup_lambda is not None:
+                x = do_mixup(x, mixup_lambda)
+
+            x = self.reshape_wav2img(x)
+            output_dict = self.forward_features(x)
+        else:
+            longer_list = x["longer"].to(device=device, non_blocking=True)
+            x = x["mel_fusion"].to(device=device, non_blocking=True)
+            x = x.transpose(1, 3)
+            x = self.bn0(x)
+            x = x.transpose(1, 3)
+            longer_list_idx = torch.where(longer_list)[0]
+            if self.fusion_type in ["daf_1d", "aff_1d", "iaff_1d"]:
+                new_x = x[:, 0:1, :, :].clone().contiguous()
+                if len(longer_list_idx) > 0:
+                    # local processing
+                    fusion_x_local = x[longer_list_idx, 1:, :, :].clone().contiguous()
+                    FB, FC, FT, FF = fusion_x_local.size()
+                    fusion_x_local = fusion_x_local.view(FB * FC, FT, FF)
+                    fusion_x_local = torch.permute(
+                        fusion_x_local, (0, 2, 1)
+                    ).contiguous()
+                    fusion_x_local = self.mel_conv1d(fusion_x_local)
+                    fusion_x_local = fusion_x_local.view(
+                        FB, FC, FF, fusion_x_local.size(-1)
+                    )
+                    fusion_x_local = (
+                        torch.permute(fusion_x_local, (0, 2, 1, 3))
+                        .contiguous()
+                        .flatten(2)
+                    )
+                    if fusion_x_local.size(-1) < FT:
+                        fusion_x_local = torch.cat(
+                            [
+                                fusion_x_local,
+                                torch.zeros(
+                                    (FB, FF, FT - fusion_x_local.size(-1)),
+                                    device=device,
+                                ),
+                            ],
+                            dim=-1,
+                        )
+                    else:
+                        fusion_x_local = fusion_x_local[:, :, :FT]
+                    # 1D fusion
+                    new_x = new_x.squeeze(1).permute((0, 2, 1)).contiguous()
+                    new_x[longer_list_idx] = self.fusion_model(
+                        new_x[longer_list_idx], fusion_x_local
+                    )
+                    x = new_x.permute((0, 2, 1)).contiguous()[:, None, :, :]
+                else:
+                    x = new_x
+
+            elif self.fusion_type in ["daf_2d", "aff_2d", "iaff_2d", "channel_map"]:
+                x = x  # no change
+
+            if self.training:
+                x = self.spec_augmenter(x)
+            if self.training and mixup_lambda is not None:
+                x = do_mixup(x, mixup_lambda)
+
+            x = self.reshape_wav2img(x)
+            output_dict = self.forward_features(x, longer_idx=longer_list_idx)
+
+        # if infer_mode:
+        #     # in infer mode, we need to handle audio input of different lengths
+        #     frame_num = x.shape[2]
+        #     target_T = int(self.spec_size * self.freq_ratio)
+        #     repeat_ratio = math.floor(target_T / frame_num)
+        #     x = x.repeat(repeats=(1,1,repeat_ratio,1))
+        #     x = self.reshape_wav2img(x)
+        #     output_dict = self.forward_features(x)
+        # else:
+        #     if x.shape[2] > self.freq_ratio * self.spec_size:
+        #         if self.training:
+        #             x = self.crop_wav(x, crop_size=self.freq_ratio * self.spec_size)
+        #             x = self.reshape_wav2img(x)
+        #             output_dict = self.forward_features(x)
+        #         else:
+        #             # Change: Hard code here
+        #             overlap_size = (x.shape[2] - 1) // 4
+        #             output_dicts = []
+        #             crop_size = (x.shape[2] - 1) // 2
+        #             for cur_pos in range(0, x.shape[2] - crop_size - 1, overlap_size):
+        #                 tx = self.crop_wav(x, crop_size=crop_size, spe_pos=cur_pos)
+        #                 tx = self.reshape_wav2img(tx)
+        #                 output_dicts.append(self.forward_features(tx))
+        #             clipwise_output = torch.zeros_like(output_dicts[0]["clipwise_output"]).float().to(x.device)
+        #             framewise_output = torch.zeros_like(output_dicts[0]["framewise_output"]).float().to(x.device)
+        #             for d in output_dicts:
+        #                 clipwise_output += d["clipwise_output"]
+        #                 framewise_output += d["framewise_output"]
+        #             clipwise_output = clipwise_output / len(output_dicts)
+        #             framewise_output = framewise_output / len(output_dicts)
+        #             output_dict = {
+        #                 'framewise_output': framewise_output,
+        #                 'clipwise_output': clipwise_output
+        #             }
+        #     else:  # this part is typically used, and the easiest one
+        #         x = self.reshape_wav2img(x)
+        #         output_dict = self.forward_features(x)
+        # x = self.head(x)
+
+        # We process the data in the dataloader; here we only consider the case input_T < fixed_T
+
+        return output_dict
+
+
+def create_htsat_model(audio_cfg, enable_fusion=False, fusion_type="None"):
+    try:
+        assert audio_cfg.model_name in [
+            "tiny",
+            "base",
+            "large",
+        ], "model name for HTS-AT is wrong!"
+        if audio_cfg.model_name == "tiny":
+            model = HTSAT_Swin_Transformer(
+                spec_size=256,
+                patch_size=4,
+                patch_stride=(4, 4),
+                num_classes=audio_cfg.class_num,
+                embed_dim=96,
+                depths=[2, 2, 6, 2],
+                num_heads=[4, 8, 16, 32],
+                window_size=8,
+                config=audio_cfg,
+                enable_fusion=enable_fusion,
+                fusion_type=fusion_type,
+            )
+        elif audio_cfg.model_name == "base":
+            model = HTSAT_Swin_Transformer(
+                spec_size=256,
+                patch_size=4,
+                patch_stride=(4, 4),
+                num_classes=audio_cfg.class_num,
+                embed_dim=128,
+                depths=[2, 2, 12, 2],
+                num_heads=[4, 8, 16, 32],
+                window_size=8,
+                config=audio_cfg,
+                enable_fusion=enable_fusion,
+                fusion_type=fusion_type,
+            )
+        elif audio_cfg.model_name == "large":
+            model = HTSAT_Swin_Transformer(
+                spec_size=256,
+                patch_size=4,
+                patch_stride=(4, 4),
+                num_classes=audio_cfg.class_num,
+                embed_dim=256,
+                depths=[2, 2, 12, 2],
+                num_heads=[4, 8, 16, 32],
+                window_size=8,
+                config=audio_cfg,
+                enable_fusion=enable_fusion,
+                fusion_type=fusion_type,
+            )
+
+        return model
+    except Exception:
+        raise RuntimeError(
+            f"Import Model for {audio_cfg.model_name} not found, or the audio cfg parameters are not enough."
+        )
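A hedged usage sketch for create_htsat_model; SimpleNamespace stands in for the real config object, and the field values mirror the CLAPAudioCfp defaults defined in model.py:

from types import SimpleNamespace

audio_cfg = SimpleNamespace(
    model_name="tiny", sample_rate=48000, window_size=1024, hop_size=1024,
    mel_bins=64, fmin=50, fmax=14000, class_num=527,
)
htsat = create_htsat_model(audio_cfg)  # enable_fusion defaults to False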
picoaudio/audioldm/clap/open_clip/linear_probe.py ADDED
@@ -0,0 +1,66 @@
+import numpy as np
+import torch.nn.functional as F
+from torch import nn
+from .model import MLPLayers
+
+
+class LinearProbe(nn.Module):
+    def __init__(self, model, mlp, freeze, in_ch, out_ch, act=None):
+        """
+        Args:
+            model: nn.Module
+            mlp: bool, if True, then use the MLP layer as the linear probe module
+            freeze: bool, if True, then freeze all the CLAP model's layers when training the linear probe
+            in_ch: int, the output channel from CLAP model
+            out_ch: int, the output channel from linear probe (class_num)
+            act: torch.nn.functional, the activation function before the loss function
+        """
+        super().__init__()
+        in_ch = 512  # hard-coded: overrides the in_ch argument
+        self.clap_model = model
+        self.clap_model.text_branch = None  # to save memory
+        self.freeze = freeze
+        if mlp:
+            self.lp_layer = MLPLayers(units=[in_ch, in_ch * 2, out_ch])
+        else:
+            self.lp_layer = nn.Linear(in_ch, out_ch)
+
+        if self.freeze:
+            for param in self.clap_model.parameters():
+                param.requires_grad = False
+
+        self.act = None  # default; also covers act=None and act == "None"
+        if act == "relu":
+            self.act = nn.ReLU()
+        elif act == "elu":
+            self.act = nn.ELU()
+        elif act == "prelu":
+            self.act = nn.PReLU(num_parameters=in_ch)
+        elif act == "softmax":
+            self.act = nn.Softmax(dim=-1)
+        elif act == "sigmoid":
+            self.act = nn.Sigmoid()
+
+    def forward(self, x, mix_lambda=None, device=None):
+        """
+        Args:
+            x: waveform, torch.tensor [batch, t_samples] / batch of mel_spec and longer list
+            mix_lambda: torch.tensor [batch], the mixup lambda
+        Returns:
+            class_prob: torch.tensor [batch, class_num]
+
+        """
+        # keep batchnorm in eval mode so the frozen branch's statistics receive no gradient updates
+        if self.freeze:
+            self.clap_model.eval()
+
+        x = self.clap_model.audio_projection(
+            self.clap_model.audio_branch(x, mixup_lambda=mix_lambda, device=device)[
+                "embedding"
+            ]
+        )
+        out = self.lp_layer(x)
+        if self.act is not None:
+            out = self.act(out)
+        return out
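An illustrative sketch of how this probe might be driven (the CLAP instance, device, and class count are assumptions, not fixed by this file):

# probe = LinearProbe(clap_model, mlp=True, freeze=True, in_ch=512, out_ch=527, act="sigmoid")
# probs = probe(batch, device="cuda")  # -> [batch, 527] sigmoid-activated class scores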
picoaudio/audioldm/clap/open_clip/loss.py ADDED
@@ -0,0 +1,398 @@
+import torch
+import torch.distributed.nn
+from torch import distributed as dist, nn as nn
+from torch.nn import functional as F
+import numpy as np
+from sklearn.metrics import average_precision_score, roc_auc_score, accuracy_score
+
+try:
+    import horovod.torch as hvd
+except ImportError:
+    hvd = None
+
+
+def gather_features(
+    audio_features,
+    text_features,
+    audio_features_mlp=None,
+    text_features_mlp=None,
+    local_loss=False,
+    gather_with_grad=False,
+    rank=0,
+    world_size=1,
+    use_horovod=False,
+    mlp_loss=False,
+):
+    if use_horovod:
+        assert hvd is not None, "Please install horovod"
+        if gather_with_grad:
+            all_audio_features = hvd.allgather(audio_features)
+            all_text_features = hvd.allgather(text_features)
+            if mlp_loss:
+                all_audio_features_mlp = hvd.allgather(audio_features_mlp)
+                all_text_features_mlp = hvd.allgather(text_features_mlp)
+        else:
+            with torch.no_grad():
+                all_audio_features = hvd.allgather(audio_features)
+                all_text_features = hvd.allgather(text_features)
+                if mlp_loss:
+                    all_audio_features_mlp = hvd.allgather(audio_features_mlp)
+                    all_text_features_mlp = hvd.allgather(text_features_mlp)
+            if not local_loss:
+                # ensure grads for local rank when all_* features don't have a gradient
+                gathered_audio_features = list(
+                    all_audio_features.chunk(world_size, dim=0)
+                )
+                gathered_text_features = list(
+                    all_text_features.chunk(world_size, dim=0)
+                )
+                gathered_audio_features[rank] = audio_features
+                gathered_text_features[rank] = text_features
+                all_audio_features = torch.cat(gathered_audio_features, dim=0)
+                all_text_features = torch.cat(gathered_text_features, dim=0)
+                if mlp_loss:
+                    gathered_audio_features_mlp = list(
+                        all_audio_features_mlp.chunk(world_size, dim=0)
+                    )
+                    gathered_text_features_mlp = list(
+                        all_text_features_mlp.chunk(world_size, dim=0)
+                    )
+                    gathered_audio_features_mlp[rank] = audio_features_mlp
+                    gathered_text_features_mlp[rank] = text_features_mlp
+                    all_audio_features_mlp = torch.cat(
+                        gathered_audio_features_mlp, dim=0
+                    )
+                    all_text_features_mlp = torch.cat(gathered_text_features_mlp, dim=0)
+    else:
+        # We gather tensors from all gpus
+        if gather_with_grad:
+            all_audio_features = torch.cat(
+                torch.distributed.nn.all_gather(audio_features), dim=0
+            )
+            all_text_features = torch.cat(
+                torch.distributed.nn.all_gather(text_features), dim=0
+            )
+            if mlp_loss:
+                all_audio_features_mlp = torch.cat(
+                    torch.distributed.nn.all_gather(audio_features_mlp), dim=0
+                )
+                all_text_features_mlp = torch.cat(
+                    torch.distributed.nn.all_gather(text_features_mlp), dim=0
+                )
+        else:
+            gathered_audio_features = [
+                torch.zeros_like(audio_features) for _ in range(world_size)
+            ]
+            gathered_text_features = [
+                torch.zeros_like(text_features) for _ in range(world_size)
+            ]
+            dist.all_gather(gathered_audio_features, audio_features)
+            dist.all_gather(gathered_text_features, text_features)
+            if mlp_loss:
+                gathered_audio_features_mlp = [
+                    torch.zeros_like(audio_features_mlp) for _ in range(world_size)
+                ]
+                gathered_text_features_mlp = [
+                    torch.zeros_like(text_features_mlp) for _ in range(world_size)
+                ]
+                dist.all_gather(gathered_audio_features_mlp, audio_features_mlp)
+                dist.all_gather(gathered_text_features_mlp, text_features_mlp)
+            if not local_loss:
+                # ensure grads for local rank when all_* features don't have a gradient
+                gathered_audio_features[rank] = audio_features
+                gathered_text_features[rank] = text_features
+                if mlp_loss:
+                    gathered_audio_features_mlp[rank] = audio_features_mlp
+                    gathered_text_features_mlp[rank] = text_features_mlp
+
+            all_audio_features = torch.cat(gathered_audio_features, dim=0)
+            all_text_features = torch.cat(gathered_text_features, dim=0)
+            if mlp_loss:
+                all_audio_features_mlp = torch.cat(gathered_audio_features_mlp, dim=0)
+                all_text_features_mlp = torch.cat(gathered_text_features_mlp, dim=0)
+    if mlp_loss:
+        return (
+            all_audio_features,
+            all_text_features,
+            all_audio_features_mlp,
+            all_text_features_mlp,
+        )
+    else:
+        return all_audio_features, all_text_features
+
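A comment sketch of the re-insertion trick above: gathered copies carry no gradient, so splicing the local shard back into its slot preserves this rank's autograd path (toy ranks and shapes assumed):

# world_size = 2, per-rank batch = 4  ->  gathered batch = 8
# on rank 1: all_feats = cat([feats_rank0 (no grad), feats_rank1 (local, with grad)])
# the loss then backpropagates only through this rank's 4 rows, as intended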
+
+
+class ClipLoss(nn.Module):
+    def __init__(
+        self,
+        local_loss=False,
+        gather_with_grad=False,
+        cache_labels=False,
+        rank=0,
+        world_size=1,
+        use_horovod=False,
+        mlp_loss=False,
+        weight_loss_kappa=0,
+    ):
+        super().__init__()
+        self.local_loss = local_loss
+        self.gather_with_grad = gather_with_grad
+        self.cache_labels = cache_labels
+        self.rank = rank
+        self.world_size = world_size
+        self.use_horovod = use_horovod
+        self.mlp_loss = mlp_loss
+        self.weighted_loss = bool(weight_loss_kappa != 0)
+        self.weight_loss_kappa = weight_loss_kappa
+        # cache state
+        self.prev_num_logits = 0
+        self.labels = {}
+
+    def forward(
+        self,
+        audio_features,
+        text_features,
+        logit_scale_a,
+        logit_scale_t=None,
+        audio_features_mlp=None,
+        text_features_mlp=None,
+    ):
+        device = audio_features.device
+        if self.mlp_loss:
+            if self.world_size > 1:
+                (
+                    all_audio_features,
+                    all_text_features,
+                    all_audio_features_mlp,
+                    all_text_features_mlp,
+                ) = gather_features(
+                    audio_features=audio_features,
+                    text_features=text_features,
+                    audio_features_mlp=audio_features_mlp,
+                    text_features_mlp=text_features_mlp,
+                    local_loss=self.local_loss,
+                    gather_with_grad=self.gather_with_grad,
+                    rank=self.rank,
+                    world_size=self.world_size,
+                    use_horovod=self.use_horovod,
+                    mlp_loss=self.mlp_loss,
+                )
+                if self.local_loss:
+                    a_logits_per_audio = (
+                        logit_scale_a * audio_features @ all_text_features_mlp.T
+                    )
+                    a_logits_per_text = (
+                        logit_scale_a * text_features_mlp @ all_audio_features.T
+                    )
+                    t_logits_per_audio = (
+                        logit_scale_t * audio_features_mlp @ all_text_features.T
+                    )
+                    t_logits_per_text = (
+                        logit_scale_t * text_features @ all_audio_features_mlp.T
+                    )
+                else:
+                    a_logits_per_audio = (
+                        logit_scale_a * all_audio_features @ all_text_features_mlp.T
+                    )
+                    a_logits_per_text = a_logits_per_audio.T
+                    t_logits_per_audio = (
+                        logit_scale_t * all_audio_features_mlp @ all_text_features.T
+                    )
+                    t_logits_per_text = t_logits_per_audio.T
+            else:
+                a_logits_per_audio = (
+                    logit_scale_a * audio_features @ text_features_mlp.T
+                )
+                a_logits_per_text = logit_scale_a * text_features_mlp @ audio_features.T
+                t_logits_per_audio = (
+                    logit_scale_t * audio_features_mlp @ text_features.T
+                )
+                t_logits_per_text = logit_scale_t * text_features @ audio_features_mlp.T
+
+            # calculate the ground-truth labels and cache them if enabled
+            num_logits = a_logits_per_audio.shape[0]
+            if self.prev_num_logits != num_logits or device not in self.labels:
+                labels = torch.arange(num_logits, device=device, dtype=torch.long)
+                if self.world_size > 1 and self.local_loss:
+                    labels = labels + num_logits * self.rank
+                if self.cache_labels:
+                    self.labels[device] = labels
+                    self.prev_num_logits = num_logits
+            else:
+                labels = self.labels[device]
+
+            if not self.weighted_loss:
+                total_loss = (
+                    F.cross_entropy(a_logits_per_audio, labels)
+                    + F.cross_entropy(a_logits_per_text, labels)
+                    + F.cross_entropy(t_logits_per_audio, labels)
+                    + F.cross_entropy(t_logits_per_text, labels)
+                ) / 4
+            else:
+                audio_weight = (audio_features @ audio_features.T).detach()
+                audio_weight = (
+                    torch.exp(
+                        torch.sum(audio_weight, axis=1)
+                        / (self.weight_loss_kappa * len(audio_weight))
+                    )
+                ).detach()
+                text_weight = (text_features @ text_features.T).detach()
+                text_weight = (
+                    torch.exp(
+                        torch.sum(text_weight, axis=1)
+                        / (self.weight_loss_kappa * len(text_features))
+                    )
+                ).detach()
+                total_loss = (
+                    F.cross_entropy(a_logits_per_audio, labels, weight=audio_weight)
+                    + F.cross_entropy(a_logits_per_text, labels, weight=audio_weight)
+                    + F.cross_entropy(t_logits_per_audio, labels, weight=text_weight)
+                    + F.cross_entropy(t_logits_per_text, labels, weight=text_weight)
+                ) / 4
+        else:
+            if self.world_size > 1:
+                all_audio_features, all_text_features = gather_features(
+                    audio_features=audio_features,
+                    text_features=text_features,
+                    local_loss=self.local_loss,
+                    gather_with_grad=self.gather_with_grad,
+                    rank=self.rank,
+                    world_size=self.world_size,
+                    use_horovod=self.use_horovod,
+                    mlp_loss=self.mlp_loss,
+                )
+
+                if self.local_loss:
+                    logits_per_audio = (
+                        logit_scale_a * audio_features @ all_text_features.T
+                    )
+                    logits_per_text = (
+                        logit_scale_a * text_features @ all_audio_features.T
+                    )
+                else:
+                    logits_per_audio = (
+                        logit_scale_a * all_audio_features @ all_text_features.T
+                    )
+                    logits_per_text = logits_per_audio.T
+            else:
+                logits_per_audio = logit_scale_a * audio_features @ text_features.T
+                logits_per_text = logit_scale_a * text_features @ audio_features.T
+
+            # calculate the ground-truth labels and cache them if enabled
+            num_logits = logits_per_audio.shape[0]
+            if self.prev_num_logits != num_logits or device not in self.labels:
+                labels = torch.arange(num_logits, device=device, dtype=torch.long)
+                if self.world_size > 1 and self.local_loss:
+                    labels = labels + num_logits * self.rank
+                if self.cache_labels:
+                    self.labels[device] = labels
+                    self.prev_num_logits = num_logits
+            else:
+                labels = self.labels[device]
+            if not self.weighted_loss:
+                total_loss = (
+                    F.cross_entropy(logits_per_audio, labels)
+                    + F.cross_entropy(logits_per_text, labels)
+                ) / 2
+            else:
+                audio_weight = (all_audio_features @ all_audio_features.T).detach()
+                audio_weight = (
+                    torch.exp(
+                        torch.sum(audio_weight, axis=1)
+                        / (self.weight_loss_kappa * len(all_audio_features))
+                    )
+                ).detach()
+                text_weight = (all_text_features @ all_text_features.T).detach()
+                text_weight = (
+                    torch.exp(
+                        torch.sum(text_weight, axis=1)
+                        / (self.weight_loss_kappa * len(all_text_features))
+                    )
+                ).detach()
+                total_loss = (
+                    F.cross_entropy(logits_per_audio, labels, weight=text_weight)
+                    + F.cross_entropy(logits_per_text, labels, weight=audio_weight)
+                ) / 2
+        return total_loss
+
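A minimal single-process usage sketch (an assumed batch of 8 and 512-dim embeddings; 1/0.07 matches the exponentiated logit_scale initialization used in model.py):

import torch
import torch.nn.functional as F

loss_fn = ClipLoss()  # world_size=1: no cross-GPU gathering
audio = F.normalize(torch.randn(8, 512), dim=-1)
text = F.normalize(torch.randn(8, 512), dim=-1)
loss = loss_fn(audio, text, logit_scale_a=torch.tensor(1 / 0.07))  # symmetric CE over the 8x8 similarity matrix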
+
+def lp_gather_features(pred, target, world_size=1, use_horovod=False):
+    if use_horovod:
+        assert hvd is not None, "Please install horovod"
+        with torch.no_grad():
+            all_preds = hvd.allgather(pred)
+            all_targets = hvd.allgather(target)  # fixed typo: was hvd.allgath
+    else:
+        gathered_preds = [torch.zeros_like(pred) for _ in range(world_size)]
+        gathered_targets = [torch.zeros_like(target) for _ in range(world_size)]
+
+        dist.all_gather(gathered_preds, pred)
+        dist.all_gather(gathered_targets, target)
+        all_preds = torch.cat(gathered_preds, dim=0)
+        all_targets = torch.cat(gathered_targets, dim=0)
+
+    return all_preds, all_targets
+
+
+def get_map(pred, target):
+    pred = torch.sigmoid(pred).numpy()
+    target = target.numpy()
+    return np.mean(average_precision_score(target, pred, average=None))
+
+
+def get_acc(pred, target):
+    pred = torch.argmax(pred, 1).numpy()
+    target = torch.argmax(target, 1).numpy()
+    return accuracy_score(target, pred)
+
+
+def get_mauc(pred, target):
+    pred = torch.sigmoid(pred).numpy()
+    target = target.numpy()
+    return np.mean(roc_auc_score(target, pred, average=None))
+
+
+class LPMetrics(object):
+    def __init__(self, metric_names=["map", "acc", "mauc"]):
+        self.metrics = []
+        for name in metric_names:
+            self.metrics.append(self.get_metric(name))
+        self.metric_names = metric_names
+
+    def get_metric(self, name):
+        if name == "map":
+            return get_map
+        elif name == "acc":
+            return get_acc
+        elif name == "mauc":
+            return get_mauc
+        else:
+            raise ValueError("the metric should be one of [map, acc, mauc]")
+
+    def evaluate_mertics(self, pred, target):  # (sic) method name kept for API compatibility
+        metric_dict = {}
+        for i in range(len(self.metric_names)):
+            metric_dict[self.metric_names[i]] = self.metrics[i](pred, target)
+        return metric_dict
+
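Illustrative evaluation sketch (random logits and one-hot targets; "acc" is chosen so the toy data is always valid for the underlying sklearn call):

import torch

metrics = LPMetrics(metric_names=["acc"])
pred = torch.randn(16, 527)
target = torch.nn.functional.one_hot(torch.randint(0, 527, (16,)), 527).float()
print(metrics.evaluate_mertics(pred, target))  # e.g. {'acc': 0.0}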
+
+def calc_celoss(pred, target):
+    target = torch.argmax(target, 1).long()
+    return nn.CrossEntropyLoss()(pred, target)
+
+
+class LPLoss(nn.Module):
+    def __init__(self, loss_name):
+        super().__init__()
+        if loss_name == "bce":
+            self.loss_func = nn.BCEWithLogitsLoss()
+        elif loss_name == "ce":
+            self.loss_func = calc_celoss
+        elif loss_name == "mse":
+            self.loss_func = nn.MSELoss()
+        else:
+            raise ValueError("the loss func should be one of [bce, ce, mse]")
+
+    def forward(self, pred, target):
+        loss = self.loss_func(pred, target)
+        return loss
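And a matching sketch for the loss side (assumed multi-label setup):

import torch

criterion = LPLoss("bce")
loss = criterion(torch.randn(4, 527), torch.randint(0, 2, (4, 527)).float())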
picoaudio/audioldm/clap/open_clip/model.py ADDED
@@ -0,0 +1,936 @@
+""" CLAP Model
+
+Adapted from CLIP: https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
+Adapted to the Audio Task.
+"""
+
+from collections import OrderedDict
+from dataclasses import dataclass
+from typing import Tuple, Union, Callable, Optional
+
+import numpy as np
+import torch
+import torch.nn.functional as F
+from torch import nn
+
+from .timm_model import TimmModel
+import logging
+from .utils import freeze_batch_norm_2d
+
+from .pann_model import create_pann_model
+from .htsat import create_htsat_model
+from transformers import BertModel, RobertaModel, BartModel
+from transformers.tokenization_utils_base import BatchEncoding
+
+
+class MLPLayers(nn.Module):
+    def __init__(self, units=[512, 512, 512], nonlin=nn.ReLU(), dropout=0.1):
+        super(MLPLayers, self).__init__()
+        self.nonlin = nonlin
+        self.dropout = dropout
+
+        sequence = []
+        for u0, u1 in zip(units[:-1], units[1:]):
+            sequence.append(nn.Linear(u0, u1))
+            sequence.append(self.nonlin)
+            sequence.append(nn.Dropout(self.dropout))
+        sequence = sequence[:-2]
+
+        self.sequential = nn.Sequential(*sequence)
+
+    def forward(self, X):
+        X = self.sequential(X)
+        return X
+
+
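Quick illustration of the trimming above: the final nonlinearity/dropout pair is dropped, so the stack ends in a plain nn.Linear (toy input assumed):

import torch

mlp = MLPLayers(units=[512, 1024, 512])
print(mlp.sequential)  # Linear -> ReLU -> Dropout -> Linear
print(mlp(torch.randn(4, 512)).shape)  # torch.Size([4, 512])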
+ class Bottleneck(nn.Module):
48
+ expansion = 4
49
+
50
+ def __init__(self, inplanes, planes, stride=1):
51
+ super().__init__()
52
+
53
+ # all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
54
+ self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
55
+ self.bn1 = nn.BatchNorm2d(planes)
56
+
57
+ self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
58
+ self.bn2 = nn.BatchNorm2d(planes)
59
+
60
+ self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
61
+
62
+ self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
63
+ self.bn3 = nn.BatchNorm2d(planes * self.expansion)
64
+
65
+ self.relu = nn.ReLU(inplace=True)
66
+ self.downsample = None
67
+ self.stride = stride
68
+
69
+ if stride > 1 or inplanes != planes * Bottleneck.expansion:
70
+ # downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
71
+ self.downsample = nn.Sequential(
72
+ OrderedDict(
73
+ [
74
+ ("-1", nn.AvgPool2d(stride)),
75
+ (
76
+ "0",
77
+ nn.Conv2d(
78
+ inplanes,
79
+ planes * self.expansion,
80
+ 1,
81
+ stride=1,
82
+ bias=False,
83
+ ),
84
+ ),
85
+ ("1", nn.BatchNorm2d(planes * self.expansion)),
86
+ ]
87
+ )
88
+ )
89
+
90
+ def forward(self, x: torch.Tensor):
91
+ identity = x
92
+
93
+ out = self.relu(self.bn1(self.conv1(x)))
94
+ out = self.relu(self.bn2(self.conv2(out)))
95
+ out = self.avgpool(out)
96
+ out = self.bn3(self.conv3(out))
97
+
98
+ if self.downsample is not None:
99
+ identity = self.downsample(x)
100
+
101
+ out += identity
102
+ out = self.relu(out)
103
+ return out
104
+
105
+
106
+ class AttentionPool2d(nn.Module):
107
+ def __init__(
108
+ self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None
109
+ ):
110
+ super().__init__()
111
+ self.positional_embedding = nn.Parameter(
112
+ torch.randn(spacial_dim**2 + 1, embed_dim) / embed_dim**0.5
113
+ )
114
+ self.k_proj = nn.Linear(embed_dim, embed_dim)
115
+ self.q_proj = nn.Linear(embed_dim, embed_dim)
116
+ self.v_proj = nn.Linear(embed_dim, embed_dim)
117
+ self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
118
+ self.num_heads = num_heads
119
+
120
+ def forward(self, x):
121
+ x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(
122
+ 2, 0, 1
123
+ ) # NCHW -> (HW)NC
124
+ x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC
125
+ x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC
126
+ x, _ = F.multi_head_attention_forward(
127
+ query=x,
128
+ key=x,
129
+ value=x,
130
+ embed_dim_to_check=x.shape[-1],
131
+ num_heads=self.num_heads,
132
+ q_proj_weight=self.q_proj.weight,
133
+ k_proj_weight=self.k_proj.weight,
134
+ v_proj_weight=self.v_proj.weight,
135
+ in_proj_weight=None,
136
+ in_proj_bias=torch.cat(
137
+ [self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]
138
+ ),
139
+ bias_k=None,
140
+ bias_v=None,
141
+ add_zero_attn=False,
142
+ dropout_p=0,
143
+ out_proj_weight=self.c_proj.weight,
144
+ out_proj_bias=self.c_proj.bias,
145
+ use_separate_proj_weight=True,
146
+ training=self.training,
147
+ need_weights=False,
148
+ )
149
+
150
+ return x[0]
151
+
152
+
153
+ class ModifiedResNet(nn.Module):
154
+ """
155
+ A ResNet class that is similar to torchvision's but contains the following changes:
156
+ - There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
157
+ - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
158
+ - The final pooling layer is a QKV attention instead of an average pool
159
+ """
160
+
161
+ def __init__(self, layers, output_dim, heads, image_size=224, width=64):
162
+ super().__init__()
163
+ self.output_dim = output_dim
164
+ self.image_size = image_size
165
+
166
+ # the 3-layer stem
167
+ self.conv1 = nn.Conv2d(
168
+ 3, width // 2, kernel_size=3, stride=2, padding=1, bias=False
169
+ )
170
+ self.bn1 = nn.BatchNorm2d(width // 2)
171
+ self.conv2 = nn.Conv2d(
172
+ width // 2, width // 2, kernel_size=3, padding=1, bias=False
173
+ )
174
+ self.bn2 = nn.BatchNorm2d(width // 2)
175
+ self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
176
+ self.bn3 = nn.BatchNorm2d(width)
177
+ self.avgpool = nn.AvgPool2d(2)
178
+ self.relu = nn.ReLU(inplace=True)
179
+
180
+ # residual layers
181
+ self._inplanes = width # this is a *mutable* variable used during construction
182
+ self.layer1 = self._make_layer(width, layers[0])
183
+ self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
184
+ self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
185
+ self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
186
+
187
+ embed_dim = width * 32 # the ResNet feature dimension
188
+ self.attnpool = AttentionPool2d(image_size // 32, embed_dim, heads, output_dim)
189
+
190
+ self.init_parameters()
191
+
192
+ def _make_layer(self, planes, blocks, stride=1):
193
+ layers = [Bottleneck(self._inplanes, planes, stride)]
194
+
195
+ self._inplanes = planes * Bottleneck.expansion
196
+ for _ in range(1, blocks):
197
+ layers.append(Bottleneck(self._inplanes, planes))
198
+
199
+ return nn.Sequential(*layers)
200
+
201
+ def init_parameters(self):
202
+ if self.attnpool is not None:
203
+ std = self.attnpool.c_proj.in_features**-0.5
204
+ nn.init.normal_(self.attnpool.q_proj.weight, std=std)
205
+ nn.init.normal_(self.attnpool.k_proj.weight, std=std)
206
+ nn.init.normal_(self.attnpool.v_proj.weight, std=std)
207
+ nn.init.normal_(self.attnpool.c_proj.weight, std=std)
208
+
209
+ for resnet_block in [self.layer1, self.layer2, self.layer3, self.layer4]:
210
+ for name, param in resnet_block.named_parameters():
211
+ if name.endswith("bn3.weight"):
212
+ nn.init.zeros_(param)
213
+
214
+ def lock(self, unlocked_groups=0, freeze_bn_stats=False):
215
+ assert (
216
+ unlocked_groups == 0
217
+ ), "partial locking not currently supported for this model"
218
+ for param in self.parameters():
219
+ param.requires_grad = False
220
+ if freeze_bn_stats:
221
+ freeze_batch_norm_2d(self)
222
+
223
+ def stem(self, x):
224
+ for conv, bn in [
225
+ (self.conv1, self.bn1),
226
+ (self.conv2, self.bn2),
227
+ (self.conv3, self.bn3),
228
+ ]:
229
+ x = self.relu(bn(conv(x)))
230
+ x = self.avgpool(x)
231
+ return x
232
+
233
+ def forward(self, x):
234
+ x = self.stem(x)
235
+ x = self.layer1(x)
236
+ x = self.layer2(x)
237
+ x = self.layer3(x)
238
+ x = self.layer4(x)
239
+ x = self.attnpool(x)
240
+
241
+ return x
242
+
243
+
244
+ class LayerNorm(nn.LayerNorm):
245
+ """Subclass torch's LayerNorm to handle fp16."""
246
+
247
+ def forward(self, x: torch.Tensor):
248
+ orig_type = x.dtype
249
+ x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
250
+ return x.to(orig_type)
251
+
252
+
253
+ class QuickGELU(nn.Module):
254
+ # NOTE This is slower than nn.GELU or nn.SiLU and uses more GPU memory
255
+ def forward(self, x: torch.Tensor):
256
+ return x * torch.sigmoid(1.702 * x)
257
+
258
+
259
+ class ResidualAttentionBlock(nn.Module):
260
+ def __init__(self, d_model: int, n_head: int, act_layer: Callable = nn.GELU):
261
+ super().__init__()
262
+
263
+ self.attn = nn.MultiheadAttention(d_model, n_head)
264
+ self.ln_1 = LayerNorm(d_model)
265
+ self.mlp = nn.Sequential(
266
+ OrderedDict(
267
+ [
268
+ ("c_fc", nn.Linear(d_model, d_model * 4)),
269
+ ("gelu", act_layer()),
270
+ ("c_proj", nn.Linear(d_model * 4, d_model)),
271
+ ]
272
+ )
273
+ )
274
+ self.ln_2 = LayerNorm(d_model)
275
+
276
+ def attention(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
277
+ return self.attn(x, x, x, need_weights=False, attn_mask=attn_mask)[0]
278
+
279
+ def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
280
+ x = x + self.attention(self.ln_1(x), attn_mask=attn_mask)
281
+ x = x + self.mlp(self.ln_2(x))
282
+ return x
283
+
284
+
285
+ class Transformer(nn.Module):
286
+ def __init__(
287
+ self, width: int, layers: int, heads: int, act_layer: Callable = nn.GELU
288
+ ):
289
+ super().__init__()
290
+ self.width = width
291
+ self.layers = layers
292
+ self.resblocks = nn.ModuleList(
293
+ [
294
+ ResidualAttentionBlock(width, heads, act_layer=act_layer)
295
+ for _ in range(layers)
296
+ ]
297
+ )
298
+
299
+ def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
300
+ for r in self.resblocks:
301
+ x = r(x, attn_mask=attn_mask)
302
+ return x
303
+
304
+
305
+ class VisualTransformer(nn.Module):
306
+ def __init__(
307
+ self,
308
+ image_size: int,
309
+ patch_size: int,
310
+ width: int,
311
+ layers: int,
312
+ heads: int,
313
+ output_dim: int,
314
+ act_layer: Callable = nn.GELU,
315
+ ):
316
+ super().__init__()
317
+ self.image_size = image_size
318
+ self.output_dim = output_dim
319
+ self.conv1 = nn.Conv2d(
320
+ in_channels=3,
321
+ out_channels=width,
322
+ kernel_size=patch_size,
323
+ stride=patch_size,
324
+ bias=False,
325
+ )
326
+
327
+ scale = width**-0.5
328
+ self.class_embedding = nn.Parameter(scale * torch.randn(width))
329
+ self.positional_embedding = nn.Parameter(
330
+ scale * torch.randn((image_size // patch_size) ** 2 + 1, width)
331
+ )
332
+ self.ln_pre = LayerNorm(width)
333
+
334
+ self.text_branch = Transformer(width, layers, heads, act_layer=act_layer)
335
+
336
+ self.ln_post = LayerNorm(width)
337
+ self.proj = nn.Parameter(scale * torch.randn(width, output_dim))
338
+
339
+ def lock(self, unlocked_groups=0, freeze_bn_stats=False):
340
+ assert (
341
+ unlocked_groups == 0
342
+ ), "partial locking not currently supported for this model"
343
+ for param in self.parameters():
344
+ param.requires_grad = False
345
+
346
+ def forward(self, x: torch.Tensor):
347
+ x = self.conv1(x) # shape = [*, width, grid, grid]
348
+ x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
349
+ x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
350
+ x = torch.cat(
351
+ [
352
+ self.class_embedding.to(x.dtype)
353
+ + torch.zeros(
354
+ x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device
355
+ ),
356
+ x,
357
+ ],
358
+ dim=1,
359
+ ) # shape = [*, grid ** 2 + 1, width]
360
+ x = x + self.positional_embedding.to(x.dtype)
361
+ x = self.ln_pre(x)
362
+
363
+ x = x.permute(1, 0, 2) # NLD -> LND
364
+ x = self.text_branch(x)
365
+ x = x.permute(1, 0, 2) # LND -> NLD
366
+
367
+ x = self.ln_post(x[:, 0, :])
368
+
369
+ if self.proj is not None:
370
+ x = x @ self.proj
371
+
372
+ return x
373
+
374
+
375
+ @dataclass
376
+ class CLAPVisionCfg:
377
+ layers: Union[Tuple[int, int, int, int], int] = 12
378
+ width: int = 768
379
+ patch_size: int = 16
380
+ image_size: Union[Tuple[int, int], int] = 224
381
+ timm_model_name: str = (
382
+ None # a valid model name overrides layers, width, patch_size
383
+ )
384
+ timm_model_pretrained: bool = (
385
+ False # use (imagenet) pretrained weights for named model
386
+ )
387
+ timm_pool: str = (
388
+ "avg" # feature pooling for timm model ('abs_attn', 'rot_attn', 'avg', '')
389
+ )
390
+ timm_proj: str = (
391
+ "linear" # linear projection for timm model output ('linear', 'mlp', '')
392
+ )
393
+
394
+
395
+ # Audio Config Class
396
+ @dataclass
397
+ class CLAPAudioCfp:
398
+ model_type: str = "PANN"
399
+ model_name: str = "Cnn14"
400
+ sample_rate: int = 48000
401
+ # Param
402
+ audio_length: int = 1024
403
+ window_size: int = 1024
404
+ hop_size: int = 1024
405
+ fmin: int = 50
406
+ fmax: int = 14000
407
+ class_num: int = 527
408
+ mel_bins: int = 64
409
+ clip_samples: int = 480000
410
+
411
+
412
+ @dataclass
413
+ class CLAPTextCfg:
414
+ context_length: int
415
+ vocab_size: int
416
+ width: int
417
+ heads: int
418
+ layers: int
419
+ model_type: str
420
+
421
+
422
+ class CLAP(nn.Module):
423
+ def __init__(
424
+ self,
425
+ embed_dim: int,
426
+ audio_cfg: CLAPAudioCfp,
427
+ text_cfg: CLAPTextCfg,
428
+ quick_gelu: bool = False,
429
+ enable_fusion: bool = False,
430
+ fusion_type: str = "None",
431
+ joint_embed_shape: int = 512,
432
+ mlp_act: str = "relu",
433
+ ):
434
+ super().__init__()
435
+ if isinstance(audio_cfg, dict):
436
+ audio_cfg = CLAPAudioCfp(**audio_cfg)
437
+ if isinstance(text_cfg, dict):
438
+ text_cfg = CLAPTextCfg(**text_cfg)
439
+
440
+ self.audio_cfg = audio_cfg
441
+ self.text_cfg = text_cfg
442
+ self.enable_fusion = enable_fusion
443
+ self.fusion_type = fusion_type
444
+ self.joint_embed_shape = joint_embed_shape
445
+ self.mlp_act = mlp_act
446
+
447
+ self.context_length = text_cfg.context_length
448
+
449
+ # OpenAI models are pretrained w/ QuickGELU but native nn.GELU is both faster and more
450
+ # memory efficient in recent PyTorch releases (>= 1.10).
451
+ # NOTE: timm models always use native GELU regardless of quick_gelu flag.
452
+ act_layer = QuickGELU if quick_gelu else nn.GELU
453
+
454
+ if mlp_act == "relu":
455
+ mlp_act_layer = nn.ReLU()
456
+ elif mlp_act == "gelu":
457
+ mlp_act_layer = nn.GELU()
458
+ else:
459
+ raise NotImplementedError
460
+
461
+ # audio branch
462
+ # audio branch parameters
463
+ if audio_cfg.model_type == "PANN":
464
+ self.audio_branch = create_pann_model(audio_cfg, enable_fusion, fusion_type)
465
+ elif audio_cfg.model_type == "HTSAT":
466
+ self.audio_branch = create_htsat_model(
467
+ audio_cfg, enable_fusion, fusion_type
468
+ )
469
+ else:
470
+ logging.error(f"Model config for {audio_cfg.model_type} not found")
471
+ raise RuntimeError(f"Model config for {audio_cfg.model_type} not found.")
472
+
473
+ # text branch
474
+ # text branch parameters
475
+ if text_cfg.model_type == "transformer":
476
+ self.text_branch = Transformer(
477
+ width=text_cfg.width,
478
+ layers=text_cfg.layers,
479
+ heads=text_cfg.heads,
480
+ act_layer=act_layer,
481
+ )
482
+ self.vocab_size = text_cfg.vocab_size
483
+ self.token_embedding = nn.Embedding(text_cfg.vocab_size, text_cfg.width)
484
+ self.positional_embedding = nn.Parameter(
485
+ torch.empty(self.context_length, text_cfg.width)
486
+ )
487
+ self.ln_final = LayerNorm(text_cfg.width)
488
+ self.text_transform = MLPLayers(
489
+ units=[
490
+ self.joint_embed_shape,
491
+ self.joint_embed_shape,
492
+ self.joint_embed_shape,
493
+ ],
494
+ dropout=0.1,
495
+ )
496
+ self.text_projection = nn.Sequential(
497
+ nn.Linear(text_cfg.width, self.joint_embed_shape),
498
+ mlp_act_layer,
499
+ nn.Linear(self.joint_embed_shape, self.joint_embed_shape),
500
+ )
501
+ elif text_cfg.model_type == "bert":
502
+ self.text_branch = BertModel.from_pretrained("bert-base-uncased")
503
+ self.text_transform = MLPLayers(
504
+ units=[
505
+ self.joint_embed_shape,
506
+ self.joint_embed_shape,
507
+ self.joint_embed_shape,
508
+ ],
509
+ dropout=0.1,
510
+ )
511
+ self.text_projection = nn.Sequential(
512
+ nn.Linear(768, self.joint_embed_shape),
513
+ mlp_act_layer,
514
+ nn.Linear(self.joint_embed_shape, self.joint_embed_shape),
515
+ )
516
+ elif text_cfg.model_type == "roberta":
517
+ self.text_branch = RobertaModel.from_pretrained("roberta-base")
518
+ self.text_transform = MLPLayers(
519
+ units=[
520
+ self.joint_embed_shape,
521
+ self.joint_embed_shape,
522
+ self.joint_embed_shape,
523
+ ],
524
+ dropout=0.1,
525
+ )
526
+ self.text_projection = nn.Sequential(
527
+ nn.Linear(768, self.joint_embed_shape),
528
+ mlp_act_layer,
529
+ nn.Linear(self.joint_embed_shape, self.joint_embed_shape),
530
+ )
531
+ elif text_cfg.model_type == "bart":
532
+ self.text_branch = BartModel.from_pretrained("facebook/bart-base")
533
+ self.text_transform = MLPLayers(
534
+ units=[
535
+ self.joint_embed_shape,
536
+ self.joint_embed_shape,
537
+ self.joint_embed_shape,
538
+ ],
539
+ dropout=0.1,
540
+ )
541
+ self.text_projection = nn.Sequential(
542
+ nn.Linear(768, self.joint_embed_shape),
543
+ mlp_act_layer,
544
+ nn.Linear(self.joint_embed_shape, self.joint_embed_shape),
545
+ )
546
+ else:
547
+ logging.error(f"Model config for {text_cfg.model_type} not found")
548
+ raise RuntimeError(f"Model config for {text_cfg.model_type} not found.")
549
+ self.text_branch_type = text_cfg.model_type
550
+ # end of text branch parameters
551
+
552
+ # audio branch parameters
553
+ self.audio_transform = MLPLayers(
554
+ units=[
555
+ self.joint_embed_shape,
556
+ self.joint_embed_shape,
557
+ self.joint_embed_shape,
558
+ ],
559
+ dropout=0.1,
560
+ )
561
+
562
+ # audio projection head: maps audio embeddings into the joint space
563
+
564
+ # ============================================================================================================
565
+ self.audio_projection = nn.Sequential(
566
+ nn.Linear(embed_dim, self.joint_embed_shape),
567
+ mlp_act_layer,
568
+ nn.Linear(self.joint_embed_shape, self.joint_embed_shape),
569
+ )
570
+
571
+ self.logit_scale_a = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
572
+ self.logit_scale_t = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
573
+ self.register_buffer("attn_mask", self.build_attention_mask(), persistent=False)
574
+
575
+ self.init_text_branch_parameters()
576
+
577
+ def init_text_branch_parameters(self):
578
+ if self.text_branch_type == "transformer":
579
+ nn.init.normal_(self.token_embedding.weight, std=0.02)
580
+ nn.init.normal_(self.positional_embedding, std=0.01)
581
+ proj_std = (self.text_branch.width**-0.5) * (
582
+ (2 * self.text_branch.layers) ** -0.5
583
+ )
584
+ attn_std = self.text_branch.width**-0.5
585
+ fc_std = (2 * self.text_branch.width) ** -0.5
586
+ for block in self.text_branch.resblocks:
587
+ nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
588
+ nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
589
+ nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
590
+ nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
591
+ if self.text_branch_type == "bert" or self.text_branch_type == "roberta":
592
+ width = self.text_branch.embeddings.word_embeddings.weight.shape[-1]
593
+ elif self.text_branch_type == "bart":
594
+ width = self.text_branch.shared.weight.shape[-1]
595
+ else:
596
+ width = self.text_branch.width
597
+ nn.init.constant_(self.logit_scale_a, np.log(1 / 0.07))
598
+ nn.init.constant_(self.logit_scale_t, np.log(1 / 0.07))
599
+
600
+ # deprecated
601
+ # if hasattr(self.visual, 'init_parameters'):
602
+ # self.visual.init_parameters()
603
+
604
+ # if self.text_projection is not None:
605
+ # nn.init.normal_(self.text_projection, std=width**-0.5)
606
+
607
+ def build_attention_mask(self):
608
+ # lazily create the causal attention mask for the text transformer
609
+ # pytorch uses additive attention mask; fill with -inf
610
+ mask = torch.empty(self.context_length, self.context_length)
611
+ mask.fill_(float("-inf"))
612
+ mask.triu_(1) # zero the diagonal and below; -inf remains strictly above
613
+ return mask
614
+
615
+ def encode_audio(self, audio, device):
616
+ return self.audio_branch(
617
+ audio, mixup_lambda=None, device=device
618
+ ) # TODO: mixup_lambda support has not been added yet
619
+
620
+ # def list_of_dict_of_tensor2dict_of_tensor(self, x, device):
621
+ # tmp = {}
622
+ # for k in x[0].keys():
623
+ # tmp[k] = []
624
+ # for i in range(len(x)):
625
+ # tmp[k].append(x[i][k][:77])
626
+ # for k in x[0].keys():
627
+ # tmp[k] = torch.tensor(tmp[k]).to(device=device, non_blocking=True)
628
+ # return tmp
629
+
630
+ def encode_text(self, text, device):
631
+ if self.text_branch_type == "transformer":
632
+ text = text.to(device=device, non_blocking=True)
633
+ x = self.token_embedding(text) # [batch_size, n_ctx, d_model]
634
+
635
+ x = x + self.positional_embedding
636
+ x = x.permute(1, 0, 2) # NLD -> LND
637
+ x = self.text_branch(x, attn_mask=self.attn_mask)
638
+ x = x.permute(1, 0, 2) # LND -> NLD
639
+ x = self.ln_final(x)
640
+
641
+ # x.shape = [batch_size, n_ctx, transformer.width]
642
+ # take features from the eot embedding (eot_token is the highest number in each sequence)
643
+ x = self.text_projection(x[torch.arange(x.shape[0]), text.argmax(dim=-1)])
644
+ elif self.text_branch_type == "bert":
645
+ # text = self.list_of_dict_of_tensor2dict_of_tensor(text, device)
646
+ # text = BatchEncoding(text)
647
+ x = self.text_branch(
648
+ input_ids=text["input_ids"].to(device=device, non_blocking=True),
649
+ attention_mask=text["attention_mask"].to(
650
+ device=device, non_blocking=True
651
+ ),
652
+ token_type_ids=text["token_type_ids"].to(
653
+ device=device, non_blocking=True
654
+ ),
655
+ )["pooler_output"]
656
+ x = self.text_projection(x)
657
+ elif self.text_branch_type == "roberta":
658
+ x = self.text_branch(
659
+ input_ids=text["input_ids"].to(device=device, non_blocking=True),
660
+ attention_mask=text["attention_mask"].to(
661
+ device=device, non_blocking=True
662
+ ),
663
+ )["pooler_output"]
664
+ x = self.text_projection(x)
665
+ elif self.text_branch_type == "bart":
666
+ x = torch.mean(
667
+ self.text_branch(
668
+ input_ids=text["input_ids"].to(device=device, non_blocking=True),
669
+ attention_mask=text["attention_mask"].to(
670
+ device=device, non_blocking=True
671
+ ),
672
+ )["encoder_last_hidden_state"],
673
+ dim=1,
674
+ )
675
+ x = self.text_projection(x)
676
+ else:
677
+ logging.error(f"Model type {self.text_branch_type} not found")
678
+ raise RuntimeError(f"Model type {self.text_branch_type} not found.")
679
+ return x
680
+
681
+ def forward(self, audio, text, device=None):
682
+ """Forward audio and text into the CLAP
683
+
684
+ Parameters
685
+ ----------
686
+ audio: torch.Tensor (batch_size, audio_length)
687
+ the time-domain audio input, or (with fusion enabled) the dict carrying mel_fusion and longer flags.
688
+ text: torch.Tensor (tokenizer output; exact shape TODO in the original)
689
+ the text token input
690
+ """
691
+ if device is None:
692
+ if audio is not None:
693
+ device = audio.device
694
+ elif text is not None:
695
+ device = text.device
696
+ if audio is None and text is None:
697
+ # a hack to get the logit scale
698
+ return self.logit_scale_a.exp(), self.logit_scale_t.exp()
699
+ elif audio is None:
700
+ return self.encode_text(text, device=device)
701
+ elif text is None:
702
+ return self.audio_projection(
703
+ self.encode_audio(audio, device=device)["embedding"]
704
+ )
705
+ audio_features = self.audio_projection(
706
+ self.encode_audio(audio, device=device)["embedding"]
707
+ )
708
+ audio_features = F.normalize(audio_features, dim=-1)
709
+
710
+ text_features = self.encode_text(text, device=device)
711
+ # print("text_features", text_features)
712
+ # print("text_features.shape", text_features.shape)
713
+ # print("text_features.type", type(text_features))
714
+ text_features = F.normalize(text_features, dim=-1)
715
+
716
+ audio_features_mlp = self.audio_transform(audio_features)
717
+ text_features_mlp = self.text_transform(text_features)
718
+ # Four outputs: audio features (basic & MLP), text features (basic & MLP)
719
+ return (
720
+ audio_features,
721
+ text_features,
722
+ audio_features_mlp,
723
+ text_features_mlp,
724
+ self.logit_scale_a.exp(),
725
+ self.logit_scale_t.exp(),
726
+ )
727
+
728
+ def get_logit_scale(self):
729
+ return self.logit_scale_a.exp(), self.logit_scale_t.exp()
730
+
731
+ def get_text_embedding(self, data):
732
+ """Get the text embedding from the model
733
+
734
+ Parameters
735
+ ----------
736
+ data: torch.Tensor
737
+ a tensor of text embedding
738
+
739
+ Returns
740
+ ----------
741
+ text_embed: torch.Tensor
742
+ a tensor of text_embeds (N, D)
743
+
744
+ """
745
+ device = next(self.parameters()).device
746
+ for k in data:
747
+ data[k] = data[k].to(device)
748
+ if len(data[k].size()) < 2:
749
+ data[k] = data[k].unsqueeze(0)
750
+ text_embeds = self.encode_text(data, device=device)
751
+ text_embeds = F.normalize(text_embeds, dim=-1)
752
+
753
+ return text_embeds
754
+
755
+ def get_audio_embedding(self, data):
756
+ """Get the audio embedding from the model
757
+
758
+ Parameters
759
+ ----------
760
+ data: a list of dict
761
+ the audio input dict list from 'get_audio_feature' method
762
+
763
+ Returns
764
+ ----------
765
+ audio_embed: torch.Tensor
766
+ a tensor of audio_embeds (N, D)
767
+
768
+ """
769
+ device = next(self.parameters()).device
770
+ input_dict = {}
771
+ keys = data[0].keys()
772
+ for k in keys:
773
+ input_dict[k] = torch.cat([d[k].unsqueeze(0) for d in data], dim=0).to(
774
+ device
775
+ )
776
+
777
+ audio_embeds = self.audio_projection(
778
+ self.encode_audio(input_dict, device=device)["embedding"]
779
+ )
780
+ audio_embeds = F.normalize(audio_embeds, dim=-1)
781
+
782
+ return audio_embeds
783
+
784
+ def audio_infer(self, audio, hopsize=None, device=None):
785
+ """Forward one audio and produce the audio embedding
786
+
787
+ Parameters
788
+ ----------
789
+ audio: (audio_length)
790
+ the time-domain audio input; note that it must be a single clip
791
+ hopsize: int
792
+ the overlap hopsize as the sliding window
793
+
794
+ Returns
795
+ ----------
796
+ output_dict: {
797
+ key: [n, (embedding_shape)] if "HTSAT"
798
+ or
799
+ key: [(embedding_shape)] if "PANN"
800
+ }
801
+ the list of key values of the audio branch
802
+
803
+ """
804
+
805
+ assert not self.training, "the inference mode must be run at eval stage"
806
+ key = "embedding" # NOTE: `key` was left undefined in the original code; "embedding" is an assumed output key
+ output_dict = {}
807
+ # PANN
808
+ if self.audio_cfg.model_type == "PANN":
809
+ audio_input = audio.unsqueeze(dim=0)
810
+ output_dict[key] = self.encode_audio(audio_input, device=device)[
811
+ key
812
+ ].squeeze(dim=0)
813
+ elif self.audio_cfg.model_type == "HTSAT":
814
+ # repeat
815
+ audio_len = len(audio)
816
+ k = self.audio_cfg.clip_samples // audio_len
817
+ if k > 1:
818
+ audio = audio.repeat(k)
819
+ audio_len = len(audio)
820
+
821
+ if hopsize is None:
+ hopsize = audio_len # fall back to a single full-length window; the original min(hopsize, audio_len) raises on None
+ else:
+ hopsize = min(hopsize, audio_len)
823
+
824
+ if audio_len > self.audio_cfg.clip_samples:
825
+ audio_input = [
826
+ audio[pos : pos + self.audio_cfg.clip_samples].clone()
827
+ for pos in range(
828
+ 0, audio_len - self.audio_cfg.clip_samples, hopsize
829
+ )
830
+ ]
831
+ audio_input.append(audio[-self.audio_cfg.clip_samples :].clone())
832
+ audio_input = torch.stack(audio_input)
833
+ output_dict[key] = self.encode_audio(audio_input, device=device)[key]
834
+ else:
835
+ audio_input = audio.unsqueeze(dim=0)
836
+ output_dict[key] = self.encode_audio(audio_input, device=device)[
837
+ key
838
+ ].squeeze(dim=0)
839
+
840
+ return output_dict
841
+
842
+
843
+ def convert_weights_to_fp16(model: nn.Module):
844
+ """Convert applicable model parameters to fp16"""
845
+
846
+ def _convert_weights_to_fp16(l):
847
+ if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
848
+ l.weight.data = l.weight.data.half()
849
+ if l.bias is not None:
850
+ l.bias.data = l.bias.data.half()
851
+
852
+ if isinstance(l, nn.MultiheadAttention):
853
+ for attr in [
854
+ *[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]],
855
+ "in_proj_bias",
856
+ "bias_k",
857
+ "bias_v",
858
+ ]:
859
+ tensor = getattr(l, attr)
860
+ if tensor is not None:
861
+ tensor.data = tensor.data.half()
862
+
863
+ for name in ["text_projection", "proj"]:
864
+ if hasattr(l, name):
865
+ attr = getattr(l, name)
866
+ if attr is not None:
867
+ attr.data = attr.data.half()
868
+
869
+ model.apply(_convert_weights_to_fp16)
870
+
871
+
872
+ # Ignore the state dict of the vision part
873
+ def build_model_from_openai_state_dict(
874
+ state_dict: dict, model_cfg, enable_fusion: bool = False, fusion_type: str = "None"
875
+ ):
876
+
877
+ embed_dim = model_cfg["embed_dim"]
878
+ audio_cfg = model_cfg["audio_cfg"]
879
+ text_cfg = model_cfg["text_cfg"]
880
+ context_length = state_dict["positional_embedding"].shape[0]
881
+ vocab_size = state_dict["token_embedding.weight"].shape[0]
882
+ transformer_width = state_dict["ln_final.weight"].shape[0]
883
+ transformer_heads = transformer_width // 64
884
+ transformer_layers = len(
885
+ set(
886
+ k.split(".")[2]
887
+ for k in state_dict
888
+ if k.startswith("transformer.resblocks")
889
+ )
890
+ )
891
+
892
+ audio_cfg = CLAPAudioCfp(**audio_cfg)
893
+ text_cfg = CLAPTextCfg(**text_cfg)
894
+
895
+ model = CLAP(
896
+ embed_dim,
897
+ audio_cfg=audio_cfg,
898
+ text_cfg=text_cfg,
899
+ quick_gelu=True, # OpenAI models were trained with QuickGELU
900
+ enable_fusion=enable_fusion,
901
+ fusion_type=fusion_type,
902
+ )
903
+ state_dict["logit_scale_a"] = state_dict["logit_scale"]
904
+ state_dict["logit_scale_t"] = state_dict["logit_scale"]
905
+ pop_keys = list(state_dict.keys())
906
+ # pop the visual branch saved weights
907
+ for key in pop_keys:
908
+ if key.startswith("visual."):
909
+ state_dict.pop(key, None)
910
+
911
+ for key in ["logit_scale", "input_resolution", "context_length", "vocab_size"]:
912
+ state_dict.pop(key, None)
913
+
914
+ # not use fp16
915
+ # convert_weights_to_fp16(model)
916
+ model.load_state_dict(state_dict, strict=False)
917
+ return model.eval()
918
+
919
+
920
+ def trace_model(model, batch_size=256, device=torch.device("cpu")):
921
+ model.eval()
922
+ audio_length = model.audio_cfg.audio_length
923
+ example_audio = torch.ones((batch_size, audio_length), device=device)
924
+ example_text = torch.zeros(
925
+ (batch_size, model.context_length), dtype=torch.int, device=device
926
+ )
927
+ model = torch.jit.trace_module(
928
+ model,
929
+ inputs=dict(
930
+ forward=(example_audio, example_text),
931
+ encode_text=(example_text,),
932
+ encode_image=(example_audio,),
933
+ ),
934
+ )
935
+ model.audio_cfg.audio_length = audio_length # re-attach the config attribute, which tracing does not preserve
936
+ return model
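For orientation, here is a minimal sketch of driving the embedding helpers above. It is not part of this commit: the import path, the PANN-14-style config values, and the random two-clip batch are all assumptions.

# Usage sketch for the CLAP class above (import path assumed from this repo's layout).
import torch
from audioldm.clap.open_clip.model import CLAP  # assumed module path

audio_cfg = dict(audio_length=1024, clip_samples=480000, mel_bins=64, sample_rate=48000,
                 window_size=1024, hop_size=480, fmin=50, fmax=14000, class_num=527,
                 model_type="PANN", model_name="Cnn14")
text_cfg = dict(context_length=77, vocab_size=49408, width=512, heads=8, layers=12,
                model_type="transformer")
model = CLAP(embed_dim=2048, audio_cfg=audio_cfg, text_cfg=text_cfg).eval()

with torch.no_grad():
    # get_audio_embedding expects a list of per-clip dicts from a get_audio_feature-style
    # preprocessor; with fusion disabled only the "waveform" key is consumed.
    clips = [{"waveform": torch.randn(480000)} for _ in range(2)]  # two 10 s clips at 48 kHz
    audio_embeds = model.get_audio_embedding(clips)  # (2, joint_embed_shape) = (2, 512)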
picoaudio/audioldm/clap/open_clip/model_configs/HTSAT-base.json ADDED
@@ -0,0 +1,23 @@
1
+ {
2
+ "embed_dim": 1024,
3
+ "audio_cfg": {
4
+ "audio_length": 1024,
5
+ "clip_samples": 480000,
6
+ "mel_bins": 64,
7
+ "sample_rate": 48000,
8
+ "window_size": 1024,
9
+ "hop_size": 480,
10
+ "fmin": 50,
11
+ "fmax": 14000,
12
+ "class_num": 527,
13
+ "model_type": "HTSAT",
14
+ "model_name": "base"
15
+ },
16
+ "text_cfg": {
17
+ "context_length": 77,
18
+ "vocab_size": 49408,
19
+ "width": 512,
20
+ "heads": 8,
21
+ "layers": 12
22
+ }
23
+ }
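The audio fields in these configs are tied together: clip_samples / sample_rate is the clip length in seconds, and clip_samples / hop_size is the number of STFT hops per clip. A quick arithmetic check using only the values above:

# Relationships between the HTSAT-base audio config fields (values copied from the JSON above).
cfg = {"clip_samples": 480000, "sample_rate": 48000, "hop_size": 480, "window_size": 1024}
clip_seconds = cfg["clip_samples"] / cfg["sample_rate"]  # 480000 / 48000 = 10.0 s per clip
stft_hops = cfg["clip_samples"] // cfg["hop_size"]       # 480000 // 480 = 1000 hops per clip
print(clip_seconds, stft_hops)  # 10.0 1000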
picoaudio/audioldm/clap/open_clip/model_configs/HTSAT-large.json ADDED
@@ -0,0 +1,23 @@
1
+ {
2
+ "embed_dim": 2048,
3
+ "audio_cfg": {
4
+ "audio_length": 1024,
5
+ "clip_samples": 480000,
6
+ "mel_bins": 64,
7
+ "sample_rate": 48000,
8
+ "window_size": 1024,
9
+ "hop_size": 480,
10
+ "fmin": 50,
11
+ "fmax": 14000,
12
+ "class_num": 527,
13
+ "model_type": "HTSAT",
14
+ "model_name": "large"
15
+ },
16
+ "text_cfg": {
17
+ "context_length": 77,
18
+ "vocab_size": 49408,
19
+ "width": 512,
20
+ "heads": 8,
21
+ "layers": 12
22
+ }
23
+ }
picoaudio/audioldm/clap/open_clip/model_configs/HTSAT-tiny-win-1536.json ADDED
@@ -0,0 +1,23 @@
1
+ {
2
+ "embed_dim": 768,
3
+ "audio_cfg": {
4
+ "audio_length": 1024,
5
+ "clip_samples": 480000,
6
+ "mel_bins": 64,
7
+ "sample_rate": 48000,
8
+ "window_size": 1536,
9
+ "hop_size": 480,
10
+ "fmin": 50,
11
+ "fmax": 14000,
12
+ "class_num": 527,
13
+ "model_type": "HTSAT",
14
+ "model_name": "tiny"
15
+ },
16
+ "text_cfg": {
17
+ "context_length": 77,
18
+ "vocab_size": 49408,
19
+ "width": 512,
20
+ "heads": 8,
21
+ "layers": 12
22
+ }
23
+ }
picoaudio/audioldm/clap/open_clip/model_configs/HTSAT-tiny.json ADDED
@@ -0,0 +1,23 @@
1
+ {
2
+ "embed_dim": 768,
3
+ "audio_cfg": {
4
+ "audio_length": 1024,
5
+ "clip_samples": 480000,
6
+ "mel_bins": 64,
7
+ "sample_rate": 48000,
8
+ "window_size": 1024,
9
+ "hop_size": 480,
10
+ "fmin": 50,
11
+ "fmax": 14000,
12
+ "class_num": 527,
13
+ "model_type": "HTSAT",
14
+ "model_name": "tiny"
15
+ },
16
+ "text_cfg": {
17
+ "context_length": 77,
18
+ "vocab_size": 49408,
19
+ "width": 512,
20
+ "heads": 8,
21
+ "layers": 12
22
+ }
23
+ }
picoaudio/audioldm/clap/open_clip/model_configs/PANN-10.json ADDED
@@ -0,0 +1,23 @@
1
+ {
2
+ "embed_dim": 1024,
3
+ "audio_cfg": {
4
+ "audio_length": 1024,
5
+ "clip_samples": 480000,
6
+ "mel_bins": 64,
7
+ "sample_rate": 48000,
8
+ "window_size": 1024,
9
+ "hop_size": 480,
10
+ "fmin": 50,
11
+ "fmax": 14000,
12
+ "class_num": 527,
13
+ "model_type": "PANN",
14
+ "model_name": "Cnn10"
15
+ },
16
+ "text_cfg": {
17
+ "context_length": 77,
18
+ "vocab_size": 49408,
19
+ "width": 512,
20
+ "heads": 8,
21
+ "layers": 12
22
+ }
23
+ }
picoaudio/audioldm/clap/open_clip/model_configs/PANN-14-fmax-18k.json ADDED
@@ -0,0 +1,23 @@
1
+ {
2
+ "embed_dim": 2048,
3
+ "audio_cfg": {
4
+ "audio_length": 1024,
5
+ "clip_samples": 480000,
6
+ "mel_bins": 64,
7
+ "sample_rate": 48000,
8
+ "window_size": 1024,
9
+ "hop_size": 480,
10
+ "fmin": 50,
11
+ "fmax": 18000,
12
+ "class_num": 527,
13
+ "model_type": "PANN",
14
+ "model_name": "Cnn14"
15
+ },
16
+ "text_cfg": {
17
+ "context_length": 77,
18
+ "vocab_size": 49408,
19
+ "width": 512,
20
+ "heads": 8,
21
+ "layers": 12
22
+ }
23
+ }
picoaudio/audioldm/clap/open_clip/model_configs/PANN-14-fmax-8k-20s.json ADDED
@@ -0,0 +1,23 @@
1
+ {
2
+ "embed_dim": 2048,
3
+ "audio_cfg": {
4
+ "audio_length": 1024,
5
+ "clip_samples": 960000,
6
+ "mel_bins": 64,
7
+ "sample_rate": 48000,
8
+ "window_size": 1024,
9
+ "hop_size": 360,
10
+ "fmin": 50,
11
+ "fmax": 8000,
12
+ "class_num": 527,
13
+ "model_type": "PANN",
14
+ "model_name": "Cnn14"
15
+ },
16
+ "text_cfg": {
17
+ "context_length": 77,
18
+ "vocab_size": 49408,
19
+ "width": 512,
20
+ "heads": 8,
21
+ "layers": 12
22
+ }
23
+ }
picoaudio/audioldm/clap/open_clip/model_configs/PANN-14-tiny-transformer.json ADDED
@@ -0,0 +1,23 @@
1
+ {
2
+ "embed_dim": 2048,
3
+ "audio_cfg": {
4
+ "audio_length": 1024,
5
+ "clip_samples": 480000,
6
+ "mel_bins": 64,
7
+ "sample_rate": 48000,
8
+ "window_size": 1024,
9
+ "hop_size": 480,
10
+ "fmin": 50,
11
+ "fmax": 14000,
12
+ "class_num": 527,
13
+ "model_type": "PANN",
14
+ "model_name": "Cnn14"
15
+ },
16
+ "text_cfg": {
17
+ "context_length": 77,
18
+ "vocab_size": 49408,
19
+ "width": 512,
20
+ "heads": 8,
21
+ "layers": 4
22
+ }
23
+ }
picoaudio/audioldm/clap/open_clip/model_configs/PANN-14-win-1536.json ADDED
@@ -0,0 +1,23 @@
1
+ {
2
+ "embed_dim": 2048,
3
+ "audio_cfg": {
4
+ "audio_length": 1024,
5
+ "clip_samples": 480000,
6
+ "mel_bins": 64,
7
+ "sample_rate": 48000,
8
+ "window_size": 1536,
9
+ "hop_size": 480,
10
+ "fmin": 50,
11
+ "fmax": 14000,
12
+ "class_num": 527,
13
+ "model_type": "PANN",
14
+ "model_name": "Cnn14"
15
+ },
16
+ "text_cfg": {
17
+ "context_length": 77,
18
+ "vocab_size": 49408,
19
+ "width": 512,
20
+ "heads": 8,
21
+ "layers": 12
22
+ }
23
+ }
picoaudio/audioldm/clap/open_clip/model_configs/PANN-14.json ADDED
@@ -0,0 +1,23 @@
1
+ {
2
+ "embed_dim": 2048,
3
+ "audio_cfg": {
4
+ "audio_length": 1024,
5
+ "clip_samples": 480000,
6
+ "mel_bins": 64,
7
+ "sample_rate": 48000,
8
+ "window_size": 1024,
9
+ "hop_size": 480,
10
+ "fmin": 50,
11
+ "fmax": 14000,
12
+ "class_num": 527,
13
+ "model_type": "PANN",
14
+ "model_name": "Cnn14"
15
+ },
16
+ "text_cfg": {
17
+ "context_length": 77,
18
+ "vocab_size": 49408,
19
+ "width": 512,
20
+ "heads": 8,
21
+ "layers": 12
22
+ }
23
+ }
picoaudio/audioldm/clap/open_clip/model_configs/PANN-6.json ADDED
@@ -0,0 +1,23 @@
1
+ {
2
+ "embed_dim": 512,
3
+ "audio_cfg": {
4
+ "audio_length": 1024,
5
+ "clip_samples": 480000,
6
+ "mel_bins": 64,
7
+ "sample_rate": 48000,
8
+ "window_size": 1024,
9
+ "hop_size": 480,
10
+ "fmin": 50,
11
+ "fmax": 14000,
12
+ "class_num": 527,
13
+ "model_type": "PANN",
14
+ "model_name": "Cnn6"
15
+ },
16
+ "text_cfg": {
17
+ "context_length": 77,
18
+ "vocab_size": 49408,
19
+ "width": 512,
20
+ "heads": 8,
21
+ "layers": 12
22
+ }
23
+ }
picoaudio/audioldm/clap/open_clip/model_configs/RN101-quickgelu.json ADDED
@@ -0,0 +1,22 @@
1
+ {
2
+ "embed_dim": 512,
3
+ "quick_gelu": true,
4
+ "vision_cfg": {
5
+ "image_size": 224,
6
+ "layers": [
7
+ 3,
8
+ 4,
9
+ 23,
10
+ 3
11
+ ],
12
+ "width": 64,
13
+ "patch_size": null
14
+ },
15
+ "text_cfg": {
16
+ "context_length": 77,
17
+ "vocab_size": 49408,
18
+ "width": 512,
19
+ "heads": 8,
20
+ "layers": 12
21
+ }
22
+ }
picoaudio/audioldm/clap/open_clip/model_configs/RN101.json ADDED
@@ -0,0 +1,21 @@
1
+ {
2
+ "embed_dim": 512,
3
+ "vision_cfg": {
4
+ "image_size": 224,
5
+ "layers": [
6
+ 3,
7
+ 4,
8
+ 23,
9
+ 3
10
+ ],
11
+ "width": 64,
12
+ "patch_size": null
13
+ },
14
+ "text_cfg": {
15
+ "context_length": 77,
16
+ "vocab_size": 49408,
17
+ "width": 512,
18
+ "heads": 8,
19
+ "layers": 12
20
+ }
21
+ }
picoaudio/audioldm/clap/open_clip/model_configs/RN50-quickgelu.json ADDED
@@ -0,0 +1,22 @@
1
+ {
2
+ "embed_dim": 1024,
3
+ "quick_gelu": true,
4
+ "vision_cfg": {
5
+ "image_size": 224,
6
+ "layers": [
7
+ 3,
8
+ 4,
9
+ 6,
10
+ 3
11
+ ],
12
+ "width": 64,
13
+ "patch_size": null
14
+ },
15
+ "text_cfg": {
16
+ "context_length": 77,
17
+ "vocab_size": 49408,
18
+ "width": 512,
19
+ "heads": 8,
20
+ "layers": 12
21
+ }
22
+ }
picoaudio/audioldm/clap/open_clip/model_configs/RN50.json ADDED
@@ -0,0 +1,21 @@
1
+ {
2
+ "embed_dim": 1024,
3
+ "vision_cfg": {
4
+ "image_size": 224,
5
+ "layers": [
6
+ 3,
7
+ 4,
8
+ 6,
9
+ 3
10
+ ],
11
+ "width": 64,
12
+ "patch_size": null
13
+ },
14
+ "text_cfg": {
15
+ "context_length": 77,
16
+ "vocab_size": 49408,
17
+ "width": 512,
18
+ "heads": 8,
19
+ "layers": 12
20
+ }
21
+ }
picoaudio/audioldm/clap/open_clip/model_configs/RN50x16.json ADDED
@@ -0,0 +1,21 @@
1
+ {
2
+ "embed_dim": 768,
3
+ "vision_cfg": {
4
+ "image_size": 384,
5
+ "layers": [
6
+ 6,
7
+ 8,
8
+ 18,
9
+ 8
10
+ ],
11
+ "width": 96,
12
+ "patch_size": null
13
+ },
14
+ "text_cfg": {
15
+ "context_length": 77,
16
+ "vocab_size": 49408,
17
+ "width": 768,
18
+ "heads": 12,
19
+ "layers": 12
20
+ }
21
+ }
picoaudio/audioldm/clap/open_clip/model_configs/RN50x4.json ADDED
@@ -0,0 +1,21 @@
1
+ {
2
+ "embed_dim": 640,
3
+ "vision_cfg": {
4
+ "image_size": 288,
5
+ "layers": [
6
+ 4,
7
+ 6,
8
+ 10,
9
+ 6
10
+ ],
11
+ "width": 80,
12
+ "patch_size": null
13
+ },
14
+ "text_cfg": {
15
+ "context_length": 77,
16
+ "vocab_size": 49408,
17
+ "width": 640,
18
+ "heads": 10,
19
+ "layers": 12
20
+ }
21
+ }
picoaudio/audioldm/clap/open_clip/model_configs/ViT-B-16.json ADDED
@@ -0,0 +1,16 @@
1
+ {
2
+ "embed_dim": 512,
3
+ "vision_cfg": {
4
+ "image_size": 224,
5
+ "layers": 12,
6
+ "width": 768,
7
+ "patch_size": 16
8
+ },
9
+ "text_cfg": {
10
+ "context_length": 77,
11
+ "vocab_size": 49408,
12
+ "width": 512,
13
+ "heads": 8,
14
+ "layers": 12
15
+ }
16
+ }
picoaudio/audioldm/clap/open_clip/model_configs/ViT-B-32-quickgelu.json ADDED
@@ -0,0 +1,17 @@
1
+ {
2
+ "embed_dim": 512,
3
+ "quick_gelu": true,
4
+ "vision_cfg": {
5
+ "image_size": 224,
6
+ "layers": 12,
7
+ "width": 768,
8
+ "patch_size": 32
9
+ },
10
+ "text_cfg": {
11
+ "context_length": 77,
12
+ "vocab_size": 49408,
13
+ "width": 512,
14
+ "heads": 8,
15
+ "layers": 12
16
+ }
17
+ }
picoaudio/audioldm/clap/open_clip/model_configs/ViT-B-32.json ADDED
@@ -0,0 +1,16 @@
1
+ {
2
+ "embed_dim": 512,
3
+ "vision_cfg": {
4
+ "image_size": 224,
5
+ "layers": 12,
6
+ "width": 768,
7
+ "patch_size": 32
8
+ },
9
+ "text_cfg": {
10
+ "context_length": 77,
11
+ "vocab_size": 49408,
12
+ "width": 512,
13
+ "heads": 8,
14
+ "layers": 12
15
+ }
16
+ }
picoaudio/audioldm/clap/open_clip/model_configs/ViT-L-14.json ADDED
@@ -0,0 +1,16 @@
1
+ {
2
+ "embed_dim": 768,
3
+ "vision_cfg": {
4
+ "image_size": 224,
5
+ "layers": 24,
6
+ "width": 1024,
7
+ "patch_size": 14
8
+ },
9
+ "text_cfg": {
10
+ "context_length": 77,
11
+ "vocab_size": 49408,
12
+ "width": 768,
13
+ "heads": 12,
14
+ "layers": 12
15
+ }
16
+ }
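The RN*/ViT-* entries keep CLIP's vision-tower definitions so that OpenAI checkpoints can be located and their pretrained text towers reused; the audio-model JSONs carry audio_cfg instead. A quick check of which sub-configs each file carries (paths assumed relative to the repo root):

# Peek at which sub-configs each JSON carries (vision_cfg for CLIP models, audio_cfg for CLAP).
import json
for name in ["RN50", "ViT-L-14", "HTSAT-tiny"]:
    with open(f"picoaudio/audioldm/clap/open_clip/model_configs/{name}.json") as f:
        cfg = json.load(f)
    print(name, sorted(k for k in cfg if k.endswith("_cfg")))
# RN50 ['text_cfg', 'vision_cfg']
# ViT-L-14 ['text_cfg', 'vision_cfg']
# HTSAT-tiny ['audio_cfg', 'text_cfg']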
picoaudio/audioldm/clap/open_clip/openai.py ADDED
@@ -0,0 +1,159 @@
1
+ """ OpenAI pretrained model functions
2
+
3
+ Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
4
+ """
5
+
6
+ import os
7
+ import warnings
8
+ from typing import Union, List
9
+
10
+ import torch
11
+
12
+ from .model import build_model_from_openai_state_dict
13
+ from .pretrained import (
14
+ get_pretrained_url,
15
+ list_pretrained_tag_models,
16
+ download_pretrained,
17
+ )
18
+
19
+ __all__ = ["list_openai_models", "load_openai_model"]
20
+
21
+ CACHE_DIR = os.getenv("AUDIOLDM_CACHE_DIR", "~/.cache")
22
+
23
+
24
+
25
+ def list_openai_models() -> List[str]:
26
+ """Returns the names of available CLIP models"""
27
+ return list_pretrained_tag_models("openai")
28
+
29
+
30
+ def load_openai_model(
31
+ name: str,
32
+ model_cfg,
33
+ device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu",
34
+ jit=True,
35
+ cache_dir=os.path.expanduser(f"{CACHE_DIR}/clip"),
36
+ enable_fusion: bool = False,
37
+ fusion_type: str = "None",
38
+ ):
39
+ """Load a CLIP model, preserve its text pretrained part, and set in the CLAP model
40
+
41
+ Parameters
42
+ ----------
43
+ name : str
44
+ A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
45
+ device : Union[str, torch.device]
46
+ The device to put the loaded model
47
+ jit : bool
48
+ Whether to load the optimized JIT model (default) or more hackable non-JIT model.
49
+
50
+ Returns
51
+ -------
52
+ model : torch.nn.Module
53
+ The CLAP model
54
+ preprocess : Callable[[PIL.Image], torch.Tensor]
55
+ A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
56
+ """
57
+ if get_pretrained_url(name, "openai"):
58
+ model_path = download_pretrained(
59
+ get_pretrained_url(name, "openai"), root=cache_dir
60
+ )
61
+ elif os.path.isfile(name):
62
+ model_path = name
63
+ else:
64
+ raise RuntimeError(
65
+ f"Model {name} not found; available models = {list_openai_models()}"
66
+ )
67
+
68
+ try:
69
+ # loading JIT archive
70
+ model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
71
+ state_dict = None
72
+ except RuntimeError:
73
+ # loading saved state dict
74
+ if jit:
75
+ warnings.warn(
76
+ f"File {model_path} is not a JIT archive. Loading as a state dict instead"
77
+ )
78
+ jit = False
79
+ state_dict = torch.load(model_path, map_location="cpu")
80
+
81
+ if not jit:
82
+ try:
83
+ model = build_model_from_openai_state_dict(
84
+ state_dict or model.state_dict(), model_cfg, enable_fusion, fusion_type
85
+ ).to(device)
86
+ except KeyError:
87
+ sd = {k[7:]: v for k, v in state_dict["state_dict"].items()}
88
+ model = build_model_from_openai_state_dict(
89
+ sd, model_cfg, enable_fusion, fusion_type
90
+ ).to(device)
91
+
92
+ if str(device) == "cpu":
93
+ model.float()
94
+ return model
95
+
96
+ # patch the device names
97
+ device_holder = torch.jit.trace(
98
+ lambda: torch.ones([]).to(torch.device(device)), example_inputs=[]
99
+ )
100
+ device_node = [
101
+ n
102
+ for n in device_holder.graph.findAllNodes("prim::Constant")
103
+ if "Device" in repr(n)
104
+ ][-1]
105
+
106
+ def patch_device(module):
107
+ try:
108
+ graphs = [module.graph] if hasattr(module, "graph") else []
109
+ except RuntimeError:
110
+ graphs = []
111
+
112
+ if hasattr(module, "forward1"):
113
+ graphs.append(module.forward1.graph)
114
+
115
+ for graph in graphs:
116
+ for node in graph.findAllNodes("prim::Constant"):
117
+ if "value" in node.attributeNames() and str(node["value"]).startswith(
118
+ "cuda"
119
+ ):
120
+ node.copyAttributes(device_node)
121
+
122
+ model.apply(patch_device)
123
+ patch_device(model.encode_audio)
124
+ patch_device(model.encode_text)
125
+
126
+ # patch dtype to float32 on CPU
127
+ if str(device) == "cpu":
128
+ float_holder = torch.jit.trace(
129
+ lambda: torch.ones([]).float(), example_inputs=[]
130
+ )
131
+ float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
132
+ float_node = float_input.node()
133
+
134
+ def patch_float(module):
135
+ try:
136
+ graphs = [module.graph] if hasattr(module, "graph") else []
137
+ except RuntimeError:
138
+ graphs = []
139
+
140
+ if hasattr(module, "forward1"):
141
+ graphs.append(module.forward1.graph)
142
+
143
+ for graph in graphs:
144
+ for node in graph.findAllNodes("aten::to"):
145
+ inputs = list(node.inputs())
146
+ for i in [
147
+ 1,
148
+ 2,
149
+ ]: # dtype can be the second or third argument to aten::to()
150
+ if inputs[i].node()["value"] == 5:
151
+ inputs[i].node().copyAttributes(float_node)
152
+
153
+ model.apply(patch_float)
154
+ patch_float(model.encode_audio)
155
+ patch_float(model.encode_text)
156
+ model.float()
157
+
158
+ model.audio_branch.audio_length = model.audio_cfg.audio_length
159
+ return model
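A hypothetical driver for load_openai_model follows. It is a sketch only: the config path and the injected text model_type are assumptions, and in this repo the factory normally prepares model_cfg before calling in.

# Sketch: load OpenAI CLIP "RN50" weights and graft the pretrained text tower into a CLAP model.
import json
import torch

with open("picoaudio/audioldm/clap/open_clip/model_configs/PANN-14.json") as f:
    model_cfg = json.load(f)
model_cfg["text_cfg"]["model_type"] = "transformer"  # normally injected by the factory

device = "cuda" if torch.cuda.is_available() else "cpu"
# jit=False routes through build_model_from_openai_state_dict and returns an eager module.
model = load_openai_model("RN50", model_cfg, device=device, jit=False)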
picoaudio/audioldm/clap/open_clip/pann_model.py ADDED
@@ -0,0 +1,704 @@
1
+ # PANNs: Large-Scale Pretrained Audio Neural Networks for Audio Pattern Recognition
2
+ # Reference from https://github.com/qiuqiangkong/audioset_tagging_cnn
3
+ # Some layers are re-designed for CLAP
4
+ import os
5
+
6
+ os.environ["NUMBA_CACHE_DIR"] = "/tmp/"
7
+
8
+ import torch
9
+ import torch.nn as nn
10
+ import torch.nn.functional as F
11
+ from torchlibrosa.stft import Spectrogram, LogmelFilterBank
12
+ from torchlibrosa.augmentation import SpecAugmentation
13
+
14
+ from .utils import do_mixup, interpolate, pad_framewise_output
15
+ from .feature_fusion import iAFF, AFF, DAF
16
+
17
+
18
+ def init_layer(layer):
19
+ """Initialize a Linear or Convolutional layer."""
20
+ nn.init.xavier_uniform_(layer.weight)
21
+
22
+ if hasattr(layer, "bias"):
23
+ if layer.bias is not None:
24
+ layer.bias.data.fill_(0.0)
25
+
26
+
27
+ def init_bn(bn):
28
+ """Initialize a Batchnorm layer."""
29
+ bn.bias.data.fill_(0.0)
30
+ bn.weight.data.fill_(1.0)
31
+
32
+
33
+ class ConvBlock(nn.Module):
34
+ def __init__(self, in_channels, out_channels):
35
+
36
+ super(ConvBlock, self).__init__()
37
+
38
+ self.conv1 = nn.Conv2d(
39
+ in_channels=in_channels,
40
+ out_channels=out_channels,
41
+ kernel_size=(3, 3),
42
+ stride=(1, 1),
43
+ padding=(1, 1),
44
+ bias=False,
45
+ )
46
+
47
+ self.conv2 = nn.Conv2d(
48
+ in_channels=out_channels,
49
+ out_channels=out_channels,
50
+ kernel_size=(3, 3),
51
+ stride=(1, 1),
52
+ padding=(1, 1),
53
+ bias=False,
54
+ )
55
+
56
+ self.bn1 = nn.BatchNorm2d(out_channels)
57
+ self.bn2 = nn.BatchNorm2d(out_channels)
58
+
59
+ self.init_weight()
60
+
61
+ def init_weight(self):
62
+ init_layer(self.conv1)
63
+ init_layer(self.conv2)
64
+ init_bn(self.bn1)
65
+ init_bn(self.bn2)
66
+
67
+ def forward(self, input, pool_size=(2, 2), pool_type="avg"):
68
+
69
+ x = input
70
+ x = F.relu_(self.bn1(self.conv1(x)))
71
+ x = F.relu_(self.bn2(self.conv2(x)))
72
+ if pool_type == "max":
73
+ x = F.max_pool2d(x, kernel_size=pool_size)
74
+ elif pool_type == "avg":
75
+ x = F.avg_pool2d(x, kernel_size=pool_size)
76
+ elif pool_type == "avg+max":
77
+ x1 = F.avg_pool2d(x, kernel_size=pool_size)
78
+ x2 = F.max_pool2d(x, kernel_size=pool_size)
79
+ x = x1 + x2
80
+ else:
81
+ raise ValueError(f"Incorrect pool_type argument: {pool_type}")
82
+
83
+ return x
84
+
85
+
86
+ class ConvBlock5x5(nn.Module):
87
+ def __init__(self, in_channels, out_channels):
88
+
89
+ super(ConvBlock5x5, self).__init__()
90
+
91
+ self.conv1 = nn.Conv2d(
92
+ in_channels=in_channels,
93
+ out_channels=out_channels,
94
+ kernel_size=(5, 5),
95
+ stride=(1, 1),
96
+ padding=(2, 2),
97
+ bias=False,
98
+ )
99
+
100
+ self.bn1 = nn.BatchNorm2d(out_channels)
101
+
102
+ self.init_weight()
103
+
104
+ def init_weight(self):
105
+ init_layer(self.conv1)
106
+ init_bn(self.bn1)
107
+
108
+ def forward(self, input, pool_size=(2, 2), pool_type="avg"):
109
+
110
+ x = input
111
+ x = F.relu_(self.bn1(self.conv1(x)))
112
+ if pool_type == "max":
113
+ x = F.max_pool2d(x, kernel_size=pool_size)
114
+ elif pool_type == "avg":
115
+ x = F.avg_pool2d(x, kernel_size=pool_size)
116
+ elif pool_type == "avg+max":
117
+ x1 = F.avg_pool2d(x, kernel_size=pool_size)
118
+ x2 = F.max_pool2d(x, kernel_size=pool_size)
119
+ x = x1 + x2
120
+ else:
121
+ raise ValueError(f"Incorrect pool_type argument: {pool_type}")
122
+
123
+ return x
124
+
125
+
126
+ class AttBlock(nn.Module):
127
+ def __init__(self, n_in, n_out, activation="linear", temperature=1.0):
128
+ super(AttBlock, self).__init__()
129
+
130
+ self.activation = activation
131
+ self.temperature = temperature
132
+ self.att = nn.Conv1d(
133
+ in_channels=n_in,
134
+ out_channels=n_out,
135
+ kernel_size=1,
136
+ stride=1,
137
+ padding=0,
138
+ bias=True,
139
+ )
140
+ self.cla = nn.Conv1d(
141
+ in_channels=n_in,
142
+ out_channels=n_out,
143
+ kernel_size=1,
144
+ stride=1,
145
+ padding=0,
146
+ bias=True,
147
+ )
148
+
149
+ self.bn_att = nn.BatchNorm1d(n_out)
150
+ self.init_weights()
151
+
152
+ def init_weights(self):
153
+ init_layer(self.att)
154
+ init_layer(self.cla)
155
+ init_bn(self.bn_att)
156
+
157
+ def forward(self, x):
158
+ # x: (n_samples, n_in, n_time)
159
+ norm_att = torch.softmax(torch.clamp(self.att(x), -10, 10), dim=-1)
160
+ cla = self.nonlinear_transform(self.cla(x))
161
+ x = torch.sum(norm_att * cla, dim=2)
162
+ return x, norm_att, cla
163
+
164
+ def nonlinear_transform(self, x):
165
+ if self.activation == "linear":
166
+ return x
167
+ elif self.activation == "sigmoid":
168
+ return torch.sigmoid(x)
169
+
170
+
171
+ class Cnn14(nn.Module):
172
+ def __init__(
173
+ self,
174
+ sample_rate,
175
+ window_size,
176
+ hop_size,
177
+ mel_bins,
178
+ fmin,
179
+ fmax,
180
+ classes_num,
181
+ enable_fusion=False,
182
+ fusion_type="None",
183
+ ):
184
+
185
+ super(Cnn14, self).__init__()
186
+
187
+ window = "hann"
188
+ center = True
189
+ pad_mode = "reflect"
190
+ ref = 1.0
191
+ amin = 1e-10
192
+ top_db = None
193
+
194
+ self.enable_fusion = enable_fusion
195
+ self.fusion_type = fusion_type
196
+
197
+ # Spectrogram extractor
198
+ self.spectrogram_extractor = Spectrogram(
199
+ n_fft=window_size,
200
+ hop_length=hop_size,
201
+ win_length=window_size,
202
+ window=window,
203
+ center=center,
204
+ pad_mode=pad_mode,
205
+ freeze_parameters=True,
206
+ )
207
+
208
+ # Logmel feature extractor
209
+ self.logmel_extractor = LogmelFilterBank(
210
+ sr=sample_rate,
211
+ n_fft=window_size,
212
+ n_mels=mel_bins,
213
+ fmin=fmin,
214
+ fmax=fmax,
215
+ ref=ref,
216
+ amin=amin,
217
+ top_db=top_db,
218
+ freeze_parameters=True,
219
+ )
220
+
221
+ # Spec augmenter
222
+ self.spec_augmenter = SpecAugmentation(
223
+ time_drop_width=64,
224
+ time_stripes_num=2,
225
+ freq_drop_width=8,
226
+ freq_stripes_num=2,
227
+ )
228
+
229
+ self.bn0 = nn.BatchNorm2d(64)
230
+
231
+ if (self.enable_fusion) and (self.fusion_type == "channel_map"):
232
+ self.conv_block1 = ConvBlock(in_channels=4, out_channels=64)
233
+ else:
234
+ self.conv_block1 = ConvBlock(in_channels=1, out_channels=64)
235
+ self.conv_block2 = ConvBlock(in_channels=64, out_channels=128)
236
+ self.conv_block3 = ConvBlock(in_channels=128, out_channels=256)
237
+ self.conv_block4 = ConvBlock(in_channels=256, out_channels=512)
238
+ self.conv_block5 = ConvBlock(in_channels=512, out_channels=1024)
239
+ self.conv_block6 = ConvBlock(in_channels=1024, out_channels=2048)
240
+
241
+ self.fc1 = nn.Linear(2048, 2048, bias=True)
242
+ self.fc_audioset = nn.Linear(2048, classes_num, bias=True)
243
+
244
+ if (self.enable_fusion) and (
245
+ self.fusion_type in ["daf_1d", "aff_1d", "iaff_1d"]
246
+ ):
247
+ self.mel_conv1d = nn.Sequential(
248
+ nn.Conv1d(64, 64, kernel_size=5, stride=3, padding=2),
249
+ nn.BatchNorm1d(64), # No Relu
250
+ )
251
+ if self.fusion_type == "daf_1d":
252
+ self.fusion_model = DAF()
253
+ elif self.fusion_type == "aff_1d":
254
+ self.fusion_model = AFF(channels=64, type="1D")
255
+ elif self.fusion_type == "iaff_1d":
256
+ self.fusion_model = iAFF(channels=64, type="1D")
257
+
258
+ if (self.enable_fusion) and (
259
+ self.fusion_type in ["daf_2d", "aff_2d", "iaff_2d"]
260
+ ):
261
+ self.mel_conv2d = nn.Sequential(
262
+ nn.Conv2d(1, 64, kernel_size=(5, 5), stride=(6, 2), padding=(2, 2)),
263
+ nn.BatchNorm2d(64),
264
+ nn.ReLU(inplace=True),
265
+ )
266
+
267
+ if self.fusion_type == "daf_2d":
268
+ self.fusion_model = DAF()
269
+ elif self.fusion_type == "aff_2d":
270
+ self.fusion_model = AFF(channels=64, type="2D")
271
+ elif self.fusion_type == "iaff_2d":
272
+ self.fusion_model = iAFF(channels=64, type="2D")
273
+ self.init_weight()
274
+
275
+ def init_weight(self):
276
+ init_bn(self.bn0)
277
+ init_layer(self.fc1)
278
+ init_layer(self.fc_audioset)
279
+
280
+ def forward(self, input, mixup_lambda=None, device=None):
281
+ """
282
+ Input: (batch_size, data_length)"""
283
+
284
+ if self.enable_fusion and input["longer"].sum() == 0:
285
+ # if no audio is longer than 10s, then randomly select one audio to be longer
286
+ input["longer"][torch.randint(0, input["longer"].shape[0], (1,))] = True
287
+
288
+ if not self.enable_fusion:
289
+ x = self.spectrogram_extractor(
290
+ input["waveform"].to(device=device, non_blocking=True)
291
+ ) # (batch_size, 1, time_steps, freq_bins)
292
+ x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)
293
+
294
+ x = x.transpose(1, 3)
295
+ x = self.bn0(x)
296
+ x = x.transpose(1, 3)
297
+ else:
298
+ longer_list = input["longer"].to(device=device, non_blocking=True)
299
+ x = input["mel_fusion"].to(device=device, non_blocking=True)
300
+ longer_list_idx = torch.where(longer_list)[0]
301
+ x = x.transpose(1, 3)
302
+ x = self.bn0(x)
303
+ x = x.transpose(1, 3)
304
+ if self.fusion_type in ["daf_1d", "aff_1d", "iaff_1d"]:
305
+ new_x = x[:, 0:1, :, :].clone().contiguous()
306
+ # local processing
307
+ if len(longer_list_idx) > 0:
308
+ fusion_x_local = x[longer_list_idx, 1:, :, :].clone().contiguous()
309
+ FB, FC, FT, FF = fusion_x_local.size()
310
+ fusion_x_local = fusion_x_local.view(FB * FC, FT, FF)
311
+ fusion_x_local = torch.permute(
312
+ fusion_x_local, (0, 2, 1)
313
+ ).contiguous()
314
+ fusion_x_local = self.mel_conv1d(fusion_x_local)
315
+ fusion_x_local = fusion_x_local.view(
316
+ FB, FC, FF, fusion_x_local.size(-1)
317
+ )
318
+ fusion_x_local = (
319
+ torch.permute(fusion_x_local, (0, 2, 1, 3))
320
+ .contiguous()
321
+ .flatten(2)
322
+ )
323
+ if fusion_x_local.size(-1) < FT:
324
+ fusion_x_local = torch.cat(
325
+ [
326
+ fusion_x_local,
327
+ torch.zeros(
328
+ (FB, FF, FT - fusion_x_local.size(-1)),
329
+ device=device,
330
+ ),
331
+ ],
332
+ dim=-1,
333
+ )
334
+ else:
335
+ fusion_x_local = fusion_x_local[:, :, :FT]
336
+ # 1D fusion
337
+ new_x = new_x.squeeze(1).permute((0, 2, 1)).contiguous()
338
+ new_x[longer_list_idx] = self.fusion_model(
339
+ new_x[longer_list_idx], fusion_x_local
340
+ )
341
+ x = new_x.permute((0, 2, 1)).contiguous()[:, None, :, :]
342
+ else:
343
+ x = new_x
344
+ elif self.fusion_type in ["daf_2d", "aff_2d", "iaff_2d", "channel_map"]:
345
+ x = x # no change
346
+
347
+ if self.training:
348
+ x = self.spec_augmenter(x)
349
+ # Mixup on spectrogram
350
+ if self.training and mixup_lambda is not None:
351
+ x = do_mixup(x, mixup_lambda)
352
+ if (self.enable_fusion) and (
353
+ self.fusion_type in ["daf_2d", "aff_2d", "iaff_2d"]
354
+ ):
355
+ global_x = x[:, 0:1, :, :]
356
+
357
+ # global processing
358
+ B, C, H, W = global_x.shape
359
+ global_x = self.conv_block1(global_x, pool_size=(2, 2), pool_type="avg")
360
+ if len(longer_list_idx) > 0:
361
+ local_x = x[longer_list_idx, 1:, :, :].contiguous()
362
+ TH = global_x.size(-2)
363
+ # local processing
364
+ B, C, H, W = local_x.shape
365
+ local_x = local_x.view(B * C, 1, H, W)
366
+ local_x = self.mel_conv2d(local_x)
367
+ local_x = local_x.view(
368
+ B, C, local_x.size(1), local_x.size(2), local_x.size(3)
369
+ )
370
+ local_x = local_x.permute((0, 2, 1, 3, 4)).contiguous().flatten(2, 3)
371
+ TB, TC, _, TW = local_x.size()
372
+ if local_x.size(-2) < TH:
373
+ local_x = torch.cat(
374
+ [
375
+ local_x,
376
+ torch.zeros(
377
+ (TB, TC, TH - local_x.size(-2), TW),
378
+ device=global_x.device,
379
+ ),
380
+ ],
381
+ dim=-2,
382
+ )
383
+ else:
384
+ local_x = local_x[:, :, :TH, :]
385
+
386
+ global_x[longer_list_idx] = self.fusion_model(
387
+ global_x[longer_list_idx], local_x
388
+ )
389
+ x = global_x
390
+ else:
391
+ x = self.conv_block1(x, pool_size=(2, 2), pool_type="avg")
392
+
393
+ x = F.dropout(x, p=0.2, training=self.training)
394
+ x = self.conv_block2(x, pool_size=(2, 2), pool_type="avg")
395
+ x = F.dropout(x, p=0.2, training=self.training)
396
+ x = self.conv_block3(x, pool_size=(2, 2), pool_type="avg")
397
+ x = F.dropout(x, p=0.2, training=self.training)
398
+ x = self.conv_block4(x, pool_size=(2, 2), pool_type="avg")
399
+ x = F.dropout(x, p=0.2, training=self.training)
400
+ x = self.conv_block5(x, pool_size=(2, 2), pool_type="avg")
401
+ x = F.dropout(x, p=0.2, training=self.training)
402
+ x = self.conv_block6(x, pool_size=(1, 1), pool_type="avg")
403
+ x = F.dropout(x, p=0.2, training=self.training)
404
+ x = torch.mean(x, dim=3)
405
+
406
+ latent_x1 = F.max_pool1d(x, kernel_size=3, stride=1, padding=1)
407
+ latent_x2 = F.avg_pool1d(x, kernel_size=3, stride=1, padding=1)
408
+ latent_x = latent_x1 + latent_x2
409
+ latent_x = latent_x.transpose(1, 2)
410
+ latent_x = F.relu_(self.fc1(latent_x))
411
+ latent_output = interpolate(latent_x, 32)
412
+
413
+ (x1, _) = torch.max(x, dim=2)
414
+ x2 = torch.mean(x, dim=2)
415
+ x = x1 + x2
416
+ x = F.dropout(x, p=0.5, training=self.training)
417
+ x = F.relu_(self.fc1(x))
418
+ embedding = F.dropout(x, p=0.5, training=self.training)
419
+ clipwise_output = torch.sigmoid(self.fc_audioset(x))
420
+
421
+ output_dict = {
422
+ "clipwise_output": clipwise_output,
423
+ "embedding": embedding,
424
+ "fine_grained_embedding": latent_output,
425
+ }
426
+ return output_dict
427
+
428
+
429
+ class Cnn6(nn.Module):
430
+ def __init__(
431
+ self,
432
+ sample_rate,
433
+ window_size,
434
+ hop_size,
435
+ mel_bins,
436
+ fmin,
437
+ fmax,
438
+ classes_num,
439
+ enable_fusion=False,
440
+ fusion_type="None",
441
+ ):
442
+
443
+ super(Cnn6, self).__init__()
444
+
445
+ window = "hann"
446
+ center = True
447
+ pad_mode = "reflect"
448
+ ref = 1.0
449
+ amin = 1e-10
450
+ top_db = None
451
+
452
+ self.enable_fusion = enable_fusion
453
+ self.fusion_type = fusion_type
454
+
455
+ # Spectrogram extractor
456
+ self.spectrogram_extractor = Spectrogram(
457
+ n_fft=window_size,
458
+ hop_length=hop_size,
459
+ win_length=window_size,
460
+ window=window,
461
+ center=center,
462
+ pad_mode=pad_mode,
463
+ freeze_parameters=True,
464
+ )
465
+
466
+ # Logmel feature extractor
467
+ self.logmel_extractor = LogmelFilterBank(
468
+ sr=sample_rate,
469
+ n_fft=window_size,
470
+ n_mels=mel_bins,
471
+ fmin=fmin,
472
+ fmax=fmax,
473
+ ref=ref,
474
+ amin=amin,
475
+ top_db=top_db,
476
+ freeze_parameters=True,
477
+ )
478
+
479
+ # Spec augmenter
480
+ self.spec_augmenter = SpecAugmentation(
481
+ time_drop_width=64,
482
+ time_stripes_num=2,
483
+ freq_drop_width=8,
484
+ freq_stripes_num=2,
485
+ )
486
+
487
+ self.bn0 = nn.BatchNorm2d(64)
488
+
489
+ self.conv_block1 = ConvBlock5x5(in_channels=1, out_channels=64)
490
+ self.conv_block2 = ConvBlock5x5(in_channels=64, out_channels=128)
491
+ self.conv_block3 = ConvBlock5x5(in_channels=128, out_channels=256)
492
+ self.conv_block4 = ConvBlock5x5(in_channels=256, out_channels=512)
493
+
494
+ self.fc1 = nn.Linear(512, 512, bias=True)
495
+ self.fc_audioset = nn.Linear(512, classes_num, bias=True)
496
+
497
+ self.init_weight()
498
+
499
+ def init_weight(self):
500
+ init_bn(self.bn0)
501
+ init_layer(self.fc1)
502
+ init_layer(self.fc_audioset)
503
+
504
+ def forward(self, input, mixup_lambda=None, device=None):
505
+ """
506
+ Input: (batch_size, data_length)"""
507
+
508
+ x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
509
+ x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)
510
+
511
+ x = x.transpose(1, 3)
512
+ x = self.bn0(x)
513
+ x = x.transpose(1, 3)
514
+
515
+ if self.training:
516
+ x = self.spec_augmenter(x)
517
+
518
+ # Mixup on spectrogram
519
+ if self.training and mixup_lambda is not None:
520
+ x = do_mixup(x, mixup_lambda)
521
+
522
+ x = self.conv_block1(x, pool_size=(2, 2), pool_type="avg")
523
+ x = F.dropout(x, p=0.2, training=self.training)
524
+ x = self.conv_block2(x, pool_size=(2, 2), pool_type="avg")
525
+ x = F.dropout(x, p=0.2, training=self.training)
526
+ x = self.conv_block3(x, pool_size=(2, 2), pool_type="avg")
527
+ x = F.dropout(x, p=0.2, training=self.training)
528
+ x = self.conv_block4(x, pool_size=(2, 2), pool_type="avg")
529
+ x = F.dropout(x, p=0.2, training=self.training)
530
+ x = torch.mean(x, dim=3)
531
+
532
+ latent_x1 = F.max_pool1d(x, kernel_size=3, stride=1, padding=1)
533
+ latent_x2 = F.avg_pool1d(x, kernel_size=3, stride=1, padding=1)
534
+ latent_x = latent_x1 + latent_x2
535
+ latent_x = latent_x.transpose(1, 2)
536
+ latent_x = F.relu_(self.fc1(latent_x))
537
+ latent_output = interpolate(latent_x, 16)
538
+
539
+ (x1, _) = torch.max(x, dim=2)
540
+ x2 = torch.mean(x, dim=2)
541
+ x = x1 + x2
542
+ x = F.dropout(x, p=0.5, training=self.training)
543
+ x = F.relu_(self.fc1(x))
544
+ embedding = F.dropout(x, p=0.5, training=self.training)
545
+ clipwise_output = torch.sigmoid(self.fc_audioset(x))
546
+
547
+ output_dict = {
548
+ "clipwise_output": clipwise_output,
549
+ "embedding": embedding,
550
+ "fine_grained_embedding": latent_output,
551
+ }
552
+
553
+ return output_dict
554
+
555
+
556
+ class Cnn10(nn.Module):
557
+ def __init__(
558
+ self,
559
+ sample_rate,
560
+ window_size,
561
+ hop_size,
562
+ mel_bins,
563
+ fmin,
564
+ fmax,
565
+ classes_num,
566
+ enable_fusion=False,
567
+ fusion_type="None",
568
+ ):
569
+
570
+ super(Cnn10, self).__init__()
571
+
572
+ window = "hann"
573
+ center = True
574
+ pad_mode = "reflect"
575
+ ref = 1.0
576
+ amin = 1e-10
577
+ top_db = None
578
+
579
+ self.enable_fusion = enable_fusion
580
+ self.fusion_type = fusion_type
581
+
582
+ # Spectrogram extractor
583
+ self.spectrogram_extractor = Spectrogram(
584
+ n_fft=window_size,
585
+ hop_length=hop_size,
586
+ win_length=window_size,
587
+ window=window,
588
+ center=center,
589
+ pad_mode=pad_mode,
590
+ freeze_parameters=True,
591
+ )
592
+
593
+ # Logmel feature extractor
594
+ self.logmel_extractor = LogmelFilterBank(
595
+ sr=sample_rate,
596
+ n_fft=window_size,
597
+ n_mels=mel_bins,
598
+ fmin=fmin,
599
+ fmax=fmax,
600
+ ref=ref,
601
+ amin=amin,
602
+ top_db=top_db,
603
+ freeze_parameters=True,
604
+ )
605
+
606
+ # Spec augmenter
607
+ self.spec_augmenter = SpecAugmentation(
608
+ time_drop_width=64,
609
+ time_stripes_num=2,
610
+ freq_drop_width=8,
611
+ freq_stripes_num=2,
612
+ )
613
+
614
+ self.bn0 = nn.BatchNorm2d(64)
615
+
616
+ self.conv_block1 = ConvBlock(in_channels=1, out_channels=64)
617
+ self.conv_block2 = ConvBlock(in_channels=64, out_channels=128)
618
+ self.conv_block3 = ConvBlock(in_channels=128, out_channels=256)
619
+ self.conv_block4 = ConvBlock(in_channels=256, out_channels=512)
620
+ self.conv_block5 = ConvBlock(in_channels=512, out_channels=1024)
621
+
622
+ self.fc1 = nn.Linear(1024, 1024, bias=True)
623
+ self.fc_audioset = nn.Linear(1024, classes_num, bias=True)
624
+
625
+ self.init_weight()
626
+
627
+ def init_weight(self):
628
+ init_bn(self.bn0)
629
+ init_layer(self.fc1)
630
+ init_layer(self.fc_audioset)
631
+
632
+ def forward(self, input, mixup_lambda=None, device=None):
633
+ """
634
+ Input: (batch_size, data_length)"""
635
+
636
+ x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
637
+ x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)
638
+
639
+ x = x.transpose(1, 3)
640
+ x = self.bn0(x)
641
+ x = x.transpose(1, 3)
642
+
643
+ if self.training:
644
+ x = self.spec_augmenter(x)
645
+
646
+ # Mixup on spectrogram
647
+ if self.training and mixup_lambda is not None:
648
+ x = do_mixup(x, mixup_lambda)
649
+
650
+ x = self.conv_block1(x, pool_size=(2, 2), pool_type="avg")
651
+ x = F.dropout(x, p=0.2, training=self.training)
652
+ x = self.conv_block2(x, pool_size=(2, 2), pool_type="avg")
653
+ x = F.dropout(x, p=0.2, training=self.training)
654
+ x = self.conv_block3(x, pool_size=(2, 2), pool_type="avg")
655
+ x = F.dropout(x, p=0.2, training=self.training)
656
+ x = self.conv_block4(x, pool_size=(2, 2), pool_type="avg")
657
+ x = F.dropout(x, p=0.2, training=self.training)
658
+ x = self.conv_block5(x, pool_size=(2, 2), pool_type="avg")
659
+ x = F.dropout(x, p=0.2, training=self.training)
660
+ x = torch.mean(x, dim=3)
661
+
662
+ latent_x1 = F.max_pool1d(x, kernel_size=3, stride=1, padding=1)
663
+ latent_x2 = F.avg_pool1d(x, kernel_size=3, stride=1, padding=1)
664
+ latent_x = latent_x1 + latent_x2
665
+ latent_x = latent_x.transpose(1, 2)
666
+ latent_x = F.relu_(self.fc1(latent_x))
667
+ latent_output = interpolate(latent_x, 32)
668
+
669
+ (x1, _) = torch.max(x, dim=2)
670
+ x2 = torch.mean(x, dim=2)
671
+ x = x1 + x2
672
+ x = F.dropout(x, p=0.5, training=self.training)
673
+ x = F.relu_(self.fc1(x))
674
+ embedding = F.dropout(x, p=0.5, training=self.training)
675
+ clipwise_output = torch.sigmoid(self.fc_audioset(x))
676
+
677
+ output_dict = {
678
+ "clipwise_output": clipwise_output,
679
+ "embedding": embedding,
680
+ "fine_grained_embedding": latent_output,
681
+ }
682
+
683
+ return output_dict
684
+
685
+
686
+ def create_pann_model(audio_cfg, enable_fusion=False, fusion_type="None"):
687
+ try:
688
+ ModelProto = eval(audio_cfg.model_name)
689
+ model = ModelProto(
690
+ sample_rate=audio_cfg.sample_rate,
691
+ window_size=audio_cfg.window_size,
692
+ hop_size=audio_cfg.hop_size,
693
+ mel_bins=audio_cfg.mel_bins,
694
+ fmin=audio_cfg.fmin,
695
+ fmax=audio_cfg.fmax,
696
+ classes_num=audio_cfg.class_num,
697
+ enable_fusion=enable_fusion,
698
+ fusion_type=fusion_type,
699
+ )
700
+ return model
701
+ except Exception:
702
+ raise RuntimeError(
703
+ f"Model class {audio_cfg.model_name} not found, or the audio_cfg parameters are incomplete."
704
+ )
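create_pann_model resolves the class via eval(audio_cfg.model_name); an explicit registry gives the same dispatch without evaluating config strings. The sketch below is illustrative and not part of this commit.

# Registry-based alternative to the eval() dispatch in create_pann_model (illustrative).
_PANN_MODELS = {"Cnn6": Cnn6, "Cnn10": Cnn10, "Cnn14": Cnn14}

def create_pann_model_safe(audio_cfg, enable_fusion=False, fusion_type="None"):
    if audio_cfg.model_name not in _PANN_MODELS:
        raise RuntimeError(f"PANN model {audio_cfg.model_name} not found.")
    ModelProto = _PANN_MODELS[audio_cfg.model_name]  # look up the class instead of eval()
    return ModelProto(
        sample_rate=audio_cfg.sample_rate,
        window_size=audio_cfg.window_size,
        hop_size=audio_cfg.hop_size,
        mel_bins=audio_cfg.mel_bins,
        fmin=audio_cfg.fmin,
        fmax=audio_cfg.fmax,
        classes_num=audio_cfg.class_num,
        enable_fusion=enable_fusion,
        fusion_type=fusion_type,
    )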
picoaudio/audioldm/clap/open_clip/pretrained.py ADDED
@@ -0,0 +1,169 @@
1
+ import hashlib
2
+ import os
3
+ import urllib
4
+ import warnings
5
+
6
+ from tqdm import tqdm
7
+
8
+ CACHE_DIR = os.getenv("AUDIOLDM_CACHE_DIR", "~/.cache")
9
+
10
+ _RN50 = dict(
11
+     openai="https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt",
+     yfcc15m="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn50-quickgelu-yfcc15m-455df137.pt",
+     cc12m="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn50-quickgelu-cc12m-f000538c.pt",
+ )
+
+ _RN50_quickgelu = dict(
+     openai="https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt",
+     yfcc15m="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn50-quickgelu-yfcc15m-455df137.pt",
+     cc12m="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn50-quickgelu-cc12m-f000538c.pt",
+ )
+
+ _RN101 = dict(
+     openai="https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt",
+     yfcc15m="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn101-quickgelu-yfcc15m-3e04b30e.pt",
+ )
+
+ _RN101_quickgelu = dict(
+     openai="https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt",
+     yfcc15m="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn101-quickgelu-yfcc15m-3e04b30e.pt",
+ )
+
+ _RN50x4 = dict(
+     openai="https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt",
+ )
+
+ _RN50x16 = dict(
+     openai="https://openaipublic.azureedge.net/clip/models/52378b407f34354e150460fe41077663dd5b39c54cd0bfd2b27167a4a06ec9aa/RN50x16.pt",
+ )
+
+ _RN50x64 = dict(
+     openai="https://openaipublic.azureedge.net/clip/models/be1cfb55d75a9666199fb2206c106743da0f6468c9d327f3e0d0a543a9919d9c/RN50x64.pt",
+ )
+
+ _VITB32 = dict(
+     openai="https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
+     laion400m_e31="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e31-d867053b.pt",
+     laion400m_e32="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e32-46683a32.pt",
+     laion400m_avg="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_avg-8a00ab3c.pt",
+ )
+
+ _VITB32_quickgelu = dict(
+     openai="https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
+     laion400m_e31="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e31-d867053b.pt",
+     laion400m_e32="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e32-46683a32.pt",
+     laion400m_avg="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_avg-8a00ab3c.pt",
+ )
+
+ _VITB16 = dict(
+     openai="https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt",
+ )
+
+ _VITL14 = dict(
+     openai="https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt",
+ )
+
+ _PRETRAINED = {
+     "RN50": _RN50,
+     "RN50-quickgelu": _RN50_quickgelu,
+     "RN101": _RN101,
+     "RN101-quickgelu": _RN101_quickgelu,
+     "RN50x4": _RN50x4,
+     "RN50x16": _RN50x16,
+     "ViT-B-32": _VITB32,
+     "ViT-B-32-quickgelu": _VITB32_quickgelu,
+     "ViT-B-16": _VITB16,
+     "ViT-L-14": _VITL14,
+ }
+
+
+ def list_pretrained(as_str: bool = False):
+     """Return the list of pretrained models.
+     Returns tuples (model_name, pretrain_tag) by default, or 'name:tag' strings if as_str == True.
+     """
+     return [
+         ":".join([k, t]) if as_str else (k, t)
+         for k in _PRETRAINED.keys()
+         for t in _PRETRAINED[k].keys()
+     ]
+
+
+ def list_pretrained_tag_models(tag: str):
+     """Return all models having the specified pretrain tag."""
+     models = []
+     for k in _PRETRAINED.keys():
+         if tag in _PRETRAINED[k]:
+             models.append(k)
+     return models
+
+
+ def list_pretrained_model_tags(model: str):
+     """Return all pretrain tags for the specified model architecture."""
+     tags = []
+     if model in _PRETRAINED:
+         tags.extend(_PRETRAINED[model].keys())
+     return tags
+
+
+ def get_pretrained_url(model: str, tag: str):
+     if model not in _PRETRAINED:
+         return ""
+     model_pretrained = _PRETRAINED[model]
+     if tag not in model_pretrained:
+         return ""
+     return model_pretrained[tag]
+
+
+ def download_pretrained(url: str, root: str = os.path.expanduser(f"{CACHE_DIR}/clip")):
+     os.makedirs(root, exist_ok=True)
+     filename = os.path.basename(url)
+
+     # OpenAI checkpoint URLs embed the expected SHA256 as a path component.
+     if "openaipublic" in url:
+         expected_sha256 = url.split("/")[-2]
+     else:
+         expected_sha256 = ""
+
+     download_target = os.path.join(root, filename)
+
+     if os.path.exists(download_target) and not os.path.isfile(download_target):
+         raise RuntimeError(f"{download_target} exists and is not a regular file")
+
+     if os.path.isfile(download_target):
+         if expected_sha256:
+             if (
+                 hashlib.sha256(open(download_target, "rb").read()).hexdigest()
+                 == expected_sha256
+             ):
+                 return download_target
+             else:
+                 warnings.warn(
+                     f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file"
+                 )
+         else:
+             return download_target
+
+     with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
+         with tqdm(
+             total=int(source.info().get("Content-Length")),
+             ncols=80,
+             unit="iB",
+             unit_scale=True,
+         ) as loop:
+             while True:
+                 buffer = source.read(8192)
+                 if not buffer:
+                     break
+
+                 output.write(buffer)
+                 loop.update(len(buffer))
+
+     if (
+         expected_sha256
+         and hashlib.sha256(open(download_target, "rb").read()).hexdigest()
+         != expected_sha256
+     ):
+         raise RuntimeError(
+             "Model has been downloaded but the SHA256 checksum does not match"
+         )
+
+     return download_target
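A quick usage sketch of the registry above (a minimal example, assuming the package is importable under the `audioldm.clap.open_clip` path shown in this commit):

    from audioldm.clap.open_clip.pretrained import (
        list_pretrained, get_pretrained_url, download_pretrained,
    )

    # Enumerate every (architecture, pretrain-tag) pair in _PRETRAINED.
    for name, tag in list_pretrained():
        print(name, tag)

    # Resolve a checkpoint URL and fetch it into the local cache; for
    # OpenAI URLs the embedded SHA256 is verified after download.
    url = get_pretrained_url("RN50", "openai")
    if url:
        ckpt_path = download_pretrained(url)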
picoaudio/audioldm/clap/open_clip/timm_model.py ADDED
@@ -0,0 +1,112 @@
+ """ timm model adapter
+
+ Wraps timm (https://github.com/rwightman/pytorch-image-models) models for use as a vision tower in CLIP model.
+ """
+ from collections import OrderedDict
+
+ import torch.nn as nn
+
+ try:
+     import timm
+     from timm.models.layers import Mlp, to_2tuple
+     from timm.models.layers.attention_pool2d import RotAttentionPool2d
+     from timm.models.layers.attention_pool2d import (
+         AttentionPool2d as AbsAttentionPool2d,
+     )
+ except ImportError:
+     timm = None
+
+ from .utils import freeze_batch_norm_2d
+
+
+ class TimmModel(nn.Module):
+     """timm model adapter
+     # FIXME this adapter is a work in progress, may change in ways that break weight compat
+     """
+
+     def __init__(
+         self,
+         model_name,
+         embed_dim,
+         image_size=224,
+         pool="avg",
+         proj="linear",
+         drop=0.0,
+         pretrained=False,
+     ):
+         super().__init__()
+         if timm is None:
+             raise RuntimeError("Please `pip install timm` to use timm models.")
+
+         self.image_size = to_2tuple(image_size)
+         self.trunk = timm.create_model(model_name, pretrained=pretrained)
+         feat_size = self.trunk.default_cfg.get("pool_size", None)
+         feature_ndim = 1 if not feat_size else 2
+         if pool in ("abs_attn", "rot_attn"):
+             assert feature_ndim == 2
+             # if attn pooling used, remove both classifier and default pool
+             self.trunk.reset_classifier(0, global_pool="")
+         else:
+             # reset global pool if pool config set, otherwise leave as network default
+             reset_kwargs = dict(global_pool=pool) if pool else {}
+             self.trunk.reset_classifier(0, **reset_kwargs)
+         prev_chs = self.trunk.num_features
+
+         head_layers = OrderedDict()
+         if pool == "abs_attn":
+             head_layers["pool"] = AbsAttentionPool2d(
+                 prev_chs, feat_size=feat_size, out_features=embed_dim
+             )
+             prev_chs = embed_dim
+         elif pool == "rot_attn":
+             head_layers["pool"] = RotAttentionPool2d(prev_chs, out_features=embed_dim)
+             prev_chs = embed_dim
+         else:
+             assert proj, "projection layer needed if non-attention pooling is used."
+
+         # NOTE attention pool ends with a projection layer, so proj should usually be set to '' if such pooling is used
+         if proj == "linear":
+             head_layers["drop"] = nn.Dropout(drop)
+             head_layers["proj"] = nn.Linear(prev_chs, embed_dim)
+         elif proj == "mlp":
+             head_layers["mlp"] = Mlp(prev_chs, 2 * embed_dim, embed_dim, drop=drop)
+
+         self.head = nn.Sequential(head_layers)
+
+     def lock(self, unlocked_groups=0, freeze_bn_stats=False):
+         """lock modules
+         Args:
+             unlocked_groups (int): leave last n layer groups unlocked (default: 0)
+         """
+         if not unlocked_groups:
+             # lock full model
+             for param in self.trunk.parameters():
+                 param.requires_grad = False
+             if freeze_bn_stats:
+                 freeze_batch_norm_2d(self.trunk)
+         else:
+             # NOTE: partial freeze requires latest timm (master) branch and is subject to change
+             try:
+                 # FIXME import here until API stable and in an official release
+                 from timm.models.helpers import group_parameters, group_modules
+             except ImportError:
+                 raise RuntimeError(
+                     "Please install latest timm `pip install git+https://github.com/rwightman/pytorch-image-models`"
+                 )
+             matcher = self.trunk.group_matcher()
+             gparams = group_parameters(self.trunk, matcher)
+             max_layer_id = max(gparams.keys())
+             max_layer_id = max_layer_id - unlocked_groups
+             for group_idx in range(max_layer_id + 1):
+                 group = gparams[group_idx]
+                 for param in group:
+                     self.trunk.get_parameter(param).requires_grad = False
+             if freeze_bn_stats:
+                 gmodules = group_modules(self.trunk, matcher, reverse=True)
+                 gmodules = {k for k, v in gmodules.items() if v <= max_layer_id}
+                 freeze_batch_norm_2d(self.trunk, gmodules)
+
+     def forward(self, x):
+         x = self.trunk(x)
+         x = self.head(x)
+         return x
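A minimal sketch of driving the adapter above; "resnet50" is only an illustrative timm architecture name, and timm must be installed:

    import torch
    from audioldm.clap.open_clip.timm_model import TimmModel

    # Global average pooling plus a linear projection down to embed_dim.
    model = TimmModel("resnet50", embed_dim=512, image_size=224, pool="avg", proj="linear")
    feats = model(torch.randn(2, 3, 224, 224))  # -> shape (2, 512)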
picoaudio/audioldm/clap/open_clip/tokenizer.py ADDED
@@ -0,0 +1,197 @@
+ """ CLIP tokenizer
+
+ Copied from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
+ """
+ import gzip
+ import html
+ import os
+ from functools import lru_cache
+ from typing import Union, List
+
+ import ftfy
+ import regex as re
+ import torch
+
+
+ @lru_cache()
+ def default_bpe():
+     return os.path.join(
+         os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz"
+     )
+
+
+ @lru_cache()
+ def bytes_to_unicode():
+     """
+     Returns a mapping between utf-8 bytes and corresponding unicode strings.
+     The reversible bpe codes work on unicode strings.
+     This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
+     When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
+     This is a significant percentage of your normal, say, 32K bpe vocab.
+     To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
+     This also avoids mapping to whitespace/control characters that the bpe code barfs on.
+     """
+     bs = (
+         list(range(ord("!"), ord("~") + 1))
+         + list(range(ord("¡"), ord("¬") + 1))
+         + list(range(ord("®"), ord("ÿ") + 1))
+     )
+     cs = bs[:]
+     n = 0
+     for b in range(2**8):
+         if b not in bs:
+             bs.append(b)
+             cs.append(2**8 + n)
+             n += 1
+     cs = [chr(n) for n in cs]
+     return dict(zip(bs, cs))
+
+
+ def get_pairs(word):
+     """Return set of symbol pairs in a word.
+     Word is represented as tuple of symbols (symbols being variable-length strings).
+     """
+     pairs = set()
+     prev_char = word[0]
+     for char in word[1:]:
+         pairs.add((prev_char, char))
+         prev_char = char
+     return pairs
+
+
+ def basic_clean(text):
+     text = ftfy.fix_text(text)
+     text = html.unescape(html.unescape(text))
+     return text.strip()
+
+
+ def whitespace_clean(text):
+     text = re.sub(r"\s+", " ", text)
+     text = text.strip()
+     return text
+
+
+ class SimpleTokenizer(object):
+     def __init__(self, bpe_path: str = default_bpe(), special_tokens=None):
+         self.byte_encoder = bytes_to_unicode()
+         self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
+         merges = gzip.open(bpe_path).read().decode("utf-8").split("\n")
+         merges = merges[1 : 49152 - 256 - 2 + 1]
+         merges = [tuple(merge.split()) for merge in merges]
+         vocab = list(bytes_to_unicode().values())
+         vocab = vocab + [v + "</w>" for v in vocab]
+         for merge in merges:
+             vocab.append("".join(merge))
+         if not special_tokens:
+             special_tokens = ["<start_of_text>", "<end_of_text>"]
+         else:
+             special_tokens = ["<start_of_text>", "<end_of_text>"] + special_tokens
+         vocab.extend(special_tokens)
+         self.encoder = dict(zip(vocab, range(len(vocab))))
+         self.decoder = {v: k for k, v in self.encoder.items()}
+         self.bpe_ranks = dict(zip(merges, range(len(merges))))
+         self.cache = {t: t for t in special_tokens}
+         special = "|".join(special_tokens)
+         self.pat = re.compile(
+             special + r"""|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""",
+             re.IGNORECASE,
+         )
+
+         self.vocab_size = len(self.encoder)
+         self.all_special_ids = [self.encoder[t] for t in special_tokens]
+
+     def bpe(self, token):
+         if token in self.cache:
+             return self.cache[token]
+         word = tuple(token[:-1]) + (token[-1] + "</w>",)
+         pairs = get_pairs(word)
+
+         if not pairs:
+             return token + "</w>"
+
+         while True:
+             bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
+             if bigram not in self.bpe_ranks:
+                 break
+             first, second = bigram
+             new_word = []
+             i = 0
+             while i < len(word):
+                 try:
+                     j = word.index(first, i)
+                     new_word.extend(word[i:j])
+                     i = j
+                 except ValueError:
+                     new_word.extend(word[i:])
+                     break
+
+                 if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
+                     new_word.append(first + second)
+                     i += 2
+                 else:
+                     new_word.append(word[i])
+                     i += 1
+             new_word = tuple(new_word)
+             word = new_word
+             if len(word) == 1:
+                 break
+             else:
+                 pairs = get_pairs(word)
+         word = " ".join(word)
+         self.cache[token] = word
+         return word
+
+     def encode(self, text):
+         bpe_tokens = []
+         text = whitespace_clean(basic_clean(text)).lower()
+         for token in re.findall(self.pat, text):
+             token = "".join(self.byte_encoder[b] for b in token.encode("utf-8"))
+             bpe_tokens.extend(
+                 self.encoder[bpe_token] for bpe_token in self.bpe(token).split(" ")
+             )
+         return bpe_tokens
+
+     def decode(self, tokens):
+         text = "".join([self.decoder[token] for token in tokens])
+         text = (
+             bytearray([self.byte_decoder[c] for c in text])
+             .decode("utf-8", errors="replace")
+             .replace("</w>", " ")
+         )
+         return text
+
+
+ _tokenizer = SimpleTokenizer()
+
+
+ def tokenize(
+     texts: Union[str, List[str]], context_length: int = 77
+ ) -> torch.LongTensor:
+     """
+     Returns the tokenized representation of given input string(s)
+
+     Parameters
+     ----------
+     texts : Union[str, List[str]]
+         An input string or a list of input strings to tokenize
+     context_length : int
+         The context length to use; all CLIP models use 77 as the context length
+
+     Returns
+     -------
+     A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]
+     """
+     if isinstance(texts, str):
+         texts = [texts]
+
+     sot_token = _tokenizer.encoder["<start_of_text>"]
+     eot_token = _tokenizer.encoder["<end_of_text>"]
+     all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
+     result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
+
+     for i, tokens in enumerate(all_tokens):
+         if len(tokens) > context_length:
+             tokens = tokens[:context_length]  # Truncate
+         result[i, : len(tokens)] = torch.tensor(tokens)
+
+     return result
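A short sketch of the tokenizer entry point above; the caption strings are made up for illustration:

    from audioldm.clap.open_clip.tokenizer import tokenize

    tokens = tokenize(["a dog barking", "rain on a tin roof"])
    print(tokens.shape)  # torch.Size([2, 77]) -- zero-padded, truncated at 77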
picoaudio/audioldm/clap/open_clip/transform.py ADDED
@@ -0,0 +1,45 @@
+ from torchvision.transforms import (
+     Normalize,
+     Compose,
+     RandomResizedCrop,
+     InterpolationMode,
+     ToTensor,
+     Resize,
+     CenterCrop,
+ )
+
+
+ def _convert_to_rgb(image):
+     return image.convert("RGB")
+
+
+ def image_transform(
+     image_size: int,
+     is_train: bool,
+     mean=(0.48145466, 0.4578275, 0.40821073),
+     std=(0.26862954, 0.26130258, 0.27577711),
+ ):
+     normalize = Normalize(mean=mean, std=std)
+     if is_train:
+         return Compose(
+             [
+                 RandomResizedCrop(
+                     image_size,
+                     scale=(0.9, 1.0),
+                     interpolation=InterpolationMode.BICUBIC,
+                 ),
+                 _convert_to_rgb,
+                 ToTensor(),
+                 normalize,
+             ]
+         )
+     else:
+         return Compose(
+             [
+                 Resize(image_size, interpolation=InterpolationMode.BICUBIC),
+                 CenterCrop(image_size),
+                 _convert_to_rgb,
+                 ToTensor(),
+                 normalize,
+             ]
+         )
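A minimal sketch of the preprocessing pipeline above, assuming Pillow is available; the file name is hypothetical:

    from PIL import Image
    from audioldm.clap.open_clip.transform import image_transform

    preprocess = image_transform(224, is_train=False)  # Resize -> CenterCrop -> ToTensor -> Normalize
    # img = Image.open("example.jpg")   # hypothetical input image
    # x = preprocess(img)               # tensor of shape (3, 224, 224)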
picoaudio/audioldm/clap/open_clip/utils.py ADDED
@@ -0,0 +1,362 @@
+ import numpy as np
+ import torch
+ from torch import nn as nn
+ from torchvision.ops.misc import FrozenBatchNorm2d
+ import logging
+
+ # import h5py
+ from tqdm import tqdm
+ import random
+ import json
+ import os
+ import pathlib
+
+ # TODO: (yusong) this is not a good place to store this information and it does not scale. Needs to be fixed later.
+ dataset_split = {
+     "audiocaps": ["train", "valid", "test"],
+     "audioset": ["balanced_train", "unbalanced_train", "eval"],
+     "BBCSoundEffects": ["train", "test"],
+     "Clotho": ["train", "test", "valid"],
+     "free_to_use_sounds": ["train", "test"],
+     "paramount_motion": ["train", "test"],
+     "sonniss_game_effects": ["train", "test"],
+     "wesoundeffects": ["train", "test"],
+     "MACS": ["train", "test"],
+     "freesound": ["train", "test"],
+     "FSD50K": ["train", "test", "valid"],
+     "fsd50k_class_label": ["train", "test", "valid"],
+     "esc50": ["train", "test"],
+     "audiostock": ["train", "test"],
+     "freesound_no_overlap_noesc50": ["train", "test"],
+     "epidemic_sound_effects": ["train", "test"],
+     "VGGSound": ["train", "test"],
+     "urbansound8k_class_label": ["train", "test"],
+     "audioset_t5": ["balanced_train", "unbalanced_train", "eval"],
+     "epidemic_sound_effects_t5": ["train", "test"],
+     "WavText5K": ["train", "test"],
+     "esc50_no_overlap": ["train", "test"],
+     "usd8k_no_overlap": ["train", "test"],
+     "fsd50k_200_class_label": ["train", "test", "valid"],
+ }
+
+
+ def freeze_batch_norm_2d(module, module_match={}, name=""):
+     """
+     Converts all `BatchNorm2d` and `SyncBatchNorm` layers of provided module into `FrozenBatchNorm2d`. If `module` is
+     itself an instance of either `BatchNorm2d` or `SyncBatchNorm`, it is converted into `FrozenBatchNorm2d` and
+     returned. Otherwise, the module is walked recursively and submodules are converted in place.
+
+     Args:
+         module (torch.nn.Module): Any PyTorch module.
+         module_match (dict): Dictionary of full module names to freeze (all if empty)
+         name (str): Full module name (prefix)
+
+     Returns:
+         torch.nn.Module: Resulting module
+
+     Inspired by https://github.com/pytorch/pytorch/blob/a5895f85be0f10212791145bfedc0261d364f103/torch/nn/modules/batchnorm.py#L762
+     """
+     res = module
+     is_match = True
+     if module_match:
+         is_match = name in module_match
+     if is_match and isinstance(
+         module, (nn.modules.batchnorm.BatchNorm2d, nn.modules.batchnorm.SyncBatchNorm)
+     ):
+         res = FrozenBatchNorm2d(module.num_features)
+         res.num_features = module.num_features
+         res.affine = module.affine
+         if module.affine:
+             res.weight.data = module.weight.data.clone().detach()
+             res.bias.data = module.bias.data.clone().detach()
+         res.running_mean.data = module.running_mean.data
+         res.running_var.data = module.running_var.data
+         res.eps = module.eps
+     else:
+         for child_name, child in module.named_children():
+             full_child_name = ".".join([name, child_name]) if name else child_name
+             new_child = freeze_batch_norm_2d(child, module_match, full_child_name)
+             if new_child is not child:
+                 res.add_module(child_name, new_child)
+     return res
+
+
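A minimal sketch of freezing batch norm in place with the helper above:

    import torch.nn as nn

    net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
    net = freeze_batch_norm_2d(net)
    print(type(net[1]).__name__)  # FrozenBatchNorm2d -- stats and affine params are now fixed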
+ def exist(dataset_name, dataset_type):
+     """
+     Check whether the given split exists for the dataset.
+     """
+     return dataset_type in dataset_split[dataset_name]
+
+
+ def get_tar_path_from_dataset_name(
+     dataset_names, dataset_types, islocal, dataset_path, proportion=1, full_dataset=None
+ ):
+     """
+     Get tar paths from dataset names and split types.
+     """
+     output = []
+     for n in dataset_names:
+         if full_dataset is not None and n in full_dataset:
+             current_dataset_types = dataset_split[n]
+         else:
+             current_dataset_types = dataset_types
+         for s in current_dataset_types:
+             tmp = []
+             if islocal:
+                 sizefilepath_ = f"{dataset_path}/{n}/{s}/sizes.json"
+                 if not os.path.exists(sizefilepath_):
+                     sizefilepath_ = f"./json_files/{n}/{s}/sizes.json"
+             else:
+                 sizefilepath_ = f"./json_files/{n}/{s}/sizes.json"
+             if not os.path.exists(sizefilepath_):
+                 continue
+             sizes = json.load(open(sizefilepath_, "r"))
+             for k in sizes.keys():
+                 if islocal:
+                     tmp.append(f"{dataset_path}/{n}/{s}/{k}")
+                 else:
+                     tmp.append(
+                         f"pipe:aws s3 --cli-connect-timeout 0 cp s3://s-laion-audio/webdataset_tar/{n}/{s}/{k} -"
+                     )
+             if proportion != 1:
+                 tmp = random.sample(tmp, int(proportion * len(tmp)))
+             output.append(tmp)
+     return sum(output, [])
+
+
+ def get_tar_path_from_txts(txt_path, islocal, proportion=1):
+     """
+     Get tar paths from a txt file (or a list of txt files).
+     """
+     if isinstance(txt_path, (list, tuple)):
+         return sum(
+             [
+                 get_tar_path_from_txts(
+                     txt_path[i], islocal=islocal, proportion=proportion
+                 )
+                 for i in range(len(txt_path))
+             ],
+             [],
+         )
+     if isinstance(txt_path, str):
+         with open(txt_path) as f:
+             lines = f.readlines()
+         if islocal:
+             lines = [
+                 lines[i]
+                 .split("\n")[0]
+                 .replace("pipe:aws s3 cp s3://s-laion-audio/", "/mnt/audio_clip/")
+                 for i in range(len(lines))
+             ]
+         else:
+             lines = [
+                 lines[i].split("\n")[0].replace(".tar", ".tar -")
+                 for i in range(len(lines))
+             ]
+         if proportion != 1:
+             print("Sampling tars with proportion of {}".format(proportion))
+             lines = random.sample(lines, int(proportion * len(lines)))
+         return lines
+
+
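A hedged illustration of the tar-path helper above; the dataset name, split, and local root are hypothetical and only show the call shape (an empty list comes back if no sizes.json is found):

    tars = get_tar_path_from_dataset_name(
        ["audiocaps"], ["train"],
        islocal=True, dataset_path="/data/webdataset_tar",  # hypothetical root
        proportion=0.5,
    )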
+ def get_mix_lambda(mixup_alpha, batch_size):
+     mixup_lambdas = [
+         np.random.beta(mixup_alpha, mixup_alpha, 1)[0] for _ in range(batch_size)
+     ]
+     return np.array(mixup_lambdas).astype(np.float32)
+
+
+ def do_mixup(x, mixup_lambda):
+     """
+     Args:
+         x: (batch_size, ...)
+         mixup_lambda: (batch_size,)
+     Returns:
+         out: (batch_size, ...)
+     """
+     out = (
+         x.transpose(0, -1) * mixup_lambda
+         + torch.flip(x, dims=[0]).transpose(0, -1) * (1 - mixup_lambda)
+     ).transpose(0, -1)
+     return out
+
+
+ def interpolate(x, ratio):
+     """Interpolate data in the time domain. This is used to compensate for the
+     resolution reduction caused by downsampling in a CNN.
+
+     Args:
+         x: (batch_size, time_steps, classes_num)
+         ratio: int, ratio to interpolate
+     Returns:
+         upsampled: (batch_size, time_steps * ratio, classes_num)
+     """
+     (batch_size, time_steps, classes_num) = x.shape
+     upsampled = x[:, :, None, :].repeat(1, 1, ratio, 1)
+     upsampled = upsampled.reshape(batch_size, time_steps * ratio, classes_num)
+     return upsampled
+
+
+ def pad_framewise_output(framewise_output, frames_num):
+     """Pad framewise_output to the same length as input frames. The pad value
+     is the same as the value of the last frame.
+     Args:
+         framewise_output: (batch_size, frames_num, classes_num)
+         frames_num: int, number of frames to pad to
+     Outputs:
+         output: (batch_size, frames_num, classes_num)
+     """
+     # Repeat the last frame to build the padding tensor.
+     pad = framewise_output[:, -1:, :].repeat(
+         1, frames_num - framewise_output.shape[1], 1
+     )
+
+     output = torch.cat((framewise_output, pad), dim=1)  # (batch_size, frames_num, classes_num)
+     return output
+
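A quick shape check for the mixup helpers above (a sketch, not part of the commit):

    import torch

    x = torch.randn(4, 10)                          # (batch_size, features)
    lam = torch.from_numpy(get_mix_lambda(0.5, 4))  # one lambda per example
    mixed = do_mixup(x, lam)                        # mixes x with its batch-flipped copy
    assert mixed.shape == x.shape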
+
+ # def process_ipc(index_path, classes_num, filename):
+ #     # load data
+ #     logging.info("Load Data...............")
+ #     ipc = [[] for _ in range(classes_num)]
+ #     with h5py.File(index_path, "r") as f:
+ #         for i in tqdm(range(len(f["target"]))):
+ #             t_class = np.where(f["target"][i])[0]
+ #             for t in t_class:
+ #                 ipc[t].append(i)
+ #     print(ipc)
+ #     np.save(filename, ipc)
+ #     logging.info("Load Data Succeed...............")
+
+
+ def save_to_dict(s, o_=None):
+     if o_ is None:
+         o_ = {}
+     sp = s.split(": ")
+     o_.update({sp[0]: float(sp[1])})
+     return o_
+
+
+ def get_data_from_log(txt_path):
+     """
+     Output dictionaries parsed from an out.txt log file.
+     """
+     with open(txt_path) as f:
+         lines = f.readlines()
+     val_data = {}
+     train_data = {}
+     train_losses = []
+     train_losses_epoch = []
+     for i in range(len(lines)):
+         if "| INFO |" in lines[i]:
+             if "Eval Epoch" in lines[i]:
+                 if "val_loss" in lines[i]:
+                     # float(regex.sub("", lines[310].split("\t")[-1]).replace(" ", ""))
+                     line = lines[i].split("Eval Epoch: ")[-1]
+                     num_epoch = int(line.split("\t")[0].split(" ")[0])
+                     d = {
+                         line.split("\t")[0]
+                         .split(" ")[1]
+                         .replace(":", ""): float(line.split("\t")[0].split(" ")[-1])
+                     }
+                     for j in range(1, len(line.split("\t"))):
+                         d = save_to_dict(line.split("\t")[j], d)
+                     val_data[num_epoch] = d
+             elif "Train Epoch" in lines[i]:
+                 num_epoch = int(lines[i].split("Train Epoch: ")[1][0])
+                 loss = float(lines[i].split("Loss: ")[-1].split(" (")[0])
+                 train_losses.append(loss)
+                 train_losses_epoch.append(num_epoch)
+     for i in range(len(train_losses)):
+         train_data[i] = {
+             "num_epoch": train_losses_epoch[i],
+             "train_loss": train_losses[i],
+         }
+     return train_data, val_data
+
+
+ def save_p(obj, filename):
+     import pickle
+
+     try:
+         from deepdiff import DeepDiff
+     except ImportError:
+         os.system("pip install deepdiff")
+         from deepdiff import DeepDiff
+     with open(filename, "wb") as file:
+         pickle.dump(obj, file, protocol=pickle.HIGHEST_PROTOCOL)  # highest protocol
+     with open(filename, "rb") as file:
+         z = pickle.load(file)
+     assert (
+         DeepDiff(obj, z, ignore_string_case=True) == {}
+     ), "there is something wrong with the saving process"
+     return
+
+
+ def load_p(filename):
+     import pickle
+
+     with open(filename, "rb") as file:
+         z = pickle.load(file)
+     return z
+
+
+ def save_json(data, name="data.json"):
+     import json
+
+     with open(name, "w") as fp:
+         json.dump(data, fp)
+     return
+
+
+ def load_json(name):
+     import json
+
+     with open(name, "r") as fp:
+         data = json.load(fp)
+     return data
+
+
+ from multiprocessing import Process, Manager, Value, Array
+ from ctypes import c_wchar
+
+
+ def load_class_label(path):
+     # https://stackoverflow.com/questions/48004243/how-to-share-large-read-only-dictionary-list-across-processes-in-multiprocessing
+     # https://stackoverflow.com/questions/45693949/storing-strings-in-a-multiprocessing-sharedctypes-array
+     out = None
+     if path is not None:
+         if pathlib.Path(path).suffix in [".pkl", ".pickle"]:
+             out = load_p(path)
+         elif pathlib.Path(path).suffix in [".json", ".txt"]:
+             out = load_json(path)
+         elif pathlib.Path(path).suffix in [".npy", ".npz"]:
+             out = np.load(path)
+         elif pathlib.Path(path).suffix in [".csv"]:
+             import pandas as pd
+
+             out = pd.read_csv(path)
+     return out
+     # if out is None:
+     #     return None
+     # else:
+     #     key = Array(c_wchar, '\n'.join(list(out.keys())), lock=False)
+     #     val = Array('i', out.values(), lock=False)
+     #     return (key, val)
+
+
+ from torch import optim
+
+
+ def get_optimizer(params, lr, betas, eps, momentum, optimizer_name):
+     if optimizer_name.lower() == "adamw":
+         optimizer = optim.AdamW(params, lr=lr, betas=betas, eps=eps)
+     elif optimizer_name.lower() == "sgd":
+         optimizer = optim.SGD(params, lr=lr, momentum=momentum)
+     elif optimizer_name.lower() == "adam":
+         optimizer = optim.Adam(params, lr=lr, betas=betas, eps=eps)
+     else:
+         raise ValueError("optimizer name is not correct")
+     return optimizer
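A minimal sketch of selecting an optimizer by name with the factory above:

    import torch.nn as nn

    params = nn.Linear(4, 2).parameters()
    opt = get_optimizer(params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
                        momentum=0.9, optimizer_name="adamw")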