JaesungHuh committed
Commit b11593d · 1 Parent(s): 8324298

First version

__pycache__/model.cpython-38.pyc ADDED
Binary file (5.48 kB).
 
app.py CHANGED
@@ -1,8 +1,17 @@
 import gradio as gr
+import torch
 from model import ECAPA_gender
 
-def greet(audio):
-    print(audio)
-
-demo = gr.Interface(fn=greet, inputs="audio", outputs="text")
+model = ECAPA_gender({"C": 1024})
+model.load_state_dict(torch.load("gender_classifier.model", map_location="cpu"))
+
+model.eval()
+
+def predict_gender(filepath):
+    with torch.no_grad():
+        output = model.predict(filepath)
+    return output
+
+audio_component = gr.Audio(type='filepath', label="Upload your audio file here")
+demo = gr.Interface(fn=predict_gender, inputs=audio_component, outputs="text")
 demo.launch()
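
Note: because gr.Audio(type='filepath') hands the callback a plain file path string, the same inference path can be exercised locally without launching the Gradio UI. A minimal sketch mirroring app.py above; the file name sample.wav is a placeholder and the printed label format is an assumption, not something fixed by this commit:

import torch
from model import ECAPA_gender

# Build the model exactly as app.py does.
model = ECAPA_gender({"C": 1024})
model.load_state_dict(torch.load("gender_classifier.model", map_location="cpu"))
model.eval()

# gr.Audio(type='filepath') passes a path string, so predict() is called on a path.
with torch.no_grad():
    print(model.predict("sample.wav"))  # "sample.wav" is a placeholder audio file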
gender_classifier.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e0e87253fe40aeef61b33e0fbe0859d9407e8bed92c8512cbaa28593b5f98e13
+size 62042234
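
The three added lines are a Git LFS pointer rather than the checkpoint itself: the real 62,042,234-byte file is stored in LFS and identified by the SHA-256 above. A rough sketch of checking a locally downloaded checkpoint against this pointer (the local file name is only an assumption):

import hashlib
import os

EXPECTED_OID = "e0e87253fe40aeef61b33e0fbe0859d9407e8bed92c8512cbaa28593b5f98e13"
EXPECTED_SIZE = 62042234

path = "gender_classifier.model"  # assumed local path to the downloaded checkpoint
assert os.path.getsize(path) == EXPECTED_SIZE, "size does not match the LFS pointer"
with open(path, "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()
assert digest == EXPECTED_OID, "sha256 does not match the LFS pointer"
print("checkpoint matches the LFS pointer")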
model.py CHANGED
@@ -89,7 +89,7 @@ class PreEmphasis(torch.nn.Module):
         return F.conv1d(input, self.flipped_filter).squeeze(1)
 
 
-class ECAPA_gender(nn.Module, PyTorchModelHubMixin):
+class ECAPA_gender(nn.Module):
     def __init__(self, config):
         super(ECAPA_gender, self).__init__()
         self.config = config
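
For context, PyTorchModelHubMixin (from huggingface_hub) is what previously gave ECAPA_gender Hub-aware helpers such as from_pretrained and push_to_hub; dropping it is why app.py above now restores the weights through an explicit load_state_dict. A hedged sketch of what the mixin-era class would have allowed, where the repo id is purely a placeholder and not something referenced in this commit:

# Hypothetical, mixin-era usage (placeholder repo id; not part of this commit):
#
#   from model import ECAPA_gender
#   model = ECAPA_gender.from_pretrained("<user>/voice-gender-classifier")
#
# With the plain nn.Module base introduced here, that helper is gone and the local
# checkpoint is loaded with torch.load(...) / load_state_dict(...), as app.py shows.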