pavan2606 committed
Commit f4dbd54 · verified · 1 parent: 6cfb37a

Update app.py

Files changed (1): app.py +23 -60
app.py CHANGED
@@ -1,60 +1,23 @@
- import numpy as np
- import pickle
- import librosa
- from tensorflow.keras.models import Sequential, model_from_json
- import gradio as gr
-
- json_file = open('/content/drive/MyDrive/Project/Audio/CNN_model.json', 'r')
- loaded_model_json = json_file.read()
- json_file.close()
- loaded_model = model_from_json(loaded_model_json)
- # load weights into new model
- loaded_model.load_weights("/content/drive/MyDrive/Project/Audio/best_model1_weights.h5")
-
-
- with open('/content/drive/MyDrive/Project/Audio/scaler2.pickle', 'rb') as f:
-     scaler2 = pickle.load(f)
-
- with open('/content/drive/MyDrive/Project/Audio/encoder2.pickle', 'rb') as f:
-     encoder2 = pickle.load(f)
-
-
- def zcr(data,frame_length,hop_length):
-     zcr=librosa.feature.zero_crossing_rate(data,frame_length=frame_length,hop_length=hop_length)
-     return np.squeeze(zcr)
- def rmse(data,frame_length=2048,hop_length=512):
-     rmse=librosa.feature.rms(y=data,frame_length=frame_length,hop_length=hop_length)
-     return np.squeeze(rmse)
- def mfcc(data,sr,frame_length=2048,hop_length=512,flatten:bool=True):
-     mfcc=librosa.feature.mfcc(y=data,sr=sr)
-     return np.squeeze(mfcc.T) if not flatten else np.ravel(mfcc.T)
-
- def extract_features(data,sr=22050,frame_length=2048,hop_length=512):
-     result=np.array([])
-
-     result=np.hstack((result,
-                       zcr(data,frame_length,hop_length),
-                       rmse(data,frame_length,hop_length),
-                       mfcc(data,sr,frame_length,hop_length)
-                       ))
-     return result
-
- def get_predict_feat(path):
-     d, s_rate = librosa.load(path, duration=2.5, offset=0.6)
-     res=extract_features(d)
-     result=np.array(res)
-     result=np.reshape(result,newshape=(1,2376))
-     i_result = scaler2.transform(result)
-     final_result=np.expand_dims(i_result, axis=2)
-
-     return final_result
-
- emotions1={1:'Neutral', 2:'Calm', 3:'Happy', 4:'Sad', 5:'Angry', 6:'Fear', 7:'Disgust',8:'Surprise'}
- def prediction(path1):
-     res = get_predict_feat(path1)
-     predictions = loaded_model.predict(res)
-     predicted_class = predictions.argmax(axis=1)[0] + 1  # Convert from 0-based indexing to emotion labels
-     predicted_emotion = emotions1[predicted_class]  # Get the corresponding emotion label
-     return predicted_emotion[0]
-
- gr.Interface(fn=prediction, inputs="audio", outputs="text").launch()
 
+ import streamlit as st
+ from speechbrain.inference.interfaces import foreign_class
+
+ # Initialize the classifier
+ classifier = foreign_class(source="speechbrain/emotion-recognition-wav2vec2-IEMOCAP", pymodule_file="custom_interface.py", classname="CustomEncoderWav2vec2Classifier")
+
+ def emotion(file):
+     if file is not None:
+         # Classify the file
+         out_prob, score, index, text_lab = classifier.classify_file(file.name)
+         # Display the output
+         st.write(text_lab)
+     else:
+         st.write("Please upload a file.")
+
+ def main():
+     st.title("Emotion Recognition")
+     uploaded_file = st.file_uploader("Upload audio file", type=["wav"])
+     if uploaded_file is not None:
+         emotion(uploaded_file)
+
+ if __name__ == "__main__":
+     main()
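A note on the new handler: st.file_uploader returns an in-memory UploadedFile, so classifier.classify_file(file.name) only works when that bare filename happens to resolve to a real file in the working directory. Below is a minimal, more robust sketch, assuming the model card's classify_file interface (which returns out_prob, score, index, text_lab); the load_classifier helper name is ours, not part of the commit. It writes the upload to a temporary file, classifies that path, and caches the model so Streamlit reruns do not reload it.

import tempfile

import streamlit as st
from speechbrain.inference.interfaces import foreign_class

@st.cache_resource  # load the model once per process, not on every rerun
def load_classifier():
    return foreign_class(
        source="speechbrain/emotion-recognition-wav2vec2-IEMOCAP",
        pymodule_file="custom_interface.py",
        classname="CustomEncoderWav2vec2Classifier",
    )

def emotion(file):
    # UploadedFile is a buffer, not a file on disk; persist it first so
    # classify_file receives a path that actually exists.
    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmp:
        tmp.write(file.getvalue())
        tmp_path = tmp.name
    out_prob, score, index, text_lab = load_classifier().classify_file(tmp_path)
    # text_lab is a list of label strings (e.g. ['hap']); show the first entry.
    st.write(text_lab[0])

Since text_lab comes back as a list, writing text_lab[0] displays the bare label rather than a one-element list.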