Spaces: sahandkh1419 (Running)
sahandkh1419 committed • Commit 2f5d3a2 • 1 Parent: efc446d
Update app.py

app.py CHANGED
@@ -1,12 +1,10 @@
+from warnings import filterwarnings
 import streamlit as st
-import whisper
-from sklearn.feature_extraction.text import TfidfVectorizer
-from sklearn.metrics.pairwise import cosine_similarity
 import base64
 from pydub import AudioSegment
-from
-
-
+from functions import *
+filterwarnings('ignore')
+
 
 st.set_page_config(
     page_title="Sing It Forward App",
@@ -68,77 +66,7 @@ st.write('------')
 
 
 
-def cosine_sim(text1, text2):
-    vectorizer = TfidfVectorizer().fit_transform([text1, text2])
-    vectors = vectorizer.toarray()
-    return cosine_similarity(vectors)[0, 1]
-
-def take_challenge(music_file, typed_lyrics, key, language):
-    st.write("Listen to music since you have to record 15seconds after that")
-    st.audio(music_file)
-    audio_value = st.experimental_audio_input("Sing Rest of music:🎙️", key=key)
-    if audio_value:
-        with open("user_sing.mp3", "wb") as f:
-            f.write(audio_value.getbuffer())
-
-        if language == "en":
-            english_model = whisper.load_model("base.en")
-            user_lyrics = english_model.transcribe("user_sing.mp3", language=language)["text"]
-        else:
-            persian_model = Model.load("hezarai/whisper-small-fa")
-            user_lyrics = persian_model.predict("user_sing.mp3")[0]["text"]
-
-        st.write(user_lyrics)
-        similarity_score = cosine_sim(typed_lyrics, user_lyrics)
-        if similarity_score > 0.85:
-            st.success('Awsome! You are doing great', icon="✅")
-            st.markdown('<style>div.stAlert { background-color: rgba(3, 67, 24, 0.9); }</style>', unsafe_allow_html=True)
-        else:
-            st.error('Awful! Try harder next time', icon="🚨")
-            st.markdown('<style>div.stAlert { background-color: rgba(241, 36, 36, 0.9); }</style>', unsafe_allow_html=True)
-
-def change_volume(input_file, output_file, volume_factor):
-    sound = AudioSegment.from_mp3(input_file)
-    volume_changed = sound + volume_factor
-    volume_changed.export(output_file, format="mp3")
-
-def change_speed(input_file, output_file, speed_factor):
-    sound, sr = librosa.load(input_file)
-    speed_changed = librosa.effects.time_stretch(sound, rate=speed_factor)
-    sf.write(output_file, speed_changed, sr)
-
-def change_pitch(input_file, output_file, pitch_factor):
-    sound, sr = librosa.load(input_file)
-    pitch_changed = librosa.effects.pitch_shift(sound, sr=sr, n_steps=pitch_factor)
-    sf.write(output_file, pitch_changed, sr)
-
-def low_pass_filter(input_file, output_file, cutoff_freq):
-    sound = AudioSegment.from_mp3(input_file)
-    low_filtered_sound = sound.low_pass_filter(cutoff_freq)
-    low_filtered_sound.export(output_file, format="mp3")
-
-def high_pass_filter(input_file, output_file, cutoff_freq):
-    sound = AudioSegment.from_mp3(input_file)
-    high_filtered_sound = sound.high_pass_filter(cutoff_freq)
-    high_filtered_sound.export(output_file, format="mp3")
-
-def pan_left_right(input_file, output_file, pan_factor):
-    sound = AudioSegment.from_mp3(input_file)
-    pan_sound = sound.pan(pan_factor)
-    pan_sound.export(output_file, format="mp3")
-
-def fade_in_ms(input_file, output_file, fade_factor):
-    sound = AudioSegment.from_mp3(input_file)
-    faded_sound = sound.fade_in(fade_factor)
-    faded_sound.export(output_file, format="mp3")
-
-def fade_out_ms(input_file, output_file, fade_factor):
-    sound = AudioSegment.from_mp3(input_file)
-    faded_sound = sound.fade_out(fade_factor)
-    faded_sound.export(output_file, format="mp3")
-
-
-
+
 tab1, tab2 = st.tabs(["Take Challenge", "Make Challenge"])
 
 with tab1:
@@ -223,8 +151,13 @@ with tab2:
         fade_out_ms(current_input, output_file, fade_out_time)
         current_input = output_file
 
+    music_background_checkbox = st.checkbox("Keep Music Background?")
 
     st.write("Now type what user should sing:")
     typed_lyrics = st.text_area("Lyrics to be singed:")
     st.write('------')
-    take_challenge(current_input, typed_lyrics, "unique_key_1", language)
+    if music_background_checkbox:
+        result_list = split_vocals(current_input)
+        take_challenge(current_input, typed_lyrics, "unique_key_1", language, music_background_checkbox, result_list[0])
+    else:
+        take_challenge(current_input, typed_lyrics, "unique_key_1", language, music_background_checkbox)
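The "Make Challenge" tab chains the removed audio helpers over a single working file (current_input), as the fade_out_ms call in the context above shows. A standalone sketch of the same pydub and librosa operations, assuming an input.mp3 exists and ffmpeg is available for pydub:

```python
from pydub import AudioSegment
import librosa
import soundfile as sf

# pydub operations: gain, filtering, panning and fades on an AudioSegment.
sound = AudioSegment.from_mp3("input.mp3")            # hypothetical input file
sound = sound + 6                                     # +6 dB, as in change_volume
sound = sound.low_pass_filter(3000)                   # low_pass_filter helper
sound = sound.pan(-0.5)                               # pan_left_right helper
sound = sound.fade_out(2000)                          # fade_out_ms helper (milliseconds)
sound.export("effects.mp3", format="mp3")

# librosa operations: speed and pitch changes on a float waveform plus sample rate.
y, sr = librosa.load("effects.mp3")
y = librosa.effects.time_stretch(y, rate=1.25)        # change_speed helper
y = librosa.effects.pitch_shift(y, sr=sr, n_steps=2)  # change_pitch helper
sf.write("challenge.wav", y, sr)                      # soundfile writes wav/flac, not mp3
```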