allandclive committed
Commit 25614eb · 1 Parent(s): 857f1c0

Update app.py

Files changed (1)
  1. app.py +10 -22
app.py CHANGED
@@ -10,39 +10,27 @@ device = "cuda:0" if torch.cuda.is_available() else "cpu"
 model = CombinedModel("facebook/mms-1b-all", "Sunbird/sunbird-mul-en-mbart-merged", device=device)
 
 def transcribe(audio_file_mic=None, audio_file_upload=None):
-    if audio_file_mic:
-        audio_file = audio_file_mic
-    elif audio_file_upload:
-        audio_file = audio_file_upload
-    else:
-        return "Please upload an audio file or record one"
-
-    # Make sure audio is 16kHz
-    speech, sample_rate = librosa.load(audio_file)
-    if sample_rate != 16000:
-        speech = librosa.resample(speech, orig_sr=sample_rate, target_sr=16000)
-    speech = torch.tensor([speech])
-
-    with torch.no_grad():
-        transcription, translation = model({"audio":speech})
-
-    return transcription, translation[0]
+    # Your transcribe function code here
+    # ...
 
 description = '''Luganda to English Speech Translation'''
 
 # Define example audio files
 example_audio_files = [
-    "audio/luganda.mp3"  # Replace with the path to your first example audio file
+    "audio/luganda.mp3",  # Replace with the path to your first example audio file
+    #"example_audio_files/example2.wav",  # Replace with the path to your second example audio file
 ]
 
 # Generate example inputs and outputs
 examples = []
 for audio_file_path in example_audio_files:
     transcription, translation = transcribe(audio_file_upload=audio_file_path)
-    examples.append({
-        "input": audio_file_path,
-        "output": [transcription, translation]
-    })
+    examples.append([
+        audio_file_path,  # First element corresponds to the first input component (audio_file_upload)
+        None,  # Second element corresponds to the second input component (audio_file_mic). Set to None for this example.
+        transcription,
+        translation
+    ])
 
 iface = gr.Interface(
     fn=transcribe,
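
For reference, the branch logic and resampling removed by this commit can be folded into a shorter body. The sketch below is a minimal reconstruction, not part of the commit: it assumes the same CombinedModel call signature as the removed code, and it loads audio directly at 16 kHz (librosa.load resamples during loading, which replaces the separate librosa.resample step).

```python
import librosa
import torch

def transcribe(audio_file_mic=None, audio_file_upload=None):
    # Prefer the microphone recording; fall back to the uploaded file.
    audio_file = audio_file_mic or audio_file_upload
    if audio_file is None:
        return "Please upload an audio file or record one", ""

    # MMS expects 16 kHz input; librosa resamples while loading.
    speech, _ = librosa.load(audio_file, sr=16000)
    speech = torch.tensor([speech])

    with torch.no_grad():
        transcription, translation = model({"audio": speech})

    return transcription, translation[0]
```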
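The new list-of-lists examples format lines up with how Gradio maps an example row onto input components, one value per component in input order. A sketch of interface wiring consistent with that layout follows; the component arguments are assumptions (Gradio 3.x-style), not taken from this commit. Note that gr.Interface's examples cover inputs only, so precomputed outputs are normally supplied via cache_examples rather than appended to the example rows.

```python
import gradio as gr

# Hypothetical wiring, assuming Gradio 3.x component arguments; input order
# here follows the transcribe() signature (microphone first, upload second).
iface = gr.Interface(
    fn=transcribe,
    inputs=[
        gr.Audio(source="microphone", type="filepath", label="Record"),
        gr.Audio(source="upload", type="filepath", label="Upload"),
    ],
    outputs=[
        gr.Textbox(label="Transcription"),
        gr.Textbox(label="Translation"),
    ],
    # One value per input component: no microphone recording, one upload path.
    examples=[[None, path] for path in example_audio_files],
    cache_examples=True,  # precompute outputs instead of storing them in the rows
    description=description,
)
```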