jonatasgrosman committed
Commit: d400ae7
Parent(s): a4af462

adjust README

README.md CHANGED
@@ -54,20 +54,21 @@ model = Wav2Vec2ForCTC.from_pretrained(MODEL_ID)
 # Preprocessing the datasets.
 # We need to read the audio files as arrays
 def speech_file_to_array_fn(batch):
-
-
-
+    speech_array, sampling_rate = librosa.load(batch["path"], sr=16_000)
+    batch["speech"] = speech_array
+    batch["sentence"] = batch["sentence"].upper()
+    return batch
 
 test_dataset = test_dataset.map(speech_file_to_array_fn)
-inputs = processor(test_dataset["speech"]
+inputs = processor(test_dataset[:2]["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
 
 with torch.no_grad():
-
+    logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits
 
 predicted_ids = torch.argmax(logits, dim=-1)
 
 print("Prediction:", processor.batch_decode(predicted_ids))
-print("Reference:", test_dataset["sentence"]
+print("Reference:", test_dataset[:2]["sentence"])
 ```
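For reference, the `+` side of this hunk assembles into the runnable snippet below. The imports and the `LANG_ID`/`MODEL_ID` values are assumptions filled in from the README context shown in the hunk headers, not part of this commit:

```python
import torch
import librosa
from datasets import load_dataset
from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC

LANG_ID = "en"  # hypothetical; the model card defines the real value
MODEL_ID = "jonatasgrosman/wav2vec2-large-xlsr-53-english"  # hypothetical

test_dataset = load_dataset("common_voice", LANG_ID, split="test")
processor = Wav2Vec2Processor.from_pretrained(MODEL_ID)
model = Wav2Vec2ForCTC.from_pretrained(MODEL_ID)

# Preprocessing the datasets.
# We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
    speech_array, sampling_rate = librosa.load(batch["path"], sr=16_000)
    batch["speech"] = speech_array
    batch["sentence"] = batch["sentence"].upper()
    return batch

test_dataset = test_dataset.map(speech_file_to_array_fn)

# Transcribe the first two samples
inputs = processor(test_dataset[:2]["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)

with torch.no_grad():
    logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits

predicted_ids = torch.argmax(logits, dim=-1)

print("Prediction:", processor.batch_decode(predicted_ids))
print("Reference:", test_dataset[:2]["sentence"])
```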
@@ -92,7 +93,7 @@ CHARS_TO_IGNORE = [",", "?", ".", "!", "-", ";", ":", '""', "%", "'", '"', "�"
 test_dataset = load_dataset("common_voice", LANG_ID, split="test")
 wer = load_metric("wer")
 
-chars_to_ignore_regex = f
+chars_to_ignore_regex = f"[{re.escape(''.join(CHARS_TO_IGNORE))}]"
 
 processor = Wav2Vec2Processor.from_pretrained(MODEL_ID)
 model = Wav2Vec2ForCTC.from_pretrained(MODEL_ID)
@@ -101,7 +102,7 @@ model.to(DEVICE)
 # Preprocessing the datasets.
 # We need to read the audio files as arrays
 def speech_file_to_array_fn(batch):
-    batch["sentence"] = re.sub(chars_to_ignore_regex,
+    batch["sentence"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).upper()
     speech_array, sampling_rate = librosa.load(batch["path"], sr=16_000)
     batch["speech"] = speech_array
     return batch
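These two hunks work together: the commit completes the `chars_to_ignore_regex` definition and applies it before upper-casing. A small standalone check of what that normalization does; the `CHARS_TO_IGNORE` list is copied from the truncated hunk context above, so it may be missing entries:

```python
import re

# As shown (truncated) in the hunk context; the README's full list may be longer.
CHARS_TO_IGNORE = [",", "?", ".", "!", "-", ";", ":", '""', "%", "'", '"', "�"]

# Join the characters, escape any regex metacharacters among them, and wrap
# the result in a single character class.
chars_to_ignore_regex = f"[{re.escape(''.join(CHARS_TO_IGNORE))}]"

# Hypothetical sample transcript: punctuation is stripped, then upper-cased.
sentence = 'Hello, "world"!'
print(re.sub(chars_to_ignore_regex, "", sentence).upper())  # HELLO WORLD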
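Context lines in the second hunk set up `wer = load_metric("wer")`, but the scoring step itself falls outside this diff. A minimal sketch of how the pieces could fit together; the `evaluate` function and the metric call are assumptions, not lines from this commit:

```python
import torch

# Hypothetical batched inference step; model, processor, test_dataset, wer,
# and DEVICE are the objects set up in the hunks above.
def evaluate(batch):
    inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
    with torch.no_grad():
        logits = model(inputs.input_values.to(DEVICE),
                       attention_mask=inputs.attention_mask.to(DEVICE)).logits
    predicted_ids = torch.argmax(logits, dim=-1)
    batch["pred_strings"] = processor.batch_decode(predicted_ids)
    return batch

result = test_dataset.map(evaluate, batched=True, batch_size=8)
print("WER:", wer.compute(predictions=result["pred_strings"], references=result["sentence"]))
```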