Update eval.py
eval.py
CHANGED
@@ -16,6 +16,7 @@ def log_results(result: Dataset, args: Dict[str, str]):
     log_outputs = args.log_outputs
     lm = "withLM" if args.use_lm else "noLM"
     model_id = args.model_id.replace("/", "_")
+    model_id = model_id.replace(".", "")
     dataset_id = "_".join(args.dataset.split("/") + [model_id, args.config, args.split, lm])
 
     # load metric
@@ -27,7 +28,7 @@ def log_results(result: Dataset, args: Dict[str, str]):
     cer_result = cer.compute(references=result["target"], predictions=result["prediction"])
 
     # print & log results
-    result_str = f"WER: {wer_result}\nCER: {cer_result}"
+    result_str = f"{dataset_id}\nWER: {wer_result}\nCER: {cer_result}"
     print(result_str)
 
     with open(f"{dataset_id}_eval_results.txt", "w") as f:
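The commit makes two changes to log_results: dots are stripped from the sanitized model ID, and the computed dataset_id is prepended to the logged result string. A minimal sketch of their combined effect; the model and dataset names below are hypothetical, not taken from this commit:

# Illustrative values only; not part of the commit itself.
model_id = "myorg/wav2vec2-2.0-large".replace("/", "_")  # existing: slashes -> underscores
model_id = model_id.replace(".", "")                     # new: drop dots from the filename stem
# model_id == "myorg_wav2vec2-20-large"

dataset = "mozilla-foundation/common_voice_8_0"
config, split, lm = "tr", "test", "noLM"
dataset_id = "_".join(dataset.split("/") + [model_id, config, split, lm])
# dataset_id == "mozilla-foundation_common_voice_8_0_myorg_wav2vec2-20-large_tr_test_noLM"

wer_result, cer_result = 0.123, 0.045                    # placeholder metric values
# new: result_str now starts with dataset_id, so the logged text is self-describing
result_str = f"{dataset_id}\nWER: {wer_result}\nCER: {cer_result}"
print(result_str)

Stripping the dots keeps the generated "{dataset_id}_eval_results.txt" filename from containing what look like extra file extensions, and embedding dataset_id in result_str means the printed and written output identifies which model/dataset/config/split produced the scores.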