# Evaluate on LibriSpeech test-clean: given a ~3 s prompt, generate 4-10 s of audio
# (the evaluation protocol used by VALL-E / Voicebox).
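# The script computes one metric per run, selected by eval_task below: WER, by
# transcribing the generated audio with an ASR model (faster-whisper), or SIM,
# speaker similarity scored with a WavLM-based model.
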
import os
import sys

sys.path.append(os.getcwd())

import multiprocessing as mp

import numpy as np

from model.utils import (
    get_librispeech_test,
    run_asr_wer,
    run_sim,
)


eval_task = "wer"  # "sim" | "wer"
lang = "en"
metalst = "data/librispeech_pc_test_clean_cross_sentence.lst"
librispeech_test_clean_path = "<SOME_PATH>/LibriSpeech/test-clean"  # path to LibriSpeech test-clean
gen_wav_dir = "PATH_TO_GENERATED"  # directory of generated wavs

gpus = [0, 1, 2, 3, 4, 5, 6, 7]
test_set = get_librispeech_test(metalst, gen_wav_dir, gpus, librispeech_test_clean_path)
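# Inferred from how test_set is consumed below: get_librispeech_test shards the
# metadata into len(gpus) sub-lists and returns (rank, sub_test_set) pairs, so
# each worker process runs its shard on one GPU.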

## Note: in LibriSpeech, some speakers use different voices for different characters
## in the book, which can yield low similarity scores even for the ground-truth recordings.
# test_set = get_librispeech_test(metalst, gen_wav_dir, gpus, librispeech_test_clean_path, eval_ground_truth=True)  # evaluate the ground truth instead

local = False
if local:  # use a local custom checkpoint dir
    asr_ckpt_dir = "../checkpoints/Systran/faster-whisper-large-v3"
else:
    asr_ckpt_dir = ""  # auto-download to the cache dir

wavlm_ckpt_dir = "../checkpoints/UniSpeech/wavlm_large_finetune.pth"
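# Assumption: this is the UniSpeech WavLM-large checkpoint fine-tuned for speaker
# verification, which run_sim uses to score speaker similarity.
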

# --------------------------- WER ---------------------------

if eval_task == "wer":
    wers = []

    with mp.Pool(processes=len(gpus)) as pool:
        args = [(rank, lang, sub_test_set, asr_ckpt_dir) for (rank, sub_test_set) in test_set]
        results = pool.map(run_asr_wer, args)
        for wers_ in results:
            wers.extend(wers_)

    wer = round(np.mean(wers) * 100, 3)
    print(f"\nTotal {len(wers)} samples")
    print(f"WER : {wer}%")


# --------------------------- SIM ---------------------------

if eval_task == "sim":
    sim_list = []

    with mp.Pool(processes=len(gpus)) as pool:
        args = [(rank, sub_test_set, wavlm_ckpt_dir) for (rank, sub_test_set) in test_set]
        results = pool.map(run_sim, args)
        for sim_ in results:
            sim_list.extend(sim_)

    sim = round(sum(sim_list) / len(sim_list), 3)
    print(f"\nTotal {len(sim_list)} samples")
    print(f"SIM : {sim}")
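
# Note: mp.Pool runs at module level without an `if __name__ == "__main__":` guard,
# which assumes the default "fork" start method (Linux). On platforms that spawn
# worker processes (Windows/macOS), wrap the evaluation in a main guard, since
# spawned workers re-import this module and would re-execute the pool setup.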