From be53fb179c17c8db16656d017af26578633cd343 Mon Sep 17 00:00:00 2001
From: SWivid
Date: Mon, 11 Nov 2024 14:46:50 +0800
Subject: [PATCH] fix. force reload from custom cache file to override infer_gradio init loaded value in case webpage refreshed

---
 src/f5_tts/infer/infer_gradio.py | 27 +++++++++++++++------------
 src/f5_tts/infer/utils_infer.py  |  2 +-
 2 files changed, 16 insertions(+), 13 deletions(-)

diff --git a/src/f5_tts/infer/infer_gradio.py b/src/f5_tts/infer/infer_gradio.py
index 3de5b55..4e5dc63 100644
--- a/src/f5_tts/infer/infer_gradio.py
+++ b/src/f5_tts/infer/infer_gradio.py
@@ -757,17 +757,22 @@ If you're having issues, try converting your reference audio to WAV or MP3, clip
                 "hf://SWivid/F5-TTS/F5TTS_Base/vocab.txt",
             ]
 
-    def switch_tts_model(new_choice, custom_ckpt_path, custom_vocab_path):
+    def switch_tts_model(new_choice):
         global tts_model_choice
-        if new_choice == "Custom":
+        if new_choice == "Custom":  # override in case webpage is refreshed
+            custom_ckpt_path, custom_vocab_path = load_last_used_custom()
             tts_model_choice = ["Custom", custom_ckpt_path, custom_vocab_path]
-            with open(last_used_custom, "w") as f:
-                f.write(f"{custom_ckpt_path},{custom_vocab_path}")
-            return gr.update(visible=True), gr.update(visible=True)
+            return gr.update(visible=True, value=custom_ckpt_path), gr.update(visible=True, value=custom_vocab_path)
         else:
             tts_model_choice = new_choice
             return gr.update(visible=False), gr.update(visible=False)
 
+    def set_custom_model(custom_ckpt_path, custom_vocab_path):
+        global tts_model_choice
+        tts_model_choice = ["Custom", custom_ckpt_path, custom_vocab_path]
+        with open(last_used_custom, "w") as f:
+            f.write(f"{custom_ckpt_path},{custom_vocab_path}")
+
     with gr.Row():
         if not USING_SPACES:
             choose_tts_model = gr.Radio(
@@ -794,20 +799,18 @@
 
         choose_tts_model.change(
             switch_tts_model,
-            inputs=[choose_tts_model, custom_ckpt_path, custom_vocab_path],
+            inputs=[choose_tts_model],
             outputs=[custom_ckpt_path, custom_vocab_path],
             show_progress="hidden",
         )
         custom_ckpt_path.change(
-            switch_tts_model,
-            inputs=[choose_tts_model, custom_ckpt_path, custom_vocab_path],
-            outputs=[custom_ckpt_path, custom_vocab_path],
+            set_custom_model,
+            inputs=[custom_ckpt_path, custom_vocab_path],
             show_progress="hidden",
         )
         custom_vocab_path.change(
-            switch_tts_model,
-            inputs=[choose_tts_model, custom_ckpt_path, custom_vocab_path],
-            outputs=[custom_ckpt_path, custom_vocab_path],
+            set_custom_model,
+            inputs=[custom_ckpt_path, custom_vocab_path],
             show_progress="hidden",
         )
 
diff --git a/src/f5_tts/infer/utils_infer.py b/src/f5_tts/infer/utils_infer.py
index f3ebc33..469855f 100644
--- a/src/f5_tts/infer/utils_infer.py
+++ b/src/f5_tts/infer/utils_infer.py
@@ -194,7 +194,7 @@ def load_model(
     tokenizer = "custom"
 
     print("\nvocab : ", vocab_file)
-    print("tokenizer : ", tokenizer)
+    print("token : ", tokenizer)
     print("model : ", ckpt_path, "\n")
 
     vocab_char_map, vocab_size = get_tokenizer(vocab_file, tokenizer)
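
Note: the new switch_tts_model() above calls load_last_used_custom(), which this patch does not show; it is defined elsewhere in infer_gradio.py. The sketch below is only an illustration of the round-trip the commit relies on: set_custom_model() writes "ckpt_path,vocab_path" to the cache file, and switch_tts_model() re-reads that file on every radio change, so the value loaded when the app started cannot go stale after a webpage refresh. The cache location and the fallback defaults here are assumptions for illustration, not taken verbatim from the patch.

from pathlib import Path

# assumed cache location; in infer_gradio.py the real path is built with
# importlib.resources (files("f5_tts")...), which is not shown in this patch
last_used_custom = Path(".cache/last_used_custom.txt")

def load_last_used_custom():
    # re-read the last custom ckpt/vocab pair from disk on every call;
    # fall back to assumed F5-TTS defaults if no cache file exists yet
    try:
        with open(last_used_custom, "r") as f:
            return f.read().split(",")
    except FileNotFoundError:
        last_used_custom.parent.mkdir(parents=True, exist_ok=True)
        return [
            "hf://SWivid/F5-TTS/F5TTS_Base/model_1200000.safetensors",  # assumed default ckpt
            "hf://SWivid/F5-TTS/F5TTS_Base/vocab.txt",
        ]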