mirror of
https://github.com/SWivid/F5-TTS.git
synced 2025-12-29 14:15:18 -08:00
fix: force a reload from the custom cache file to override the value loaded at infer_gradio init, in case the webpage is refreshed
This commit is contained in:
@@ -757,17 +757,22 @@ If you're having issues, try converting your reference audio to WAV or MP3, clip
|
||||
"hf://SWivid/F5-TTS/F5TTS_Base/vocab.txt",
|
||||
]
|
||||
|
||||
def switch_tts_model(new_choice):
    """Handle a change of the TTS-model radio selection.

    Updates the module-level ``tts_model_choice`` and returns two
    ``gr.update`` objects that control the visibility (and, for the
    "Custom" case, the value) of the custom checkpoint / vocab textboxes.
    """
    global tts_model_choice
    if new_choice != "Custom":
        # Built-in model: record the plain choice and hide the custom fields.
        tts_model_choice = new_choice
        return gr.update(visible=False), gr.update(visible=False)
    # "Custom" selected — re-read the last-used paths from the cache file so a
    # refreshed page shows the persisted values rather than the defaults that
    # were loaded when the app initialized.
    ckpt, vocab = load_last_used_custom()
    tts_model_choice = ["Custom", ckpt, vocab]
    return gr.update(visible=True, value=ckpt), gr.update(visible=True, value=vocab)
|
||||
|
||||
def set_custom_model(custom_ckpt_path, custom_vocab_path):
    """Record a custom checkpoint/vocab pair as the active TTS model.

    Persists the pair to the ``last_used_custom`` cache file so it can be
    restored after a page refresh (assumes the cache file's parent
    directory already exists — TODO confirm it is created elsewhere).
    """
    global tts_model_choice
    choice = ["Custom", custom_ckpt_path, custom_vocab_path]
    tts_model_choice = choice
    # Persist as a single "ckpt,vocab" line for later reload.
    record = f"{custom_ckpt_path},{custom_vocab_path}"
    with open(last_used_custom, "w") as f:
        f.write(record)
|
||||
|
||||
with gr.Row():
|
||||
if not USING_SPACES:
|
||||
choose_tts_model = gr.Radio(
|
||||
@@ -794,20 +799,18 @@ If you're having issues, try converting your reference audio to WAV or MP3, clip
|
||||
|
||||
choose_tts_model.change(
|
||||
switch_tts_model,
|
||||
inputs=[choose_tts_model, custom_ckpt_path, custom_vocab_path],
|
||||
inputs=[choose_tts_model],
|
||||
outputs=[custom_ckpt_path, custom_vocab_path],
|
||||
show_progress="hidden",
|
||||
)
|
||||
custom_ckpt_path.change(
|
||||
switch_tts_model,
|
||||
inputs=[choose_tts_model, custom_ckpt_path, custom_vocab_path],
|
||||
outputs=[custom_ckpt_path, custom_vocab_path],
|
||||
set_custom_model,
|
||||
inputs=[custom_ckpt_path, custom_vocab_path],
|
||||
show_progress="hidden",
|
||||
)
|
||||
custom_vocab_path.change(
|
||||
switch_tts_model,
|
||||
inputs=[choose_tts_model, custom_ckpt_path, custom_vocab_path],
|
||||
outputs=[custom_ckpt_path, custom_vocab_path],
|
||||
set_custom_model,
|
||||
inputs=[custom_ckpt_path, custom_vocab_path],
|
||||
show_progress="hidden",
|
||||
)
|
||||
|
||||
|
||||
@@ -194,7 +194,7 @@ def load_model(
|
||||
tokenizer = "custom"
|
||||
|
||||
print("\nvocab : ", vocab_file)
|
||||
print("tokenizer : ", tokenizer)
|
||||
print("token : ", tokenizer)
|
||||
print("model : ", ckpt_path, "\n")
|
||||
|
||||
vocab_char_map, vocab_size = get_tokenizer(vocab_file, tokenizer)
|
||||
|
||||
Reference in New Issue
Block a user