From 700039b55422db5aaa80a1b07fbf671e039ba66b Mon Sep 17 00:00:00 2001
From: lpscr <147736764+lpscr@users.noreply.github.com>
Date: Mon, 28 Oct 2024 11:03:10 +0200
Subject: [PATCH] gradio finetune fix wrong value (#301)

* fix wrong value print vocab
---
 src/f5_tts/train/finetune_cli.py    | 2 ++
 src/f5_tts/train/finetune_gradio.py | 2 +-
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/src/f5_tts/train/finetune_cli.py b/src/f5_tts/train/finetune_cli.py
index 3a867cf..9a95647 100644
--- a/src/f5_tts/train/finetune_cli.py
+++ b/src/f5_tts/train/finetune_cli.py
@@ -106,6 +106,8 @@ def main():
 
     vocab_char_map, vocab_size = get_tokenizer(tokenizer_path, tokenizer)
 
+    print("\nvocab : ", vocab_size)
+
     mel_spec_kwargs = dict(
         target_sample_rate=target_sample_rate,
         n_mel_channels=n_mel_channels,
diff --git a/src/f5_tts/train/finetune_gradio.py b/src/f5_tts/train/finetune_gradio.py
index 1b42c6a..1c73931 100644
--- a/src/f5_tts/train/finetune_gradio.py
+++ b/src/f5_tts/train/finetune_gradio.py
@@ -1389,7 +1389,7 @@ If you encounter a memory error, try reducing the batch size per GPU to a smalle
         save_per_updates.value = save_per_updatesv
         last_per_steps.value = last_per_stepsv
         ch_finetune.value = finetunev
-        file_checkpoint_train.value = file_checkpoint_train
+        file_checkpoint_train.value = file_checkpoint_trainv
         tokenizer_type.value = tokenizer_typev
         tokenizer_file.value = tokenizer_filev
         mixed_precision.value = mixed_precisionv
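
Reviewer note: the finetune_cli.py hunk only adds a diagnostic print of vocab_size after the tokenizer loads. The finetune_gradio.py hunk fixes a one-character typo: the settings loader assigned the Gradio Textbox component back to its own .value instead of the loaded string file_checkpoint_trainv, so the field displayed the component's repr rather than the checkpoint path. Below is a minimal sketch of that bug class; the surrounding app and function name are hypothetical, not the actual F5-TTS code.

    # Sketch only: assumes the names from the diff; apply_loaded_settings
    # is a hypothetical stand-in for the project's settings loader.
    import gradio as gr

    with gr.Blocks() as app:
        file_checkpoint_train = gr.Textbox(label="checkpoint path")

    def apply_loaded_settings(file_checkpoint_trainv: str) -> None:
        # Buggy (pre-patch): self-assignment of the component object, so the
        # textbox default becomes something like "<gradio...Textbox object>":
        #   file_checkpoint_train.value = file_checkpoint_train
        # Fixed (post-patch): assign the loaded string; the trailing "v"
        # distinguishes the incoming value from the component it populates.
        file_checkpoint_train.value = file_checkpoint_trainv

The component/value naming convention (x vs. xv) is easy to trip over, which is how the original self-assignment slipped in.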