diff --git a/src/f5_tts/train/finetune_cli.py b/src/f5_tts/train/finetune_cli.py
index 3a867cf..9a95647 100644
--- a/src/f5_tts/train/finetune_cli.py
+++ b/src/f5_tts/train/finetune_cli.py
@@ -106,6 +106,8 @@ def main():
 
     vocab_char_map, vocab_size = get_tokenizer(tokenizer_path, tokenizer)
 
+    print("\nvocab : ", vocab_size)
+
     mel_spec_kwargs = dict(
         target_sample_rate=target_sample_rate,
         n_mel_channels=n_mel_channels,
diff --git a/src/f5_tts/train/finetune_gradio.py b/src/f5_tts/train/finetune_gradio.py
index 1b42c6a..1c73931 100644
--- a/src/f5_tts/train/finetune_gradio.py
+++ b/src/f5_tts/train/finetune_gradio.py
@@ -1389,7 +1389,7 @@ If you encounter a memory error, try reducing the batch size per GPU to a smalle
         save_per_updates.value = save_per_updatesv
         last_per_steps.value = last_per_stepsv
         ch_finetune.value = finetunev
-        file_checkpoint_train.value = file_checkpoint_train
+        file_checkpoint_train.value = file_checkpoint_trainv
         tokenizer_type.value = tokenizer_typev
         tokenizer_file.value = tokenizer_filev
         mixed_precision.value = mixed_precisionv