Gradio finetune: fix spurious space problem in symbols (#296)

* fix spurious space in symbols caused by writing the vocab with utf-8-sig (the BOM)

* fix vocab extend (keep the trailing empty line when appending new symbols)

* Do not overwrite the vocab if it already exists!

* add settings (save and load per-project training settings as setting.json)

* fix paths (resolve the data and ckpts directories relative to the f5_tts package)

* rename the pretrain field to make clear it expects a checkpoint path
Author: lpscr
Date: 2024-10-27 21:09:26 +02:00
Committed by: GitHub
Parent commit: 1888fa4919
This commit: 0641d5d9b3
2 changed files with 218 additions and 16 deletions

File: src/f5_tts/train/finetune_cli.py

@@ -45,7 +45,7 @@ def parse_args():
parser.add_argument("--save_per_updates", type=int, default=10000, help="Save checkpoint every X steps")
parser.add_argument("--last_per_steps", type=int, default=50000, help="Save last checkpoint every X steps")
parser.add_argument("--finetune", type=bool, default=True, help="Use Finetune")
parser.add_argument("--pretrain", type=str, default=None, help="Use pretrain model for finetune")
parser.add_argument("--pretrain", type=str, default=None, help="the path to the checkpoint")
parser.add_argument(
"--tokenizer", type=str, default="pinyin", choices=["pinyin", "char", "custom"], help="Tokenizer type"
)
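For reference, the renamed flag takes an ordinary string path. A minimal parsing sketch (values hypothetical), which also shows a long-standing argparse caveat with the type=bool flag above: bool() is applied to the raw string, and any non-empty string is truthy:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--pretrain", type=str, default=None, help="the path to the checkpoint")
parser.add_argument("--finetune", type=bool, default=True, help="Use Finetune")

args = parser.parse_args(["--pretrain", "ckpts/my_run/model_1200000.pt"])
print(args.pretrain)   # ckpts/my_run/model_1200000.pt

# Caveat: bool("False") is True, so "--finetune False" still parses as True.
args = parser.parse_args(["--finetune", "False"])
print(args.finetune)   # True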
@@ -89,7 +89,11 @@ def main():
     if args.finetune:
         if not os.path.isdir(checkpoint_path):
             os.makedirs(checkpoint_path, exist_ok=True)
-            shutil.copy2(ckpt_path, os.path.join(checkpoint_path, os.path.basename(ckpt_path)))
+        file_checkpoint = os.path.join(checkpoint_path, os.path.basename(ckpt_path))
+        if not os.path.isfile(file_checkpoint):
+            shutil.copy2(ckpt_path, file_checkpoint)
+            print("copy checkpoint for finetune")
 
     # Use the tokenizer and tokenizer_path provided in the command line arguments
     tokenizer = args.tokenizer
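The new guard makes the copy idempotent: when training restarts, the checkpoint already sitting in the project directory (presumably further trained by now) is kept rather than clobbered by the base model. The same copy-if-missing idiom in isolation, a sketch with hypothetical paths:

import os
import shutil

def copy_once(src, dst_dir):
    # Copy src into dst_dir only if it is not already there.
    os.makedirs(dst_dir, exist_ok=True)
    dst = os.path.join(dst_dir, os.path.basename(src))
    if not os.path.isfile(dst):
        shutil.copy2(src, dst)   # first run: seed the project with the base checkpoint
    return dst                   # later runs: keep the existing file untouched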

File: src/f5_tts/train/finetune_gradio.py

@@ -26,7 +26,7 @@ from transformers import pipeline
 from cached_path import cached_path
 from f5_tts.api import F5TTS
 from f5_tts.model.utils import convert_char_to_pinyin
+from importlib.resources import files
 
 training_process = None
 system = platform.system()
@@ -36,9 +36,9 @@ last_checkpoint = ""
 last_device = ""
 last_ema = None
 
-path_basic = os.path.abspath(os.path.join(__file__, "../../../.."))
-path_data = os.path.join(path_basic, "data")
-path_project_ckpts = os.path.join(path_basic, "ckpts")
+path_data = str(files("f5_tts").joinpath("../../data"))
+path_project_ckpts = str(files("f5_tts").joinpath("../../ckpts"))
 
 file_train = "src/f5_tts/train/finetune_cli.py"
 
 device = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
@@ -46,6 +46,119 @@ device = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
 pipe = None
 
 
+# Save settings to a JSON file
+def save_settings(
+    project_name,
+    exp_name,
+    learning_rate,
+    batch_size_per_gpu,
+    batch_size_type,
+    max_samples,
+    grad_accumulation_steps,
+    max_grad_norm,
+    epochs,
+    num_warmup_updates,
+    save_per_updates,
+    last_per_steps,
+    finetune,
+    file_checkpoint_train,
+    tokenizer_type,
+    tokenizer_file,
+    mixed_precision,
+):
+    path_project = os.path.join(path_project_ckpts, project_name)
+    os.makedirs(path_project, exist_ok=True)
+    file_setting = os.path.join(path_project, "setting.json")
+
+    settings = {
+        "exp_name": exp_name,
+        "learning_rate": learning_rate,
+        "batch_size_per_gpu": batch_size_per_gpu,
+        "batch_size_type": batch_size_type,
+        "max_samples": max_samples,
+        "grad_accumulation_steps": grad_accumulation_steps,
+        "max_grad_norm": max_grad_norm,
+        "epochs": epochs,
+        "num_warmup_updates": num_warmup_updates,
+        "save_per_updates": save_per_updates,
+        "last_per_steps": last_per_steps,
+        "finetune": finetune,
+        "file_checkpoint_train": file_checkpoint_train,
+        "tokenizer_type": tokenizer_type,
+        "tokenizer_file": tokenizer_file,
+        "mixed_precision": mixed_precision,
+    }
+    with open(file_setting, "w") as f:
+        json.dump(settings, f, indent=4)
+    return "Settings saved!"
+
+
+# Load settings from a JSON file
+def load_settings(project_name):
+    project_name = project_name.replace("_pinyin", "").replace("_char", "")
+    path_project = os.path.join(path_project_ckpts, project_name)
+    file_setting = os.path.join(path_project, "setting.json")
+
+    if not os.path.isfile(file_setting):
+        settings = {
+            "exp_name": "F5TTS_Base",
+            "learning_rate": 1e-05,
+            "batch_size_per_gpu": 1000,
+            "batch_size_type": "frame",
+            "max_samples": 64,
+            "grad_accumulation_steps": 1,
+            "max_grad_norm": 1,
+            "epochs": 100,
+            "num_warmup_updates": 2,
+            "save_per_updates": 300,
+            "last_per_steps": 200,
+            "finetune": True,
+            "file_checkpoint_train": "",
+            "tokenizer_type": "pinyin",
+            "tokenizer_file": "",
+            "mixed_precision": "none",
+        }
+        return (
+            settings["exp_name"],
+            settings["learning_rate"],
+            settings["batch_size_per_gpu"],
+            settings["batch_size_type"],
+            settings["max_samples"],
+            settings["grad_accumulation_steps"],
+            settings["max_grad_norm"],
+            settings["epochs"],
+            settings["num_warmup_updates"],
+            settings["save_per_updates"],
+            settings["last_per_steps"],
+            settings["finetune"],
+            settings["file_checkpoint_train"],
+            settings["tokenizer_type"],
+            settings["tokenizer_file"],
+            settings["mixed_precision"],
+        )
+
+    with open(file_setting, "r") as f:
+        settings = json.load(f)
+    return (
+        settings["exp_name"],
+        settings["learning_rate"],
+        settings["batch_size_per_gpu"],
+        settings["batch_size_type"],
+        settings["max_samples"],
+        settings["grad_accumulation_steps"],
+        settings["max_grad_norm"],
+        settings["epochs"],
+        settings["num_warmup_updates"],
+        settings["save_per_updates"],
+        settings["last_per_steps"],
+        settings["finetune"],
+        settings["file_checkpoint_train"],
+        settings["tokenizer_type"],
+        settings["tokenizer_file"],
+        settings["mixed_precision"],
+    )
 
 
 # Load metadata
 def get_audio_duration(audio_path):
     """Calculate the duration of an audio file."""
@@ -330,6 +443,26 @@ def start_training(
     print(cmd)
 
+    save_settings(
+        dataset_name,
+        exp_name,
+        learning_rate,
+        batch_size_per_gpu,
+        batch_size_type,
+        max_samples,
+        grad_accumulation_steps,
+        max_grad_norm,
+        epochs,
+        num_warmup_updates,
+        save_per_updates,
+        last_per_steps,
+        finetune,
+        file_checkpoint_train,
+        tokenizer_type,
+        tokenizer_file,
+        mixed_precision,
+    )
+
     try:
         # Start the training process
         training_process = subprocess.Popen(cmd, shell=True)
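A sketch of the launch-and-stop pattern used here (command hypothetical). shell=True keeps the accelerate command string intact, but it means the shell parses the string, so any user-supplied paths in cmd should be quoted:

import subprocess

cmd = "python -m this"   # stand-in for the real training command
training_process = subprocess.Popen(cmd, shell=True)

# Later, the Stop Training button can end the run via the saved handle:
training_process.terminate()
training_process.wait()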
@@ -564,10 +697,11 @@ def create_metadata(name_project, ch_tokenizer, progress=gr.Progress()):
     new_vocal = ""
     if not ch_tokenizer:
-        file_vocab_finetune = os.path.join(path_data, "Emilia_ZH_EN_pinyin/vocab.txt")
-        if not os.path.isfile(file_vocab_finetune):
-            return "Error: Vocabulary file 'Emilia_ZH_EN_pinyin' not found!", ""
-        shutil.copy2(file_vocab_finetune, file_vocab)
+        if not os.path.isfile(file_vocab):
+            file_vocab_finetune = os.path.join(path_data, "Emilia_ZH_EN_pinyin/vocab.txt")
+            if not os.path.isfile(file_vocab_finetune):
+                return "Error: Vocabulary file 'Emilia_ZH_EN_pinyin' not found!", ""
+            shutil.copy2(file_vocab_finetune, file_vocab)
 
         with open(file_vocab, "r", encoding="utf-8-sig") as f:
             vocab_char_map = {}
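The diff truncates the loop that fills vocab_char_map. For context, a sketch of the usual shape of such a map (file name hypothetical): because the file is opened with utf-8-sig, a BOM left over from an older utf-8-sig write is stripped on read instead of corrupting the first symbol.

vocab_char_map = {}
with open("vocab.txt", "r", encoding="utf-8-sig") as f:
    for i, line in enumerate(f):
        # Strip only the newline; a real " " (space) symbol stays intact.
        vocab_char_map[line.rstrip("\n")] = i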
@@ -801,11 +935,13 @@ def vocab_extend(project_name, symbols, model_type):
return "Symbols are okay no need to extend."
size_vocab = len(vocab)
vocab.pop() # fix empty space leave
vocab.pop()
for item in miss_symbols:
vocab.append(item)
with open(file_vocab_project, "w", encoding="utf-8-sig") as f:
vocab.append("")
with open(file_vocab_project, "w", encoding="utf-8") as f:
f.write("\n".join(vocab))
if model_type == "F5-TTS":
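A self-contained demonstration of the two problems this hunk fixes (file name hypothetical):

path = "demo_vocab.txt"

# 1) Writing with utf-8-sig prepends a BOM; read back as plain utf-8, it
#    surfaces as an invisible extra character glued to the first symbol.
with open(path, "w", encoding="utf-8-sig") as f:
    f.write("\n".join(["a", "b", ""]))
with open(path, "r", encoding="utf-8") as f:
    print(repr(f.read().split("\n")[0]))   # '\ufeffa' -> corrupted first symbol

# 2) A vocab file ending in a newline splits into [..., ""], so the code pops
#    the empty tail before extending and re-appends it to keep the final newline.
vocab = "a\nb\n".split("\n")   # ['a', 'b', '']
vocab.pop()                    # drop the empty tail
vocab.extend(["c", "d"])       # new symbols
vocab.append("")               # restore the trailing newline on join
with open(path, "w", encoding="utf-8") as f:
    f.write("\n".join(vocab))  # "a\nb\nc\nd\n"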
@@ -813,14 +949,17 @@ def vocab_extend(project_name, symbols, model_type):
     else:
         ckpt_path = str(cached_path("hf://SWivid/E2-TTS/E2TTS_Base/model_1200000.pt"))
 
-    new_ckpt_path = os.path.join(path_project_ckpts, name_project)
+    vocab_size_new = len(miss_symbols)
+
+    dataset_name = name_project.replace("_pinyin", "").replace("_char", "")
+    new_ckpt_path = os.path.join(path_project_ckpts, dataset_name)
     os.makedirs(new_ckpt_path, exist_ok=True)
 
     new_ckpt_file = os.path.join(new_ckpt_path, "model_1200000.pt")
 
-    size = expand_model_embeddings(ckpt_path, new_ckpt_file, num_new_tokens=len(miss_symbols))
+    size = expand_model_embeddings(ckpt_path, new_ckpt_file, num_new_tokens=vocab_size_new)
 
     vocab_new = "\n".join(miss_symbols)
-    return f"vocab old size : {size_vocab}\nvocab new size : {size}\nvocab add : {len(miss_symbols)}\nnew symbols :\n{vocab_new}"
+    return f"vocab old size : {size_vocab}\nvocab new size : {size}\nvocab add : {vocab_size_new}\nnew symbols :\n{vocab_new}"
 
 
 def vocab_check(project_name):
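expand_model_embeddings is defined elsewhere in this file and is not shown in the diff. As a conceptual sketch only, with all names assumed: growing a checkpoint's embedding for new tokens typically means appending freshly initialized rows while leaving every trained vector unchanged.

import torch

def expand_embedding_rows(weight, num_new_tokens):
    # Conceptual sketch: keep the trained rows, append small random new ones.
    new_rows = torch.randn(num_new_tokens, weight.shape[1]) * 0.02
    return torch.cat([weight, new_rows], dim=0)

old = torch.zeros(4, 8)              # 4 known symbols, dim 8 (toy sizes)
new = expand_embedding_rows(old, 2)  # 2 missing symbols appended
print(new.shape[0])                  # 6, i.e. the "vocab new size" reported above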
@@ -1192,7 +1331,7 @@ If you encounter a memory error, try reducing the batch size per GPU to a smalle
         with gr.Row():
             ch_finetune = bt_create = gr.Checkbox(label="finetune", value=True)
             tokenizer_file = gr.Textbox(label="Tokenizer File", value="")
-            file_checkpoint_train = gr.Textbox(label="Pretrain Model", value="")
+            file_checkpoint_train = gr.Textbox(label="Path to the pretrain checkpoint", value="")
 
         with gr.Row():
             exp_name = gr.Radio(label="Model", choices=["F5TTS_Base", "E2TTS_Base"], value="F5TTS_Base")
@@ -1219,6 +1358,42 @@ If you encounter a memory error, try reducing the batch size per GPU to a smalle
         start_button = gr.Button("Start Training")
         stop_button = gr.Button("Stop Training", interactive=False)
 
+        if projects_selelect is not None:
+            (
+                exp_namev,
+                learning_ratev,
+                batch_size_per_gpuv,
+                batch_size_typev,
+                max_samplesv,
+                grad_accumulation_stepsv,
+                max_grad_normv,
+                epochsv,
+                num_warmup_updatesv,
+                save_per_updatesv,
+                last_per_stepsv,
+                finetunev,
+                file_checkpoint_trainv,
+                tokenizer_typev,
+                tokenizer_filev,
+                mixed_precisionv,
+            ) = load_settings(projects_selelect)
+
+            exp_name.value = exp_namev
+            learning_rate.value = learning_ratev
+            batch_size_per_gpu.value = batch_size_per_gpuv
+            batch_size_type.value = batch_size_typev
+            max_samples.value = max_samplesv
+            grad_accumulation_steps.value = grad_accumulation_stepsv
+            max_grad_norm.value = max_grad_normv
+            epochs.value = epochsv
+            num_warmup_updates.value = num_warmup_updatesv
+            save_per_updates.value = save_per_updatesv
+            last_per_steps.value = last_per_stepsv
+            ch_finetune.value = finetunev
+            file_checkpoint_train.value = file_checkpoint_trainv
+            tokenizer_type.value = tokenizer_typev
+            tokenizer_file.value = tokenizer_filev
+            mixed_precision.value = mixed_precisionv
 
         txt_info_train = gr.Text(label="info", value="")
         start_button.click(
             fn=start_training,
@@ -1273,6 +1448,29 @@ If you encounter a memory error, try reducing the batch size per GPU to a smalle
             check_finetune, inputs=[ch_finetune], outputs=[file_checkpoint_train, tokenizer_file, tokenizer_type]
         )
 
+        cm_project.change(
+            fn=load_settings,
+            inputs=[cm_project],
+            outputs=[
+                exp_name,
+                learning_rate,
+                batch_size_per_gpu,
+                batch_size_type,
+                max_samples,
+                grad_accumulation_steps,
+                max_grad_norm,
+                epochs,
+                num_warmup_updates,
+                save_per_updates,
+                last_per_steps,
+                ch_finetune,
+                file_checkpoint_train,
+                tokenizer_type,
+                tokenizer_file,
+                mixed_precision,
+            ],
+        )
+
     with gr.TabItem("test model"):
         exp_name = gr.Radio(label="Model", choices=["F5-TTS", "E2-TTS"], value="F5-TTS")
         list_checkpoints, checkpoint_select = get_checkpoints_project(projects_selelect, False)
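The cm_project.change wiring above relies on Gradio's contract that a handler's returned tuple is mapped positionally onto the outputs list. A minimal self-contained sketch of that contract (names hypothetical):

import gradio as gr

def on_pick(name):
    # Return values map positionally onto the outputs list below.
    return f"settings for {name}", 1e-05

with gr.Blocks() as demo:
    picker = gr.Dropdown(choices=["proj_a", "proj_b"], label="project")
    info = gr.Textbox(label="info")
    lr = gr.Number(label="learning rate")
    picker.change(fn=on_pick, inputs=[picker], outputs=[info, lr])

# demo.launch()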