from __future__ import annotations

import os
import re
import math
import random
import string
from collections import defaultdict

from tqdm import tqdm

import matplotlib
matplotlib.use("Agg")  # headless backend; spectrograms are only rendered to files
import matplotlib.pyplot as plt

import torch
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence
import torchaudio

import einx
from einops import rearrange, reduce

import jieba
from pypinyin import lazy_pinyin, Style

from model.ecapa_tdnn import ECAPA_TDNN_SMALL
from model.modules import MelSpec


def seed_everything(seed = 0):
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
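
# Minimal usage sketch: call once at program start, before any model or
# dataloader is constructed. Note full determinism is not guaranteed; some
# CUDA kernels remain nondeterministic even with cudnn.deterministic = True.
#
#     seed_everything(42)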


def exists(v):
    return v is not None


def default(v, d):
    return v if exists(v) else d


# Shape-style annotations like int['b'] below are documentation only; they are
# never evaluated at runtime thanks to `from __future__ import annotations`.

def lens_to_mask(
    t: int['b'],
    length: int | None = None
) -> bool['b n']:

    if not exists(length):
        length = t.amax()

    seq = torch.arange(length, device = t.device)
    return einx.less('n, b -> b n', seq, t)
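
# Example (illustrative lengths): each row is True up to its own length.
#
#     lens_to_mask(torch.tensor([2, 4]))
#     # tensor([[ True,  True, False, False],
#     #         [ True,  True,  True,  True]])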


def mask_from_start_end_indices(
    seq_len: int['b'],
    start: int['b'],
    end: int['b']
):
    max_seq_len = seq_len.max().item()
    seq = torch.arange(max_seq_len, device = start.device).long()
    return einx.greater_equal('n, b -> b n', seq, start) & einx.less('n, b -> b n', seq, end)


def mask_from_frac_lengths(
    seq_len: int['b'],
    frac_lengths: float['b']
):
    lengths = (frac_lengths * seq_len).long()
    max_start = seq_len - lengths

    rand = torch.rand_like(frac_lengths)
    start = (max_start * rand).long().clamp(min = 0)
    end = start + lengths

    return mask_from_start_end_indices(seq_len, start, end)
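
# Sketch: mask a random contiguous span covering a fraction of each sequence
# (the span's start is random, so results vary between calls).
#
#     mask = mask_from_frac_lengths(torch.tensor([8, 8]), torch.tensor([0.5, 0.5]))
#     # mask.shape == (2, 8); each row has 4 contiguous True entries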


def maybe_masked_mean(
    t: float['b n d'],
    mask: bool['b n'] = None
) -> float['b d']:

    if not exists(mask):
        return t.mean(dim = 1)

    t = einx.where('b n, b n d, -> b n d', mask, t, 0.)
    num = reduce(t, 'b n d -> b d', 'sum')
    den = reduce(mask.float(), 'b n -> b', 'sum')

    return einx.divide('b d, b -> b d', num, den.clamp(min = 1.))
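
# Example: with all-ones input, the masked mean over valid steps is still 1.
#
#     t = torch.ones(2, 3, 4)
#     mask = torch.tensor([[True, True, False], [True, False, False]])
#     maybe_masked_mean(t, mask)   # -> (2, 4) tensor of ones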


def list_str_to_tensor(
    text: list[str],
    padding_value = -1
) -> int['b nt']:
    # UTF-8 byte tokenizer: each string becomes its raw byte values
    list_tensors = [torch.tensor([*bytes(t, 'UTF-8')]) for t in text]
    text = pad_sequence(list_tensors, padding_value = padding_value, batch_first = True)
    return text
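
# Example: strings become UTF-8 byte ids, right-padded with padding_value.
#
#     list_str_to_tensor(["ab", "a"])
#     # tensor([[97, 98],
#     #         [97, -1]])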


def list_str_to_idx(
    text: list[str] | list[list[str]],
    vocab_char_map: dict[str, int],
    padding_value = -1
) -> int['b nt']:
    # map each char/token through the vocab; index 0 doubles as the unknown token
    list_idx_tensors = [torch.tensor([vocab_char_map.get(c, 0) for c in t]) for t in text]
    text = pad_sequence(list_idx_tensors, padding_value = padding_value, batch_first = True)
    return text


def get_tokenizer(dataset_name, tokenizer: str = "pinyin"):
    '''
    tokenizer   - "pinyin"  do G2P for Chinese characters only; needs a .txt vocab_file
                - "char"    char-wise tokenizer; needs a .txt vocab_file
                - "byte"    utf-8 tokenizer, no vocab file needed
                - "custom"  pass the path of the vocab.txt you want to use as dataset_name
    vocab_size  - if "pinyin": all available pinyin types, common alphabets (also those with accent) and symbols
                - if "char": derived from unfiltered character & symbol counts of the custom dataset
                - if "byte": 256 (utf-8 byte range)
    '''
    if tokenizer in ["pinyin", "char"]:
        with open(f"data/{dataset_name}_{tokenizer}/vocab.txt", "r", encoding="utf-8") as f:
            vocab_char_map = {}
            for i, char in enumerate(f):
                vocab_char_map[char[:-1]] = i  # drop trailing newline
        vocab_size = len(vocab_char_map)
        assert vocab_char_map[" "] == 0, "make sure space is of idx 0 in vocab.txt, since 0 is reserved for the unknown char"

    elif tokenizer == "byte":
        vocab_char_map = None
        vocab_size = 256

    elif tokenizer == "custom":
        with open(dataset_name, "r", encoding="utf-8") as f:
            vocab_char_map = {}
            for i, char in enumerate(f):
                vocab_char_map[char[:-1]] = i  # drop trailing newline
        vocab_size = len(vocab_char_map)

    return vocab_char_map, vocab_size
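
# Usage sketch (dataset name is illustrative; "pinyin"/"char" expect
# data/<dataset_name>_<tokenizer>/vocab.txt to exist, one token per line):
#
#     vocab_char_map, vocab_size = get_tokenizer("my_dataset", "pinyin")
#     ids = list_str_to_idx(convert_char_to_pinyin(["中国"]), vocab_char_map)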


def convert_char_to_pinyin(text_list, polyphone = True):
    final_text_list = []
    god_knows_why_en_testset_contains_zh_quote = str.maketrans({'“': '"', '”': '"', '‘': "'", '’': "'"})  # normalize zh-style quotes
    custom_trans = str.maketrans({';': ','})
    for text in text_list:
        char_list = []
        text = text.translate(god_knows_why_en_testset_contains_zh_quote)
        text = text.translate(custom_trans)
        for seg in jieba.cut(text):
            seg_byte_len = len(bytes(seg, 'UTF-8'))
            if seg_byte_len == len(seg):  # pure ASCII segment
                if char_list and seg_byte_len > 1 and char_list[-1] not in " :'\"":
                    char_list.append(" ")
                char_list.extend(seg)
            elif polyphone and seg_byte_len == 3 * len(seg):  # pure Chinese segment
                seg = lazy_pinyin(seg, style=Style.TONE3, tone_sandhi=True)
                for c in seg:
                    if c not in "。,、;:?!《》【】—…":
                        char_list.append(" ")
                    char_list.append(c)
            else:  # mixed characters, or polyphone disabled: convert char by char
                for c in seg:
                    if ord(c) < 256:
                        char_list.extend(c)
                    else:
                        if c not in "。,、;:?!《》【】—…":
                            char_list.append(" ")
                            char_list.extend(lazy_pinyin(c, style=Style.TONE3, tone_sandhi=True))
                        else:
                            char_list.append(c)
        final_text_list.append(char_list)

    return final_text_list
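
# Example: CJK characters become space-separated TONE3 pinyin tokens, while
# ASCII text passes through character by character.
#
#     convert_char_to_pinyin(["中国"])
#     # -> [[' ', 'zhong1', ' ', 'guo2']]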


def save_spectrogram(spectrogram, path):
    plt.figure(figsize=(12, 4))
    plt.imshow(spectrogram, origin='lower', aspect='auto')
    plt.colorbar()
    plt.savefig(path)
    plt.close()


def get_seedtts_testset_metainfo(metalst):
    with open(metalst) as f:
        lines = f.readlines()
    metainfo = []
    for line in lines:
        if len(line.strip().split('|')) == 5:
            utt, prompt_text, prompt_wav, gt_text, gt_wav = line.strip().split('|')
        elif len(line.strip().split('|')) == 4:
            utt, prompt_text, prompt_wav, gt_text = line.strip().split('|')
            gt_wav = os.path.join(os.path.dirname(metalst), "wavs", utt + ".wav")
        if not os.path.isabs(prompt_wav):
            prompt_wav = os.path.join(os.path.dirname(metalst), prompt_wav)
        metainfo.append((utt, prompt_text, prompt_wav, gt_text, gt_wav))
    return metainfo


def get_librispeech_test_clean_metainfo(metalst, librispeech_test_clean_path):
    with open(metalst) as f:
        lines = f.readlines()
    metainfo = []
    for line in lines:
        ref_utt, ref_dur, ref_txt, gen_utt, gen_dur, gen_txt = line.strip().split('\t')

        # LibriSpeech layout: <spk>-<chapter>-<idx> lives at <root>/<spk>/<chapter>/<utt>.flac
        ref_spk_id, ref_chaptr_id, _ = ref_utt.split('-')
        ref_wav = os.path.join(librispeech_test_clean_path, ref_spk_id, ref_chaptr_id, ref_utt + '.flac')

        gen_spk_id, gen_chaptr_id, _ = gen_utt.split('-')
        gen_wav = os.path.join(librispeech_test_clean_path, gen_spk_id, gen_chaptr_id, gen_utt + '.flac')

        # prepend a space so prompt and target text don't run together
        metainfo.append((gen_utt, ref_txt, ref_wav, " " + gen_txt, gen_wav))

    return metainfo


def padded_mel_batch(ref_mels):
    max_mel_length = torch.LongTensor([mel.shape[-1] for mel in ref_mels]).amax()
    padded_ref_mels = []
    for mel in ref_mels:
        padded_ref_mel = F.pad(mel, (0, max_mel_length - mel.shape[-1]), value = 0)
        padded_ref_mels.append(padded_ref_mel)
    padded_ref_mels = torch.stack(padded_ref_mels)
    padded_ref_mels = rearrange(padded_ref_mels, 'b d n -> b n d')
    return padded_ref_mels
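
# Example: zero-pad a list of (d, n) mels to a common length, batch-first.
#
#     mels = [torch.randn(100, 120), torch.randn(100, 80)]
#     padded_mel_batch(mels).shape   # torch.Size([2, 120, 100]), i.e. (b, n, d)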


def get_inference_prompt(
    metainfo,
    speed = 1., tokenizer = "pinyin", polyphone = True,
    target_sample_rate = 24000, n_mel_channels = 100, hop_length = 256, target_rms = 0.1,
    use_truth_duration = False,
    infer_batch_size = 1, num_buckets = 200, min_secs = 3, max_secs = 40,
):
    prompts_all = []

    min_tokens = min_secs * target_sample_rate // hop_length
    max_tokens = max_secs * target_sample_rate // hop_length

    batch_accum = [0] * num_buckets
    utts, ref_rms_list, ref_mels, ref_mel_lens, total_mel_lens, final_text_list = \
        ([[] for _ in range(num_buckets)] for _ in range(6))

    mel_spectrogram = MelSpec(target_sample_rate=target_sample_rate, n_mel_channels=n_mel_channels, hop_length=hop_length)

    for utt, prompt_text, prompt_wav, gt_text, gt_wav in tqdm(metainfo, desc="Processing prompts..."):

        # reference audio: boost quiet prompts up to target_rms, resample if needed
        ref_audio, ref_sr = torchaudio.load(prompt_wav)
        ref_rms = torch.sqrt(torch.mean(torch.square(ref_audio)))
        if ref_rms < target_rms:
            ref_audio = ref_audio * target_rms / ref_rms
        assert ref_audio.shape[-1] > 5000, f"Empty prompt wav: {prompt_wav}, or torchaudio backend issue."
        if ref_sr != target_sample_rate:
            resampler = torchaudio.transforms.Resample(ref_sr, target_sample_rate)
            ref_audio = resampler(ref_audio)

        # text: if the prompt ends with an ASCII char, add a space before the target text
        if len(prompt_text[-1].encode('utf-8')) == 1:
            prompt_text = prompt_text + " "
        text = [prompt_text + gt_text]
        if tokenizer == "pinyin":
            text_list = convert_char_to_pinyin(text, polyphone = polyphone)
        else:
            text_list = text

        # duration: either from the ground-truth audio, or estimated from byte lengths
        ref_mel_len = ref_audio.shape[-1] // hop_length
        if use_truth_duration:
            gt_audio, gt_sr = torchaudio.load(gt_wav)
            if gt_sr != target_sample_rate:
                resampler = torchaudio.transforms.Resample(gt_sr, target_sample_rate)
                gt_audio = resampler(gt_audio)
            total_mel_len = ref_mel_len + int(gt_audio.shape[-1] / hop_length / speed)
        else:
            zh_pause_punc = r"[。,、;:?!]"  # character class, so each pause mark is counted
            ref_text_len = len(prompt_text.encode('utf-8')) + 3 * len(re.findall(zh_pause_punc, prompt_text))
            gen_text_len = len(gt_text.encode('utf-8')) + 3 * len(re.findall(zh_pause_punc, gt_text))
            total_mel_len = ref_mel_len + int(ref_mel_len / ref_text_len * gen_text_len / speed)

        # to mel spectrogram
        ref_mel = mel_spectrogram(ref_audio)
        ref_mel = rearrange(ref_mel, '1 d n -> d n')

        # bucket by total duration so batches contain similar lengths
        assert infer_batch_size > 0, "infer_batch_size should be greater than 0."
        assert min_tokens <= total_mel_len <= max_tokens, \
            f"Audio {utt} has duration {total_mel_len*hop_length//target_sample_rate}s out of range [{min_secs}, {max_secs}]."
        bucket_i = math.floor((total_mel_len - min_tokens) / (max_tokens - min_tokens + 1) * num_buckets)

        utts[bucket_i].append(utt)
        ref_rms_list[bucket_i].append(ref_rms)
        ref_mels[bucket_i].append(ref_mel)
        ref_mel_lens[bucket_i].append(ref_mel_len)
        total_mel_lens[bucket_i].append(total_mel_len)
        final_text_list[bucket_i].extend(text_list)

        batch_accum[bucket_i] += total_mel_len

        if batch_accum[bucket_i] >= infer_batch_size:
            prompts_all.append((
                utts[bucket_i],
                ref_rms_list[bucket_i],
                padded_mel_batch(ref_mels[bucket_i]),
                ref_mel_lens[bucket_i],
                total_mel_lens[bucket_i],
                final_text_list[bucket_i]
            ))
            batch_accum[bucket_i] = 0
            utts[bucket_i], ref_rms_list[bucket_i], ref_mels[bucket_i], ref_mel_lens[bucket_i], total_mel_lens[bucket_i], final_text_list[bucket_i] = [], [], [], [], [], []

    # flush buckets that never reached the batch threshold
    for bucket_i, bucket_frames in enumerate(batch_accum):
        if bucket_frames > 0:
            prompts_all.append((
                utts[bucket_i],
                ref_rms_list[bucket_i],
                padded_mel_batch(ref_mels[bucket_i]),
                ref_mel_lens[bucket_i],
                total_mel_lens[bucket_i],
                final_text_list[bucket_i]
            ))

    # shuffle batches with a fixed seed so multi-GPU splits get comparable workloads
    random.seed(666)
    random.shuffle(prompts_all)

    return prompts_all
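
# Usage sketch (path is illustrative):
#
#     metainfo = get_seedtts_testset_metainfo("data/testset/meta.lst")
#     prompts_all = get_inference_prompt(metainfo, tokenizer="pinyin")
#     for utts, ref_rms_list, ref_mels, ref_mel_lens, total_mel_lens, texts in prompts_all:
#         ...  # run batched inference on one bucket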


def get_seed_tts_test(metalst, gen_wav_dir, gpus):
    with open(metalst) as f:
        lines = f.readlines()

    test_set_ = []
    for line in tqdm(lines):
        if len(line.strip().split('|')) == 5:
            utt, prompt_text, prompt_wav, gt_text, gt_wav = line.strip().split('|')
        elif len(line.strip().split('|')) == 4:
            utt, prompt_text, prompt_wav, gt_text = line.strip().split('|')

        if not os.path.exists(os.path.join(gen_wav_dir, utt + '.wav')):
            continue
        gen_wav = os.path.join(gen_wav_dir, utt + '.wav')
        if not os.path.isabs(prompt_wav):
            prompt_wav = os.path.join(os.path.dirname(metalst), prompt_wav)

        test_set_.append((gen_wav, prompt_wav, gt_text))

    num_jobs = len(gpus)
    if num_jobs == 1:
        return [(gpus[0], test_set_)]

    # split the work roughly evenly across GPUs
    wav_per_job = len(test_set_) // num_jobs + 1
    test_set = []
    for i in range(num_jobs):
        test_set.append((gpus[i], test_set_[i*wav_per_job:(i+1)*wav_per_job]))

    return test_set
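
# Example: two GPUs -> two (gpu_id, subset) jobs (paths illustrative).
#
#     jobs = get_seed_tts_test("meta.lst", "results/gen_wavs", gpus=[0, 1])
#     # -> [(0, first_half), (1, second_half)]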


def get_librispeech_test(metalst, gen_wav_dir, gpus, librispeech_test_clean_path, eval_ground_truth = False):
    with open(metalst) as f:
        lines = f.readlines()

    test_set_ = []
    for line in tqdm(lines):
        ref_utt, ref_dur, ref_txt, gen_utt, gen_dur, gen_txt = line.strip().split('\t')

        if eval_ground_truth:
            gen_spk_id, gen_chaptr_id, _ = gen_utt.split('-')
            gen_wav = os.path.join(librispeech_test_clean_path, gen_spk_id, gen_chaptr_id, gen_utt + '.flac')
        else:
            if not os.path.exists(os.path.join(gen_wav_dir, gen_utt + '.wav')):
                raise FileNotFoundError(f"Generated wav not found: {gen_utt}")
            gen_wav = os.path.join(gen_wav_dir, gen_utt + '.wav')

        ref_spk_id, ref_chaptr_id, _ = ref_utt.split('-')
        ref_wav = os.path.join(librispeech_test_clean_path, ref_spk_id, ref_chaptr_id, ref_utt + '.flac')

        test_set_.append((gen_wav, ref_wav, gen_txt))

    num_jobs = len(gpus)
    if num_jobs == 1:
        return [(gpus[0], test_set_)]

    wav_per_job = len(test_set_) // num_jobs + 1
    test_set = []
    for i in range(num_jobs):
        test_set.append((gpus[i], test_set_[i*wav_per_job:(i+1)*wav_per_job]))

    return test_set


def load_asr_model(lang, ckpt_dir = ""):
    if lang == "zh":
        from funasr import AutoModel
        model = AutoModel(
            model = os.path.join(ckpt_dir, "paraformer-zh"),
            disable_update=True,
        )
    elif lang == "en":
        from faster_whisper import WhisperModel
        model_size = "large-v3" if ckpt_dir == "" else ckpt_dir
        model = WhisperModel(model_size, device="cuda", compute_type="float16")
    return model


def run_asr_wer(args):
    rank, lang, test_set, ckpt_dir = args

    if lang == "zh":
        import zhconv
        torch.cuda.set_device(rank)
    elif lang == "en":
        os.environ["CUDA_VISIBLE_DEVICES"] = str(rank)
    else:
        raise NotImplementedError("lang support only 'zh' (funasr paraformer-zh), 'en' (faster-whisper-large-v3), for now.")

    asr_model = load_asr_model(lang, ckpt_dir = ckpt_dir)

    from zhon.hanzi import punctuation
    punctuation_all = punctuation + string.punctuation
    wers = []

    from jiwer import compute_measures
    for gen_wav, prompt_wav, truth in tqdm(test_set):
        if lang == "zh":
            res = asr_model.generate(input=gen_wav, batch_size_s=300, disable_pbar=True)
            hypo = res[0]["text"]
            hypo = zhconv.convert(hypo, 'zh-cn')
        elif lang == "en":
            segments, _ = asr_model.transcribe(gen_wav, beam_size=5, language="en")
            hypo = ''
            for segment in segments:
                hypo = hypo + ' ' + segment.text

        # strip punctuation and collapse double spaces before scoring
        for x in punctuation_all:
            truth = truth.replace(x, '')
            hypo = hypo.replace(x, '')

        truth = truth.replace('  ', ' ')
        hypo = hypo.replace('  ', ' ')

        if lang == "zh":
            # score Chinese per character (effectively CER)
            truth = " ".join(truth)
            hypo = " ".join(hypo)
        elif lang == "en":
            truth = truth.lower()
            hypo = hypo.lower()

        measures = compute_measures(truth, hypo)
        wer = measures["wer"]

        wers.append(wer)

    return wers
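
# Aggregation sketch (assumes `metalst` and `gen_wav_dir` are defined; single GPU):
#
#     gpu, subset = get_seed_tts_test(metalst, gen_wav_dir, gpus=[0])[0]
#     wers = run_asr_wer((gpu, "en", subset, ""))
#     print(f"WER: {sum(wers) / len(wers):.4f}")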


def run_sim(args):
    rank, test_set, ckpt_dir = args
    device = f"cuda:{rank}"

    model = ECAPA_TDNN_SMALL(feat_dim=1024, feat_type='wavlm_large', config_path=None)
    state_dict = torch.load(ckpt_dir, weights_only=True, map_location=lambda storage, loc: storage)
    model.load_state_dict(state_dict['model'], strict=False)

    use_gpu = torch.cuda.is_available()
    if use_gpu:
        model = model.cuda(device)
    model.eval()

    sim_list = []
    for wav1, wav2, truth in tqdm(test_set):

        wav1, sr1 = torchaudio.load(wav1)
        wav2, sr2 = torchaudio.load(wav2)

        # the speaker-verification model expects 16 kHz input
        resample1 = torchaudio.transforms.Resample(orig_freq=sr1, new_freq=16000)
        resample2 = torchaudio.transforms.Resample(orig_freq=sr2, new_freq=16000)
        wav1 = resample1(wav1)
        wav2 = resample2(wav2)

        if use_gpu:
            wav1 = wav1.cuda(device)
            wav2 = wav2.cuda(device)
        with torch.no_grad():
            emb1 = model(wav1)
            emb2 = model(wav2)

        sim = F.cosine_similarity(emb1, emb2)[0].item()
        sim_list.append(sim)

    return sim_list
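
# Aggregation sketch (checkpoint path illustrative; expects a WavLM-large
# speaker-verification checkpoint compatible with ECAPA_TDNN_SMALL):
#
#     sims = run_sim((0, subset, "ckpts/wavlm_large_finetune.pth"))
#     print(f"SIM: {sum(sims) / len(sims):.4f}")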


def repetition_found(text, length = 2, tolerance = 10):
    # flag texts where any substring of the given length repeats more than `tolerance` times
    pattern_count = defaultdict(int)
    for i in range(len(text) - length + 1):
        pattern = text[i:i + length]
        pattern_count[pattern] += 1
    for pattern, count in pattern_count.items():
        if count > tolerance:
            return True
    return False
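
# Example:
#
#     repetition_found("ab" * 12)    # True  ("ab" occurs 12 times > tolerance 10)
#     repetition_found("abcdefg")    # False (all bigrams unique)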


def load_checkpoint(model, ckpt_path, device, use_ema = True):
    from ema_pytorch import EMA

    ckpt_type = ckpt_path.split(".")[-1]
    if ckpt_type == "safetensors":
        from safetensors.torch import load_file
        checkpoint = load_file(ckpt_path, device=device)
    else:
        checkpoint = torch.load(ckpt_path, weights_only=True, map_location=device)

    if use_ema:
        ema_model = EMA(model, include_online_model = False).to(device)
        if ckpt_type == "safetensors":
            ema_model.load_state_dict(checkpoint)
        else:
            ema_model.load_state_dict(checkpoint['ema_model_state_dict'])
        ema_model.copy_params_from_ema_to_model()
    else:
        model.load_state_dict(checkpoint['model_state_dict'])

    return model
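
# Usage sketch (model class and checkpoint path are illustrative):
#
#     model = MyTTSModel(...)  # hypothetical model instance
#     model = load_checkpoint(model, "ckpts/model.safetensors", device="cuda", use_ema=True)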